src/share/vm/runtime/objectMonitor.cpp

   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/vmSymbols.hpp"


  27 #include "memory/resourceArea.hpp"
  28 #include "oops/markOop.hpp"
  29 #include "oops/oop.inline.hpp"
  30 #include "runtime/handles.inline.hpp"
  31 #include "runtime/interfaceSupport.hpp"
  32 #include "runtime/mutexLocker.hpp"
  33 #include "runtime/objectMonitor.hpp"
  34 #include "runtime/objectMonitor.inline.hpp"
  35 #include "runtime/orderAccess.inline.hpp"
  36 #include "runtime/osThread.hpp"
  37 #include "runtime/stubRoutines.hpp"
  38 #include "runtime/thread.inline.hpp"
  39 #include "services/threadService.hpp"
  40 #include "trace/tracing.hpp"
  41 #include "trace/traceMacros.hpp"
  42 #include "utilities/dtrace.hpp"
  43 #include "utilities/macros.hpp"
  44 #include "utilities/preserveException.hpp"
  45 #ifdef TARGET_OS_FAMILY_linux
  46 # include "os_linux.inline.hpp"
  47 #endif
  48 #ifdef TARGET_OS_FAMILY_solaris
  49 # include "os_solaris.inline.hpp"
  50 #endif
  51 #ifdef TARGET_OS_FAMILY_windows
  52 # include "os_windows.inline.hpp"
  53 #endif
  54 #ifdef TARGET_OS_FAMILY_bsd
  55 # include "os_bsd.inline.hpp"
  56 #endif



  57 
  58 #if defined(__GNUC__) && !defined(IA64) && !defined(PPC64)
  59   // Need to inhibit inlining for older versions of GCC to avoid build-time failures
  60   #define ATTR __attribute__((noinline))
  61 #else
  62   #define ATTR
  63 #endif
  64 
  65 
  66 #ifdef DTRACE_ENABLED
  67 
  68 // Only bother with this argument setup if dtrace is available
  69 // TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.
  70 
  71 
  72 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  73   char* bytes = NULL;                                                      \
  74   int len = 0;                                                             \
  75   jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  76   Symbol* klassname = ((oop)obj)->klass()->name();                         \


 359      assert (_owner == Self      , "invariant") ;
 360      assert (_recursions == 0    , "invariant") ;
 361      assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
 362      Self->_Stalled = 0 ;
 363      return ;
 364   }
 365 
 366   assert (_owner != Self          , "invariant") ;
 367   assert (_succ  != Self          , "invariant") ;
 368   assert (Self->is_Java_thread()  , "invariant") ;
 369   JavaThread * jt = (JavaThread *) Self ;
 370   assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
 371   assert (jt->thread_state() != _thread_blocked   , "invariant") ;
 372   assert (this->object() != NULL  , "invariant") ;
 373   assert (_count >= 0, "invariant") ;
 374 
 375   // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
 376   // Ensure the object-monitor relationship remains stable while there's contention.
 377   Atomic::inc_ptr(&_count);
 378 

 379   EventJavaMonitorEnter event;




 380 
 381   { // Change java thread status to indicate blocked on monitor enter.
 382     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 383 
 384     Self->set_current_pending_monitor(this);
 385 
 386     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
 387     if (JvmtiExport::should_post_monitor_contended_enter()) {
 388       JvmtiExport::post_monitor_contended_enter(jt, this);
 389 
 390       // The current thread does not yet own the monitor and does not
 391       // yet appear on any queues that could make it the successor.
 392       // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
 393       // handler cannot accidentally consume an unpark() meant for the
 394       // ParkEvent associated with this ObjectMonitor.
 395     }
 396 
 397     OSThreadContendState osts(Self->osthread());
 398     ThreadBlockInVM tbivm(jt);
 399 
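
The hunk above bumps _count with Atomic::inc_ptr() before the thread blocks, so that deflate_idle_monitors()/is_busy() treats the monitor as busy and does not deflate it out from under a parked contender. A minimal standalone sketch of that guard pattern, using std::atomic instead of HotSpot's Atomic and ParkEvent APIs (all names here are illustrative, not HotSpot's):

#include <atomic>

// Toy monitor: advertise contention before blocking so a concurrent
// deflation pass sees the monitor as busy (cf. is_busy() / _count).
struct ToyMonitor {
  std::atomic<void*> owner{nullptr};
  std::atomic<int>   contentions{0};   // plays the role of _count

  bool is_busy() const {
    return owner.load(std::memory_order_relaxed) != nullptr ||
           contentions.load(std::memory_order_relaxed) != 0;
  }
};

void contended_enter(ToyMonitor& m, void* self) {
  m.contentions.fetch_add(1, std::memory_order_relaxed);   // like Atomic::inc_ptr(&_count)
  void* expected = nullptr;
  while (!m.owner.compare_exchange_weak(expected, self,
                                        std::memory_order_acquire,
                                        std::memory_order_relaxed)) {
    expected = nullptr;   // the real protocol parks/spins here instead of busy-waiting
  }
  m.contentions.fetch_sub(1, std::memory_order_relaxed);    // no longer contending; now owner
}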


 448   // section.  Amdahl's parallel speedup law comes vividly into play.
 449   //
 450   // Another option might be to aggregate the events (thread local or
 451   // per-monitor aggregation) and defer reporting until a more opportune
 452   // time -- such as next time some thread encounters contention but has
 453   // yet to acquire the lock.  While spinning, that thread could
 454   // increment JVMStat counters, etc.
 455 
 456   DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
 457   if (JvmtiExport::should_post_monitor_contended_entered()) {
 458     JvmtiExport::post_monitor_contended_entered(jt, this);
 459 
 460     // The current thread already owns the monitor and is not going to
 461     // call park() for the remainder of the monitor enter protocol. So
 462     // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
 463     // event handler consumed an unpark() issued by the thread that
 464     // just exited the monitor.
 465   }
 466 
 467   if (event.should_commit()) {
 468     event.set_klass(((oop)this->object())->klass());
 469     event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid);
 470     event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
 471     event.commit();
 472   }
 473 
 474   if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
 475      ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
 476   }
 477 }
 478 
 479 
 480 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 481 // Callers must compensate as needed.
 482 
 483 int ObjectMonitor::TryLock (Thread * Self) {
 484    for (;;) {
 485       void * own = _owner ;
 486       if (own != NULL) return 0 ;
 487       if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
 488          // Either guarantee _recursions == 0 or set _recursions = 0.
 489          assert (_recursions == 0, "invariant") ;
 490          assert (_owner == Self, "invariant") ;


 973        assert(false, "Non-balanced monitor enter/exit!");
 974        if (false) {
 975           THROW(vmSymbols::java_lang_IllegalMonitorStateException());
 976        }
 977        return;
 978      }
 979    }
 980 
 981    if (_recursions != 0) {
 982      _recursions--;        // this is simple recursive enter
 983      TEVENT (Inflated exit - recursive) ;
 984      return ;
 985    }
 986 
 987    // Invariant: after setting Responsible=null a thread must execute
 988    // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
 989    if ((SyncFlags & 4) == 0) {
 990       _Responsible = NULL ;
 991    }
 992 
 993 #if INCLUDE_TRACE
 994    // get the owner's thread id for the MonitorEnter event
 995    // if it is enabled and the thread isn't suspended
 996    if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
 997      _previous_owner_tid = SharedRuntime::get_java_tid(Self);
 998    }
 999 #endif
1000 
1001    for (;;) {
1002       assert (THREAD == _owner, "invariant") ;
1003 
1004 
1005       if (Knob_ExitPolicy == 0) {
1006          // release semantics: prior loads and stores from within the critical section
1007          // must not float (reorder) past the following store that drops the lock.
1008          // On SPARC that requires MEMBAR #loadstore|#storestore.
1009          // But of course in TSO #loadstore|#storestore is not required.
1010          // I'd like to write one of the following:
1011          // A.  OrderAccess::release() ; _owner = NULL
1012          // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
1013          // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
1014          // store into a _dummy variable.  That store is not needed, but can result
1015          // in massive wasteful coherency traffic on classic SMP systems.
1016          // Instead, I use release_store(), which is implemented as just a simple
1017          // ST on x64, x86 and SPARC.
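
The comment above boils down to: dropping the lock only needs release ordering on the store to _owner, and a release-store expresses that directly, without the dummy-variable store that OrderAccess::release()/loadstore() would emit on some platforms. In C++11 atomics the equivalent is a single store with memory_order_release. A sketch, not the HotSpot code; note that the surrounding exit logic (not shown in this hunk) still fences before re-examining EntryList/cxq, since release ordering alone does not keep later loads from moving above the store:

#include <atomic>

// Sketch of the "release_store" exit the comment argues for.
static void unlock(std::atomic<void*>& owner) {
  // Everything the critical section did is ordered before this store;
  // no trailing full fence and no dummy-variable store are needed
  // just to publish the unlock.
  owner.store(nullptr, std::memory_order_release);
}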


1426       }                                                                           \
1427     }                                                                             \
1428   } while (false)
1429 
1430 // check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
1431 // TODO-FIXME: remove check_slow() -- it's likely dead.
1432 
1433 void ObjectMonitor::check_slow(TRAPS) {
1434   TEVENT (check_slow - throw IMSX) ;
1435   assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
1436   THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
1437 }
1438 
1439 static int Adjust (volatile int * adr, int dx) {
1440   int v ;
1441   for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
1442   return v ;
1443 }
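
Adjust() above is the standard compare-and-swap retry loop: add dx to *adr and return the prior value. For comparison, the same operation against std::atomic (a sketch, not a drop-in replacement for the HotSpot helper):

#include <atomic>

// Same effect as Adjust(): atomically add dx and return the old value.
static int adjust(std::atomic<int>& adr, int dx) {
  int v = adr.load(std::memory_order_relaxed);
  // On failure, compare_exchange_weak refreshes v with the current value,
  // so the loop simply retries with up-to-date data.
  while (!adr.compare_exchange_weak(v, v + dx)) {
  }
  return v;
}

// With C++11 atomics the whole loop is equivalent to:
//   int old = adr.fetch_add(dx);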
1444 
1445 // helper method for posting a monitor wait event
1446 void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event,

1447                                                            jlong notifier_tid,
1448                                                            jlong timeout,
1449                                                            bool timedout) {
1450   event->set_klass(((oop)this->object())->klass());
1451   event->set_timeout((TYPE_ULONG)timeout);
1452   event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
1453   event->set_notifier((TYPE_OSTHREAD)notifier_tid);
1454   event->set_timedOut((TYPE_BOOLEAN)timedout);

1455   event->commit();
1456 }
1457 
1458 // -----------------------------------------------------------------------------
1459 // Wait/Notify/NotifyAll
1460 //
1461 // Note: a subset of changes to ObjectMonitor::wait()
1462 // will need to be replicated in complete_exit above
1463 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
1464    Thread * const Self = THREAD ;
1465    assert(Self->is_Java_thread(), "Must be Java thread!");
1466    JavaThread *jt = (JavaThread *)THREAD;
1467 
1468    DeferredInitialize () ;
1469 
1470    // Throw IMSX or IEX.
1471    CHECK_OWNER();
1472 
1473    EventJavaMonitorWait event;
1474 
1475    // check for a pending interrupt
1476    if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1477      // post monitor waited event.  Note that this is past-tense, we are done waiting.
1478      if (JvmtiExport::should_post_monitor_waited()) {
1479         // Note: 'false' parameter is passed here because the
1480         // wait was not timed out due to thread interrupt.
1481         JvmtiExport::post_monitor_waited(jt, this, false);
1482 
1483         // In this short circuit of the monitor wait protocol, the
1484         // current thread never drops ownership of the monitor and
1485         // never gets added to the wait queue so the current thread
1486         // cannot be made the successor. This means that the
1487         // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
1488         // consume an unpark() meant for the ParkEvent associated with
1489         // this ObjectMonitor.
1490      }
1491      if (event.should_commit()) {
1492        post_monitor_wait_event(&event, 0, millis, false);
1493      }
1494      TEVENT (Wait - Throw IEX) ;
1495      THROW(vmSymbols::java_lang_InterruptedException());
1496      return ;
1497    }
1498 
1499    TEVENT (Wait) ;
1500 
1501    assert (Self->_Stalled == 0, "invariant") ;
1502    Self->_Stalled = intptr_t(this) ;
1503    jt->set_current_waiting_monitor(this);
1504 
1505    // create a node to be put into the queue
1506    // Critically, after we reset() the event but prior to park(), we must check
1507    // for a pending interrupt.
1508    ObjectWaiter node(Self);
1509    node.TState = ObjectWaiter::TS_WAIT ;
1510    Self->_ParkEvent->reset() ;
1511    OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
1512 
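
The reset()/fence() pair above, together with the comment a few lines earlier, encodes a store-then-load ordering requirement: the ParkEvent must be reset before the interrupt flag is re-read, otherwise an interrupt that races in around the reset could be missed while its unpark() is swallowed. A toy sketch of that ordering, with std::atomic stand-ins (the real code uses ParkEvent and the thread's interrupt state):

#include <atomic>

struct ToyParkEvent { std::atomic<int> permit{0}; };

// Returns true when it is safe to park; false when a pending interrupt
// should be handled instead of parking.
static bool prepare_to_park(ToyParkEvent& ev, const std::atomic<bool>& interrupted) {
  ev.permit.store(0, std::memory_order_relaxed);        // reset() -- ST into the event
  std::atomic_thread_fence(std::memory_order_seq_cst);  // ~ OrderAccess::fence(): ST before LD
  if (interrupted.load(std::memory_order_relaxed)) {    // re-check *after* the reset
    return false;
  }
  return true;                                           // a later unpark() will set the permit
}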


1616          // In this part of the monitor wait-notify-reenter protocol it
1617          // is possible (and normal) for another thread to do a fastpath
1618          // monitor enter-exit while this thread is still trying to get
1619          // to the reenter portion of the protocol.
1620          //
1621          // The ObjectMonitor was notified and the current thread is
1622          // the successor which also means that an unpark() has already
1623          // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
1624          // consume the unpark() that was done when the successor was
1625          // set because the same ParkEvent is shared between Java
1626          // monitors and JVM/TI RawMonitors (for now).
1627          //
1628          // We redo the unpark() to ensure forward progress, i.e., we
1629          // don't want all pending threads hanging (parked) with none
1630          // entering the unlocked monitor.
1631          node._event->unpark();
1632        }
1633      }
1634 
1635      if (event.should_commit()) {
1636        post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
1637      }
1638 
1639      OrderAccess::fence() ;
1640 
1641      assert (Self->_Stalled != 0, "invariant") ;
1642      Self->_Stalled = 0 ;
1643 
1644      assert (_owner != Self, "invariant") ;
1645      ObjectWaiter::TStates v = node.TState ;
1646      if (v == ObjectWaiter::TS_RUN) {
1647          enter (Self) ;
1648      } else {
1649          guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
1650          ReenterI (Self, &node) ;
1651          node.wait_reenter_end(this);
1652      }
1653 
1654      // Self has reacquired the lock.
1655      // Lifecycle - the node representing Self must not appear on any queues.
1656      // Node is about to go out-of-scope, but even if it were immortal we wouldn't


1699   CHECK_OWNER();
1700   if (_WaitSet == NULL) {
1701      TEVENT (Empty-Notify) ;
1702      return ;
1703   }
1704   DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
1705 
1706   int Policy = Knob_MoveNotifyee ;
1707 
1708   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
1709   ObjectWaiter * iterator = DequeueWaiter() ;
1710   if (iterator != NULL) {
1711      TEVENT (Notify1 - Transfer) ;
1712      guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
1713      guarantee (iterator->_notified == 0, "invariant") ;
1714      if (Policy != 4) {
1715         iterator->TState = ObjectWaiter::TS_ENTER ;
1716      }
1717      iterator->_notified = 1 ;
1718      Thread * Self = THREAD;
1719      iterator->_notifier_tid = Self->osthread()->thread_id();
1720 
1721      ObjectWaiter * List = _EntryList ;
1722      if (List != NULL) {
1723         assert (List->_prev == NULL, "invariant") ;
1724         assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1725         assert (List != iterator, "invariant") ;
1726      }
1727 
1728      if (Policy == 0) {       // prepend to EntryList
1729          if (List == NULL) {
1730              iterator->_next = iterator->_prev = NULL ;
1731              _EntryList = iterator ;
1732          } else {
1733              List->_prev = iterator ;
1734              iterator->_next = List ;
1735              iterator->_prev = NULL ;
1736              _EntryList = iterator ;
1737         }
1738      } else
1739      if (Policy == 1) {      // append to EntryList


1825 
1826   int Policy = Knob_MoveNotifyee ;
1827   int Tally = 0 ;
1828   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
1829 
1830   for (;;) {
1831      iterator = DequeueWaiter () ;
1832      if (iterator == NULL) break ;
1833      TEVENT (NotifyAll - Transfer1) ;
1834      ++Tally ;
1835 
1836      // Disposition - what might we do with iterator ?
1837      // a.  add it directly to the EntryList - either tail or head.
1838      // b.  push it onto the front of the _cxq.
1839      // For now we use (a).
1840 
1841      guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
1842      guarantee (iterator->_notified == 0, "invariant") ;
1843      iterator->_notified = 1 ;
1844      Thread * Self = THREAD;
1845      iterator->_notifier_tid = Self->osthread()->thread_id();
1846      if (Policy != 4) {
1847         iterator->TState = ObjectWaiter::TS_ENTER ;
1848      }
1849 
1850      ObjectWaiter * List = _EntryList ;
1851      if (List != NULL) {
1852         assert (List->_prev == NULL, "invariant") ;
1853         assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1854         assert (List != iterator, "invariant") ;
1855      }
1856 
1857      if (Policy == 0) {       // prepend to EntryList
1858          if (List == NULL) {
1859              iterator->_next = iterator->_prev = NULL ;
1860              _EntryList = iterator ;
1861          } else {
1862              List->_prev = iterator ;
1863              iterator->_next = List ;
1864              iterator->_prev = NULL ;
1865              _EntryList = iterator ;




   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/vmSymbols.hpp"
  27 #include "jfr/jfrEvents.hpp"
  28 #include "jfr/support/jfrThreadId.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "oops/markOop.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "runtime/handles.inline.hpp"
  33 #include "runtime/interfaceSupport.hpp"
  34 #include "runtime/mutexLocker.hpp"
  35 #include "runtime/objectMonitor.hpp"
  36 #include "runtime/objectMonitor.inline.hpp"
  37 #include "runtime/orderAccess.inline.hpp"
  38 #include "runtime/osThread.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "runtime/thread.inline.hpp"
  41 #include "services/threadService.hpp"


  42 #include "utilities/dtrace.hpp"
  43 #include "utilities/macros.hpp"
  44 #include "utilities/preserveException.hpp"
  45 #ifdef TARGET_OS_FAMILY_linux
  46 # include "os_linux.inline.hpp"
  47 #endif
  48 #ifdef TARGET_OS_FAMILY_solaris
  49 # include "os_solaris.inline.hpp"
  50 #endif
  51 #ifdef TARGET_OS_FAMILY_windows
  52 # include "os_windows.inline.hpp"
  53 #endif
  54 #ifdef TARGET_OS_FAMILY_bsd
  55 # include "os_bsd.inline.hpp"
  56 #endif
  57 #if INCLUDE_JFR
  58 #include "jfr/support/jfrFlush.hpp"
  59 #endif
  60 
  61 #if defined(__GNUC__) && !defined(IA64) && !defined(PPC64)
  62   // Need to inhibit inlining for older versions of GCC to avoid build-time failures
  63   #define ATTR __attribute__((noinline))
  64 #else
  65   #define ATTR
  66 #endif
  67 
  68 
  69 #ifdef DTRACE_ENABLED
  70 
  71 // Only bother with this argument setup if dtrace is available
  72 // TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.
  73 
  74 
  75 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  76   char* bytes = NULL;                                                      \
  77   int len = 0;                                                             \
  78   jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  79   Symbol* klassname = ((oop)obj)->klass()->name();                         \


 362      assert (_owner == Self      , "invariant") ;
 363      assert (_recursions == 0    , "invariant") ;
 364      assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
 365      Self->_Stalled = 0 ;
 366      return ;
 367   }
 368 
 369   assert (_owner != Self          , "invariant") ;
 370   assert (_succ  != Self          , "invariant") ;
 371   assert (Self->is_Java_thread()  , "invariant") ;
 372   JavaThread * jt = (JavaThread *) Self ;
 373   assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
 374   assert (jt->thread_state() != _thread_blocked   , "invariant") ;
 375   assert (this->object() != NULL  , "invariant") ;
 376   assert (_count >= 0, "invariant") ;
 377 
 378   // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
 379   // Ensure the object-monitor relationship remains stable while there's contention.
 380   Atomic::inc_ptr(&_count);
 381 
 382   JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
 383   EventJavaMonitorEnter event;
 384   if (event.should_commit()) {
 385     event.set_monitorClass(((oop)this->object())->klass());
 386     event.set_address((uintptr_t)(this->object_addr()));
 387   }
 388 
 389   { // Change java thread status to indicate blocked on monitor enter.
 390     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 391 
 392     Self->set_current_pending_monitor(this);
 393 
 394     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
 395     if (JvmtiExport::should_post_monitor_contended_enter()) {
 396       JvmtiExport::post_monitor_contended_enter(jt, this);
 397 
 398       // The current thread does not yet own the monitor and does not
 399       // yet appear on any queues that could make it the successor.
 400       // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
 401       // handler cannot accidentally consume an unpark() meant for the
 402       // ParkEvent associated with this ObjectMonitor.
 403     }
 404 
 405     OSThreadContendState osts(Self->osthread());
 406     ThreadBlockInVM tbivm(jt);
 407 


 456   // section.  Amdahl's parallel speedup law comes vividly into play.
 457   //
 458   // Another option might be to aggregate the events (thread local or
 459   // per-monitor aggregation) and defer reporting until a more opportune
 460   // time -- such as next time some thread encounters contention but has
 461   // yet to acquire the lock.  While spinning, that thread could
 462   // increment JVMStat counters, etc.
 463 
 464   DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
 465   if (JvmtiExport::should_post_monitor_contended_entered()) {
 466     JvmtiExport::post_monitor_contended_entered(jt, this);
 467 
 468     // The current thread already owns the monitor and is not going to
 469     // call park() for the remainder of the monitor enter protocol. So
 470     // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
 471     // event handler consumed an unpark() issued by the thread that
 472     // just exited the monitor.
 473   }
 474 
 475   if (event.should_commit()) {
 476     event.set_previousOwner((uintptr_t)_previous_owner_tid);


 477     event.commit();
 478   }
 479 
 480   if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
 481      ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
 482   }
 483 }
 484 
 485 
 486 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 487 // Callers must compensate as needed.
 488 
 489 int ObjectMonitor::TryLock (Thread * Self) {
 490    for (;;) {
 491       void * own = _owner ;
 492       if (own != NULL) return 0 ;
 493       if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
 494          // Either guarantee _recursions == 0 or set _recursions = 0.
 495          assert (_recursions == 0, "invariant") ;
 496          assert (_owner == Self, "invariant") ;


 979        assert(false, "Non-balanced monitor enter/exit!");
 980        if (false) {
 981           THROW(vmSymbols::java_lang_IllegalMonitorStateException());
 982        }
 983        return;
 984      }
 985    }
 986 
 987    if (_recursions != 0) {
 988      _recursions--;        // this is simple recursive enter
 989      TEVENT (Inflated exit - recursive) ;
 990      return ;
 991    }
 992 
 993    // Invariant: after setting Responsible=null a thread must execute
 994    // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
 995    if ((SyncFlags & 4) == 0) {
 996       _Responsible = NULL ;
 997    }
 998 
 999 #if INCLUDE_JFR
1000    // get the owner's thread id for the MonitorEnter event
1001    // if it is enabled and the thread isn't suspended
1002    if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
1003      _previous_owner_tid = JFR_THREAD_ID(Self);
1004    }
1005 #endif
1006 
1007    for (;;) {
1008       assert (THREAD == _owner, "invariant") ;
1009 
1010 
1011       if (Knob_ExitPolicy == 0) {
1012          // release semantics: prior loads and stores from within the critical section
1013          // must not float (reorder) past the following store that drops the lock.
1014          // On SPARC that requires MEMBAR #loadstore|#storestore.
1015          // But of course in TSO #loadstore|#storestore is not required.
1016          // I'd like to write one of the following:
1017          // A.  OrderAccess::release() ; _owner = NULL
1018          // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
1019          // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
1020          // store into a _dummy variable.  That store is not needed, but can result
1021          // in massive wasteful coherency traffic on classic SMP systems.
1022          // Instead, I use release_store(), which is implemented as just a simple
1023          // ST on x64, x86 and SPARC.


1432       }                                                                           \
1433     }                                                                             \
1434   } while (false)
1435 
1436 // check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
1437 // TODO-FIXME: remove check_slow() -- it's likely dead.
1438 
1439 void ObjectMonitor::check_slow(TRAPS) {
1440   TEVENT (check_slow - throw IMSX) ;
1441   assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
1442   THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
1443 }
1444 
1445 static int Adjust (volatile int * adr, int dx) {
1446   int v ;
1447   for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
1448   return v ;
1449 }
1450 
1451 // helper method for posting a monitor wait event
1452 static void post_monitor_wait_event(EventJavaMonitorWait* event,
1453                                     ObjectMonitor* monitor,
1454                                     jlong notifier_tid,
1455                                     jlong timeout,
1456                                     bool timedout) {
1457   assert(monitor != NULL, "invariant");
1458   event->set_monitorClass(((oop)monitor->object())->klass());
1459   event->set_timeout(timeout);
1460   event->set_address((uintptr_t)monitor->object_addr());
1461   event->set_notifier((u8)notifier_tid);
1462   event->set_timedOut(timedout);
1463   event->commit();
1464 }
1465 
1466 // -----------------------------------------------------------------------------
1467 // Wait/Notify/NotifyAll
1468 //
1469 // Note: a subset of changes to ObjectMonitor::wait()
1470 // will need to be replicated in complete_exit above
1471 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
1472    Thread * const Self = THREAD ;
1473    assert(Self->is_Java_thread(), "Must be Java thread!");
1474    JavaThread *jt = (JavaThread *)THREAD;
1475 
1476    DeferredInitialize () ;
1477 
1478    // Throw IMSX or IEX.
1479    CHECK_OWNER();
1480 
1481    EventJavaMonitorWait event;
1482 
1483    // check for a pending interrupt
1484    if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1485      // post monitor waited event.  Note that this is past-tense, we are done waiting.
1486      if (JvmtiExport::should_post_monitor_waited()) {
1487         // Note: 'false' parameter is passed here because the
1488         // wait was not timed out due to thread interrupt.
1489         JvmtiExport::post_monitor_waited(jt, this, false);
1490 
1491         // In this short circuit of the monitor wait protocol, the
1492         // current thread never drops ownership of the monitor and
1493         // never gets added to the wait queue so the current thread
1494         // cannot be made the successor. This means that the
1495         // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
1496         // consume an unpark() meant for the ParkEvent associated with
1497         // this ObjectMonitor.
1498      }
1499      if (event.should_commit()) {
1500        post_monitor_wait_event(&event, this, 0, millis, false);
1501      }
1502      TEVENT (Wait - Throw IEX) ;
1503      THROW(vmSymbols::java_lang_InterruptedException());
1504      return ;
1505    }
1506 
1507    TEVENT (Wait) ;
1508 
1509    assert (Self->_Stalled == 0, "invariant") ;
1510    Self->_Stalled = intptr_t(this) ;
1511    jt->set_current_waiting_monitor(this);
1512 
1513    // create a node to be put into the queue
1514    // Critically, after we reset() the event but prior to park(), we must check
1515    // for a pending interrupt.
1516    ObjectWaiter node(Self);
1517    node.TState = ObjectWaiter::TS_WAIT ;
1518    Self->_ParkEvent->reset() ;
1519    OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
1520 


1624          // In this part of the monitor wait-notify-reenter protocol it
1625          // is possible (and normal) for another thread to do a fastpath
1626          // monitor enter-exit while this thread is still trying to get
1627          // to the reenter portion of the protocol.
1628          //
1629          // The ObjectMonitor was notified and the current thread is
1630          // the successor which also means that an unpark() has already
1631          // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
1632          // consume the unpark() that was done when the successor was
1633          // set because the same ParkEvent is shared between Java
1634          // monitors and JVM/TI RawMonitors (for now).
1635          //
1636          // We redo the unpark() to ensure forward progress, i.e., we
1637          // don't want all pending threads hanging (parked) with none
1638          // entering the unlocked monitor.
1639          node._event->unpark();
1640        }
1641      }
1642 
1643      if (event.should_commit()) {
1644        post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
1645      }
1646 
1647      OrderAccess::fence() ;
1648 
1649      assert (Self->_Stalled != 0, "invariant") ;
1650      Self->_Stalled = 0 ;
1651 
1652      assert (_owner != Self, "invariant") ;
1653      ObjectWaiter::TStates v = node.TState ;
1654      if (v == ObjectWaiter::TS_RUN) {
1655          enter (Self) ;
1656      } else {
1657          guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
1658          ReenterI (Self, &node) ;
1659          node.wait_reenter_end(this);
1660      }
1661 
1662      // Self has reacquired the lock.
1663      // Lifecycle - the node representing Self must not appear on any queues.
1664      // Node is about to go out-of-scope, but even if it were immortal we wouldn't


1707   CHECK_OWNER();
1708   if (_WaitSet == NULL) {
1709      TEVENT (Empty-Notify) ;
1710      return ;
1711   }
1712   DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
1713 
1714   int Policy = Knob_MoveNotifyee ;
1715 
1716   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
1717   ObjectWaiter * iterator = DequeueWaiter() ;
1718   if (iterator != NULL) {
1719      TEVENT (Notify1 - Transfer) ;
1720      guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
1721      guarantee (iterator->_notified == 0, "invariant") ;
1722      if (Policy != 4) {
1723         iterator->TState = ObjectWaiter::TS_ENTER ;
1724      }
1725      iterator->_notified = 1 ;
1726      Thread * Self = THREAD;
1727      iterator->_notifier_tid = JFR_THREAD_ID(Self);
1728 
1729      ObjectWaiter * List = _EntryList ;
1730      if (List != NULL) {
1731         assert (List->_prev == NULL, "invariant") ;
1732         assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1733         assert (List != iterator, "invariant") ;
1734      }
1735 
1736      if (Policy == 0) {       // prepend to EntryList
1737          if (List == NULL) {
1738              iterator->_next = iterator->_prev = NULL ;
1739              _EntryList = iterator ;
1740          } else {
1741              List->_prev = iterator ;
1742              iterator->_next = List ;
1743              iterator->_prev = NULL ;
1744              _EntryList = iterator ;
1745         }
1746      } else
1747      if (Policy == 1) {      // append to EntryList


1833 
1834   int Policy = Knob_MoveNotifyee ;
1835   int Tally = 0 ;
1836   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
1837 
1838   for (;;) {
1839      iterator = DequeueWaiter () ;
1840      if (iterator == NULL) break ;
1841      TEVENT (NotifyAll - Transfer1) ;
1842      ++Tally ;
1843 
1844      // Disposition - what might we do with iterator ?
1845      // a.  add it directly to the EntryList - either tail or head.
1846      // b.  push it onto the front of the _cxq.
1847      // For now we use (a).
1848 
1849      guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
1850      guarantee (iterator->_notified == 0, "invariant") ;
1851      iterator->_notified = 1 ;
1852      Thread * Self = THREAD;
1853      iterator->_notifier_tid = JFR_THREAD_ID(Self);
1854      if (Policy != 4) {
1855         iterator->TState = ObjectWaiter::TS_ENTER ;
1856      }
1857 
1858      ObjectWaiter * List = _EntryList ;
1859      if (List != NULL) {
1860         assert (List->_prev == NULL, "invariant") ;
1861         assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1862         assert (List != iterator, "invariant") ;
1863      }
1864 
1865      if (Policy == 0) {       // prepend to EntryList
1866          if (List == NULL) {
1867              iterator->_next = iterator->_prev = NULL ;
1868              _EntryList = iterator ;
1869          } else {
1870              List->_prev = iterator ;
1871              iterator->_next = List ;
1872              iterator->_prev = NULL ;
1873              _EntryList = iterator ;

