19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/markOop.hpp"
29 #include "oops/oop.inline.hpp"
30 #include "runtime/handles.inline.hpp"
31 #include "runtime/interfaceSupport.hpp"
32 #include "runtime/mutexLocker.hpp"
33 #include "runtime/objectMonitor.hpp"
34 #include "runtime/objectMonitor.inline.hpp"
35 #include "runtime/osThread.hpp"
36 #include "runtime/stubRoutines.hpp"
37 #include "runtime/thread.inline.hpp"
38 #include "services/threadService.hpp"
39 #include "utilities/dtrace.hpp"
40 #include "utilities/preserveException.hpp"
41 #ifdef TARGET_OS_FAMILY_linux
42 # include "os_linux.inline.hpp"
43 #endif
44 #ifdef TARGET_OS_FAMILY_solaris
45 # include "os_solaris.inline.hpp"
46 #endif
47 #ifdef TARGET_OS_FAMILY_windows
48 # include "os_windows.inline.hpp"
49 #endif
50 #ifdef TARGET_OS_FAMILY_bsd
51 # include "os_bsd.inline.hpp"
52 #endif
53
54 #if defined(__GNUC__) && !defined(IA64)
55 // Need to inhibit inlining for older versions of GCC to avoid build-time failures
56 #define ATTR __attribute__((noinline))
57 #else
58 #define ATTR
59 #endif
354 assert (_owner == Self , "invariant") ;
355 assert (_recursions == 0 , "invariant") ;
356 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
357 Self->_Stalled = 0 ;
358 return ;
359 }
360
361 assert (_owner != Self , "invariant") ;
362 assert (_succ != Self , "invariant") ;
363 assert (Self->is_Java_thread() , "invariant") ;
364 JavaThread * jt = (JavaThread *) Self ;
365 assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
366 assert (jt->thread_state() != _thread_blocked , "invariant") ;
367 assert (this->object() != NULL , "invariant") ;
368 assert (_count >= 0, "invariant") ;
369
370 // Prevent deflation at STW-time. See deflate_idle_monitors() and is_busy().
371 // Ensure the object-monitor relationship remains stable while there's contention.
372 Atomic::inc_ptr(&_count);
373
374 { // Change java thread status to indicate blocked on monitor enter.
375 JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
376
377 DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
378 if (JvmtiExport::should_post_monitor_contended_enter()) {
379 JvmtiExport::post_monitor_contended_enter(jt, this);
380 }
381
382 OSThreadContendState osts(Self->osthread());
383 ThreadBlockInVM tbivm(jt);
384
385 Self->set_current_pending_monitor(this);
386
387 // TODO-FIXME: change the following for(;;) loop to straight-line code.
388 for (;;) {
389 jt->set_suspend_equivalent();
390 // cleared by handle_special_suspend_equivalent_condition()
391 // or java_suspend_self()
392
393 EnterI (THREAD) ;
394
395 if (!ExitSuspendEquivalent(jt)) break ;
396
397 //
398 // We have acquired the contended monitor, but while we were
399 // waiting another thread suspended us. We don't want to enter
400 // the monitor while suspended because that would surprise the
401 // thread that suspended us.
402 //
403 _recursions = 0 ;
404 _succ = NULL ;
405 exit (Self) ;
406
407 jt->java_suspend_self();
408 }
409 Self->set_current_pending_monitor(NULL);
410 }
411
412 Atomic::dec_ptr(&_count);
413 assert (_count >= 0, "invariant") ;
414 Self->_Stalled = 0 ;
415
416 // Must either set _recursions = 0 or ASSERT _recursions == 0.
417 assert (_recursions == 0 , "invariant") ;
418 assert (_owner == Self , "invariant") ;
419 assert (_succ != Self , "invariant") ;
420 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
421
422 // The thread -- now the owner -- is back in vm mode.
423 // Report the glorious news via TI,DTrace and jvmstat.
424 // The probe effect is non-trivial. All the reportage occurs
425 // while we hold the monitor, increasing the length of the critical
426 // section. Amdahl's parallel speedup law comes vividly into play.
427 //
428 // Another option might be to aggregate the events (thread local or
429 // per-monitor aggregation) and defer reporting until a more opportune
430 // time -- such as next time some thread encounters contention but has
431 // yet to acquire the lock. While spinning that thread could
432 // spinning we could increment JVMStat counters, etc.
433
434 DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
435 if (JvmtiExport::should_post_monitor_contended_entered()) {
436 JvmtiExport::post_monitor_contended_entered(jt, this);
437 }
438 if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
439 ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
440 }
441 }
442
443
444 // Caveat: TryLock() is not necessarily serializing if it returns failure.
445 // Callers must compensate as needed.
446
447 int ObjectMonitor::TryLock (Thread * Self) {
448 for (;;) {
449 void * own = _owner ;
450 if (own != NULL) return 0 ;
451 if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
452 // Either guarantee _recursions == 0 or set _recursions = 0.
453 assert (_recursions == 0, "invariant") ;
454 assert (_owner == Self, "invariant") ;
455 // CONSIDER: set or assert that OwnerIsThread == 1
456 return 1 ;
457 }
900 //
901 // The CAS() in enter provides for safety and exclusion, while the CAS or
902 // MEMBAR in exit provides for progress and avoids stranding. 1-0 locking
903 // eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
904 // We detect and recover from stranding with timers.
905 //
906 // If a thread transiently strands it'll park until (a) another
907 // thread acquires the lock and then drops the lock, at which time the
908 // exiting thread will notice and unpark the stranded thread, or, (b)
909 // the timer expires. If the lock is high traffic then the stranding latency
910 // will be low due to (a). If the lock is low traffic then the odds of
911 // stranding are lower, although the worst-case stranding latency
912 // is longer. Critically, we don't want to put excessive load in the
913 // platform's timer subsystem. We want to minimize both the timer injection
914 // rate (timers created/sec) as well as the number of timers active at
915 // any one time. (more precisely, we want to minimize timer-seconds, which is
916 // the integral of the # of active timers at any instant over time).
917 // Both impinge on OS scalability. Given that, at most one thread parked on
918 // a monitor will use a timer.
919
920 void ATTR ObjectMonitor::exit(TRAPS) {
921 Thread * Self = THREAD ;
922 if (THREAD != _owner) {
923 if (THREAD->is_lock_owned((address) _owner)) {
924 // Transmute _owner from a BasicLock pointer to a Thread address.
925 // We don't need to hold _mutex for this transition.
926 // Non-null to Non-null is safe as long as all readers can
927 // tolerate either flavor.
928 assert (_recursions == 0, "invariant") ;
929 _owner = THREAD ;
930 _recursions = 0 ;
931 OwnerIsThread = 1 ;
932 } else {
933 // NOTE: we need to handle unbalanced monitor enter/exit
934 // in native code by throwing an exception.
935 // TODO: Throw an IllegalMonitorStateException ?
936 TEVENT (Exit - Throw IMSX) ;
937 assert(false, "Non-balanced monitor enter/exit!");
938 if (false) {
939 THROW(vmSymbols::java_lang_IllegalMonitorStateException());
940 }
941 return;
942 }
943 }
944
945 if (_recursions != 0) {
946 _recursions--; // this is simple recursive enter
947 TEVENT (Inflated exit - recursive) ;
948 return ;
949 }
950
951 // Invariant: after setting Responsible=null an thread must execute
952 // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
953 if ((SyncFlags & 4) == 0) {
954 _Responsible = NULL ;
955 }
956
957 for (;;) {
958 assert (THREAD == _owner, "invariant") ;
959
960
961 if (Knob_ExitPolicy == 0) {
962 // release semantics: prior loads and stores from within the critical section
963 // must not float (reorder) past the following store that drops the lock.
964 // On SPARC that requires MEMBAR #loadstore|#storestore.
965 // But of course in TSO #loadstore|#storestore is not required.
966 // I'd like to write one of the following:
967 // A. OrderAccess::release() ; _owner = NULL
968 // B. OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
969 // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
970 // store into a _dummy variable. That store is not needed, but can result
971 // in massive wasteful coherency traffic on classic SMP systems.
972 // Instead, I use release_store(), which is implemented as just a simple
973 // ST on x64, x86 and SPARC.
974 OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock
975 OrderAccess::storeload() ; // See if we need to wake a successor
976 if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
1326 // thread due to contention.
1327 intptr_t ObjectMonitor::complete_exit(TRAPS) {
1328 Thread * const Self = THREAD;
1329 assert(Self->is_Java_thread(), "Must be Java thread!");
1330 JavaThread *jt = (JavaThread *)THREAD;
1331
1332 DeferredInitialize();
1333
1334 if (THREAD != _owner) {
1335 if (THREAD->is_lock_owned ((address)_owner)) {
1336 assert(_recursions == 0, "internal state error");
1337 _owner = THREAD ; /* Convert from basiclock addr to Thread addr */
1338 _recursions = 0 ;
1339 OwnerIsThread = 1 ;
1340 }
1341 }
1342
1343 guarantee(Self == _owner, "complete_exit not owner");
1344 intptr_t save = _recursions; // record the old recursion count
1345 _recursions = 0; // set the recursion level to be 0
1346 exit (Self) ; // exit the monitor
1347 guarantee (_owner != Self, "invariant");
1348 return save;
1349 }
1350
1351 // reenter() enters a lock and sets recursion count
1352 // complete_exit/reenter operate as a wait without waiting
1353 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1354 Thread * const Self = THREAD;
1355 assert(Self->is_Java_thread(), "Must be Java thread!");
1356 JavaThread *jt = (JavaThread *)THREAD;
1357
1358 guarantee(_owner != Self, "reenter already owner");
1359 enter (THREAD); // enter the monitor
1360 guarantee (_recursions == 0, "reenter recursion");
1361 _recursions = recursions;
1362 return;
1363 }
1364
1365
1366 // -----------------------------------------------------------------------------
1380 TEVENT (Throw IMSX) ; \
1381 THROW(vmSymbols::java_lang_IllegalMonitorStateException()); \
1382 } \
1383 } \
1384 } while (false)
1385
1386 // check_slow() is a misnomer. It's called simply to throw an IMSX exception.
1387 // TODO-FIXME: remove check_slow() -- it's likely dead.
1388
// Slow path of the owner check: reached only when the calling thread
// does not own this monitor, so it unconditionally throws
// IllegalMonitorStateException back to the caller.
void ObjectMonitor::check_slow(TRAPS) {
  TEVENT (check_slow - throw IMSX) ;
  // Callers have already determined non-ownership; assert that here.
  assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
  THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
}
1394
1395 static int Adjust (volatile int * adr, int dx) {
1396 int v ;
1397 for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
1398 return v ;
1399 }
1400 // -----------------------------------------------------------------------------
1401 // Wait/Notify/NotifyAll
1402 //
1403 // Note: a subset of changes to ObjectMonitor::wait()
1404 // will need to be replicated in complete_exit above
1405 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
1406 Thread * const Self = THREAD ;
1407 assert(Self->is_Java_thread(), "Must be Java thread!");
1408 JavaThread *jt = (JavaThread *)THREAD;
1409
1410 DeferredInitialize () ;
1411
1412 // Throw IMSX or IEX.
1413 CHECK_OWNER();
1414
1415 // check for a pending interrupt
1416 if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1417 // post monitor waited event. Note that this is past-tense, we are done waiting.
1418 if (JvmtiExport::should_post_monitor_waited()) {
1419 // Note: 'false' parameter is passed here because the
1420 // wait was not timed out due to thread interrupt.
1421 JvmtiExport::post_monitor_waited(jt, this, false);
1422 }
1423 TEVENT (Wait - Throw IEX) ;
1424 THROW(vmSymbols::java_lang_InterruptedException());
1425 return ;
1426 }
1427 TEVENT (Wait) ;
1428
1429 assert (Self->_Stalled == 0, "invariant") ;
1430 Self->_Stalled = intptr_t(this) ;
1431 jt->set_current_waiting_monitor(this);
1432
1433 // create a node to be put into the queue
1434 // Critically, after we reset() the event but prior to park(), we must check
1435 // for a pending interrupt.
1436 ObjectWaiter node(Self);
1437 node.TState = ObjectWaiter::TS_WAIT ;
1438 Self->_ParkEvent->reset() ;
1439 OrderAccess::fence(); // ST into Event; membar ; LD interrupted-flag
1440
1441 // Enter the waiting queue, which is a circular doubly linked list in this case
1442 // but it could be a priority queue or any data structure.
1443 // _WaitSetLock protects the wait queue. Normally the wait queue is accessed only
1444 // by the the owner of the monitor *except* in the case where park()
1445 // returns because of a timeout of interrupt. Contention is exceptionally rare
1446 // so we use a simple spin-lock instead of a heavier-weight blocking lock.
1447
1448 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
1449 AddWaiter (&node) ;
1450 Thread::SpinRelease (&_WaitSetLock) ;
1451
1452 if ((SyncFlags & 4) == 0) {
1453 _Responsible = NULL ;
1454 }
1455 intptr_t save = _recursions; // record the old recursion count
1456 _waiters++; // increment the number of waiters
1457 _recursions = 0; // set the recursion level to be 1
1458 exit (Self) ; // exit the monitor
1459 guarantee (_owner != Self, "invariant") ;
1460
1461 // As soon as the ObjectMonitor's ownership is dropped in the exit()
1462 // call above, another thread can enter() the ObjectMonitor, do the
1463 // notify(), and exit() the ObjectMonitor. If the other thread's
1464 // exit() call chooses this thread as the successor and the unpark()
1465 // call happens to occur while this thread is posting a
1466 // MONITOR_CONTENDED_EXIT event, then we run the risk of the event
1467 // handler using RawMonitors and consuming the unpark().
1468 //
1469 // To avoid the problem, we re-post the event. This does no harm
1470 // even if the original unpark() was not consumed because we are the
1471 // chosen successor for this monitor.
1472 if (node._notified != 0 && _succ == Self) {
1473 node._event->unpark();
1474 }
1475
1476 // The thread is on the WaitSet list - now park() it.
1477 // On MP systems it's conceivable that a brief spin before we park
1478 // could be profitable.
1538 // The thread is now either on off-list (TS_RUN),
1539 // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
1540 // The Node's TState variable is stable from the perspective of this thread.
1541 // No other threads will asynchronously modify TState.
1542 guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ;
1543 OrderAccess::loadload() ;
1544 if (_succ == Self) _succ = NULL ;
1545 WasNotified = node._notified ;
1546
1547 // Reentry phase -- reacquire the monitor.
1548 // re-enter contended monitor after object.wait().
1549 // retain OBJECT_WAIT state until re-enter successfully completes
1550 // Thread state is thread_in_vm and oop access is again safe,
1551 // although the raw address of the object may have changed.
1552 // (Don't cache naked oops over safepoints, of course).
1553
1554 // post monitor waited event. Note that this is past-tense, we are done waiting.
1555 if (JvmtiExport::should_post_monitor_waited()) {
1556 JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
1557 }
1558 OrderAccess::fence() ;
1559
1560 assert (Self->_Stalled != 0, "invariant") ;
1561 Self->_Stalled = 0 ;
1562
1563 assert (_owner != Self, "invariant") ;
1564 ObjectWaiter::TStates v = node.TState ;
1565 if (v == ObjectWaiter::TS_RUN) {
1566 enter (Self) ;
1567 } else {
1568 guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
1569 ReenterI (Self, &node) ;
1570 node.wait_reenter_end(this);
1571 }
1572
1573 // Self has reacquired the lock.
1574 // Lifecycle - the node representing Self must not appear on any queues.
1575 // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1576 // want residual elements associated with this thread left on any lists.
1577 guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
1617 void ObjectMonitor::notify(TRAPS) {
1618 CHECK_OWNER();
1619 if (_WaitSet == NULL) {
1620 TEVENT (Empty-Notify) ;
1621 return ;
1622 }
1623 DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
1624
1625 int Policy = Knob_MoveNotifyee ;
1626
1627 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
1628 ObjectWaiter * iterator = DequeueWaiter() ;
1629 if (iterator != NULL) {
1630 TEVENT (Notify1 - Transfer) ;
1631 guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
1632 guarantee (iterator->_notified == 0, "invariant") ;
1633 if (Policy != 4) {
1634 iterator->TState = ObjectWaiter::TS_ENTER ;
1635 }
1636 iterator->_notified = 1 ;
1637
1638 ObjectWaiter * List = _EntryList ;
1639 if (List != NULL) {
1640 assert (List->_prev == NULL, "invariant") ;
1641 assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1642 assert (List != iterator, "invariant") ;
1643 }
1644
1645 if (Policy == 0) { // prepend to EntryList
1646 if (List == NULL) {
1647 iterator->_next = iterator->_prev = NULL ;
1648 _EntryList = iterator ;
1649 } else {
1650 List->_prev = iterator ;
1651 iterator->_next = List ;
1652 iterator->_prev = NULL ;
1653 _EntryList = iterator ;
1654 }
1655 } else
1656 if (Policy == 1) { // append to EntryList
1741 DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
1742
1743 int Policy = Knob_MoveNotifyee ;
1744 int Tally = 0 ;
1745 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
1746
1747 for (;;) {
1748 iterator = DequeueWaiter () ;
1749 if (iterator == NULL) break ;
1750 TEVENT (NotifyAll - Transfer1) ;
1751 ++Tally ;
1752
1753 // Disposition - what might we do with iterator ?
1754 // a. add it directly to the EntryList - either tail or head.
1755 // b. push it onto the front of the _cxq.
1756 // For now we use (a).
1757
1758 guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
1759 guarantee (iterator->_notified == 0, "invariant") ;
1760 iterator->_notified = 1 ;
1761 if (Policy != 4) {
1762 iterator->TState = ObjectWaiter::TS_ENTER ;
1763 }
1764
1765 ObjectWaiter * List = _EntryList ;
1766 if (List != NULL) {
1767 assert (List->_prev == NULL, "invariant") ;
1768 assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1769 assert (List != iterator, "invariant") ;
1770 }
1771
1772 if (Policy == 0) { // prepend to EntryList
1773 if (List == NULL) {
1774 iterator->_next = iterator->_prev = NULL ;
1775 _EntryList = iterator ;
1776 } else {
1777 List->_prev = iterator ;
1778 iterator->_next = List ;
1779 iterator->_prev = NULL ;
1780 _EntryList = iterator ;
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/markOop.hpp"
29 #include "oops/oop.inline.hpp"
30 #include "runtime/handles.inline.hpp"
31 #include "runtime/interfaceSupport.hpp"
32 #include "runtime/mutexLocker.hpp"
33 #include "runtime/objectMonitor.hpp"
34 #include "runtime/objectMonitor.inline.hpp"
35 #include "runtime/osThread.hpp"
36 #include "runtime/stubRoutines.hpp"
37 #include "runtime/thread.inline.hpp"
38 #include "services/threadService.hpp"
39 #include "trace/tracing.hpp"
40 #include "trace/traceMacros.hpp"
41 #include "utilities/dtrace.hpp"
42 #include "utilities/macros.hpp"
43 #include "utilities/preserveException.hpp"
44 #ifdef TARGET_OS_FAMILY_linux
45 # include "os_linux.inline.hpp"
46 #endif
47 #ifdef TARGET_OS_FAMILY_solaris
48 # include "os_solaris.inline.hpp"
49 #endif
50 #ifdef TARGET_OS_FAMILY_windows
51 # include "os_windows.inline.hpp"
52 #endif
53 #ifdef TARGET_OS_FAMILY_bsd
54 # include "os_bsd.inline.hpp"
55 #endif
56
57 #if defined(__GNUC__) && !defined(IA64)
58 // Need to inhibit inlining for older versions of GCC to avoid build-time failures
59 #define ATTR __attribute__((noinline))
60 #else
61 #define ATTR
62 #endif
357 assert (_owner == Self , "invariant") ;
358 assert (_recursions == 0 , "invariant") ;
359 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
360 Self->_Stalled = 0 ;
361 return ;
362 }
363
364 assert (_owner != Self , "invariant") ;
365 assert (_succ != Self , "invariant") ;
366 assert (Self->is_Java_thread() , "invariant") ;
367 JavaThread * jt = (JavaThread *) Self ;
368 assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
369 assert (jt->thread_state() != _thread_blocked , "invariant") ;
370 assert (this->object() != NULL , "invariant") ;
371 assert (_count >= 0, "invariant") ;
372
373 // Prevent deflation at STW-time. See deflate_idle_monitors() and is_busy().
374 // Ensure the object-monitor relationship remains stable while there's contention.
375 Atomic::inc_ptr(&_count);
376
377 EventJavaMonitorEnter event;
378
379 { // Change java thread status to indicate blocked on monitor enter.
380 JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
381
382 DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
383 if (JvmtiExport::should_post_monitor_contended_enter()) {
384 JvmtiExport::post_monitor_contended_enter(jt, this);
385 }
386
387 OSThreadContendState osts(Self->osthread());
388 ThreadBlockInVM tbivm(jt);
389
390 Self->set_current_pending_monitor(this);
391
392 // TODO-FIXME: change the following for(;;) loop to straight-line code.
393 for (;;) {
394 jt->set_suspend_equivalent();
395 // cleared by handle_special_suspend_equivalent_condition()
396 // or java_suspend_self()
397
398 EnterI (THREAD) ;
399
400 if (!ExitSuspendEquivalent(jt)) break ;
401
402 //
403 // We have acquired the contended monitor, but while we were
404 // waiting another thread suspended us. We don't want to enter
405 // the monitor while suspended because that would surprise the
406 // thread that suspended us.
407 //
408 _recursions = 0 ;
409 _succ = NULL ;
410 exit (false, Self) ;
411
412 jt->java_suspend_self();
413 }
414 Self->set_current_pending_monitor(NULL);
415 }
416
417 Atomic::dec_ptr(&_count);
418 assert (_count >= 0, "invariant") ;
419 Self->_Stalled = 0 ;
420
421 // Must either set _recursions = 0 or ASSERT _recursions == 0.
422 assert (_recursions == 0 , "invariant") ;
423 assert (_owner == Self , "invariant") ;
424 assert (_succ != Self , "invariant") ;
425 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
426
427 // The thread -- now the owner -- is back in vm mode.
428 // Report the glorious news via TI,DTrace and jvmstat.
429 // The probe effect is non-trivial. All the reportage occurs
430 // while we hold the monitor, increasing the length of the critical
431 // section. Amdahl's parallel speedup law comes vividly into play.
432 //
433 // Another option might be to aggregate the events (thread local or
434 // per-monitor aggregation) and defer reporting until a more opportune
435 // time -- such as next time some thread encounters contention but has
436 // yet to acquire the lock. While spinning that thread could
437 // spinning we could increment JVMStat counters, etc.
438
439 DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
440 if (JvmtiExport::should_post_monitor_contended_entered()) {
441 JvmtiExport::post_monitor_contended_entered(jt, this);
442 }
443
444 if (event.should_commit()) {
445 event.set_klass(((oop)this->object())->klass());
446 event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid);
447 event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
448 event.commit();
449 }
450
451 if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
452 ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
453 }
454 }
455
456
457 // Caveat: TryLock() is not necessarily serializing if it returns failure.
458 // Callers must compensate as needed.
459
460 int ObjectMonitor::TryLock (Thread * Self) {
461 for (;;) {
462 void * own = _owner ;
463 if (own != NULL) return 0 ;
464 if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
465 // Either guarantee _recursions == 0 or set _recursions = 0.
466 assert (_recursions == 0, "invariant") ;
467 assert (_owner == Self, "invariant") ;
468 // CONSIDER: set or assert that OwnerIsThread == 1
469 return 1 ;
470 }
913 //
914 // The CAS() in enter provides for safety and exclusion, while the CAS or
915 // MEMBAR in exit provides for progress and avoids stranding. 1-0 locking
916 // eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
917 // We detect and recover from stranding with timers.
918 //
919 // If a thread transiently strands it'll park until (a) another
920 // thread acquires the lock and then drops the lock, at which time the
921 // exiting thread will notice and unpark the stranded thread, or, (b)
922 // the timer expires. If the lock is high traffic then the stranding latency
923 // will be low due to (a). If the lock is low traffic then the odds of
924 // stranding are lower, although the worst-case stranding latency
925 // is longer. Critically, we don't want to put excessive load in the
926 // platform's timer subsystem. We want to minimize both the timer injection
927 // rate (timers created/sec) as well as the number of timers active at
928 // any one time. (more precisely, we want to minimize timer-seconds, which is
929 // the integral of the # of active timers at any instant over time).
930 // Both impinge on OS scalability. Given that, at most one thread parked on
931 // a monitor will use a timer.
932
933 void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) {
934 Thread * Self = THREAD ;
935 if (THREAD != _owner) {
936 if (THREAD->is_lock_owned((address) _owner)) {
937 // Transmute _owner from a BasicLock pointer to a Thread address.
938 // We don't need to hold _mutex for this transition.
939 // Non-null to Non-null is safe as long as all readers can
940 // tolerate either flavor.
941 assert (_recursions == 0, "invariant") ;
942 _owner = THREAD ;
943 _recursions = 0 ;
944 OwnerIsThread = 1 ;
945 } else {
946 // NOTE: we need to handle unbalanced monitor enter/exit
947 // in native code by throwing an exception.
948 // TODO: Throw an IllegalMonitorStateException ?
949 TEVENT (Exit - Throw IMSX) ;
950 assert(false, "Non-balanced monitor enter/exit!");
951 if (false) {
952 THROW(vmSymbols::java_lang_IllegalMonitorStateException());
953 }
954 return;
955 }
956 }
957
958 if (_recursions != 0) {
959 _recursions--; // this is simple recursive enter
960 TEVENT (Inflated exit - recursive) ;
961 return ;
962 }
963
964 // Invariant: after setting Responsible=null an thread must execute
965 // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
966 if ((SyncFlags & 4) == 0) {
967 _Responsible = NULL ;
968 }
969
970 #if INCLUDE_TRACE
971 // get the owner's thread id for the MonitorEnter event
972 // if it is enabled and the thread isn't suspended
973 if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
974 _previous_owner_tid = SharedRuntime::get_java_tid(Self);
975 }
976 #endif
977
978 for (;;) {
979 assert (THREAD == _owner, "invariant") ;
980
981
982 if (Knob_ExitPolicy == 0) {
983 // release semantics: prior loads and stores from within the critical section
984 // must not float (reorder) past the following store that drops the lock.
985 // On SPARC that requires MEMBAR #loadstore|#storestore.
986 // But of course in TSO #loadstore|#storestore is not required.
987 // I'd like to write one of the following:
988 // A. OrderAccess::release() ; _owner = NULL
989 // B. OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
990 // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
991 // store into a _dummy variable. That store is not needed, but can result
992 // in massive wasteful coherency traffic on classic SMP systems.
993 // Instead, I use release_store(), which is implemented as just a simple
994 // ST on x64, x86 and SPARC.
995 OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock
996 OrderAccess::storeload() ; // See if we need to wake a successor
997 if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
1347 // thread due to contention.
1348 intptr_t ObjectMonitor::complete_exit(TRAPS) {
1349 Thread * const Self = THREAD;
1350 assert(Self->is_Java_thread(), "Must be Java thread!");
1351 JavaThread *jt = (JavaThread *)THREAD;
1352
1353 DeferredInitialize();
1354
1355 if (THREAD != _owner) {
1356 if (THREAD->is_lock_owned ((address)_owner)) {
1357 assert(_recursions == 0, "internal state error");
1358 _owner = THREAD ; /* Convert from basiclock addr to Thread addr */
1359 _recursions = 0 ;
1360 OwnerIsThread = 1 ;
1361 }
1362 }
1363
1364 guarantee(Self == _owner, "complete_exit not owner");
1365 intptr_t save = _recursions; // record the old recursion count
1366 _recursions = 0; // set the recursion level to be 0
1367 exit (true, Self) ; // exit the monitor
1368 guarantee (_owner != Self, "invariant");
1369 return save;
1370 }
1371
1372 // reenter() enters a lock and sets recursion count
1373 // complete_exit/reenter operate as a wait without waiting
1374 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1375 Thread * const Self = THREAD;
1376 assert(Self->is_Java_thread(), "Must be Java thread!");
1377 JavaThread *jt = (JavaThread *)THREAD;
1378
1379 guarantee(_owner != Self, "reenter already owner");
1380 enter (THREAD); // enter the monitor
1381 guarantee (_recursions == 0, "reenter recursion");
1382 _recursions = recursions;
1383 return;
1384 }
1385
1386
1387 // -----------------------------------------------------------------------------
1401 TEVENT (Throw IMSX) ; \
1402 THROW(vmSymbols::java_lang_IllegalMonitorStateException()); \
1403 } \
1404 } \
1405 } while (false)
1406
1407 // check_slow() is a misnomer. It's called simply to throw an IMSX exception.
1408 // TODO-FIXME: remove check_slow() -- it's likely dead.
1409
// Slow path of the owner check: reached only when the calling thread
// does not own this monitor, so it unconditionally throws
// IllegalMonitorStateException back to the caller.
void ObjectMonitor::check_slow(TRAPS) {
  TEVENT (check_slow - throw IMSX) ;
  // Callers have already determined non-ownership; assert that here.
  assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
  THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
}
1415
1416 static int Adjust (volatile int * adr, int dx) {
1417 int v ;
1418 for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
1419 return v ;
1420 }
1421
1422 // helper method for posting a monitor wait event
// Fills in and commits a JavaMonitorWait trace event for a completed
// Object.wait() on this monitor.
//
// event        - the event instance to populate and commit
// notifier_tid - id of the notifying thread, recorded as an OSTHREAD
//                field (0 is passed by callers when there was no
//                notifier -- TODO confirm against call sites)
// timeout      - the requested wait timeout (presumably milliseconds,
//                matching wait(jlong millis); verify against caller)
// timedout     - true if the wait ended due to timeout rather than notify
void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event,
                                            jlong notifier_tid,
                                            jlong timeout,
                                            bool timedout) {
  event->set_klass(((oop)this->object())->klass());
  event->set_timeout((TYPE_ULONG)timeout);
  // Record the raw address of the object field; not a naked oop.
  event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
  event->set_notifier((TYPE_OSTHREAD)notifier_tid);
  event->set_timedOut((TYPE_BOOLEAN)timedout);
  event->commit();
}
1434
1435 // -----------------------------------------------------------------------------
1436 // Wait/Notify/NotifyAll
1437 //
1438 // Note: a subset of changes to ObjectMonitor::wait()
1439 // will need to be replicated in complete_exit above
// Implements Object.wait(): the calling thread must own this monitor
// (CHECK_OWNER() throws IllegalMonitorStateException otherwise).  The thread
// enqueues itself on the WaitSet, fully releases the monitor, parks until
// notified / interrupted / timed out, and then reacquires the monitor
// before returning.
void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
   Thread * const Self = THREAD ;
   assert(Self->is_Java_thread(), "Must be Java thread!");
   JavaThread *jt = (JavaThread *)THREAD;

   DeferredInitialize () ;

   // Throw IMSX or IEX.
   CHECK_OWNER();

   // JFR event; committed (past-tense) once the wait completes or aborts.
   EventJavaMonitorWait event;

   // check for a pending interrupt
   if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
     // post monitor waited event.  Note that this is past-tense, we are done waiting.
     if (JvmtiExport::should_post_monitor_waited()) {
        // Note: 'false' parameter is passed here because the
        // wait was not timed out due to thread interrupt.
        JvmtiExport::post_monitor_waited(jt, this, false);
     }
     if (event.should_commit()) {
       // notifier_tid == 0: nobody notified us; we are bailing out on interrupt.
       post_monitor_wait_event(&event, 0, millis, false);
     }
     TEVENT (Wait - Throw IEX) ;
     THROW(vmSymbols::java_lang_InterruptedException());
     return ;
   }

   TEVENT (Wait) ;

   assert (Self->_Stalled == 0, "invariant") ;
   Self->_Stalled = intptr_t(this) ;
   jt->set_current_waiting_monitor(this);

   // create a node to be put into the queue
   // Critically, after we reset() the event but prior to park(), we must check
   // for a pending interrupt.
   ObjectWaiter node(Self);
   node.TState = ObjectWaiter::TS_WAIT ;
   Self->_ParkEvent->reset() ;
   OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag

   // Enter the waiting queue, which is a circular doubly linked list in this case
   // but it could be a priority queue or any data structure.
   // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
   // by the owner of the monitor *except* in the case where park()
   // returns because of a timeout or interrupt.  Contention is exceptionally rare
   // so we use a simple spin-lock instead of a heavier-weight blocking lock.

   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
   AddWaiter (&node) ;
   Thread::SpinRelease (&_WaitSetLock) ;

   if ((SyncFlags & 4) == 0) {
      _Responsible = NULL ;
   }
   intptr_t save = _recursions; // record the old recursion count
   _waiters++;                  // increment the number of waiters
   _recursions = 0;             // set the recursion level to be 0
   exit (true, Self) ;          // exit the monitor
   guarantee (_owner != Self, "invariant") ;

   // As soon as the ObjectMonitor's ownership is dropped in the exit()
   // call above, another thread can enter() the ObjectMonitor, do the
   // notify(), and exit() the ObjectMonitor. If the other thread's
   // exit() call chooses this thread as the successor and the unpark()
   // call happens to occur while this thread is posting a
   // MONITOR_CONTENDED_EXIT event, then we run the risk of the event
   // handler using RawMonitors and consuming the unpark().
   //
   // To avoid the problem, we re-post the event. This does no harm
   // even if the original unpark() was not consumed because we are the
   // chosen successor for this monitor.
   if (node._notified != 0 && _succ == Self) {
      node._event->unpark();
   }

   // The thread is on the WaitSet list - now park() it.
   // On MP systems it's conceivable that a brief spin before we park
   // could be profitable.
   // The thread is now either off-list (TS_RUN),
   // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
   // The Node's TState variable is stable from the perspective of this thread.
   // No other threads will asynchronously modify TState.
   guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ;
   OrderAccess::loadload() ;
   if (_succ == Self) _succ = NULL ;
   WasNotified = node._notified ;

   // Reentry phase -- reacquire the monitor.
   // re-enter contended monitor after object.wait().
   // retain OBJECT_WAIT state until re-enter successfully completes
   // Thread state is thread_in_vm and oop access is again safe,
   // although the raw address of the object may have changed.
   // (Don't cache naked oops over safepoints, of course).

   // post monitor waited event. Note that this is past-tense, we are done waiting.
   if (JvmtiExport::should_post_monitor_waited()) {
     JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
   }

   if (event.should_commit()) {
     post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
   }

   OrderAccess::fence() ;

   assert (Self->_Stalled != 0, "invariant") ;
   Self->_Stalled = 0 ;

   assert (_owner != Self, "invariant") ;
   ObjectWaiter::TStates v = node.TState ;
   if (v == ObjectWaiter::TS_RUN) {
      // Node is off all queues: reacquire via the normal contended-enter path.
      enter (Self) ;
   } else {
      // A notifier moved us onto the EntryList or cxq; complete the
      // reacquisition from there.
      guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
      ReenterI (Self, &node) ;
      node.wait_reenter_end(this);
   }

   // Self has reacquired the lock.
   // Lifecycle - the node representing Self must not appear on any queues.
   // Node is about to go out-of-scope, but even if it were immortal we wouldn't
   // want residual elements associated with this thread left on any lists.
   guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
// Implements Object.notify(): dequeue at most one waiter from the WaitSet
// and stage it for monitor reacquisition.  CHECK_OWNER() throws
// IllegalMonitorStateException if the caller does not own the monitor.
void ObjectMonitor::notify(TRAPS) {
  CHECK_OWNER();
  // Fast path: nobody is waiting, nothing to transfer.
  if (_WaitSet == NULL) {
     TEVENT (Empty-Notify) ;
     return ;
  }
  DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);

  // Policy (Knob_MoveNotifyee) selects where the notified waiter is placed
  // for re-entry; the branches below handle 0 (prepend to EntryList) and
  // 1 (append).
  int Policy = Knob_MoveNotifyee ;

  // _WaitSetLock is a simple spin-lock guarding the wait queue.
  Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
  ObjectWaiter * iterator = DequeueWaiter() ;
  if (iterator != NULL) {
     TEVENT (Notify1 - Transfer) ;
     guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
     guarantee (iterator->_notified == 0, "invariant") ;
     // Policy 4 leaves the waiter in TS_WAIT at this point; all other
     // policies transition it to TS_ENTER immediately.
     if (Policy != 4) {
        iterator->TState = ObjectWaiter::TS_ENTER ;
     }
     // Mark as notified and record the notifier's OS thread id (reported
     // later via the JavaMonitorWait event in wait()).
     iterator->_notified = 1 ;
     Thread * Self = THREAD;
     iterator->_notifier_tid = Self->osthread()->thread_id();

     ObjectWaiter * List = _EntryList ;
     if (List != NULL) {
        assert (List->_prev == NULL, "invariant") ;
        assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
        assert (List != iterator, "invariant") ;
     }

     if (Policy == 0) {       // prepend to EntryList
         if (List == NULL) {
             iterator->_next = iterator->_prev = NULL ;
             _EntryList = iterator ;
         } else {
             // Push onto the head of the doubly-linked EntryList.
             List->_prev = iterator ;
             iterator->_next = List ;
             iterator->_prev = NULL ;
             _EntryList = iterator ;
         }
     } else
     if (Policy == 1) {      // append to EntryList
  DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);

  // Policy (Knob_MoveNotifyee) selects where each notified waiter is placed
  // for re-entry; Tally counts how many waiters were transferred.
  int Policy = Knob_MoveNotifyee ;
  int Tally = 0 ;
  Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;

  // Drain the WaitSet completely, staging each waiter for re-entry.
  for (;;) {
     iterator = DequeueWaiter () ;
     if (iterator == NULL) break ;
     TEVENT (NotifyAll - Transfer1) ;
     ++Tally ;

     // Disposition - what might we do with iterator ?
     // a.  add it directly to the EntryList - either tail or head.
     // b.  push it onto the front of the _cxq.
     // For now we use (a).

     guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
     guarantee (iterator->_notified == 0, "invariant") ;
     // Mark as notified and record the notifier's OS thread id (reported
     // later via the JavaMonitorWait event in wait()).
     iterator->_notified = 1 ;
     Thread * Self = THREAD;
     iterator->_notifier_tid = Self->osthread()->thread_id();
     // Policy 4 leaves the waiter in TS_WAIT at this point; all other
     // policies transition it to TS_ENTER immediately.
     if (Policy != 4) {
        iterator->TState = ObjectWaiter::TS_ENTER ;
     }

     ObjectWaiter * List = _EntryList ;
     if (List != NULL) {
        assert (List->_prev == NULL, "invariant") ;
        assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
        assert (List != iterator, "invariant") ;
     }

     if (Policy == 0) {       // prepend to EntryList
         if (List == NULL) {
             iterator->_next = iterator->_prev = NULL ;
             _EntryList = iterator ;
         } else {
             // Push onto the head of the doubly-linked EntryList.
             List->_prev = iterator ;
             iterator->_next = List ;
             iterator->_prev = NULL ;
             _EntryList = iterator ;
|