22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/markOop.hpp"
29 #include "oops/oop.inline.hpp"
30 #include "runtime/handles.inline.hpp"
31 #include "runtime/interfaceSupport.hpp"
32 #include "runtime/mutexLocker.hpp"
33 #include "runtime/objectMonitor.hpp"
34 #include "runtime/objectMonitor.inline.hpp"
35 #include "runtime/orderAccess.inline.hpp"
36 #include "runtime/osThread.hpp"
37 #include "runtime/stubRoutines.hpp"
38 #include "runtime/thread.inline.hpp"
39 #include "services/threadService.hpp"
40 #include "trace/tracing.hpp"
41 #include "trace/traceMacros.hpp"
42 #include "utilities/dtrace.hpp"
43 #include "utilities/macros.hpp"
44 #include "utilities/preserveException.hpp"
45 #ifdef TARGET_OS_FAMILY_linux
46 # include "os_linux.inline.hpp"
47 #endif
48 #ifdef TARGET_OS_FAMILY_solaris
49 # include "os_solaris.inline.hpp"
50 #endif
51 #ifdef TARGET_OS_FAMILY_windows
52 # include "os_windows.inline.hpp"
53 #endif
54 #ifdef TARGET_OS_FAMILY_bsd
55 # include "os_bsd.inline.hpp"
56 #endif
57
58 #if defined(__GNUC__) && !defined(IA64) && !defined(PPC64)
59 // Need to inhibit inlining for older versions of GCC to avoid build-time failures
60 #define ATTR __attribute__((noinline))
61 #else
298
299 bool ObjectMonitor::try_enter(Thread* THREAD) {
300 if (THREAD != _owner) {
301 if (THREAD->is_lock_owned ((address)_owner)) {
302 assert(_recursions == 0, "internal state error");
303 _owner = THREAD ;
304 _recursions = 1 ;
305 OwnerIsThread = 1 ;
306 return true;
307 }
308 if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
309 return false;
310 }
311 return true;
312 } else {
313 _recursions++;
314 return true;
315 }
316 }
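
// Illustrative caller of the non-blocking path above (a minimal sketch, not
// a call site in this file; "mon" and the critical section are hypothetical):
//
//   ObjectMonitor * mon = ...;          // an already-inflated monitor
//   if (mon->try_enter(THREAD)) {
//     ...                               // THREAD owns mon; do the work
//     mon->exit (false, THREAD) ;       // pair with the successful try_enter
//   } else {
//     ...                               // owned by another thread; fall back
//   }                                   // to the blocking enter() or retry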
317
318 void ATTR ObjectMonitor::enter(TRAPS) {
319 // The following code is ordered to check the most common cases first
320 // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
321 Thread * const Self = THREAD ;
322 void * cur ;
323
324 cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
325 if (cur == NULL) {
326 // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
327 assert (_recursions == 0 , "invariant") ;
328 assert (_owner == Self, "invariant") ;
329 // CONSIDER: set or assert OwnerIsThread == 1
330 return ;
331 }
332
333 if (cur == Self) {
334 // TODO-FIXME: check for integer overflow! BUGID 6557169.
335 _recursions ++ ;
336 return ;
337 }
338
361 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
362 Self->_Stalled = 0 ;
363 return ;
364 }
365
366 assert (_owner != Self , "invariant") ;
367 assert (_succ != Self , "invariant") ;
368 assert (Self->is_Java_thread() , "invariant") ;
369 JavaThread * jt = (JavaThread *) Self ;
370 assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
371 assert (jt->thread_state() != _thread_blocked , "invariant") ;
372 assert (this->object() != NULL , "invariant") ;
373 assert (_count >= 0, "invariant") ;
374
375 // Prevent deflation at STW-time. See deflate_idle_monitors() and is_busy().
376 // Ensure the object-monitor relationship remains stable while there's contention.
377 Atomic::inc_ptr(&_count);
378
379 EventJavaMonitorEnter event;
380
381 { // Change java thread status to indicate blocked on monitor enter.
382 JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
383
384 DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
385 if (JvmtiExport::should_post_monitor_contended_enter()) {
386 JvmtiExport::post_monitor_contended_enter(jt, this);
387
388 // The current thread does not yet own the monitor and does not
389 // yet appear on any queues that would get it made the successor.
390 // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
391 // handler cannot accidentally consume an unpark() meant for the
392 // ParkEvent associated with this ObjectMonitor.
393 }
394
395 OSThreadContendState osts(Self->osthread());
396 ThreadBlockInVM tbivm(jt);
397
398 Self->set_current_pending_monitor(this);
399
400 // TODO-FIXME: change the following for(;;) loop to straight-line code.
401 for (;;) {
402 jt->set_suspend_equivalent();
403 // cleared by handle_special_suspend_equivalent_condition()
404 // or java_suspend_self()
405
406 EnterI (THREAD) ;
407
408 if (!ExitSuspendEquivalent(jt)) break ;
409
410 //
411 // We have acquired the contended monitor, but while we were
412 // waiting another thread suspended us. We don't want to enter
413 // the monitor while suspended because that would surprise the
414 // thread that suspended us.
415 //
416 _recursions = 0 ;
417 _succ = NULL ;
418 exit (false, Self) ;
419
420 jt->java_suspend_self();
421 }
422 Self->set_current_pending_monitor(NULL);
423
424 // We cleared the pending monitor info since we've just gotten past
425 // the enter-check-for-suspend dance and we now own the monitor free
426 // and clear, i.e., it is no longer pending. The ThreadBlockInVM
436 Self->_Stalled = 0 ;
437
438 // Must either set _recursions = 0 or ASSERT _recursions == 0.
439 assert (_recursions == 0 , "invariant") ;
440 assert (_owner == Self , "invariant") ;
441 assert (_succ != Self , "invariant") ;
442 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
443
444 // The thread -- now the owner -- is back in vm mode.
445 // Report the glorious news via TI,DTrace and jvmstat.
446 // The probe effect is non-trivial. All the reportage occurs
447 // while we hold the monitor, increasing the length of the critical
448 // section. Amdahl's parallel speedup law comes vividly into play.
449 //
450 // Another option might be to aggregate the events (thread local or
451 // per-monitor aggregation) and defer reporting until a more opportune
452 // time -- such as next time some thread encounters contention but has
453 // yet to acquire the lock. While spinning, that thread could
454 // increment JVMStat counters, etc.
455
456 DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
457 if (JvmtiExport::should_post_monitor_contended_entered()) {
458 JvmtiExport::post_monitor_contended_entered(jt, this);
459
460 // The current thread already owns the monitor and is not going to
461 // call park() for the remainder of the monitor enter protocol. So
462 // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
463 // event handler consumed an unpark() issued by the thread that
464 // just exited the monitor.
465 }
466
467 if (event.should_commit()) {
468 event.set_klass(((oop)this->object())->klass());
469 event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid);
470 event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
471 event.commit();
472 }
473
474 if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
475 ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
482
483 int ObjectMonitor::TryLock (Thread * Self) {
484 for (;;) {
485 void * own = _owner ;
486 if (own != NULL) return 0 ;
487 if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
488 // Either guarantee _recursions == 0 or set _recursions = 0.
489 assert (_recursions == 0, "invariant") ;
490 assert (_owner == Self, "invariant") ;
491 // CONSIDER: set or assert that OwnerIsThread == 1
492 return 1 ;
493 }
494 // The lock had been free momentarily, but we lost the race to the lock.
495 // Interference -- the CAS failed.
496 // We can either return -1 or retry.
497 // Retry doesn't make much sense because the lock was just acquired by another thread.
498 if (true) return -1 ;
499 }
500 }
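
// A summary of the tri-state convention returned by TryLock above -- the
// callers below depend on it (descriptive only, not new behavior):
//
//   int rc = TryLock (Self) ;
//   // rc >  0 : the CAS succeeded and Self now owns the lock
//   // rc == 0 : the lock was observed held; no CAS was attempted
//   // rc <  0 : the lock was momentarily free but the CAS lost the race
//
// Callers such as EnterI() test only "TryLock(Self) > 0", treating 0 and -1
// alike as failure.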
501
502 void ATTR ObjectMonitor::EnterI (TRAPS) {
503 Thread * Self = THREAD ;
504 assert (Self->is_Java_thread(), "invariant") ;
505 assert (((JavaThread *) Self)->thread_state() == _thread_blocked , "invariant") ;
506
507 // Try the lock - TATAS
508 if (TryLock (Self) > 0) {
509 assert (_succ != Self , "invariant") ;
510 assert (_owner == Self , "invariant") ;
511 assert (_Responsible != Self , "invariant") ;
512 return ;
513 }
514
515 DeferredInitialize () ;
516
517 // We try one round of spinning *before* enqueueing Self.
518 //
519 // If the _owner is ready but OFFPROC we could use a YieldTo()
520 // operation to donate the remainder of this thread's quantum
521 // to the owner. This has subtle but beneficial affinity
522 // effects.
523
524 if (TrySpin (Self) > 0) {
525 assert (_owner == Self , "invariant") ;
526 assert (_succ != Self , "invariant") ;
527 assert (_Responsible != Self , "invariant") ;
528 return ;
529 }
530
531 // The Spin failed -- Enqueue and park the thread ...
532 assert (_succ != Self , "invariant") ;
533 assert (_owner != Self , "invariant") ;
534 assert (_Responsible != Self , "invariant") ;
535
536 // Enqueue "Self" on ObjectMonitor's _cxq.
537 //
538 // Node acts as a proxy for Self.
539 // As an aside, if we were ever to rewrite the synchronization code mostly
540 // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
541 // Java objects. This would avoid awkward lifecycle and liveness issues,
542 // as well as eliminate a subset of ABA issues.
543 // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
544 //
545
546 ObjectWaiter node(Self) ;
547 Self->_ParkEvent->reset() ;
548 node._prev = (ObjectWaiter *) 0xBAD ;
549 node.TState = ObjectWaiter::TS_CXQ ;
550
551 // Push "Self" onto the front of the _cxq.
552 // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
553 // Note that spinning tends to reduce the rate at which threads
554 // enqueue and dequeue on EntryList|cxq.
555 ObjectWaiter * nxt ;
556 for (;;) {
557 node._next = nxt = _cxq ;
558 if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;
559
560 // Interference - the CAS failed because _cxq changed. Just retry.
561 // As an optional optimization we retry the lock.
562 if (TryLock (Self) > 0) {
563 assert (_succ != Self , "invariant") ;
564 assert (_owner == Self , "invariant") ;
565 assert (_Responsible != Self , "invariant") ;
566 return ;
567 }
568 }
569
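// The push loop above is the classic lock-free LIFO ("Treiber stack") insert.
// The same pattern in self-contained form, using C++11 atomics in place of
// Atomic::cmpxchg_ptr (an illustrative sketch only -- HotSpot does not use
// std::atomic here):
//
//   #include <atomic>
//   struct Node { Node * next; };
//   std::atomic<Node*> head;
//   void push (Node * n) {
//     Node * h = head.load() ;
//     do {
//       n->next = h ;                               // link before publishing
//     } while (!head.compare_exchange_weak(h, n)) ; // h is reloaded on failure
//   }
//
// The CAS fails only if some other thread mutated the head between the load
// and the CAS, so a pusher simply re-links and retries.
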
570 // Check for cxq|EntryList edge transition to non-null. This indicates
571 // the onset of contention. While contention persists exiting threads
572 // will use a ST:MEMBAR:LD 1-1 exit protocol. When contention abates exit
573 // operations revert to the faster 1-0 mode. This enter operation may interleave
574 // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
575 // arrange for one of the contending threads to use a timed park() operation
576 // to detect and recover from the race. (Stranding is a form of progress failure
577 // where the monitor is unlocked but all the contending threads remain parked).
578 // That is, at least one of the contending threads will periodically poll _owner.
579 // One of the contending threads will become the designated "Responsible" thread.
580 // The Responsible thread uses a timed park instead of a normal indefinite park
581 // operation -- it periodically wakes and checks for and recovers from potential
582 // strandings admitted by 1-0 exit operations. We need at most one Responsible
583 // thread per-monitor at any given moment. Only threads on cxq|EntryList may
584 // be responsible for a monitor.
585 //
586 // Currently, one of the contended threads takes on the added role of "Responsible".
587 // A viable alternative would be to use a dedicated "stranding checker" thread
588 // that periodically iterated over all the threads (or active monitors) and unparked
589 // successors where there was risk of stranding. This would help eliminate the
615
616 if (TryLock (Self) > 0) break ;
617 assert (_owner != Self, "invariant") ;
618
619 if ((SyncFlags & 2) && _Responsible == NULL) {
620 Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
621 }
622
623 // park self
624 if (_Responsible == Self || (SyncFlags & 1)) {
625 TEVENT (Inflated enter - park TIMED) ;
626 Self->_ParkEvent->park ((jlong) RecheckInterval) ;
627 // Increase the RecheckInterval, but clamp the value.
628 RecheckInterval *= 8 ;
629 if (RecheckInterval > 1000) RecheckInterval = 1000 ;
630 } else {
631 TEVENT (Inflated enter - park UNTIMED) ;
632 Self->_ParkEvent->park() ;
633 }
634
635 if (TryLock(Self) > 0) break ;
636
637 // The lock is still contested.
638 // Keep a tally of the # of futile wakeups.
639 // Note that the counter is not protected by a lock or updated by atomics.
640 // That is by design - we trade "lossy" counters which are exposed to
641 // races during updates for a lower probe effect.
642 TEVENT (Inflated enter - Futile wakeup) ;
643 if (ObjectMonitor::_sync_FutileWakeups != NULL) {
644 ObjectMonitor::_sync_FutileWakeups->inc() ;
645 }
646 ++ nWakeups ;
647
648 // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
649 // We can defer clearing _succ until after the spin completes
650 // TrySpin() must tolerate being called with _succ == Self.
651 // Try yet another round of adaptive spinning.
652 if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ;
653
654 // We can find that we were unpark()ed and redesignated _succ while
720 // STs to monitor meta-data and user-data could reorder with (become
721 // visible after) the ST in exit that drops ownership of the lock.
722 // Some other thread could then acquire the lock, but observe inconsistent
723 // or old monitor meta-data and heap data. That violates the JMM.
724 // To that end, the 1-0 exit() operation must have at least STST|LDST
725 // "release" barrier semantics. Specifically, there must be at least a
726 // STST|LDST barrier in exit() before the ST of null into _owner that drops
727 // the lock. The barrier ensures that changes to monitor meta-data and data
728 // protected by the lock will be visible before we release the lock, and
729 // therefore before some other thread (CPU) has a chance to acquire the lock.
730 // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
731 //
732 // Critically, any prior STs to _succ or EntryList must be visible before
733 // the ST of null into _owner in the *subsequent* (following) corresponding
734 // monitorexit. Recall too, that in 1-0 mode monitorexit does not necessarily
735 // execute a serializing instruction.
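//
// In code form, the minimal ordering the 1-0 exit path provides is the
// sequence used in exit() below (annotations added here for emphasis):
//
//   // ... STs to data protected by the lock, and to _succ/EntryList ...
//   OrderAccess::release_store_ptr (&_owner, NULL) ; // STST|LDST, then ST null
//   OrderAccess::storeload() ;                       // the ST of null cannot
//                                                    // pass the LDs below
//   if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) return ;
//   // otherwise reacquire _owner and wake a successor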
736
737 if (SyncFlags & 8) {
738 OrderAccess::fence() ;
739 }
740 return ;
741 }
742
743 // ReenterI() is a specialized inline form of the latter half of the
744 // contended slow-path from EnterI(). We use ReenterI() only for
745 // monitor reentry in wait().
746 //
747 // In the future we should reconcile EnterI() and ReenterI(), adding
748 // Knob_Reset and Knob_SpinAfterFutile support and restructuring the
749 // loop accordingly.
750
751 void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
752 assert (Self != NULL , "invariant") ;
753 assert (SelfNode != NULL , "invariant") ;
754 assert (SelfNode->_thread == Self , "invariant") ;
755 assert (_waiters > 0 , "invariant") ;
756 assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
757 assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
758 JavaThread * jt = (JavaThread *) Self ;
759
760 int nWakeups = 0 ;
936 //
937 // The CAS() in enter provides for safety and exclusion, while the CAS or
938 // MEMBAR in exit provides for progress and avoids stranding. 1-0 locking
939 // eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
940 // We detect and recover from stranding with timers.
941 //
942 // If a thread transiently strands it'll park until (a) another
943 // thread acquires the lock and then drops the lock, at which time the
944 // exiting thread will notice and unpark the stranded thread, or, (b)
945 // the timer expires. If the lock is high traffic then the stranding latency
946 // will be low due to (a). If the lock is low traffic then the odds of
947 // stranding are lower, although the worst-case stranding latency
948 // is longer. Critically, we don't want to put excessive load in the
949 // platform's timer subsystem. We want to minimize both the timer injection
950 // rate (timers created/sec) as well as the number of timers active at
951 // any one time. (More precisely, we want to minimize timer-seconds, which is
952 // the integral of the # of active timers at any instant over time).
953 // Both impinge on OS scalability. Given that, at most one thread parked on
954 // a monitor will use a timer.
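//
// A concrete interleaving showing how a pure 1-0 exit (ST only, no MEMBAR)
// admits stranding, and why the timed park recovers (thread names are
// illustrative):
//
//   T1 (owner, 1-0 exit)                  T2 (entering)
//   ST _owner = NULL  (languishes in T1's store buffer)
//   LD _cxq == NULL   -> no successor to wake
//                                         ST: push self onto _cxq
//                                         LD _owner == T1 (stale) -> park()
//   T1's store drains: the lock is free, yet T2 remains parked -- stranded.
//
// The designated Responsible thread parks with a timeout, so it eventually
// re-polls _owner, sees the free lock, and acquires it, restoring progress.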
955
956 void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) {
957 Thread * Self = THREAD ;
958 if (THREAD != _owner) {
959 if (THREAD->is_lock_owned((address) _owner)) {
960 // Transmute _owner from a BasicLock pointer to a Thread address.
961 // We don't need to hold _mutex for this transition.
962 // Non-null to Non-null is safe as long as all readers can
963 // tolerate either flavor.
964 assert (_recursions == 0, "invariant") ;
965 _owner = THREAD ;
966 _recursions = 0 ;
967 OwnerIsThread = 1 ;
968 } else {
969 // NOTE: we need to handle unbalanced monitor enter/exit
970 // in native code by throwing an exception.
971 // TODO: Throw an IllegalMonitorStateException ?
972 TEVENT (Exit - Throw IMSX) ;
973 assert(false, "Non-balanced monitor enter/exit!");
974 if (false) {
975 THROW(vmSymbols::java_lang_IllegalMonitorStateException());
976 }
981 if (_recursions != 0) {
982 _recursions--; // this is simple recursive enter
983 TEVENT (Inflated exit - recursive) ;
984 return ;
985 }
986
987 // Invariant: after setting Responsible=null a thread must execute
988 // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
989 if ((SyncFlags & 4) == 0) {
990 _Responsible = NULL ;
991 }
992
993 #if INCLUDE_TRACE
994 // get the owner's thread id for the MonitorEnter event
995 // if it is enabled and the thread isn't suspended
996 if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
997 _previous_owner_tid = SharedRuntime::get_java_tid(Self);
998 }
999 #endif
1000
1001 for (;;) {
1002 assert (THREAD == _owner, "invariant") ;
1003
1004
1005 if (Knob_ExitPolicy == 0) {
1006 // release semantics: prior loads and stores from within the critical section
1007 // must not float (reorder) past the following store that drops the lock.
1008 // On SPARC that requires MEMBAR #loadstore|#storestore.
1009 // But of course in TSO #loadstore|#storestore is not required.
1010 // I'd like to write one of the following:
1011 // A. OrderAccess::release() ; _owner = NULL
1012 // B. OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
1013 // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
1014 // store into a _dummy variable. That store is not needed, but can result
1015 // in massive wasteful coherency traffic on classic SMP systems.
1016 // Instead, I use release_store(), which is implemented as just a simple
1017 // ST on x64, x86 and SPARC.
1018 OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock
1019 OrderAccess::storeload() ; // See if we need to wake a successor
1020 if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
1021 TEVENT (Inflated exit - simple egress) ;
1022 return ;
1023 }
1024 TEVENT (Inflated exit - complex egress) ;
1025
1026 // Normally the exiting thread is responsible for ensuring succession,
1027 // but if other successors are ready or other entering threads are spinning
1028 // then this thread can simply store NULL into _owner and exit without
1029 // waking a successor. The existence of spinners or ready successors
1030 // guarantees proper succession (liveness). Responsibility passes to the
1031 // ready or running successors. The exiting thread delegates the duty.
1032 // More precisely, if a successor already exists this thread is absolved
1033 // of the responsibility of waking (unparking) one.
1034 //
1035 // The _succ variable is critical to reducing futile wakeup frequency.
1036 // _succ identifies the "heir presumptive" thread that has been made
1037 // ready (unparked) but that has not yet run. We need only one such
1038 // successor thread to guarantee progress.
1039 // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
1040 // section 3.3 "Futile Wakeup Throttling" for details.
1047 // to acquire the lock. If so, the exiting thread could exit
1048 // immediately without waking a successor, otherwise the exiting
1049 // thread would need to dequeue and wake a successor.
1050 // (Note that we'd need to make the post-drop spin short, but no
1051 // shorter than the worst-case round-trip cache-line migration time.
1052 // The dropped lock needs to become visible to the spinner, and then
1053 // the acquisition of the lock by the spinner must become visible to
1054 // the exiting thread).
1055 //
1056
1057 // It appears that an heir-presumptive (successor) must be made ready.
1058 // Only the current lock owner can manipulate the EntryList or
1059 // drain _cxq, so we need to reacquire the lock. If we fail
1060 // to reacquire the lock the responsibility for ensuring succession
1061 // falls to the new owner.
1062 //
1063 if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
1064 return ;
1065 }
1066 TEVENT (Exit - Reacquired) ;
1067 } else {
1068 if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
1069 OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock
1070 OrderAccess::storeload() ;
1071 // Ratify the previously observed values.
1072 if (_cxq == NULL || _succ != NULL) {
1073 TEVENT (Inflated exit - simple egress) ;
1074 return ;
1075 }
1076
1077 // inopportune interleaving -- the exiting thread (this thread)
1078 // in the fast-exit path raced an entering thread in the slow-enter
1079 // path.
1080 // We have two choices:
1081 // A. Try to reacquire the lock.
1082 // If the CAS() fails return immediately, otherwise
1083 // we either restart/rerun the exit operation, or simply
1084 // fall-through into the code below which wakes a successor.
1085 // B. If the elements forming the EntryList|cxq are TSM
1086 // we could simply unpark() the lead thread and return
1087 // without having set _succ.
1088 if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
1089 TEVENT (Inflated exit - reacquired succeeded) ;
1090 return ;
1091 }
1092 TEVENT (Inflated exit - reacquired failed) ;
1093 } else {
1094 TEVENT (Inflated exit - complex egress) ;
1095 }
1096 }
1097
1098 guarantee (_owner == THREAD, "invariant") ;
1099
1100 ObjectWaiter * w = NULL ;
1101 int QMode = Knob_QMode ;
1102
1103 if (QMode == 2 && _cxq != NULL) {
1104 // QMode == 2 : cxq has precedence over EntryList.
1105 // Try to directly wake a successor from the cxq.
1106 // If successful, the successor will need to unlink itself from cxq.
1107 w = _cxq ;
1108 assert (w != NULL, "invariant") ;
1109 assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
1110 ExitEpilog (Self, w) ;
1111 return ;
1112 }
1113
1114 if (QMode == 3 && _cxq != NULL) {
1115 // Aggressively drain cxq into EntryList at the first opportunity.
1116 // This policy ensures that recently-run threads live at the head of EntryList.
1117 // Drain _cxq into EntryList - bulk transfer.
1118 // First, detach _cxq.
1119 // The following loop is tantamount to: w = swap (&cxq, NULL)
1120 w = _cxq ;
1121 for (;;) {
1122 assert (w != NULL, "Invariant") ;
1123 ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
1124 if (u == w) break ;
1125 w = u ;
1126 }
1127 assert (w != NULL , "invariant") ;
1128
1129 ObjectWaiter * q = NULL ;
1130 ObjectWaiter * p ;
1131 for (p = w ; p != NULL ; p = p->_next) {
1132 guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
1133 p->TState = ObjectWaiter::TS_ENTER ;
1134 p->_prev = q ;
1135 q = p ;
1136 }
1137
1138 // Append the RATs to the EntryList
1139 // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
1140 ObjectWaiter * Tail ;
1141 for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
1142 if (Tail == NULL) {
1143 _EntryList = w ;
1144 } else {
1145 Tail->_next = w ;
1146 w->_prev = Tail ;
1147 }
1148
1149 // Fall thru into code that tries to wake a successor from EntryList
1150 }
1151
1152 if (QMode == 4 && _cxq != NULL) {
1153 // Aggressively drain cxq into EntryList at the first opportunity.
1154 // This policy ensures that recently-run threads live at the head of EntryList.
1155
1156 // Drain _cxq into EntryList - bulk transfer.
1157 // First, detach _cxq.
1158 // The following loop is tantamount to: w = swap (&cxq, NULL)
1159 w = _cxq ;
1160 for (;;) {
1161 assert (w != NULL, "Invariant") ;
1162 ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
1163 if (u == w) break ;
1164 w = u ;
1165 }
1166 assert (w != NULL , "invariant") ;
1167
1168 ObjectWaiter * q = NULL ;
1169 ObjectWaiter * p ;
1170 for (p = w ; p != NULL ; p = p->_next) {
1171 guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
1172 p->TState = ObjectWaiter::TS_ENTER ;
1173 p->_prev = q ;
1174 q = p ;
1175 }
1176
1177 // Prepend the RATs to the EntryList
1178 if (_EntryList != NULL) {
1179 q->_next = _EntryList ;
1180 _EntryList->_prev = q ;
1181 }
1182 _EntryList = w ;
1183
1184 // Fall thru into code that tries to wake a successor from EntryList
1185 }
1186
1187 w = _EntryList ;
1188 if (w != NULL) {
1189 // I'd like to write: guarantee (w->_thread != Self).
1190 // But in practice an exiting thread may find itself on the EntryList.
1191 // Let's say thread T1 calls O.wait(). Wait() enqueues T1 on O's waitset and
1192 // then calls exit(). Exit releases the lock by setting O._owner to NULL.
1193 // Let's say T1 then stalls. T2 acquires O and calls O.notify(). The
1194 // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
1195 // releases the lock "O". T2 resumes immediately after the ST of null into
1196 // _owner, above. T2 notices that the EntryList is populated, so it
1197 // reacquires the lock and then finds itself on the EntryList.
1198 // Given all that, we have to tolerate the circumstance where "w" is
1199 // associated with Self.
1200 assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1201 ExitEpilog (Self, w) ;
1202 return ;
1203 }
1204
1205 // If we find that both _cxq and EntryList are null then just
1213 for (;;) {
1214 assert (w != NULL, "Invariant") ;
1215 ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
1216 if (u == w) break ;
1217 w = u ;
1218 }
1219 TEVENT (Inflated exit - drain cxq into EntryList) ;
1220
1221 assert (w != NULL , "invariant") ;
1222 assert (_EntryList == NULL , "invariant") ;
1223
1224 // Convert the LIFO SLL anchored by _cxq into a DLL.
1225 // The list reorganization step operates in O(LENGTH(w)) time.
1226 // It's critical that this step operate quickly as
1227 // "Self" still holds the outer-lock, restricting parallelism
1228 // and effectively lengthening the critical section.
1229 // Invariant: s chases t chases u.
1230 // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
1231 // we have faster access to the tail.
1232
1233 if (QMode == 1) {
1234 // QMode == 1 : drain cxq to EntryList, reversing order
1235 // We also reverse the order of the list.
1236 ObjectWaiter * s = NULL ;
1237 ObjectWaiter * t = w ;
1238 ObjectWaiter * u = NULL ;
1239 while (t != NULL) {
1240 guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
1241 t->TState = ObjectWaiter::TS_ENTER ;
1242 u = t->_next ;
1243 t->_prev = u ;
1244 t->_next = s ;
1245 s = t;
1246 t = u ;
1247 }
1248 _EntryList = s ;
1249 assert (s != NULL, "invariant") ;
1250 } else {
1251 // QMode == 0 or QMode == 2
1252 _EntryList = w ;
1253 ObjectWaiter * q = NULL ;
1254 ObjectWaiter * p ;
1255 for (p = w ; p != NULL ; p = p->_next) {
1256 guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
1257 p->TState = ObjectWaiter::TS_ENTER ;
1258 p->_prev = q ;
1259 q = p ;
1260 }
1261 }
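
// Worked example of the conversion above (illustrative): if threads pushed
// themselves onto _cxq in the order A, then B, then C, the detached LIFO
// SLL is
//
//   w -> C -> B -> A            (linked via _next)
//
// and after the loop each node also has its _prev link set:
//
//   _EntryList: C <-> B <-> A   (C->_prev == NULL, A->_next == NULL)
//
// so under QMode 0 or 2 the most recently arrived thread (C) is woken first.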
1262
1263 // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
1264 // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
1265
1266 // See if we can abdicate to a spinner instead of waking a thread.
1267 // A primary goal of the implementation is to reduce the
1268 // context-switch rate.
1269 if (_succ != NULL) continue;
1270
1271 w = _EntryList ;
1272 if (w != NULL) {
1273 guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1274 ExitEpilog (Self, w) ;
1275 return ;
1276 }
1277 }
1278 }
1279
1280 // ExitSuspendEquivalent:
1281 // A faster alternative to handle_special_suspend_equivalent_condition()
1351
1352 DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
1353 Trigger->unpark() ;
1354
1355 // Maintain stats and report events to JVMTI
1356 if (ObjectMonitor::_sync_Parks != NULL) {
1357 ObjectMonitor::_sync_Parks->inc() ;
1358 }
1359 }
1360
1361
1362 // -----------------------------------------------------------------------------
1363 // Class Loader deadlock handling.
1364 //
1365 // complete_exit exits a lock returning recursion count
1366 // complete_exit/reenter operate as a wait without waiting
1367 // complete_exit requires an inflated monitor
1368 // The _owner field is not always the Thread addr even with an
1369 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1370 // thread due to contention.
1371 intptr_t ObjectMonitor::complete_exit(TRAPS) {
1372 Thread * const Self = THREAD;
1373 assert(Self->is_Java_thread(), "Must be Java thread!");
1374 JavaThread *jt = (JavaThread *)THREAD;
1375
1376 DeferredInitialize();
1377
1378 if (THREAD != _owner) {
1379 if (THREAD->is_lock_owned ((address)_owner)) {
1380 assert(_recursions == 0, "internal state error");
1381 _owner = THREAD ; /* Convert from basiclock addr to Thread addr */
1382 _recursions = 0 ;
1383 OwnerIsThread = 1 ;
1384 }
1385 }
1386
1387 guarantee(Self == _owner, "complete_exit not owner");
1388 intptr_t save = _recursions; // record the old recursion count
1389 _recursions = 0; // set the recursion level to 0
1390 exit (true, Self) ; // exit the monitor
1391 guarantee (_owner != Self, "invariant");
1392 return save;
1393 }
1394
1395 // reenter() enters a lock and sets recursion count
1396 // complete_exit/reenter operate as a wait without waiting
1397 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1398 Thread * const Self = THREAD;
1399 assert(Self->is_Java_thread(), "Must be Java thread!");
1400 JavaThread *jt = (JavaThread *)THREAD;
1401
1402 guarantee(_owner != Self, "reenter already owner");
1403 enter (THREAD); // enter the monitor
1404 guarantee (_recursions == 0, "reenter recursion");
1405 _recursions = recursions;
1406 return;
1407 }
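
// The intended pairing, in sketch form (the real call sites are in
// ObjectSynchronizer, not in this file):
//
//   intptr_t rec = monitor->complete_exit(THREAD); // release fully, save count
//   ...                                            // run without the monitor
//   monitor->reenter(rec, THREAD);                 // block until reacquired,
//                                                  // then restore _recursions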
1408
1409
1410 // -----------------------------------------------------------------------------
1411 // A macro is used below because there may already be a pending
1412 // exception which should not abort the execution of the routines
1413 // which use this (which is why we don't put this into check_slow and
1414 // call it with a CHECK argument).
1415
1416 #define CHECK_OWNER() \
1417 do { \
1418 if (THREAD != _owner) { \
1419 if (THREAD->is_lock_owned((address) _owner)) { \
1420 _owner = THREAD ; /* Convert from basiclock addr to Thread addr */ \
1421 _recursions = 0; \
1422 OwnerIsThread = 1 ; \
1423 } else { \
1424 TEVENT (Throw IMSX) ; \
1425 THROW(vmSymbols::java_lang_IllegalMonitorStateException()); \
1426 } \
1507 // for a pending interrupt.
1508 ObjectWaiter node(Self);
1509 node.TState = ObjectWaiter::TS_WAIT ;
1510 Self->_ParkEvent->reset() ;
1511 OrderAccess::fence(); // ST into Event; membar ; LD interrupted-flag
1512
1513 // Enter the waiting queue, which is a circular doubly linked list in this case
1514 // but it could be a priority queue or any data structure.
1515 // _WaitSetLock protects the wait queue. Normally the wait queue is accessed only
1516 // by the owner of the monitor *except* in the case where park()
1517 // returns because of a timeout or interrupt. Contention is exceptionally rare
1518 // so we use a simple spin-lock instead of a heavier-weight blocking lock.
1519
1520 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
1521 AddWaiter (&node) ;
1522 Thread::SpinRelease (&_WaitSetLock) ;
1523
1524 if ((SyncFlags & 4) == 0) {
1525 _Responsible = NULL ;
1526 }
1527 intptr_t save = _recursions; // record the old recursion count
1528 _waiters++; // increment the number of waiters
1529 _recursions = 0; // set the recursion level to 0
1530 exit (true, Self) ; // exit the monitor
1531 guarantee (_owner != Self, "invariant") ;
1532
1533 // The thread is on the WaitSet list - now park() it.
1534 // On MP systems it's conceivable that a brief spin before we park
1535 // could be profitable.
1536 //
1537 // TODO-FIXME: change the following logic to a loop of the form
1538 // while (!timeout && !interrupted && _notified == 0) park()
1539
1540 int ret = OS_OK ;
1541 int WasNotified = 0 ;
1542 { // State transition wrappers
1543 OSThread* osthread = Self->osthread();
1544 OSThreadWaitState osts(osthread, true);
1545 {
1546 ThreadBlockInVM tbivm(jt);
1547 // Thread is in thread_blocked state and oop access is unsafe.
1548 jt->set_suspend_equivalent();
1549
1550 if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
1627 //
1628 // We redo the unpark() to ensure forward progress, i.e., we
1629 // don't want all pending threads hanging (parked) with none
1630 // entering the unlocked monitor.
1631 node._event->unpark();
1632 }
1633 }
1634
1635 if (event.should_commit()) {
1636 post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
1637 }
1638
1639 OrderAccess::fence() ;
1640
1641 assert (Self->_Stalled != 0, "invariant") ;
1642 Self->_Stalled = 0 ;
1643
1644 assert (_owner != Self, "invariant") ;
1645 ObjectWaiter::TStates v = node.TState ;
1646 if (v == ObjectWaiter::TS_RUN) {
1647 enter (Self) ;
1648 } else {
1649 guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
1650 ReenterI (Self, &node) ;
1651 node.wait_reenter_end(this);
1652 }
1653
1654 // Self has reacquired the lock.
1655 // Lifecycle - the node representing Self must not appear on any queues.
1656 // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1657 // want residual elements associated with this thread left on any lists.
1658 guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
1659 assert (_owner == Self, "invariant") ;
1660 assert (_succ != Self , "invariant") ;
1661 } // OSThreadWaitState()
1662
1663 jt->set_current_waiting_monitor(NULL);
1664
1665 guarantee (_recursions == 0, "invariant") ;
1666 _recursions = save; // restore the old recursion count
1667 _waiters--; // decrement the number of waiters
1668
1669 // Verify a few postconditions
1670 assert (_owner == Self , "invariant") ;
1671 assert (_succ != Self , "invariant") ;
1672 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
1673
1674 if (SyncFlags & 32) {
1675 OrderAccess::fence() ;
1676 }
1677
1678 // check if the notification happened
1679 if (!WasNotified) {
1680 // no, it could be timeout or Thread.interrupt() or both
1681 // check for interrupt event, otherwise it is timeout
1682 if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1683 TEVENT (Wait - throw IEX from epilog) ;
1684 THROW(vmSymbols::java_lang_InterruptedException());
1685 }
1686 }
2511 SETKNOB(LogSpins) ;
2512 SETKNOB(SpinSetSucc) ;
2513 SETKNOB(SuccEnabled) ;
2514 SETKNOB(SuccRestrict) ;
2515 SETKNOB(Penalty) ;
2516 SETKNOB(Bonus) ;
2517 SETKNOB(BonusB) ;
2518 SETKNOB(Poverty) ;
2519 SETKNOB(SpinAfterFutile) ;
2520 SETKNOB(UsePause) ;
2521 SETKNOB(SpinEarly) ;
2522 SETKNOB(OState) ;
2523 SETKNOB(MaxSpinners) ;
2524 SETKNOB(PreSpin) ;
2525 SETKNOB(ExitPolicy) ;
2526 SETKNOB(QMode);
2527 SETKNOB(ResetEvent) ;
2528 SETKNOB(MoveNotifyee) ;
2529 SETKNOB(FastHSSEC) ;
2530 #undef SETKNOB
2531
2532 if (os::is_MP()) {
2533 BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
2534 if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
2535 // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
2536 } else {
2537 Knob_SpinLimit = 0 ;
2538 Knob_SpinBase = 0 ;
2539 Knob_PreSpin = 0 ;
2540 Knob_FixedSpin = -1 ;
2541 }
2542
2543 if (Knob_LogSpins == 0) {
2544 ObjectMonitor::_sync_FailedSpins = NULL ;
2545 }
2546
2547 free (knobs) ;
2548 OrderAccess::fence() ;
2549 InitDone = 1 ;
2550 }
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/markOop.hpp"
29 #include "oops/oop.inline.hpp"
30 #include "runtime/handles.inline.hpp"
31 #include "runtime/interfaceSupport.hpp"
32 #include "runtime/mutexLocker.hpp"
33 #include "runtime/objectMonitor.hpp"
34 #include "runtime/objectMonitor.inline.hpp"
35 #include "runtime/orderAccess.inline.hpp"
36 #include "runtime/osThread.hpp"
37 #include "runtime/stubRoutines.hpp"
38 #include "runtime/thread.inline.hpp"
39 #include "services/threadService.hpp"
40 #include "trace/tracing.hpp"
41 #include "trace/traceMacros.hpp"
42 #include "evtrace/traceEvents.hpp"
43 #include "utilities/dtrace.hpp"
44 #include "utilities/macros.hpp"
45 #include "utilities/preserveException.hpp"
46 #ifdef TARGET_OS_FAMILY_linux
47 # include "os_linux.inline.hpp"
48 #endif
49 #ifdef TARGET_OS_FAMILY_solaris
50 # include "os_solaris.inline.hpp"
51 #endif
52 #ifdef TARGET_OS_FAMILY_windows
53 # include "os_windows.inline.hpp"
54 #endif
55 #ifdef TARGET_OS_FAMILY_bsd
56 # include "os_bsd.inline.hpp"
57 #endif
58
59 #if defined(__GNUC__) && !defined(IA64) && !defined(PPC64)
60 // Need to inhibit inlining for older versions of GCC to avoid build-time failures
61 #define ATTR __attribute__((noinline))
62 #else
299
300 bool ObjectMonitor::try_enter(Thread* THREAD) {
301 if (THREAD != _owner) {
302 if (THREAD->is_lock_owned ((address)_owner)) {
303 assert(_recursions == 0, "internal state error");
304 _owner = THREAD ;
305 _recursions = 1 ;
306 OwnerIsThread = 1 ;
307 return true;
308 }
309 if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
310 return false;
311 }
312 return true;
313 } else {
314 _recursions++;
315 return true;
316 }
317 }
318
319 void ATTR ObjectMonitor::enter(int after_wait, TRAPS) {
320 // The following code is ordered to check the most common cases first
321 // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
322 Thread * const Self = THREAD ;
323 void * cur ;
324
325 cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
326 if (cur == NULL) {
327 // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
328 assert (_recursions == 0 , "invariant") ;
329 assert (_owner == Self, "invariant") ;
330 // CONSIDER: set or assert OwnerIsThread == 1
331 return ;
332 }
333
334 if (cur == Self) {
335 // TODO-FIXME: check for integer overflow! BUGID 6557169.
336 _recursions ++ ;
337 return ;
338 }
339
362 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
363 Self->_Stalled = 0 ;
364 return ;
365 }
366
367 assert (_owner != Self , "invariant") ;
368 assert (_succ != Self , "invariant") ;
369 assert (Self->is_Java_thread() , "invariant") ;
370 JavaThread * jt = (JavaThread *) Self ;
371 assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
372 assert (jt->thread_state() != _thread_blocked , "invariant") ;
373 assert (this->object() != NULL , "invariant") ;
374 assert (_count >= 0, "invariant") ;
375
376 // Prevent deflation at STW-time. See deflate_idle_monitors() and is_busy().
377 // Ensure the object-monitor relationship remains stable while there's contention.
378 Atomic::inc_ptr(&_count);
379
380 EventJavaMonitorEnter event;
381
382 int trace_flags = 0;
383 { // Change java thread status to indicate blocked on monitor enter.
384 JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
385
386 if (EnableEventTracing) {
387 TraceEvents::write_monitor_contended_enter(this, (TraceTypes::monitor_enter_wait) after_wait);
388 }
389
390 DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
391 if (JvmtiExport::should_post_monitor_contended_enter()) {
392 JvmtiExport::post_monitor_contended_enter(jt, this);
393
394 // The current thread does not yet own the monitor and does not
395 // yet appear on any queues that would get it made the successor.
396 // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
397 // handler cannot accidentally consume an unpark() meant for the
398 // ParkEvent associated with this ObjectMonitor.
399 }
400
401 OSThreadContendState osts(Self->osthread());
402 ThreadBlockInVM tbivm(jt);
403
404 Self->set_current_pending_monitor(this);
405
406 // TODO-FIXME: change the following for(;;) loop to straight-line code.
407 for (;;) {
408 jt->set_suspend_equivalent();
409 // cleared by handle_special_suspend_equivalent_condition()
410 // or java_suspend_self()
411
412 trace_flags |= EnterI (THREAD) ;
413
414 if (!ExitSuspendEquivalent(jt)) break ;
415
416 //
417 // We have acquired the contended monitor, but while we were
418 // waiting another thread suspended us. We don't want to enter
419 // the monitor while suspended because that would surprise the
420 // thread that suspended us.
421 //
422 _recursions = 0 ;
423 _succ = NULL ;
424 exit (false, Self) ;
425
426 jt->java_suspend_self();
427 }
428 Self->set_current_pending_monitor(NULL);
429
430 // We cleared the pending monitor info since we've just gotten past
431 // the enter-check-for-suspend dance and we now own the monitor free
432 // and clear, i.e., it is no longer pending. The ThreadBlockInVM
442 Self->_Stalled = 0 ;
443
444 // Must either set _recursions = 0 or ASSERT _recursions == 0.
445 assert (_recursions == 0 , "invariant") ;
446 assert (_owner == Self , "invariant") ;
447 assert (_succ != Self , "invariant") ;
448 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
449
450 // The thread -- now the owner -- is back in vm mode.
451 // Report the glorious news via TI,DTrace and jvmstat.
452 // The probe effect is non-trivial. All the reportage occurs
453 // while we hold the monitor, increasing the length of the critical
454 // section. Amdahl's parallel speedup law comes vividly into play.
455 //
456 // Another option might be to aggregate the events (thread local or
457 // per-monitor aggregation) and defer reporting until a more opportune
458 // time -- such as next time some thread encounters contention but has
459 // yet to acquire the lock. While spinning, that thread could
460 // increment JVMStat counters, etc.
461
462 if (EnableEventTracing) {
463 TraceEvents::write_monitor_contended_entered(this, (TraceTypes::monitor_entered_flags) trace_flags);
464 }
465
466 DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
467 if (JvmtiExport::should_post_monitor_contended_entered()) {
468 JvmtiExport::post_monitor_contended_entered(jt, this);
469
470 // The current thread already owns the monitor and is not going to
471 // call park() for the remainder of the monitor enter protocol. So
472 // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
473 // event handler consumed an unpark() issued by the thread that
474 // just exited the monitor.
475 }
476
477 if (event.should_commit()) {
478 event.set_klass(((oop)this->object())->klass());
479 event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid);
480 event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
481 event.commit();
482 }
483
484 if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
485 ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
492
493 int ObjectMonitor::TryLock (Thread * Self) {
494 for (;;) {
495 void * own = _owner ;
496 if (own != NULL) return 0 ;
497 if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
498 // Either guarantee _recursions == 0 or set _recursions = 0.
499 assert (_recursions == 0, "invariant") ;
500 assert (_owner == Self, "invariant") ;
501 // CONSIDER: set or assert that OwnerIsThread == 1
502 return 1 ;
503 }
504 // The lock had been free momentarily, but we lost the race to the lock.
505 // Interference -- the CAS failed.
506 // We can either return -1 or retry.
507 // Retry doesn't make much sense because the lock was just acquired by another thread.
508 if (true) return -1 ;
509 }
510 }
511
512 int ATTR ObjectMonitor::EnterI (TRAPS) {
513 int trace_flags = 0;
514
515 Thread * Self = THREAD ;
516 assert (Self->is_Java_thread(), "invariant") ;
517 assert (((JavaThread *) Self)->thread_state() == _thread_blocked , "invariant") ;
518
519 // Try the lock - TATAS
520 if (TryLock (Self) > 0) {
521 assert (_succ != Self , "invariant") ;
522 assert (_owner == Self , "invariant") ;
523 assert (_Responsible != Self , "invariant") ;
524 return trace_flags;
525 }
526
527 DeferredInitialize () ;
528
529 // We try one round of spinning *before* enqueueing Self.
530 //
531 // If the _owner is ready but OFFPROC we could use a YieldTo()
532 // operation to donate the remainder of this thread's quantum
533 // to the owner. This has subtle but beneficial affinity
534 // effects.
535
536 if (TrySpin (Self) > 0) {
537 assert (_owner == Self , "invariant") ;
538 assert (_succ != Self , "invariant") ;
539 assert (_Responsible != Self , "invariant") ;
540 return trace_flags;
541 }
542
543 // The Spin failed -- Enqueue and park the thread ...
544 assert (_succ != Self , "invariant") ;
545 assert (_owner != Self , "invariant") ;
546 assert (_Responsible != Self , "invariant") ;
547
548 // Enqueue "Self" on ObjectMonitor's _cxq.
549 //
550 // Node acts as a proxy for Self.
551 // As an aside, if we were ever to rewrite the synchronization code mostly
552 // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
553 // Java objects. This would avoid awkward lifecycle and liveness issues,
554 // as well as eliminate a subset of ABA issues.
555 // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
556 //
557
558 ObjectWaiter node(Self) ;
559 Self->_ParkEvent->reset() ;
560 node._prev = (ObjectWaiter *) 0xBAD ;
561 node.TState = ObjectWaiter::TS_CXQ ;
562
563 // Push "Self" onto the front of the _cxq.
564 // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
565 // Note that spinning tends to reduce the rate at which threads
566 // enqueue and dequeue on EntryList|cxq.
567 ObjectWaiter * nxt ;
568 for (;;) {
569 node._next = nxt = _cxq ;
570 if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;
571
572 // Interference - the CAS failed because _cxq changed. Just retry.
573 // As an optional optimization we retry the lock.
574 if (TryLock (Self) > 0) {
575 assert (_succ != Self , "invariant") ;
576 assert (_owner == Self , "invariant") ;
577 assert (_Responsible != Self , "invariant") ;
578 return trace_flags;
579 }
580 }
581
582 trace_flags |= TraceTypes::entered_queued;
583
584 // Check for cxq|EntryList edge transition to non-null. This indicates
585 // the onset of contention. While contention persists exiting threads
586 // will use a ST:MEMBAR:LD 1-1 exit protocol. When contention abates exit
587 // operations revert to the faster 1-0 mode. This enter operation may interleave
588 // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
589 // arrange for one of the contending threads to use a timed park() operation
590 // to detect and recover from the race. (Stranding is a form of progress failure
591 // where the monitor is unlocked but all the contending threads remain parked).
592 // That is, at least one of the contending threads will periodically poll _owner.
593 // One of the contending threads will become the designated "Responsible" thread.
594 // The Responsible thread uses a timed park instead of a normal indefinite park
595 // operation -- it periodically wakes and checks for and recovers from potential
596 // strandings admitted by 1-0 exit operations. We need at most one Responsible
597 // thread per-monitor at any given moment. Only threads on cxq|EntryList may
598 // be responsible for a monitor.
599 //
600 // Currently, one of the contended threads takes on the added role of "Responsible".
601 // A viable alternative would be to use a dedicated "stranding checker" thread
602 // that periodically iterated over all the threads (or active monitors) and unparked
603 // successors where there was risk of stranding. This would help eliminate the
629
630 if (TryLock (Self) > 0) break ;
631 assert (_owner != Self, "invariant") ;
632
633 if ((SyncFlags & 2) && _Responsible == NULL) {
634 Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
635 }
636
637 // park self
638 if (_Responsible == Self || (SyncFlags & 1)) {
639 TEVENT (Inflated enter - park TIMED) ;
640 Self->_ParkEvent->park ((jlong) RecheckInterval) ;
641 // Increase the RecheckInterval, but clamp the value.
642 RecheckInterval *= 8 ;
643 if (RecheckInterval > 1000) RecheckInterval = 1000 ;
644 } else {
645 TEVENT (Inflated enter - park UNTIMED) ;
646 Self->_ParkEvent->park() ;
647 }
648
649 trace_flags |= TraceTypes::entered_parked;
650
651 if (TryLock(Self) > 0) break ;
652
653 // The lock is still contested.
654 // Keep a tally of the # of futile wakeups.
655 // Note that the counter is not protected by a lock or updated by atomics.
656 // That is by design - we trade "lossy" counters which are exposed to
657 // races during updates for a lower probe effect.
658 TEVENT (Inflated enter - Futile wakeup) ;
659 if (ObjectMonitor::_sync_FutileWakeups != NULL) {
660 ObjectMonitor::_sync_FutileWakeups->inc() ;
661 }
662 ++ nWakeups ;
663
664 // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
665 // We can defer clearing _succ until after the spin completes
666 // TrySpin() must tolerate being called with _succ == Self.
667 // Try yet another round of adaptive spinning.
668 if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ;
669
670 // We can find that we were unpark()ed and redesignated _succ while
736 // STs to monitor meta-data and user-data could reorder with (become
737 // visible after) the ST in exit that drops ownership of the lock.
738 // Some other thread could then acquire the lock, but observe inconsistent
739 // or old monitor meta-data and heap data. That violates the JMM.
740 // To that end, the 1-0 exit() operation must have at least STST|LDST
741 // "release" barrier semantics. Specifically, there must be at least a
742 // STST|LDST barrier in exit() before the ST of null into _owner that drops
743 // the lock. The barrier ensures that changes to monitor meta-data and data
744 // protected by the lock will be visible before we release the lock, and
745 // therefore before some other thread (CPU) has a chance to acquire the lock.
746 // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
747 //
748 // Critically, any prior STs to _succ or EntryList must be visible before
749 // the ST of null into _owner in the *subsequent* (following) corresponding
750 // monitorexit. Recall too, that in 1-0 mode monitorexit does not necessarily
751 // execute a serializing instruction.
752
753 if (SyncFlags & 8) {
754 OrderAccess::fence() ;
755 }
756 return trace_flags;
757 }
758
759 // ReenterI() is a specialized inline form of the latter half of the
760 // contended slow-path from EnterI(). We use ReenterI() only for
761 // monitor reentry in wait().
762 //
763 // In the future we should reconcile EnterI() and ReenterI(), adding
764 // Knob_Reset and Knob_SpinAfterFutile support and restructuring the
765 // loop accordingly.
766
767 void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
768 assert (Self != NULL , "invariant") ;
769 assert (SelfNode != NULL , "invariant") ;
770 assert (SelfNode->_thread == Self , "invariant") ;
771 assert (_waiters > 0 , "invariant") ;
772 assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
773 assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
774 JavaThread * jt = (JavaThread *) Self ;
775
776 int nWakeups = 0 ;
952 //
953 // The CAS() in enter provides for safety and exclusion, while the CAS or
954 // MEMBAR in exit provides for progress and avoids stranding. 1-0 locking
955 // eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
956 // We detect and recover from stranding with timers.
957 //
958 // If a thread transiently strands it'll park until (a) another
959 // thread acquires the lock and then drops the lock, at which time the
960 // exiting thread will notice and unpark the stranded thread, or, (b)
961 // the timer expires. If the lock is high traffic then the stranding latency
962 // will be low due to (a). If the lock is low traffic then the odds of
963 // stranding are lower, although the worst-case stranding latency
964 // is longer. Critically, we don't want to put excessive load in the
965 // platform's timer subsystem. We want to minimize both the timer injection
966 // rate (timers created/sec) as well as the number of timers active at
967 // any one time. (More precisely, we want to minimize timer-seconds, which is
968 // the integral of the # of active timers at any instant over time).
969 // Both impinge on OS scalability. Given that, at most one thread parked on
970 // a monitor will use a timer.
971
972 void ATTR ObjectMonitor::exit(intptr_t *exit_stack_id_for_wait, bool not_suspended, TRAPS) {
973 Thread * Self = THREAD ;
974 if (THREAD != _owner) {
975 if (THREAD->is_lock_owned((address) _owner)) {
976 // Transmute _owner from a BasicLock pointer to a Thread address.
977 // We don't need to hold _mutex for this transition.
978 // Non-null to Non-null is safe as long as all readers can
979 // tolerate either flavor.
980 assert (_recursions == 0, "invariant") ;
981 _owner = THREAD ;
982 _recursions = 0 ;
983 OwnerIsThread = 1 ;
984 } else {
985 // NOTE: we need to handle unbalanced monitor enter/exit
986 // in native code by throwing an exception.
987 // TODO: Throw an IllegalMonitorStateException ?
988 TEVENT (Exit - Throw IMSX) ;
989 assert(false, "Non-balanced monitor enter/exit!");
990 if (false) {
991 THROW(vmSymbols::java_lang_IllegalMonitorStateException());
992 }
997 if (_recursions != 0) {
998 _recursions--; // this is simple recursive enter
999 TEVENT (Inflated exit - recursive) ;
1000 return ;
1001 }
1002
1003 // Invariant: after setting Responsible=null a thread must execute
1004 // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
1005 if ((SyncFlags & 4) == 0) {
1006 _Responsible = NULL ;
1007 }
1008
1009 #if INCLUDE_TRACE
1010 // get the owner's thread id for the MonitorEnter event
1011 // if it is enabled and the thread isn't suspended
1012 if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
1013 _previous_owner_tid = SharedRuntime::get_java_tid(Self);
1014 }
1015 #endif
1016
1017 TraceEventMonitorContendedExited event(this);
1018 if (exit_stack_id_for_wait != NULL) {
1019 // This is a temporary exit for Object.wait().
1020 // We don't want to use the current stack trace as the lock site, so if we
1021 // end up writing the event, we allocate a stack id that we resolve later
1022 // when the monitor is really exited. When there are multiple waits, we
1023 // reuse the first preallocated stack id.
1024 event.set_use_or_preallocate_stack_id_at((TraceTypes::stack_id *) exit_stack_id_for_wait);
1025 event.set_resolve_stack(false);
1026 } else {
1027 // true exit
1028 event.set_resolve_stack(true);
1029 if (_trace_exit_stack != 0) {
1030 event.set_use_stack_id(_trace_exit_stack);
1031 event.enable(); // always write the exit event to resolve the stack
1032 }
1033 }
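// Sketch of the two call shapes above across an Object.wait() (based on the
// comments here; the wait-side call site is not shown in this excerpt):
//
//   intptr_t stack_id = 0;
//   exit(&stack_id, true, Self);  // temporary exit for wait(): preallocates
//                                 // a stack id, defers resolving the stack
//   ... park on the WaitSet, get notified, re-enter ...
//   exit(NULL, true, Self);       // true exit: resolves the stack so the
//                                 // event reports the original lock site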
1034 if ((intptr_t(_EntryList) | intptr_t(_cxq)) != 0) {
1035 // there are queued threads -- we are definitely writing a trace event
1036 event.enable();
1037 }
1038
1039 _trace_exit_stack = 0;
1040
1041 for (;;) {
1042 assert (THREAD == _owner, "invariant") ;
1043
1044 //
1045 // NOTE: we have removed all code paths for ExitPolicy != 0 and QMode != 0
1046 // knob values for simplicity of event tracing.
1047 //
1048
1049 // release semantics: prior loads and stores from within the critical section
1050 // must not float (reorder) past the following store that drops the lock.
1051 // On SPARC that requires MEMBAR #loadstore|#storestore.
1052 // But of course in TSO #loadstore|#storestore is not required.
1053 // I'd like to write one of the following:
1054 // A. OrderAccess::release() ; _owner = NULL
1055 // B. OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
1056 // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
1057 // store into a _dummy variable. That store is not needed, but can result
1058 // in massive wasteful coherency traffic on classic SMP systems.
1059 // Instead, I use release_store(), which is implemented as just a simple
1060 // ST on x64, x86 and SPARC.
1061 OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock
1062 OrderAccess::storeload() ; // See if we need to wake a successor
1063 bool queues_empty = ((intptr_t(_EntryList) | intptr_t(_cxq)) == 0);
1064 bool have_succ = (_succ != NULL);
1065 if (!queues_empty) {
1066 // some thread might have entered itself on _cxq in the meantime
1067 event.enable();
1068 }
1069 if (queues_empty || have_succ) {
1070 TEVENT (Inflated exit - simple egress) ;
1071 return ;
1072 }
1073 TEVENT (Inflated exit - complex egress) ;
1074
1075 // Normally the exiting thread is responsible for ensuring succession,
1076 // but if other successors are ready or other entering threads are spinning
1077 // then this thread can simply store NULL into _owner and exit without
1078 // waking a successor. The existence of spinners or ready successors
1079 // guarantees proper succession (liveness). Responsibility passes to the
1080 // ready or running successors. The exiting thread delegates the duty.
1081 // More precisely, if a successor already exists this thread is absolved
1082 // of the responsibility of waking (unparking) one.
1083 //
1084 // The _succ variable is critical to reducing futile wakeup frequency.
1085 // _succ identifies the "heir presumptive" thread that has been made
1086 // ready (unparked) but that has not yet run. We need only one such
1087 // successor thread to guarantee progress.
1088 // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
1089 // section 3.3 "Futile Wakeup Throttling" for details.
1090 //
1091 // Note that spinners in Enter() also set _succ non-null.
1092 // In the current implementation spinners opportunistically set
1093 // _succ so that exiting threads might avoid waking a successor.
1094 // Another less appealing alternative would be for the exiting thread
1095 // to drop the lock and then spin briefly to see if a spinner managed
1096 // to acquire the lock. If so, the exiting thread could exit
1097 // immediately without waking a successor, otherwise the exiting
1098 // thread would need to dequeue and wake a successor.
1099 // (Note that we'd need to make the post-drop spin short, but no
1100 // shorter than the worst-case round-trip cache-line migration time.
1101 // The dropped lock needs to become visible to the spinner, and then
1102 // the acquisition of the lock by the spinner must become visible to
1103 // the exiting thread).
1104 //
1105
1106 // It appears that an heir-presumptive (successor) must be made ready.
1107 // Only the current lock owner can manipulate the EntryList or
1108 // drain _cxq, so we need to reacquire the lock. If we fail
1109 // to reacquire the lock the responsibility for ensuring succession
1110 // falls to the new owner.
1111 //
1112 if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
1113 return ;
1114 }
1115 TEVENT (Exit - Reacquired) ;
1116
1117 guarantee (_owner == THREAD, "invariant") ;
1118
1119 ObjectWaiter * w = NULL ;
1120
1121 w = _EntryList ;
1122 if (w != NULL) {
1123 // I'd like to write: guarantee (w->_thread != Self).
1124 // But in practice an exiting thread may find itself on the EntryList.
1125 // Let's say thread T1 calls O.wait(). Wait() enqueues T1 on O's waitset and
1126 // then calls exit(). Exit releases the lock by setting O._owner to NULL.
1127 // Let's say T1 then stalls just after that store. T2 acquires O and calls
1128 // O.notify(), which moves T1 from O's waitset to O's EntryList. T2 then
1129 // releases the lock "O". T1 now resumes immediately after its ST of null
1130 // into _owner, above. T1 notices that the EntryList is populated, so it
1131 // reacquires the lock and then finds itself on the EntryList.
1132 // Given all that, we have to tolerate the circumstance where "w" is
1133 // associated with Self.
1134 assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1135 ExitEpilog (Self, w) ;
1136 return ;
1137 }
1138
1139 // If we find that both _cxq and EntryList are null then just
1140 // re-run the exit protocol from the top.
1141 w = _cxq ;
1142 if (w == NULL) continue ;
1143
1144 // Drain _cxq into EntryList - bulk transfer.
1145 // First, detach _cxq: the following loop is tantamount to
1146 // an atomic w = swap(&_cxq, NULL).
1147 for (;;) {
1148 assert (w != NULL, "Invariant") ;
1149 ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
1150 if (u == w) break ;
1151 w = u ;
1152 }
1153 TEVENT (Inflated exit - drain cxq into EntryList) ;
1154
1155 assert (w != NULL , "invariant") ;
1156 assert (_EntryList == NULL , "invariant") ;
1157
1158 // Convert the LIFO SLL anchored by _cxq into a DLL.
1159 // The list reorganization step operates in O(LENGTH(w)) time.
1160 // It's critical that this step operate quickly as
1161 // "Self" still holds the outer-lock, restricting parallelism
1162 // and effectively lengthening the critical section.
1163 // Invariant: s chases t chases u.
1164 // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
1165 // we have faster access to the tail.
1166
1167 _EntryList = w ;
1168 ObjectWaiter * q = NULL ;
1169 ObjectWaiter * p ;
1170 for (p = w ; p != NULL ; p = p->_next) {
1171 guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
1172 p->TState = ObjectWaiter::TS_ENTER ;
1173 p->_prev = q ;
1174 q = p ;
1175 }
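// For a detached chain A -> B -> C, the pass above produces the EntryList
//     NULL <- A <-> B <-> C
// with every node flipped from TS_CXQ to TS_ENTER, so subsequent EntryList
// manipulation can walk the list in either direction.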
1176
1177 // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
1178 // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
1179
1180 // See if we can abdicate to a spinner instead of waking a thread.
1181 // A primary goal of the implementation is to reduce the
1182 // context-switch rate.
1183 if (_succ != NULL) continue;
1184
1185 w = _EntryList ;
1186 if (w != NULL) {
1187 guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1188 ExitEpilog (Self, w) ;
1189 return ;
1190 }
1191 }
1192 }
1193
1194 // ExitSuspendEquivalent:
1195 // A faster alternative to handle_special_suspend_equivalent_condition()
1265
1266 DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
1267 Trigger->unpark() ;
1268
1269 // Maintain stats and report events to JVMTI
1270 if (ObjectMonitor::_sync_Parks != NULL) {
1271 ObjectMonitor::_sync_Parks->inc() ;
1272 }
1273 }
1274
1275
1276 // -----------------------------------------------------------------------------
1277 // Class Loader deadlock handling.
1278 //
1279 // complete_exit() exits a lock, returning the recursion count
1280 // complete_exit()/reenter() operate as a wait without waiting
1281 // complete_exit() requires an inflated monitor
1282 // The _owner field is not always the Thread addr even with an
1283 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1284 // thread due to contention.
1285 void ObjectMonitor::complete_exit(intptr_t *saved_recursions, intptr_t *saved_trace_exit_stack, TRAPS) {
1286 Thread * const Self = THREAD;
1287 assert(Self->is_Java_thread(), "Must be Java thread!");
1288 JavaThread *jt = (JavaThread *)THREAD;
1289
1290 DeferredInitialize();
1291
1292 if (THREAD != _owner) {
1293 if (THREAD->is_lock_owned ((address)_owner)) {
1294 assert(_recursions == 0, "internal state error");
1295 _owner = THREAD ; /* Convert from basiclock addr to Thread addr */
1296 _recursions = 0 ;
1297 OwnerIsThread = 1 ;
1298 }
1299 }
1300
1301 guarantee(Self == _owner, "complete_exit not owner");
1302 // record old recursion level and exit stack
1303 if (saved_recursions != NULL) *saved_recursions = _recursions;
1304 if (saved_trace_exit_stack != NULL) *saved_trace_exit_stack = _trace_exit_stack;
1305 _recursions = 0;
1306 exit(saved_trace_exit_stack, true, Self);
1307 guarantee (_owner != Self, "invariant");
1308 }
1309
1310 // reenter() enters a lock and sets recursion count
1311 // complete_exit/reenter operate as a wait without waiting
1312 void ObjectMonitor::reenter(intptr_t saved_recursions, intptr_t saved_trace_exit_stack, TRAPS) {
1313 Thread * const Self = THREAD;
1314 assert(Self->is_Java_thread(), "Must be Java thread!");
1315 JavaThread *jt = (JavaThread *)THREAD;
1316
1317 guarantee(_owner != Self, "reenter already owner");
1318 enter (THREAD); // enter the monitor
1319 guarantee (_recursions == 0, "reenter recursion");
1320 _recursions = saved_recursions;
1321 _trace_exit_stack = saved_trace_exit_stack;
1322 }
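// Illustrative pairing (a sketch of a caller, not code from this file):
//     intptr_t saved_recursions, saved_stack;
//     mon->complete_exit(&saved_recursions, &saved_stack, THREAD);
//     ... run code that must not hold this monitor ...
//     mon->reenter(saved_recursions, saved_stack, THREAD);
// complete_exit() fully releases the lock whatever the recursion depth;
// reenter() blocks until the lock is reacquired and then restores the depth.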
1323
1324
1325 // -----------------------------------------------------------------------------
1326 // A macro is used below because there may already be a pending
1327 // exception which should not abort the execution of the routines
1328 // which use this (which is why we don't put this into check_slow and
1329 // call it with a CHECK argument).
1330
1331 #define CHECK_OWNER() \
1332 do { \
1333 if (THREAD != _owner) { \
1334 if (THREAD->is_lock_owned((address) _owner)) { \
1335 _owner = THREAD ; /* Convert from basiclock addr to Thread addr */ \
1336 _recursions = 0; \
1337 OwnerIsThread = 1 ; \
1338 } else { \
1339 TEVENT (Throw IMSX) ; \
1340 THROW(vmSymbols::java_lang_IllegalMonitorStateException()); \
1341 } \
1422 // for a pending interrupt.
1423 ObjectWaiter node(Self);
1424 node.TState = ObjectWaiter::TS_WAIT ;
1425 Self->_ParkEvent->reset() ;
1426 OrderAccess::fence(); // ST into Event; membar ; LD interrupted-flag
1427
1428 // Enter the waiting queue, which is a circular doubly linked list in this case,
1429 // though it could be a priority queue or any other suitable data structure.
1430 // _WaitSetLock protects the wait queue. Normally the wait queue is accessed only
1431 // by the owner of the monitor *except* in the case where park()
1432 // returns because of a timeout or interrupt. Contention is exceptionally rare
1433 // so we use a simple spin-lock instead of a heavier-weight blocking lock.
1434
1435 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
1436 AddWaiter (&node) ;
1437 Thread::SpinRelease (&_WaitSetLock) ;
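// (The splice under _WaitSetLock is only a handful of instructions, which
//  is what makes the simple spin-lock acceptable here: a contending thread
//  spins for at most the length of AddWaiter(), never across a park().)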
1438
1439 if ((SyncFlags & 4) == 0) {
1440 _Responsible = NULL ;
1441 }
1442 intptr_t saved_recursions = _recursions; // record the old recursion count
1443 intptr_t saved_trace_exit_stack = _trace_exit_stack;
1444 _waiters++; // increment the number of waiters
1445 _recursions = 0; // drop the recursion count to zero for the duration of the wait
1446 exit(&saved_trace_exit_stack, true, Self); // temporary exit for wait(); preallocates the exit-stack id
1447 guarantee (_owner != Self, "invariant") ;
1448
1449 // The thread is on the WaitSet list - now park() it.
1450 // On MP systems it's conceivable that a brief spin before we park
1451 // could be profitable.
1452 //
1453 // TODO-FIXME: change the following logic to a loop of the form
1454 // while (!timeout && !interrupted && _notified == 0) park()
1455
1456 int ret = OS_OK ;
1457 int WasNotified = 0 ;
1458 { // State transition wrappers
1459 OSThread* osthread = Self->osthread();
1460 OSThreadWaitState osts(osthread, true);
1461 {
1462 ThreadBlockInVM tbivm(jt);
1463 // Thread is in thread_blocked state and oop access is unsafe.
1464 jt->set_suspend_equivalent();
1465
1466 if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
1543 //
1544 // We redo the unpark() to ensure forward progress, i.e., we
1545 // don't want all pending threads hanging (parked) with none
1546 // entering the unlocked monitor.
1547 node._event->unpark();
1548 }
1549 }
1550
1551 if (event.should_commit()) {
1552 post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
1553 }
1554
1555 OrderAccess::fence() ;
1556
1557 assert (Self->_Stalled != 0, "invariant") ;
1558 Self->_Stalled = 0 ;
1559
1560 assert (_owner != Self, "invariant") ;
1561 ObjectWaiter::TStates v = node.TState ;
1562 if (v == ObjectWaiter::TS_RUN) {
1563 int after_wait = TraceTypes::enter_after_wait_other;
1564 if (node._notified) {
1565 after_wait = TraceTypes::enter_after_wait_notify;
1566 } else if (ret == OS_TIMEOUT) {
1567 after_wait = TraceTypes::enter_after_wait_timeout;
1568 }
1569 enter (after_wait, Self) ;
1570 } else {
1571 guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
1572 ReenterI (Self, &node) ;
1573 node.wait_reenter_end(this);
1574 }
1575
1576 // Self has reacquired the lock.
1577 // Lifecycle - the node representing Self must not appear on any queues.
1578 // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1579 // want residual elements associated with this thread left on any lists.
1580 guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
1581 assert (_owner == Self, "invariant") ;
1582 assert (_succ != Self , "invariant") ;
1583 } // OSThreadWaitState()
1584
1585 jt->set_current_waiting_monitor(NULL);
1586
1587 guarantee (_recursions == 0, "invariant") ;
1588 // restore the saved recursion count and exit stack
1589 _recursions = saved_recursions;
1590 _trace_exit_stack = saved_trace_exit_stack;
1591 _waiters--; // decrement the number of waiters
1592
1593 // Verify a few postconditions
1594 assert (_owner == Self , "invariant") ;
1595 assert (_succ != Self , "invariant") ;
1596 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
1597
1598 if (SyncFlags & 32) {
1599 OrderAccess::fence() ;
1600 }
1601
1602 // check if the notification happened
1603 if (!WasNotified) {
1604 // no, it could be timeout or Thread.interrupt() or both
1605 // check for interrupt event, otherwise it is timeout
1606 if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1607 TEVENT (Wait - throw IEX from epilog) ;
1608 THROW(vmSymbols::java_lang_InterruptedException());
1609 }
1610 }
2435 SETKNOB(LogSpins) ;
2436 SETKNOB(SpinSetSucc) ;
2437 SETKNOB(SuccEnabled) ;
2438 SETKNOB(SuccRestrict) ;
2439 SETKNOB(Penalty) ;
2440 SETKNOB(Bonus) ;
2441 SETKNOB(BonusB) ;
2442 SETKNOB(Poverty) ;
2443 SETKNOB(SpinAfterFutile) ;
2444 SETKNOB(UsePause) ;
2445 SETKNOB(SpinEarly) ;
2446 SETKNOB(OState) ;
2447 SETKNOB(MaxSpinners) ;
2448 SETKNOB(PreSpin) ;
2449 SETKNOB(ExitPolicy) ;
2450 SETKNOB(QMode);
2451 SETKNOB(ResetEvent) ;
2452 SETKNOB(MoveNotifyee) ;
2453 SETKNOB(FastHSSEC) ;
2454 #undef SETKNOB
2455
2456 guarantee(Knob_ExitPolicy == 0, "Sorry, event tracing does not support non-default ExitPolicy");
2457 guarantee(Knob_QMode == 0, "Sorry, event tracing does not support non-default QMode");
2458
2459 if (os::is_MP()) {
2460 BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
2461 if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
2462 // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
2463 } else {
2464 Knob_SpinLimit = 0 ;
2465 Knob_SpinBase = 0 ;
2466 Knob_PreSpin = 0 ;
2467 Knob_FixedSpin = -1 ;
2468 }
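// (Example: Knob_SpinBackOff == 4 yields BackOffMask == (1 << 4) - 1 == 0xF,
//  a 16-slot randomized back-off window for the spin loop. On uniprocessors
//  the branch above disables spinning entirely, since there is no other
//  processor to release the lock while we spin.)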
2469
2470 if (Knob_LogSpins == 0) {
2471 ObjectMonitor::_sync_FailedSpins = NULL ;
2472 }
2473
2474 free (knobs) ;
2475 OrderAccess::fence() ;
2476 InitDone = 1 ;
2477 }