  // timer scalability issues we see on some platforms as we'd only have one thread
  // -- the checker -- parked on a timer.

  if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
    // Try to assume the role of responsible thread for the monitor.
    // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
    Atomic::replace_if_null(Self, &_Responsible);
  }

  // The lock might have been released while this thread was occupied queueing
  // itself onto _cxq.  To close the race and avoid "stranding" and
  // progress-liveness failure we must resample-retry _owner before parking.
  // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
  // In this case the ST-MEMBAR is accomplished with CAS().
  //
  // TODO: Defer all thread state transitions until park-time.
  // Since state transitions are heavy and inefficient we'd like
  // to defer the state transitions until absolutely necessary,
  // and in doing so avoid some transitions ...

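  // Illustrative sketch of the duality noted above (for orientation, not a
  // separate code path): the CAS that pushed Self onto _cxq doubles as the ST
  // and the MEMBAR, and the TryLock() calls in the loop below perform the
  // trailing LD of _owner:
  //   enqueue:  Atomic::cmpxchg(node, &_cxq, nxt)   // ST _cxq + full fence
  //   resample: if (TryLock(Self) > 0) ...          // LD _owner before park()
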
  TEVENT(Inflated enter - Contention);
  int nWakeups = 0;
  int recheckInterval = 1;

  for (;;) {

    if (TryLock(Self) > 0) break;
    assert(_owner != Self, "invariant");

    if ((SyncFlags & 2) && _Responsible == NULL) {
      Atomic::replace_if_null(Self, &_Responsible);
    }

    // park self
    if (_Responsible == Self || (SyncFlags & 1)) {
      TEVENT(Inflated enter - park TIMED);
      Self->_ParkEvent->park((jlong) recheckInterval);
      // Increase the recheckInterval, but clamp the value.
      recheckInterval *= 8;
      if (recheckInterval > MAX_RECHECK_INTERVAL) {
        recheckInterval = MAX_RECHECK_INTERVAL;
      }
    } else {
      TEVENT(Inflated enter - park UNTIMED);
      Self->_ParkEvent->park();
    }
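
    // The timed-park backoff above is geometric with a clamp.  Assuming
    // MAX_RECHECK_INTERVAL is 1000 (its definition is not shown in this
    // excerpt), successive park timeouts run 1, 8, 64, 512, 1000, 1000, ...
    // so a stranded Responsible thread rechecks _owner at a bounded rate.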

    if (TryLock(Self) > 0) break;

    // The lock is still contested.
    // Keep a tally of the # of futile wakeups.
    // Note that the counter is not protected by a lock or updated by atomics.
    // That is by design - we trade "lossy" counters which are exposed to
    // races during updates for a lower probe effect.
    TEVENT(Inflated enter - Futile wakeup);
    // This PerfData object can be used in parallel with a safepoint.
    // See the work around in PerfDataManager::destroy().
    OM_PERFDATA_OP(FutileWakeups, inc());
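    // For contrast, a loss-free tally would need an atomic RMW on every futile
    // wakeup, e.g. something like (hypothetical field, sketch only):
    //   Atomic::inc(&_futile_wakeups);
    // The plain inc() above can drop concurrent updates, but it keeps this
    // already-expensive wakeup path free of extra coherency traffic.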
    ++nWakeups;

    // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
    // We can defer clearing _succ until after the spin completes.
    // TrySpin() must tolerate being called with _succ == Self.
    // Try yet another round of adaptive spinning.
    if ((Knob_SpinAfterFutile & 1) && TrySpin(Self) > 0) break;

    // We can find that we were unpark()ed and redesignated _succ while
    // we were spinning.  That's harmless.  If we iterate and call park(),
    // park() will consume the event and return immediately and we'll
    // just spin again.  This pattern can repeat, leaving _succ to simply
    // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
    // Alternately, we can sample fired() here, and if set, forgo spinning
    // in the next iteration.

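    // Sketch of the "sample fired()" alternative described above (illustrative,
    // not the shipped policy):
    //   bool have_pending_unpark = Self->_ParkEvent->fired();
    //   if (!have_pending_unpark && TrySpin(Self) > 0) break;
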
    if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {

// ...

// loop accordingly.

void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
  assert(Self != NULL, "invariant");
  assert(SelfNode != NULL, "invariant");
  assert(SelfNode->_thread == Self, "invariant");
  assert(_waiters > 0, "invariant");
  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
  assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
  JavaThread * jt = (JavaThread *) Self;

  int nWakeups = 0;
  for (;;) {
    ObjectWaiter::TStates v = SelfNode->TState;
    guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
    assert(_owner != Self, "invariant");

    if (TryLock(Self) > 0) break;
    if (TrySpin(Self) > 0) break;

    TEVENT(Wait Reentry - parking);

    // State transition wrappers around park() ...
    // ReenterI() wisely defers state transitions until
    // it's clear we must park the thread.
    {
      OSThreadContendState osts(Self->osthread());
      ThreadBlockInVM tbivm(jt);

      // cleared by handle_special_suspend_equivalent_condition()
      // or java_suspend_self()
      jt->set_suspend_equivalent();
      if (SyncFlags & 1) {
        Self->_ParkEvent->park((jlong)MAX_RECHECK_INTERVAL);
      } else {
        Self->_ParkEvent->park();
      }

      // were we externally suspended while we were waiting?
      for (;;) {
        if (!ExitSuspendEquivalent(jt)) break;
        if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
        jt->java_suspend_self();
        jt->set_suspend_equivalent();
      }
    }

    // Try again, but just so we distinguish between futile wakeups and
    // successful wakeups.  The following test isn't algorithmically
    // necessary, but it helps us maintain sensible statistics.
    if (TryLock(Self) > 0) break;

    // The lock is still contested.
    // Keep a tally of the # of futile wakeups.
    // Note that the counter is not protected by a lock or updated by atomics.
    // That is by design - we trade "lossy" counters which are exposed to
    // races during updates for a lower probe effect.
    TEVENT(Wait Reentry - futile wakeup);
    ++nWakeups;

    // Assuming this is not a spurious wakeup we'll normally
    // find that _succ == Self.
    if (_succ == Self) _succ = NULL;

    // Invariant: after clearing _succ a contending thread
    // *must* retry _owner before parking.
    OrderAccess::fence();

    // This PerfData object can be used in parallel with a safepoint.
    // See the work around in PerfDataManager::destroy().
    OM_PERFDATA_OP(FutileWakeups, inc());
  }

  // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
  // Normally we'll find Self on the EntryList.
  // Unlinking from the EntryList is constant-time and atomic-free.
  // From the perspective of the lock owner (this thread), the
  // EntryList is stable and cxq is prepend-only.

// ...
}

// By convention we unlink a contending thread from EntryList|cxq immediately
// after the thread acquires the lock in ::enter().  Equally, we could defer
// unlinking the thread until ::exit()-time.

void ObjectMonitor::UnlinkAfterAcquire(Thread *Self, ObjectWaiter *SelfNode) {
  assert(_owner == Self, "invariant");
  assert(SelfNode->_thread == Self, "invariant");

  if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
    // Normal case: remove Self from the DLL EntryList.
    // This is a constant-time operation.
    ObjectWaiter * nxt = SelfNode->_next;
    ObjectWaiter * prv = SelfNode->_prev;
    if (nxt != NULL) nxt->_prev = prv;
    if (prv != NULL) prv->_next = nxt;
    if (SelfNode == _EntryList) _EntryList = nxt;
    assert(nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant");
    assert(prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant");
    TEVENT(Unlink from EntryList);
  } else {
    assert(SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant");
    // Inopportune interleaving -- Self is still on the cxq.
    // This usually means the enqueue of self raced an exiting thread.
    // Normally we'll find Self near the front of the cxq, so
    // dequeueing is typically fast.  If need be we can accelerate
    // this with some MCS/CLH-like bidirectional list hints and advisory
    // back-links so dequeueing from the interior will normally operate
    // in constant-time.
    // Dequeue Self from either the head (with CAS) or from the interior
    // with a linear-time scan and normal non-atomic memory operations.
    // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
    // and then unlink Self from EntryList.  We have to drain eventually,
    // so it might as well be now.

    ObjectWaiter * v = _cxq;
    assert(v != NULL, "invariant");
    if (v != SelfNode || Atomic::cmpxchg(SelfNode->_next, &_cxq, v) != v) {
      // The CAS above can fail from interference IFF a "RAT" -- a
      // recently-arrived thread -- pushed itself onto cxq in the window.
      // In that case Self must be in the interior and can no longer be
      // at the head of cxq.
      if (v == SelfNode) {
        assert(_cxq != v, "invariant");
        v = _cxq;          // CAS above failed - start scan at head of list
      }
      ObjectWaiter * p;
      ObjectWaiter * q = NULL;
      for (p = v; p != NULL && p != SelfNode; p = p->_next) {
        q = p;
        assert(p->TState == ObjectWaiter::TS_CXQ, "invariant");
      }
      assert(v != SelfNode, "invariant");
      assert(p == SelfNode, "Node not found on cxq");
      assert(p != _cxq, "invariant");
      assert(q != NULL, "invariant");
      assert(q->_next == p, "invariant");
      q->_next = p->_next;
    }
    TEVENT(Unlink from cxq);
  }

#ifdef ASSERT
  // Diagnostic hygiene ...
  SelfNode->_prev  = (ObjectWaiter *) 0xBAD;
  SelfNode->_next  = (ObjectWaiter *) 0xBAD;
  SelfNode->TState = ObjectWaiter::TS_RUN;
#endif
}

// -----------------------------------------------------------------------------
// Exit support
//
// exit()
// ~~~~~~
// Note that the collector can't reclaim the objectMonitor or deflate
// the object out from underneath the thread calling ::exit() as the
// thread calling ::exit() never transitions to a stable state.
// This inhibits GC, which in turn inhibits asynchronous (and
// inopportune) reclamation of "this".

// ...
  Thread * const Self = THREAD;
  if (THREAD != _owner) {
    if (THREAD->is_lock_owned((address) _owner)) {
      // Transmute _owner from a BasicLock pointer to a Thread address.
      // We don't need to hold _mutex for this transition.
      // Non-null to non-null is safe as long as all readers can
      // tolerate either flavor.
      assert(_recursions == 0, "invariant");
      _owner = THREAD;
      _recursions = 0;
    } else {
      // Apparent unbalanced locking ...
      // Naively we'd like to throw IllegalMonitorStateException.
      // As a practical matter we can neither allocate nor throw an
      // exception as ::exit() can be called from leaf routines.
      // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
      // Upon deeper reflection, however, in a properly run JVM the only
      // way we should encounter this situation is in the presence of
      // unbalanced JNI locking.  TODO: CheckJNICalls.
      // See also: CR4414101
      TEVENT(Exit - Throw IMSX);
      assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");
      return;
    }
  }

  if (_recursions != 0) {
    _recursions--;        // this is simple recursive enter
    TEVENT(Inflated exit - recursive);
    return;
  }

  // Invariant: after setting Responsible=null a thread must execute
  // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
  if ((SyncFlags & 4) == 0) {
    _Responsible = NULL;
  }

#if INCLUDE_JFR
  // get the owner's thread id for the MonitorEnter event
  // if it is enabled and the thread isn't suspended
  if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
    _previous_owner_tid = JFR_THREAD_ID(Self);
  }
#endif

  for (;;) {
    assert(THREAD == _owner, "invariant");

    if (Knob_ExitPolicy == 0) {
      // release semantics: prior loads and stores from within the critical section
      // must not float (reorder) past the following store that drops the lock.
      // On SPARC that requires MEMBAR #loadstore|#storestore.
      // But of course in TSO #loadstore|#storestore is not required.
      // I'd like to write one of the following:
      // A.  OrderAccess::release() ; _owner = NULL
      // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
      // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
      // store into a _dummy variable.  That store is not needed, but can result
      // in massive wasteful coherency traffic on classic SMP systems.
      // Instead, I use release_store(), which is implemented as just a simple
      // ST on x64, x86 and SPARC.
      OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
      OrderAccess::storeload();                           // See if we need to wake a successor
      if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
        TEVENT(Inflated exit - simple egress);
        return;
      }
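      // For orientation: the exit side mirrors the enter-side Dekker duality
      // noted in EnterI (sketch of the two halves):
      //   exiting thread:  ST _owner = NULL;  MEMBAR #storeload;  LD _EntryList|_cxq
      //   entering thread: ST _cxq (via CAS); MEMBAR (via CAS);   LD _owner
      // With a full fence between each side's store and load, at least one of
      // the two threads must observe the other's store, so either the exiting
      // thread sees the queued entry or the entering thread sees the NULL owner.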
      TEVENT(Inflated exit - complex egress);
      // Other threads are blocked trying to acquire the lock.

      // Normally the exiting thread is responsible for ensuring succession,
      // but if other successors are ready or other entering threads are spinning
      // then this thread can simply store NULL into _owner and exit without
      // waking a successor.  The existence of spinners or ready successors
      // guarantees proper succession (liveness).  Responsibility passes to the
      // ready or running successors.  The exiting thread delegates the duty.
      // More precisely, if a successor already exists this thread is absolved
      // of the responsibility of waking (unparking) one.
      //
      // The _succ variable is critical to reducing futile wakeup frequency.
      // _succ identifies the "heir presumptive" thread that has been made
      // ready (unparked) but that has not yet run.  We need only one such
      // successor thread to guarantee progress.
      // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
      // section 3.3 "Futile Wakeup Throttling" for details.
      //
      // Note that spinners in Enter() also set _succ non-null.
      // In the current implementation spinners opportunistically set
      // _succ so that exiting threads might avoid waking a successor.
      // Another less appealing alternative would be for the exiting thread
      // to drop the lock and then spin briefly to see if a spinner managed
      // to acquire the lock.  If so, the exiting thread could exit
      // immediately without waking a successor, otherwise the exiting
      // thread would need to dequeue and wake a successor.
      // (Note that we'd need to make the post-drop spin short, but no
      // shorter than the worst-case round-trip cache-line migration time.
      // The dropped lock needs to become visible to the spinner, and then
      // the acquisition of the lock by the spinner must become visible to
      // the exiting thread).
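      //
      // Sketch of that post-drop-spin alternative (illustrative only; the knob
      // and loop below are hypothetical, not part of this implementation):
      //   OrderAccess::release_store(&_owner, (void*)NULL);
      //   for (int i = 0; i < PostDropSpinLimit; i++) {   // assumed tuning knob
      //     if (_owner != NULL) return;   // a spinner took the lock; done
      //     SpinPause();
      //   }
      //   // otherwise fall through: reacquire and wake a successor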

      // It appears that an heir-presumptive (successor) must be made ready.
      // Only the current lock owner can manipulate the EntryList or
      // drain _cxq, so we need to reacquire the lock.  If we fail
      // to reacquire the lock the responsibility for ensuring succession
      // falls to the new owner.
      //
      if (!Atomic::replace_if_null(THREAD, &_owner)) {
        return;
      }
      TEVENT(Exit - Reacquired);
    } else {
      if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
        OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
        OrderAccess::storeload();
        // Ratify the previously observed values.
        if (_cxq == NULL || _succ != NULL) {
          TEVENT(Inflated exit - simple egress);
          return;
        }

        // inopportune interleaving -- the exiting thread (this thread)
        // in the fast-exit path raced an entering thread in the slow-enter
        // path.
        // We have two choices:
        // A.  Try to reacquire the lock.
        //     If the CAS() fails return immediately, otherwise
        //     we either restart/rerun the exit operation, or simply
        //     fall-through into the code below which wakes a successor.
        // B.  If the elements forming the EntryList|cxq are TSM
        //     we could simply unpark() the lead thread and return
        //     without having set _succ.
        if (!Atomic::replace_if_null(THREAD, &_owner)) {
          TEVENT(Inflated exit - reacquire failed);
          return;
        }
        TEVENT(Inflated exit - reacquire succeeded);
      } else {
        TEVENT(Inflated exit - complex egress);
      }
    }

    guarantee(_owner == THREAD, "invariant");

    ObjectWaiter * w = NULL;
    int QMode = Knob_QMode;

    if (QMode == 2 && _cxq != NULL) {
      // QMode == 2 : cxq has precedence over EntryList.
      // Try to directly wake a successor from the cxq.
      // If successful, the successor will need to unlink itself from cxq.
      w = _cxq;
      assert(w != NULL, "invariant");
      assert(w->TState == ObjectWaiter::TS_CXQ, "Invariant");
      ExitEpilog(Self, w);
      return;
    }

    if (QMode == 3 && _cxq != NULL) {

// ...

      // associated with Self.
      assert(w->TState == ObjectWaiter::TS_ENTER, "invariant");
      ExitEpilog(Self, w);
      return;
    }

    // If we find that both _cxq and EntryList are null then just
    // re-run the exit protocol from the top.
    w = _cxq;
    if (w == NULL) continue;

    // Drain _cxq into EntryList - bulk transfer.
    // First, detach _cxq.
    // The following loop is tantamount to: w = swap(&cxq, NULL)
    for (;;) {
      assert(w != NULL, "Invariant");
      ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w);
      if (u == w) break;
      w = u;
    }
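    // The detach loop above is equivalent to a single atomic swap
    // (sketch; Atomic::xchg is HotSpot's general-purpose exchange):
    //   w = Atomic::xchg((ObjectWaiter*)NULL, &_cxq);
    // The CAS formulation retries with the freshly observed head, which also
    // lets the loop assert w != NULL on every pass.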
    TEVENT(Inflated exit - drain cxq into EntryList);

    assert(w != NULL, "invariant");
    assert(_EntryList == NULL, "invariant");

    // Convert the LIFO SLL anchored by _cxq into a DLL.
    // The list reorganization step operates in O(LENGTH(w)) time.
    // It's critical that this step operate quickly as
    // "Self" still holds the outer-lock, restricting parallelism
    // and effectively lengthening the critical section.
    // Invariant: s chases t chases u.
    // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
    // we have faster access to the tail.

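    // For reference, the order-preserving (non-reversing) conversion is the
    // simpler of the two forms -- roughly what the default QMode path does
    // (sketch, assuming w heads a TS_CXQ singly-linked list):
    //   _EntryList = w;
    //   ObjectWaiter * q = NULL;
    //   for (ObjectWaiter * p = w; p != NULL; p = p->_next) {
    //     p->TState = ObjectWaiter::TS_ENTER;
    //     p->_prev  = q;
    //     q = p;
    //   }
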
    if (QMode == 1) {
      // QMode == 1 : drain cxq to EntryList, reversing the order of the
      // list as we go.
      ObjectWaiter * s = NULL;
      ObjectWaiter * t = w;
      ObjectWaiter * u = NULL;
      while (t != NULL) {

// ...

// composite per-thread suspend flag and then update it with CAS().
// Alternately, a Dekker-like mechanism with multiple variables
// would suffice:
//   ST Self->_suspend_equivalent = false
//   MEMBAR
//   LD Self->_suspend_flags
//
// UPDATE 2007-10-6: since I've replaced the native Mutex/Monitor subsystem
// with a more efficient implementation, the need to use "FastHSSEC" has
// decreased. - Dave


bool ObjectMonitor::ExitSuspendEquivalent(JavaThread * jSelf) {
  const int Mode = Knob_FastHSSEC;
  if (Mode && !jSelf->is_external_suspend()) {
    assert(jSelf->is_suspend_equivalent(), "invariant");
    jSelf->clear_suspend_equivalent();
    if (2 == Mode) OrderAccess::storeload();
    if (!jSelf->is_external_suspend()) return false;
    // We raced a suspension -- fall thru into the slow path
    TEVENT(ExitSuspendEquivalent - raced);
    jSelf->set_suspend_equivalent();
  }
  return jSelf->handle_special_suspend_equivalent_condition();
}


void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
  assert(_owner == Self, "invariant");

  // Exit protocol:
  // 1. ST _succ = wakee
  // 2. membar #loadstore|#storestore;
  // 3. ST _owner = NULL
  // 4. unpark(wakee)

  _succ = Knob_SuccEnabled ? Wakee->_thread : NULL;
  ParkEvent * Trigger = Wakee->_event;

  // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
  // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
  // out-of-scope (non-extant).
  Wakee = NULL;

  // Drop the lock
  OrderAccess::release_store(&_owner, (void*)NULL);
  OrderAccess::fence();                               // ST _owner vs LD in unpark()

  if (SafepointMechanism::poll(Self)) {
    TEVENT(unpark before SAFEPOINT);
  }

  DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
  Trigger->unpark();

  // Maintain stats and report events to JVMTI
  OM_PERFDATA_OP(Parks, inc());
}


// -----------------------------------------------------------------------------
// Class Loader deadlock handling.
//
// complete_exit exits a lock returning recursion count
// complete_exit/reenter operate as a wait without waiting
// complete_exit requires an inflated monitor
// The _owner field is not always the Thread addr even with an
// inflated monitor, e.g. the monitor can be inflated by a non-owning
// thread due to contention.
intptr_t ObjectMonitor::complete_exit(TRAPS) {
  Thread * const Self = THREAD;
  assert(Self->is_Java_thread(), "Must be Java thread!");

// ...

  enter(THREAD);        // enter the monitor
  guarantee(_recursions == 0, "reenter recursion");
  _recursions = recursions;
  return;
}


// -----------------------------------------------------------------------------
// A macro is used below because there may already be a pending
// exception which should not abort the execution of the routines
// which use this (which is why we don't put this into check_slow and
// call it with a CHECK argument).

#define CHECK_OWNER()                                                       \
  do {                                                                      \
    if (THREAD != _owner) {                                                 \
      if (THREAD->is_lock_owned((address) _owner)) {                        \
        _owner = THREAD;  /* Convert from basiclock addr to Thread addr */  \
        _recursions = 0;                                                    \
      } else {                                                              \
        TEVENT(Throw IMSX);                                                 \
        THROW(vmSymbols::java_lang_IllegalMonitorStateException());         \
      }                                                                     \
    }                                                                       \
  } while (false)

// check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
// TODO-FIXME: remove check_slow() -- it's likely dead.

void ObjectMonitor::check_slow(TRAPS) {
  TEVENT(check_slow - throw IMSX);
  assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
  THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
}

static int Adjust(volatile int * adr, int dx) {
  int v;
  for (v = *adr; Atomic::cmpxchg(v + dx, adr, v) != v; v = *adr) /* empty */;
  return v;
}
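
// Adjust() is a lock-free add: it retries the CAS with a freshly loaded value
// until it installs v + dx, and returns the last value it observed before the
// successful update.  Usage below (sketch): Adjust(&_Spinner, 1) on spin entry
// and Adjust(&_Spinner, -1) on spin exit bracket each spin attempt.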

static void post_monitor_wait_event(EventJavaMonitorWait* event,
                                    ObjectMonitor* monitor,
                                    jlong notifier_tid,
                                    jlong timeout,
                                    bool timedout) {
  assert(event != NULL, "invariant");
  assert(monitor != NULL, "invariant");
  event->set_monitorClass(((oop)monitor->object())->klass());
  event->set_timeout(timeout);
  event->set_address((uintptr_t)monitor->object_addr());

// ...

  // check for a pending interrupt
  if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
    // post monitor waited event.  Note that this is past-tense, we are done waiting.
    if (JvmtiExport::should_post_monitor_waited()) {
      // Note: 'false' parameter is passed here because the
      // wait was not timed out due to thread interrupt.
      JvmtiExport::post_monitor_waited(jt, this, false);

      // In this short circuit of the monitor wait protocol, the
      // current thread never drops ownership of the monitor and
      // never gets added to the wait queue so the current thread
      // cannot be made the successor.  This means that the
      // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
      // consume an unpark() meant for the ParkEvent associated with
      // this ObjectMonitor.
    }
    if (event.should_commit()) {
      post_monitor_wait_event(&event, this, 0, millis, false);
    }
    TEVENT(Wait - Throw IEX);
    THROW(vmSymbols::java_lang_InterruptedException());
    return;
  }

  TEVENT(Wait);

  assert(Self->_Stalled == 0, "invariant");
  Self->_Stalled = intptr_t(this);
  jt->set_current_waiting_monitor(this);

  // create a node to be put into the queue
  // Critically, after we reset() the event but prior to park(), we must check
  // for a pending interrupt.
  ObjectWaiter node(Self);
  node.TState = ObjectWaiter::TS_WAIT;
  Self->_ParkEvent->reset();
  OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
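  // Ordering sketch for the fence above: the reset() store must be globally
  // ordered before the subsequent load of the interrupted flag, otherwise an
  // interrupt's unpark() could be consumed by reset() and the wakeup lost:
  //   Self->_ParkEvent->reset();     // ST  event := unsignaled
  //   OrderAccess::fence();          // MEMBAR (storeload)
  //   Thread::is_interrupted(...);   // LD  interrupted flag, checked before park()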

  // Enter the waiting queue, which is a circular doubly linked list in this case
  // but it could be a priority queue or any data structure.
  // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
  // by the owner of the monitor *except* in the case where park()
  // returns because of a timeout or interrupt.  Contention is exceptionally rare
  // so we use a simple spin-lock instead of a heavier-weight blocking lock.

  Thread::SpinAcquire(&_WaitSetLock, "WaitSet - add");

// ...
  jt->set_current_waiting_monitor(NULL);

  guarantee(_recursions == 0, "invariant");
  _recursions = save;      // restore the old recursion count
  _waiters--;              // decrement the number of waiters

  // Verify a few postconditions
  assert(_owner == Self, "invariant");
  assert(_succ != Self, "invariant");
  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");

  if (SyncFlags & 32) {
    OrderAccess::fence();
  }

  // check if the notification happened
  if (!WasNotified) {
    // no, it could be timeout or Thread.interrupt() or both
    // check for interrupt event, otherwise it is timeout
    if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
      TEVENT(Wait - throw IEX from epilog);
      THROW(vmSymbols::java_lang_InterruptedException());
    }
  }

  // NOTE: a spurious wakeup is treated as a timeout.
  // Monitor notify has precedence over thread interrupt.
}


// Consider:
// If the lock is cool (cxq == null && succ == null) and we're on an MP system
// then instead of transferring a thread from the WaitSet to the EntryList
// we might just dequeue a thread from the WaitSet and directly unpark() it.

void ObjectMonitor::INotify(Thread * Self) {
  const int policy = Knob_MoveNotifyee;

  Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notify");
  ObjectWaiter * iterator = DequeueWaiter();
  if (iterator != NULL) {
    TEVENT(Notify1 - Transfer);
    guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
    guarantee(iterator->_notified == 0, "invariant");
    // Disposition - what might we do with iterator?
    // a.  add it directly to the EntryList - either tail (policy == 1)
    //     or head (policy == 0).
    // b.  push it onto the front of the _cxq (policy == 2).
    // For now we use (b).
    if (policy != 4) {
      iterator->TState = ObjectWaiter::TS_ENTER;
    }
    iterator->_notified = 1;
    iterator->_notifier_tid = JFR_THREAD_ID(Self);

    ObjectWaiter * list = _EntryList;
    if (list != NULL) {
      assert(list->_prev == NULL, "invariant");
      assert(list->TState == ObjectWaiter::TS_ENTER, "invariant");
      assert(list != iterator, "invariant");
    }

// ...
    }
  }
  Thread::SpinRelease(&_WaitSetLock);
}

// Consider: a not-uncommon synchronization bug is to use notify() when
// notifyAll() is more appropriate, potentially resulting in stranded
// threads; this is one example of a lost wakeup.  A useful diagnostic
// option is to force all notify() operations to behave as notifyAll().
//
// Note: We can also detect many such problems with a "minimum wait".
// When the "minimum wait" is set to a small non-zero timeout value
// and the program does not hang whereas it did absent "minimum wait",
// that suggests a lost wakeup bug.  The '-XX:SyncFlags=1' option uses
// a "minimum wait" for all park() operations; see the recheckInterval
// variable and MAX_RECHECK_INTERVAL.

void ObjectMonitor::notify(TRAPS) {
  CHECK_OWNER();
  if (_WaitSet == NULL) {
    TEVENT(Empty-Notify);
    return;
  }
  DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
  INotify(THREAD);
  OM_PERFDATA_OP(Notifications, inc(1));
}


// The current implementation of notifyAll() transfers the waiters one-at-a-time
// from the waitset to the EntryList.  This could be done more efficiently with a
// single bulk transfer but in practice it's not time-critical.  Beware too,
// that in prepend-mode we invert the order of the waiters.  Let's say that the
// waitset is "ABCD" and the EntryList is "XYZ".  After a notifyAll() in prepend
// mode the waitset will be empty and the EntryList will be "DCBAXYZ".

void ObjectMonitor::notifyAll(TRAPS) {
  CHECK_OWNER();
  if (_WaitSet == NULL) {
    TEVENT(Empty-NotifyAll);
    return;
  }

  DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
  int tally = 0;
  while (_WaitSet != NULL) {
    tally++;
    INotify(THREAD);
  }

  OM_PERFDATA_OP(Notifications, inc(tally));
}

// -----------------------------------------------------------------------------
// Adaptive Spinning Support
//
// Adaptive spin-then-block - rational spinning
//
// Note that we spin "globally" on _owner with a classic SMP-polite TATAS
// algorithm.  On high order SMP systems it would be better to start with

// ...

// Admission control - verify preconditions for spinning
//
// We always spin a little bit, just to prevent _SpinDuration == 0 from
// becoming an absorbing state.  Put another way, we spin briefly to
// sample, just in case the system load, parallelism, contention, or lock
// modality changed.
//
// Consider the following alternative:
// Periodically set _SpinDuration = _SpinLimit and try a long/full
// spin attempt.  "Periodically" might mean after a tally of
// the # of failed spin attempts (or iterations) reaches some threshold.
// This takes us into the realm of 1-out-of-N spinning, where we
// hold the duration constant but vary the frequency.

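// Sketch of that 1-out-of-N alternative (hypothetical counter and threshold,
// illustrative only):
//   if (++_failed_spins >= N) {            // every Nth failure ...
//     _failed_spins = 0;
//     _SpinDuration = Knob_SpinLimit;      // ... retry a full-length spin
//   }
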
  ctr = _SpinDuration;
  if (ctr < Knob_SpinBase) ctr = Knob_SpinBase;
  if (ctr <= 0) return 0;

  if (Knob_SuccRestrict && _succ != NULL) return 0;
  if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
    TEVENT(Spin abort - notrunnable [TOP]);
    return 0;
  }

  int MaxSpin = Knob_MaxSpinners;
  if (MaxSpin >= 0) {
    if (_Spinner > MaxSpin) {
      TEVENT(Spin abort -- too many spinners);
      return 0;
    }
    // Slightly racy, but benign ...
    Adjust(&_Spinner, 1);
  }

  // We're good to spin ... spin ingress.
  // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
  // when preparing to LD...CAS _owner, etc and the CAS is likely
  // to succeed.
  int hits = 0;
  int msk = 0;
  int caspty = Knob_CASPenalty;
  int oxpty = Knob_OXPenalty;
  int sss = Knob_SpinSetSucc;
  if (sss && _succ == NULL) _succ = Self;
  Thread * prv = NULL;

  // There are three ways to exit the following loop:
  // 1.  A successful spin where this thread has acquired the lock.
  // 2.  Spin failure with prejudice
  // 3.  Spin failure without prejudice

  while (--ctr >= 0) {

    // Periodic polling -- Check for pending GC
    // Threads may spin while they're unsafe.
    // We don't want spinning threads to delay the JVM from reaching
    // a stop-the-world safepoint or to steal cycles from GC.
    // If we detect a pending safepoint we abort in order that
    // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
    // this thread, if safe, doesn't steal cycles from GC.
    // This is in keeping with the "no loitering in runtime" rule.
    // We periodically check to see if there's a safepoint pending.
    if ((ctr & 0xFF) == 0) {
      if (SafepointMechanism::poll(Self)) {
        TEVENT(Spin: safepoint);
        goto Abort;           // abrupt spin egress
      }
      if (Knob_UsePause & 1) SpinPause();
    }

    if (Knob_UsePause & 2) SpinPause();

    // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
    // This is useful on classic SMP systems, but is of less utility on
    // N1-style CMT platforms.
    //
    // Trade-off: lock acquisition latency vs coherency bandwidth.
    // Lock hold times are typically short.  A histogram
    // of successful spin attempts shows that we usually acquire
    // the lock early in the spin.  That suggests we want to
    // sample _owner frequently in the early phase of the spin,
    // but then back-off and sample less frequently as the spin
    // progresses.  The back-off makes a good citizen on big
    // SMP systems.  Oversampling _owner can consume excessive
    // coherency bandwidth.  Relatedly, if we oversample _owner we

// ...
        // The spin was successful (profitable) so we tend toward
        // longer spin attempts in the future.
        // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
        // If we acquired the lock early in the spin cycle it
        // makes sense to increase _SpinDuration proportionally.
        // Note that we don't clamp SpinDuration precisely at SpinLimit.
        int x = _SpinDuration;
        if (x < Knob_SpinLimit) {
          if (x < Knob_Poverty) x = Knob_Poverty;
          _SpinDuration = x + Knob_Bonus;
        }
        return 1;
      }

      // The CAS failed ... we can take any of the following actions:
      // * penalize: ctr -= Knob_CASPenalty
      // * exit spin with prejudice -- goto Abort;
      // * exit spin without prejudice.
      // * Since CAS is high-latency, retry again immediately.
      prv = ox;
      TEVENT(Spin: cas failed);
      if (caspty == -2) break;
      if (caspty == -1) goto Abort;
      ctr -= caspty;
      continue;
    }

    // Did lock ownership change hands ?
    if (ox != prv && prv != NULL) {
      TEVENT(spin: Owner changed)
      if (oxpty == -2) break;
      if (oxpty == -1) goto Abort;
      ctr -= oxpty;
    }
    prv = ox;

    // Abort the spin if the owner is not executing.
    // The owner must be executing in order to drop the lock.
    // Spinning while the owner is OFFPROC is idiocy.
    // Consider: ctr -= RunnablePenalty ;
    if (Knob_OState && NotRunnable (Self, ox)) {
      TEVENT(Spin abort - notrunnable);
      goto Abort;
    }
    if (sss && _succ == NULL) _succ = Self;
  }

  // Spin failed with prejudice -- reduce _SpinDuration.
  // TODO: Use an AIMD-like policy to adjust _SpinDuration.
  // AIMD is globally stable.
  TEVENT(Spin failure);
  {
    int x = _SpinDuration;
    if (x > 0) {
      // Consider an AIMD scheme like: x -= (x >> 3) + 100
      // This is globally stable and tends to damp the response.
      x -= Knob_Penalty;
      if (x < 0) x = 0;
      _SpinDuration = x;
    }
  }

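  // Worked example of the suggested AIMD decrease starting from x = 4000:
  //   4000 - (4000 >> 3) - 100 = 3400;  3400 - (3400 >> 3) - 100 = 2875; ...
  // i.e. roughly a 1/8 multiplicative cut per failure, paired with the flat
  // additive Knob_Bonus on success above -- the classic AIMD shape.
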
 Abort:
  if (MaxSpin >= 0) Adjust(&_Spinner, -1);
  if (sss && _succ == Self) {
    _succ = NULL;
    // Invariant: after setting succ=null a contending thread
    // must recheck-retry _owner before parking.  This usually happens
    // in the normal usage of TrySpin(), but it's safest
    // to make TrySpin() as foolproof as possible.
    OrderAccess::fence();
|
529 // timer scalability issues we see on some platforms as we'd only have one thread
530 // -- the checker -- parked on a timer.
531
532 if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
533 // Try to assume the role of responsible thread for the monitor.
534 // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
535 Atomic::replace_if_null(Self, &_Responsible);
536 }
537
538 // The lock might have been released while this thread was occupied queueing
539 // itself onto _cxq. To close the race and avoid "stranding" and
540 // progress-liveness failure we must resample-retry _owner before parking.
541 // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
542 // In this case the ST-MEMBAR is accomplished with CAS().
543 //
544 // TODO: Defer all thread state transitions until park-time.
545 // Since state transitions are heavy and inefficient we'd like
546 // to defer the state transitions until absolutely necessary,
547 // and in doing so avoid some transitions ...
548
549 int nWakeups = 0;
550 int recheckInterval = 1;
551
552 for (;;) {
553
554 if (TryLock(Self) > 0) break;
555 assert(_owner != Self, "invariant");
556
557 if ((SyncFlags & 2) && _Responsible == NULL) {
558 Atomic::replace_if_null(Self, &_Responsible);
559 }
560
561 // park self
562 if (_Responsible == Self || (SyncFlags & 1)) {
563 Self->_ParkEvent->park((jlong) recheckInterval);
564 // Increase the recheckInterval, but clamp the value.
565 recheckInterval *= 8;
566 if (recheckInterval > MAX_RECHECK_INTERVAL) {
567 recheckInterval = MAX_RECHECK_INTERVAL;
568 }
569 } else {
570 Self->_ParkEvent->park();
571 }
572
573 if (TryLock(Self) > 0) break;
574
575 // The lock is still contested.
576 // Keep a tally of the # of futile wakeups.
577 // Note that the counter is not protected by a lock or updated by atomics.
578 // That is by design - we trade "lossy" counters which are exposed to
579 // races during updates for a lower probe effect.
580
581 // This PerfData object can be used in parallel with a safepoint.
582 // See the work around in PerfDataManager::destroy().
583 OM_PERFDATA_OP(FutileWakeups, inc());
584 ++nWakeups;
585
586 // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
587 // We can defer clearing _succ until after the spin completes
588 // TrySpin() must tolerate being called with _succ == Self.
589 // Try yet another round of adaptive spinning.
590 if ((Knob_SpinAfterFutile & 1) && TrySpin(Self) > 0) break;
591
592 // We can find that we were unpark()ed and redesignated _succ while
593 // we were spinning. That's harmless. If we iterate and call park(),
594 // park() will consume the event and return immediately and we'll
595 // just spin again. This pattern can repeat, leaving _succ to simply
596 // spin on a CPU. Enable Knob_ResetEvent to clear pending unparks().
597 // Alternately, we can sample fired() here, and if set, forgo spinning
598 // in the next iteration.
599
600 if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
687 // loop accordingly.
688
689 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
690 assert(Self != NULL, "invariant");
691 assert(SelfNode != NULL, "invariant");
692 assert(SelfNode->_thread == Self, "invariant");
693 assert(_waiters > 0, "invariant");
694 assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
695 assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
696 JavaThread * jt = (JavaThread *) Self;
697
698 int nWakeups = 0;
699 for (;;) {
700 ObjectWaiter::TStates v = SelfNode->TState;
701 guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
702 assert(_owner != Self, "invariant");
703
704 if (TryLock(Self) > 0) break;
705 if (TrySpin(Self) > 0) break;
706
707 // State transition wrappers around park() ...
708 // ReenterI() wisely defers state transitions until
709 // it's clear we must park the thread.
710 {
711 OSThreadContendState osts(Self->osthread());
712 ThreadBlockInVM tbivm(jt);
713
714 // cleared by handle_special_suspend_equivalent_condition()
715 // or java_suspend_self()
716 jt->set_suspend_equivalent();
717 if (SyncFlags & 1) {
718 Self->_ParkEvent->park((jlong)MAX_RECHECK_INTERVAL);
719 } else {
720 Self->_ParkEvent->park();
721 }
722
723 // were we externally suspended while we were waiting?
724 for (;;) {
725 if (!ExitSuspendEquivalent(jt)) break;
726 if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
727 jt->java_suspend_self();
728 jt->set_suspend_equivalent();
729 }
730 }
731
732 // Try again, but just so we distinguish between futile wakeups and
733 // successful wakeups. The following test isn't algorithmically
734 // necessary, but it helps us maintain sensible statistics.
735 if (TryLock(Self) > 0) break;
736
737 // The lock is still contested.
738 // Keep a tally of the # of futile wakeups.
739 // Note that the counter is not protected by a lock or updated by atomics.
740 // That is by design - we trade "lossy" counters which are exposed to
741 // races during updates for a lower probe effect.
742 ++nWakeups;
743
744 // Assuming this is not a spurious wakeup we'll normally
745 // find that _succ == Self.
746 if (_succ == Self) _succ = NULL;
747
748 // Invariant: after clearing _succ a contending thread
749 // *must* retry _owner before parking.
750 OrderAccess::fence();
751
752 // This PerfData object can be used in parallel with a safepoint.
753 // See the work around in PerfDataManager::destroy().
754 OM_PERFDATA_OP(FutileWakeups, inc());
755 }
756
757 // Self has acquired the lock -- Unlink Self from the cxq or EntryList .
758 // Normally we'll find Self on the EntryList.
759 // Unlinking from the EntryList is constant-time and atomic-free.
760 // From the perspective of the lock owner (this thread), the
761 // EntryList is stable and cxq is prepend-only.
772 }
773
774 // By convention we unlink a contending thread from EntryList|cxq immediately
775 // after the thread acquires the lock in ::enter(). Equally, we could defer
776 // unlinking the thread until ::exit()-time.
777
778 void ObjectMonitor::UnlinkAfterAcquire(Thread *Self, ObjectWaiter *SelfNode) {
779 assert(_owner == Self, "invariant");
780 assert(SelfNode->_thread == Self, "invariant");
781
782 if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
783 // Normal case: remove Self from the DLL EntryList .
784 // This is a constant-time operation.
785 ObjectWaiter * nxt = SelfNode->_next;
786 ObjectWaiter * prv = SelfNode->_prev;
787 if (nxt != NULL) nxt->_prev = prv;
788 if (prv != NULL) prv->_next = nxt;
789 if (SelfNode == _EntryList) _EntryList = nxt;
790 assert(nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant");
791 assert(prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant");
792 } else {
793 assert(SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant");
794 // Inopportune interleaving -- Self is still on the cxq.
795 // This usually means the enqueue of self raced an exiting thread.
796 // Normally we'll find Self near the front of the cxq, so
797 // dequeueing is typically fast. If needbe we can accelerate
798 // this with some MCS/CHL-like bidirectional list hints and advisory
799 // back-links so dequeueing from the interior will normally operate
800 // in constant-time.
801 // Dequeue Self from either the head (with CAS) or from the interior
802 // with a linear-time scan and normal non-atomic memory operations.
803 // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
804 // and then unlink Self from EntryList. We have to drain eventually,
805 // so it might as well be now.
806
807 ObjectWaiter * v = _cxq;
808 assert(v != NULL, "invariant");
809 if (v != SelfNode || Atomic::cmpxchg(SelfNode->_next, &_cxq, v) != v) {
810 // The CAS above can fail from interference IFF a "RAT" arrived.
811 // In that case Self must be in the interior and can no longer be
812 // at the head of cxq.
813 if (v == SelfNode) {
814 assert(_cxq != v, "invariant");
815 v = _cxq; // CAS above failed - start scan at head of list
816 }
817 ObjectWaiter * p;
818 ObjectWaiter * q = NULL;
819 for (p = v; p != NULL && p != SelfNode; p = p->_next) {
820 q = p;
821 assert(p->TState == ObjectWaiter::TS_CXQ, "invariant");
822 }
823 assert(v != SelfNode, "invariant");
824 assert(p == SelfNode, "Node not found on cxq");
825 assert(p != _cxq, "invariant");
826 assert(q != NULL, "invariant");
827 assert(q->_next == p, "invariant");
828 q->_next = p->_next;
829 }
830 }
831
832 #ifdef ASSERT
833 // Diagnostic hygiene ...
834 SelfNode->_prev = (ObjectWaiter *) 0xBAD;
835 SelfNode->_next = (ObjectWaiter *) 0xBAD;
836 SelfNode->TState = ObjectWaiter::TS_RUN;
837 #endif
838 }
839
840 // -----------------------------------------------------------------------------
841 // Exit support
842 //
843 // exit()
844 // ~~~~~~
845 // Note that the collector can't reclaim the objectMonitor or deflate
846 // the object out from underneath the thread calling ::exit() as the
847 // thread calling ::exit() never transitions to a stable state.
848 // This inhibits GC, which in turn inhibits asynchronous (and
849 // inopportune) reclamation of "this".
898 Thread * const Self = THREAD;
899 if (THREAD != _owner) {
900 if (THREAD->is_lock_owned((address) _owner)) {
901 // Transmute _owner from a BasicLock pointer to a Thread address.
902 // We don't need to hold _mutex for this transition.
903 // Non-null to Non-null is safe as long as all readers can
904 // tolerate either flavor.
905 assert(_recursions == 0, "invariant");
906 _owner = THREAD;
907 _recursions = 0;
908 } else {
909 // Apparent unbalanced locking ...
910 // Naively we'd like to throw IllegalMonitorStateException.
911 // As a practical matter we can neither allocate nor throw an
912 // exception as ::exit() can be called from leaf routines.
913 // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
914 // Upon deeper reflection, however, in a properly run JVM the only
915 // way we should encounter this situation is in the presence of
916 // unbalanced JNI locking. TODO: CheckJNICalls.
917 // See also: CR4414101
918 assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");
919 return;
920 }
921 }
922
923 if (_recursions != 0) {
924 _recursions--; // this is simple recursive enter
925 return;
926 }
927
928 // Invariant: after setting Responsible=null an thread must execute
929 // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
930 if ((SyncFlags & 4) == 0) {
931 _Responsible = NULL;
932 }
933
934 #if INCLUDE_JFR
935 // get the owner's thread id for the MonitorEnter event
936 // if it is enabled and the thread isn't suspended
937 if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
938 _previous_owner_tid = JFR_THREAD_ID(Self);
939 }
940 #endif
941
942 for (;;) {
943 assert(THREAD == _owner, "invariant");
944
945 if (Knob_ExitPolicy == 0) {
946 // release semantics: prior loads and stores from within the critical section
947 // must not float (reorder) past the following store that drops the lock.
948 // On SPARC that requires MEMBAR #loadstore|#storestore.
949 // But of course in TSO #loadstore|#storestore is not required.
950 // I'd like to write one of the following:
951 // A. OrderAccess::release() ; _owner = NULL
952 // B. OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
953 // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
954 // store into a _dummy variable. That store is not needed, but can result
955 // in massive wasteful coherency traffic on classic SMP systems.
956 // Instead, I use release_store(), which is implemented as just a simple
957 // ST on x64, x86 and SPARC.
958 OrderAccess::release_store(&_owner, (void*)NULL); // drop the lock
959 OrderAccess::storeload(); // See if we need to wake a successor
960 if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
961 return;
962 }
963 // Other threads are blocked trying to acquire the lock.
964
965 // Normally the exiting thread is responsible for ensuring succession,
966 // but if other successors are ready or other entering threads are spinning
967 // then this thread can simply store NULL into _owner and exit without
968 // waking a successor. The existence of spinners or ready successors
969 // guarantees proper succession (liveness). Responsibility passes to the
970 // ready or running successors. The exiting thread delegates the duty.
971 // More precisely, if a successor already exists this thread is absolved
972 // of the responsibility of waking (unparking) one.
973 //
974 // The _succ variable is critical to reducing futile wakeup frequency.
975 // _succ identifies the "heir presumptive" thread that has been made
976 // ready (unparked) but that has not yet run. We need only one such
977 // successor thread to guarantee progress.
978 // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
979 // section 3.3 "Futile Wakeup Throttling" for details.
980 //
981 // Note that spinners in Enter() also set _succ non-null.
982 // In the current implementation spinners opportunistically set
984 // Another less appealing alternative would be for the exiting thread
985 // to drop the lock and then spin briefly to see if a spinner managed
986 // to acquire the lock. If so, the exiting thread could exit
987 // immediately without waking a successor, otherwise the exiting
988 // thread would need to dequeue and wake a successor.
989 // (Note that we'd need to make the post-drop spin short, but no
990 // shorter than the worst-case round-trip cache-line migration time.
991 // The dropped lock needs to become visible to the spinner, and then
992 // the acquisition of the lock by the spinner must become visible to
993 // the exiting thread).
994
995 // It appears that an heir-presumptive (successor) must be made ready.
996 // Only the current lock owner can manipulate the EntryList or
997 // drain _cxq, so we need to reacquire the lock. If we fail
998 // to reacquire the lock the responsibility for ensuring succession
999 // falls to the new owner.
1000 //
1001 if (!Atomic::replace_if_null(THREAD, &_owner)) {
1002 return;
1003 }
1004 } else {
1005 if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
1006 OrderAccess::release_store(&_owner, (void*)NULL); // drop the lock
1007 OrderAccess::storeload();
1008 // Ratify the previously observed values.
1009 if (_cxq == NULL || _succ != NULL) {
1010 return;
1011 }
1012
1013 // inopportune interleaving -- the exiting thread (this thread)
1014 // in the fast-exit path raced an entering thread in the slow-enter
1015 // path.
1016 // We have two choices:
1017 // A. Try to reacquire the lock.
1018 // If the CAS() fails return immediately, otherwise
1019 // we either restart/rerun the exit operation, or simply
1020 // fall-through into the code below which wakes a successor.
1021 // B. If the elements forming the EntryList|cxq are TSM
1022 // we could simply unpark() the lead thread and return
1023 // without having set _succ.
1024 if (!Atomic::replace_if_null(THREAD, &_owner)) {
1025 return;
1026 }
1027 }
1028 }
1029
1030 guarantee(_owner == THREAD, "invariant");
1031
1032 ObjectWaiter * w = NULL;
1033 int QMode = Knob_QMode;
1034
1035 if (QMode == 2 && _cxq != NULL) {
1036 // QMode == 2 : cxq has precedence over EntryList.
1037 // Try to directly wake a successor from the cxq.
1038 // If successful, the successor will need to unlink itself from cxq.
1039 w = _cxq;
1040 assert(w != NULL, "invariant");
1041 assert(w->TState == ObjectWaiter::TS_CXQ, "Invariant");
1042 ExitEpilog(Self, w);
1043 return;
1044 }
1045
1046 if (QMode == 3 && _cxq != NULL) {
1133 // associated with Self.
1134 assert(w->TState == ObjectWaiter::TS_ENTER, "invariant");
1135 ExitEpilog(Self, w);
1136 return;
1137 }
1138
1139 // If we find that both _cxq and EntryList are null then just
1140 // re-run the exit protocol from the top.
1141 w = _cxq;
1142 if (w == NULL) continue;
1143
1144 // Drain _cxq into EntryList - bulk transfer.
1145 // First, detach _cxq.
1146 // The following loop is tantamount to: w = swap(&cxq, NULL)
1147 for (;;) {
1148 assert(w != NULL, "Invariant");
1149 ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w);
1150 if (u == w) break;
1151 w = u;
1152 }
1153
1154 assert(w != NULL, "invariant");
1155 assert(_EntryList == NULL, "invariant");
1156
1157 // Convert the LIFO SLL anchored by _cxq into a DLL.
1158 // The list reorganization step operates in O(LENGTH(w)) time.
1159 // It's critical that this step operate quickly as
1160 // "Self" still holds the outer-lock, restricting parallelism
1161 // and effectively lengthening the critical section.
1162 // Invariant: s chases t chases u.
1163 // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
1164 // we have faster access to the tail.
1165
1166 if (QMode == 1) {
1167 // QMode == 1 : drain cxq to EntryList, reversing order
1168 // We also reverse the order of the list.
1169 ObjectWaiter * s = NULL;
1170 ObjectWaiter * t = w;
1171 ObjectWaiter * u = NULL;
1172 while (t != NULL) {
1236 // composite per-thread suspend flag and then update it with CAS().
1237 // Alternately, a Dekker-like mechanism with multiple variables
1238 // would suffice:
1239 // ST Self->_suspend_equivalent = false
1240 // MEMBAR
1241 // LD Self_>_suspend_flags
1242 //
1243 // UPDATE 2007-10-6: since I've replaced the native Mutex/Monitor subsystem
1244 // with a more efficient implementation, the need to use "FastHSSEC" has
1245 // decreased. - Dave
1246
1247
1248 bool ObjectMonitor::ExitSuspendEquivalent(JavaThread * jSelf) {
1249 const int Mode = Knob_FastHSSEC;
1250 if (Mode && !jSelf->is_external_suspend()) {
1251 assert(jSelf->is_suspend_equivalent(), "invariant");
1252 jSelf->clear_suspend_equivalent();
1253 if (2 == Mode) OrderAccess::storeload();
1254 if (!jSelf->is_external_suspend()) return false;
1255 // We raced a suspension -- fall thru into the slow path
1256 jSelf->set_suspend_equivalent();
1257 }
1258 return jSelf->handle_special_suspend_equivalent_condition();
1259 }
1260
1261
1262 void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
1263 assert(_owner == Self, "invariant");
1264
1265 // Exit protocol:
1266 // 1. ST _succ = wakee
1267 // 2. membar #loadstore|#storestore;
1268 // 2. ST _owner = NULL
1269 // 3. unpark(wakee)
1270
1271 _succ = Knob_SuccEnabled ? Wakee->_thread : NULL;
1272 ParkEvent * Trigger = Wakee->_event;
1273
1274 // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
1275 // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
1276 // out-of-scope (non-extant).
1277 Wakee = NULL;
1278
1279 // Drop the lock
1280 OrderAccess::release_store(&_owner, (void*)NULL);
1281 OrderAccess::fence(); // ST _owner vs LD in unpark()
1282
1283 DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
1284 Trigger->unpark();
1285
1286 // Maintain stats and report events to JVMTI
1287 OM_PERFDATA_OP(Parks, inc());
1288 }
1289
1290
1291 // -----------------------------------------------------------------------------
1292 // Class Loader deadlock handling.
1293 //
1294 // complete_exit exits a lock returning recursion count
1295 // complete_exit/reenter operate as a wait without waiting
1296 // complete_exit requires an inflated monitor
1297 // The _owner field is not always the Thread addr even with an
1298 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1299 // thread due to contention.
1300 intptr_t ObjectMonitor::complete_exit(TRAPS) {
1301 Thread * const Self = THREAD;
1302 assert(Self->is_Java_thread(), "Must be Java thread!");
1331 enter(THREAD); // enter the monitor
1332 guarantee(_recursions == 0, "reenter recursion");
1333 _recursions = recursions;
1334 return;
1335 }
1336
1337
1338 // -----------------------------------------------------------------------------
1339 // A macro is used below because there may already be a pending
1340 // exception which should not abort the execution of the routines
1341 // which use this (which is why we don't put this into check_slow and
1342 // call it with a CHECK argument).
1343
1344 #define CHECK_OWNER() \
1345 do { \
1346 if (THREAD != _owner) { \
1347 if (THREAD->is_lock_owned((address) _owner)) { \
1348 _owner = THREAD; /* Convert from basiclock addr to Thread addr */ \
1349 _recursions = 0; \
1350 } else { \
1351 THROW(vmSymbols::java_lang_IllegalMonitorStateException()); \
1352 } \
1353 } \
1354 } while (false)
1355
1356 // check_slow() is a misnomer. It's called to simply to throw an IMSX exception.
1357 // TODO-FIXME: remove check_slow() -- it's likely dead.
1358
1359 void ObjectMonitor::check_slow(TRAPS) {
1360 assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
1361 THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
1362 }
1363
1364 static int Adjust(volatile int * adr, int dx) {
1365 int v;
1366 for (v = *adr; Atomic::cmpxchg(v + dx, adr, v) != v; v = *adr) /* empty */;
1367 return v;
1368 }
1369
1370 static void post_monitor_wait_event(EventJavaMonitorWait* event,
1371 ObjectMonitor* monitor,
1372 jlong notifier_tid,
1373 jlong timeout,
1374 bool timedout) {
1375 assert(event != NULL, "invariant");
1376 assert(monitor != NULL, "invariant");
1377 event->set_monitorClass(((oop)monitor->object())->klass());
1378 event->set_timeout(timeout);
1379 event->set_address((uintptr_t)monitor->object_addr());
1401
1402 // check for a pending interrupt
1403 if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1404 // post monitor waited event. Note that this is past-tense, we are done waiting.
1405 if (JvmtiExport::should_post_monitor_waited()) {
1406 // Note: 'false' parameter is passed here because the
1407 // wait was not timed out due to thread interrupt.
1408 JvmtiExport::post_monitor_waited(jt, this, false);
1409
1410 // In this short circuit of the monitor wait protocol, the
1411 // current thread never drops ownership of the monitor and
1412 // never gets added to the wait queue so the current thread
1413 // cannot be made the successor. This means that the
1414 // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
1415 // consume an unpark() meant for the ParkEvent associated with
1416 // this ObjectMonitor.
1417 }
1418 if (event.should_commit()) {
1419 post_monitor_wait_event(&event, this, 0, millis, false);
1420 }
1421 THROW(vmSymbols::java_lang_InterruptedException());
1422 return;
1423 }
1424
1425 assert(Self->_Stalled == 0, "invariant");
1426 Self->_Stalled = intptr_t(this);
1427 jt->set_current_waiting_monitor(this);
1428
1429 // create a node to be put into the queue
1430 // Critically, after we reset() the event but prior to park(), we must check
1431 // for a pending interrupt.
1432 ObjectWaiter node(Self);
1433 node.TState = ObjectWaiter::TS_WAIT;
1434 Self->_ParkEvent->reset();
1435 OrderAccess::fence(); // ST into Event; membar ; LD interrupted-flag
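// The fence keeps the load of the interrupted flag from floating above
// the reset() store. Without it, an interrupt arriving between a hoisted
// load and the reset() could have its unpark() consumed by reset(), and
// this thread would park with no wakeup pending -- a lost wakeup.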
1436
1437 // Enter the waiting queue, which is a circular doubly linked list in this case
1438 // but it could be a priority queue or any data structure.
1439 // _WaitSetLock protects the wait queue. Normally the wait queue is accessed only
1440 // by the owner of the monitor *except* in the case where park()
1441 // returns because of a timeout or interrupt. Contention is exceptionally rare
1442 // so we use a simple spin-lock instead of a heavier-weight blocking lock.
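// A rough sketch of the append performed under _WaitSetLock, assuming
// the usual circular-DLL shape with _WaitSet as the head (illustrative
// only -- see AddWaiter() for the real insertion):
//
//   if (_WaitSet == NULL) {
//     node->_prev = node->_next = node;
//     _WaitSet = node;
//   } else {
//     ObjectWaiter * tail = _WaitSet->_prev;
//     tail->_next = node;  node->_prev = tail;
//     node->_next = _WaitSet;  _WaitSet->_prev = node;
//   }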
1443
1444 Thread::SpinAcquire(&_WaitSetLock, "WaitSet - add");
1585 jt->set_current_waiting_monitor(NULL);
1586
1587 guarantee(_recursions == 0, "invariant");
1588 _recursions = save; // restore the old recursion count
1589 _waiters--; // decrement the number of waiters
1590
1591 // Verify a few postconditions
1592 assert(_owner == Self, "invariant");
1593 assert(_succ != Self, "invariant");
1594 assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
1595
1596 if (SyncFlags & 32) {
1597 OrderAccess::fence();
1598 }
1599
1600 // check if the notification happened
1601 if (!WasNotified) {
1602 // no, it could be timeout or Thread.interrupt() or both
1603 // check for interrupt event, otherwise it is timeout
1604 if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1605 THROW(vmSymbols::java_lang_InterruptedException());
1606 }
1607 }
1608
1609 // NOTE: A spurious wakeup will be treated as a timeout.
1610 // Monitor notify has precedence over thread interrupt.
1611 }
1612
1613
1614 // Consider:
1615 // If the lock is cool (cxq == null && succ == null) and we're on an MP system
1616 // then instead of transferring a thread from the WaitSet to the EntryList
1617 // we might just dequeue a thread from the WaitSet and directly unpark() it.
1618
1619 void ObjectMonitor::INotify(Thread * Self) {
1620 const int policy = Knob_MoveNotifyee;
1621
1622 Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notify");
1623 ObjectWaiter * iterator = DequeueWaiter();
1624 if (iterator != NULL) {
1625 guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
1626 guarantee(iterator->_notified == 0, "invariant");
1627 // Disposition - what might we do with iterator ?
1628 // a. add it directly to the EntryList - either tail (policy == 1)
1629 // or head (policy == 0).
1630 // b. push it onto the front of the _cxq (policy == 2).
1631 // For now we use (b).
1632 if (policy != 4) {
1633 iterator->TState = ObjectWaiter::TS_ENTER;
1634 }
1635 iterator->_notified = 1;
1636 iterator->_notifier_tid = JFR_THREAD_ID(Self);
1637
1638 ObjectWaiter * list = _EntryList;
1639 if (list != NULL) {
1640 assert(list->_prev == NULL, "invariant");
1641 assert(list->TState == ObjectWaiter::TS_ENTER, "invariant");
1642 assert(list != iterator, "invariant");
1643 }
1644
1718 }
1719 }
1720 Thread::SpinRelease(&_WaitSetLock);
1721 }
1722
1723 // Consider: a not-uncommon synchronization bug is to use notify() when
1724 // notifyAll() is more appropriate, potentially resulting in stranded
1725 // threads; this is one example of a lost wakeup. A useful diagnostic
1726 // option is to force all notify() operations to behave as notifyAll().
1727 //
1728 // Note: We can also detect many such problems with a "minimum wait".
1729 // If a program hangs without the "minimum wait" but runs correctly when
1730 // every park() is given a small non-zero timeout, that strongly suggests
1731 // a lost wakeup bug. The '-XX:SyncFlags=1' option uses
1732 // a "minimum wait" for all park() operations; see the recheckInterval
1733 // variable and MAX_RECHECK_INTERVAL.
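//
// For example, a program that hangs normally but runs to completion as
// below is a strong lost-wakeup suspect ("MySuspectProgram" is a
// placeholder; depending on the build, experimental flags may also
// require -XX:+UnlockExperimentalVMOptions):
//
//   java -XX:SyncFlags=1 MySuspectProgram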
1734
1735 void ObjectMonitor::notify(TRAPS) {
1736 CHECK_OWNER();
1737 if (_WaitSet == NULL) {
1738 return;
1739 }
1740 DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
1741 INotify(THREAD);
1742 OM_PERFDATA_OP(Notifications, inc(1));
1743 }
1744
1745
1746 // The current implementation of notifyAll() transfers the waiters one-at-a-time
1747 // from the waitset to the EntryList. This could be done more efficiently with a
1748 // single bulk transfer but in practice it's not time-critical. Beware, too,
1749 // that in prepend-mode we invert the order of the waiters. Let's say that the
1750 // waitset is "ABCD" and the EntryList is "XYZ". After a notifyAll() in prepend
1751 // mode the waitset will be empty and the EntryList will be "DCBAXYZ".
1752
1753 void ObjectMonitor::notifyAll(TRAPS) {
1754 CHECK_OWNER();
1755 if (_WaitSet == NULL) {
1756 return;
1757 }
1758
1759 DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
1760 int tally = 0;
1761 while (_WaitSet != NULL) {
1762 tally++;
1763 INotify(THREAD);
1764 }
1765
1766 OM_PERFDATA_OP(Notifications, inc(tally));
1767 }
1768
1769 // -----------------------------------------------------------------------------
1770 // Adaptive Spinning Support
1771 //
1772 // Adaptive spin-then-block - rational spinning
1773 //
1774 // Note that we spin "globally" on _owner with a classic SMP-polite TATAS
1775 // (test-and-test-and-set) algorithm. On high order SMP systems it would be better to start with
1862 // Admission control - verify preconditions for spinning
1863 //
1864 // We always spin a little bit, just to prevent _SpinDuration == 0 from
1865 // becoming an absorbing state. Put another way, we spin briefly to
1866 // sample, just in case the system load, parallelism, contention, or lock
1867 // modality changed.
1868 //
1869 // Consider the following alternative:
1870 // Periodically set _SpinDuration = _SpinLimit and try a long/full
1871 // spin attempt. "Periodically" might mean after a tally of
1872 // the # of failed spin attempts (or iterations) reaches some threshold.
1873 // This takes us into the realm of 1-out-of-N spinning, where we
1874 // hold the duration constant but vary the frequency.
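//
// A rough sketch of that 1-out-of-N alternative (illustrative only;
// "failures" is a hypothetical counter, not a field of this class):
//
//   if (++failures >= N) {
//     failures = 0;
//     _SpinDuration = Knob_SpinLimit;   // periodic long/full spin attempt
//   }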
1875
1876 ctr = _SpinDuration;
1877 if (ctr < Knob_SpinBase) ctr = Knob_SpinBase;
1878 if (ctr <= 0) return 0;
1879
1880 if (Knob_SuccRestrict && _succ != NULL) return 0;
1881 if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
1882 return 0;
1883 }
1884
1885 int MaxSpin = Knob_MaxSpinners;
1886 if (MaxSpin >= 0) {
1887 if (_Spinner > MaxSpin) {
1888 return 0;
1889 }
1890 // Slightly racy, but benign ...
1891 Adjust(&_Spinner, 1);
1892 }
1893
1894 // We're good to spin ... spin ingress.
1895 // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
1896 // when preparing to LD...CAS _owner, etc., and the CAS is likely
1897 // to succeed.
1898 int hits = 0;
1899 int msk = 0;
1900 int caspty = Knob_CASPenalty;
1901 int oxpty = Knob_OXPenalty;
1902 int sss = Knob_SpinSetSucc;
1903 if (sss && _succ == NULL) _succ = Self;
1904 Thread * prv = NULL;
1905
1906 // There are three ways to exit the following loop:
1907 // 1. A successful spin where this thread has acquired the lock.
1908 // 2. Spin failure with prejudice.
1909 // 3. Spin failure without prejudice.
1910
1911 while (--ctr >= 0) {
1912
1913 // Periodic polling -- Check for pending GC
1914 // Threads may spin while they're unsafe.
1915 // We don't want spinning threads to delay the JVM from reaching
1916 // a stop-the-world safepoint or to steal cycles from GC.
1917 // If we detect a pending safepoint we abort in order that
1918 // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
1919 // this thread, if safe, doesn't steal cycles from GC.
1920 // This is in keeping with the "no loitering in runtime" rule.
1921 // We periodically check to see if there's a safepoint pending.
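// (ctr & 0xFF) == 0 is true once every 256 iterations, so the cost of
// the safepoint poll is amortized over the spin.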
1922 if ((ctr & 0xFF) == 0) {
1923 if (SafepointMechanism::poll(Self)) {
1924 goto Abort; // abrupt spin egress
1925 }
1926 if (Knob_UsePause & 1) SpinPause();
1927 }
1928
1929 if (Knob_UsePause & 2) SpinPause();
1930
1931 // Exponential back-off ... Stay off the bus to reduce coherency traffic.
1932 // This is useful on classic SMP systems, but is of less utility on
1933 // N1-style CMT platforms.
1934 //
1935 // Trade-off: lock acquisition latency vs coherency bandwidth.
1936 // Lock hold times are typically short. A histogram
1937 // of successful spin attempts shows that we usually acquire
1938 // the lock early in the spin. That suggests we want to
1939 // sample _owner frequently in the early phase of the spin,
1940 // but then back-off and sample less frequently as the spin
1941 // progresses. The back-off makes us a good citizen on big
1942 // SMP systems. Oversampling _owner can consume excessive
1943 // coherency bandwidth. Relatedly, if we oversample _owner we
1976 // The spin was successful (profitable) so we tend toward
1977 // longer spin attempts in the future.
1978 // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
1979 // If we acquired the lock early in the spin cycle it
1980 // makes sense to increase _SpinDuration proportionally.
1981 // Note that we don't clamp SpinDuration precisely at SpinLimit.
1982 int x = _SpinDuration;
1983 if (x < Knob_SpinLimit) {
1984 if (x < Knob_Poverty) x = Knob_Poverty;
1985 _SpinDuration = x + Knob_Bonus;
1986 }
1987 return 1;
1988 }
1989
1990 // The CAS failed ... we can take any of the following actions:
1991 // * penalize: ctr -= Knob_CASPenalty
1992 // * exit spin with prejudice -- goto Abort;
1993 // * exit spin without prejudice.
1994 // * Since CAS is high-latency, retry immediately.
1995 prv = ox;
1996 if (caspty == -2) break;
1997 if (caspty == -1) goto Abort;
1998 ctr -= caspty;
1999 continue;
2000 }
2001
2002 // Did lock ownership change hands ?
2003 if (ox != prv && prv != NULL) {
2004 if (oxpty == -2) break;
2005 if (oxpty == -1) goto Abort;
2006 ctr -= oxpty;
2007 }
2008 prv = ox;
2009
2010 // Abort the spin if the owner is not executing.
2011 // The owner must be executing in order to drop the lock.
2012 // Spinning while the owner is OFFPROC is idiocy.
2013 // Consider: ctr -= RunnablePenalty ;
2014 if (Knob_OState && NotRunnable (Self, ox)) {
2015 goto Abort;
2016 }
2017 if (sss && _succ == NULL) _succ = Self;
2018 }
2019
2020 // Spin failed with prejudice -- reduce _SpinDuration.
2021 // TODO: Use an AIMD-like policy to adjust _SpinDuration.
2022 // AIMD is globally stable.
2023 {
2024 int x = _SpinDuration;
2025 if (x > 0) {
2026 // Consider an AIMD scheme like: x -= (x >> 3) + 100
2027 // This is globally stable and tends to damp the response.
2028 x -= Knob_Penalty;
2029 if (x < 0) x = 0;
2030 _SpinDuration = x;
2031 }
2032 }
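// Taken with the success path above (x + Knob_Bonus), the current policy
// is additive-increase/additive-decrease. The AIMD variant suggested
// above would make the decrease proportional, e.g. (illustrative only):
//
//   x -= (x >> 3) + 100;   // shed ~12.5% plus a constant penalty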
2033
2034 Abort:
2035 if (MaxSpin >= 0) Adjust(&_Spinner, -1);
2036 if (sss && _succ == Self) {
2037 _succ = NULL;
2038 // Invariant: after setting succ=null a contending thread
2039 // must recheck-retry _owner before parking. This usually happens
2040 // in the normal usage of TrySpin(), but it's safest
2041 // to make TrySpin() as foolproof as possible.
2042 OrderAccess::fence();