    // just exited the monitor.
  }

  if (event.should_commit()) {
    event.set_monitorClass(((oop)this->object())->klass());
    event.set_previousOwner((TYPE_THREAD)_previous_owner_tid);
    event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
    event.commit();
  }

  OM_PERFDATA_OP(ContendedLockAttempts, inc());
}


// Caveat: TryLock() is not necessarily serializing if it returns failure.
// Callers must compensate as needed.

int ObjectMonitor::TryLock(Thread * Self) {
  void * own = _owner;
  if (own != NULL) return 0;
  if (Atomic::cmpxchg_if_null((void*)Self, &_owner)) {
    // Either guarantee _recursions == 0 or set _recursions = 0.
    assert(_recursions == 0, "invariant");
    assert(_owner == Self, "invariant");
    return 1;
  }
  // The lock had been free momentarily, but we lost the race to the lock.
  // Interference -- the CAS failed.
  // We can either return -1 or retry.
  // Retry doesn't make as much sense because the lock was just acquired.
  return -1;
}
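
// A minimal sketch of the same test-and-test-and-set shape in portable C++
// (std::atomic) -- illustrative only, not part of this file. It makes the
// caveat above concrete: the early-out on a non-NULL owner is a plain load
// with no barrier, so a failed try_lock() is not serializing and a caller
// that needs ordering must fence for itself.

#include <atomic>
#include <thread>

struct TatasLock {
  std::atomic<std::thread::id> _owner{std::thread::id()};

  // Returns 1 on acquire, 0 if observed held, -1 if the CAS lost a race.
  int try_lock() {
    std::thread::id expected{};                       // empty id == "NULL" owner
    if (_owner.load(std::memory_order_relaxed) != std::thread::id()) {
      return 0;                                       // no fence on this path
    }
    if (_owner.compare_exchange_strong(expected, std::this_thread::get_id(),
                                       std::memory_order_acquire)) {
      return 1;                                       // acquired
    }
    return -1;                                        // interference: CAS failed
  }

  // One way a caller can compensate on failure, if it needs serialization:
  int try_lock_serializing() {
    int r = try_lock();
    if (r <= 0) std::atomic_thread_fence(std::memory_order_seq_cst);
    return r;
  }
};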

#define MAX_RECHECK_INTERVAL 1000
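
// A small illustrative sketch (not part of this file) of the timed-park
// backoff EnterI applies below: the recheck interval grows by 8x per futile
// wakeup and is clamped at MAX_RECHECK_INTERVAL, so a Responsible thread
// polls at 1, 8, 64, 512, then 1000 (ParkEvent timeouts are in milliseconds).

#include <algorithm>
#include <cstdio>

int main() {
  int recheckInterval = 1;
  for (int wakeup = 1; wakeup <= 6; wakeup++) {
    std::printf("futile wakeup %d: next timed park = %d ms\n",
                wakeup, recheckInterval);
    recheckInterval = std::min(recheckInterval * 8, MAX_RECHECK_INTERVAL);
  }
  return 0;
}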

void ObjectMonitor::EnterI(TRAPS) {
  Thread * const Self = THREAD;
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");

  // Try the lock - TATAS

  // ...
  // to detect and recover from the race. (Stranding is a form of progress failure
  // where the monitor is unlocked but all the contending threads remain parked).
  // That is, at least one of the contended threads will periodically poll _owner.
  // One of the contending threads will become the designated "Responsible" thread.
  // The Responsible thread uses a timed park instead of a normal indefinite park
  // operation -- it periodically wakes and checks for and recovers from potential
  // strandings admitted by 1-0 exit operations. We need at most one Responsible
  // thread per monitor at any given moment. Only threads on cxq|EntryList may
  // be responsible for a monitor.
  //
  // Currently, one of the contended threads takes on the added role of "Responsible".
  // A viable alternative would be to use a dedicated "stranding checker" thread
  // that periodically iterated over all the threads (or active monitors) and unparked
  // successors where there was risk of stranding. This would help eliminate the
  // timer scalability issues we see on some platforms as we'd only have one thread
  // -- the checker -- parked on a timer.

  if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
    // Try to assume the role of responsible thread for the monitor.
    // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
    Atomic::cmpxchg_if_null(Self, &_Responsible);
  }

  // The lock might have been released while this thread was occupied queueing
  // itself onto _cxq. To close the race and avoid "stranding" and
  // progress-liveness failure we must resample-retry _owner before parking.
  // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
  // In this case the ST-MEMBAR is accomplished with CAS().
  //
  // TODO: Defer all thread state transitions until park-time.
  // Since state transitions are heavy and inefficient we'd like
  // to defer the state transitions until absolutely necessary,
  // and in doing so avoid some transitions ...

  TEVENT(Inflated enter - Contention);
  int nWakeups = 0;
  int recheckInterval = 1;

  for (;;) {

    if (TryLock(Self) > 0) break;
    assert(_owner != Self, "invariant");

    if ((SyncFlags & 2) && _Responsible == NULL) {
      Atomic::cmpxchg_if_null(Self, &_Responsible);
    }

    // park self
    if (_Responsible == Self || (SyncFlags & 1)) {
      TEVENT(Inflated enter - park TIMED);
      Self->_ParkEvent->park((jlong) recheckInterval);
      // Increase the recheckInterval, but clamp the value.
      recheckInterval *= 8;
      if (recheckInterval > MAX_RECHECK_INTERVAL) {
        recheckInterval = MAX_RECHECK_INTERVAL;
      }
    } else {
      TEVENT(Inflated enter - park UNTIMED);
      Self->_ParkEvent->park();
    }

    if (TryLock(Self) > 0) break;

    // The lock is still contested.
    // Keep a tally of the # of futile wakeups.

  // ...
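
// A minimal std::atomic rendering (illustrative, not part of this file) of
// the Dekker/Lamport pairing noted above: the enterer performs
// "ST cxq; MEMBAR; LD _owner" while the 1-0 exit performs
// "ST _owner = NULL; MEMBAR; LD cxq". With a full fence on both sides at
// least one party observes the other, which is what closes the stranding
// window before the enterer commits to parking.

#include <atomic>

static std::atomic<void*> owner_{nullptr};
static std::atomic<void*> cxq_{nullptr};

// Enter side: after enqueueing, re-sample the owner. Returns true if the
// lock turned out to be free and the caller should retry TryLock instead
// of parking.
bool must_retry_before_park(void* self) {
  cxq_.store(self, std::memory_order_seq_cst);              // ST cxq (a CAS in EnterI)
  return owner_.load(std::memory_order_seq_cst) == nullptr; // LD _owner
}

// Exit side (1-0 unlock): after dropping the lock, re-sample cxq. Returns
// true if a queued thread may be stranded and a successor must be woken.
bool must_wake_after_unlock() {
  owner_.store(nullptr, std::memory_order_seq_cst);         // ST _owner
  return cxq_.load(std::memory_order_seq_cst) != nullptr;   // LD cxq
}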
      // Note that spinners in Enter() also set _succ non-null.
      // In the current implementation spinners opportunistically set
      // _succ so that exiting threads might avoid waking a successor.
      // Another less appealing alternative would be for the exiting thread
      // to drop the lock and then spin briefly to see if a spinner managed
      // to acquire the lock. If so, the exiting thread could exit
      // immediately without waking a successor, otherwise the exiting
      // thread would need to dequeue and wake a successor.
      // (Note that we'd need to make the post-drop spin short, but no
      // shorter than the worst-case round-trip cache-line migration time.
      // The dropped lock needs to become visible to the spinner, and then
      // the acquisition of the lock by the spinner must become visible to
      // the exiting thread).

      // It appears that an heir-presumptive (successor) must be made ready.
      // Only the current lock owner can manipulate the EntryList or
      // drain _cxq, so we need to reacquire the lock. If we fail
      // to reacquire the lock the responsibility for ensuring succession
      // falls to the new owner.
      //
      if (!Atomic::cmpxchg_if_null((void*)THREAD, &_owner)) {
        return;
      }
      TEVENT(Exit - Reacquired);
    } else {
      if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
        OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
        OrderAccess::storeload();
        // Ratify the previously observed values.
        if (_cxq == NULL || _succ != NULL) {
          TEVENT(Inflated exit - simple egress);
          return;
        }

        // inopportune interleaving -- the exiting thread (this thread)
        // in the fast-exit path raced an entering thread in the slow-enter
        // path.
        // We have two choices:
        // A. Try to reacquire the lock.
        //    If the CAS() fails return immediately, otherwise
        //    we either restart/rerun the exit operation, or simply
        //    fall-through into the code below which wakes a successor.
        // B. If the elements forming the EntryList|cxq are TSM
        //    we could simply unpark() the lead thread and return
        //    without having set _succ.
        if (!Atomic::cmpxchg_if_null((void*)THREAD, &_owner)) {
          TEVENT(Inflated exit - reacquired succeeded);
          return;
        }
        TEVENT(Inflated exit - reacquired failed);
      } else {
        TEVENT(Inflated exit - complex egress);
      }
    }

    guarantee(_owner == THREAD, "invariant");

    ObjectWaiter * w = NULL;
    int QMode = Knob_QMode;

    if (QMode == 2 && _cxq != NULL) {
      // QMode == 2 : cxq has precedence over EntryList.
      // Try to directly wake a successor from the cxq.
      // If successful, the successor will need to unlink itself from cxq.
      w = _cxq;
      assert(w != NULL, "invariant");

  // ...
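
// A sketch (illustrative, not the implemented path) of the "drop the lock,
// then spin briefly" alternative discussed above: the exiter only wakes a
// successor if no spinner picked the lock up during a short post-drop spin.
// The spin bound here is an arbitrary assumption; as noted above it must be
// no shorter than a worst-case round-trip cache-line migration.

#include <atomic>

static std::atomic<void*> lock_owner{nullptr};

void exit_with_post_drop_spin(void (*wake_successor)()) {
  lock_owner.store(nullptr, std::memory_order_release);  // drop the lock
  std::atomic_thread_fence(std::memory_order_seq_cst);   // ST; MEMBAR; LD
  for (int i = 0; i < 128; i++) {                        // assumed spin bound
    if (lock_owner.load(std::memory_order_acquire) != nullptr) {
      return;  // a spinner acquired the lock; no wakeup needed
    }
  }
  wake_successor();  // nobody took it; dequeue and wake a successor
}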
    } else if (policy == 2) {      // prepend to cxq
      if (list == NULL) {
        iterator->_next = iterator->_prev = NULL;
        _EntryList = iterator;
      } else {
        iterator->TState = ObjectWaiter::TS_CXQ;
        for (;;) {
          ObjectWaiter * front = _cxq;
          iterator->_next = front;
          if (Atomic::cmpxchg(iterator, &_cxq, front) == front) {
            break;
          }
        }
      }
    } else if (policy == 3) {      // append to cxq
      iterator->TState = ObjectWaiter::TS_CXQ;
      for (;;) {
        ObjectWaiter * tail = _cxq;
        if (tail == NULL) {
          iterator->_next = NULL;
          if (Atomic::cmpxchg_if_null(iterator, &_cxq)) {
            break;
          }
        } else {
          while (tail->_next != NULL) tail = tail->_next;
          tail->_next = iterator;
          iterator->_prev = tail;
          iterator->_next = NULL;
          break;
        }
      }
    } else {
      ParkEvent * ev = iterator->_event;
      iterator->TState = ObjectWaiter::TS_RUN;
      OrderAccess::fence();
      ev->unpark();
    }

    // _WaitSetLock protects the wait queue, not the EntryList. We could
    // move the add-to-EntryList operation, above, outside the critical section
    // protected by _WaitSetLock. In practice that's not useful. With the
|