
src/hotspot/share/runtime/objectMonitor.cpp


*** 243,253 ****
  void ObjectMonitor::enter(TRAPS) {
    // The following code is ordered to check the most common cases first
    // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
    Thread * const Self = THREAD;

!   void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
    if (cur == NULL) {
      assert(_recursions == 0, "invariant");
      return;
    }
--- 243,253 ----
  void ObjectMonitor::enter(TRAPS) {
    // The following code is ordered to check the most common cases first
    // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
    Thread * const Self = THREAD;

!   void * cur = Atomic::cmpxchg(&_owner, (void*)NULL, Self);
    if (cur == NULL) {
      assert(_recursions == 0, "invariant");
      return;
    }
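
The only change in this hunk, repeated throughout the file, is the Atomic::cmpxchg argument order: the old form is (exchange_value, dest, compare_value), the new form is (dest, compare_value, exchange_value), with the destination first and the new value last. Below is a minimal standalone sketch of the same owner-acquisition CAS, using std::atomic rather than HotSpot's Atomic class; the Thread stand-in and try_enter name are illustrative only.

#include <atomic>

struct Thread {};                      // stand-in for HotSpot's Thread

static std::atomic<void*> _owner{nullptr};

// Mirrors Atomic::cmpxchg(&_owner, (void*)NULL, Self): destination,
// then the value to compare against, then the value to install.
bool try_enter(Thread* Self) {
  void* cur = nullptr;
  // Succeeds only if _owner still holds NULL; on failure cur is
  // updated to the thread that won the race.
  return _owner.compare_exchange_strong(cur, Self);
}
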
*** 401,411 ****
  // Callers must compensate as needed.

  int ObjectMonitor::TryLock(Thread * Self) {
    void * own = _owner;
    if (own != NULL) return 0;
!   if (Atomic::replace_if_null(Self, &_owner)) {
      assert(_recursions == 0, "invariant");
      return 1;
    }
    // The lock had been free momentarily, but we lost the race to the lock.
    // Interference -- the CAS failed.
--- 401,411 ----
  // Callers must compensate as needed.

  int ObjectMonitor::TryLock(Thread * Self) {
    void * own = _owner;
    if (own != NULL) return 0;
!   if (Atomic::replace_if_null(&_owner, Self)) {
      assert(_recursions == 0, "invariant");
      return 1;
    }
    // The lock had been free momentarily, but we lost the race to the lock.
    // Interference -- the CAS failed.
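
Atomic::replace_if_null gets the same treatment: the destination moves to the first position, giving (dest, new_value). Semantically it is a CAS that succeeds only when the destination currently holds NULL. A sketch of that contract with std::atomic; the free-standing template is illustrative, not HotSpot's implementation.

#include <atomic>

// Store new_value into dest iff dest currently holds NULL; return
// true when the store happened, false if some other value was seen.
template <typename T>
bool replace_if_null(std::atomic<T*>& dest, T* new_value) {
  T* expected = nullptr;
  return dest.compare_exchange_strong(expected, new_value);
}
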
*** 478,488 ****
    // Note that spinning tends to reduce the rate at which threads
    // enqueue and dequeue on EntryList|cxq.
    ObjectWaiter * nxt;
    for (;;) {
      node._next = nxt = _cxq;
!     if (Atomic::cmpxchg(&node, &_cxq, nxt) == nxt) break;

      // Interference - the CAS failed because _cxq changed.  Just retry.
      // As an optional optimization we retry the lock.
      if (TryLock (Self) > 0) {
        assert(_succ != Self, "invariant");
--- 478,488 ----
    // Note that spinning tends to reduce the rate at which threads
    // enqueue and dequeue on EntryList|cxq.
    ObjectWaiter * nxt;
    for (;;) {
      node._next = nxt = _cxq;
!     if (Atomic::cmpxchg(&_cxq, nxt, &node) == nxt) break;

      // Interference - the CAS failed because _cxq changed.  Just retry.
      // As an optional optimization we retry the lock.
      if (TryLock (Self) > 0) {
        assert(_succ != Self, "invariant");
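
The enqueue loop in this hunk is the classic lock-free push onto a singly linked list (the Treiber-stack pattern): read the head, point the new node at it, then CAS the head from the observed value to the new node, retrying on interference. A standalone sketch with std::atomic; Waiter and push are illustrative names.

#include <atomic>

struct Waiter {
  Waiter* _next = nullptr;
};

static std::atomic<Waiter*> _cxq{nullptr};

void push(Waiter* node) {
  for (;;) {
    Waiter* nxt = _cxq.load();
    node->_next = nxt;
    // The CAS succeeds only if _cxq still equals nxt; otherwise some
    // other thread changed the head and we retry with a fresh snapshot.
    if (_cxq.compare_exchange_weak(nxt, node)) break;
  }
}
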
*** 516,526 ****
    // -- the checker -- parked on a timer.

    if (nxt == NULL && _EntryList == NULL) {
      // Try to assume the role of responsible thread for the monitor.
      // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
!     Atomic::replace_if_null(Self, &_Responsible);
    }

    // The lock might have been released while this thread was occupied queueing
    // itself onto _cxq.  To close the race and avoid "stranding" and
    // progress-liveness failure we must resample-retry _owner before parking.
--- 516,526 ----
    // -- the checker -- parked on a timer.

    if (nxt == NULL && _EntryList == NULL) {
      // Try to assume the role of responsible thread for the monitor.
      // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
!     Atomic::replace_if_null(&_Responsible, Self);
    }

    // The lock might have been released while this thread was occupied queueing
    // itself onto _cxq.  To close the race and avoid "stranding" and
    // progress-liveness failure we must resample-retry _owner before parking.
*** 771,781 ****
    // and then unlink Self from EntryList.  We have to drain eventually,
    // so it might as well be now.

    ObjectWaiter * v = _cxq;
    assert(v != NULL, "invariant");
!   if (v != SelfNode || Atomic::cmpxchg(SelfNode->_next, &_cxq, v) != v) {
      // The CAS above can fail from interference IFF a "RAT" arrived.
      // In that case Self must be in the interior and can no longer be
      // at the head of cxq.
      if (v == SelfNode) {
        assert(_cxq != v, "invariant");
--- 771,781 ----
    // and then unlink Self from EntryList.  We have to drain eventually,
    // so it might as well be now.

    ObjectWaiter * v = _cxq;
    assert(v != NULL, "invariant");
!   if (v != SelfNode || Atomic::cmpxchg(&_cxq, v, SelfNode->_next) != v) {
      // The CAS above can fail from interference IFF a "RAT" arrived.
      // In that case Self must be in the interior and can no longer be
      // at the head of cxq.
      if (v == SelfNode) {
        assert(_cxq != v, "invariant");
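
This hunk attempts to pop SelfNode from the head of cxq in a single CAS: if the observed head is still SelfNode, swing _cxq to SelfNode->_next; if the CAS loses to a recently arrived thread (the "RAT"), the node is now interior and has to be unlinked by traversal instead. A sketch of just the head-pop attempt; names are illustrative.

#include <atomic>

struct Waiter { Waiter* _next = nullptr; };

// Returns true if node was still the head of cxq and was unlinked
// there; false means a newer arrival pushed in front, so the node is
// interior and a list traversal is required to remove it.
bool try_unlink_head(std::atomic<Waiter*>& cxq, Waiter* node) {
  Waiter* v = cxq.load();
  if (v != node) return false;
  // Swing the head to the successor iff the head is still this node.
  return cxq.compare_exchange_strong(v, node->_next);
}
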
*** 957,967 ****
    // Only the current lock owner can manipulate the EntryList or
    // drain _cxq, so we need to reacquire the lock.  If we fail
    // to reacquire the lock the responsibility for ensuring succession
    // falls to the new owner.
    //
!   if (!Atomic::replace_if_null(THREAD, &_owner)) {
      return;
    }

    guarantee(_owner == THREAD, "invariant");
--- 957,967 ----
    // Only the current lock owner can manipulate the EntryList or
    // drain _cxq, so we need to reacquire the lock.  If we fail
    // to reacquire the lock the responsibility for ensuring succession
    // falls to the new owner.
    //
!   if (!Atomic::replace_if_null(&_owner, THREAD)) {
      return;
    }

    guarantee(_owner == THREAD, "invariant");
*** 993,1003 ****
    // Drain _cxq into EntryList - bulk transfer.
    // First, detach _cxq.
    // The following loop is tantamount to: w = swap(&cxq, NULL)
    for (;;) {
      assert(w != NULL, "Invariant");
!     ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w);
      if (u == w) break;
      w = u;
    }

    assert(w != NULL, "invariant");
--- 993,1003 ----
    // Drain _cxq into EntryList - bulk transfer.
    // First, detach _cxq.
    // The following loop is tantamount to: w = swap(&cxq, NULL)
    for (;;) {
      assert(w != NULL, "Invariant");
!     ObjectWaiter * u = Atomic::cmpxchg(&_cxq, w, (ObjectWaiter*)NULL);
      if (u == w) break;
      w = u;
    }

    assert(w != NULL, "invariant");
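
As the comment in the hunk notes, the loop is tantamount to w = swap(&cxq, NULL): keep CASing the observed head down to NULL until the CAS wins, at which point w holds the entire detached chain and concurrent pushers start over from an empty list. A sketch of the same loop with std::atomic; the one-line exchange equivalent is noted in the final comment.

#include <atomic>

struct Waiter { Waiter* _next = nullptr; };

// Detach the whole cxq chain, returning its head (may be NULL here,
// unlike the hunk above, which asserts a non-empty list).
Waiter* detach(std::atomic<Waiter*>& cxq) {
  Waiter* w = cxq.load();
  while (w != nullptr) {
    Waiter* u = w;
    // Try to swing cxq from w to NULL; on failure u is refreshed to
    // the new head (another thread pushed), so retry from there.
    if (cxq.compare_exchange_weak(u, nullptr)) break;
    w = u;
  }
  return w;  // one-shot equivalent: return cxq.exchange(nullptr);
}
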
*** 1457,1467 ****
    } else {
      iterator->TState = ObjectWaiter::TS_CXQ;

      for (;;) {
        ObjectWaiter * front = _cxq;
        iterator->_next = front;
!       if (Atomic::cmpxchg(iterator, &_cxq, front) == front) {
          break;
        }
      }
    }
--- 1457,1467 ----
    } else {
      iterator->TState = ObjectWaiter::TS_CXQ;

      for (;;) {
        ObjectWaiter * front = _cxq;
        iterator->_next = front;
!       if (Atomic::cmpxchg(&_cxq, front, iterator) == front) {
          break;
        }
      }
    }
*** 1678,1688 ****
    // the spin without prejudice or apply a "penalty" to the
    // spin count-down variable "ctr", reducing it by 100, say.

    Thread * ox = (Thread *) _owner;
    if (ox == NULL) {
!     ox = (Thread*)Atomic::cmpxchg(Self, &_owner, (void*)NULL);
      if (ox == NULL) {
        // The CAS succeeded -- this thread acquired ownership
        // Take care of some bookkeeping to exit spin state.
        if (_succ == Self) {
          _succ = NULL;
--- 1678,1688 ----
    // the spin without prejudice or apply a "penalty" to the
    // spin count-down variable "ctr", reducing it by 100, say.

    Thread * ox = (Thread *) _owner;
    if (ox == NULL) {
!     ox = (Thread*)Atomic::cmpxchg(&_owner, (void*)NULL, Self);
      if (ox == NULL) {
        // The CAS succeeded -- this thread acquired ownership
        // Take care of some bookkeeping to exit spin state.
        if (_succ == Self) {
          _succ = NULL;
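
The spin path uses the test-and-test-and-set idiom: read _owner with a plain load and only issue the CAS when the owner is observed to be NULL, which keeps the monitor's cache line in a shared state while spinning (the RTS->RTO upgrade concern called out at the top of enter()). A sketch of that shape; the ctr budget and names are illustrative.

#include <atomic>

struct Thread {};

bool spin_acquire(std::atomic<void*>& owner, Thread* Self, int ctr) {
  while (ctr-- > 0) {
    void* ox = owner.load();   // cheap read; no exclusive cache access
    if (ox == nullptr) {
      void* expected = nullptr;
      // Pay for the CAS only now, as in cmpxchg(&_owner, NULL, Self).
      if (owner.compare_exchange_strong(expected, Self)) return true;
    }
  }
  return false;                // spin budget exhausted; caller blocks
}
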