
src/hotspot/share/runtime/objectMonitor.cpp

*** 247,257 ****
void ObjectMonitor::enter(TRAPS) {
  // The following code is ordered to check the most common cases first
  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
  Thread * const Self = THREAD;

! void * cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL);
  if (cur == NULL) {
    // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
    assert(_recursions == 0, "invariant");
    assert(_owner == Self, "invariant");
    return;
--- 247,257 ----
void ObjectMonitor::enter(TRAPS) {
  // The following code is ordered to check the most common cases first
  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
  Thread * const Self = THREAD;

! void * cur = Atomic::cmpxchg((void*)Self, &_owner, (void*)NULL);
  if (cur == NULL) {
    // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
    assert(_recursions == 0, "invariant");
    assert(_owner == Self, "invariant");
    return;

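On both sides of this hunk the operation is the same compare-and-swap of Self into _owner against an expected NULL; the new templated Atomic::cmpxchg deduces its operand types, so the explicit (void*) casts are needed to make Self, &_owner, and NULL agree. A minimal sketch of the same CAS-acquire fast path, written against std::atomic rather than HotSpot's Atomic class (all names here are illustrative, not HotSpot's):

    #include <atomic>
    #include <cassert>

    struct Thread;                          // opaque stand-in for the JVM thread

    std::atomic<void*> owner{nullptr};      // NULL means "monitor unowned"

    // Uncontended fast path: install `self` only if the monitor is free.
    bool try_enter_fast_path(Thread* self) {
      void* expected = nullptr;
      if (owner.compare_exchange_strong(expected, self)) {
        assert(owner.load() == self);
        return true;                        // acquired with a single CAS
      }
      return false;                         // `expected` now holds the real owner
    }
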
*** 404,414 ****
// Callers must compensate as needed.

int ObjectMonitor::TryLock(Thread * Self) {
  void * own = _owner;
  if (own != NULL) return 0;
! if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
    // Either guarantee _recursions == 0 or set _recursions = 0.
    assert(_recursions == 0, "invariant");
    assert(_owner == Self, "invariant");
    return 1;
  }
--- 404,414 ----
// Callers must compensate as needed.

int ObjectMonitor::TryLock(Thread * Self) {
  void * own = _owner;
  if (own != NULL) return 0;
! if (Atomic::cmpxchg((void*)Self, &_owner, (void*)NULL) == NULL) {
    // Either guarantee _recursions == 0 or set _recursions = 0.
    assert(_recursions == 0, "invariant");
    assert(_owner == Self, "invariant");
    return 1;
  }

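TryLock is a test-and-test-and-set: the plain read of _owner filters out the common contended case before any CAS is attempted, which is what the file's earlier comment about avoiding RTS->RTO cache line upgrades refers to. A sketch of that shape, again with std::atomic as a stand-in:

    #include <atomic>

    struct Thread;

    std::atomic<void*> owner{nullptr};

    // Test-and-test-and-set: the relaxed load filters the common contended
    // case without issuing a store, so the cache line can stay shared.
    int try_lock(Thread* self) {
      if (owner.load(std::memory_order_relaxed) != nullptr) return 0;
      void* expected = nullptr;
      return owner.compare_exchange_strong(expected, self) ? 1 : 0;
    }
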
*** 474,484 ****
  // Note that spinning tends to reduce the rate at which threads
  // enqueue and dequeue on EntryList|cxq.
  ObjectWaiter * nxt;
  for (;;) {
    node._next = nxt = _cxq;
!   if (Atomic::cmpxchg_ptr(&node, &_cxq, nxt) == nxt) break;

    // Interference - the CAS failed because _cxq changed.  Just retry.
    // As an optional optimization we retry the lock.
    if (TryLock (Self) > 0) {
      assert(_succ != Self, "invariant");
--- 474,484 ----
  // Note that spinning tends to reduce the rate at which threads
  // enqueue and dequeue on EntryList|cxq.
  ObjectWaiter * nxt;
  for (;;) {
    node._next = nxt = _cxq;
!   if (Atomic::cmpxchg(&node, &_cxq, nxt) == nxt) break;

    // Interference - the CAS failed because _cxq changed.  Just retry.
    // As an optional optimization we retry the lock.
    if (TryLock (Self) > 0) {
      assert(_succ != Self, "invariant");

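No casts are needed in this hunk because every argument is already an ObjectWaiter*, so template deduction succeeds as-is. The loop itself is a lock-free LIFO push of the entering thread's node onto _cxq; a self-contained sketch of the idiom, with the types reduced to the minimum:

    #include <atomic>

    struct ObjectWaiter {
      ObjectWaiter* _next = nullptr;
    };

    std::atomic<ObjectWaiter*> cxq{nullptr};

    // Lock-free LIFO push: snapshot the head, link the node in front of it,
    // then CAS the head from the snapshot to the node; retry on interference.
    void push_onto_cxq(ObjectWaiter* node) {
      for (;;) {
        ObjectWaiter* nxt = cxq.load(std::memory_order_relaxed);
        node->_next = nxt;
        if (cxq.compare_exchange_weak(nxt, node)) break;
        // CAS failed: another enter() pushed, or exit() detached the list.
      }
    }
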
*** 512,522 ****
  // -- the checker -- parked on a timer.

  if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
    // Try to assume the role of responsible thread for the monitor.
    // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
!   Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
  }

  // The lock might have been released while this thread was occupied queueing
  // itself onto _cxq.  To close the race and avoid "stranding" and
  // progress-liveness failure we must resample-retry _owner before parking.
--- 512,522 ----
  // -- the checker -- parked on a timer.

  if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
    // Try to assume the role of responsible thread for the monitor.
    // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
!   Atomic::cmpxchg(Self, &_Responsible, (Thread*)NULL);
  }

  // The lock might have been released while this thread was occupied queueing
  // itself onto _cxq.  To close the race and avoid "stranding" and
  // progress-liveness failure we must resample-retry _owner before parking.

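Here only the comparand needs a cast, (Thread*)NULL, since Self already carries the right type and _Responsible is a Thread* field. The CAS is a one-shot claim: install Self as the responsible thread only if the role is vacant, and ignore the result, because all the caller needs is that some responsible thread exists. A sketch, assuming a bare std::atomic stand-in for _Responsible:

    #include <atomic>

    struct Thread;

    std::atomic<Thread*> responsible{nullptr};

    // One-shot claim: become the responsible thread only if the role is
    // vacant.  The result is deliberately ignored -- losing the race still
    // leaves a responsible thread in place, which is all that matters.
    void try_become_responsible(Thread* self) {
      Thread* expected = nullptr;
      responsible.compare_exchange_strong(expected, self);
    }
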
*** 536,546 ****
    if (TryLock(Self) > 0) break;
    assert(_owner != Self, "invariant");

    if ((SyncFlags & 2) && _Responsible == NULL) {
!     Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
    }

    // park self
    if (_Responsible == Self || (SyncFlags & 1)) {
      TEVENT(Inflated enter - park TIMED);
--- 536,546 ----
    if (TryLock(Self) > 0) break;
    assert(_owner != Self, "invariant");

    if ((SyncFlags & 2) && _Responsible == NULL) {
!     Atomic::cmpxchg(Self, &_Responsible, (Thread*)NULL);
    }

    // park self
    if (_Responsible == Self || (SyncFlags & 1)) {
      TEVENT(Inflated enter - park TIMED);

*** 793,803 ****
  // and then unlink Self from EntryList.  We have to drain eventually,
  // so it might as well be now.

  ObjectWaiter * v = _cxq;
  assert(v != NULL, "invariant");
! if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
    // The CAS above can fail from interference IFF a "RAT" arrived.
    // In that case Self must be in the interior and can no longer be
    // at the head of cxq.
    if (v == SelfNode) {
      assert(_cxq != v, "invariant");
--- 793,803 ----
  // and then unlink Self from EntryList.  We have to drain eventually,
  // so it might as well be now.

  ObjectWaiter * v = _cxq;
  assert(v != NULL, "invariant");
! if (v != SelfNode || Atomic::cmpxchg(SelfNode->_next, &_cxq, v) != v) {
    // The CAS above can fail from interference IFF a "RAT" arrived.
    // In that case Self must be in the interior and can no longer be
    // at the head of cxq.
    if (v == SelfNode) {
      assert(_cxq != v, "invariant");

*** 945,955 ****
      // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
      // store into a _dummy variable.  That store is not needed, but can result
      // in massive wasteful coherency traffic on classic SMP systems.
      // Instead, I use release_store(), which is implemented as just a simple
      // ST on x64, x86 and SPARC.
!     OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
      OrderAccess::storeload();                        // See if we need to wake a successor
      if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
        TEVENT(Inflated exit - simple egress);
        return;
      }
--- 945,955 ----
      // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
      // store into a _dummy variable.  That store is not needed, but can result
      // in massive wasteful coherency traffic on classic SMP systems.
      // Instead, I use release_store(), which is implemented as just a simple
      // ST on x64, x86 and SPARC.
!     OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
      OrderAccess::storeload();                        // See if we need to wake a successor
      if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
        TEVENT(Inflated exit - simple egress);
        return;
      }

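This hunk is the heart of the exit protocol: publish the unlock with a release store, then a StoreLoad barrier so the subsequent loads of _EntryList, _cxq, and _succ cannot float above the store; without the barrier, an exiting thread and a parking thread can each miss the other and strand a waiter. A sketch of the drop-then-recheck sequence with std::atomic (field names are borrowed for readability, not HotSpot's declarations):

    #include <atomic>

    struct ObjectWaiter;

    std::atomic<void*>         owner{nullptr};
    std::atomic<ObjectWaiter*> entry_list{nullptr};
    std::atomic<ObjectWaiter*> cxq{nullptr};
    std::atomic<void*>         succ{nullptr};

    // release_store + storeload + recheck, in std::atomic terms.  The
    // seq_cst fence keeps the queue loads below the owner store; without
    // it, exiter and enqueuer can each miss the other (a stranded waiter).
    bool exit_fast_path() {
      owner.store(nullptr, std::memory_order_release);      // drop the lock
      std::atomic_thread_fence(std::memory_order_seq_cst);  // StoreLoad
      if ((entry_list.load() == nullptr && cxq.load() == nullptr) ||
          succ.load() != nullptr) {
        return true;   // no waiters, or a successor is already active
      }
      return false;    // caller must reacquire and wake a successor
    }
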
*** 990,1006 ****
      // Only the current lock owner can manipulate the EntryList or
      // drain _cxq, so we need to reacquire the lock.  If we fail
      // to reacquire the lock the responsibility for ensuring succession
      // falls to the new owner.
      //
!     if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
        return;
      }
      TEVENT(Exit - Reacquired);
    } else {
      if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
!       OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
        OrderAccess::storeload();
        // Ratify the previously observed values.
        if (_cxq == NULL || _succ != NULL) {
          TEVENT(Inflated exit - simple egress);
          return;
--- 990,1006 ----
      // Only the current lock owner can manipulate the EntryList or
      // drain _cxq, so we need to reacquire the lock.  If we fail
      // to reacquire the lock the responsibility for ensuring succession
      // falls to the new owner.
      //
!     if (Atomic::cmpxchg((void*)THREAD, &_owner, (void*)NULL) != NULL) {
        return;
      }
      TEVENT(Exit - Reacquired);
    } else {
      if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
!       OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
        OrderAccess::storeload();
        // Ratify the previously observed values.
        if (_cxq == NULL || _succ != NULL) {
          TEVENT(Inflated exit - simple egress);
          return;

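The reacquire CAS encodes the succession hand-off: having dropped the lock and then observed waiters with no active successor, the exiter tries to take the lock back purely in order to wake someone; if the CAS fails, another thread has become owner, and the obligation travels with ownership. A sketch of that rule, under the same illustrative std::atomic framing as above:

    #include <atomic>

    std::atomic<void*> owner{nullptr};

    // Succession hand-off: the exiter saw waiters and no successor after
    // dropping the lock, so it tries to take the lock back just to wake one.
    // A failed CAS is benign -- the new owner inherits the duty to ensure
    // succession, exactly as the comment in the hunk above states.
    bool reacquire_for_succession(void* self) {
      void* expected = nullptr;
      return owner.compare_exchange_strong(expected, self);
      // true  -> reacquired; wake a successor, then exit again
      // false -> someone else owns the monitor; simply return
    }
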
*** 1015,1025 ****
      //     we either restart/rerun the exit operation, or simply
      //     fall-through into the code below which wakes a successor.
      // B.  If the elements forming the EntryList|cxq are TSM
      //     we could simply unpark() the lead thread and return
      //     without having set _succ.
!     if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
        TEVENT(Inflated exit - reacquired succeeded);
        return;
      }
      TEVENT(Inflated exit - reacquired failed);
    } else {
--- 1015,1025 ----
      //     we either restart/rerun the exit operation, or simply
      //     fall-through into the code below which wakes a successor.
      // B.  If the elements forming the EntryList|cxq are TSM
      //     we could simply unpark() the lead thread and return
      //     without having set _succ.
!     if (Atomic::cmpxchg((void*)THREAD, &_owner, (void*)NULL) != NULL) {
        TEVENT(Inflated exit - reacquired succeeded);
        return;
      }
      TEVENT(Inflated exit - reacquired failed);
    } else {

*** 1050,1060 ****
    // First, detach _cxq.
    // The following loop is tantamount to: w = swap(&cxq, NULL)
    w = _cxq;
    for (;;) {
      assert(w != NULL, "Invariant");
!     ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
      if (u == w) break;
      w = u;
    }

    assert(w != NULL, "invariant");
--- 1050,1060 ----
    // First, detach _cxq.
    // The following loop is tantamount to: w = swap(&cxq, NULL)
    w = _cxq;
    for (;;) {
      assert(w != NULL, "Invariant");
!     ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w);
      if (u == w) break;
      w = u;
    }

    assert(w != NULL, "invariant");

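The drain loop is an atomic exchange built out of CAS: each failed attempt returns the fresh head, which becomes the next comparand, and on success the whole chain is privately owned by the exiting thread. Note that the typed template on the new side also removes the result cast. A sketch of the idiom (with std::atomic the entire loop collapses to a single exchange):

    #include <atomic>
    #include <cassert>

    struct ObjectWaiter { ObjectWaiter* _next = nullptr; };

    std::atomic<ObjectWaiter*> cxq{nullptr};

    // swap(&cxq, NULL) built from CAS: a failed CAS returns the fresh head,
    // which becomes the comparand for the next attempt.  The returned chain
    // is private to the caller once the head has been swung to NULL.
    ObjectWaiter* detach_cxq() {
      ObjectWaiter* w = cxq.load();
      for (;;) {
        assert(w != nullptr);
        if (cxq.compare_exchange_strong(w, nullptr)) break;  // w refreshed on failure
      }
      return w;  // equivalently: return cxq.exchange(nullptr);
    }
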
*** 1091,1101 ****
    // First, detach _cxq.
    // The following loop is tantamount to: w = swap(&cxq, NULL)
    w = _cxq;
    for (;;) {
      assert(w != NULL, "Invariant");
!     ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
      if (u == w) break;
      w = u;
    }

    assert(w != NULL, "invariant");
--- 1091,1101 ----
    // First, detach _cxq.
    // The following loop is tantamount to: w = swap(&cxq, NULL)
    w = _cxq;
    for (;;) {
      assert(w != NULL, "Invariant");
!     ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w);
      if (u == w) break;
      w = u;
    }

    assert(w != NULL, "invariant");

*** 1144,1154 ****
  // Drain _cxq into EntryList - bulk transfer.
  // First, detach _cxq.
  // The following loop is tantamount to: w = swap(&cxq, NULL)
  for (;;) {
    assert(w != NULL, "Invariant");
!   ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
    if (u == w) break;
    w = u;
  }

  TEVENT(Inflated exit - drain cxq into EntryList);
--- 1144,1154 ----
  // Drain _cxq into EntryList - bulk transfer.
  // First, detach _cxq.
  // The following loop is tantamount to: w = swap(&cxq, NULL)
  for (;;) {
    assert(w != NULL, "Invariant");
!   ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w);
    if (u == w) break;
    w = u;
  }

  TEVENT(Inflated exit - drain cxq into EntryList);

*** 1277,1287 ****
  // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
  // out-of-scope (non-extant).
  Wakee = NULL;

  // Drop the lock
! OrderAccess::release_store_ptr(&_owner, NULL);
  OrderAccess::fence();                               // ST _owner vs LD in unpark()

  if (SafepointSynchronize::do_call_back()) {
    TEVENT(unpark before SAFEPOINT);
  }
--- 1277,1287 ----
  // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
  // out-of-scope (non-extant).
  Wakee = NULL;

  // Drop the lock
! OrderAccess::release_store(&_owner, (void*)NULL);
  OrderAccess::fence();                               // ST _owner vs LD in unpark()

  if (SafepointSynchronize::do_call_back()) {
    TEVENT(unpark before SAFEPOINT);
  }

*** 1686,1707 ****
    } else {
      iterator->TState = ObjectWaiter::TS_CXQ;
      for (;;) {
        ObjectWaiter * front = _cxq;
        iterator->_next = front;
!       if (Atomic::cmpxchg_ptr(iterator, &_cxq, front) == front) {
          break;
        }
      }
    }
  } else if (policy == 3) {      // append to cxq
    iterator->TState = ObjectWaiter::TS_CXQ;
    for (;;) {
      ObjectWaiter * tail = _cxq;
      if (tail == NULL) {
        iterator->_next = NULL;
!       if (Atomic::cmpxchg_ptr(iterator, &_cxq, NULL) == NULL) {
          break;
        }
      } else {
        while (tail->_next != NULL) tail = tail->_next;
        tail->_next = iterator;
--- 1686,1707 ----
    } else {
      iterator->TState = ObjectWaiter::TS_CXQ;
      for (;;) {
        ObjectWaiter * front = _cxq;
        iterator->_next = front;
!       if (Atomic::cmpxchg(iterator, &_cxq, front) == front) {
          break;
        }
      }
    }
  } else if (policy == 3) {      // append to cxq
    iterator->TState = ObjectWaiter::TS_CXQ;
    for (;;) {
      ObjectWaiter * tail = _cxq;
      if (tail == NULL) {
        iterator->_next = NULL;
!       if (Atomic::cmpxchg(iterator, &_cxq, (ObjectWaiter*)NULL) == NULL) {
          break;
        }
      } else {
        while (tail->_next != NULL) tail = tail->_next;
        tail->_next = iterator;

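In the append path only the empty-to-nonempty transition is CASed: enter() threads push at the head, so the head pointer races, but interior _next links of queued nodes are written only by the monitor owner, and notify runs while holding the monitor. A sketch of that asymmetry; the safety claim about the plain store rests on the owner-only invariant just described, which is an assumption carried over from the surrounding code rather than anything the sketch itself enforces:

    #include <atomic>

    struct ObjectWaiter { ObjectWaiter* _next = nullptr; };

    std::atomic<ObjectWaiter*> cxq{nullptr};

    // Append (the policy == 3 shape): only the empty->nonempty transition
    // races with enter()'s head pushes and needs a CAS.  The plain store to
    // tail->_next assumes the owner-only invariant: nothing but the lock
    // holder ever writes an interior link.
    void append_to_cxq(ObjectWaiter* iterator) {
      for (;;) {
        ObjectWaiter* tail = cxq.load();
        if (tail == nullptr) {
          iterator->_next = nullptr;
          if (cxq.compare_exchange_strong(tail, iterator)) break;
        } else {
          while (tail->_next != nullptr) tail = tail->_next;
          tail->_next = iterator;   // interior link: no concurrent writer
          break;
        }
      }
    }
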
*** 1978,1988 ****
    // the spin without prejudice or apply a "penalty" to the
    // spin count-down variable "ctr", reducing it by 100, say.

    Thread * ox = (Thread *) _owner;
    if (ox == NULL) {
!     ox = (Thread *) Atomic::cmpxchg_ptr(Self, &_owner, NULL);
      if (ox == NULL) {
        // The CAS succeeded -- this thread acquired ownership
        // Take care of some bookkeeping to exit spin state.
        if (sss && _succ == Self) {
          _succ = NULL;
--- 1978,1988 ----
    // the spin without prejudice or apply a "penalty" to the
    // spin count-down variable "ctr", reducing it by 100, say.

    Thread * ox = (Thread *) _owner;
    if (ox == NULL) {
!     ox = (Thread*)Atomic::cmpxchg((void*)Self, &_owner, (void*)NULL);
      if (ox == NULL) {
        // The CAS succeeded -- this thread acquired ownership
        // Take care of some bookkeeping to exit spin state.
        if (sss && _succ == Self) {
          _succ = NULL;

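The spin loop samples _owner with an ordinary load each iteration and pays for a CAS only when the monitor looks free, bounding the coherency traffic generated while spinning. A sketch of a bounded spin in that shape, with an illustrative fixed ctr budget rather than HotSpot's adaptive spin policy:

    #include <atomic>

    struct Thread;

    std::atomic<void*> owner{nullptr};

    // Bounded spin: a cheap load per iteration, with the CAS attempted only
    // when the monitor appears free.  `ctr` is an illustrative fixed budget,
    // not HotSpot's adaptive spin heuristic.
    bool try_spin(Thread* self, int ctr) {
      while (ctr-- > 0) {
        void* ox = owner.load(std::memory_order_relaxed);
        if (ox == nullptr && owner.compare_exchange_strong(ox, self)) {
          return true;               // acquired while spinning
        }
        // a pause/backoff instruction would typically go here
      }
      return false;                  // budget exhausted; caller parks
    }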