src/hotspot/share/runtime/mutex.cpp


*** 249,264 ****
  // 6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
  //    similar name.
  //
  // o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
- 
- // CASPTR() uses the canonical argument order that dominates in the literature.
- // Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.
- 
- #define CASPTR(a, c, s)  \
-   intptr_t(Atomic::cmpxchg_ptr((void *)(s), (void *)(a), (void *)(c)))
  
  #define UNS(x) (uintptr_t(x))
  #define TRACE(m)                   \
    {                                \
      static volatile int ctr = 0;   \
      int x = ++ctr;                 \
--- 249,258 ----

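Aside: the deleted CASPTR(a, c, s) took (address, compare-value, store-value) and returned the prior contents, while the Atomic::cmpxchg overloads substituted throughout this change take (exchange_value, dest, compare_value); both report success by returning a value equal to the compare operand. A minimal standalone sketch of that return-the-old-value convention, written against std::atomic rather than HotSpot's Atomic class (all names below are illustrative, not HotSpot's):

#include <atomic>
#include <cassert>
#include <cstdint>

// Mimics the return-the-old-value convention of Atomic::cmpxchg.
static intptr_t cmpxchg_returning_old(intptr_t exchange_value,
                                      std::atomic<intptr_t>* dest,
                                      intptr_t compare_value) {
  intptr_t expected = compare_value;
  // On failure, compare_exchange_strong writes the observed value into
  // 'expected', which is exactly the "return the old value" convention.
  dest->compare_exchange_strong(expected, exchange_value);
  return expected;
}

int main() {
  std::atomic<intptr_t> word{0};
  const intptr_t LBIT = 1;
  // Uncontended acquire: old value 0 comes back, so the CAS succeeded.
  assert(cmpxchg_returning_old(LBIT, &word, 0) == 0);
  // A second attempt fails and reports the current value instead.
  assert(cmpxchg_returning_old(LBIT, &word, 0) == LBIT);
  return 0;
}
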
*** 295,320 ****
  int Monitor::TryLock() {
    intptr_t v = _LockWord.FullWord;
    for (;;) {
      if ((v & _LBIT) != 0) return 0;
!     const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
      if (v == u) return 1;
      v = u;
    }
  }
  
  int Monitor::TryFast() {
    // Optimistic fast-path form ...
    // Fast-path attempt for the common uncontended case.
    // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
!   intptr_t v = CASPTR(&_LockWord, 0, _LBIT);  // agro ...
    if (v == 0) return 1;
    for (;;) {
      if ((v & _LBIT) != 0) return 0;
!     const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
      if (v == u) return 1;
      v = u;
    }
  }
--- 289,314 ----
  int Monitor::TryLock() {
    intptr_t v = _LockWord.FullWord;
    for (;;) {
      if ((v & _LBIT) != 0) return 0;
!     const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v);
      if (v == u) return 1;
      v = u;
    }
  }
  
  int Monitor::TryFast() {
    // Optimistic fast-path form ...
    // Fast-path attempt for the common uncontended case.
    // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
!   intptr_t v = Atomic::cmpxchg((intptr_t)_LBIT, &_LockWord.FullWord, (intptr_t)0);  // agro ...
    if (v == 0) return 1;
    for (;;) {
      if ((v & _LBIT) != 0) return 0;
!     const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v);
      if (v == u) return 1;
      v = u;
    }
  }

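Aside: the "RTS->RTO $ coherence upgrade" comment is about cache-line states: loading _LockWord first would fetch the line in shared state, and the subsequent CAS would then have to upgrade it to exclusive, so TryFast speculatively CASes against an assumed value of 0 before ever reading. A sketch of the same shape under the assumption of a single low-order lock bit, using std::atomic in place of HotSpot's Atomic (names are ours, not HotSpot's):

#include <atomic>
#include <cstdint>

static const intptr_t LBIT = 1;

int try_fast(std::atomic<intptr_t>& lock_word) {
  // Speculative CAS from 0: requests the cache line in exclusive state
  // immediately instead of loading it (shared) and then upgrading.
  intptr_t v = 0;
  if (lock_word.compare_exchange_strong(v, LBIT)) return 1;
  // Slow path: v now holds the observed value; retry while unlocked.
  for (;;) {
    if ((v & LBIT) != 0) return 0;       // already locked - give up
    if (lock_word.compare_exchange_weak(v, v | LBIT)) return 1;
    // CAS failure refreshed v; loop and re-examine it.
  }
}

int main() {
  std::atomic<intptr_t> word{0};
  return try_fast(word) == 1 ? 0 : 1;    // first acquire should succeed
}
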
*** 348,358 ****
    int SpinMax = NativeMonitorSpinLimit;
    int flgs = NativeMonitorFlags;
    for (;;) {
      intptr_t v = _LockWord.FullWord;
      if ((v & _LBIT) == 0) {
!       if (CASPTR (&_LockWord, v, v|_LBIT) == v) {
          return 1;
        }
        continue;
      }
  
--- 342,352 ----
    int SpinMax = NativeMonitorSpinLimit;
    int flgs = NativeMonitorFlags;
    for (;;) {
      intptr_t v = _LockWord.FullWord;
      if ((v & _LBIT) == 0) {
!       if (Atomic::cmpxchg (v|_LBIT, &_LockWord.FullWord, v) == v) {
          return 1;
        }
        continue;
      }
  
*** 417,433 ****
  inline int Monitor::AcquireOrPush(ParkEvent * ESelf) {
    intptr_t v = _LockWord.FullWord;
    for (;;) {
      if ((v & _LBIT) == 0) {
!       const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
        if (u == v) return 1;        // indicate acquired
        v = u;
      } else {
        // Anticipate success ...
        ESelf->ListNext = (ParkEvent *)(v & ~_LBIT);
!       const intptr_t u = CASPTR(&_LockWord, v, intptr_t(ESelf)|_LBIT);
        if (u == v) return 0;        // indicate pushed onto cxq
        v = u;
      }
      // Interference - LockWord change - just retry
    }
  }
--- 411,427 ----
  inline int Monitor::AcquireOrPush(ParkEvent * ESelf) {
    intptr_t v = _LockWord.FullWord;
    for (;;) {
      if ((v & _LBIT) == 0) {
!       const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v);
        if (u == v) return 1;        // indicate acquired
        v = u;
      } else {
        // Anticipate success ...
        ESelf->ListNext = (ParkEvent *)(v & ~_LBIT);
!       const intptr_t u = Atomic::cmpxchg(intptr_t(ESelf)|_LBIT, &_LockWord.FullWord, v);
        if (u == v) return 0;        // indicate pushed onto cxq
        v = u;
      }
      // Interference - LockWord change - just retry
    }
  }

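Aside: the else-branch of AcquireOrPush is a Treiber-stack push: ListNext is written first ("anticipate success"), then one CAS swings the list head while keeping the lock bit set. A simplified std::atomic sketch; unlike the real code, it bails out rather than racing to acquire when the lock bit is observed clear (types and names are illustrative):

#include <atomic>
#include <cstdint>

struct Waiter { Waiter* next; };

static const intptr_t LBIT = 1;

// Push w onto the list encoded in 'word', preserving the set lock bit.
// Returns false if the lock bit is observed clear; the real AcquireOrPush
// would then try to acquire the lock instead of giving up.
bool push_waiter(std::atomic<intptr_t>& word, Waiter* w) {
  intptr_t observed = word.load();
  for (;;) {
    if ((observed & LBIT) == 0) return false;        // lock released - bail
    // Anticipate success: link to the current head, lock bit stripped.
    w->next = reinterpret_cast<Waiter*>(observed & ~LBIT);
    const intptr_t desired = reinterpret_cast<intptr_t>(w) | LBIT;
    if (word.compare_exchange_weak(observed, desired)) return true;
    // Interference - head changed; 'observed' was refreshed, retry.
  }
}

int main() {
  std::atomic<intptr_t> word{LBIT};                  // locked, empty cxq
  Waiter w{nullptr};
  return push_waiter(word, &w) ? 0 : 1;
}
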
*** 461,482 ****
    // LockWord encoding = (cxq,LOCKBYTE)
    ESelf->reset();
    OrderAccess::fence();
  
    // Optional optimization ... try barging on the inner lock
!   if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(ESelf)) == 0) {
      goto OnDeck_LOOP;
    }
  
    if (AcquireOrPush(ESelf)) goto Exeunt;
  
    // At any given time there is at most one ondeck thread.
    // ondeck implies not resident on cxq and not resident on EntryList
    // Only the OnDeck thread can try to acquire -- contend for -- the lock.
    // CONSIDER: use Self->OnDeck instead of m->OnDeck.
    // Deschedule Self so that others may run.
!   while (OrderAccess::load_ptr_acquire(&_OnDeck) != ESelf) {
      ParkCommon(ESelf, 0);
    }
  
    // Self is now in the OnDeck position and will remain so until it
    // manages to acquire the lock.
--- 455,476 ----
    // LockWord encoding = (cxq,LOCKBYTE)
    ESelf->reset();
    OrderAccess::fence();
  
    // Optional optimization ... try barging on the inner lock
!   if ((NativeMonitorFlags & 32) && Atomic::cmpxchg(ESelf, &_OnDeck, (ParkEvent*)NULL) == NULL) {
      goto OnDeck_LOOP;
    }
  
    if (AcquireOrPush(ESelf)) goto Exeunt;
  
    // At any given time there is at most one ondeck thread.
    // ondeck implies not resident on cxq and not resident on EntryList
    // Only the OnDeck thread can try to acquire -- contend for -- the lock.
    // CONSIDER: use Self->OnDeck instead of m->OnDeck.
    // Deschedule Self so that others may run.
!   while (OrderAccess::load_acquire(&_OnDeck) != ESelf) {
      ParkCommon(ESelf, 0);
    }
  
    // Self is now in the OnDeck position and will remain so until it
    // manages to acquire the lock.

*** 568,578 ****
    // but only one concurrent consumer (detacher of RATs).
    // Consider protecting this critical section with schedctl on Solaris.
    // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
    // picks a successor and marks that thread as OnDeck. That successor
    // thread will then clear OnDeck once it eventually acquires the outer lock.
!   if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
      return;
    }
  
    ParkEvent * List = _EntryList;
    if (List != NULL) {
--- 562,572 ----
    // but only one concurrent consumer (detacher of RATs).
    // Consider protecting this critical section with schedctl on Solaris.
    // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
    // picks a successor and marks that thread as OnDeck. That successor
    // thread will then clear OnDeck once it eventually acquires the outer lock.
!   if (Atomic::cmpxchg((ParkEvent*)_LBIT, &_OnDeck, (ParkEvent*)NULL) != NULL) {
      return;
    }
  
    ParkEvent * List = _EntryList;
    if (List != NULL) {

*** 590,600 ****
    // Pass OnDeck role to w, ensuring that _EntryList has been set first.
    // w will clear _OnDeck once it acquires the outer lock.
    // Note that once we set _OnDeck that thread can acquire the mutex, proceed
    // with its critical section and then enter this code to unlock the mutex. So
    // you can have multiple threads active in IUnlock at the same time.
!   OrderAccess::release_store_ptr(&_OnDeck, w);
  
    // Another optional optimization ...
    // For heavily contended locks it's not uncommon that some other
    // thread acquired the lock while this thread was arranging succession.
    // Try to defer the unpark() operation - Delegate the responsibility
--- 584,594 ----
    // Pass OnDeck role to w, ensuring that _EntryList has been set first.
    // w will clear _OnDeck once it acquires the outer lock.
    // Note that once we set _OnDeck that thread can acquire the mutex, proceed
    // with its critical section and then enter this code to unlock the mutex. So
    // you can have multiple threads active in IUnlock at the same time.
!   OrderAccess::release_store(&_OnDeck, w);
  
    // Another optional optimization ...
    // For heavily contended locks it's not uncommon that some other
    // thread acquired the lock while this thread was arranging succession.
    // Try to defer the unpark() operation - Delegate the responsibility

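Aside: this release_store(&_OnDeck, w) is the publishing half of the handoff that ILock's load_acquire(&_OnDeck) spins on; the release/acquire pair guarantees the successor observes _EntryList as it stood before the store. A toy illustration of that pairing with std::atomic (stand-in names throughout):

#include <atomic>
#include <cassert>
#include <thread>

std::atomic<void*> on_deck{nullptr};
int entry_list_state = 0;            // stands in for _EntryList bookkeeping

void unlocker(void* successor) {
  entry_list_state = 42;             // ordinary store, published below
  on_deck.store(successor, std::memory_order_release);
}

void successor_thread(void* self) {
  while (on_deck.load(std::memory_order_acquire) != self) {
    std::this_thread::yield();       // stand-in for ParkCommon()
  }
  assert(entry_list_state == 42);    // acquire pairing makes this visible
}

int main() {
  int token;
  std::thread t(successor_thread, &token);
  unlocker(&token);
  t.join();
  return 0;
}
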
*** 614,624 ****
    // drain RATs from cxq into EntryList
    // Detach RATs segment with CAS and then merge into EntryList
    for (;;) {
      // optional optimization - if locked, the owner is responsible for succession
      if (cxq & _LBIT) goto Punt;
!     const intptr_t vfy = CASPTR(&_LockWord, cxq, cxq & _LBIT);
      if (vfy == cxq) break;
      cxq = vfy;
      // Interference - LockWord changed - Just retry
      // We can see concurrent interference from contending threads
      // pushing themselves onto the cxq or from lock-unlock operations.
--- 608,618 ----
    // drain RATs from cxq into EntryList
    // Detach RATs segment with CAS and then merge into EntryList
    for (;;) {
      // optional optimization - if locked, the owner is responsible for succession
      if (cxq & _LBIT) goto Punt;
!     const intptr_t vfy = Atomic::cmpxchg(cxq & _LBIT, &_LockWord.FullWord, cxq);
      if (vfy == cxq) break;
      cxq = vfy;
      // Interference - LockWord changed - Just retry
      // We can see concurrent interference from contending threads
      // pushing themselves onto the cxq or from lock-unlock operations.

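Aside: the drain detaches the whole cxq segment with a single CAS whose new value, cxq & _LBIT, is zero on this path (a set lock bit Punts first), so concurrent producers can keep pushing while the lone consumer claims the entire chain at once. A sketch of that detach with std::atomic (illustrative names):

#include <atomic>
#include <cstdint>

struct Waiter { Waiter* next; };

static const intptr_t LBIT = 1;

// Detach every queued waiter in one CAS, or return nullptr if there is
// nothing to do. Mirrors the loop above: a set lock bit means the new
// owner handles succession (the "Punt" case).
Waiter* detach_all(std::atomic<intptr_t>& word) {
  intptr_t cxq = word.load();
  for (;;) {
    if (cxq & LBIT) return nullptr;            // locked - Punt
    if (cxq == 0) return nullptr;              // empty - nothing queued
    // New value keeps only the lock bit (zero here), claiming the chain.
    if (word.compare_exchange_weak(cxq, cxq & LBIT)) {
      return reinterpret_cast<Waiter*>(cxq & ~LBIT);
    }
    // Interference - producers pushed more waiters; cxq refreshed, retry.
  }
}

int main() {
  Waiter w{nullptr};
  std::atomic<intptr_t> word{reinterpret_cast<intptr_t>(&w)};
  return detach_all(word) == &w ? 0 : 1;
}
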
*** 691,701 ****
    // push nfy onto the cxq
    for (;;) {
      const intptr_t v = _LockWord.FullWord;
      assert((v & 0xFF) == _LBIT, "invariant");
      nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
!     if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break;
      // interference - _LockWord changed -- just retry
    }
    // Note that setting Notified before pushing nfy onto the cxq is
    // also legal and safe, but the safety properties are much more
    // subtle, so for the sake of code stewardship ...
--- 685,695 ----
    // push nfy onto the cxq
    for (;;) {
      const intptr_t v = _LockWord.FullWord;
      assert((v & 0xFF) == _LBIT, "invariant");
      nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
!     if (Atomic::cmpxchg(intptr_t(nfy)|_LBIT, &_LockWord.FullWord, v) == v) break;
      // interference - _LockWord changed -- just retry
    }
    // Note that setting Notified before pushing nfy onto the cxq is
    // also legal and safe, but the safety properties are much more
    // subtle, so for the sake of code stewardship ...

*** 838,848 ****
    } else {
      // A prior notify() operation moved ESelf from the WaitSet to the cxq.
      // ESelf is now on the cxq, EntryList or at the OnDeck position.
      // The following fragment is extracted from Monitor::ILock()
      for (;;) {
!       if (OrderAccess::load_ptr_acquire(&_OnDeck) == ESelf && TrySpin(Self)) break;
        ParkCommon(ESelf, 0);
      }
      assert(_OnDeck == ESelf, "invariant");
      _OnDeck = NULL;
    }
--- 832,842 ----
    } else {
      // A prior notify() operation moved ESelf from the WaitSet to the cxq.
      // ESelf is now on the cxq, EntryList or at the OnDeck position.
      // The following fragment is extracted from Monitor::ILock()
      for (;;) {
!       if (OrderAccess::load_acquire(&_OnDeck) == ESelf && TrySpin(Self)) break;
        ParkCommon(ESelf, 0);
      }
      assert(_OnDeck == ESelf, "invariant");
      _OnDeck = NULL;
    }

*** 1056,1066 ****
    // At any given time there is at most one ondeck thread.
    // ondeck implies not resident on cxq and not resident on EntryList
    // Only the OnDeck thread can try to acquire -- contend for -- the lock.
    // CONSIDER: use Self->OnDeck instead of m->OnDeck.
    for (;;) {
!     if (OrderAccess::load_ptr_acquire(&_OnDeck) == ESelf && TrySpin(NULL)) break;
      ParkCommon(ESelf, 0);
    }
  
    assert(_OnDeck == ESelf, "invariant");
    _OnDeck = NULL;
--- 1050,1060 ----
    // At any given time there is at most one ondeck thread.
    // ondeck implies not resident on cxq and not resident on EntryList
    // Only the OnDeck thread can try to acquire -- contend for -- the lock.
    // CONSIDER: use Self->OnDeck instead of m->OnDeck.
    for (;;) {
!     if (OrderAccess::load_acquire(&_OnDeck) == ESelf && TrySpin(NULL)) break;
      ParkCommon(ESelf, 0);
    }
  
    assert(_OnDeck == ESelf, "invariant");
    _OnDeck = NULL;