< prev index next >

src/hotspot/share/runtime/thread.cpp

Print this page
rev 47400 : [mq]: cmpxchg_ptr

*** 4702,4714 **** typedef volatile intptr_t MutexT; // Mux Lock-word enum MuxBits { LOCKBIT = 1 }; void Thread::muxAcquire(volatile intptr_t * Lock, const char * LockName) { ! intptr_t w = Atomic::cmpxchg_ptr(LOCKBIT, Lock, 0); if (w == 0) return; ! if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { return; } TEVENT(muxAcquire - Contention); ParkEvent * const Self = Thread::current()->_MuxEvent; --- 4702,4714 ---- typedef volatile intptr_t MutexT; // Mux Lock-word enum MuxBits { LOCKBIT = 1 }; void Thread::muxAcquire(volatile intptr_t * Lock, const char * LockName) { ! intptr_t w = Atomic::cmpxchg((intptr_t)LOCKBIT, Lock, (intptr_t)0); if (w == 0) return; ! if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) { return; } TEVENT(muxAcquire - Contention); ParkEvent * const Self = Thread::current()->_MuxEvent;
*** 4717,4727 **** int its = (os::is_MP() ? 100 : 0) + 1; // Optional spin phase: spin-then-park strategy while (--its >= 0) { w = *Lock; ! if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { return; } } Self->reset(); --- 4717,4727 ---- int its = (os::is_MP() ? 100 : 0) + 1; // Optional spin phase: spin-then-park strategy while (--its >= 0) { w = *Lock; ! if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) { return; } } Self->reset();
*** 4730,4760 **** // CAS() both serializes execution and ratifies the fetched *Lock value. OrderAccess::fence(); for (;;) { w = *Lock; if ((w & LOCKBIT) == 0) { ! if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { Self->OnList = 0; // hygiene - allows stronger asserts return; } continue; // Interference -- *Lock changed -- Just retry } assert(w & LOCKBIT, "invariant"); Self->ListNext = (ParkEvent *) (w & ~LOCKBIT); ! if (Atomic::cmpxchg_ptr(intptr_t(Self)|LOCKBIT, Lock, w) == w) break; } while (Self->OnList != 0) { Self->park(); } } } void Thread::muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev) { ! intptr_t w = Atomic::cmpxchg_ptr(LOCKBIT, Lock, 0); if (w == 0) return; ! if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { return; } TEVENT(muxAcquire - Contention); ParkEvent * ReleaseAfter = NULL; --- 4730,4760 ---- // CAS() both serializes execution and ratifies the fetched *Lock value. OrderAccess::fence(); for (;;) { w = *Lock; if ((w & LOCKBIT) == 0) { ! if (Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) { Self->OnList = 0; // hygiene - allows stronger asserts return; } continue; // Interference -- *Lock changed -- Just retry } assert(w & LOCKBIT, "invariant"); Self->ListNext = (ParkEvent *) (w & ~LOCKBIT); ! if (Atomic::cmpxchg(intptr_t(Self)|LOCKBIT, Lock, w) == w) break; } while (Self->OnList != 0) { Self->park(); } } } void Thread::muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev) { ! intptr_t w = Atomic::cmpxchg((intptr_t)LOCKBIT, Lock, (intptr_t)0); if (w == 0) return; ! if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) { return; } TEVENT(muxAcquire - Contention); ParkEvent * ReleaseAfter = NULL;
*** 4767,4777 **** int its = (os::is_MP() ? 100 : 0) + 1; // Optional spin phase: spin-then-park strategy while (--its >= 0) { w = *Lock; ! if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { if (ReleaseAfter != NULL) { ParkEvent::Release(ReleaseAfter); } return; } --- 4767,4777 ---- int its = (os::is_MP() ? 100 : 0) + 1; // Optional spin phase: spin-then-park strategy while (--its >= 0) { w = *Lock; ! if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) { if (ReleaseAfter != NULL) { ParkEvent::Release(ReleaseAfter); } return; }
*** 4783,4793 **** // CAS() both serializes execution and ratifies the fetched *Lock value. OrderAccess::fence(); for (;;) { w = *Lock; if ((w & LOCKBIT) == 0) { ! if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { ev->OnList = 0; // We call ::Release while holding the outer lock, thus // artificially lengthening the critical section. // Consider deferring the ::Release() until the subsequent unlock(), // after we've dropped the outer lock. --- 4783,4793 ---- // CAS() both serializes execution and ratifies the fetched *Lock value. OrderAccess::fence(); for (;;) { w = *Lock; if ((w & LOCKBIT) == 0) { ! if (Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) { ev->OnList = 0; // We call ::Release while holding the outer lock, thus // artificially lengthening the critical section. // Consider deferring the ::Release() until the subsequent unlock(), // after we've dropped the outer lock.
*** 4798,4808 **** } continue; // Interference -- *Lock changed -- Just retry } assert(w & LOCKBIT, "invariant"); ev->ListNext = (ParkEvent *) (w & ~LOCKBIT); ! if (Atomic::cmpxchg_ptr(intptr_t(ev)|LOCKBIT, Lock, w) == w) break; } while (ev->OnList != 0) { ev->park(); } --- 4798,4808 ---- } continue; // Interference -- *Lock changed -- Just retry } assert(w & LOCKBIT, "invariant"); ev->ListNext = (ParkEvent *) (w & ~LOCKBIT); ! if (Atomic::cmpxchg(intptr_t(ev)|LOCKBIT, Lock, w) == w) break; } while (ev->OnList != 0) { ev->park(); }
*** 4834,4855 **** // bidirectional fence/MEMBAR semantics, ensuring that all prior memory operations // executed within the critical section are complete and globally visible before the // store (CAS) to the lock-word that releases the lock becomes globally visible. void Thread::muxRelease(volatile intptr_t * Lock) { for (;;) { ! const intptr_t w = Atomic::cmpxchg_ptr(0, Lock, LOCKBIT); assert(w & LOCKBIT, "invariant"); if (w == LOCKBIT) return; ParkEvent * const List = (ParkEvent *) (w & ~LOCKBIT); assert(List != NULL, "invariant"); assert(List->OnList == intptr_t(Lock), "invariant"); ParkEvent * const nxt = List->ListNext; guarantee((intptr_t(nxt) & LOCKBIT) == 0, "invariant"); // The following CAS() releases the lock and pops the head element. // The CAS() also ratifies the previously fetched lock-word value. ! if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) { continue; } List->OnList = 0; OrderAccess::fence(); List->unpark(); --- 4834,4855 ---- // bidirectional fence/MEMBAR semantics, ensuring that all prior memory operations // executed within the critical section are complete and globally visible before the // store (CAS) to the lock-word that releases the lock becomes globally visible. void Thread::muxRelease(volatile intptr_t * Lock) { for (;;) { ! const intptr_t w = Atomic::cmpxchg((intptr_t)0, Lock, (intptr_t)LOCKBIT); assert(w & LOCKBIT, "invariant"); if (w == LOCKBIT) return; ParkEvent * const List = (ParkEvent *) (w & ~LOCKBIT); assert(List != NULL, "invariant"); assert(List->OnList == intptr_t(Lock), "invariant"); ParkEvent * const nxt = List->ListNext; guarantee((intptr_t(nxt) & LOCKBIT) == 0, "invariant"); // The following CAS() releases the lock and pops the head element. // The CAS() also ratifies the previously fetched lock-word value. ! if (Atomic::cmpxchg(intptr_t(nxt), Lock, w) != w) { continue; } List->OnList = 0; OrderAccess::fence(); List->unpark();
< prev index next >