src/hotspot/share/runtime/thread.cpp
*** 859,869 ****
// GC Support
bool Thread::claim_par_threads_do(uintx claim_token) {
  uintx token = _threads_do_token;
  if (token != claim_token) {
!   uintx res = Atomic::cmpxchg(claim_token, &_threads_do_token, token);
    if (res == token) {
      return true;
    }
    guarantee(res == claim_token, "invariant");
  }
--- 859,869 ----
// GC Support
bool Thread::claim_par_threads_do(uintx claim_token) {
  uintx token = _threads_do_token;
  if (token != claim_token) {
!   uintx res = Atomic::cmpxchg(&_threads_do_token, token, claim_token);
    if (res == token) {
      return true;
    }
    guarantee(res == claim_token, "invariant");
  }
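The change throughout this file is mechanical: Atomic::cmpxchg's parameters move from (exchange_value, dest, compare_value) to (dest, compare_value, exchange_value), i.e. destination first, expected value second, new value last. For readers who want to try the claim-token pattern outside HotSpot, here is a minimal standalone sketch using std::atomic rather than HotSpot's Atomic class; every name in it is illustrative, not HotSpot's.

#include <atomic>
#include <cstdint>

// Standalone sketch of the claim-token pattern above. Illustrative only.
static std::atomic<std::uintptr_t> g_threads_do_token{0};

bool claim_par_threads_do_sketch(std::uintptr_t claim_token) {
  std::uintptr_t token = g_threads_do_token.load();
  if (token != claim_token) {
    // compare_exchange_strong(expected, desired) plays the role of the new
    // Atomic::cmpxchg(&dest, compare_value, exchange_value) ordering; on
    // failure it writes the observed value back into 'token'.
    if (g_threads_do_token.compare_exchange_strong(token, claim_token)) {
      return true;   // this thread claimed the current round
    }
    // Lost the race; the winner must have installed the same token.
  }
  return false;      // already claimed for this round
}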
*** 4873,4883 ****
typedef volatile int SpinLockT;
void Thread::SpinAcquire(volatile int * adr, const char * LockName) {
! if (Atomic::cmpxchg (1, adr, 0) == 0) {
    return; // normal fast-path return
  }
  // Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
  int ctr = 0;
--- 4873,4883 ----
typedef volatile int SpinLockT;
void Thread::SpinAcquire(volatile int * adr, const char * LockName) {
! if (Atomic::cmpxchg(adr, 0, 1) == 0) {
    return; // normal fast-path return
  }
  // Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
  int ctr = 0;
*** 4894,4904 ****
        }
      } else {
        SpinPause();
      }
    }
!   if (Atomic::cmpxchg(1, adr, 0) == 0) return;
  }
}
void Thread::SpinRelease(volatile int * adr) {
  assert(*adr != 0, "invariant");
--- 4894,4904 ----
        }
      } else {
        SpinPause();
      }
    }
!   if (Atomic::cmpxchg(adr, 0, 1) == 0) return;
  }
}
void Thread::SpinRelease(volatile int * adr) {
  assert(*adr != 0, "invariant");
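SpinAcquire/SpinRelease implement a plain test-and-set spin lock: the fast path tries to CAS the word from 0 to 1, and the slow path spins or yields until the word reads 0, then retries the CAS. A minimal sketch of the same protocol with std::atomic, omitting the Spin/Yield/Block policy; names are illustrative:

#include <atomic>

static std::atomic<int> spin_lock_word{0};

void spin_acquire_sketch() {
  int expected = 0;
  // Equivalent of Atomic::cmpxchg(adr, 0, 1) == 0: install 1 iff the word is 0.
  while (!spin_lock_word.compare_exchange_weak(expected, 1,
                                               std::memory_order_acquire)) {
    expected = 0;   // compare_exchange wrote the observed value; reset it
  }
}

void spin_release_sketch() {
  // The release store pairs with the acquire CAS; the real SpinRelease uses
  // an explicit fence followed by a plain store for the same effect.
  spin_lock_word.store(0, std::memory_order_release);
}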
*** 4966,4978 ****
const intptr_t LOCKBIT = 1;
void Thread::muxAcquire(volatile intptr_t * Lock, const char * LockName) {
! intptr_t w = Atomic::cmpxchg(LOCKBIT, Lock, (intptr_t)0);
  if (w == 0) return;
! if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
    return;
  }
  ParkEvent * const Self = Thread::current()->_MuxEvent;
  assert((intptr_t(Self) & LOCKBIT) == 0, "invariant");
--- 4966,4978 ----
const intptr_t LOCKBIT = 1;
void Thread::muxAcquire(volatile intptr_t * Lock, const char * LockName) {
! intptr_t w = Atomic::cmpxchg(Lock, (intptr_t)0, LOCKBIT);
  if (w == 0) return;
! if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(Lock, w, w|LOCKBIT) == w) {
    return;
  }
  ParkEvent * const Self = Thread::current()->_MuxEvent;
  assert((intptr_t(Self) & LOCKBIT) == 0, "invariant");
*** 4980,4990 ****
    int its = (os::is_MP() ? 100 : 0) + 1;
    // Optional spin phase: spin-then-park strategy
    while (--its >= 0) {
      w = *Lock;
!     if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
        return;
      }
    }
    Self->reset();
--- 4980,4990 ----
    int its = (os::is_MP() ? 100 : 0) + 1;
    // Optional spin phase: spin-then-park strategy
    while (--its >= 0) {
      w = *Lock;
!     if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(Lock, w, w|LOCKBIT) == w) {
        return;
      }
    }
    Self->reset();
*** 4993,5011 ****
    // CAS() both serializes execution and ratifies the fetched *Lock value.
    OrderAccess::fence();
    for (;;) {
      w = *Lock;
      if ((w & LOCKBIT) == 0) {
!       if (Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
          Self->OnList = 0; // hygiene - allows stronger asserts
          return;
        }
        continue; // Interference -- *Lock changed -- Just retry
      }
      assert(w & LOCKBIT, "invariant");
      Self->ListNext = (ParkEvent *) (w & ~LOCKBIT);
!     if (Atomic::cmpxchg(intptr_t(Self)|LOCKBIT, Lock, w) == w) break;
    }
    while (Self->OnList != 0) {
      Self->park();
    }
--- 4993,5011 ----
    // CAS() both serializes execution and ratifies the fetched *Lock value.
    OrderAccess::fence();
    for (;;) {
      w = *Lock;
      if ((w & LOCKBIT) == 0) {
!       if (Atomic::cmpxchg(Lock, w, w|LOCKBIT) == w) {
          Self->OnList = 0; // hygiene - allows stronger asserts
          return;
        }
        continue; // Interference -- *Lock changed -- Just retry
      }
      assert(w & LOCKBIT, "invariant");
      Self->ListNext = (ParkEvent *) (w & ~LOCKBIT);
!     if (Atomic::cmpxchg(Lock, w, intptr_t(Self)|LOCKBIT) == w) break;
    }
    while (Self->OnList != 0) {
      Self->park();
    }
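muxAcquire encodes two things in one word: bit 0 is the lock bit, and the remaining bits hold the head of a LIFO chain of parked waiters. Each iteration of the parking loop either CASes the lock bit on (keeping the chain intact) or pushes the current thread's event onto the chain (keeping the lock bit set). The following standalone sketch mirrors one iteration of that loop; WaiterSketch stands in for ParkEvent, and all names are illustrative:

#include <atomic>
#include <cstdint>

struct WaiterSketch {
  WaiterSketch* list_next = nullptr;
};

enum class MuxResult { ACQUIRED, ENQUEUED, RETRY };

static const std::intptr_t LOCKBIT_SKETCH = 1;
static std::atomic<std::intptr_t> mux_lock_word{0};

MuxResult mux_acquire_step(WaiterSketch* self) {
  std::intptr_t w = mux_lock_word.load();
  if ((w & LOCKBIT_SKETCH) == 0) {
    // Lock free: try to set the lock bit while preserving the waiter chain.
    if (mux_lock_word.compare_exchange_strong(w, w | LOCKBIT_SKETCH)) {
      return MuxResult::ACQUIRED;
    }
    return MuxResult::RETRY;     // interference -- the word changed, retry
  }
  // Lock held: link self in front of the current chain and CAS the new
  // head in, keeping the lock bit set.
  self->list_next = reinterpret_cast<WaiterSketch*>(w & ~LOCKBIT_SKETCH);
  if (mux_lock_word.compare_exchange_strong(
          w, reinterpret_cast<std::intptr_t>(self) | LOCKBIT_SKETCH)) {
    return MuxResult::ENQUEUED;  // caller now parks until unparked
  }
  return MuxResult::RETRY;
}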
*** 5037,5058 ****
// bidirectional fence/MEMBAR semantics, ensuring that all prior memory operations
// executed within the critical section are complete and globally visible before the
// store (CAS) to the lock-word that releases the lock becomes globally visible.
void Thread::muxRelease(volatile intptr_t * Lock) {
  for (;;) {
!   const intptr_t w = Atomic::cmpxchg((intptr_t)0, Lock, LOCKBIT);
    assert(w & LOCKBIT, "invariant");
    if (w == LOCKBIT) return;
    ParkEvent * const List = (ParkEvent *) (w & ~LOCKBIT);
    assert(List != NULL, "invariant");
    assert(List->OnList == intptr_t(Lock), "invariant");
    ParkEvent * const nxt = List->ListNext;
    guarantee((intptr_t(nxt) & LOCKBIT) == 0, "invariant");
    // The following CAS() releases the lock and pops the head element.
    // The CAS() also ratifies the previously fetched lock-word value.
!   if (Atomic::cmpxchg(intptr_t(nxt), Lock, w) != w) {
      continue;
    }
    List->OnList = 0;
    OrderAccess::fence();
    List->unpark();
--- 5037,5058 ----
// bidirectional fence/MEMBAR semantics, ensuring that all prior memory operations
// executed within the critical section are complete and globally visible before the
// store (CAS) to the lock-word that releases the lock becomes globally visible.
void Thread::muxRelease(volatile intptr_t * Lock) {
  for (;;) {
!   const intptr_t w = Atomic::cmpxchg(Lock, LOCKBIT, (intptr_t)0);
    assert(w & LOCKBIT, "invariant");
    if (w == LOCKBIT) return;
    ParkEvent * const List = (ParkEvent *) (w & ~LOCKBIT);
    assert(List != NULL, "invariant");
    assert(List->OnList == intptr_t(Lock), "invariant");
    ParkEvent * const nxt = List->ListNext;
    guarantee((intptr_t(nxt) & LOCKBIT) == 0, "invariant");
    // The following CAS() releases the lock and pops the head element.
    // The CAS() also ratifies the previously fetched lock-word value.
!   if (Atomic::cmpxchg(Lock, w, intptr_t(nxt)) != w) {
      continue;
    }
    List->OnList = 0;
    OrderAccess::fence();
    List->unpark();
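muxRelease is the mirror image: if the word is exactly LOCKBIT there are no waiters, and the CAS to 0 both releases the lock and ratifies that observation; otherwise a single CAS pops the head waiter and clears the lock bit, after which the head is unparked. A companion standalone sketch of one release attempt, using the same illustrative encoding as the acquire sketch above; note it re-reads the word with a plain load before the CAS, a slight simplification of the code's combined cmpxchg:

#include <atomic>
#include <cstdint>

struct ReleaseWaiterSketch {
  ReleaseWaiterSketch* list_next = nullptr;
};

static const std::intptr_t RELEASE_LOCKBIT = 1;
static std::atomic<std::intptr_t> release_lock_word{RELEASE_LOCKBIT};

// One release attempt; callers loop while this returns false.
bool mux_release_step() {
  std::intptr_t w = release_lock_word.load();
  if (w == RELEASE_LOCKBIT) {
    // No waiters: clearing the word releases the lock and ratifies that
    // the chain was empty when we looked.
    return release_lock_word.compare_exchange_strong(w, 0);
  }
  ReleaseWaiterSketch* head =
      reinterpret_cast<ReleaseWaiterSketch*>(w & ~RELEASE_LOCKBIT);
  // Pop the head and clear the lock bit in one CAS; the real code then
  // unparks 'head'.
  return release_lock_word.compare_exchange_strong(
      w, reinterpret_cast<std::intptr_t>(head->list_next));
}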