< prev index next >
src/hotspot/share/runtime/objectMonitor.cpp
Print this page
rev 57560 : imported patch 8236035.patch.cr0
rev 57561 : dholmes CR - rename simply_set_owner_from() -> set_owner_from() and simply_set_owner_from_BasicLock() -> set_owner_from_BasicLock(); rename release_clear_owner_with_barrier() -> release_clear_owner() and refactor barrier code back into the call sites.
*** 243,253 ****
void ObjectMonitor::enter(TRAPS) {
// The following code is ordered to check the most common cases first
// and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
Thread * const Self = THREAD;
! void * cur = Atomic::cmpxchg(&_owner, (void*)NULL, Self);
if (cur == NULL) {
assert(_recursions == 0, "invariant");
return;
}
--- 243,253 ----
void ObjectMonitor::enter(TRAPS) {
// The following code is ordered to check the most common cases first
// and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
Thread * const Self = THREAD;
! void* cur = try_set_owner_from(NULL, Self);
if (cur == NULL) {
assert(_recursions == 0, "invariant");
return;
}
*** 258,270 ****
}
if (Self->is_lock_owned((address)cur)) {
assert(_recursions == 0, "internal state error");
_recursions = 1;
! // Commute owner from a thread-specific on-stack BasicLockObject address to
! // a full-fledged "Thread *".
! _owner = Self;
return;
}
// We've encountered genuine contention.
assert(Self->_Stalled == 0, "invariant");
--- 258,268 ----
}
if (Self->is_lock_owned((address)cur)) {
assert(_recursions == 0, "internal state error");
_recursions = 1;
! set_owner_from_BasicLock(cur, Self); // Convert from BasicLock* to Thread*.
return;
}
// We've encountered genuine contention.
assert(Self->_Stalled == 0, "invariant");
*** 401,411 ****
// Callers must compensate as needed.
int ObjectMonitor::TryLock(Thread * Self) {
void * own = _owner;
if (own != NULL) return 0;
! if (Atomic::replace_if_null(&_owner, Self)) {
assert(_recursions == 0, "invariant");
return 1;
}
// The lock had been free momentarily, but we lost the race to the lock.
// Interference -- the CAS failed.
--- 399,409 ----
// Callers must compensate as needed.
int ObjectMonitor::TryLock(Thread * Self) {
void * own = _owner;
if (own != NULL) return 0;
! if (try_set_owner_from(NULL, Self) == NULL) {
assert(_recursions == 0, "invariant");
return 1;
}
// The lock had been free momentarily, but we lost the race to the lock.
// Interference -- the CAS failed.
*** 862,878 ****
// of such futile wakeups is low.
void ObjectMonitor::exit(bool not_suspended, TRAPS) {
Thread * const Self = THREAD;
if (THREAD != _owner) {
! if (THREAD->is_lock_owned((address) _owner)) {
! // Transmute _owner from a BasicLock pointer to a Thread address.
! // We don't need to hold _mutex for this transition.
! // Non-null to Non-null is safe as long as all readers can
! // tolerate either flavor.
assert(_recursions == 0, "invariant");
! _owner = THREAD;
_recursions = 0;
} else {
// Apparent unbalanced locking ...
// Naively we'd like to throw IllegalMonitorStateException.
// As a practical matter we can neither allocate nor throw an
--- 860,873 ----
// of such futile wakeups is low.
void ObjectMonitor::exit(bool not_suspended, TRAPS) {
Thread * const Self = THREAD;
if (THREAD != _owner) {
! void* cur = _owner;
! if (THREAD->is_lock_owned((address)cur)) {
assert(_recursions == 0, "invariant");
! set_owner_from_BasicLock(cur, Self); // Convert from BasicLock* to Thread*.
_recursions = 0;
} else {
// Apparent unbalanced locking ...
// Naively we'd like to throw IllegalMonitorStateException.
// As a practical matter we can neither allocate nor throw an
*** 912,925 ****
#endif
for (;;) {
assert(THREAD == _owner, "invariant");
// release semantics: prior loads and stores from within the critical section
// must not float (reorder) past the following store that drops the lock.
! Atomic::release_store(&_owner, (void*)NULL); // drop the lock
! OrderAccess::storeload(); // See if we need to wake a successor
if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
return;
}
// Other threads are blocked trying to acquire the lock.
--- 907,925 ----
#endif
for (;;) {
assert(THREAD == _owner, "invariant");
+ // Drop the lock.
// release semantics: prior loads and stores from within the critical section
// must not float (reorder) past the following store that drops the lock.
! // Uses a storeload to separate release_store(owner) from the
! // successor check. The try_set_owner_from() below uses cmpxchg() so
! // we get the fence down there.
! release_clear_owner(Self);
! OrderAccess::storeload();
!
if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
return;
}
// Other threads are blocked trying to acquire the lock.
*** 957,967 ****
// Only the current lock owner can manipulate the EntryList or
// drain _cxq, so we need to reacquire the lock. If we fail
// to reacquire the lock the responsibility for ensuring succession
// falls to the new owner.
//
! if (!Atomic::replace_if_null(&_owner, THREAD)) {
return;
}
guarantee(_owner == THREAD, "invariant");
--- 957,967 ----
// Only the current lock owner can manipulate the EntryList or
// drain _cxq, so we need to reacquire the lock. If we fail
// to reacquire the lock the responsibility for ensuring succession
// falls to the new owner.
//
! if (try_set_owner_from(NULL, Self) != NULL) {
return;
}
guarantee(_owner == THREAD, "invariant");
*** 1090,1101 ****
// The thread associated with Wakee may have grabbed the lock and "Wakee" may be
// out-of-scope (non-extant).
Wakee = NULL;
// Drop the lock
! Atomic::release_store(&_owner, (void*)NULL);
! OrderAccess::fence(); // ST _owner vs LD in unpark()
DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
Trigger->unpark();
// Maintain stats and report events to JVMTI
--- 1090,1102 ----
// The thread associated with Wakee may have grabbed the lock and "Wakee" may be
// out-of-scope (non-extant).
Wakee = NULL;
// Drop the lock
! // Uses a fence to separate release_store(owner) from the LD in unpark().
! release_clear_owner(Self);
! OrderAccess::fence();
DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
Trigger->unpark();
// Maintain stats and report events to JVMTI
*** 1118,1130 ****
JavaThread *jt = (JavaThread *)THREAD;
assert(InitDone, "Unexpectedly not initialized");
if (THREAD != _owner) {
! if (THREAD->is_lock_owned ((address)_owner)) {
assert(_recursions == 0, "internal state error");
! _owner = THREAD; // Convert from basiclock addr to Thread addr
_recursions = 0;
}
}
guarantee(Self == _owner, "complete_exit not owner");
--- 1119,1132 ----
JavaThread *jt = (JavaThread *)THREAD;
assert(InitDone, "Unexpectedly not initialized");
if (THREAD != _owner) {
! void* cur = _owner;
! if (THREAD->is_lock_owned((address)cur)) {
assert(_recursions == 0, "internal state error");
! set_owner_from_BasicLock(cur, Self); // Convert from BasicLock* to Thread*.
_recursions = 0;
}
}
guarantee(Self == _owner, "complete_exit not owner");
*** 1168,1179 ****
// is not the owner, that exception will be replaced by the IMSE.
bool ObjectMonitor::check_owner(Thread* THREAD) {
if (_owner == THREAD) {
return true;
}
! if (THREAD->is_lock_owned((address)_owner)) {
! _owner = THREAD; // convert from BasicLock addr to Thread addr
_recursions = 0;
return true;
}
THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
"current thread is not owner", false);
--- 1170,1182 ----
// is not the owner, that exception will be replaced by the IMSE.
bool ObjectMonitor::check_owner(Thread* THREAD) {
if (_owner == THREAD) {
return true;
}
! void* cur = _owner;
! if (THREAD->is_lock_owned((address)cur)) {
! set_owner_from_BasicLock(cur, THREAD); // Convert from BasicLock* to Thread*.
_recursions = 0;
return true;
}
THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
"current thread is not owner", false);
*** 1678,1688 ****
// the spin without prejudice or apply a "penalty" to the
// spin count-down variable "ctr", reducing it by 100, say.
Thread * ox = (Thread *) _owner;
if (ox == NULL) {
! ox = (Thread*)Atomic::cmpxchg(&_owner, (void*)NULL, Self);
if (ox == NULL) {
// The CAS succeeded -- this thread acquired ownership
// Take care of some bookkeeping to exit spin state.
if (_succ == Self) {
_succ = NULL;
--- 1681,1691 ----
// the spin without prejudice or apply a "penalty" to the
// spin count-down variable "ctr", reducing it by 100, say.
Thread * ox = (Thread *) _owner;
if (ox == NULL) {
! ox = (Thread*)try_set_owner_from(NULL, Self);
if (ox == NULL) {
// The CAS succeeded -- this thread acquired ownership
// Take care of some bookkeeping to exit spin state.
if (_succ == Self) {
_succ = NULL;
< prev index next >