--- old/src/hotspot/share/logging/logTag.hpp 2020-01-27 14:36:37.000000000 -0500 +++ new/src/hotspot/share/logging/logTag.hpp 2020-01-27 14:36:37.000000000 -0500 @@ -117,6 +117,7 @@ LOG_TAG(oops) \ LOG_TAG(oopstorage) \ LOG_TAG(os) \ + LOG_TAG(owner) \ LOG_TAG(pagesize) \ LOG_TAG(patch) \ LOG_TAG(path) \ --- old/src/hotspot/share/runtime/objectMonitor.cpp 2020-01-27 14:36:38.000000000 -0500 +++ new/src/hotspot/share/runtime/objectMonitor.cpp 2020-01-27 14:36:38.000000000 -0500 @@ -245,7 +245,7 @@ // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors. Thread * const Self = THREAD; - void * cur = Atomic::cmpxchg(&_owner, (void*)NULL, Self); + void* cur = try_set_owner_from(NULL, Self); if (cur == NULL) { assert(_recursions == 0, "invariant"); return; @@ -260,9 +260,7 @@ if (Self->is_lock_owned((address)cur)) { assert(_recursions == 0, "internal state error"); _recursions = 1; - // Commute owner from a thread-specific on-stack BasicLockObject address to - // a full-fledged "Thread *". - _owner = Self; + set_owner_from_BasicLock(cur, Self); // Convert from BasicLock* to Thread*. return; } @@ -403,7 +401,7 @@ int ObjectMonitor::TryLock(Thread * Self) { void * own = _owner; if (own != NULL) return 0; - if (Atomic::replace_if_null(&_owner, Self)) { + if (try_set_owner_from(NULL, Self) == NULL) { assert(_recursions == 0, "invariant"); return 1; } @@ -864,13 +862,10 @@ void ObjectMonitor::exit(bool not_suspended, TRAPS) { Thread * const Self = THREAD; if (THREAD != _owner) { - if (THREAD->is_lock_owned((address) _owner)) { - // Transmute _owner from a BasicLock pointer to a Thread address. - // We don't need to hold _mutex for this transition. - // Non-null to Non-null is safe as long as all readers can - // tolerate either flavor. + void* cur = _owner; + if (THREAD->is_lock_owned((address)cur)) { assert(_recursions == 0, "invariant"); - _owner = THREAD; + set_owner_from_BasicLock(cur, Self); // Convert from BasicLock* to Thread*. 
_recursions = 0; } else { // Apparent unbalanced locking ... @@ -914,10 +909,15 @@ for (;;) { assert(THREAD == _owner, "invariant"); + // Drop the lock. // release semantics: prior loads and stores from within the critical section // must not float (reorder) past the following store that drops the lock. - Atomic::release_store(&_owner, (void*)NULL); // drop the lock - OrderAccess::storeload(); // See if we need to wake a successor + // Uses a storeload to separate release_store(owner) from the + // successor check. The try_set_owner_from() below uses cmpxchg() so + // we get the fence down there. + release_clear_owner(Self); + OrderAccess::storeload(); + if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) { return; } @@ -959,7 +959,7 @@ // to reacquire the lock the responsibility for ensuring succession // falls to the new owner. // - if (!Atomic::replace_if_null(&_owner, THREAD)) { + if (try_set_owner_from(NULL, Self) != NULL) { return; } @@ -1092,8 +1092,9 @@ Wakee = NULL; // Drop the lock - Atomic::release_store(&_owner, (void*)NULL); - OrderAccess::fence(); // ST _owner vs LD in unpark() + // Uses a fence to separate release_store(owner) from the LD in unpark(). + release_clear_owner(Self); + OrderAccess::fence(); DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self); Trigger->unpark(); @@ -1120,9 +1121,10 @@ assert(InitDone, "Unexpectedly not initialized"); if (THREAD != _owner) { - if (THREAD->is_lock_owned ((address)_owner)) { + void* cur = _owner; + if (THREAD->is_lock_owned((address)cur)) { assert(_recursions == 0, "internal state error"); - _owner = THREAD; // Convert from basiclock addr to Thread addr + set_owner_from_BasicLock(cur, Self); // Convert from BasicLock* to Thread*. 
_recursions = 0; } } @@ -1170,8 +1172,9 @@ if (_owner == THREAD) { return true; } - if (THREAD->is_lock_owned((address)_owner)) { - _owner = THREAD; // convert from BasicLock addr to Thread addr + void* cur = _owner; + if (THREAD->is_lock_owned((address)cur)) { + set_owner_from_BasicLock(cur, THREAD); // Convert from BasicLock* to Thread*. _recursions = 0; return true; } @@ -1680,7 +1683,7 @@ Thread * ox = (Thread *) _owner; if (ox == NULL) { - ox = (Thread*)Atomic::cmpxchg(&_owner, (void*)NULL, Self); + ox = (Thread*)try_set_owner_from(NULL, Self); if (ox == NULL) { // The CAS succeeded -- this thread acquired ownership // Take care of some bookkeeping to exit spin state. --- old/src/hotspot/share/runtime/objectMonitor.hpp 2020-01-27 14:36:39.000000000 -0500 +++ new/src/hotspot/share/runtime/objectMonitor.hpp 2020-01-27 14:36:39.000000000 -0500 @@ -232,7 +232,15 @@ intptr_t is_entered(Thread* current) const; void* owner() const; - void set_owner(void* owner); + // Clear _owner field; current value must match old_value. + void release_clear_owner(void* old_value); + // Simply set _owner field to new_value; current value must match old_value. + void set_owner_from(void* old_value, void* new_value); + // Simply set _owner field to self; current value must match basic_lock_p. + void set_owner_from_BasicLock(void* basic_lock_p, Thread* self); + // Try to set _owner field to new_value if the current value matches + // old_value. Otherwise, does not change the _owner field. 
+ void* try_set_owner_from(void* old_value, void* new_value); jint waiters() const; --- old/src/hotspot/share/runtime/objectMonitor.inline.hpp 2020-01-27 14:36:40.000000000 -0500 +++ new/src/hotspot/share/runtime/objectMonitor.inline.hpp 2020-01-27 14:36:40.000000000 -0500 @@ -25,6 +25,7 @@ #ifndef SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP #define SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP +#include "logging/log.hpp" #include "runtime/atomic.hpp" inline intptr_t ObjectMonitor::is_entered(TRAPS) const { @@ -84,8 +85,56 @@ return _contentions; } -inline void ObjectMonitor::set_owner(void* owner) { - _owner = owner; +// Clear _owner field; current value must match old_value. +inline void ObjectMonitor::release_clear_owner(void* old_value) { + void* prev = _owner; + assert(prev == old_value, "unexpected prev owner=" INTPTR_FORMAT + ", expected=" INTPTR_FORMAT, p2i(prev), p2i(old_value)); + Atomic::release_store(&_owner, (void*)NULL); + log_trace(monitorinflation, owner)("release_clear_owner(): mid=" + INTPTR_FORMAT ", prev=" INTPTR_FORMAT, + p2i(this), p2i(prev)); +} + +// Simply set _owner field to new_value; current value must match old_value. +// (Simple means no memory sync needed.) +inline void ObjectMonitor::set_owner_from(void* old_value, void* new_value) { + void* prev = _owner; + assert(prev == old_value, "unexpected prev owner=" INTPTR_FORMAT + ", expected=" INTPTR_FORMAT, p2i(prev), p2i(old_value)); + _owner = new_value; + log_trace(monitorinflation, owner)("set_owner_from(): mid=" + INTPTR_FORMAT ", prev=" INTPTR_FORMAT + ", new=" INTPTR_FORMAT, p2i(this), + p2i(prev), p2i(new_value)); +} + +// Simply set _owner field to self; current value must match basic_lock_p. 
+inline void ObjectMonitor::set_owner_from_BasicLock(void* basic_lock_p, Thread* self) { + void* prev = _owner; + assert(prev == basic_lock_p, "unexpected prev owner=" INTPTR_FORMAT + ", expected=" INTPTR_FORMAT, p2i(prev), p2i(basic_lock_p)); + // Non-null owner field to non-null owner field is safe without + // cmpxchg() as long as all readers can tolerate either flavor. + _owner = self; + log_trace(monitorinflation, owner)("set_owner_from_BasicLock(): mid=" + INTPTR_FORMAT ", prev=" INTPTR_FORMAT + ", new=" INTPTR_FORMAT ", basic_lock_p=" + INTPTR_FORMAT, p2i(this), p2i(prev), + p2i(self), p2i(basic_lock_p)); +} + +// Try to set _owner field to new_value if the current value matches +// old_value. Otherwise, does not change the _owner field. +inline void* ObjectMonitor::try_set_owner_from(void* old_value, void* new_value) { + void* prev = Atomic::cmpxchg(&_owner, old_value, new_value); + if (prev == old_value) { + log_trace(monitorinflation, owner)("try_set_owner_from(): mid=" + INTPTR_FORMAT ", prev=" INTPTR_FORMAT + ", new=" INTPTR_FORMAT, p2i(this), + p2i(prev), p2i(new_value)); + } + return prev; } #endif // SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP --- old/src/hotspot/share/runtime/synchronizer.cpp 2020-01-27 14:36:42.000000000 -0500 +++ new/src/hotspot/share/runtime/synchronizer.cpp 2020-01-27 14:36:41.000000000 -0500 @@ -240,7 +240,7 @@ // and last are the inflated Java Monitor (ObjectMonitor) checks. lock->set_displaced_header(markWord::unused_mark()); - if (owner == NULL && Atomic::replace_if_null(&(m->_owner), self)) { + if (owner == NULL && m->try_set_owner_from(NULL, self) == NULL) { assert(m->_recursions == 0, "invariant"); return true; } @@ -1404,7 +1404,7 @@ // Note that a thread can inflate an object // that it has stack-locked -- as might happen in wait() -- directly // with CAS. That is, we can avoid the xchg-NULL .... ST idiom. 
- m->set_owner(mark.locker()); + m->set_owner_from(NULL, mark.locker()); m->set_object(object); // TODO-FIXME: assert BasicLock->dhw != 0.