--- old/src/hotspot/share/runtime/objectMonitor.inline.hpp	2019-12-11 14:52:17.000000000 -0500
+++ new/src/hotspot/share/runtime/objectMonitor.inline.hpp	2019-12-11 14:52:17.000000000 -0500
@@ -25,6 +25,7 @@
 #ifndef SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
 #define SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
 
+#include "logging/log.hpp"
 #include "runtime/atomic.hpp"
 
 inline intptr_t ObjectMonitor::is_entered(TRAPS) const {
@@ -51,19 +52,57 @@
   return _waiters;
 }
 
+// Returns NULL if DEFLATER_MARKER is observed.
 inline void* ObjectMonitor::owner() const {
-  return _owner;
+  void* owner = _owner;
+  return owner != DEFLATER_MARKER ? owner : NULL;
+}
+
+// Returns true if owner field == DEFLATER_MARKER and false otherwise.
+// This accessor is called when we really need to know if the owner
+// field == DEFLATER_MARKER and any non-NULL value won't do the trick.
+inline bool ObjectMonitor::owner_is_DEFLATER_MARKER() {
+  return _owner == DEFLATER_MARKER;
 }
 
 inline void ObjectMonitor::clear() {
   assert(Atomic::load(&_header).value() != 0, "must be non-zero");
+  assert(_owner == NULL, "must be NULL: owner=" INTPTR_FORMAT, p2i(_owner));
+#ifdef ASSERT
+  jint l_ref_count = ref_count();
+#endif
+  assert(l_ref_count == 0, "must be 0: l_ref_count=%d, ref_count=%d", l_ref_count, ref_count());
+
+  Atomic::store(&_header, markWord::zero());
+
+  clear_using_JT();
+}
+
+inline void ObjectMonitor::clear_using_JT() {
+  // Unlike other *_using_JT() functions, we cannot assert
+  // AsyncDeflateIdleMonitors or Thread::current()->is_Java_thread()
+  // because clear() calls this function for the rest of its checks.
+
+  if (AsyncDeflateIdleMonitors) {
+    // Async deflation protocol uses the header, owner and ref_count
+    // fields. While the ObjectMonitor being deflated is on the global free
+    // list, we leave those three fields alone; owner == DEFLATER_MARKER
+    // and ref_count < 0 will force any racing threads to retry. The
+    // header field is used by install_displaced_markword_in_object()
+    // in the last part of the deflation protocol so we cannot check
+    // its value here.
+    guarantee(_owner == NULL || _owner == DEFLATER_MARKER,
+              "must be NULL or DEFLATER_MARKER: owner=" INTPTR_FORMAT,
+              p2i(_owner));
+    jint l_ref_count = ref_count();
+    guarantee(l_ref_count <= 0, "must be <= 0: l_ref_count=%d, ref_count=%d", l_ref_count, ref_count());
+  }
   assert(_contentions == 0, "must be 0: contentions=%d", _contentions);
   assert(_waiters == 0, "must be 0: waiters=%d", _waiters);
   assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);
   assert(_object != NULL, "must be non-NULL");
-  assert(_owner == NULL, "must be NULL: owner=" INTPTR_FORMAT, p2i(_owner));
 
-  Atomic::store(&_header, markWord::zero());
-
+
+  set_allocation_state(Free);
   _object = NULL;
 }
@@ -84,8 +123,122 @@
   return _contentions;
 }
 
-inline void ObjectMonitor::set_owner(void* owner) {
-  _owner = owner;
+// Clear _owner field; current value must match old_value.
+// If needs_fence is true, we issue a fence() after the release_store().
+// Otherwise, a storeload() is good enough. See the callers for more info.
+inline void ObjectMonitor::release_clear_owner_with_barrier(void* old_value,
+                                                            bool needs_fence) {
+  void* prev = _owner;
+  ADIM_guarantee(prev == old_value, "unexpected prev owner=" INTPTR_FORMAT
+                 ", expected=" INTPTR_FORMAT, p2i(prev), p2i(old_value));
+  Atomic::release_store(&_owner, (void*)NULL);
+  if (needs_fence) {
+    OrderAccess::fence();
+  } else {
+    OrderAccess::storeload();
+  }
+  log_trace(monitorinflation, owner)("release_clear_owner_with_barrier(): mid="
+                                     INTPTR_FORMAT ", prev=" INTPTR_FORMAT
+                                     ", needs_fence=%d", p2i(this), p2i(prev),
+                                     needs_fence);
+}
+
+// Simply set _owner field to new_value; current value must match old_value.
+// (Simple means no memory sync needed.)
+inline void ObjectMonitor::simply_set_owner_from(void* new_value, void* old_value) {
+  void* prev = _owner;
+  ADIM_guarantee(prev == old_value, "unexpected prev owner=" INTPTR_FORMAT
+                 ", expected=" INTPTR_FORMAT, p2i(prev), p2i(old_value));
+  _owner = new_value;
+  log_trace(monitorinflation, owner)("simply_set_owner_from(): mid="
+                                     INTPTR_FORMAT ", prev=" INTPTR_FORMAT
+                                     ", new=" INTPTR_FORMAT, p2i(this),
+                                     p2i(prev), p2i(new_value));
+}
+
+// Simply set _owner field to new_value; current value must match old_value1 or old_value2.
+// (Simple means no memory sync needed.)
+inline void ObjectMonitor::simply_set_owner_from(void* new_value, void* old_value1, void* old_value2) {
+  void* prev = _owner;
+  ADIM_guarantee(prev == old_value1 || prev == old_value2,
+                 "unexpected prev owner=" INTPTR_FORMAT ", expected1="
+                 INTPTR_FORMAT " or expected2=" INTPTR_FORMAT, p2i(prev),
+                 p2i(old_value1), p2i(old_value2));
+  _owner = new_value;
+  log_trace(monitorinflation, owner)("simply_set_owner_from(old1=" INTPTR_FORMAT
+                                     ", old2=" INTPTR_FORMAT "): mid="
+                                     INTPTR_FORMAT ", prev=" INTPTR_FORMAT
+                                     ", new=" INTPTR_FORMAT, p2i(old_value1),
+                                     p2i(old_value2), p2i(this), p2i(prev),
+                                     p2i(new_value));
+}
+
+// Simply set _owner field to self; current value must match basic_lock_p.
+inline void ObjectMonitor::simply_set_owner_from_BasicLock(Thread* self, void* basic_lock_p) {
+  void* prev = _owner;
+  ADIM_guarantee(prev == basic_lock_p, "unexpected prev owner=" INTPTR_FORMAT
+                 ", expected=" INTPTR_FORMAT, p2i(prev), p2i(basic_lock_p));
+  // Non-null owner field to non-null owner field is safe without
+  // cmpxchg() as long as all readers can tolerate either flavor.
+  _owner = self;
+  log_trace(monitorinflation, owner)("simply_set_owner_from_BasicLock(): mid="
+                                     INTPTR_FORMAT ", prev=" INTPTR_FORMAT
+                                     ", new=" INTPTR_FORMAT ", basic_lock_p="
+                                     INTPTR_FORMAT, p2i(this), p2i(prev),
+                                     p2i(self), p2i(basic_lock_p));
+}
+
+// Try to set _owner field to new_value if the current value matches
+// old_value. Otherwise, does not change the _owner field.
+inline void* ObjectMonitor::try_set_owner_from(void* new_value, void* old_value) {
+  void* prev = Atomic::cmpxchg(&_owner, old_value, new_value);
+  if (prev == old_value) {
+    log_trace(monitorinflation, owner)("try_set_owner_from(): mid="
+                                       INTPTR_FORMAT ", prev=" INTPTR_FORMAT
+                                       ", new=" INTPTR_FORMAT, p2i(this),
+                                       p2i(prev), p2i(new_value));
+  }
+  return prev;
+}
+
+inline void ObjectMonitor::set_allocation_state(ObjectMonitor::AllocationState s) {
+  _allocation_state = s;
+}
+
+inline ObjectMonitor::AllocationState ObjectMonitor::allocation_state() const {
+  return _allocation_state;
+}
+
+inline bool ObjectMonitor::is_free() const {
+  return _allocation_state == Free;
+}
+
+inline bool ObjectMonitor::is_old() const {
+  return _allocation_state == Old;
+}
+
+inline bool ObjectMonitor::is_new() const {
+  return _allocation_state == New;
+}
+
+inline void ObjectMonitor::dec_ref_count() {
+  Atomic::dec(&_ref_count);
+  // Can be negative as part of async deflation protocol.
+  jint l_ref_count = ref_count();
+  ADIM_guarantee(AsyncDeflateIdleMonitors || l_ref_count >= 0,
+                 "sanity check: l_ref_count=%d, ref_count=%d", l_ref_count, ref_count());
+}
+
+inline void ObjectMonitor::inc_ref_count() {
+  Atomic::inc(&_ref_count);
+  // Can be negative as part of async deflation protocol.
+  jint l_ref_count = ref_count();
+  ADIM_guarantee(AsyncDeflateIdleMonitors || l_ref_count > 0,
+                 "sanity check: l_ref_count=%d, ref_count=%d", l_ref_count, ref_count());
+}
+
+inline jint ObjectMonitor::ref_count() const {
+  return Atomic::load(&_ref_count);
 }
 
 #endif // SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
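Reading aid (not part of the patch above): the comments in clear_using_JT() and the new ref_count accessors describe a handshake in which the deflater claims an idle monitor by installing DEFLATER_MARKER in the owner field and leaving ref_count negative, while a racing thread first publishes its interest by incrementing ref_count and then retries if it observes either condition. The standalone C++ sketch below models that handshake only; ToyMonitor, try_use_monitor, and the particular sentinel and negative values are hypothetical and are not the HotSpot implementation (which lives in ObjectMonitor and ObjectSynchronizer).

// Standalone sketch, NOT HotSpot code: models the owner/ref_count handshake
// described in the clear_using_JT() comment in the patch above.
#include <atomic>
#include <cstdint>
#include <iostream>

static void* const DEFLATER_MARKER =
    reinterpret_cast<void*>(static_cast<intptr_t>(-1));  // hypothetical sentinel

struct ToyMonitor {
  std::atomic<void*>   owner{nullptr};   // nullptr, an owning thread, or DEFLATER_MARKER
  std::atomic<int32_t> ref_count{0};     // left strongly negative by the deflater
};

// Publish interest by bumping ref_count, then re-check both fields. If the
// deflater already claimed the monitor (DEFLATER_MARKER or a negative count),
// undo the increment and report failure so the caller can retry.
bool try_use_monitor(ToyMonitor* m) {
  m->ref_count.fetch_add(1);
  if (m->owner.load() == DEFLATER_MARKER || m->ref_count.load() <= 0) {
    m->ref_count.fetch_sub(1);   // back out; async deflation won the race
    return false;
  }
  // ... monitor may be used safely while our reference is held ...
  m->ref_count.fetch_sub(1);     // drop the reference when done
  return true;
}

int main() {
  ToyMonitor m;
  std::cout << try_use_monitor(&m) << '\n';   // 1: usable
  m.owner.store(DEFLATER_MARKER);             // deflater claims the monitor
  m.ref_count.store(INT32_MIN / 2);           // hypothetical large negative bias
  std::cout << try_use_monitor(&m) << '\n';   // 0: caller must retry
}

In this sketch the deflater biases ref_count so far negative that a single racing increment cannot make it non-negative, which is one way to realize the "ref_count < 0 will force any racing threads to retry" behavior the patch comment describes.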