< prev index next >
src/hotspot/share/runtime/objectMonitor.inline.hpp
Print this page
rev 56634 : imported patch 8230876.patch
rev 56635 : v2.00 -> v2.05 (CR5/v2.05/8-for-jdk13) patches combined into one; merge with 8229212.patch; merge with jdk-14+11; merge with 8230184.patch; merge with 8230876.patch; merge with jdk-14+15; merge with jdk-14+18.
rev 56639 : loosen a couple more counter checks due to races observed in testing; simplify om_release() extraction of mid since list head or cur_mid_in_use is marked; simplify deflate_monitor_list() extraction of mid since there are no parallel deleters due to the safepoint; simplify deflate_monitor_list_using_JT() extraction of mid since list head or cur_mid_in_use is marked; prepend_block_to_lists() - simplify based on David H's comments; does not need load_acquire() or release_store() because of the cmpxchg(); prepend_to_common() - simplify to use mark_next_loop() for m and use mark_list_head() and release_store() for the non-empty list case; add more debugging for "Non-balanced monitor enter/exit" failure mode; fix race in inflate() in the "CASE: neutral" code path; install_displaced_markword_in_object() does not need to clear the header field since that is handled when the ObjectMonitor is moved from the global free list; LSuccess should clear boxReg to set ICC.ZF=1 to avoid depending on existing boxReg contents; update fast_unlock() to detect when object no longer refers to the same ObjectMonitor and take fast path exit instead; clarify fast_lock() code where we detect when object no longer refers to the same ObjectMonitor; add/update comments for movptr() calls where we move a literal into an Address; remove set_owner(); refactor setting of owner field into set_owner_from(2 versions), set_owner_from_BasicLock(), and try_set_owner_from(); the new functions include monitorinflation+owner logging; extract debug code from v2.06 and v2.07 and move to v2.07.debug; change 'jccb' -> 'jcc' and 'jmpb' -> 'jmp' as needed; checkpoint initial version of MacroAssembler::inc_om_ref_count(); update LP64 MacroAssembler::fast_lock() and fast_unlock() to use inc_om_ref_count(); fast_lock() return flag setting logic can use 'testptr(tmpReg, tmpReg)' instead of 'cmpptr(tmpReg, 0)' since that's more efficient; fast_unlock() LSuccess return flag setting logic can use 'testl 
(boxReg, 0)' instead of 'xorptr(boxReg, boxReg)' since that's more efficient; cleanup "fast-path" vs "fast path" and "slow-path" vs "slow path"; update MacroAssembler::rtm_inflated_locking() to use inc_om_ref_count(); update MacroAssembler::fast_lock() to preserve the flags before decrementing ref_count and restore the flags afterwards; this is more clean than depending on the contents of rax/tmpReg; coleenp CR - refactor async monitor deflation work from ServiceThread::service_thread_entry() to ObjectSynchronizer::deflate_idle_monitors_using_JT(); rehn,eosterlund CR - add support for HandshakeAfterDeflateIdleMonitors for platforms that don't have ObjectMonitor ref_count support implemented in C2 fast_lock() and fast_unlock().
*** 23,32 ****
--- 23,33 ----
*/
#ifndef SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
#define SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
+ #include "logging/log.hpp"
#include "runtime/atomic.hpp"
inline intptr_t ObjectMonitor::is_entered(TRAPS) const {
if (THREAD == _owner || THREAD->is_lock_owned((address) _owner)) {
return 1;
*** 49,71 ****
inline jint ObjectMonitor::waiters() const {
return _waiters;
}
inline void* ObjectMonitor::owner() const {
! return _owner;
}
inline void ObjectMonitor::clear() {
assert(Atomic::load(&_header).value() != 0, "must be non-zero");
assert(_contentions == 0, "must be 0: contentions=%d", _contentions);
assert(_waiters == 0, "must be 0: waiters=%d", _waiters);
! assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT, _recursions);
assert(_object != NULL, "must be non-NULL");
- assert(_owner == NULL, "must be NULL: owner=" INTPTR_FORMAT, p2i(_owner));
! Atomic::store(markWord::zero(), &_header);
_object = NULL;
}
inline void* ObjectMonitor::object() const {
return _object;
--- 50,106 ----
// Number of threads currently waiting on this monitor.
inline jint ObjectMonitor::waiters() const {
  return _waiters;
}
+ // Returns NULL if DEFLATER_MARKER is observed.
inline void* ObjectMonitor::owner() const {
! void* owner = _owner;
! return owner != DEFLATER_MARKER ? owner : NULL;
! }
!
! // Returns true if owner field == DEFLATER_MARKER and false otherwise.
! // This accessor is called when we really need to know if the owner
! // field == DEFLATER_MARKER and any non-NULL value won't do the trick.
! inline bool ObjectMonitor::owner_is_DEFLATER_MARKER() {
! return OrderAccess::load_acquire(&_owner) == DEFLATER_MARKER;
}
// Fully reset this ObjectMonitor. Asserts the strict invariants that do
// not hold on the async deflation path (owner must be NULL, ref_count
// must be exactly 0), clears the header, then delegates the remaining
// field checks and the transition to the Free state to clear_using_JT().
inline void ObjectMonitor::clear() {
  assert(Atomic::load(&_header).value() != 0, "must be non-zero");
  assert(_owner == NULL, "must be NULL: owner=" INTPTR_FORMAT, p2i(_owner));
  assert(ref_count() == 0, "must be 0: ref_count=%d", ref_count());

  Atomic::store(markWord::zero(), &_header);

  clear_using_JT();
}
+
+ inline void ObjectMonitor::clear_using_JT() {
+ // Unlike other *_using_JT() functions, we cannot assert
+ // AsyncDeflateIdleMonitors or Thread::current()->is_Java_thread()
+ // because clear() calls this function for the rest of its checks.
+
+ if (AsyncDeflateIdleMonitors) {
+ // Async deflation protocol uses the header, owner and ref_count
+ // fields. While the ObjectMonitor being deflated is on the global free
+ // list, we leave those three fields alone; owner == DEFLATER_MARKER
+ // and ref_count < 0 will force any racing threads to retry. The
+ // header field is used by install_displaced_markword_in_object()
+ // in the last part of the deflation protocol so we cannot check
+ // its value here.
+ guarantee(_owner == NULL || _owner == DEFLATER_MARKER,
+ "must be NULL or DEFLATER_MARKER: owner=" INTPTR_FORMAT,
+ p2i(_owner));
+ guarantee(ref_count() <= 0, "must be <= 0: ref_count=%d", ref_count());
+ }
assert(_contentions == 0, "must be 0: contentions=%d", _contentions);
assert(_waiters == 0, "must be 0: waiters=%d", _waiters);
! assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);
assert(_object != NULL, "must be non-NULL");
! set_allocation_state(Free);
_object = NULL;
}
inline void* ObjectMonitor::object() const {
return _object;
*** 82,91 ****
// return number of threads contending for this monitor
inline jint ObjectMonitor::contentions() const {
return _contentions;
}
! inline void ObjectMonitor::set_owner(void* owner) {
! _owner = owner;
}
#endif // SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
--- 117,221 ----
// Number of threads currently contending for this monitor.
inline jint ObjectMonitor::contentions() const {
  return _contentions;
}
! // Set _owner field to new_value; current value must match old_value.
! inline void ObjectMonitor::set_owner_from(void* new_value, void* old_value) {
! void* prev = Atomic::cmpxchg(new_value, &_owner, old_value);
! ADIM_guarantee(prev == old_value, "unexpected prev owner=" INTPTR_FORMAT
! ", expected=" INTPTR_FORMAT, p2i(prev), p2i(old_value));
! log_trace(monitorinflation, owner)("mid=" INTPTR_FORMAT ", prev="
! INTPTR_FORMAT ", new=" INTPTR_FORMAT,
! p2i(this), p2i(prev), p2i(new_value));
! }
!
! // Set _owner field to new_value; current value must match old_value1 or old_value2.
! inline void ObjectMonitor::set_owner_from(void* new_value, void* old_value1, void* old_value2) {
! void* prev = Atomic::cmpxchg(new_value, &_owner, old_value1);
! if (prev != old_value1) {
! prev = Atomic::cmpxchg(new_value, &_owner, old_value2);
! }
! ADIM_guarantee(prev == old_value1 || prev == old_value2,
! "unexpected prev owner=" INTPTR_FORMAT ", expected1="
! INTPTR_FORMAT " or expected2=" INTPTR_FORMAT, p2i(prev),
! p2i(old_value1), p2i(old_value2));
! log_trace(monitorinflation, owner)("mid=" INTPTR_FORMAT ", prev="
! INTPTR_FORMAT ", new=" INTPTR_FORMAT,
! p2i(this), p2i(prev), p2i(new_value));
! }
!
! // Set _owner field to self; current value must match basic_lock_p.
! inline void ObjectMonitor::set_owner_from_BasicLock(Thread* self, void* basic_lock_p) {
! assert(self->is_lock_owned((address)basic_lock_p), "self=" INTPTR_FORMAT
! " must own basic_lock_p=" INTPTR_FORMAT, p2i(self), p2i(basic_lock_p));
! void* prev = _owner;
! ADIM_guarantee(prev == basic_lock_p, "unexpected prev owner=" INTPTR_FORMAT
! ", expected=" INTPTR_FORMAT, p2i(prev), p2i(basic_lock_p));
! // Non-null owner field to non-null owner field is safe without
! // cmpxchg() as long as all readers can tolerate either flavor.
! _owner = self;
! log_trace(monitorinflation, owner)("mid=" INTPTR_FORMAT ", prev="
! INTPTR_FORMAT ", new=" INTPTR_FORMAT,
! p2i(this), p2i(prev), p2i(self));
! }
!
! // Try to set _owner field to new_value if the current value matches
! // old_value. Otherwise, does not change the _owner field.
! inline void* ObjectMonitor::try_set_owner_from(void* new_value, void* old_value) {
! void* prev = Atomic::cmpxchg(new_value, &_owner, old_value);
! if (prev == old_value) {
! log_trace(monitorinflation, owner)("mid=" INTPTR_FORMAT ", prev="
! INTPTR_FORMAT ", new=" INTPTR_FORMAT,
! p2i(this), p2i(prev), p2i(new_value));
! }
! return prev;
! }
!
! inline void ObjectMonitor::set_allocation_state(ObjectMonitor::AllocationState s) {
! _allocation_state = s;
! }
!
! inline ObjectMonitor::AllocationState ObjectMonitor::allocation_state() const {
! return _allocation_state;
! }
!
! inline bool ObjectMonitor::is_free() const {
! return _allocation_state == Free;
! }
!
! inline bool ObjectMonitor::is_active() const {
! return !is_free();
! }
!
! inline bool ObjectMonitor::is_old() const {
! return _allocation_state == Old;
! }
!
! inline bool ObjectMonitor::is_new() const {
! return _allocation_state == New;
! }
!
! inline void ObjectMonitor::dec_ref_count() {
! // The decrement only needs to be MO_ACQ_REL since the reference
! // counter is volatile.
! Atomic::dec(&_ref_count);
! // Can be negative as part of async deflation protocol.
! ADIM_guarantee(AsyncDeflateIdleMonitors || ref_count() >= 0,
! "sanity check: ref_count=%d", ref_count());
! }
!
! inline void ObjectMonitor::inc_ref_count() {
! // The increment needs to be MO_SEQ_CST so that the reference
! // counter update is seen as soon as possible in a race with the
! // async deflation protocol.
! Atomic::inc(&_ref_count);
! // Can be negative as part of async deflation protocol.
! ADIM_guarantee(AsyncDeflateIdleMonitors || ref_count() > 0,
! "sanity check: ref_count=%d", ref_count());
! }
!
! inline jint ObjectMonitor::ref_count() const {
! return OrderAccess::load_acquire(&_ref_count);
}
#endif // SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
< prev index next >