--- old/src/hotspot/share/runtime/objectMonitor.inline.hpp	2019-04-18 22:07:24.016306999 -0400
+++ new/src/hotspot/share/runtime/objectMonitor.inline.hpp	2019-04-18 22:07:23.336307010 -0400
@@ -56,20 +56,33 @@
 }
 
 inline void ObjectMonitor::clear() {
+  assert(_header != NULL, "must be non-NULL");
   assert(_contentions == 0, "must be 0: contentions=%d", _contentions);
   assert(_owner == NULL, "must be NULL: owner=" INTPTR_FORMAT, p2i(_owner));
 
+  _header = NULL;
+
   clear_using_JT();
 }
 
 inline void ObjectMonitor::clear_using_JT() {
-  // When clearing using a JavaThread, we leave _owner == DEFLATER_MARKER
-  // and _contentions < 0 to force any racing threads to retry. Unlike other
-  // *_using_JT() functions, we cannot assert AsyncDeflateIdleMonitors
-  // or Thread::current()->is_Java_thread() because clear() calls this
-  // function for the rest of its checks.
-
-  assert(_header != NULL, "must be non-NULL");
+  // Unlike other *_using_JT() functions, we cannot assert
+  // AsyncDeflateIdleMonitors or Thread::current()->is_Java_thread()
+  // because clear() calls this function for the rest of its checks.
+
+  if (AsyncDeflateIdleMonitors) {
+    // Async deflation protocol uses the _header, _contentions and _owner
+    // fields. While the ObjectMonitor being deflated is on the global free
+    // list, we leave those three fields alone; _owner == DEFLATER_MARKER
+    // and _contentions < 0 will force any racing threads to retry. The
+    // _header field is used by install_displaced_markword_in_object()
+    // in the last part of the deflation protocol so we cannot check
+    // its values here.
+    guarantee(_owner == NULL || _owner == DEFLATER_MARKER,
+              "must be NULL or DEFLATER_MARKER: owner=" INTPTR_FORMAT,
+              p2i(_owner));
+    guarantee(_contentions <= 0, "must be <= 0: contentions=%d", _contentions);
+  }
   assert(_waiters == 0, "must be 0: waiters=%d", _waiters);
   assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT, _recursions);
   assert(_object != NULL, "must be non-NULL");
@@ -78,7 +91,6 @@
   // decrement _ref_count.
 
   set_allocation_state(Free);
-  _header = NULL;
   _object = NULL;
   // Do not clear _ref_count here because _ref_count is for indicating
   // that the ObjectMonitor* is in use which is orthogonal to whether
@@ -148,30 +160,18 @@
 }
 
 inline void ObjectMonitor::dec_ref_count() {
-  // The decrement needs to be MO_ACQ_REL. At the moment, the Atomic::dec
-  // backend on PPC does not yet conform to these requirements. Therefore
-  // the decrement is simulated with an Atomic::sub(1, &addr). Without
-  // this MO_ACQ_REL Atomic::dec simulation, AsyncDeflateIdleMonitors is
-  // not safe.
-  Atomic::sub((jint)1, &_ref_count);
+  // The decrement only needs to be MO_ACQ_REL since the reference
+  // counter is volatile.
+  Atomic::dec(&_ref_count);
   guarantee(_ref_count >= 0, "sanity check: ref_count=%d", _ref_count);
 }
 
 inline void ObjectMonitor::inc_ref_count() {
-  // The increment needs to be MO_SEQ_CST. At the moment, the Atomic::inc
-  // backend on PPC does not yet conform to these requirements. Therefore
-  // the increment is simulated with a load phi; cas phi + 1; loop.
-  // Without this MO_SEQ_CST Atomic::inc simulation, AsyncDeflateIdleMonitors
-  // is not safe.
-  for (;;) {
-    jint sample = OrderAccess::load_acquire(&_ref_count);
-    guarantee(sample >= 0, "sanity check: sample=%d", (int)sample);
-    if (Atomic::cmpxchg(sample + 1, &_ref_count, sample) == sample) {
-      // Incremented _ref_count without interference.
-      return;
-    }
-    // Implied else: Saw interference so loop and try again.
-  }
+  // The increment needs to be MO_SEQ_CST so that the reference
+  // counter update is seen as soon as possible in a race with the
+  // async deflation protocol.
+  Atomic::inc(&_ref_count);
+  guarantee(_ref_count > 0, "sanity check: ref_count=%d", _ref_count);
 }
 
 inline jint ObjectMonitor::ref_count() const {