--- old/src/hotspot/share/runtime/objectMonitor.cpp 2019-05-02 15:04:25.010393114 -0400 +++ new/src/hotspot/share/runtime/objectMonitor.cpp 2019-05-02 15:04:24.562393130 -0400 @@ -238,7 +238,9 @@ // ----------------------------------------------------------------------------- // Enter support -bool ObjectMonitor::enter(TRAPS) { +void ObjectMonitor::enter(TRAPS) { + ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count); + // The following code is ordered to check the most common cases first // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors. Thread * const Self = THREAD; @@ -248,13 +250,13 @@ // Either ASSERT _recursions == 0 or explicitly set _recursions = 0. assert(_recursions == 0, "invariant"); assert(_owner == Self, "invariant"); - return true; + return; } if (cur == Self) { // TODO-FIXME: check for integer overflow! BUGID 6557169. _recursions++; - return true; + return; } if (Self->is_lock_owned ((address)cur)) { @@ -263,7 +265,7 @@ // Commute owner from a thread-specific on-stack BasicLockObject address to // a full-fledged "Thread *". _owner = Self; - return true; + return; } // We've encountered genuine contention. @@ -284,7 +286,7 @@ ", encoded this=" INTPTR_FORMAT, p2i(((oop)object())->mark()), p2i(markOopDesc::encode(this))); Self->_Stalled = 0; - return true; + return; } assert(_owner != Self, "invariant"); @@ -294,21 +296,13 @@ assert(!SafepointSynchronize::is_at_safepoint(), "invariant"); assert(jt->thread_state() != _thread_blocked, "invariant"); assert(AsyncDeflateIdleMonitors || this->object() != NULL, "invariant"); - assert(AsyncDeflateIdleMonitors || _contentions >= 0, "invariant"); + assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions); - // Prevent deflation. See ObjectSynchronizer::deflate_monitor() and is_busy(). - // Ensure the object-monitor relationship remains stable while there's contention. 
- const jint contentions = Atomic::add(1, &_contentions); - if (contentions <= 0 && _owner == DEFLATER_MARKER) { - // Async deflation is in progress. Attempt to restore the - // header/dmw to the object's header so that we only retry once - // if the deflater thread happens to be slow. - const oop obj = (oop) object(); - install_displaced_markword_in_object(obj); - Self->_Stalled = 0; - return false; // Caller should retry. Never mind about _contentions as this monitor has been deflated. - } - // The deflater thread will not deflate this monitor and the monitor is contended, continue. + // Prevent deflation. See ObjectSynchronizer::deflate_monitor(), + // ObjectSynchronizer::deflate_monitor_using_JT() and is_busy(). + // Ensure the object <-> monitor relationship remains stable while + // there's contention. + Atomic::add(1, &_contentions); JFR_ONLY(JfrConditionalFlushWithStacktrace flush(jt);) EventJavaMonitorEnter event; @@ -370,7 +364,7 @@ } Atomic::dec(&_contentions); - assert(AsyncDeflateIdleMonitors || _contentions >= 0, "invariant"); + assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions); Self->_Stalled = 0; // Must either set _recursions = 0 or ASSERT _recursions == 0. @@ -406,7 +400,6 @@ event.commit(); } OM_PERFDATA_OP(ContendedLockAttempts, inc()); - return true; } // Caveat: TryLock() is not necessarily serializing if it returns failure. @@ -435,7 +428,7 @@ // deflation process. void ObjectMonitor::install_displaced_markword_in_object(const oop obj) { // This function must only be called when (owner == DEFLATER_MARKER - // && contentions <= 0), but we can't guarantee that here because + // && ref_count <= 0), but we can't guarantee that here because // those values could change when the ObjectMonitor gets moved from // the global free list to a per-thread free list. 
@@ -510,6 +503,8 @@ #define MAX_RECHECK_INTERVAL 1000 void ObjectMonitor::EnterI(TRAPS) { + ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count); + Thread * const Self = THREAD; assert(Self->is_Java_thread(), "invariant"); assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant"); @@ -523,12 +518,8 @@ } if (_owner == DEFLATER_MARKER) { - // The deflation protocol finished the first part (setting _owner), but - // it failed the second part (making _contentions negative) and bailed. - // Because we're called from enter() we have at least one contention. - guarantee(_contentions > 0, "owner == DEFLATER_MARKER && contentions <= 0 " - "should have been handled by the caller: contentions=%d", - _contentions); + // The deflation protocol finished the first part (setting owner), but + // it failed the second part (making ref_count negative) and bailed. if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) { // Acquired the monitor. assert(_succ != Self, "invariant"); @@ -654,12 +645,8 @@ if (TryLock(Self) > 0) break; if (_owner == DEFLATER_MARKER) { - // The deflation protocol finished the first part (setting _owner), but - // it failed the second part (making _contentions negative) and bailed. - // Because we're called from enter() we have at least one contention. - guarantee(_contentions > 0, "owner == DEFLATER_MARKER && contentions <= 0 " - "should have been handled by the caller: contentions=%d", - _contentions); + // The deflation protocol finished the first part (setting owner), but + // it failed the second part (making ref_count negative) and bailed. if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) { // Acquired the monitor. break; @@ -770,6 +757,8 @@ // In the future we should reconcile EnterI() and ReenterI(). 
void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) { + ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count); + assert(Self != NULL, "invariant"); assert(SelfNode != NULL, "invariant"); assert(SelfNode->_thread == Self, "invariant"); @@ -788,12 +777,8 @@ if (TrySpin(Self) > 0) break; if (_owner == DEFLATER_MARKER) { - // The deflation protocol finished the first part (setting _owner), - // but it will observe _waiters != 0 and will bail out. Because we're - // called from wait() we may or may not have any contentions. - guarantee(_contentions >= 0, "owner == DEFLATER_MARKER && contentions < 0 " - "should have been handled by the caller: contentions=%d", - _contentions); + // The deflation protocol finished the first part (setting owner), but + // it failed the second part (making ref_count negative) and bailed. if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) { // Acquired the monitor. break; @@ -1258,20 +1243,16 @@ // reenter() enters a lock and sets recursion count // complete_exit/reenter operate as a wait without waiting -bool ObjectMonitor::reenter(intptr_t recursions, TRAPS) { +void ObjectMonitor::reenter(intptr_t recursions, TRAPS) { Thread * const Self = THREAD; assert(Self->is_Java_thread(), "Must be Java thread!"); JavaThread *jt = (JavaThread *)THREAD; guarantee(_owner != Self, "reenter already owner"); - if (!enter(THREAD)) { - // Failed to enter the monitor so return for a retry. - return false; - } + enter(THREAD); // Entered the monitor. 
guarantee(_recursions == 0, "reenter recursion"); _recursions = recursions; - return true; } @@ -1499,8 +1480,7 @@ assert(_owner != Self, "invariant"); ObjectWaiter::TStates v = node.TState; if (v == ObjectWaiter::TS_RUN) { - const bool success = enter(Self); - ADIM_guarantee(success, "enter signaled for a retry, but monitor should not have been deflated as waiters > 0"); + enter(Self); } else { guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant"); ReenterI(Self, &node); @@ -2064,7 +2044,7 @@ DEBUG_ONLY(InitDone = true;) } -// For internal used by ObjectSynchronizer::monitors_iterate(). +// For internal use by ObjectSynchronizer::monitors_iterate(). ObjectMonitorHandle::ObjectMonitorHandle(ObjectMonitor * om_ptr) { om_ptr->inc_ref_count(); _om_ptr = om_ptr; @@ -2098,17 +2078,14 @@ // Race here if monitor is not owned! The above ref_count bump // will cause subsequent async deflation to skip it. However, // previous or concurrent async deflation is a race. - if (om_ptr->_owner == DEFLATER_MARKER && om_ptr->_contentions <= 0) { - // Async deflation is in progress. - if (om_ptr->ref_count() <= 0) { - // And our ref_count increment above lost the race to async - // deflation. Attempt to restore the header/dmw to the - // object's header so that we only retry once if the deflater - // thread happens to be slow. - om_ptr->install_displaced_markword_in_object(object); - om_ptr->dec_ref_count(); - return false; - } + if (om_ptr->_owner == DEFLATER_MARKER && om_ptr->ref_count() <= 0) { + // Async deflation is in progress and our ref_count increment + // above lost the race to async deflation. Attempt to restore + // the header/dmw to the object's header so that we only retry + // once if the deflater thread happens to be slow. 
+ om_ptr->install_displaced_markword_in_object(object); + om_ptr->dec_ref_count(); + return false; } // The ObjectMonitor could have been deflated and reused for // another object before we bumped the ref_count so make sure @@ -2122,8 +2099,8 @@ } } - guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT, - p2i(_om_ptr)); + ADIM_guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT, + p2i(_om_ptr)); _om_ptr = om_ptr; return true; } @@ -2131,11 +2108,11 @@ // For internal use by ObjectSynchronizer::inflate(). void ObjectMonitorHandle::set_om_ptr(ObjectMonitor * om_ptr) { if (_om_ptr == NULL) { - guarantee(om_ptr != NULL, "cannot clear an unset om_ptr"); + ADIM_guarantee(om_ptr != NULL, "cannot clear an unset om_ptr"); om_ptr->inc_ref_count(); _om_ptr = om_ptr; } else { - guarantee(om_ptr == NULL, "can only clear a set om_ptr"); + ADIM_guarantee(om_ptr == NULL, "can only clear a set om_ptr"); _om_ptr->dec_ref_count(); _om_ptr = NULL; } --- old/src/hotspot/share/runtime/objectMonitor.hpp 2019-05-02 15:04:26.038393078 -0400 +++ new/src/hotspot/share/runtime/objectMonitor.hpp 2019-05-02 15:04:25.586393094 -0400 @@ -168,13 +168,15 @@ volatile jint _contentions; // Number of active contentions in enter(). It is used by is_busy() // along with other fields to determine if an ObjectMonitor can be - // deflated. See ObjectSynchronizer::deflate_monitor(). + // deflated. See ObjectSynchronizer::deflate_monitor() and + // ObjectSynchronizer::deflate_monitor_using_JT(). protected: ObjectWaiter * volatile _WaitSet; // LL of threads wait()ing on the monitor volatile jint _waiters; // number of waiting threads private: volatile int _WaitSetLock; // protects Wait Queue - simple spinlock - volatile jint _ref_count; // ref count for ObjectMonitor* + volatile jint _ref_count; // ref count for ObjectMonitor* and used by the async deflation + // protocol. See ObjectSynchronizer::deflate_monitor_using_JT(). 
typedef enum { Free = 0, // Free must be 0 for monitor to be free after memset(..,0,..). New, @@ -249,16 +251,13 @@ return _contentions|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList); } - // Version of is_busy() that accounts for special values in - // _contentions and _owner when AsyncDeflateIdleMonitors is enabled. + // Version of is_busy() that accounts for the special value in + // _owner when AsyncDeflateIdleMonitors is enabled. intptr_t is_busy_async() const { - intptr_t ret_code = _waiters | intptr_t(_cxq) | intptr_t(_EntryList); + intptr_t ret_code = _contentions | _waiters | intptr_t(_cxq) | intptr_t(_EntryList); if (!AsyncDeflateIdleMonitors) { - ret_code |= _contentions | intptr_t(_owner); + ret_code |= intptr_t(_owner); } else { - if (_contentions > 0) { - ret_code |= _contentions; - } if (_owner != DEFLATER_MARKER) { ret_code |= intptr_t(_owner); } @@ -326,7 +325,7 @@ void clear(); void clear_using_JT(); - bool enter(TRAPS); // Returns false if monitor is being async deflated and caller should retry locking the object. + void enter(TRAPS); void exit(bool not_suspended, TRAPS); void wait(jlong millis, bool interruptable, TRAPS); void notify(TRAPS); @@ -334,7 +333,7 @@ // Use the following at your own risk intptr_t complete_exit(TRAPS); - bool reenter(intptr_t recursions, TRAPS); // Returns false if monitor is being async deflated and caller should retry locking the object. 
+ void reenter(intptr_t recursions, TRAPS); private: void AddWaiter(ObjectWaiter * waiter); --- old/src/hotspot/share/runtime/objectMonitor.inline.hpp 2019-05-02 15:04:27.058393043 -0400 +++ new/src/hotspot/share/runtime/objectMonitor.inline.hpp 2019-05-02 15:04:26.586393059 -0400 @@ -57,8 +57,8 @@ inline void ObjectMonitor::clear() { assert(_header != NULL, "must be non-NULL"); - assert(_contentions == 0, "must be 0: contentions=%d", _contentions); assert(_owner == NULL, "must be NULL: owner=" INTPTR_FORMAT, p2i(_owner)); + assert(_ref_count == 0, "must be 0: ref_count=%d", _ref_count); _header = NULL; @@ -71,30 +71,25 @@ // because clear() calls this function for the rest of its checks. if (AsyncDeflateIdleMonitors) { - // Async deflation protocol uses the _header, _contentions and _owner + // Async deflation protocol uses the header, owner and ref_count // fields. While the ObjectMonitor being deflated is on the global free - // list, we leave those three fields alone; _owner == DEFLATER_MARKER - // and _contentions < 0 will force any racing threads to retry. The - // _header field is used by install_displaced_markword_in_object() + // list, we leave those three fields alone; owner == DEFLATER_MARKER + // and ref_count < 0 will force any racing threads to retry. The + // header field is used by install_displaced_markword_in_object() // in the last part of the deflation protocol so we cannot check - // its values here. + // its value here. 
guarantee(_owner == NULL || _owner == DEFLATER_MARKER, "must be NULL or DEFLATER_MARKER: owner=" INTPTR_FORMAT, p2i(_owner)); - guarantee(_contentions <= 0, "must be <= 0: contentions=%d", _contentions); + guarantee(_ref_count <= 0, "must be <= 0: ref_count=%d", _ref_count); } + assert(_contentions == 0, "must be 0: contentions=%d", _contentions); assert(_waiters == 0, "must be 0: waiters=%d", _waiters); assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT, _recursions); assert(_object != NULL, "must be non-NULL"); - // Do not assert _ref_count == 0 here because a racing thread could - // increment _ref_count, observe _owner == DEFLATER_MARKER and then - // decrement _ref_count. set_allocation_state(Free); _object = NULL; - // Do not clear _ref_count here because _ref_count is for indicating - // that the ObjectMonitor* is in use which is orthogonal to whether - // the ObjectMonitor itself is in use for a locking operation. } inline void* ObjectMonitor::object() const { --- old/src/hotspot/share/runtime/synchronizer.cpp 2019-05-02 15:04:28.054393008 -0400 +++ new/src/hotspot/share/runtime/synchronizer.cpp 2019-05-02 15:04:27.594393024 -0400 @@ -350,36 +350,33 @@ // We don't need to use fast path here, because it must have been // failed in the interpreter/compiler code. void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) { - bool do_loop = true; - while (do_loop) { - markOop mark = obj->mark(); - assert(!mark->has_bias_pattern(), "should not see bias pattern here"); + markOop mark = obj->mark(); + assert(!mark->has_bias_pattern(), "should not see bias pattern here"); - if (mark->is_neutral()) { - // Anticipate successful CAS -- the ST of the displaced mark must - // be visible <= the ST performed by the CAS. - lock->set_displaced_header(mark); - if (mark == obj()->cas_set_mark((markOop) lock, mark)) { - return; - } - // Fall through to inflate() ... 
- } else if (mark->has_locker() && - THREAD->is_lock_owned((address)mark->locker())) { - assert(lock != mark->locker(), "must not re-lock the same lock"); - assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock"); - lock->set_displaced_header(NULL); + if (mark->is_neutral()) { + // Anticipate successful CAS -- the ST of the displaced mark must + // be visible <= the ST performed by the CAS. + lock->set_displaced_header(mark); + if (mark == obj()->cas_set_mark((markOop) lock, mark)) { return; } - - // The object header will never be displaced to this lock, - // so it does not matter what the value is, except that it - // must be non-zero to avoid looking like a re-entrant lock, - // and must not look locked either. - lock->set_displaced_header(markOopDesc::unused_mark()); - ObjectMonitorHandle omh; - inflate(&omh, THREAD, obj(), inflate_cause_monitor_enter); - do_loop = !omh.om_ptr()->enter(THREAD); + // Fall through to inflate() ... + } else if (mark->has_locker() && + THREAD->is_lock_owned((address)mark->locker())) { + assert(lock != mark->locker(), "must not re-lock the same lock"); + assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock"); + lock->set_displaced_header(NULL); + return; } + + // The object header will never be displaced to this lock, + // so it does not matter what the value is, except that it + // must be non-zero to avoid looking like a re-entrant lock, + // and must not look locked either. 
+ lock->set_displaced_header(markOopDesc::unused_mark()); + ObjectMonitorHandle omh; + inflate(&omh, THREAD, obj(), inflate_cause_monitor_enter); + omh.om_ptr()->enter(THREAD); } // This routine is used to handle interpreter/compiler slow case @@ -421,12 +418,9 @@ assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); } - bool do_loop = true; - while (do_loop) { - ObjectMonitorHandle omh; - inflate(&omh, THREAD, obj(), inflate_cause_vm_internal); - do_loop = !omh.om_ptr()->reenter(recursion, THREAD); - } + ObjectMonitorHandle omh; + inflate(&omh, THREAD, obj(), inflate_cause_vm_internal); + omh.om_ptr()->reenter(recursion, THREAD); } // ----------------------------------------------------------------------------- // JNI locks on java objects @@ -438,12 +432,9 @@ assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); } THREAD->set_current_pending_monitor_is_from_java(false); - bool do_loop = true; - while (do_loop) { - ObjectMonitorHandle omh; - inflate(&omh, THREAD, obj(), inflate_cause_jni_enter); - do_loop = !omh.om_ptr()->enter(THREAD); - } + ObjectMonitorHandle omh; + inflate(&omh, THREAD, obj(), inflate_cause_jni_enter); + omh.om_ptr()->enter(THREAD); THREAD->set_current_pending_monitor_is_from_java(true); } @@ -1174,7 +1165,6 @@ // Clear any values we allowed to linger during async deflation. take->_header = NULL; take->set_owner(NULL); - take->_contentions = 0; if (take->ref_count() < 0) { // Add back max_jint to restore the ref_count field to its @@ -1782,14 +1772,14 @@ // Returns true if it was deflated and false otherwise. // // The async deflation protocol sets owner to DEFLATER_MARKER and -// makes contentions negative as signals to contending threads that +// makes ref_count negative as signals to contending threads that // an async deflation is in progress. 
There are a number of checks // as part of the protocol to make sure that the calling thread has // not lost the race to a contending thread or to a thread that just // wants to use the ObjectMonitor*. // // The ObjectMonitor has been successfully async deflated when: -// (owner == DEFLATER_MARKER && contentions < 0 && ref_count < 0). +// (owner == DEFLATER_MARKER && ref_count < 0) // Contending threads or ObjectMonitor* using threads that see those // values know to retry their operation. // @@ -1815,41 +1805,33 @@ // the slow path. This is just the first part of the async // deflation dance. - if (mid->_waiters != 0 || mid->ref_count() != 0) { + if (mid->_contentions != 0 || mid->_waiters != 0) { // Another thread has raced to enter the ObjectMonitor after - // mid->is_busy() above and has already waited on it which - // makes it busy so no deflation. Or the ObjectMonitor* is - // in use for some other operation like inflate(). Restore - // owner to NULL if it is still DEFLATER_MARKER. + // mid->is_busy() above or has already entered and waited on + // it which makes it busy so no deflation. Restore owner to + // NULL if it is still DEFLATER_MARKER. Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER); return false; } - if (Atomic::cmpxchg(-max_jint, &mid->_contentions, (jint)0) == 0) { - // Make contentions negative to force any contending threads to - // retry. This is the second part of the async deflation dance. + if (Atomic::cmpxchg(-max_jint, &mid->_ref_count, (jint)0) == 0) { + // Make ref_count negative to force any contending threads or + // ObjectMonitor* using threads to retry. This is the second + // part of the async deflation dance. - if (mid->_owner == DEFLATER_MARKER && - Atomic::cmpxchg(-max_jint, &mid->_ref_count, (jint)0) == 0) { + if (mid->_owner == DEFLATER_MARKER) { // If owner is still DEFLATER_MARKER, then we have successfully // signaled any contending threads to retry. 
If it is not, then we // have lost the race to an entering thread and the ObjectMonitor - // is now busy. If we cannot make ref_count negative (because the - // ObjectMonitor* is in use), then we have lost that race instead. - // This is the third and final part of the async deflation dance. - // Note: This owner check solves the ABA problem with contentions + // is now busy. This is the third and final part of the async + // deflation dance. + // Note: This owner check solves the ABA problem with ref_count // where another thread acquired the ObjectMonitor, finished - // using it and restored the contentions to zero. - // Note: Making ref_count negative solves the race with - // ObjectMonitor::save_om_ptr() where its ref_count increment - // happens after the first ref_count check in this function. - // Note: Making ref_count negative must happen after the third - // part check of "owner == DEFLATER_MARKER". When save_om_ptr() - // retries, it will call install_displaced_markword_in_object() - // which will disconnect the object from the ObjectMonitor so - // deflation must happen. + // using it and restored the ref_count to zero. // Sanity checks for the races: + guarantee(mid->_contentions == 0, "must be 0: contentions=%d", + mid->_contentions); guarantee(mid->_waiters == 0, "must be 0: waiters=%d", mid->_waiters); guarantee(mid->_cxq == NULL, "must be no contending threads: cxq=" INTPTR_FORMAT, p2i(mid->_cxq)); @@ -1898,29 +1880,27 @@ // refers to this ObjectMonitor. Those linkages have to be // cleaned up by the caller who has the complete context. - // We leave owner == DEFLATER_MARKER and contentions < 0 + // We leave owner == DEFLATER_MARKER and ref_count < 0 // to force any racing threads to retry. return true; // Success, ObjectMonitor has been deflated. } - // The owner was changed from DEFLATER_MARKER or ObjectMonitor* - // is in use so we lost the race since the ObjectMonitor is now - // busy. 
- - // Restore owner to NULL if it is still DEFLATER_MARKER: - Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER); + // The owner was changed from DEFLATER_MARKER so we lost the + // race since the ObjectMonitor is now busy. - // Add back max_jint to restore the contentions field to its + // Add back max_jint to restore the ref_count field to its // proper value (which may not be what we saw above): - Atomic::add(max_jint, &mid->_contentions); + Atomic::add(max_jint, &mid->_ref_count); - assert(mid->_contentions >= 0, "must not be negative: contentions=%d", - mid->_contentions); + assert(mid->ref_count() >= 0, "must not be negative: ref_count=%d", + mid->ref_count()); + return false; } - // The contentions was no longer 0 so we lost the race since the - // ObjectMonitor is now busy. - assert(mid->_owner != DEFLATER_MARKER, "must not be DEFLATER_MARKER"); + // The ref_count was no longer 0 so we lost the race since the + // ObjectMonitor is now busy or the ObjectMonitor* is now in use. + // Restore owner to NULL if it is still DEFLATER_MARKER: + Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER); } // The owner field is no longer NULL so we lost the race since the