< prev index next >
src/hotspot/share/runtime/objectMonitor.inline.hpp
Print this page
rev 54415 : 8222295: more baseline cleanups from Async Monitor Deflation project
rev 54416 : Checkpoint latest preliminary review patches for full OpenJDK review; merge with 8222295.patch.
rev 54417 : imported patch dcubed.monitor_deflate_conc.v2.01
@@ -54,33 +54,45 @@
void* owner = _owner;
return owner != DEFLATER_MARKER ? owner : NULL;
}
inline void ObjectMonitor::clear() {
+ assert(_header != NULL, "must be non-NULL");
assert(_contentions == 0, "must be 0: contentions=%d", _contentions);
assert(_owner == NULL, "must be NULL: owner=" INTPTR_FORMAT, p2i(_owner));
+ _header = NULL;
+
clear_using_JT();
}
inline void ObjectMonitor::clear_using_JT() {
- // When clearing using a JavaThread, we leave _owner == DEFLATER_MARKER
- // and _contentions < 0 to force any racing threads to retry. Unlike other
- // *_using_JT() functions, we cannot assert AsyncDeflateIdleMonitors
- // or Thread::current()->is_Java_thread() because clear() calls this
- // function for the rest of its checks.
-
- assert(_header != NULL, "must be non-NULL");
+ // Unlike other *_using_JT() functions, we cannot assert
+ // AsyncDeflateIdleMonitors or Thread::current()->is_Java_thread()
+ // because clear() calls this function for the rest of its checks.
+
+ if (AsyncDeflateIdleMonitors) {
+ // Async deflation protocol uses the _header, _contentions and _owner
+ // fields. While the ObjectMonitor being deflated is on the global free
+ // list, we leave those three fields alone; _owner == DEFLATER_MARKER
+ // and _contentions < 0 will force any racing threads to retry. The
+ // _header field is used by install_displaced_markword_in_object()
+ // in the last part of the deflation protocol so we cannot check
+ // its values here.
+ guarantee(_owner == NULL || _owner == DEFLATER_MARKER,
+ "must be NULL or DEFLATER_MARKER: owner=" INTPTR_FORMAT,
+ p2i(_owner));
+ guarantee(_contentions <= 0, "must be <= 0: contentions=%d", _contentions);
+ }
assert(_waiters == 0, "must be 0: waiters=%d", _waiters);
assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT, _recursions);
assert(_object != NULL, "must be non-NULL");
// Do not assert _ref_count == 0 here because a racing thread could
// increment _ref_count, observe _owner == DEFLATER_MARKER and then
// decrement _ref_count.
set_allocation_state(Free);
- _header = NULL;
_object = NULL;
// Do not clear _ref_count here because _ref_count is for indicating
// that the ObjectMonitor* is in use which is orthogonal to whether
// the ObjectMonitor itself is in use for a locking operation.
}
@@ -146,34 +158,22 @@
inline bool ObjectMonitor::is_new() const {
return _allocation_state == New;
}
inline void ObjectMonitor::dec_ref_count() {
- // The decrement needs to be MO_ACQ_REL. At the moment, the Atomic::dec
- // backend on PPC does not yet conform to these requirements. Therefore
- // the decrement is simulated with an Atomic::sub(1, &addr). Without
- // this MO_ACQ_REL Atomic::dec simulation, AsyncDeflateIdleMonitors is
- // not safe.
- Atomic::sub((jint)1, &_ref_count);
+ // The decrement must be at least MO_ACQ_REL; the volatile qualifier on
+ // the reference counter does not provide that ordering by itself, but
+ // Atomic::dec()'s default conservative (full-fence) semantics do.
+ Atomic::dec(&_ref_count);
guarantee(_ref_count >= 0, "sanity check: ref_count=%d", _ref_count);
}
inline void ObjectMonitor::inc_ref_count() {
- // The increment needs to be MO_SEQ_CST. At the moment, the Atomic::inc
- // backend on PPC does not yet conform to these requirements. Therefore
- // the increment is simulated with a load phi; cas phi + 1; loop.
- // Without this MO_SEQ_CST Atomic::inc simulation, AsyncDeflateIdleMonitors
- // is not safe.
- for (;;) {
- jint sample = OrderAccess::load_acquire(&_ref_count);
- guarantee(sample >= 0, "sanity check: sample=%d", (int)sample);
- if (Atomic::cmpxchg(sample + 1, &_ref_count, sample) == sample) {
- // Incremented _ref_count without interference.
- return;
- }
- // Implied else: Saw interference so loop and try again.
- }
+ // The increment needs to be MO_SEQ_CST so that this thread and a
+ // racing deflater thread agree on a single global order for the
+ // _ref_count and async deflation protocol field updates.
+ Atomic::inc(&_ref_count);
+ guarantee(_ref_count > 0, "sanity check: ref_count=%d", _ref_count);
}
inline jint ObjectMonitor::ref_count() const {
return OrderAccess::load_acquire(&_ref_count);
}
< prev index next >