< prev index next >

src/hotspot/share/runtime/objectMonitor.inline.hpp

Print this page
rev 54110 : Checkpoint latest preliminary review patches for full OpenJDK review.

@@ -51,24 +51,44 @@
 
 // Returns the current value of the _waiters field (count of waiting threads).
 inline jint ObjectMonitor::waiters() const {
   return _waiters;
 }
 
+// Returns NULL if DEFLATER_MARKER is observed.
 inline void* ObjectMonitor::owner() const {
-  return _owner;
+  // Read _owner exactly once so the DEFLATER_MARKER check and the
+  // returned value are guaranteed to refer to the same observation.
+  void* owner = _owner;
+  return owner != DEFLATER_MARKER ? owner : NULL;
 }
 
+// Resets an idle ObjectMonitor for reuse. Asserts the monitor is quiescent
+// (_count == 0, _owner == NULL) and then delegates the remaining checks and
+// field clearing to clear_using_JT().
 inline void ObjectMonitor::clear() {
-  assert(_header != NULL, "Fatal logic error in ObjectMonitor header!");
   assert(_count == 0, "Fatal logic error in ObjectMonitor count!");
+  assert(_owner == NULL, "Fatal logic error in ObjectMonitor owner!");
+
+  clear_using_JT();
+}
+
+inline void ObjectMonitor::clear_using_JT() {
+  // When clearing using a JavaThread, we leave _owner == DEFLATER_MARKER
+  // and _count < 0 to force any racing threads to retry. Unlike other
+  // *_using_JT() functions, we cannot assert AsyncDeflateIdleMonitors
+  // or Thread::current()->is_Java_thread() because clear() calls this
+  // function for the rest of its checks.
+
+  assert(_header != NULL, "Fatal logic error in ObjectMonitor header!");
   assert(_waiters == 0, "Fatal logic error in ObjectMonitor waiters!");
   assert(_recursions == 0, "Fatal logic error in ObjectMonitor recursions!");
   assert(_object != NULL, "Fatal logic error in ObjectMonitor object!");
-  assert(_owner == NULL, "Fatal logic error in ObjectMonitor owner!");
+  // Do not assert _ref_count == 0 here because a racing thread could
+  // increment _ref_count, observe _owner == DEFLATER_MARKER and then
+  // decrement _ref_count.
 
+  // Transition to Free first; the _header and _object fields are
+  // cleared with plain stores below.
+  set_allocation_state(Free);
   _header = NULL;
   _object = NULL;
+  // Do not clear _ref_count here because _ref_count is for indicating
+  // that the ObjectMonitor* is in use which is orthogonal to whether
+  // the ObjectMonitor itself is in use for a locking operation.
 }
 
 // Returns the associated Java object (the _object field) without any
 // filtering or synchronization.
 inline void* ObjectMonitor::object() const {
   return _object;
 }

@@ -105,6 +125,61 @@
 // Stores the new owner and resets the recursion count to zero.
 inline void ObjectMonitor::set_owner(void* owner) {
   _owner = owner;
   _recursions = 0;
 }
 
+// Plain (non-atomic) store of the allocation state.
+// NOTE(review): callers presumably provide any needed synchronization —
+// confirm against the monitor list management code.
+inline void ObjectMonitor::set_allocation_state(ObjectMonitor::AllocationState s) {
+  _allocation_state = s;
+}
+
+// Plain (non-atomic) read of the allocation state.
+inline ObjectMonitor::AllocationState ObjectMonitor::allocation_state() const {
+  return _allocation_state;
+}
+
+// True when the monitor is in the Free allocation state.
+inline bool ObjectMonitor::is_free() const {
+  return _allocation_state == Free;
+}
+
+// True for any allocation state other than Free (i.e. New or Old).
+inline bool ObjectMonitor::is_active() const {
+  return !is_free();
+}
+
+// True when the monitor is in the Old allocation state.
+inline bool ObjectMonitor::is_old() const {
+  return _allocation_state == Old;
+}
+
+// True when the monitor is in the New allocation state.
+inline bool ObjectMonitor::is_new() const {
+  return _allocation_state == New;
+}
+
+inline void ObjectMonitor::dec_ref_count() {
+  // The decrement needs to be MO_ACQ_REL. At the moment, the Atomic::dec
+  // backend on PPC does not yet conform to these requirements. Therefore
+  // the decrement is simulated with an Atomic::sub(1, &addr). Without
+  // this MO_ACQ_REL Atomic::dec simulation, AsyncDeflateIdleMonitors is
+  // not safe.
+  Atomic::sub((jint)1, &_ref_count);
+  // NOTE(review): this guarantee re-reads _ref_count non-atomically after
+  // the sub, so a racing inc/dec can make the observed value differ from
+  // the result of this thread's decrement — confirm this is intended
+  // (e.g. by using the return value of Atomic::sub instead).
+  guarantee(_ref_count >= 0, "sanity check: ref_count=%d", _ref_count);
+}
+
+inline void ObjectMonitor::inc_ref_count() {
+  // The increment needs to be MO_SEQ_CST. At the moment, the Atomic::inc
+  // backend on PPC does not yet conform to these requirements. Therefore
+  // the increment is simulated with a load phi; cas phi + 1; loop.
+  // Without this MO_SEQ_CST Atomic::inc simulation, AsyncDeflateIdleMonitors
+  // is not safe.
+  for (;;) {
+    jint sample = OrderAccess::load_acquire(&_ref_count);
+    guarantee(sample >= 0, "sanity check: sample=%d", (int)sample);
+    // cmpxchg returns the old value; equality with 'sample' means our
+    // CAS installed sample + 1 without interference.
+    if (Atomic::cmpxchg(sample + 1, &_ref_count, sample) == sample) {
+      // Incremented _ref_count without interference.
+      return;
+    }
+    // Implied else: Saw interference so loop and try again.
+  }
+}
+
+// Returns _ref_count with acquire ordering.
+inline jint ObjectMonitor::ref_count() const {
+  return OrderAccess::load_acquire(&_ref_count);
+}
+
 #endif // SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
< prev index next >