
src/hotspot/share/runtime/synchronizer.cpp

rev 57560 : imported patch 8235795.patch.cr0
rev 57561 : dholmes CR - refactor common code, refactor atomic load of LVars.population in monitors_used_above_threshold, simplify list walking in ObjectSynchronizer::om_release() so we lock fewer ObjectMonitors, remove unnecessary locking from ObjectSynchronizer::deflate_monitor_list(), add NoSafepointVerifier helpers to main list management functions, remove unnecessary storestore(), remove unnecessary comments, clarify/fix comments.

@@ -161,20 +161,25 @@
 static bool is_locked(ObjectMonitor* om) {
   return ((intptr_t)Atomic::load(&om->_next_om) & OM_LOCK_BIT) == OM_LOCK_BIT;
 }
 
 // Mark an ObjectMonitor* with OM_LOCK_BIT and return it.
-// Note: the om parameter may or may not have been marked originally.
 static ObjectMonitor* mark_om_ptr(ObjectMonitor* om) {
   return (ObjectMonitor*)((intptr_t)om | OM_LOCK_BIT);
 }
 
+// Return the unmarked next field in an ObjectMonitor. Note: the next
+// field may or may not have been marked with OM_LOCK_BIT originally.
+static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
+  return (ObjectMonitor*)((intptr_t)Atomic::load(&om->_next_om) & ~OM_LOCK_BIT);
+}
+
 // Try to lock an ObjectMonitor. Returns true if locking was successful.
 // Otherwise returns false.
 static bool try_om_lock(ObjectMonitor* om) {
   // Get current next field without any OM_LOCK_BIT value.
-  ObjectMonitor* next = (ObjectMonitor*)((intptr_t)Atomic::load(&om->_next_om) & ~OM_LOCK_BIT);
+  ObjectMonitor* next = unmarked_next(om);
   if (Atomic::cmpxchg(&om->_next_om, next, mark_om_ptr(next)) != next) {
     return false;  // Cannot lock the ObjectMonitor.
   }
   return true;
 }
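[Editor's note] The helpers above pack a spinlock into the low bit of each ObjectMonitor's _next_om field. Below is a minimal stand-alone sketch of the same idea, assuming a hypothetical Node type whose allocator keeps the low pointer bit free; it is illustrative only, not the HotSpot code.

  #include <atomic>
  #include <cstdint>

  struct Node {
    std::atomic<Node*> next{nullptr};
  };

  static const intptr_t LOCK_BIT = 1;

  static Node* marked(Node* p)   { return (Node*)((intptr_t)p |  LOCK_BIT); }
  static Node* unmarked(Node* p) { return (Node*)((intptr_t)p & ~LOCK_BIT); }

  // Try to lock 'n' by setting LOCK_BIT in its next field. The CAS fails if
  // another thread already holds the bit or if next changed underneath us.
  static bool try_lock(Node* n) {
    Node* nxt = unmarked(n->next.load());
    return n->next.compare_exchange_strong(nxt, marked(nxt));
  }

  // Clear LOCK_BIT; only the lock holder should do this.
  static void unlock(Node* n) {
    n->next.store(unmarked(n->next.load()));
  }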

@@ -206,25 +211,19 @@
     if (mid == NULL) {
       return NULL;  // The list is empty.
     }
     if (try_om_lock(mid)) {
       if (Atomic::load(list_p) != mid) {
-        // The list head changed so we have to retry.
+        // The list head changed before we could lock it so we have to retry.
         om_unlock(mid);
         continue;
       }
       return mid;
     }
   }
 }
 
-// Return the unmarked next field in an ObjectMonitor. Note: the next
-// field may or may not have been marked with OM_LOCK_BIT originally.
-static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
-  return (ObjectMonitor*)((intptr_t)Atomic::load(&om->_next_om) & ~OM_LOCK_BIT);
-}
-
 #undef OM_LOCK_BIT
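[Editor's note] The retry loop in get_list_head_locked() builds on those helpers: lock whatever currently looks like the head, then re-check that it is still the head, because the head may have changed (e.g. a new node was prepended) between the load and the lock. A sketch in the same hypothetical Node terms as above:

  // Lock and return the list head, or nullptr if the list is empty.
  static Node* lock_list_head(std::atomic<Node*>* list) {
    for (;;) {
      Node* head = list->load();
      if (head == nullptr) {
        return nullptr;              // nothing to lock
      }
      if (try_lock(head)) {
        if (list->load() != head) {
          unlock(head);              // the head moved; drop the lock and retry
          continue;
        }
        return head;                 // locked and still the head
      }
    }
  }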
 
 
 // =====================> List Management functions
 

@@ -1150,22 +1149,23 @@
         // Only process with closure if the object is set.
         closure->do_monitor(mid);
       }
     }
     // unmarked_next() is not needed with g_block_list (no locking
-    // used with with block linkage _next_om fields).
+    // used with block linkage _next_om fields).
     block = (PaddedObjectMonitor*)Atomic::load(&block->_next_om);
   }
 }
 
 static bool monitors_used_above_threshold() {
-  if (Atomic::load(&LVars.population) == 0) {
+  int population = Atomic::load(&LVars.population);
+  if (population == 0) {
     return false;
   }
   if (MonitorUsedDeflationThreshold > 0) {
-    int monitors_used = Atomic::load(&LVars.population) - Atomic::load(&LVars.free_count);
-    int monitor_usage = (monitors_used * 100LL) / Atomic::load(&LVars.population);
+    int monitors_used = population - Atomic::load(&LVars.free_count);
+    int monitor_usage = (monitors_used * 100LL) / population;
     return monitor_usage > MonitorUsedDeflationThreshold;
   }
   return false;
 }
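[Editor's note] A quick worked example of the threshold test, using illustrative numbers: with LVars.population = 20,000 and LVars.free_count = 1,000,

  monitors_used = 20,000 - 1,000          = 19,000
  monitor_usage = (19,000 * 100) / 20,000 = 95

so the function returns true for any MonitorUsedDeflationThreshold below 95. The 100LL multiplier widens the intermediate product to 64 bits so a large monitors_used value cannot overflow a 32-bit int before the division.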
 

@@ -1276,10 +1276,11 @@
   // and list coherency traffic, but also tends to increase the
   // number of ObjectMonitors in circulation as well as the STW
   // scavenge costs.  As usual, we lean toward time in space-time
   // tradeoffs.
   const int MAXPRIVATE = 1024;
+  NoSafepointVerifier nsv;
 
   stringStream ss;
   for (;;) {
     ObjectMonitor* m;
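[Editor's note] The NoSafepointVerifier added here (and in om_release() and om_flush() below) is HotSpot's debug-build guard asserting that no safepoint can occur while the monitor lists are being manipulated; it compiles away in product builds. As a rough illustration of the RAII idea only, and not the real class from runtime/safepointVerifiers.hpp, a hypothetical guard could look like:

  // Hypothetical stand-in for illustration only.
  class NoSafepointScope {
    static thread_local int _depth;   // active guards on this thread
   public:
    NoSafepointScope()  { _depth++; }
    ~NoSafepointScope() { _depth--; }
    // A safepoint poll in this toy runtime would assert(active() == 0).
    static int active() { return _depth; }
  };
  thread_local int NoSafepointScope::_depth = 0;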
 

@@ -1383,67 +1384,88 @@
 
 void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
                                     bool from_per_thread_alloc) {
   guarantee(m->header().value() == 0, "invariant");
   guarantee(m->object() == NULL, "invariant");
+  NoSafepointVerifier nsv;
+
   stringStream ss;
   guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
             "%s, recursions=" INTX_FORMAT, m->is_busy_to_string(&ss),
             m->_recursions);
   // _next_om is used for both per-thread in-use and free lists so
   // we have to remove 'm' from the in-use list first (as needed).
   if (from_per_thread_alloc) {
     // Need to remove 'm' from om_in_use_list.
-    ObjectMonitor* cur_mid_in_use = NULL;
     ObjectMonitor* mid = NULL;
     ObjectMonitor* next = NULL;
-    bool extracted = false;
 
-    // We use the simpler lock-mid-as-we-go protocol to prevent races
-    // with a list walker thread since there are no parallel list
-    // deletions (deflations happen at a safepoint).
+    // This list walk can only race with another list walker since
+    // deflation can only happen at a safepoint so we don't have to
+    // worry about an ObjectMonitor being removed from this list
+    // while we are walking it.
+
+    // Lock the list head to avoid racing with another list walker.
     if ((mid = get_list_head_locked(&self->om_in_use_list)) == NULL) {
       fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self));
     }
     next = unmarked_next(mid);
-    while (true) {
       if (m == mid) {
-        // We found 'm' on the per-thread in-use list so try to extract it.
-        if (cur_mid_in_use == NULL) {
-          // mid is the list head and it is locked. Switch the list head
-          // to next which unlocks the list head, but leaves mid locked:
+      // First special case:
+      // 'm' matches mid, is the list head and is locked. Switch the list
+      // head to next which unlocks the list head, but leaves the extracted
+      // mid locked:
           Atomic::store(&self->om_in_use_list, next);
+    } else if (m == next) {
+      // Second special case:
+      // 'm' matches next after the list head and we already have the list
+      // head locked so set mid to what we are extracting:
+      mid = next;
+      // Lock mid to prevent races with a list walker:
+      om_lock(mid);
+      // Update next to what follows mid (if anything):
+      next = unmarked_next(mid);
+      // Switch next after the list head to new next which unlocks the
+      // list head, but leaves the extracted mid locked:
+      set_next(self->om_in_use_list, next);
+    } else {
+      // We have to search the list to find 'm'.
+      om_unlock(mid);  // unlock the list head
+      guarantee(next != NULL, "thread=" INTPTR_FORMAT ": om_in_use_list=" INTPTR_FORMAT
+                " is too short.", p2i(self), p2i(self->om_in_use_list));
+      // Our starting anchor is next after the list head which is the
+      // last ObjectMonitor we checked:
+      ObjectMonitor* anchor = next;
+      while ((mid = unmarked_next(anchor)) != NULL) {
+        if (m == mid) {
+          // We found 'm' on the per-thread in-use list so extract it.
+          om_lock(anchor);  // Lock the anchor so we can safely modify it.
+          // Update next to what follows mid (if anything):
+          next = unmarked_next(mid);
+          // Switch next after the anchor to new next which unlocks the
+          // anchor, but leaves the extracted mid locked:
+          set_next(anchor, next);
+          break;
         } else {
-          // mid is locked. Switch cur_mid_in_use's next field to next
-          // which is safe because we have no parallel list deletions,
-          // but we leave mid locked:
-          set_next(cur_mid_in_use, next);
+          anchor = mid;
+        }
+      }
         }
+
+    if (mid == NULL) {
+      // Reached end of the list and didn't find 'm' so:
+      fatal("thread=" INTPTR_FORMAT " must find m=" INTPTR_FORMAT "on om_in_use_list="
+            INTPTR_FORMAT, p2i(self), p2i(m), p2i(self->om_in_use_list));
+    }
+
         // At this point mid is disconnected from the in-use list so
         // its lock no longer has any effects on the in-use list.
-        extracted = true;
         Atomic::dec(&self->om_in_use_count);
         // Unlock mid, but leave the next value for any lagging list
         // walkers. It will get cleaned up when mid is prepended to
         // the thread's free list:
         om_unlock(mid);
-        break;
-      } else {
-        om_unlock(mid);
-        cur_mid_in_use = mid;
-      }
-      // All the list management is done so move on to the next one:
-      mid = next;
-      if (mid == NULL) {
-        // Reached end of the list and didn't find m so:
-        fatal("must find m=" INTPTR_FORMAT "on om_in_use_list=" INTPTR_FORMAT,
-              p2i(m), p2i(self->om_in_use_list));
-      }
-      // Lock mid so we can possibly extract it:
-      om_lock(mid);
-      next = unmarked_next(mid);
-    }
   }
 
   prepend_to_om_free_list(self, m);
 }
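[Editor's note] Abstracting away the ObjectMonitor details, the three-way split above reduces to the sketch below, reusing the hypothetical Node helpers from the earlier sketches. The assumptions mirror om_release(): other threads may walk the list and lock its head, nothing unlinks nodes concurrently, and 'm' is known to be on the list (the real code calls fatal() when it is not found).

  // Unlink a known node 'm' from 'list'.
  static void extract(std::atomic<Node*>* list, Node* m) {
    Node* head = lock_list_head(list);            // the list must not be empty
    Node* next = unmarked(head->next.load());
    if (m == head) {
      // Case 1: 'm' is the head. Publishing 'next' as the new head unlinks
      // 'm' and exposes an unlocked head to other threads.
      list->store(next);
      unlock(m);                                  // clear the bit set by lock_list_head()
    } else if (m == next) {
      // Case 2: 'm' directly follows the head. Point the head at whatever
      // follows 'm'; storing an unmarked pointer also drops the head's lock
      // bit in the same write.
      head->next.store(unmarked(m->next.load()));
    } else {
      // Case 3: search for 'm', remembering the last node checked as 'anchor'.
      unlock(head);
      Node* anchor = next;
      Node* mid;
      while ((mid = unmarked(anchor->next.load())) != nullptr) {
        if (mid == m) {
          while (!try_lock(anchor)) { }           // lock the predecessor
          // Splice 'm' out; the unmarked store also unlocks the anchor.
          anchor->next.store(unmarked(m->next.load()));
          break;
        }
        anchor = mid;
      }
    }
    // 'm' is now off the list. Its stale next field is left for any lagging
    // walker and gets overwritten when 'm' is pushed onto a free list.
  }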
 

@@ -1463,10 +1485,11 @@
 void ObjectSynchronizer::om_flush(Thread* self) {
   // Process the per-thread in-use list first to be consistent.
   int in_use_count = 0;
   ObjectMonitor* in_use_list = NULL;
   ObjectMonitor* in_use_tail = NULL;
+  NoSafepointVerifier nsv;
 
   // This function can race with a list walker thread so we lock the
   // list head to prevent confusion.
   if ((in_use_list = get_list_head_locked(&self->om_in_use_list)) != NULL) {
     // At this point, we have locked the in-use list head so a racing

@@ -1841,12 +1864,11 @@
 
     assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
            p2i(mid->object()));
 
     // Move the deflated ObjectMonitor to the working free list
-    // defined by free_head_p and free_tail_p. The working list is
-    // local so no need for a memory barrier.
+    // defined by free_head_p and free_tail_p.
     if (*free_head_p == NULL) *free_head_p = mid;
     if (*free_tail_p != NULL) {
       // We append to the list so the caller can use mid->_next_om
       // to fix the linkages in its context.
       ObjectMonitor* prevtail = *free_tail_p;
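[Editor's note] The working free list assembled through free_head_p and free_tail_p stays local to the calling thread until it is later prepended to a shared free list, so plain stores suffice. In the hypothetical Node terms used earlier, the head/tail append pattern is:

  // Append 'n' to a caller-local working list tracked by *head_p / *tail_p.
  static void append_to_working_list(Node** head_p, Node** tail_p, Node* n) {
    if (*head_p == nullptr) {
      *head_p = n;                  // the first node also becomes the head
    }
    if (*tail_p != nullptr) {
      (*tail_p)->next.store(n);     // link the previous tail to 'n'
    }
    *tail_p = n;                    // 'n' is the new tail; the caller fixes
                                    // or NULL-terminates its next field later
  }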

@@ -1889,64 +1911,45 @@
   ObjectMonitor* cur_mid_in_use = NULL;
   ObjectMonitor* mid = NULL;
   ObjectMonitor* next = NULL;
   int deflated_count = 0;
 
-  // We use the simpler lock-mid-as-we-go protocol to prevent races
-  // with a list walker thread since this caller is the only one doing
-  // deletions on this list during the safepoint.
-  if ((mid = get_list_head_locked(list_p)) == NULL) {
-    return 0;  // The list is empty so nothing to deflate.
-  }
-  next = unmarked_next(mid);
+  // This list walk executes at a safepoint and does not race with any
+  // other list walkers.
 
-  while (true) {
+  for (mid = Atomic::load(list_p); mid != NULL; mid = next) {
+    next = unmarked_next(mid);
     oop obj = (oop) mid->object();
     if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
       // Deflation succeeded and already updated free_head_p and
       // free_tail_p as needed. Finish the move to the local free list
       // by unlinking mid from the global or per-thread in-use list.
       if (cur_mid_in_use == NULL) {
-        // mid is the list head and it is locked. Switch the list head
-        // to next which unlocks the list head, but leaves mid locked:
+        // mid is the list head so switch the list head to next:
         Atomic::store(list_p, next);
       } else {
-        // mid is locked. Switch cur_mid_in_use's next field to next
-        // which is safe because we have no parallel list deletions,
-        // but we leave mid locked:
+        // Switch cur_mid_in_use's next field to next:
         set_next(cur_mid_in_use, next);
       }
-      // At this point mid is disconnected from the in-use list so
-      // its lock no longer has any effects on the in-use list.
+      // At this point mid is disconnected from the in-use list.
       deflated_count++;
       Atomic::dec(count_p);
-      // mid is current tail in the free_head_p list so NULL terminate it
-      // (which also unlocks it):
+      // mid is current tail in the free_head_p list so NULL terminate it:
       set_next(mid, NULL);
     } else {
-      om_unlock(mid);
       cur_mid_in_use = mid;
     }
-    // All the list management is done so move on to the next one:
-    mid = next;
-    if (mid == NULL) {
-      break;  // Reached end of the list so nothing more to deflate.
-    }
-    // Lock mid so we can possibly deflate it:
-    om_lock(mid);
-    next = unmarked_next(mid);
   }
   return deflated_count;
 }
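[Editor's note] Because this walk now runs only at a safepoint with no competing walkers, the unlink logic reduces to the ordinary single-threaded previous-pointer pattern. A stand-alone sketch in the same hypothetical Node terms, with should_remove() standing in for the deflation test:

  // Remove every node that should_remove() selects; returns the count.
  static int filter_list(std::atomic<Node*>* list, bool (*should_remove)(Node*)) {
    int removed = 0;
    Node* prev = nullptr;
    Node* next = nullptr;
    for (Node* cur = list->load(); cur != nullptr; cur = next) {
      next = unmarked(cur->next.load());
      if (should_remove(cur)) {
        if (prev == nullptr) {
          list->store(next);        // removing the current list head
        } else {
          prev->next.store(next);   // splice around 'cur'
        }
        cur->next.store(nullptr);   // detach 'cur' (it moves to a free list)
        removed++;
      } else {
        prev = cur;
      }
    }
    return removed;
  }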
 
 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
   counters->n_in_use = 0;              // currently associated with objects
   counters->n_in_circulation = 0;      // extant
   counters->n_scavenged = 0;           // reclaimed (global and per-thread)
   counters->per_thread_scavenged = 0;  // per-thread scavenge total
   counters->per_thread_times = 0.0;    // per-thread scavenge times
-  OrderAccess::storestore();           // flush inits for worker threads
 }
 
 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   bool deflated = false;

@@ -1972,11 +1975,10 @@
     Atomic::add(&counters->n_in_use, Atomic::load(&LVars.in_use_count));
   }
 
   if (free_head_p != NULL) {
     // Move the deflated ObjectMonitors back to the global free list.
-    // The working list is local so no need for a memory barrier.
     guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
 #ifdef ASSERT
     ObjectMonitor* l_next_om = Atomic::load(&free_tail_p->_next_om);
 #endif
     assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));

@@ -2042,11 +2044,10 @@
   int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
   Atomic::add(&counters->n_in_use, Atomic::load(&thread->om_in_use_count));
 
   if (free_head_p != NULL) {
     // Move the deflated ObjectMonitors back to the global free list.
-    // The working list is local so no need for a memory barrier.
     guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
 #ifdef ASSERT
     ObjectMonitor* l_next_om = Atomic::load(&free_tail_p->_next_om);
 #endif
     assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));

@@ -2583,11 +2584,11 @@
       size_t diff = mon - blk;
       assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
       return 1;
     }
     // unmarked_next() is not needed with g_block_list (no locking
-    // used with with block linkage _next_om fields).
+    // used with block linkage _next_om fields).
     block = (PaddedObjectMonitor*)Atomic::load(&block->_next_om);
   }
   return 0;
 }
 