< prev index next >

src/hotspot/share/runtime/synchronizer.cpp

Print this page
rev 56044 : imported patch 8230184.patch
rev 56046 : v2.00 -> v2.05 (CR5/v2.05/8-for-jdk13) patches combined into one; merge with 8229212.patch; merge with jdk-14+11; merge with 8230184.patch.
rev 56047 : renames, comment cleanups and additions, whitespace and indent fixes; add PaddedObjectMonitor typedef to make 'PaddedEnd<ObjectMonitor' cleanups easier; add a couple of missing 'private' decls; delete unused next() function; merge pieces from dcubed.monitor_deflate_conc.v2.06d in dcubed.monitor_deflate_conc.v2.06[ac]; merge with 8229212.patch; merge with jdk-14+11; merge with 8230184.patch.
rev 56048 : Add OM_CACHE_LINE_SIZE so that ObjectMonitor cache line sizes can be experimented with independently of DEFAULT_CACHE_LINE_SIZE; for SPARC and X64 configs that use 128 for DEFAULT_CACHE_LINE_SIZE, we are experimenting with 64; move _previous_owner_tid and _allocation_state fields to share the cache line with ObjectMonitor::_header; put ObjectMonitor::_ref_count on its own cache line after _owner; add 'int* count_p' parameter to deflate_monitor_list() and deflate_monitor_list_using_JT() and push counter updates down to where the ObjectMonitors are actually removed from the in-use lists; monitors_iterate() async deflation check should use negative ref_count; add 'JavaThread* target' param to deflate_per_thread_idle_monitors_using_JT() add deflate_common_idle_monitors_using_JT() to make it clear which JavaThread* is the target of the work and which is the calling JavaThread* (self); g_free_list, g_om_in_use_list and g_om_in_use_count are now static to synchronizer.cpp (reduce scope); add more diagnostic info to some assert()'s; minor code cleanups and code motion; save_om_ptr() should detect a race with a deflating thread that is bailing out and cause a retry when the ref_count field is not positive; merge with jdk-14+11; add special GC support for TestHumongousClassLoader.java; merge with 8230184.patch.

@@ -116,23 +116,24 @@
 #define NINFLATIONLOCKS 256
 static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];
 
 // global list of blocks of monitors
 PaddedObjectMonitor* volatile ObjectSynchronizer::g_block_list = NULL;
+bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
+bool volatile ObjectSynchronizer::_is_special_deflation_requested = false;
+jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
+
 // Global ObjectMonitor free list. Newly allocated and deflated
 // ObjectMonitors are prepended here.
-ObjectMonitor* volatile ObjectSynchronizer::g_free_list = NULL;
+static ObjectMonitor* volatile g_free_list = NULL;
 // Global ObjectMonitor in-use list. When a JavaThread is exiting,
 // ObjectMonitors on its per-thread in-use list are prepended here.
-ObjectMonitor* volatile ObjectSynchronizer::g_om_in_use_list = NULL;
-int ObjectSynchronizer::g_om_in_use_count = 0;  // # on g_om_in_use_list
-bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
-bool volatile ObjectSynchronizer::_is_special_deflation_requested = false;
-jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
+static ObjectMonitor* volatile g_om_in_use_list = NULL;
 
 static volatile intptr_t gListLock = 0;   // protects global monitor lists
 static volatile int g_om_free_count = 0;  // # on g_free_list
+static volatile int g_om_in_use_count = 0;  // # on g_om_in_use_list
 static volatile int g_om_population = 0;  // # Extant -- in circulation
 
 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
 
 

@@ -580,19 +581,19 @@
 // As a general policy we use "volatile" to control compiler-based reordering
 // and explicit fences (barriers) to control for architectural reordering
 // performed by the CPU(s) or platform.
 
 struct SharedGlobals {
-  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
+  // Pads now use OM_CACHE_LINE_SIZE so this struct's false-sharing guards
+  // track the ObjectMonitor cache-line-size experiments independently of
+  // DEFAULT_CACHE_LINE_SIZE.
+  char         _pad_prefix[OM_CACHE_LINE_SIZE];
   // These are highly shared mostly-read variables.
   // To avoid false-sharing they need to be the sole occupants of a cache line.
   volatile int stw_random;
   volatile int stw_cycle;
-  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
+  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
   // Hot RW variable -- Sequester to avoid false-sharing
   volatile int hc_sequence;
-  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
+  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
 };
 
 static SharedGlobals GVars;
 static int MonitorScavengeThreshold = 1000000;
 static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

@@ -996,16 +997,17 @@
       ObjectMonitor* mid = (ObjectMonitor *)(block + i);
       if (mid->is_active()) {
         ObjectMonitorHandle omh(mid);
 
         if (mid->object() == NULL ||
-            (AsyncDeflateIdleMonitors && mid->_owner == DEFLATER_MARKER)) {
+            (AsyncDeflateIdleMonitors && mid->ref_count() < 0)) {
           // Only process with closure if the object is set.
           // For async deflation, race here if monitor is not owned!
          // The above ref_count bump (in ObjectMonitorHandle ctor)
           // will cause subsequent async deflation to skip it.
-          // However, previous or concurrent async deflation is a race.
+          // However, previous or concurrent async deflation is a race
+          // so skip this ObjectMonitor if it is being async deflated.
           continue;
         }
         closure->do_monitor(mid);
       }
     }

@@ -1096,12 +1098,11 @@
 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   // The oops_do() phase does not overlap with monitor deflation
   // so no need to update the ObjectMonitor's ref_count for this
   // ObjectMonitor* use.
-  ObjectMonitor* mid;
-  for (mid = list; mid != NULL; mid = mid->_next_om) {
+  for (ObjectMonitor* mid = list; mid != NULL; mid = mid->_next_om) {
     if (mid->object() != NULL) {
       f->do_oop((oop*)mid->object_addr());
     }
   }
 }

@@ -1200,11 +1201,11 @@
       // Clean up your own mess (Gibbs Rule 45). Otherwise, skip this
       // deflation. deflate_global_idle_monitors_using_JT() is called
       // by the ServiceThread. Per-thread async deflation is triggered
       // by the ServiceThread via om_request_deflation.
       debug_only(jt->check_for_valid_safepoint_state(false);)
-      ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT();
+      ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT(jt);
     }
   }
 
   stringStream ss;
   for (;;) {

@@ -1287,14 +1288,14 @@
     // A better solution would be to use C++ placement-new.
     // BEWARE: As it stands currently, we don't run the ctors!
     assert(_BLOCKSIZE > 1, "invariant");
     size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
     PaddedObjectMonitor* temp;
-    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
+    size_t aligned_size = neededsize + (OM_CACHE_LINE_SIZE - 1);
     void* real_malloc_addr = (void*)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
-    temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, DEFAULT_CACHE_LINE_SIZE);
+    temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, OM_CACHE_LINE_SIZE);
 
     // NOTE: (almost) no way to recover if allocation failed.
     // We might be able to induce a STW safepoint and scavenge enough
     // ObjectMonitors to permit progress.
     if (temp == NULL) {

@@ -1412,50 +1413,48 @@
 // With AsyncDeflateIdleMonitors, deflate_global_idle_monitors_using_JT()
 // and deflate_per_thread_idle_monitors_using_JT() (in another thread) can
 // run at the same time as om_flush() so we have to be careful.
 
 void ObjectSynchronizer::om_flush(Thread* self) {
-  ObjectMonitor* free_list = self->om_free_list;
-  ObjectMonitor* free_tail = NULL;
-  int free_count = 0;
-  if (free_list != NULL) {
-    ObjectMonitor* s;
-    // The thread is going away. Set 'free_tail' to the last per-thread free
-    // monitor which will be linked to g_free_list below under the gListLock.
-    stringStream ss;
-    for (s = free_list; s != NULL; s = s->_next_om) {
-      free_count++;
-      free_tail = s;
-      guarantee(s->object() == NULL, "invariant");
-      guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
-    }
-    guarantee(free_tail != NULL, "invariant");
-    ADIM_guarantee(self->om_free_count == free_count, "free-count off");
-    self->om_free_list = NULL;
-    self->om_free_count = 0;
-  }
-
+  int in_use_count = 0;
   ObjectMonitor* in_use_list = self->om_in_use_list;
   ObjectMonitor* in_use_tail = NULL;
-  int in_use_count = 0;
   if (in_use_list != NULL) {
     // The thread is going away, however the ObjectMonitors on the
     // om_in_use_list may still be in-use by other threads. Link
     // them to in_use_tail, which will be linked into the global
     // in-use list g_om_in_use_list below, under the gListLock.
-    ObjectMonitor *cur_om;
-    for (cur_om = in_use_list; cur_om != NULL; cur_om = cur_om->_next_om) {
+    for (ObjectMonitor* cur_om = in_use_list; cur_om != NULL; cur_om = cur_om->_next_om) {
       in_use_tail = cur_om;
       in_use_count++;
       ADIM_guarantee(cur_om->is_active(), "invariant");
     }
     guarantee(in_use_tail != NULL, "invariant");
     ADIM_guarantee(self->om_in_use_count == in_use_count, "in-use count off");
     self->om_in_use_list = NULL;
     self->om_in_use_count = 0;
   }
 
+  int free_count = 0;
+  ObjectMonitor* free_list = self->om_free_list;
+  ObjectMonitor* free_tail = NULL;
+  if (free_list != NULL) {
+    // The thread is going away. Set 'free_tail' to the last per-thread free
+    // monitor which will be linked to g_free_list below under the gListLock.
+    stringStream ss;
+    for (ObjectMonitor* s = free_list; s != NULL; s = s->_next_om) {
+      free_count++;
+      free_tail = s;
+      guarantee(s->object() == NULL, "invariant");
+      guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
+    }
+    guarantee(free_tail != NULL, "invariant");
+    ADIM_guarantee(self->om_free_count == free_count, "free-count off");
+    self->om_free_list = NULL;
+    self->om_free_count = 0;
+  }
+
   Thread::muxAcquire(&gListLock, "om_flush");
   if (free_tail != NULL) {
     free_tail->_next_om = g_free_list;
     g_free_list = free_list;
     g_om_free_count += free_count;

@@ -1853,11 +1852,12 @@
     if (*free_tail_p != NULL) {
       // We append to the list so the caller can use mid->_next_om
       // to fix the linkages in its context.
       ObjectMonitor* prevtail = *free_tail_p;
       // Should have been cleaned up by the caller:
-      assert(prevtail->_next_om == NULL, "cleaned up deflated?");
+      assert(prevtail->_next_om == NULL, "must be NULL: _next_om="
+             INTPTR_FORMAT, p2i(prevtail->_next_om));
       prevtail->_next_om = mid;
     }
     *free_tail_p = mid;
     // At this point, mid->_next_om still refers to its current
     // value and another ObjectMonitor's _next_om field still

@@ -2020,15 +2020,16 @@
 //
 // See also ParallelSPCleanupTask and
 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
 // Threads::parallel_java_threads_do() in thread.cpp.
 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
+                                             int* count_p,
                                              ObjectMonitor** free_head_p,
                                              ObjectMonitor** free_tail_p) {
+  ObjectMonitor* cur_mid_in_use = NULL;
   ObjectMonitor* mid;
   ObjectMonitor* next;
-  ObjectMonitor* cur_mid_in_use = NULL;
   int deflated_count = 0;
 
   for (mid = *list_p; mid != NULL;) {
     oop obj = (oop) mid->object();
     if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {

@@ -2042,10 +2043,11 @@
       }
       next = mid->_next_om;
       mid->_next_om = NULL;  // This mid is current tail in the free_head_p list
       mid = next;
       deflated_count++;
+      *count_p = *count_p - 1;
     } else {
       cur_mid_in_use = mid;
       mid = mid->_next_om;
     }
   }

@@ -2058,19 +2060,20 @@
 // Caller acquires gListLock as appropriate. If a safepoint has started,
 // then we save state via saved_mid_in_use_p and return to the caller to
 // honor the safepoint.
 //
 int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p,
+                                                      int* count_p,
                                                       ObjectMonitor** free_head_p,
                                                       ObjectMonitor** free_tail_p,
                                                       ObjectMonitor** saved_mid_in_use_p) {
   assert(AsyncDeflateIdleMonitors, "sanity check");
   assert(Thread::current()->is_Java_thread(), "precondition");
 
+  ObjectMonitor* cur_mid_in_use = NULL;
   ObjectMonitor* mid;
   ObjectMonitor* next;
-  ObjectMonitor* cur_mid_in_use = NULL;
   int deflated_count = 0;
 
   if (*saved_mid_in_use_p == NULL) {
     // No saved state so start at the beginning.
     mid = *list_p;

@@ -2098,10 +2101,11 @@
       mid->_next_om = NULL;
       // At this point mid is disconnected from the in-use list
       // and is the current tail in the free_head_p list.
       mid = next;
       deflated_count++;
+      *count_p = *count_p - 1;
     } else {
       // mid is considered in-use if it does not have an associated
       // Java object or mid is not old or deflation did not succeed.
       // A mid->is_new() node can be seen here when it is freshly
       // returned by om_alloc() (and skips the deflation code path).

@@ -2167,25 +2171,26 @@
   // Note: the thread-local monitors lists get deflated in
   // a separate pass. See deflate_thread_local_monitors().
 
   // For moribund threads, scan g_om_in_use_list
   int deflated_count = 0;
-  if (g_om_in_use_list) {
+  if (g_om_in_use_list != NULL) {
+    // Update n_in_circulation before g_om_in_use_count is updated by deflation.
     counters->n_in_circulation += g_om_in_use_count;
-    deflated_count = deflate_monitor_list((ObjectMonitor **)&g_om_in_use_list, &free_head_p, &free_tail_p);
-    g_om_in_use_count -= deflated_count;
-    counters->n_scavenged += deflated_count;
+    deflated_count = deflate_monitor_list((ObjectMonitor**)&g_om_in_use_list, (int*)&g_om_in_use_count, &free_head_p, &free_tail_p);
     counters->n_in_use += g_om_in_use_count;
   }
 
   if (free_head_p != NULL) {
     // Move the deflated ObjectMonitors back to the global free list.
-    guarantee(free_tail_p != NULL && counters->n_scavenged > 0, "invariant");
-    assert(free_tail_p->_next_om == NULL, "invariant");
+    guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
+    assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
+           INTPTR_FORMAT, p2i(free_tail_p->_next_om));
     // constant-time list splice - prepend scavenged segment to g_free_list
     free_tail_p->_next_om = g_free_list;
     g_free_list = free_head_p;
+    counters->n_scavenged += deflated_count;
   }
   Thread::muxRelease(&gListLock);
   timer.stop();
 
   LogStreamHandle(Debug, monitorinflation) lsh_debug;

@@ -2209,25 +2214,26 @@
   JavaThread* self = JavaThread::current();
 
   deflate_common_idle_monitors_using_JT(true /* is_global */, self);
 }
 
-// Deflate per-thread idle ObjectMonitors using a JavaThread.
+// Deflate the specified JavaThread's idle ObjectMonitors using a JavaThread.
+// 'target' names the JavaThread whose per-thread in-use monitors are to be
+// deflated; the calling JavaThread (self) may be a different thread.
 //
-void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT() {
+void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT(JavaThread* target) {
   assert(AsyncDeflateIdleMonitors, "sanity check");
   assert(Thread::current()->is_Java_thread(), "precondition");
-  JavaThread* self = JavaThread::current();
 
-  self->om_request_deflation = false;
+  // Clear the target's pending deflation request before doing the work.
+  target->om_request_deflation = false;
 
-  deflate_common_idle_monitors_using_JT(false /* !is_global */, self);
+  deflate_common_idle_monitors_using_JT(false /* !is_global */, target);
 }
 
 // Deflate global or per-thread idle ObjectMonitors using a JavaThread.
 //
-void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* self) {
+void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* target) {
+  JavaThread* self = JavaThread::current();
+
   int deflated_count = 0;
   ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged ObjectMonitors
   ObjectMonitor* free_tail_p = NULL;
   ObjectMonitor* saved_mid_in_use_p = NULL;
   elapsedTimer timer;

@@ -2238,37 +2244,35 @@
 
   if (is_global) {
     Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(1)");
     OM_PERFDATA_OP(MonExtant, set_value(g_om_in_use_count));
   } else {
-    OM_PERFDATA_OP(MonExtant, inc(self->om_in_use_count));
+    OM_PERFDATA_OP(MonExtant, inc(target->om_in_use_count));
   }
 
   do {
     int local_deflated_count;
     if (is_global) {
-      local_deflated_count = deflate_monitor_list_using_JT((ObjectMonitor **)&g_om_in_use_list, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
-      g_om_in_use_count -= local_deflated_count;
+      local_deflated_count = deflate_monitor_list_using_JT((ObjectMonitor**)&g_om_in_use_list, (int*)&g_om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
     } else {
-      local_deflated_count = deflate_monitor_list_using_JT(self->om_in_use_list_addr(), &free_head_p, &free_tail_p, &saved_mid_in_use_p);
-      self->om_in_use_count -= local_deflated_count;
+      local_deflated_count = deflate_monitor_list_using_JT(&target->om_in_use_list, &target->om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
     }
     deflated_count += local_deflated_count;
 
     if (free_head_p != NULL) {
       // Move the deflated ObjectMonitors to the global free list.
       guarantee(free_tail_p != NULL && local_deflated_count > 0, "free_tail_p=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(free_tail_p), local_deflated_count);
-      assert(free_tail_p->_next_om == NULL, "invariant");
+      assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
+             INTPTR_FORMAT, p2i(free_tail_p->_next_om));
 
       if (!is_global) {
         Thread::muxAcquire(&gListLock, "deflate_per_thread_idle_monitors_using_JT(2)");
       }
       // Constant-time list splice - prepend scavenged segment to g_free_list.
       free_tail_p->_next_om = g_free_list;
       g_free_list = free_head_p;
 
-      g_om_free_count += local_deflated_count;
       OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
       if (!is_global) {
         Thread::muxRelease(&gListLock);
       }
     }

@@ -2281,11 +2285,11 @@
       timer.stop();
       {
         if (is_global) {
           log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
         } else {
-          log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(self));
+          log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(target));
         }
         assert(SafepointSynchronize::is_synchronizing(), "sanity check");
         ThreadBlockInVM blocker(self);
       }
       // Prepare for another loop after the safepoint.

@@ -2314,11 +2318,11 @@
   }
   if (ls != NULL) {
     if (is_global) {
       ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
     } else {
-      ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(self), timer.seconds(), deflated_count);
+      ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(target), timer.seconds(), deflated_count);
     }
   }
 }
 
 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {

@@ -2333,12 +2337,10 @@
   bool needs_special_deflation = is_special_deflation_requested();
   if (!AsyncDeflateIdleMonitors || needs_special_deflation) {
     // AsyncDeflateIdleMonitors does not use these counters unless
     // there is a special deflation request.
 
-    g_om_free_count += counters->n_scavenged;
-
     OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
     OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
   }
 
   if (log_is_enabled(Debug, monitorinflation)) {

@@ -2383,29 +2385,29 @@
   if (log_is_enabled(Info, safepoint, cleanup) ||
       log_is_enabled(Info, monitorinflation)) {
     timer.start();
   }
 
-  int deflated_count = deflate_monitor_list(thread->om_in_use_list_addr(), &free_head_p, &free_tail_p);
-
-  Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors");
-
-  // Adjust counters
+  // Update n_in_circulation before om_in_use_count is updated by deflation.
   counters->n_in_circulation += thread->om_in_use_count;
-  thread->om_in_use_count -= deflated_count;
-  counters->n_scavenged += deflated_count;
+
+  int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
   counters->n_in_use += thread->om_in_use_count;
-  counters->per_thread_scavenged += deflated_count;
+
+  Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors");
 
   if (free_head_p != NULL) {
     // Move the deflated ObjectMonitors back to the global free list.
     guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
-    assert(free_tail_p->_next_om == NULL, "invariant");
+    assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
+           INTPTR_FORMAT, p2i(free_tail_p->_next_om));
 
     // constant-time list splice - prepend scavenged segment to g_free_list
     free_tail_p->_next_om = g_free_list;
     g_free_list = free_head_p;
+    counters->n_scavenged += deflated_count;
+    counters->per_thread_scavenged += deflated_count;
   }
 
   timer.stop();
   // Safepoint logging cares about cumulative per_thread_times and
   // we'll capture most of the cost, but not the muxRelease() which
< prev index next >