
src/hotspot/share/runtime/synchronizer.cpp

rev 58110 : v2.09a with 8235795, 8235931 and 8236035 extracted; rebased to jdk-14+28; merge with 8236035.patch.cr1; merge with 8235795.patch.cr1; merge with 8236035.patch.cr2; merge with 8235795.patch.cr2; merge with 8235795.patch.cr3.
rev 58111 : See CR9-to-CR10-changes; merge with jdk-15+11.


 122 PaddedObjectMonitor* ObjectSynchronizer::g_block_list = NULL;
 123 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
 124 bool volatile ObjectSynchronizer::_is_special_deflation_requested = false;
 125 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
 126 
 127 struct ObjectMonitorListGlobals {
 128   char         _pad_prefix[OM_CACHE_LINE_SIZE];
 129   // These are highly shared list related variables.
 130   // To avoid false-sharing they need to be the sole occupants of a cache line.
 131 
 132   // Global ObjectMonitor free list. Newly allocated and deflated
 133   // ObjectMonitors are prepended here.
 134   ObjectMonitor* _free_list;
 135   DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
 136 
 137   // Global ObjectMonitor in-use list. When a JavaThread is exiting,
 138   // ObjectMonitors on its per-thread in-use list are prepended here.
 139   ObjectMonitor* _in_use_list;
 140   DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
 141 
 142   // Global ObjectMonitor wait list. If HandshakeAfterDeflateIdleMonitors
 143   // is true, deflated ObjectMonitors wait on this list until after a
 144   // handshake or a safepoint for platforms that don't support handshakes.
 145   // After the handshake or safepoint, the deflated ObjectMonitors are
 146   // prepended to free_list.
 147   ObjectMonitor* _wait_list;
 148   DEFINE_PAD_MINUS_SIZE(3, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
 149 
 150   int _free_count;    // # on free_list
 151   DEFINE_PAD_MINUS_SIZE(4, OM_CACHE_LINE_SIZE, sizeof(int));
 152 
 153   int _in_use_count;  // # on in_use_list
 154   DEFINE_PAD_MINUS_SIZE(5, OM_CACHE_LINE_SIZE, sizeof(int));
 155 
 156   int _population;    // # Extant -- in circulation
 157   DEFINE_PAD_MINUS_SIZE(6, OM_CACHE_LINE_SIZE, sizeof(int));
 158 
 159   int _wait_count;    // # on wait_list
 160   DEFINE_PAD_MINUS_SIZE(7, OM_CACHE_LINE_SIZE, sizeof(int));
 161 };
 162 static ObjectMonitorListGlobals om_list_globals;
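The padding in ObjectMonitorListGlobals keeps each hot list head and counter on its own cache line so that updates to one field do not ping-pong the line holding another. A minimal sketch of the same false-sharing defense, using hypothetical names and a hard-coded 64-byte line instead of the OM_CACHE_LINE_SIZE/DEFINE_PAD_MINUS_SIZE macros:

struct PaddedCounters {
  static const int cache_line = 64;       // assumed cache-line size
  char pad_prefix[cache_line];            // shield from whatever precedes this struct
  int  free_count;
  char pad1[cache_line - sizeof(int)];    // fill out free_count's line
  int  in_use_count;
  char pad2[cache_line - sizeof(int)];    // fill out in_use_count's line
};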
 163 
 164 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
 165 
 166 


 303 
 304   // Second we handle om_list_globals._free_list:
 305   prepend_list_to_common(new_blk + 1, &new_blk[_BLOCKSIZE - 1], _BLOCKSIZE - 1,
 306                          &om_list_globals._free_list, &om_list_globals._free_count);
 307 }
 308 
 309 // Prepend a list of ObjectMonitors to om_list_globals._free_list.
 310 // 'tail' is the last ObjectMonitor in the list and there are 'count'
 311 // on the list. Also updates om_list_globals._free_count.
 312 static void prepend_list_to_global_free_list(ObjectMonitor* list,
 313                                              ObjectMonitor* tail, int count) {
 314   prepend_list_to_common(list, tail, count, &om_list_globals._free_list,
 315                          &om_list_globals._free_count);
 316 }
 317 
 318 // Prepend a list of ObjectMonitors to om_list_globals._wait_list.
 319 // 'tail' is the last ObjectMonitor in the list and there are 'count'
 320 // on the list. Also updates om_list_globals._wait_count.
 321 static void prepend_list_to_global_wait_list(ObjectMonitor* list,
 322                                              ObjectMonitor* tail, int count) {
 323   assert(HandshakeAfterDeflateIdleMonitors, "sanity check");
 324   prepend_list_to_common(list, tail, count, &om_list_globals._wait_list,
 325                          &om_list_globals._wait_count);
 326 }
 327 
 328 // Prepend a list of ObjectMonitors to om_list_globals._in_use_list.
 329 // 'tail' is the last ObjectMonitor in the list and there are 'count'
 330 // on the list. Also updates om_list_globals._in_use_count.
 331 static void prepend_list_to_global_in_use_list(ObjectMonitor* list,
 332                                                ObjectMonitor* tail, int count) {
 333   prepend_list_to_common(list, tail, count, &om_list_globals._in_use_list,
 334                          &om_list_globals._in_use_count);
 335 }
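The three wrappers above all delegate to prepend_list_to_common(), which splices an already-linked chain onto the head of a global list and bumps the matching counter. A simplified sketch of that shape, assuming a bare CAS-on-head loop and hypothetical types (the real code uses a marked-pointer locking protocol on the list nodes instead):

#include <atomic>

struct Node { Node* next; };

// Prepend the pre-linked chain [list .. tail] ('count' nodes) onto *head_p
// and add 'count' to *count_p.
static void prepend_chain(Node* list, Node* tail, int count,
                          std::atomic<Node*>* head_p, std::atomic<int>* count_p) {
  Node* old_head = head_p->load();
  do {
    tail->next = old_head;                               // splice current head behind our tail
  } while (!head_p->compare_exchange_weak(old_head, list));
  count_p->fetch_add(count);                             // counter lags the list slightly
}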
 336 
 337 // Prepend an ObjectMonitor to the specified list. Also updates
 338 // the specified counter.
 339 static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
 340                               int* count_p) {
 341   while (true) {
 342     om_lock(m);  // Lock m so we can safely update its next field.
 343     ObjectMonitor* cur = NULL;


 518       // displaced to this thread's BasicLock. Make the displaced header
 519       // non-NULL so this BasicLock is not seen as recursive nor as
 520       // being locked. We do this unconditionally so that this thread's
 521       // BasicLock cannot be mis-interpreted by any stack walkers. For
 522       // performance reasons, stack walkers generally first check for
 523       // Biased Locking in the object's header, the second check is for
 524       // stack-locking in the object's header, the third check is for
 525       // recursive stack-locking in the displaced header in the BasicLock,
 526       // and last are the inflated Java Monitor (ObjectMonitor) checks.
 527       lock->set_displaced_header(markWord::unused_mark());
 528 
 529       if (owner == NULL && m->try_set_owner_from(NULL, self) == NULL) {
 530         assert(m->_recursions == 0, "invariant");
 531         return true;
 532       }
 533 
 534       if (AsyncDeflateIdleMonitors &&
 535           m->try_set_owner_from(DEFLATER_MARKER, self) == DEFLATER_MARKER) {
 536         // The deflation protocol finished the first part (setting owner),
 537         // but it failed the second part (making ref_count negative) and
 538         // bailed. Or the ObjectMonitor was async deflated and reused.
 539         // Acquired the monitor.
 540         assert(m->_recursions == 0, "invariant");
 541         return true;
 542       }
 543     }
 544     break;
 545   }
 546 
 547   // Note that we could inflate in quick_enter.
 548   // This is likely a useful optimization.
 549   // Critically, in quick_enter() we must not:
 550   // -- perform bias revocation, or
 551   // -- block indefinitely, or
 552   // -- reach a safepoint
 553 
 554   return false;        // revert to slow-path
 555 }
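try_set_owner_from(expected, self) in the two acquisition attempts above is a compare-and-exchange on the monitor's owner field that returns the owner value observed before the attempt, so the caller has acquired the monitor exactly when the return value equals the expected old owner. A minimal sketch of that contract, with hypothetical simplified types:

#include <atomic>

struct Monitor {
  std::atomic<void*> owner{nullptr};

  // Returns the owner seen before the attempt; acquisition succeeded iff
  // the returned value equals 'expected'.
  void* try_set_owner_from(void* expected, void* self) {
    owner.compare_exchange_strong(expected, self);
    return expected;    // on failure, compare_exchange stores the actual old owner here
  }
};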
 556 
 557 // -----------------------------------------------------------------------------
 558 // Monitor Enter/Exit
 559 // The interpreter and compiler assembly code tries to lock using the fast path


1018       hash = get_next_hash(self, obj);  // get a new hash
1019       temp = mark.copy_set_hash(hash);  // merge the hash into header
1020                                         // try to install the hash
1021       test = obj->cas_set_mark(temp, mark);
1022       if (test == mark) {               // if the hash was installed, return it
1023         return hash;
1024       }
1025       // Failed to install the hash. It could be that another thread
1026       // installed the hash just before our attempt or inflation has
1027       // occurred or... so we fall thru to inflate the monitor for
1028       // stability and then install the hash.
1029     } else if (mark.has_monitor()) {
1030       ObjectMonitorHandle omh;
1031       if (!omh.save_om_ptr(obj, mark)) {
1032         // Lost a race with async deflation so try again.
1033         assert(AsyncDeflateIdleMonitors, "sanity check");
1034         continue;
1035       }
1036       monitor = omh.om_ptr();
1037       temp = monitor->header();
1038       // Allow for a lagging install_displaced_markword_in_object() to
1039       // have marked the ObjectMonitor's header/dmw field.
1040       assert(temp.is_neutral() || (AsyncDeflateIdleMonitors && temp.is_marked()),
1041              "invariant: header=" INTPTR_FORMAT, temp.value());
1042       hash = temp.hash();
1043       if (hash != 0) {                  // if it has a hash, just return it
1044         return hash;
1045       }
1046       // Fall thru so we only have one place that installs the hash in
1047       // the ObjectMonitor.
1048     } else if (self->is_lock_owned((address)mark.locker())) {
1049       // This is a stack lock owned by the calling thread so fetch the
1050       // displaced markWord from the BasicLock on the stack.
1051       temp = mark.displaced_mark_helper();
1052       assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1053       hash = temp.hash();
1054       if (hash != 0) {                  // if it has a hash, just return it
1055         return hash;
1056       }
1057       // WARNING:
1058       // The displaced header in the BasicLock on a thread's stack
1059       // is strictly immutable. It CANNOT be changed in ANY case.
1060       // So we have to inflate the stack lock into an ObjectMonitor
1061       // even if the current thread owns the lock. The BasicLock on
1062       // a thread's stack can be asynchronously read by other threads
1063       // during an inflate() call so any change to that stack memory
1064       // may not propagate to other threads correctly.
1065     }
1066 
1067     // Inflate the monitor to set the hash.
1068     ObjectMonitorHandle omh;
1069     inflate(&omh, self, obj, inflate_cause_hash_code);
1070     monitor = omh.om_ptr();
1071     // Load ObjectMonitor's header/dmw field and see if it has a hash.
1072     mark = monitor->header();
1073     // Allow for a lagging install_displaced_markword_in_object() to
1074     // have marked the ObjectMonitor's header/dmw field.
1075     assert(mark.is_neutral() || (AsyncDeflateIdleMonitors && mark.is_marked()),
1076            "invariant: header=" INTPTR_FORMAT, mark.value());
1077     hash = mark.hash();
1078     if (hash == 0) {                    // if it does not have a hash
1079       hash = get_next_hash(self, obj);  // get a new hash
1080       temp = mark.copy_set_hash(hash);  // merge the hash into header
1081       if (AsyncDeflateIdleMonitors && temp.is_marked()) {
1082         // A lagging install_displaced_markword_in_object() has marked
1083         // the ObjectMonitor's header/dmw field. We clear it to avoid
1084         // any confusion if we are able to set the hash.
1085         temp.set_unmarked();
1086       }
1087       assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1088       uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
1089       test = markWord(v);
1090       if (test != mark) {
1091         // The attempt to update the ObjectMonitor's header/dmw field
1092         // did not work. This can happen if another thread managed to
1093         // merge in the hash just before our cmpxchg(). With async
1094         // deflation, a lagging install_displaced_markword_in_object()
1095         // could have just marked or just unmarked the header/dmw field.
1096         // If we add any new usages of the header/dmw field, this code
1097         // will need to be updated.
1098         if (AsyncDeflateIdleMonitors) {
1099           // Since async deflation gives us two possible reasons for
1100           // the cmpxchg() to fail, it is easier to simply retry.
1101           continue;
1102         }
1103         hash = test.hash();
1104         assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1105         assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1106       }
1107     }
1108     // We finally get the hash.
1109     return hash;
1110   }
1111 }
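FastHashCode() above installs the hash with a CAS at one of two places: directly into the object's mark word when the object is neutral, or into the inflated ObjectMonitor's header/dmw field otherwise; in both cases losing the race means accepting the winning thread's hash. A stripped-down sketch of that install-or-accept pattern, assuming a plain word with a 31-bit hash field and none of the mark-word state handled above:

#include <atomic>
#include <cstdint>

static const uintptr_t kHashMask = 0x7FFFFFFF;   // assumed 31-bit hash field

// Install 'new_hash' unless another thread already did; return whichever
// hash ends up in the word.
static uintptr_t install_hash(std::atomic<uintptr_t>* word, uintptr_t new_hash) {
  uintptr_t observed = word->load();
  while ((observed & kHashMask) == 0) {                       // no hash yet
    uintptr_t desired = observed | (new_hash & kHashMask);    // merge hash into the word
    if (word->compare_exchange_weak(observed, desired)) {
      return new_hash;                                        // our install won
    }
    // 'observed' was refreshed by the failed CAS; re-check for a hash.
  }
  return observed & kHashMask;                                // someone else's hash
}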
1112 
1113 // Deprecated -- use FastHashCode() instead.
1114 
1115 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
1116   return FastHashCode(Thread::current(), obj());
1117 }
1118 
1119 
1120 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
1121                                                    Handle h_obj) {
1122   if (UseBiasedLocking) {


1248     }
1249 
1250     // Unlocked case, header in place
1251     // Cannot have assertion since this object may have been
1252     // locked by another thread when reaching here.
1253     // assert(mark.is_neutral(), "sanity check");
1254 
1255     return NULL;
1256   }
1257 }
1258 
1259 // Visitors ...
1260 
1261 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
1262   PaddedObjectMonitor* block = Atomic::load(&g_block_list);
1263   while (block != NULL) {
1264     assert(block->object() == CHAINMARKER, "must be a block header");
1265     for (int i = _BLOCKSIZE - 1; i > 0; i--) {
1266       ObjectMonitor* mid = (ObjectMonitor *)(block + i);
1267       ObjectMonitorHandle omh;
1268       if (!mid->is_free() && omh.set_om_ptr_if_safe(mid)) {
1269         // The ObjectMonitor* is not free and it has been made safe.
1270         if (mid->object() == NULL) {
1271           // Only process with closure if the object is set.
1272           continue;
1273         }
1274         closure->do_monitor(mid);
1275       }
1276     }
1277     // unmarked_next() is not needed with g_block_list (no locking
1278     // used with block linkage _next_om fields).
1279     block = (PaddedObjectMonitor*)block->next_om();
1280   }
1281 }
1282 
1283 static bool monitors_used_above_threshold() {
1284   int population = Atomic::load(&om_list_globals._population);
1285   if (population == 0) {
1286     return false;
1287   }
1288   if (MonitorUsedDeflationThreshold > 0) {
1289     int monitors_used = population - Atomic::load(&om_list_globals._free_count);
1290     if (HandshakeAfterDeflateIdleMonitors) {
1291       monitors_used -= Atomic::load(&om_list_globals._wait_count);
1292     }
1293     int monitor_usage = (monitors_used * 100LL) / population;
1294     return monitor_usage > MonitorUsedDeflationThreshold;
1295   }
1296   return false;
1297 }
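As a worked example of the computation above: with a population of 1000 monitors, a free count of 850 and (when HandshakeAfterDeflateIdleMonitors is set) a wait count of 50, monitors_used is 100 and monitor_usage is 10, so the function returns true only if MonitorUsedDeflationThreshold is below 10.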
1298 
1299 // Returns true if MonitorBound is set (> 0) and if the specified
1300 // cnt is > MonitorBound. Otherwise returns false.
1301 static bool is_MonitorBound_exceeded(const int cnt) {
1302   const int mx = MonitorBound;
1303   return mx > 0 && cnt > mx;
1304 }
1305 
1306 bool ObjectSynchronizer::is_async_deflation_needed() {
1307   if (!AsyncDeflateIdleMonitors) {
1308     return false;
1309   }
1310   if (is_async_deflation_requested()) {
1311     // Async deflation request.
1312     return true;
1313   }
1314   if (AsyncDeflationInterval > 0 &&
1315       time_since_last_async_deflation_ms() > AsyncDeflationInterval &&
1316       monitors_used_above_threshold()) {
1317     // It's been longer than our specified deflate interval and there
1318     // are too many monitors in use. We don't deflate more frequently
1319     // than AsyncDeflationInterval (unless is_async_deflation_requested)
1320     // in order to not swamp the ServiceThread.
1321     _last_async_deflation_time_ns = os::javaTimeNanos();
1322     return true;
1323   }
1324   int monitors_used = Atomic::load(&om_list_globals._population) -
1325                       Atomic::load(&om_list_globals._free_count);
1326   if (HandshakeAfterDeflateIdleMonitors) {
1327     monitors_used -= Atomic::load(&om_list_globals._wait_count);
1328   }
1329   if (is_MonitorBound_exceeded(monitors_used)) {
1330     // Not enough ObjectMonitors on the global free list.
1331     return true;
1332   }
1333   return false;
1334 }
1335 
1336 bool ObjectSynchronizer::needs_monitor_scavenge() {
1337   if (Atomic::load(&_forceMonitorScavenge) == 1) {
1338     log_info(monitorinflation)("Monitor scavenge needed, triggering safepoint cleanup.");
1339     return true;
1340   }
1341   return false;
1342 }
1343 
1344 bool ObjectSynchronizer::is_safepoint_deflation_needed() {
1345   if (!AsyncDeflateIdleMonitors) {
1346     if (monitors_used_above_threshold()) {
1347       // Too many monitors in use.
1348       return true;


1481       m->set_allocation_state(ObjectMonitor::New);
1482       prepend_to_om_in_use_list(self, m);
1483       return m;
1484     }
1485 
1486     // 2: try to allocate from the global om_list_globals._free_list
1487     // If we're using thread-local free lists then try
1488     // to reprovision the caller's free list.
1489     if (Atomic::load(&om_list_globals._free_list) != NULL) {
1490       // Reprovision the thread's om_free_list.
1491       // Use bulk transfers to reduce the allocation rate and heat
1492       // on various locks.
1493       for (int i = self->om_free_provision; --i >= 0;) {
1494         ObjectMonitor* take = take_from_start_of_global_free_list();
1495         if (take == NULL) {
1496           break;  // No more are available.
1497         }
1498         guarantee(take->object() == NULL, "invariant");
1499         if (AsyncDeflateIdleMonitors) {
1500           // We allowed 3 field values to linger during async deflation.
1501           // We clear header and restore ref_count here, but we leave
1502           // owner == DEFLATER_MARKER so the simple C2 ObjectMonitor
1503           // enter optimization can no longer race with async deflation
1504           // and reuse.
1505           take->set_header(markWord::zero());


1506           if (take->ref_count() < 0) {
1507             // Add back max_jint to restore the ref_count field to its
1508             // proper value.
1509             Atomic::add(&take->_ref_count, max_jint);
1510 
1511 #ifdef ASSERT
1512             jint l_ref_count = take->ref_count();
1513 #endif
1514             assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d",
1515                    l_ref_count, take->ref_count());
1516           }
1517         }
1518         take->Recycle();
1519         // Since we're taking from the global free-list, take must be Free.
1520         // om_release() also sets the allocation state to Free because it
1521         // is called from other code paths.
1522         assert(take->is_free(), "invariant");
1523         om_release(self, take, false);
1524       }
1525       self->om_free_provision += 1 + (self->om_free_provision / 2);
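After each refill pass the batch size grows by about 1.5x (om_free_provision += 1 + om_free_provision / 2), so, for example, a provision of 32 becomes 49 and then 74; threads that allocate monitors heavily take progressively larger bulk transfers from the global free list.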


2026       ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
2027       return;
2028     }
2029 
2030     // CASE: neutral
2031     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
2032     // If we know we're inflating for entry it's better to inflate by swinging a
2033     // pre-locked ObjectMonitor pointer into the object header.   A successful
2034     // CAS inflates the object *and* confers ownership to the inflating thread.
2035     // In the current implementation we use a 2-step mechanism where we CAS()
2036     // to inflate and then CAS() again to try to swing _owner from NULL to self.
2037     // An inflateTry() method that we could call from enter() would be useful.
2038 
2039     // Catch if the object's header is not neutral (not locked and
2040     // not marked is what we care about here).
2041     ADIM_guarantee(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
2042     ObjectMonitor* m = om_alloc(self);
2043     // prepare m for installation - set monitor to initial state
2044     m->Recycle();
2045     m->set_header(mark);
2046     // If we leave _owner == DEFLATER_MARKER here, then the simple C2
2047     // ObjectMonitor enter optimization can no longer race with async
2048     // deflation and reuse.

2049     m->set_object(object);
2050     m->_Responsible  = NULL;
2051     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
2052 
2053     omh_p->set_om_ptr(m);
2054 
2055     if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
2056       m->set_header(markWord::zero());
2057       m->set_object(NULL);
2058       m->Recycle();
2059       omh_p->set_om_ptr(NULL);
2060       // om_release() will reset the allocation state from New to Free.
2061       om_release(self, m, true);
2062       m = NULL;
2063       continue;
2064       // interference - the markword changed - just retry.
2065       // The state-transitions are one-way, so there's no chance of
2066       // live-lock -- "Inflated" is an absorbing state.
2067     }
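The TODO-FIXME above describes the current 2-step mechanism: one CAS to swing the ObjectMonitor pointer into the object's mark word, then a second CAS (in enter()) to move _owner from NULL to the entering thread. A compressed sketch of that shape, with hypothetical types, an assumed tag bit for the "has monitor" encoding, and none of the retry/cleanup handling shown above:

#include <atomic>
#include <cstdint>

struct Monitor {
  uintptr_t header = 0;
  std::atomic<void*> owner{nullptr};
};

struct Object {
  std::atomic<uintptr_t> mark{0};
};

static bool inflate_then_enter(Object* obj, Monitor* m, void* self, uintptr_t neutral_mark) {
  m->header = neutral_mark;                                  // remember the displaced header
  uintptr_t expected = neutral_mark;
  uintptr_t encoded = reinterpret_cast<uintptr_t>(m) | 0x2;  // assumed monitor tag bits
  if (!obj->mark.compare_exchange_strong(expected, encoded)) {
    return false;                                            // lost the inflation race; caller retries
  }
  void* no_owner = nullptr;
  return m->owner.compare_exchange_strong(no_owner, self);   // step 2: acquired iff owner was NULL
}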
2068 


2164 
2165   if (mid->is_busy() || mid->ref_count() != 0) {
2166     // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor*
2167     // is in use so no deflation.
2168     deflated = false;
2169   } else {
2170     // Deflate the monitor if it is no longer being used
2171     // It's idle - scavenge and return to the global free list
2172     // plain old deflation ...
2173     if (log_is_enabled(Trace, monitorinflation)) {
2174       ResourceMark rm;
2175       log_trace(monitorinflation)("deflate_monitor: "
2176                                   "object=" INTPTR_FORMAT ", mark="
2177                                   INTPTR_FORMAT ", type='%s'", p2i(obj),
2178                                   mark.value(), obj->klass()->external_name());
2179     }
2180 
2181     // Restore the header back to obj
2182     obj->release_set_mark(dmw);
2183     if (AsyncDeflateIdleMonitors) {
2184       // clear() expects the owner field to be NULL and we won't race
2185       // with the simple C2 ObjectMonitor enter optimization since
2186       // we're at a safepoint. DEFLATER_MARKER is the only non-NULL
2187       // value we should see here.
2188       mid->try_set_owner_from(DEFLATER_MARKER, NULL);
2189     }
2190     mid->clear();
2191 
2192     assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
2193            p2i(mid->object()));
2194     assert(mid->is_free(), "invariant");
2195 
2196     // Move the deflated ObjectMonitor to the working free list
2197     // defined by free_head_p and free_tail_p.
2198     if (*free_head_p == NULL) *free_head_p = mid;
2199     if (*free_tail_p != NULL) {
2200       // We append to the list so the caller can use mid->_next_om
2201       // to fix the linkages in its context.
2202       ObjectMonitor* prevtail = *free_tail_p;
2203       // Should have been cleaned up by the caller:
2204       // Note: Should not have to lock prevtail here since we're at a
2205       // safepoint and ObjectMonitors on the local free list should
2206       // not be accessed in parallel.
2207 #ifdef ASSERT


2294         const oop obj = (oop) mid->object();
2295         if (log_is_enabled(Trace, monitorinflation)) {
2296           ResourceMark rm;
2297           log_trace(monitorinflation)("deflate_monitor_using_JT: "
2298                                       "object=" INTPTR_FORMAT ", mark="
2299                                       INTPTR_FORMAT ", type='%s'",
2300                                       p2i(obj), obj->mark().value(),
2301                                       obj->klass()->external_name());
2302         }
2303 
2304         // Install the old mark word if nobody else has already done it.
2305         mid->install_displaced_markword_in_object(obj);
2306         mid->clear_using_JT();
2307 
2308         assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT,
2309                p2i(mid->object()));
2310         assert(mid->is_free(), "must be free: allocation_state=%d",
2311                (int) mid->allocation_state());
2312 
2313         // Move the deflated ObjectMonitor to the working free list
2314         // defined by free_head_p and free_tail_p. No races on this list
2315         // so no need for load_acquire() or store_release().
2316         if (*free_head_p == NULL) {
2317           // First one on the list.
2318           *free_head_p = mid;
2319         }
2320         if (*free_tail_p != NULL) {
2321           // We append to the list so the caller can use mid->_next_om
2322           // to fix the linkages in its context.
2323           ObjectMonitor* prevtail = *free_tail_p;
2324           // Should have been cleaned up by the caller:
2325           om_lock(prevtail);
2326 #ifdef ASSERT
2327           ObjectMonitor* l_next_om = unmarked_next(prevtail);
2328 #endif
2329           assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
2330           prevtail->set_next_om(mid);  // prevtail now points to mid (and is unlocked)
2331         }
2332         *free_tail_p = mid;
2333 
2334         // At this point, mid->_next_om still refers to its current
2335         // value and another ObjectMonitor's _next_om field still


2452     next = unmarked_next(mid);
2453   } else {
2454     // We're restarting after a safepoint so restore the necessary state
2455     // before we resume.
2456     cur_mid_in_use = *saved_mid_in_use_p;
2457     // Lock cur_mid_in_use so we can possibly update its
2458     // next field to extract a deflated ObjectMonitor.
2459     om_lock(cur_mid_in_use);
2460     mid = unmarked_next(cur_mid_in_use);
2461     if (mid == NULL) {
2462       om_unlock(cur_mid_in_use);
2463       *saved_mid_in_use_p = NULL;
2464       return 0;  // The remainder is empty so nothing more to deflate.
2465     }
2466     // Lock mid so we can possibly deflate it:
2467     om_lock(mid);
2468     next = unmarked_next(mid);
2469   }
2470 
2471   while (true) {
2472     // The current mid's next field is marked at this point. If we have
2473     // a cur_mid_in_use, then its next field is also marked at this point.
2474 
2475     if (next != NULL) {
2476       // We lock next so that an om_flush() thread that is behind us
2477       // cannot pass us when we unlock the current mid.
2478       om_lock(next);
2479       next_next = unmarked_next(next);
2480     }
2481 
2482     // Only try to deflate if there is an associated Java object and if
2483     // mid is old (is not newly allocated and is not newly freed).
2484     if (mid->object() != NULL && mid->is_old() &&
2485         deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) {
2486       // Deflation succeeded and already updated free_head_p and
2487       // free_tail_p as needed. Finish the move to the local free list
2488       // by unlinking mid from the global or per-thread in-use list.
2489       if (cur_mid_in_use == NULL) {
2490         // mid is the list head and it is locked. Switch the list head
2491         // to next which is also locked (if not NULL) and also leave
2492         // mid locked:
2493         Atomic::store(list_p, next);


2652       // are idle unless this JavaThread is exiting; do not race with
2653       // ObjectSynchronizer::om_flush().
2654       deflate_per_thread_idle_monitors_using_JT(jt);
2655       count++;
2656     }
2657   }
2658   if (count > 0) {
2659     log_debug(monitorinflation)("did async deflation of idle monitors for %d thread(s).", count);
2660   }
2661 
2662   log_info(monitorinflation)("async global_population=%d, global_in_use_count=%d, "
2663                              "global_free_count=%d, global_wait_count=%d",
2664                              Atomic::load(&om_list_globals._population),
2665                              Atomic::load(&om_list_globals._in_use_count),
2666                              Atomic::load(&om_list_globals._free_count),
2667                              Atomic::load(&om_list_globals._wait_count));
2668 
2669   // The ServiceThread's async deflation request has been processed.
2670   set_is_async_deflation_requested(false);
2671 
2672   if (HandshakeAfterDeflateIdleMonitors &&
2673       Atomic::load(&om_list_globals._wait_count) > 0) {
2674     // There are deflated ObjectMonitors waiting for a handshake
2675     // (or a safepoint) for safety.
2676 
2677     ObjectMonitor* list = Atomic::load(&om_list_globals._wait_list);
2678     ADIM_guarantee(list != NULL, "om_list_globals._wait_list must not be NULL");
2679     int count = Atomic::load(&om_list_globals._wait_count);
2680     Atomic::store(&om_list_globals._wait_count, 0);
2681     Atomic::store(&om_list_globals._wait_list, (ObjectMonitor*)NULL);
2682 
2683     // Find the tail for prepend_list_to_common(). No need to mark
2684     // ObjectMonitors for this list walk since only the deflater
2685     // thread manages the wait list.
2686     int l_count = 0;
2687     ObjectMonitor* tail = NULL;
2688     for (ObjectMonitor* n = list; n != NULL; n = unmarked_next(n)) {
2689       tail = n;
2690       l_count++;
2691     }
2692     ADIM_guarantee(count == l_count, "count=%d != l_count=%d", count, l_count);
2693 


2734 
2735   if (log_is_enabled(Info, monitorinflation)) {
2736     timer.start();
2737   }
2738 
2739   if (is_global) {
2740     OM_PERFDATA_OP(MonExtant, set_value(Atomic::load(&om_list_globals._in_use_count)));
2741   } else {
2742     OM_PERFDATA_OP(MonExtant, inc(Atomic::load(&target->om_in_use_count)));
2743   }
2744 
2745   do {
2746     int local_deflated_count;
2747     if (is_global) {
2748       local_deflated_count =
2749           deflate_monitor_list_using_JT(&om_list_globals._in_use_list,
2750                                         &om_list_globals._in_use_count,
2751                                         &free_head_p, &free_tail_p,
2752                                         &saved_mid_in_use_p);
2753     } else {
2754       local_deflated_count = deflate_monitor_list_using_JT(&target->om_in_use_list, &target->om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);



2755     }
2756     deflated_count += local_deflated_count;
2757 
2758     if (free_head_p != NULL) {
2759       // Move the deflated ObjectMonitors to the global free list.
2760       guarantee(free_tail_p != NULL && local_deflated_count > 0, "free_tail_p=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(free_tail_p), local_deflated_count);
2761       // Note: The target thread can be doing an om_alloc() that
2762       // is trying to prepend an ObjectMonitor on its in-use list
2763       // at the same time that we have deflated the current in-use
2764       // list head and put it on the local free list. prepend_to_common()
2765       // will detect the race and retry which avoids list corruption,
2766       // but the next field in free_tail_p can flicker to marked
2767       // and then unmarked while prepend_to_common() is sorting it
2768       // all out.
2769 #ifdef ASSERT
2770       ObjectMonitor* l_next_om = unmarked_next(free_tail_p);
2771 #endif
2772       assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
2773 
2774       if (HandshakeAfterDeflateIdleMonitors) {
2775         prepend_list_to_global_wait_list(free_head_p, free_tail_p, local_deflated_count);
2776       } else {
2777         prepend_list_to_global_free_list(free_head_p, free_tail_p, local_deflated_count);
2778       }
2779 
2780       OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
2781     }
2782 
2783     if (saved_mid_in_use_p != NULL) {
2784       // deflate_monitor_list_using_JT() detected a safepoint starting.
2785       timer.stop();
2786       {
2787         if (is_global) {
2788           log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
2789         } else {
2790           log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(target));
2791         }
2792         assert(SafepointMechanism::should_block(self), "sanity check");
2793         ThreadBlockInVM blocker(self);
2794       }
2795       // Prepare for another loop after the safepoint.
2796       free_head_p = NULL;
2797       free_tail_p = NULL;
2798       if (log_is_enabled(Info, monitorinflation)) {


3026   if (Atomic::load(&om_list_globals._population) == chk_om_population) {
3027     ls->print_cr("global_population=%d equals chk_om_population=%d",
3028                  Atomic::load(&om_list_globals._population), chk_om_population);
3029   } else {
3030     // With fine grained locks on the monitor lists, it is possible for
3031     // log_monitor_list_counts() to return a value that doesn't match
3032     // om_list_globals._population. So far a higher value has been
3033     // seen in testing so something is being double counted by
3034     // log_monitor_list_counts().
3035     ls->print_cr("WARNING: global_population=%d is not equal to "
3036                  "chk_om_population=%d",
3037                  Atomic::load(&om_list_globals._population), chk_om_population);
3038   }
3039 
3040   // Check om_list_globals._in_use_list and om_list_globals._in_use_count:
3041   chk_global_in_use_list_and_count(ls, &error_cnt);
3042 
3043   // Check om_list_globals._free_list and om_list_globals._free_count:
3044   chk_global_free_list_and_count(ls, &error_cnt);
3045 
3046   if (HandshakeAfterDeflateIdleMonitors) {
3047     // Check om_list_globals._wait_list and om_list_globals._wait_count:
3048     chk_global_wait_list_and_count(ls, &error_cnt);
3049   }
3050 
3051   ls->print_cr("Checking per-thread lists:");
3052 
3053   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
3054     // Check om_in_use_list and om_in_use_count:
3055     chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);
3056 
3057     // Check om_free_list and om_free_count:
3058     chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
3059   }
3060 
3061   if (error_cnt == 0) {
3062     ls->print_cr("No errors found in monitor list checks.");
3063   } else {
3064     log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
3065   }
3066 
3067   if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
3068       (!on_exit && log_is_enabled(Trace, monitorinflation))) {
3069     // When exiting this log output is at the Info level. When called


3416       }
3417     }
3418   }
3419 
3420   out->flush();
3421 }
3422 
3423 // Log counts for the global and per-thread monitor lists and return
3424 // the population count.
3425 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
3426   int pop_count = 0;
3427   out->print_cr("%18s  %10s  %10s  %10s  %10s",
3428                 "Global Lists:", "InUse", "Free", "Wait", "Total");
3429   out->print_cr("==================  ==========  ==========  ==========  ==========");
3430   int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
3431   int l_free_count = Atomic::load(&om_list_globals._free_count);
3432   int l_wait_count = Atomic::load(&om_list_globals._wait_count);
3433   out->print_cr("%18s  %10d  %10d  %10d  %10d", "", l_in_use_count,
3434                 l_free_count, l_wait_count,
3435                 Atomic::load(&om_list_globals._population));
3436   pop_count += l_in_use_count + l_free_count;
3437   if (HandshakeAfterDeflateIdleMonitors) {
3438     pop_count += l_wait_count;
3439   }
3440 
3441   out->print_cr("%18s  %10s  %10s  %10s",
3442                 "Per-Thread Lists:", "InUse", "Free", "Provision");
3443   out->print_cr("==================  ==========  ==========  ==========");
3444 
3445   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
3446     int l_om_in_use_count = Atomic::load(&jt->om_in_use_count);
3447     int l_om_free_count = Atomic::load(&jt->om_free_count);
3448     out->print_cr(INTPTR_FORMAT "  %10d  %10d  %10d", p2i(jt),
3449                   l_om_in_use_count, l_om_free_count, jt->om_free_provision);
3450     pop_count += l_om_in_use_count + l_om_free_count;
3451   }
3452   return pop_count;
3453 }
3454 
3455 #ifndef PRODUCT
3456 
3457 // Check if monitor belongs to the monitor cache
3458 // The list is grow-only so it's *relatively* safe to traverse
3459 // the list of extant blocks without taking a lock.


 122 PaddedObjectMonitor* ObjectSynchronizer::g_block_list = NULL;
 123 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
 124 bool volatile ObjectSynchronizer::_is_special_deflation_requested = false;
 125 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
 126 
 127 struct ObjectMonitorListGlobals {
 128   char         _pad_prefix[OM_CACHE_LINE_SIZE];
 129   // These are highly shared list related variables.
 130   // To avoid false-sharing they need to be the sole occupants of a cache line.
 131 
 132   // Global ObjectMonitor free list. Newly allocated and deflated
 133   // ObjectMonitors are prepended here.
 134   ObjectMonitor* _free_list;
 135   DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
 136 
 137   // Global ObjectMonitor in-use list. When a JavaThread is exiting,
 138   // ObjectMonitors on its per-thread in-use list are prepended here.
 139   ObjectMonitor* _in_use_list;
 140   DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
 141 
 142   // Global ObjectMonitor wait list. Deflated ObjectMonitors wait on
 143   // this list until after a handshake or a safepoint for platforms
 144   // that don't support handshakes. After the handshake or safepoint,
 145   // the deflated ObjectMonitors are prepended to free_list.

 146   ObjectMonitor* _wait_list;
 147   DEFINE_PAD_MINUS_SIZE(3, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
 148 
 149   int _free_count;    // # on free_list
 150   DEFINE_PAD_MINUS_SIZE(4, OM_CACHE_LINE_SIZE, sizeof(int));
 151 
 152   int _in_use_count;  // # on in_use_list
 153   DEFINE_PAD_MINUS_SIZE(5, OM_CACHE_LINE_SIZE, sizeof(int));
 154 
 155   int _population;    // # Extant -- in circulation
 156   DEFINE_PAD_MINUS_SIZE(6, OM_CACHE_LINE_SIZE, sizeof(int));
 157 
 158   int _wait_count;    // # on wait_list
 159   DEFINE_PAD_MINUS_SIZE(7, OM_CACHE_LINE_SIZE, sizeof(int));
 160 };
 161 static ObjectMonitorListGlobals om_list_globals;
 162 
 163 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
 164 
 165 


 302 
 303   // Second we handle om_list_globals._free_list:
 304   prepend_list_to_common(new_blk + 1, &new_blk[_BLOCKSIZE - 1], _BLOCKSIZE - 1,
 305                          &om_list_globals._free_list, &om_list_globals._free_count);
 306 }
 307 
 308 // Prepend a list of ObjectMonitors to om_list_globals._free_list.
 309 // 'tail' is the last ObjectMonitor in the list and there are 'count'
 310 // on the list. Also updates om_list_globals._free_count.
 311 static void prepend_list_to_global_free_list(ObjectMonitor* list,
 312                                              ObjectMonitor* tail, int count) {
 313   prepend_list_to_common(list, tail, count, &om_list_globals._free_list,
 314                          &om_list_globals._free_count);
 315 }
 316 
 317 // Prepend a list of ObjectMonitors to om_list_globals._wait_list.
 318 // 'tail' is the last ObjectMonitor in the list and there are 'count'
 319 // on the list. Also updates om_list_globals._wait_count.
 320 static void prepend_list_to_global_wait_list(ObjectMonitor* list,
 321                                              ObjectMonitor* tail, int count) {

 322   prepend_list_to_common(list, tail, count, &om_list_globals._wait_list,
 323                          &om_list_globals._wait_count);
 324 }
 325 
 326 // Prepend a list of ObjectMonitors to om_list_globals._in_use_list.
 327 // 'tail' is the last ObjectMonitor in the list and there are 'count'
 328 // on the list. Also updates om_list_globals._in_use_count.
 329 static void prepend_list_to_global_in_use_list(ObjectMonitor* list,
 330                                                ObjectMonitor* tail, int count) {
 331   prepend_list_to_common(list, tail, count, &om_list_globals._in_use_list,
 332                          &om_list_globals._in_use_count);
 333 }
 334 
 335 // Prepend an ObjectMonitor to the specified list. Also updates
 336 // the specified counter.
 337 static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
 338                               int* count_p) {
 339   while (true) {
 340     om_lock(m);  // Lock m so we can safely update its next field.
 341     ObjectMonitor* cur = NULL;


 516       // displaced to this thread's BasicLock. Make the displaced header
 517       // non-NULL so this BasicLock is not seen as recursive nor as
 518       // being locked. We do this unconditionally so that this thread's
 519       // BasicLock cannot be mis-interpreted by any stack walkers. For
 520       // performance reasons, stack walkers generally first check for
 521       // Biased Locking in the object's header, the second check is for
 522       // stack-locking in the object's header, the third check is for
 523       // recursive stack-locking in the displaced header in the BasicLock,
 524       // and last are the inflated Java Monitor (ObjectMonitor) checks.
 525       lock->set_displaced_header(markWord::unused_mark());
 526 
 527       if (owner == NULL && m->try_set_owner_from(NULL, self) == NULL) {
 528         assert(m->_recursions == 0, "invariant");
 529         return true;
 530       }
 531 
 532       if (AsyncDeflateIdleMonitors &&
 533           m->try_set_owner_from(DEFLATER_MARKER, self) == DEFLATER_MARKER) {
 534         // The deflation protocol finished the first part (setting owner),
 535         // but it failed the second part (making ref_count negative) and
 536         // bailed. Acquired the monitor.

 537         assert(m->_recursions == 0, "invariant");
 538         return true;
 539       }
 540     }
 541     break;
 542   }
 543 
 544   // Note that we could inflate in quick_enter.
 545   // This is likely a useful optimization.
 546   // Critically, in quick_enter() we must not:
 547   // -- perform bias revocation, or
 548   // -- block indefinitely, or
 549   // -- reach a safepoint
 550 
 551   return false;        // revert to slow-path
 552 }
 553 
 554 // -----------------------------------------------------------------------------
 555 // Monitor Enter/Exit
 556 // The interpreter and compiler assembly code tries to lock using the fast path


1015       hash = get_next_hash(self, obj);  // get a new hash
1016       temp = mark.copy_set_hash(hash);  // merge the hash into header
1017                                         // try to install the hash
1018       test = obj->cas_set_mark(temp, mark);
1019       if (test == mark) {               // if the hash was installed, return it
1020         return hash;
1021       }
1022       // Failed to install the hash. It could be that another thread
1023       // installed the hash just before our attempt or inflation has
1024       // occurred or... so we fall thru to inflate the monitor for
1025       // stability and then install the hash.
1026     } else if (mark.has_monitor()) {
1027       ObjectMonitorHandle omh;
1028       if (!omh.save_om_ptr(obj, mark)) {
1029         // Lost a race with async deflation so try again.
1030         assert(AsyncDeflateIdleMonitors, "sanity check");
1031         continue;
1032       }
1033       monitor = omh.om_ptr();
1034       temp = monitor->header();
1035       assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());



1036       hash = temp.hash();
1037       if (hash != 0) {                  // if it has a hash, just return it
1038         return hash;
1039       }
1040       // Fall thru so we only have one place that installs the hash in
1041       // the ObjectMonitor.
1042     } else if (self->is_lock_owned((address)mark.locker())) {
1043       // This is a stack lock owned by the calling thread so fetch the
1044       // displaced markWord from the BasicLock on the stack.
1045       temp = mark.displaced_mark_helper();
1046       assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1047       hash = temp.hash();
1048       if (hash != 0) {                  // if it has a hash, just return it
1049         return hash;
1050       }
1051       // WARNING:
1052       // The displaced header in the BasicLock on a thread's stack
1053       // is strictly immutable. It CANNOT be changed in ANY case.
1054       // So we have to inflate the stack lock into an ObjectMonitor
1055       // even if the current thread owns the lock. The BasicLock on
1056       // a thread's stack can be asynchronously read by other threads
1057       // during an inflate() call so any change to that stack memory
1058       // may not propagate to other threads correctly.
1059     }
1060 
1061     // Inflate the monitor to set the hash.
1062     ObjectMonitorHandle omh;
1063     inflate(&omh, self, obj, inflate_cause_hash_code);
1064     monitor = omh.om_ptr();
1065     // Load ObjectMonitor's header/dmw field and see if it has a hash.
1066     mark = monitor->header();
1067     assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());



1068     hash = mark.hash();
1069     if (hash == 0) {                    // if it does not have a hash
1070       hash = get_next_hash(self, obj);  // get a new hash
1071       temp = mark.copy_set_hash(hash);  // merge the hash into header






1072       assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1073       uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
1074       test = markWord(v);
1075       if (test != mark) {
1076         // The attempt to update the ObjectMonitor's header/dmw field
1077         // did not work. This can happen if another thread managed to
1078         // merge in the hash just before our cmpxchg().


1079         // If we add any new usages of the header/dmw field, this code
1080         // will need to be updated.





1081         hash = test.hash();
1082         assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1083         assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1084       }
1085     }
1086     // We finally get the hash.
1087     return hash;
1088   }
1089 }
1090 
1091 // Deprecated -- use FastHashCode() instead.
1092 
1093 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
1094   return FastHashCode(Thread::current(), obj());
1095 }
1096 
1097 
1098 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
1099                                                    Handle h_obj) {
1100   if (UseBiasedLocking) {


1226     }
1227 
1228     // Unlocked case, header in place
1229     // Cannot have assertion since this object may have been
1230     // locked by another thread when reaching here.
1231     // assert(mark.is_neutral(), "sanity check");
1232 
1233     return NULL;
1234   }
1235 }
1236 
1237 // Visitors ...
1238 
1239 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
1240   PaddedObjectMonitor* block = Atomic::load(&g_block_list);
1241   while (block != NULL) {
1242     assert(block->object() == CHAINMARKER, "must be a block header");
1243     for (int i = _BLOCKSIZE - 1; i > 0; i--) {
1244       ObjectMonitor* mid = (ObjectMonitor *)(block + i);
1245       ObjectMonitorHandle omh;
1246       if (!mid->is_free() && omh.save_om_ptr_if_safe(mid)) {
1247         // The ObjectMonitor* is not free and it has been made safe.
1248         if (mid->object() == NULL) {
1249           // Only process with closure if the object is set.
1250           continue;
1251         }
1252         closure->do_monitor(mid);
1253       }
1254     }
1255     // unmarked_next() is not needed with g_block_list (no locking
1256     // used with block linkage _next_om fields).
1257     block = (PaddedObjectMonitor*)block->next_om();
1258   }
1259 }
1260 
1261 static bool monitors_used_above_threshold() {
1262   int population = Atomic::load(&om_list_globals._population);
1263   if (population == 0) {
1264     return false;
1265   }
1266   if (MonitorUsedDeflationThreshold > 0) {
1267     int monitors_used = population - Atomic::load(&om_list_globals._free_count) -
1268                         Atomic::load(&om_list_globals._wait_count);


1269     int monitor_usage = (monitors_used * 100LL) / population;
1270     return monitor_usage > MonitorUsedDeflationThreshold;
1271   }
1272   return false;
1273 }
1274 
1275 // Returns true if MonitorBound is set (> 0) and if the specified
1276 // cnt is > MonitorBound. Otherwise returns false.
1277 static bool is_MonitorBound_exceeded(const int cnt) {
1278   const int mx = MonitorBound;
1279   return mx > 0 && cnt > mx;
1280 }
1281 
1282 bool ObjectSynchronizer::is_async_deflation_needed() {
1283   if (!AsyncDeflateIdleMonitors) {
1284     return false;
1285   }
1286   if (is_async_deflation_requested()) {
1287     // Async deflation request.
1288     return true;
1289   }
1290   if (AsyncDeflationInterval > 0 &&
1291       time_since_last_async_deflation_ms() > AsyncDeflationInterval &&
1292       monitors_used_above_threshold()) {
1293     // It's been longer than our specified deflate interval and there
1294     // are too many monitors in use. We don't deflate more frequently
1295     // than AsyncDeflationInterval (unless is_async_deflation_requested)
1296     // in order to not swamp the ServiceThread.
1297     _last_async_deflation_time_ns = os::javaTimeNanos();
1298     return true;
1299   }
1300   int monitors_used = Atomic::load(&om_list_globals._population) -
1301                       Atomic::load(&om_list_globals._free_count) -
1302                       Atomic::load(&om_list_globals._wait_count);


1303   if (is_MonitorBound_exceeded(monitors_used)) {
1304     // Not enough ObjectMonitors on the global free list.
1305     return true;
1306   }
1307   return false;
1308 }
1309 
1310 bool ObjectSynchronizer::needs_monitor_scavenge() {
1311   if (Atomic::load(&_forceMonitorScavenge) == 1) {
1312     log_info(monitorinflation)("Monitor scavenge needed, triggering safepoint cleanup.");
1313     return true;
1314   }
1315   return false;
1316 }
1317 
1318 bool ObjectSynchronizer::is_safepoint_deflation_needed() {
1319   if (!AsyncDeflateIdleMonitors) {
1320     if (monitors_used_above_threshold()) {
1321       // Too many monitors in use.
1322       return true;


1455       m->set_allocation_state(ObjectMonitor::New);
1456       prepend_to_om_in_use_list(self, m);
1457       return m;
1458     }
1459 
1460     // 2: try to allocate from the global om_list_globals._free_list
1461     // If we're using thread-local free lists then try
1462     // to reprovision the caller's free list.
1463     if (Atomic::load(&om_list_globals._free_list) != NULL) {
1464       // Reprovision the thread's om_free_list.
1465       // Use bulk transfers to reduce the allocation rate and heat
1466       // on various locks.
1467       for (int i = self->om_free_provision; --i >= 0;) {
1468         ObjectMonitor* take = take_from_start_of_global_free_list();
1469         if (take == NULL) {
1470           break;  // No more are available.
1471         }
1472         guarantee(take->object() == NULL, "invariant");
1473         if (AsyncDeflateIdleMonitors) {
1474           // We allowed 3 field values to linger during async deflation.
1475           // Clear or restore them as appropriate.



1476           take->set_header(markWord::zero());
1477           // DEFLATER_MARKER is the only non-NULL value we should see here.
1478           take->try_set_owner_from(DEFLATER_MARKER, NULL);
1479           if (take->ref_count() < 0) {
1480             // Add back max_jint to restore the ref_count field to its
1481             // proper value.
1482             Atomic::add(&take->_ref_count, max_jint);
1483 
1484 #ifdef ASSERT
1485             jint l_ref_count = take->ref_count();
1486 #endif
1487             assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d",
1488                    l_ref_count, take->ref_count());
1489           }
1490         }
1491         take->Recycle();
1492         // Since we're taking from the global free-list, take must be Free.
1493         // om_release() also sets the allocation state to Free because it
1494         // is called from other code paths.
1495         assert(take->is_free(), "invariant");
1496         om_release(self, take, false);
1497       }
1498       self->om_free_provision += 1 + (self->om_free_provision / 2);


1999       ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
2000       return;
2001     }
2002 
2003     // CASE: neutral
2004     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
2005     // If we know we're inflating for entry it's better to inflate by swinging a
2006     // pre-locked ObjectMonitor pointer into the object header.   A successful
2007     // CAS inflates the object *and* confers ownership to the inflating thread.
2008     // In the current implementation we use a 2-step mechanism where we CAS()
2009     // to inflate and then CAS() again to try to swing _owner from NULL to self.
2010     // An inflateTry() method that we could call from enter() would be useful.
2011 
2012     // Catch if the object's header is not neutral (not locked and
2013     // not marked is what we care about here).
2014     ADIM_guarantee(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
2015     ObjectMonitor* m = om_alloc(self);
2016     // prepare m for installation - set monitor to initial state
2017     m->Recycle();
2018     m->set_header(mark);
2019     if (AsyncDeflateIdleMonitors) {
2020       // DEFLATER_MARKER is the only non-NULL value we should see here.
2021       m->try_set_owner_from(DEFLATER_MARKER, NULL);
2022     }
2023     m->set_object(object);
2024     m->_Responsible  = NULL;
2025     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
2026 
2027     omh_p->set_om_ptr(m);
2028 
2029     if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
2030       m->set_header(markWord::zero());
2031       m->set_object(NULL);
2032       m->Recycle();
2033       omh_p->set_om_ptr(NULL);
2034       // om_release() will reset the allocation state from New to Free.
2035       om_release(self, m, true);
2036       m = NULL;
2037       continue;
2038       // interference - the markword changed - just retry.
2039       // The state-transitions are one-way, so there's no chance of
2040       // live-lock -- "Inflated" is an absorbing state.
2041     }
2042 


2138 
2139   if (mid->is_busy() || mid->ref_count() != 0) {
2140     // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor*
2141     // is in use so no deflation.
2142     deflated = false;
2143   } else {
2144     // Deflate the monitor if it is no longer being used
2145     // It's idle - scavenge and return to the global free list
2146     // plain old deflation ...
2147     if (log_is_enabled(Trace, monitorinflation)) {
2148       ResourceMark rm;
2149       log_trace(monitorinflation)("deflate_monitor: "
2150                                   "object=" INTPTR_FORMAT ", mark="
2151                                   INTPTR_FORMAT ", type='%s'", p2i(obj),
2152                                   mark.value(), obj->klass()->external_name());
2153     }
2154 
2155     // Restore the header back to obj
2156     obj->release_set_mark(dmw);
2157     if (AsyncDeflateIdleMonitors) {
2158       // clear() expects the owner field to be NULL.
2159       // DEFLATER_MARKER is the only non-NULL value we should see here.


2160       mid->try_set_owner_from(DEFLATER_MARKER, NULL);
2161     }
2162     mid->clear();
2163 
2164     assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
2165            p2i(mid->object()));
2166     assert(mid->is_free(), "invariant");
2167 
2168     // Move the deflated ObjectMonitor to the working free list
2169     // defined by free_head_p and free_tail_p.
2170     if (*free_head_p == NULL) *free_head_p = mid;
2171     if (*free_tail_p != NULL) {
2172       // We append to the list so the caller can use mid->_next_om
2173       // to fix the linkages in its context.
2174       ObjectMonitor* prevtail = *free_tail_p;
2175       // Should have been cleaned up by the caller:
2176       // Note: Should not have to lock prevtail here since we're at a
2177       // safepoint and ObjectMonitors on the local free list should
2178       // not be accessed in parallel.
2179 #ifdef ASSERT


2266         const oop obj = (oop) mid->object();
2267         if (log_is_enabled(Trace, monitorinflation)) {
2268           ResourceMark rm;
2269           log_trace(monitorinflation)("deflate_monitor_using_JT: "
2270                                       "object=" INTPTR_FORMAT ", mark="
2271                                       INTPTR_FORMAT ", type='%s'",
2272                                       p2i(obj), obj->mark().value(),
2273                                       obj->klass()->external_name());
2274         }
2275 
2276         // Install the old mark word if nobody else has already done it.
2277         mid->install_displaced_markword_in_object(obj);
2278         mid->clear_using_JT();
2279 
2280         assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT,
2281                p2i(mid->object()));
2282         assert(mid->is_free(), "must be free: allocation_state=%d",
2283                (int) mid->allocation_state());
2284 
2285         // Move the deflated ObjectMonitor to the working free list
2286         // defined by free_head_p and free_tail_p.

2287         if (*free_head_p == NULL) {
2288           // First one on the list.
2289           *free_head_p = mid;
2290         }
2291         if (*free_tail_p != NULL) {
2292           // We append to the list so the caller can use mid->_next_om
2293           // to fix the linkages in its context.
2294           ObjectMonitor* prevtail = *free_tail_p;
2295           // Should have been cleaned up by the caller:
2296           om_lock(prevtail);
2297 #ifdef ASSERT
2298           ObjectMonitor* l_next_om = unmarked_next(prevtail);
2299 #endif
2300           assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
2301           prevtail->set_next_om(mid);  // prevtail now points to mid (and is unlocked)
2302         }
2303         *free_tail_p = mid;
2304 
2305         // At this point, mid->_next_om still refers to its current
2306         // value and another ObjectMonitor's _next_om field still


2423     next = unmarked_next(mid);
2424   } else {
2425     // We're restarting after a safepoint so restore the necessary state
2426     // before we resume.
2427     cur_mid_in_use = *saved_mid_in_use_p;
2428     // Lock cur_mid_in_use so we can possibly update its
2429     // next field to extract a deflated ObjectMonitor.
2430     om_lock(cur_mid_in_use);
2431     mid = unmarked_next(cur_mid_in_use);
2432     if (mid == NULL) {
2433       om_unlock(cur_mid_in_use);
2434       *saved_mid_in_use_p = NULL;
2435       return 0;  // The remainder is empty so nothing more to deflate.
2436     }
2437     // Lock mid so we can possibly deflate it:
2438     om_lock(mid);
2439     next = unmarked_next(mid);
2440   }
2441 
2442   while (true) {
2443     // The current mid is locked at this point. If we have a
2444     // cur_mid_in_use, then it is also locked at this point.
2445 
2446     if (next != NULL) {
2447       // We lock next so that an om_flush() thread that is behind us
2448       // cannot pass us when we unlock the current mid.
2449       om_lock(next);
2450       next_next = unmarked_next(next);
2451     }
2452 
2453     // Only try to deflate if there is an associated Java object and if
2454     // mid is old (is not newly allocated and is not newly freed).
2455     if (mid->object() != NULL && mid->is_old() &&
2456         deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) {
2457       // Deflation succeeded and already updated free_head_p and
2458       // free_tail_p as needed. Finish the move to the local free list
2459       // by unlinking mid from the global or per-thread in-use list.
2460       if (cur_mid_in_use == NULL) {
2461         // mid is the list head and it is locked. Switch the list head
2462         // to next, which is also locked (if not NULL), and leave
2463         // mid locked:
2464         Atomic::store(list_p, next);
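The list walk above relies on hand-over-hand locking: mid is locked, next is locked before mid is unlocked, so a thread coming up from behind (for example an om_flush() caller) cannot pass the cursor. Below is a minimal standalone model of that discipline, using one std::mutex per node instead of the lock bit in _next_om; Node, deflatable and unlink_first_deflatable are invented names for illustration only:

#include <mutex>

struct Node {
  std::mutex lock;
  Node*      next = nullptr;
  bool       deflatable = false;   // stand-in for the object()/is_old() checks
};

// Unlink and return the first deflatable node after 'pred', or nullptr if none.
// At every step two adjacent nodes are held, so the unlink is safe against
// concurrent traversals that use the same locking order.
Node* unlink_first_deflatable(Node* pred) {
  pred->lock.lock();
  Node* cur = pred->next;
  while (cur != nullptr) {
    cur->lock.lock();              // couple: hold pred and cur together
    if (cur->deflatable) {
      pred->next = cur->next;      // unlink while both locks are held
      cur->lock.unlock();
      pred->lock.unlock();
      return cur;                  // caller now owns the detached node
    }
    pred->lock.unlock();           // slide the two-lock window forward
    pred = cur;
    cur = cur->next;               // safe: pred (the old cur) is still locked
  }
  pred->lock.unlock();
  return nullptr;
}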


2623       // are idle unless this JavaThread is exiting; do not race with
2624       // ObjectSynchronizer::om_flush().
2625       deflate_per_thread_idle_monitors_using_JT(jt);
2626       count++;
2627     }
2628   }
2629   if (count > 0) {
2630     log_debug(monitorinflation)("did async deflation of idle monitors for %d thread(s).", count);
2631   }
2632 
2633   log_info(monitorinflation)("async global_population=%d, global_in_use_count=%d, "
2634                              "global_free_count=%d, global_wait_count=%d",
2635                              Atomic::load(&om_list_globals._population),
2636                              Atomic::load(&om_list_globals._in_use_count),
2637                              Atomic::load(&om_list_globals._free_count),
2638                              Atomic::load(&om_list_globals._wait_count));
2639 
2640   // The ServiceThread's async deflation request has been processed.
2641   set_is_async_deflation_requested(false);
2642 
2643   if (Atomic::load(&om_list_globals._wait_count) > 0) {

2644     // There are deflated ObjectMonitors waiting for a handshake
2645     // (or a safepoint) for safety.
2646 
2647     ObjectMonitor* list = Atomic::load(&om_list_globals._wait_list);
2648     ADIM_guarantee(list != NULL, "om_list_globals._wait_list must not be NULL");
2649     int count = Atomic::load(&om_list_globals._wait_count);
2650     Atomic::store(&om_list_globals._wait_count, 0);
2651     Atomic::store(&om_list_globals._wait_list, (ObjectMonitor*)NULL);
2652 
2653     // Find the tail for prepend_list_to_common(). No need to mark
2654     // ObjectMonitors for this list walk since only the deflater
2655     // thread manages the wait list.
2656     int l_count = 0;
2657     ObjectMonitor* tail = NULL;
2658     for (ObjectMonitor* n = list; n != NULL; n = unmarked_next(n)) {
2659       tail = n;
2660       l_count++;
2661     }
2662     ADIM_guarantee(count == l_count, "count=%d != l_count=%d", count, l_count);
2663 
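The drain above detaches the whole wait list, walks it once to locate the tail and to cross-check the count, and (in the code that follows this hunk) prepends the detached segment to the global free list. A standalone sketch of that pattern follows; it is not HotSpot code, and since the real wait list is only touched by the deflater thread, the atomics here are stronger than strictly required:

#include <atomic>
#include <cassert>

struct Node { Node* next = nullptr; };

void drain_and_prepend(std::atomic<Node*>* wait_head, std::atomic<int>* wait_count,
                       std::atomic<Node*>* free_head, std::atomic<int>* free_count) {
  int count = wait_count->exchange(0);
  Node* list = wait_head->exchange(nullptr);
  if (list == nullptr) return;

  // One walk: find the tail and verify the count, like the l_count loop above.
  int l_count = 0;
  Node* tail = nullptr;
  for (Node* n = list; n != nullptr; n = n->next) {
    tail = n;
    l_count++;
  }
  assert(count == l_count && "detached count must match the walked count");

  // Prepend the whole [list .. tail] segment with a CAS retry loop.
  Node* old_head = free_head->load();
  do {
    tail->next = old_head;
  } while (!free_head->compare_exchange_weak(old_head, list));
  free_count->fetch_add(l_count);
}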


2704 
2705   if (log_is_enabled(Info, monitorinflation)) {
2706     timer.start();
2707   }
2708 
2709   if (is_global) {
2710     OM_PERFDATA_OP(MonExtant, set_value(Atomic::load(&om_list_globals._in_use_count)));
2711   } else {
2712     OM_PERFDATA_OP(MonExtant, inc(Atomic::load(&target->om_in_use_count)));
2713   }
2714 
2715   do {
2716     int local_deflated_count;
2717     if (is_global) {
2718       local_deflated_count =
2719           deflate_monitor_list_using_JT(&om_list_globals._in_use_list,
2720                                         &om_list_globals._in_use_count,
2721                                         &free_head_p, &free_tail_p,
2722                                         &saved_mid_in_use_p);
2723     } else {
2724       local_deflated_count =
2725           deflate_monitor_list_using_JT(&target->om_in_use_list,
2726                                         &target->om_in_use_count, &free_head_p,
2727                                         &free_tail_p, &saved_mid_in_use_p);
2728     }
2729     deflated_count += local_deflated_count;
2730 
2731     if (free_head_p != NULL) {
2732       // Move the deflated ObjectMonitors to the global wait list.
2733       guarantee(free_tail_p != NULL && local_deflated_count > 0, "free_tail_p=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(free_tail_p), local_deflated_count);
2734       // Note: The target thread can be doing an om_alloc() that
2735       // is trying to prepend an ObjectMonitor on its in-use list
2736       // at the same time that we have deflated the current in-use
2737       // list head and put it on the local free list. prepend_to_common()
2738       // will detect the race and retry, which avoids list corruption,
2739       // but the next field in free_tail_p can flicker to marked
2740       // and then unmarked while prepend_to_common() is sorting it
2741       // all out.
2742 #ifdef ASSERT
2743       ObjectMonitor* l_next_om = unmarked_next(free_tail_p);
2744 #endif
2745       assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
2746 

2747       prepend_list_to_global_wait_list(free_head_p, free_tail_p, local_deflated_count);



2748 
2749       OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
2750     }
2751 
2752     if (saved_mid_in_use_p != NULL) {
2753       // deflate_monitor_list_using_JT() detected a safepoint starting.
2754       timer.stop();
2755       {
2756         if (is_global) {
2757           log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
2758         } else {
2759           log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(target));
2760         }
2761         assert(SafepointMechanism::should_block(self), "sanity check");
2762         ThreadBlockInVM blocker(self);
2763       }
2764       // Prepare for another loop after the safepoint.
2765       free_head_p = NULL;
2766       free_tail_p = NULL;
2767       if (log_is_enabled(Info, monitorinflation)) {


2995   if (Atomic::load(&om_list_globals._population) == chk_om_population) {
2996     ls->print_cr("global_population=%d equals chk_om_population=%d",
2997                  Atomic::load(&om_list_globals._population), chk_om_population);
2998   } else {
2999     // With fine-grained locks on the monitor lists, it is possible for
3000     // log_monitor_list_counts() to return a value that doesn't match
3001     // om_list_globals._population. So far a higher value has been
3002     // seen in testing, so something is being double-counted by
3003     // log_monitor_list_counts().
3004     ls->print_cr("WARNING: global_population=%d is not equal to "
3005                  "chk_om_population=%d",
3006                  Atomic::load(&om_list_globals._population), chk_om_population);
3007   }
3008 
3009   // Check om_list_globals._in_use_list and om_list_globals._in_use_count:
3010   chk_global_in_use_list_and_count(ls, &error_cnt);
3011 
3012   // Check om_list_globals._free_list and om_list_globals._free_count:
3013   chk_global_free_list_and_count(ls, &error_cnt);
3014 

3015   // Check om_list_globals._wait_list and om_list_globals._wait_count:
3016   chk_global_wait_list_and_count(ls, &error_cnt);

3017 
3018   ls->print_cr("Checking per-thread lists:");
3019 
3020   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
3021     // Check om_in_use_list and om_in_use_count:
3022     chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);
3023 
3024     // Check om_free_list and om_free_count:
3025     chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
3026   }
3027 
3028   if (error_cnt == 0) {
3029     ls->print_cr("No errors found in monitor list checks.");
3030   } else {
3031     log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
3032   }
3033 
3034   if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
3035       (!on_exit && log_is_enabled(Trace, monitorinflation))) {
3036     // When exiting, this log output is at the Info level. When called


3383       }
3384     }
3385   }
3386 
3387   out->flush();
3388 }
3389 
3390 // Log counts for the global and per-thread monitor lists and return
3391 // the population count.
3392 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
3393   int pop_count = 0;
3394   out->print_cr("%18s  %10s  %10s  %10s  %10s",
3395                 "Global Lists:", "InUse", "Free", "Wait", "Total");
3396   out->print_cr("==================  ==========  ==========  ==========  ==========");
3397   int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
3398   int l_free_count = Atomic::load(&om_list_globals._free_count);
3399   int l_wait_count = Atomic::load(&om_list_globals._wait_count);
3400   out->print_cr("%18s  %10d  %10d  %10d  %10d", "", l_in_use_count,
3401                 l_free_count, l_wait_count,
3402                 Atomic::load(&om_list_globals._population));
3403   pop_count += l_in_use_count + l_free_count + l_wait_count;



3404 
3405   out->print_cr("%18s  %10s  %10s  %10s",
3406                 "Per-Thread Lists:", "InUse", "Free", "Provision");
3407   out->print_cr("==================  ==========  ==========  ==========");
3408 
3409   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
3410     int l_om_in_use_count = Atomic::load(&jt->om_in_use_count);
3411     int l_om_free_count = Atomic::load(&jt->om_free_count);
3412     out->print_cr(INTPTR_FORMAT "  %10d  %10d  %10d", p2i(jt),
3413                   l_om_in_use_count, l_om_free_count, jt->om_free_provision);
3414     pop_count += l_om_in_use_count + l_om_free_count;
3415   }
3416   return pop_count;
3417 }
3418 
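For reference, the table printed by log_monitor_list_counts() has the following shape; the counts and the thread address below are made-up values shown only to illustrate the column layout:

     Global Lists:       InUse        Free        Wait       Total
==================  ==========  ==========  ==========  ==========
                             2         126           0         128
 Per-Thread Lists:       InUse        Free   Provision
==================  ==========  ==========  ==========
0x00007f3e2c029800           1           3          32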
3419 #ifndef PRODUCT
3420 
3421 // Check if a monitor belongs to the monitor cache.
3422 // The list is grow-only so it's *relatively* safe to traverse
3423 // the list of extant blocks without taking a lock.