157
158 int _wait_count; // # on wait_list
159 DEFINE_PAD_MINUS_SIZE(7, OM_CACHE_LINE_SIZE, sizeof(int));
160 };
161 static ObjectMonitorListGlobals om_list_globals;
162
163 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
164
165
166 // =====================> Spin-lock functions
167
168 // ObjectMonitors are not lockable outside of this file. We use spin-locks
169 // implemented using a bit in the _next_om field instead of the heavier
170 // weight locking mechanisms for faster list management.
171
172 #define OM_LOCK_BIT 0x1
173
174 // Returns true if the ObjectMonitor is locked.
175 // Otherwise returns false.
176 static bool is_locked(ObjectMonitor* om) {
177 return ((intptr_t)om->next_om() & OM_LOCK_BIT) == OM_LOCK_BIT;
178 }
179
180 // Mark an ObjectMonitor* with OM_LOCK_BIT and return it.
181 static ObjectMonitor* mark_om_ptr(ObjectMonitor* om) {
182 return (ObjectMonitor*)((intptr_t)om | OM_LOCK_BIT);
183 }
184
185 // Return the unmarked next field in an ObjectMonitor. Note: the next
186 // field may or may not have been marked with OM_LOCK_BIT originally.
187 static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
188 return (ObjectMonitor*)((intptr_t)om->next_om() & ~OM_LOCK_BIT);
189 }
190
191 // Try to lock an ObjectMonitor. Returns true if locking was successful.
192 // Otherwise returns false.
193 static bool try_om_lock(ObjectMonitor* om) {
194 // Get current next field without any OM_LOCK_BIT value.
195 ObjectMonitor* next = unmarked_next(om);
196 if (om->try_set_next_om(next, mark_om_ptr(next)) != next) {
197 return false; // Cannot lock the ObjectMonitor.
198 }
199 return true;
200 }
201
202 // Lock an ObjectMonitor.
203 static void om_lock(ObjectMonitor* om) {
204 while (true) {
205 if (try_om_lock(om)) {
206 return;
207 }
208 }
209 }
210
211 // Unlock an ObjectMonitor.
212 static void om_unlock(ObjectMonitor* om) {
213 ObjectMonitor* next = om->next_om();
214 guarantee(((intptr_t)next & OM_LOCK_BIT) == OM_LOCK_BIT, "next=" INTPTR_FORMAT
215 " must have OM_LOCK_BIT=%x set.", p2i(next), OM_LOCK_BIT);
216
217 next = (ObjectMonitor*)((intptr_t)next & ~OM_LOCK_BIT); // Clear OM_LOCK_BIT.
218 om->set_next_om(next);
219 }
220
221 // Get the list head after locking it. Returns the list head or NULL
222 // if the list is empty.
223 static ObjectMonitor* get_list_head_locked(ObjectMonitor** list_p) {
224 while (true) {
225 ObjectMonitor* mid = Atomic::load(list_p);
226 if (mid == NULL) {
227 return NULL; // The list is empty.
228 }
229 if (try_om_lock(mid)) {
230 if (Atomic::load(list_p) != mid) {
231 // The list head changed before we could lock it so we have to retry.
232 om_unlock(mid);
233 continue;
234 }
235 return mid;
236 }
237 }
238 }
239
240 #undef OM_LOCK_BIT
241
242
243 // =====================> List Management functions
244
245 // Prepend a list of ObjectMonitors to the specified *list_p. 'tail' is
246 // the last ObjectMonitor in the list and there are 'count' on the list.
247 // Also updates the specified *count_p.
248 static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail,
249 int count, ObjectMonitor** list_p,
250 int* count_p) {
251 while (true) {
252 ObjectMonitor* cur = Atomic::load(list_p);
253 // Prepend list to *list_p.
254 if (!try_om_lock(tail)) {
255 // Failed to lock tail due to a list walker so try it all again.
256 continue;
257 }
258 tail->set_next_om(cur); // tail now points to cur (and unlocks tail)
259 if (cur == NULL) {
260 // No potential race with takers or other prependers since
261 // *list_p is empty.
262 if (Atomic::cmpxchg(list_p, cur, list) == cur) {
263 // Successfully switched *list_p to the list value.
264 Atomic::add(count_p, count);
265 break;
266 }
267 // Implied else: try it all again
268 } else {
269 if (!try_om_lock(cur)) {
270 continue; // failed to lock cur so try it all again
271 }
272 // We locked cur so try to switch *list_p to the list value.
273 if (Atomic::cmpxchg(list_p, cur, list) != cur) {
274 // The list head has changed so unlock cur and try again:
275 om_unlock(cur);
276 continue;
277 }
325
326 // Prepend a list of ObjectMonitors to om_list_globals._in_use_list.
327 // 'tail' is the last ObjectMonitor in the list and there are 'count'
328 // on the list. Also updates om_list_globals._in_use_count.
329 static void prepend_list_to_global_in_use_list(ObjectMonitor* list,
330 ObjectMonitor* tail, int count) {
331 prepend_list_to_common(list, tail, count, &om_list_globals._in_use_list,
332 &om_list_globals._in_use_count);
333 }
334
335 // Prepend an ObjectMonitor to the specified list. Also updates
336 // the specified counter.
337 static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
338 int* count_p) {
339 while (true) {
340 om_lock(m); // Lock m so we can safely update its next field.
341 ObjectMonitor* cur = NULL;
342 // Lock the list head to guard against races with a list walker
343 // or async deflater thread (which only races in om_in_use_list):
344 if ((cur = get_list_head_locked(list_p)) != NULL) {
345 // List head is now locked so we can safely switch it.
346 m->set_next_om(cur); // m now points to cur (and unlocks m)
347 Atomic::store(list_p, m); // Switch list head to unlocked m.
348 om_unlock(cur);
349 break;
350 }
351 // The list is empty so try to set the list head.
352 assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
353 m->set_next_om(cur); // m now points to NULL (and unlocks m)
354 if (Atomic::cmpxchg(list_p, cur, m) == cur) {
355 // List head is now unlocked m.
356 break;
357 }
358 // Implied else: try it all again
359 }
360 Atomic::inc(count_p);
361 }
362
363 // Prepend an ObjectMonitor to a per-thread om_free_list.
364 // Also updates the per-thread om_free_count.
365 static void prepend_to_om_free_list(Thread* self, ObjectMonitor* m) {
366 prepend_to_common(m, &self->om_free_list, &self->om_free_count);
367 }
368
369 // Prepend an ObjectMonitor to a per-thread om_in_use_list.
370 // Also updates the per-thread om_in_use_count.
371 static void prepend_to_om_in_use_list(Thread* self, ObjectMonitor* m) {
372 prepend_to_common(m, &self->om_in_use_list, &self->om_in_use_count);
373 }
374
375 // Take an ObjectMonitor from the start of the specified list. Also
376 // decrements the specified counter. Returns NULL if none are available.
377 static ObjectMonitor* take_from_start_of_common(ObjectMonitor** list_p,
378 int* count_p) {
379 ObjectMonitor* take = NULL;
380 // Lock the list head to guard against races with a list walker
381 // or async deflater thread (which only races in om_list_globals._free_list):
382 if ((take = get_list_head_locked(list_p)) == NULL) {
383 return NULL; // None are available.
384 }
385 ObjectMonitor* next = unmarked_next(take);
386 // Switch locked list head to next (which unlocks the list head, but
387 // leaves take locked):
388 Atomic::store(list_p, next);
389 Atomic::dec(count_p);
390 // Unlock take, but leave the next value for any lagging list
391 // walkers. It will get cleaned up when take is prepended to
392 // the in-use list:
393 om_unlock(take);
394 return take;
395 }
396
397 // Take an ObjectMonitor from the start of the om_list_globals._free_list.
398 // Also updates om_list_globals._free_count. Returns NULL if none are
399 // available.
400 static ObjectMonitor* take_from_start_of_global_free_list() {
401 return take_from_start_of_common(&om_list_globals._free_list,
402 &om_list_globals._free_count);
403 }
404
405 // Take an ObjectMonitor from the start of a per-thread free-list.
406 // Also updates om_free_count. Returns NULL if none are available.
407 static ObjectMonitor* take_from_start_of_om_free_list(Thread* self) {
1326 if (is_special_deflation_requested()) {
1327 // For AsyncDeflateIdleMonitors only do a safepoint deflation
1328 // if there is a special deflation request.
1329 return true;
1330 }
1331 return false;
1332 }
1333
1334 jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
1335 return (os::javaTimeNanos() - _last_async_deflation_time_ns) / (NANOUNITS / MILLIUNITS);
1336 }
1337
1338 void ObjectSynchronizer::oops_do(OopClosure* f) {
1339 // We only scan the global used list here (for moribund threads), and
1340 // the thread-local monitors in Thread::oops_do().
1341 global_used_oops_do(f);
1342 }
1343
1344 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
1345 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1346 list_oops_do(Atomic::load(&om_list_globals._in_use_list), f);
1347 }
1348
1349 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
1350 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1351 list_oops_do(thread->om_in_use_list, f);
1352 }
1353
1354 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
1355 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1356 // The oops_do() phase does not overlap with monitor deflation
1357 // so no need to lock ObjectMonitors for the list traversal.
1358 for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
1359 if (mid->object() != NULL) {
1360 f->do_oop((oop*)mid->object_addr());
1361 }
1362 }
1363 }
1364
1365
1391 for (;;) {
1392 ObjectMonitor* m;
1393
1394 // 1: try to allocate from the thread's local om_free_list.
1395 // Threads will attempt to allocate first from their local list, then
1396 // from the global list, and only after those attempts fail will the
1397 // thread attempt to instantiate new monitors. Thread-local free lists
1398 // improve allocation latency, as well as reducing coherency traffic
1399 // on the shared global list.
1400 m = take_from_start_of_om_free_list(self);
1401 if (m != NULL) {
1402 guarantee(m->object() == NULL, "invariant");
1403 m->set_allocation_state(ObjectMonitor::New);
1404 prepend_to_om_in_use_list(self, m);
1405 return m;
1406 }
1407
1408 // 2: try to allocate from the global om_list_globals._free_list
1409 // If we're using thread-local free lists then try
1410 // to reprovision the caller's free list.
1411 if (Atomic::load(&om_list_globals._free_list) != NULL) {
1412 // Reprovision the thread's om_free_list.
1413 // Use bulk transfers to reduce the allocation rate and heat
1414 // on various locks.
1415 for (int i = self->om_free_provision; --i >= 0;) {
1416 ObjectMonitor* take = take_from_start_of_global_free_list();
1417 if (take == NULL) {
1418 break; // No more are available.
1419 }
1420 guarantee(take->object() == NULL, "invariant");
1421 if (AsyncDeflateIdleMonitors) {
1422 // We allowed 3 field values to linger during async deflation.
1423 // Clear or restore them as appropriate.
1424 take->set_header(markWord::zero());
1425 // DEFLATER_MARKER is the only non-NULL value we should see here.
1426 take->try_set_owner_from(DEFLATER_MARKER, NULL);
1427 if (take->contentions() < 0) {
1428 // Add back max_jint to restore the contentions field to its
1429 // proper value.
1430 take->add_to_contentions(max_jint);
1519 // we have to remove 'm' from the in-use list first (as needed).
1520 if (from_per_thread_alloc) {
1521 // Need to remove 'm' from om_in_use_list.
1522 ObjectMonitor* mid = NULL;
1523 ObjectMonitor* next = NULL;
1524
1525 // This list walk can race with another list walker or with async
1526 // deflation so we have to worry about an ObjectMonitor being
1527 // removed from this list while we are walking it.
1528
1529 // Lock the list head to avoid racing with another list walker
1530 // or with async deflation.
1531 if ((mid = get_list_head_locked(&self->om_in_use_list)) == NULL) {
1532 fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self));
1533 }
1534 next = unmarked_next(mid);
1535 if (m == mid) {
1536 // First special case:
1537 // 'm' matches mid, is the list head and is locked. Switch the list
1538 // head to next which unlocks the list head, but leaves the extracted
1539 // mid locked:
1540 Atomic::store(&self->om_in_use_list, next);
1541 } else if (m == next) {
1542 // Second special case:
1543 // 'm' matches next after the list head and we already have the list
1544 // head locked so set mid to what we are extracting:
1545 mid = next;
1546 // Lock mid to prevent races with a list walker or an async
1547 // deflater thread that's ahead of us. The locked list head
1548 // prevents races from behind us.
1549 om_lock(mid);
1550 // Update next to what follows mid (if anything):
1551 next = unmarked_next(mid);
1552 // Switch next after the list head to new next which unlocks the
1553 // list head, but leaves the extracted mid locked:
1554 self->om_in_use_list->set_next_om(next);
1555 } else {
1556 // We have to search the list to find 'm'.
1557 guarantee(next != NULL, "thread=" INTPTR_FORMAT ": om_in_use_list=" INTPTR_FORMAT
1558 " is too short.", p2i(self), p2i(self->om_in_use_list));
1559 // Our starting anchor is next after the list head which is the
1560 // last ObjectMonitor we checked:
1561 ObjectMonitor* anchor = next;
1562 // Lock anchor to prevent races with a list walker or an async
1563 // deflater thread that's ahead of us. The locked list head
1564 // prevents races from behind us.
1565 om_lock(anchor);
1566 om_unlock(mid); // Unlock the list head now that anchor is locked.
1567 while ((mid = unmarked_next(anchor)) != NULL) {
1568 if (m == mid) {
1569 // We found 'm' on the per-thread in-use list so extract it.
1570 // Update next to what follows mid (if anything):
1571 next = unmarked_next(mid);
1572 // Switch next after the anchor to new next which unlocks the
1573 // anchor, but leaves the extracted mid locked:
1574 anchor->set_next_om(next);
1575 break;
1576 } else {
1577 // Lock the next anchor to prevent races with a list walker
1578 // or an async deflater thread that's ahead of us. The locked
1579 // current anchor prevents races from behind us.
1580 om_lock(mid);
1581 // Unlock current anchor now that next anchor is locked:
1582 om_unlock(anchor);
1583 anchor = mid; // Advance to new anchor and try again.
1584 }
1585 }
1586 }
1587
1588 if (mid == NULL) {
1589 // Reached end of the list and didn't find 'm' so:
1590 fatal("thread=" INTPTR_FORMAT " must find m=" INTPTR_FORMAT "on om_in_use_list="
1591 INTPTR_FORMAT, p2i(self), p2i(m), p2i(self->om_in_use_list));
1592 }
1593
1659 cur_om = unmarked_next(in_use_tail);
1660 continue;
1661 }
1662 if (cur_om->object() == NULL) {
1663 // cur_om was deflated and the object ref was cleared while it
1664 // was locked. We happened to see it just after it was unlocked
1665 // (and added to the free list). Refetch the possibly changed
1666 // next field and try again.
1667 cur_om = unmarked_next(in_use_tail);
1668 continue;
1669 }
1670 in_use_tail = cur_om;
1671 in_use_count++;
1672 cur_om = unmarked_next(cur_om);
1673 }
1674 guarantee(in_use_tail != NULL, "invariant");
1675 int l_om_in_use_count = Atomic::load(&self->om_in_use_count);
1676 ADIM_guarantee(l_om_in_use_count == in_use_count, "in-use counts don't match: "
1677 "l_om_in_use_count=%d, in_use_count=%d", l_om_in_use_count, in_use_count);
1678 Atomic::store(&self->om_in_use_count, 0);
1679 // Clear the in-use list head (which also unlocks it):
1680 Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL);
1681 om_unlock(in_use_list);
1682 }
1683
1684 int free_count = 0;
1685 ObjectMonitor* free_list = NULL;
1686 ObjectMonitor* free_tail = NULL;
1687 // This function can race with a list walker thread so we lock the
1688 // list head to prevent confusion.
1689 if ((free_list = get_list_head_locked(&self->om_free_list)) != NULL) {
1690 // At this point, we have locked the free list head so a racing
1691 // thread cannot come in after us. However, a racing thread could
1692 // be ahead of us; we'll detect that and delay to let it finish.
1693 //
1694 // The thread is going away. Set 'free_tail' to the last per-thread free
1695 // monitor which will be linked to om_list_globals._free_list below.
1696 //
1697 // Account for the free list head before the loop since it is
1698 // already locked (by this thread):
1702 if (is_locked(s)) {
1703 // s is locked so there must be a racing walker thread ahead
1704 // of us, so we'll give it a chance to finish.
1705 while (is_locked(s)) {
1706 os::naked_short_sleep(1);
1707 }
1708 }
1709 free_tail = s;
1710 free_count++;
1711 guarantee(s->object() == NULL, "invariant");
1712 if (s->is_busy()) {
1713 stringStream ss;
1714 fatal("must be !is_busy: %s", s->is_busy_to_string(&ss));
1715 }
1716 }
1717 guarantee(free_tail != NULL, "invariant");
1718 int l_om_free_count = Atomic::load(&self->om_free_count);
1719 ADIM_guarantee(l_om_free_count == free_count, "free counts don't match: "
1720 "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count);
1721 Atomic::store(&self->om_free_count, 0);
1722 Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL);
1723 om_unlock(free_list);
1724 }
1725
1726 if (free_tail != NULL) {
1727 prepend_list_to_global_free_list(free_list, free_tail, free_count);
1728 }
1729
1730 if (in_use_tail != NULL) {
1731 prepend_list_to_global_in_use_list(in_use_list, in_use_tail, in_use_count);
1732 }
1733
1734 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1735 LogStreamHandle(Info, monitorinflation) lsh_info;
1736 LogStream* ls = NULL;
1737 if (log_is_enabled(Debug, monitorinflation)) {
1738 ls = &lsh_debug;
1739 } else if ((free_count != 0 || in_use_count != 0) &&
1740 log_is_enabled(Info, monitorinflation)) {
1741 ls = &lsh_info;
1886
1887 // Setup monitor fields to proper values -- prepare the monitor
1888 m->set_header(dmw);
1889
1890 // Optimization: if the mark.locker stack address is associated
1891 // with this thread we could simply set m->_owner = self.
1892 // Note that a thread can inflate an object
1893 // that it has stack-locked -- as might happen in wait() -- directly
1894 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
1895 if (AsyncDeflateIdleMonitors) {
1896 m->set_owner_from(NULL, DEFLATER_MARKER, mark.locker());
1897 } else {
1898 m->set_owner_from(NULL, mark.locker());
1899 }
1900 m->set_object(object);
1901 // TODO-FIXME: assert BasicLock->dhw != 0.
1902
1903 // Must preserve store ordering. The monitor state must
1904 // be stable at the time of publishing the monitor address.
1905 guarantee(object->mark() == markWord::INFLATING(), "invariant");
1906 object->release_set_mark(markWord::encode(m));
1907
1908 // Once ObjectMonitor is configured and the object is associated
1909 // with the ObjectMonitor, it is safe to allow async deflation:
1910 assert(m->is_new(), "freshly allocated monitor must be new");
1911 // Release semantics needed to keep allocation_state from floating up.
1912 m->release_set_allocation_state(ObjectMonitor::Old);
1913
1914 // Hopefully the performance counters are allocated on distinct cache lines
1915 // to avoid false sharing on MP systems ...
1916 OM_PERFDATA_OP(Inflations, inc());
1917 if (log_is_enabled(Trace, monitorinflation)) {
1918 ResourceMark rm(self);
1919 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1920 INTPTR_FORMAT ", type='%s'", p2i(object),
1921 object->mark().value(), object->klass()->external_name());
1922 }
1923 if (event.should_commit()) {
1924 post_monitor_inflate_event(&event, object, cause);
1925 }
2059 ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
2060 const markWord dmw = mid->header();
2061 guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
2062
2063 if (mid->is_busy()) {
2064 // Easy checks are first - the ObjectMonitor is busy so no deflation.
2065 deflated = false;
2066 } else {
2067 // Deflate the monitor if it is no longer being used.
2068 // It's idle - scavenge it and return it to the global free list:
2069 // plain old deflation ...
2070 if (log_is_enabled(Trace, monitorinflation)) {
2071 ResourceMark rm;
2072 log_trace(monitorinflation)("deflate_monitor: "
2073 "object=" INTPTR_FORMAT ", mark="
2074 INTPTR_FORMAT ", type='%s'", p2i(obj),
2075 mark.value(), obj->klass()->external_name());
2076 }
2077
2078 // Restore the header back to obj
2079 obj->release_set_mark(dmw);
2080 if (AsyncDeflateIdleMonitors) {
2081 // clear() expects the owner field to be NULL.
2082 // DEFLATER_MARKER is the only non-NULL value we should see here.
2083 mid->try_set_owner_from(DEFLATER_MARKER, NULL);
2084 }
2085 mid->clear();
2086
2087 assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
2088 p2i(mid->object()));
2089 assert(mid->is_free(), "invariant");
2090
2091 // Move the deflated ObjectMonitor to the working free list
2092 // defined by free_head_p and free_tail_p.
2093 if (*free_head_p == NULL) *free_head_p = mid;
2094 if (*free_tail_p != NULL) {
2095 // We append to the list so the caller can use mid->_next_om
2096 // to fix the linkages in its context.
2097 ObjectMonitor* prevtail = *free_tail_p;
2098 // Should have been cleaned up by the caller:
2261 // This list walk executes at a safepoint and does not race with any
2262 // other list walkers.
2263
2264 for (mid = Atomic::load(list_p); mid != NULL; mid = next) {
2265 next = unmarked_next(mid);
2266 oop obj = (oop) mid->object();
2267 if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
2268 // Deflation succeeded and already updated free_head_p and
2269 // free_tail_p as needed. Finish the move to the local free list
2270 // by unlinking mid from the global or per-thread in-use list.
2271 if (cur_mid_in_use == NULL) {
2272 // mid is the list head so switch the list head to next:
2273 Atomic::store(list_p, next);
2274 } else {
2275 // Switch cur_mid_in_use's next field to next:
2276 cur_mid_in_use->set_next_om(next);
2277 }
2278 // At this point mid is disconnected from the in-use list.
2279 deflated_count++;
2280 Atomic::dec(count_p);
2281 // mid is current tail in the free_head_p list so NULL terminate it:
2282 mid->set_next_om(NULL);
2283 } else {
2284 cur_mid_in_use = mid;
2285 }
2286 }
2287 return deflated_count;
2288 }
2289
2290 // Walk a given ObjectMonitor list and deflate idle ObjectMonitors using
2291 // a JavaThread. Returns the number of deflated ObjectMonitors. The given
2292 // list could be a per-thread in-use list or the global in-use list.
2293 // If a safepoint has started, then we save state via saved_mid_in_use_p
2294 // and return to the caller to honor the safepoint.
2295 //
2296 int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p,
2297 int* count_p,
2298 ObjectMonitor** free_head_p,
2299 ObjectMonitor** free_tail_p,
2300 ObjectMonitor** saved_mid_in_use_p) {
2301 assert(AsyncDeflateIdleMonitors, "sanity check");
2342 // The current mid is locked at this point. If we have a
2343 // cur_mid_in_use, then it is also locked at this point.
2344
2345 if (next != NULL) {
2346 // We lock next so that an om_flush() thread that is behind us
2347 // cannot pass us when we unlock the current mid.
2348 om_lock(next);
2349 next_next = unmarked_next(next);
2350 }
2351
2352 // Only try to deflate if there is an associated Java object and if
2353 // mid is old (is not newly allocated and is not newly freed).
2354 if (mid->object() != NULL && mid->is_old() &&
2355 deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) {
2356 // Deflation succeeded and already updated free_head_p and
2357 // free_tail_p as needed. Finish the move to the local free list
2358 // by unlinking mid from the global or per-thread in-use list.
2359 if (cur_mid_in_use == NULL) {
2360 // mid is the list head and it is locked. Switch the list head
2361 // to next which is also locked (if not NULL) and also leave
2362 // mid locked:
2363 Atomic::store(list_p, next);
2364 } else {
2365 ObjectMonitor* locked_next = mark_om_ptr(next);
2366 // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's
2367 // next field to locked_next and also leave mid locked:
2368 cur_mid_in_use->set_next_om(locked_next);
2369 }
2370 // At this point mid is disconnected from the in-use list so
2371 // its lock no longer has any effect on the in-use list.
2372 deflated_count++;
2373 Atomic::dec(count_p);
2374 // mid is current tail in the free_head_p list so NULL terminate it
2375 // (which also unlocks it):
2376 mid->set_next_om(NULL);
2377
2378 // All the list management is done so move on to the next one:
2379 mid = next; // mid keeps non-NULL next's locked state
2380 next = next_next;
2381 } else {
2382 // mid is considered in-use if it does not have an associated
2383 // Java object or mid is not old or deflation did not succeed.
2384 // A mid->is_new() node can be seen here when it is freshly
2385 // returned by om_alloc() (and skips the deflation code path).
2386 // A mid->is_old() node can be seen here when deflation failed.
2387 // A mid->is_free() node can be seen here when a fresh node from
2388 // om_alloc() is released by om_release() due to losing the race
2389 // in inflate().
2390
2391 // All the list management is done so move on to the next one:
2392 if (cur_mid_in_use != NULL) {
2393 om_unlock(cur_mid_in_use);
2394 }
2395 // The next cur_mid_in_use keeps mid's lock state so
2396 // that it is stable for a possible next field change. It
2397 // cannot be modified by om_release() while it is locked.
2398 cur_mid_in_use = mid;
2399 mid = next; // mid keeps non-NULL next's locked state
2400 next = next_next;
2401
2402 if (SafepointMechanism::should_block(self) &&
2403 cur_mid_in_use != Atomic::load(list_p) && cur_mid_in_use->is_old()) {
2404 // If a safepoint has started and cur_mid_in_use is not the list
2405 // head and is old, then it is safe to use as saved state. Return
2406 // to the caller before blocking.
2407 *saved_mid_in_use_p = cur_mid_in_use;
2408 om_unlock(cur_mid_in_use);
2409 if (mid != NULL) {
2410 om_unlock(mid);
2411 }
2412 return deflated_count;
2413 }
2414 }
2415 if (mid == NULL) {
2416 if (cur_mid_in_use != NULL) {
2417 om_unlock(cur_mid_in_use);
2418 }
2419 break; // Reached end of the list so nothing more to deflate.
2420 }
2421
2422 // The current mid's next field is locked at this point. If we have
2445 if (!is_special_deflation_requested()) {
2446 return;
2447 }
2448 }
2449
2450 bool deflated = false;
2451
2452 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors
2453 ObjectMonitor* free_tail_p = NULL;
2454 elapsedTimer timer;
2455
2456 if (log_is_enabled(Info, monitorinflation)) {
2457 timer.start();
2458 }
2459
2460 // Note: the thread-local monitors lists get deflated in
2461 // a separate pass. See deflate_thread_local_monitors().
2462
2463 // For moribund threads, scan om_list_globals._in_use_list
2464 int deflated_count = 0;
2465 if (Atomic::load(&om_list_globals._in_use_list) != NULL) {
2466 // Update n_in_circulation before om_list_globals._in_use_count is
2467 // updated by deflation.
2468 Atomic::add(&counters->n_in_circulation,
2469 Atomic::load(&om_list_globals._in_use_count));
2470
2471 deflated_count = deflate_monitor_list(&om_list_globals._in_use_list,
2472 &om_list_globals._in_use_count,
2473 &free_head_p, &free_tail_p);
2474 Atomic::add(&counters->n_in_use, Atomic::load(&om_list_globals._in_use_count));
2475 }
2476
2477 if (free_head_p != NULL) {
2478 // Move the deflated ObjectMonitors back to the global free list.
2479 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
2480 #ifdef ASSERT
2481 ObjectMonitor* l_next_om = free_tail_p->next_om();
2482 #endif
2483 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
2484 prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
2530 }
2531
2532 log_info(monitorinflation)("async global_population=%d, global_in_use_count=%d, "
2533 "global_free_count=%d, global_wait_count=%d",
2534 Atomic::load(&om_list_globals._population),
2535 Atomic::load(&om_list_globals._in_use_count),
2536 Atomic::load(&om_list_globals._free_count),
2537 Atomic::load(&om_list_globals._wait_count));
2538
2539 // The ServiceThread's async deflation request has been processed.
2540 set_is_async_deflation_requested(false);
2541
2542 if (Atomic::load(&om_list_globals._wait_count) > 0) {
2543 // There are deflated ObjectMonitors waiting for a handshake
2544 // (or a safepoint) for safety.
2545
2546 ObjectMonitor* list = Atomic::load(&om_list_globals._wait_list);
2547 ADIM_guarantee(list != NULL, "om_list_globals._wait_list must not be NULL");
2548 int count = Atomic::load(&om_list_globals._wait_count);
2549 Atomic::store(&om_list_globals._wait_count, 0);
2550 Atomic::store(&om_list_globals._wait_list, (ObjectMonitor*)NULL);
2551
2552 // Find the tail for prepend_list_to_common(). No need to mark
2553 // ObjectMonitors for this list walk since only the deflater
2554 // thread manages the wait list.
2555 int l_count = 0;
2556 ObjectMonitor* tail = NULL;
2557 for (ObjectMonitor* n = list; n != NULL; n = unmarked_next(n)) {
2558 tail = n;
2559 l_count++;
2560 }
2561 ADIM_guarantee(count == l_count, "count=%d != l_count=%d", count, l_count);
2562
2563 // Will execute a safepoint if !ThreadLocalHandshakes:
2564 HandshakeForDeflation hfd_hc;
2565 Handshake::execute(&hfd_hc);
2566
2567 prepend_list_to_common(list, tail, count, &om_list_globals._free_list,
2568 &om_list_globals._free_count);
2569
157
158 int _wait_count; // # on wait_list
159 DEFINE_PAD_MINUS_SIZE(7, OM_CACHE_LINE_SIZE, sizeof(int));
160 };
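// Note: the DEFINE_PAD_MINUS_SIZE() entries pad each field out to
// OM_CACHE_LINE_SIZE so that counters and list heads updated by
// different threads do not share a cache line (avoids false sharing).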
161 static ObjectMonitorListGlobals om_list_globals;
162
163 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
164
165
166 // =====================> Spin-lock functions
167
168 // ObjectMonitors are not lockable outside of this file. We use spin-locks
169 // implemented using a bit in the _next_om field instead of the heavier
170 // weight locking mechanisms for faster list management.
171
172 #define OM_LOCK_BIT 0x1
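// A minimal sketch of the tagging trick (not build code), assuming
// ObjectMonitors are at least 2-byte aligned (here they are padded to
// cache-line size), so the low bit of a valid ObjectMonitor* is always
// zero and is free to act as a lock flag:
//   ObjectMonitor* om = ...;  // ((intptr_t)om & OM_LOCK_BIT) == 0
//   ObjectMonitor* locked = (ObjectMonitor*)((intptr_t)om | OM_LOCK_BIT);
//   ObjectMonitor* plain  = (ObjectMonitor*)((intptr_t)locked & ~OM_LOCK_BIT);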
173
174 // Returns true if the ObjectMonitor is locked.
175 // Otherwise returns false.
176 static bool is_locked(ObjectMonitor* om) {
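// The acquire here pairs with the release in om_unlock(): a reader
// that sees the lock bit clear also sees the list updates that were
// made while the lock was held.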
177 return ((intptr_t)om->next_om_acquire() & OM_LOCK_BIT) == OM_LOCK_BIT;
178 }
179
180 // Mark an ObjectMonitor* with OM_LOCK_BIT and return it.
181 static ObjectMonitor* mark_om_ptr(ObjectMonitor* om) {
182 return (ObjectMonitor*)((intptr_t)om | OM_LOCK_BIT);
183 }
184
185 // Return the unmarked next field in an ObjectMonitor. Note: the next
186 // field may or may not have been marked with OM_LOCK_BIT originally.
187 static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
188 return (ObjectMonitor*)((intptr_t)om->next_om() & ~OM_LOCK_BIT);
189 }
190
191 // Try to lock an ObjectMonitor. Returns true if locking was successful.
192 // Otherwise returns false.
193 static bool try_om_lock(ObjectMonitor* om) {
194 // Get current next field without any OM_LOCK_BIT value.
195 ObjectMonitor* next = unmarked_next(om);
196 if (om->try_set_next_om(next, mark_om_ptr(next)) != next) {
197 return false; // Cannot lock the ObjectMonitor.
198 }
199 return true;
200 }
201
202 // Lock an ObjectMonitor.
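// Note: this spins with no backoff, which is presumably acceptable
// because the critical sections these locks protect are only a few
// loads and stores long.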
203 static void om_lock(ObjectMonitor* om) {
204 while (true) {
205 if (try_om_lock(om)) {
206 return;
207 }
208 }
209 }
210
211 // Unlock an ObjectMonitor.
212 static void om_unlock(ObjectMonitor* om) {
213 ObjectMonitor* next = om->next_om();
214 guarantee(((intptr_t)next & OM_LOCK_BIT) == OM_LOCK_BIT, "next=" INTPTR_FORMAT
215 " must have OM_LOCK_BIT=%x set.", p2i(next), OM_LOCK_BIT);
216
217 next = (ObjectMonitor*)((intptr_t)next & ~OM_LOCK_BIT); // Clear OM_LOCK_BIT.
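// Release semantics needed on this store so that the list updates
// made while holding the lock are visible before the cleared lock
// bit is observed (pairs with the acquire in is_locked() above).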
218 om->release_set_next_om(next);
219 }
220
221 // Get the list head after locking it. Returns the list head or NULL
222 // if the list is empty.
223 static ObjectMonitor* get_list_head_locked(ObjectMonitor** list_p) {
224 while (true) {
225 // Acquire semantics not needed on this list load since we're
226 // checking for NULL here or following up with a cmpxchg() via
227 // try_om_lock() below and we retry on cmpxchg() failure.
228 ObjectMonitor* mid = Atomic::load(list_p);
229 if (mid == NULL) {
230 return NULL; // The list is empty.
231 }
232 if (try_om_lock(mid)) {
233 // Acquire semantics not needed on this list load since memory is
234 // already consistent due to the cmpxchg() via try_om_lock() above.
235 if (Atomic::load(list_p) != mid) {
236 // The list head changed before we could lock it so we have to retry.
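// For example, a take_from_start_of_common() call may have unlinked
// mid, or a prepend may have pushed a new head in front of it,
// between our load of *list_p and our lock of mid.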
237 om_unlock(mid);
238 continue;
239 }
240 return mid;
241 }
242 }
243 }
244
245 #undef OM_LOCK_BIT
246
247
248 // =====================> List Management functions
249
250 // Prepend a list of ObjectMonitors to the specified *list_p. 'tail' is
251 // the last ObjectMonitor in the list and there are 'count' on the list.
252 // Also updates the specified *count_p.
253 static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail,
254 int count, ObjectMonitor** list_p,
255 int* count_p) {
256 while (true) {
257 // Acquire semantics not needed on this list load since we're
258 // following up with a cmpxchg() via try_om_lock() below and we
259 // retry on cmpxchg() failure.
260 ObjectMonitor* cur = Atomic::load(list_p);
261 // Prepend list to *list_p.
262 if (!try_om_lock(tail)) {
263 // Failed to lock tail due to a list walker so try it all again.
264 continue;
265 }
266 // Release semantics not needed on this "unlock" since memory is
267 // already consistent due to the cmpxchg() via try_om_lock() above.
268 tail->set_next_om(cur); // tail now points to cur (and unlocks tail)
269 if (cur == NULL) {
270 // No potential race with takers or other prependers since
271 // *list_p is empty.
272 if (Atomic::cmpxchg(list_p, cur, list) == cur) {
273 // Successfully switched *list_p to the list value.
274 Atomic::add(count_p, count);
275 break;
276 }
277 // Implied else: try it all again
278 } else {
279 if (!try_om_lock(cur)) {
280 continue; // failed to lock cur so try it all again
281 }
282 // We locked cur so try to switch *list_p to the list value.
283 if (Atomic::cmpxchg(list_p, cur, list) != cur) {
284 // The list head has changed so unlock cur and try again:
285 om_unlock(cur);
286 continue;
287 }
335
336 // Prepend a list of ObjectMonitors to om_list_globals._in_use_list.
337 // 'tail' is the last ObjectMonitor in the list and there are 'count'
338 // on the list. Also updates om_list_globals._in_use_count.
339 static void prepend_list_to_global_in_use_list(ObjectMonitor* list,
340 ObjectMonitor* tail, int count) {
341 prepend_list_to_common(list, tail, count, &om_list_globals._in_use_list,
342 &om_list_globals._in_use_count);
343 }
344
345 // Prepend an ObjectMonitor to the specified list. Also updates
346 // the specified counter.
347 static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
348 int* count_p) {
349 while (true) {
350 om_lock(m); // Lock m so we can safely update its next field.
351 ObjectMonitor* cur = NULL;
352 // Lock the list head to guard against races with a list walker
353 // or async deflater thread (which only races in om_in_use_list):
354 if ((cur = get_list_head_locked(list_p)) != NULL) {
355 // List head is now locked so we can safely switch it. Release
356 // semantics not needed on this "unlock" since memory is already
357 // consistent due to the cmpxchg() via get_list_head_locked() above.
358 m->set_next_om(cur); // m now points to cur (and unlocks m)
359 OrderAccess::storestore(); // Make sure set_next_om() is seen first.
360 Atomic::store(list_p, m); // Switch list head to unlocked m.
361 om_unlock(cur);
362 break;
363 }
364 // The list is empty so try to set the list head.
365 assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
366 // Release semantics not needed on this "unlock" since memory
367 // is already consistent.
368 m->set_next_om(cur); // m now points to NULL (and unlocks m)
369 if (Atomic::cmpxchg(list_p, cur, m) == cur) {
370 // List head is now unlocked m.
371 break;
372 }
373 // Implied else: try it all again
374 }
375 Atomic::inc(count_p);
376 }
377
378 // Prepend an ObjectMonitor to a per-thread om_free_list.
379 // Also updates the per-thread om_free_count.
380 static void prepend_to_om_free_list(Thread* self, ObjectMonitor* m) {
381 prepend_to_common(m, &self->om_free_list, &self->om_free_count);
382 }
383
384 // Prepend an ObjectMonitor to a per-thread om_in_use_list.
385 // Also updates the per-thread om_in_use_count.
386 static void prepend_to_om_in_use_list(Thread* self, ObjectMonitor* m) {
387 prepend_to_common(m, &self->om_in_use_list, &self->om_in_use_count);
388 }
389
390 // Take an ObjectMonitor from the start of the specified list. Also
391 // decrements the specified counter. Returns NULL if none are available.
392 static ObjectMonitor* take_from_start_of_common(ObjectMonitor** list_p,
393 int* count_p) {
394 ObjectMonitor* take = NULL;
395 // Lock the list head to guard against races with a list walker
396 // or async deflater thread (which only races in om_list_globals._free_list):
397 if ((take = get_list_head_locked(list_p)) == NULL) {
398 return NULL; // None are available.
399 }
400 ObjectMonitor* next = unmarked_next(take);
401 // Switch locked list head to next (which unlocks the list head, but
402 // leaves take locked). Release semantics not needed on this "unlock"
403 // since memory is already consistent due to the cmpxchg() via
404 // get_list_head_locked() above.
405 Atomic::store(list_p, next);
406 Atomic::dec(count_p);
407 // Unlock take, but leave the next value for any lagging list
408 // walkers. It will get cleaned up when take is prepended to
409 // the in-use list:
410 om_unlock(take);
411 return take;
412 }
413
414 // Take an ObjectMonitor from the start of the om_list_globals._free_list.
415 // Also updates om_list_globals._free_count. Returns NULL if none are
416 // available.
417 static ObjectMonitor* take_from_start_of_global_free_list() {
418 return take_from_start_of_common(&om_list_globals._free_list,
419 &om_list_globals._free_count);
420 }
421
422 // Take an ObjectMonitor from the start of a per-thread free-list.
423 // Also updates om_free_count. Returns NULL if none are available.
424 static ObjectMonitor* take_from_start_of_om_free_list(Thread* self) {
1343 if (is_special_deflation_requested()) {
1344 // For AsyncDeflateIdleMonitors only do a safepoint deflation
1345 // if there is a special deflation request.
1346 return true;
1347 }
1348 return false;
1349 }
1350
1351 jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
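// NANOUNITS / MILLIUNITS is the number of nanoseconds per millisecond
// (1,000,000,000 / 1,000 == 1,000,000), so, for example, a gap of
// 2,500,000,000 ns is reported as 2500 ms.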
1352 return (os::javaTimeNanos() - _last_async_deflation_time_ns) / (NANOUNITS / MILLIUNITS);
1353 }
1354
1355 void ObjectSynchronizer::oops_do(OopClosure* f) {
1356 // We only scan the global used list here (for moribund threads), and
1357 // the thread-local monitors in Thread::oops_do().
1358 global_used_oops_do(f);
1359 }
1360
1361 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
1362 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1363 // Acquire semantics not needed since we're at a safepoint.
1364 list_oops_do(Atomic::load(&om_list_globals._in_use_list), f);
1365 }
1366
1367 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
1368 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1369 list_oops_do(thread->om_in_use_list, f);
1370 }
1371
1372 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
1373 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1374 // The oops_do() phase does not overlap with monitor deflation
1375 // so no need to lock ObjectMonitors for the list traversal.
1376 for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
1377 if (mid->object() != NULL) {
1378 f->do_oop((oop*)mid->object_addr());
1379 }
1380 }
1381 }
1382
1383
1409 for (;;) {
1410 ObjectMonitor* m;
1411
1412 // 1: try to allocate from the thread's local om_free_list.
1413 // Threads will attempt to allocate first from their local list, then
1414 // from the global list, and only after those attempts fail will the
1415 // thread attempt to instantiate new monitors. Thread-local free lists
1416 // improve allocation latency, as well as reducing coherency traffic
1417 // on the shared global list.
1418 m = take_from_start_of_om_free_list(self);
1419 if (m != NULL) {
1420 guarantee(m->object() == NULL, "invariant");
1421 m->set_allocation_state(ObjectMonitor::New);
1422 prepend_to_om_in_use_list(self, m);
1423 return m;
1424 }
1425
1426 // 2: try to allocate from the global om_list_globals._free_list
1427 // If we're using thread-local free lists then try
1428 // to reprovision the caller's free list.
1429 // Acquire semantics not needed on this list load since memory
1430 // is already consistent due to the cmpxchg() via
1431 // take_from_start_of_om_free_list() above.
1432 if (Atomic::load(&om_list_globals._free_list) != NULL) {
1433 // Reprovision the thread's om_free_list.
1434 // Use bulk transfers to reduce the allocation rate and heat
1435 // on various locks.
1436 for (int i = self->om_free_provision; --i >= 0;) {
1437 ObjectMonitor* take = take_from_start_of_global_free_list();
1438 if (take == NULL) {
1439 break; // No more are available.
1440 }
1441 guarantee(take->object() == NULL, "invariant");
1442 if (AsyncDeflateIdleMonitors) {
1443 // We allowed 3 field values to linger during async deflation.
1444 // Clear or restore them as appropriate.
1445 take->set_header(markWord::zero());
1446 // DEFLATER_MARKER is the only non-NULL value we should see here.
1447 take->try_set_owner_from(DEFLATER_MARKER, NULL);
1448 if (take->contentions() < 0) {
1449 // Add back max_jint to restore the contentions field to its
1450 // proper value.
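// For example, if a contender bumped contentions to 2 after the
// async deflater subtracted max_jint, the field reads 2 - max_jint
// (negative) and adding max_jint back yields 2 again.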
1451 take->add_to_contentions(max_jint);
1540 // we have to remove 'm' from the in-use list first (as needed).
1541 if (from_per_thread_alloc) {
1542 // Need to remove 'm' from om_in_use_list.
1543 ObjectMonitor* mid = NULL;
1544 ObjectMonitor* next = NULL;
1545
1546 // This list walk can race with another list walker or with async
1547 // deflation so we have to worry about an ObjectMonitor being
1548 // removed from this list while we are walking it.
1549
1550 // Lock the list head to avoid racing with another list walker
1551 // or with async deflation.
1552 if ((mid = get_list_head_locked(&self->om_in_use_list)) == NULL) {
1553 fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self));
1554 }
1555 next = unmarked_next(mid);
1556 if (m == mid) {
1557 // First special case:
1558 // 'm' matches mid, is the list head and is locked. Switch the list
1559 // head to next which unlocks the list head, but leaves the extracted
1560 // mid locked. Release semantics not needed on this "unlock" since
1561 // memory is already consistent due to the get_list_head_locked()
1562 // above.
1563 Atomic::store(&self->om_in_use_list, next);
1564 } else if (m == next) {
1565 // Second special case:
1566 // 'm' matches next after the list head and we already have the list
1567 // head locked so set mid to what we are extracting:
1568 mid = next;
1569 // Lock mid to prevent races with a list walker or an async
1570 // deflater thread that's ahead of us. The locked list head
1571 // prevents races from behind us.
1572 om_lock(mid);
1573 // Update next to what follows mid (if anything):
1574 next = unmarked_next(mid);
1575 // Switch next after the list head to new next which unlocks the
1576 // list head, but leaves the extracted mid locked. Release semantics
1577 // not needed on this "unlock" since memory is already consistent
1578 // due to the get_list_head_locked() above.
1579 self->om_in_use_list->set_next_om(next);
1580 } else {
1581 // We have to search the list to find 'm'.
1582 guarantee(next != NULL, "thread=" INTPTR_FORMAT ": om_in_use_list=" INTPTR_FORMAT
1583 " is too short.", p2i(self), p2i(self->om_in_use_list));
1584 // Our starting anchor is next after the list head which is the
1585 // last ObjectMonitor we checked:
1586 ObjectMonitor* anchor = next;
1587 // Lock anchor to prevent races with a list walker or an async
1588 // deflater thread that's ahead of us. The locked list head
1589 // prevents races from behind us.
1590 om_lock(anchor);
1591 om_unlock(mid); // Unlock the list head now that anchor is locked.
1592 while ((mid = unmarked_next(anchor)) != NULL) {
1593 if (m == mid) {
1594 // We found 'm' on the per-thread in-use list so extract it.
1595 // Update next to what follows mid (if anything):
1596 next = unmarked_next(mid);
1597 // Switch next after the anchor to new next which unlocks the
1598 // anchor, but leaves the extracted mid locked. Release semantics
1599 // not needed on this "unlock" since memory is already consistent
1600 // due to the om_unlock() above before entering the loop or the
1601 // om_unlock() below before looping again.
1602 anchor->set_next_om(next);
1603 break;
1604 } else {
1605 // Lock the next anchor to prevent races with a list walker
1606 // or an async deflater thread that's ahead of us. The locked
1607 // current anchor prevents races from behind us.
1608 om_lock(mid);
1609 // Unlock current anchor now that next anchor is locked:
1610 om_unlock(anchor);
1611 anchor = mid; // Advance to new anchor and try again.
1612 }
1613 }
1614 }
1615
1616 if (mid == NULL) {
1617 // Reached end of the list and didn't find 'm' so:
1618 fatal("thread=" INTPTR_FORMAT " must find m=" INTPTR_FORMAT "on om_in_use_list="
1619 INTPTR_FORMAT, p2i(self), p2i(m), p2i(self->om_in_use_list));
1620 }
1621
1687 cur_om = unmarked_next(in_use_tail);
1688 continue;
1689 }
1690 if (cur_om->object() == NULL) {
1691 // cur_om was deflated and the object ref was cleared while it
1692 // was locked. We happened to see it just after it was unlocked
1693 // (and added to the free list). Refetch the possibly changed
1694 // next field and try again.
1695 cur_om = unmarked_next(in_use_tail);
1696 continue;
1697 }
1698 in_use_tail = cur_om;
1699 in_use_count++;
1700 cur_om = unmarked_next(cur_om);
1701 }
1702 guarantee(in_use_tail != NULL, "invariant");
1703 int l_om_in_use_count = Atomic::load(&self->om_in_use_count);
1704 ADIM_guarantee(l_om_in_use_count == in_use_count, "in-use counts don't match: "
1705 "l_om_in_use_count=%d, in_use_count=%d", l_om_in_use_count, in_use_count);
1706 Atomic::store(&self->om_in_use_count, 0);
1707 OrderAccess::storestore(); // Make sure counter update is seen first.
1708 // Clear the in-use list head (which also unlocks it):
1709 Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL);
1710 om_unlock(in_use_list);
1711 }
1712
1713 int free_count = 0;
1714 ObjectMonitor* free_list = NULL;
1715 ObjectMonitor* free_tail = NULL;
1716 // This function can race with a list walker thread so we lock the
1717 // list head to prevent confusion.
1718 if ((free_list = get_list_head_locked(&self->om_free_list)) != NULL) {
1719 // At this point, we have locked the free list head so a racing
1720 // thread cannot come in after us. However, a racing thread could
1721 // be ahead of us; we'll detect that and delay to let it finish.
1722 //
1723 // The thread is going away. Set 'free_tail' to the last per-thread free
1724 // monitor which will be linked to om_list_globals._free_list below.
1725 //
1726 // Account for the free list head before the loop since it is
1727 // already locked (by this thread):
1731 if (is_locked(s)) {
1732 // s is locked so there must be a racing walker thread ahead
1733 // of us, so we'll give it a chance to finish.
1734 while (is_locked(s)) {
1735 os::naked_short_sleep(1);
1736 }
1737 }
1738 free_tail = s;
1739 free_count++;
1740 guarantee(s->object() == NULL, "invariant");
1741 if (s->is_busy()) {
1742 stringStream ss;
1743 fatal("must be !is_busy: %s", s->is_busy_to_string(&ss));
1744 }
1745 }
1746 guarantee(free_tail != NULL, "invariant");
1747 int l_om_free_count = Atomic::load(&self->om_free_count);
1748 ADIM_guarantee(l_om_free_count == free_count, "free counts don't match: "
1749 "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count);
1750 Atomic::store(&self->om_free_count, 0);
1751 OrderAccess::storestore(); // Make sure counter update is seen first.
1752 Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL);
1753 om_unlock(free_list);
1754 }
1755
1756 if (free_tail != NULL) {
1757 prepend_list_to_global_free_list(free_list, free_tail, free_count);
1758 }
1759
1760 if (in_use_tail != NULL) {
1761 prepend_list_to_global_in_use_list(in_use_list, in_use_tail, in_use_count);
1762 }
1763
1764 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1765 LogStreamHandle(Info, monitorinflation) lsh_info;
1766 LogStream* ls = NULL;
1767 if (log_is_enabled(Debug, monitorinflation)) {
1768 ls = &lsh_debug;
1769 } else if ((free_count != 0 || in_use_count != 0) &&
1770 log_is_enabled(Info, monitorinflation)) {
1771 ls = &lsh_info;
1916
1917 // Setup monitor fields to proper values -- prepare the monitor
1918 m->set_header(dmw);
1919
1920 // Optimization: if the mark.locker stack address is associated
1921 // with this thread we could simply set m->_owner = self.
1922 // Note that a thread can inflate an object
1923 // that it has stack-locked -- as might happen in wait() -- directly
1924 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
1925 if (AsyncDeflateIdleMonitors) {
1926 m->set_owner_from(NULL, DEFLATER_MARKER, mark.locker());
1927 } else {
1928 m->set_owner_from(NULL, mark.locker());
1929 }
1930 m->set_object(object);
1931 // TODO-FIXME: assert BasicLock->dhw != 0.
1932
1933 // Must preserve store ordering. The monitor state must
1934 // be stable at the time of publishing the monitor address.
1935 guarantee(object->mark() == markWord::INFLATING(), "invariant");
1936 // Release semantics so that above set_object() is seen first.
1937 object->release_set_mark(markWord::encode(m));
1938
1939 // Once ObjectMonitor is configured and the object is associated
1940 // with the ObjectMonitor, it is safe to allow async deflation:
1941 assert(m->is_new(), "freshly allocated monitor must be new");
1942 // Release semantics needed to keep allocation_state from floating up.
1943 m->release_set_allocation_state(ObjectMonitor::Old);
1944
1945 // Hopefully the performance counters are allocated on distinct cache lines
1946 // to avoid false sharing on MP systems ...
1947 OM_PERFDATA_OP(Inflations, inc());
1948 if (log_is_enabled(Trace, monitorinflation)) {
1949 ResourceMark rm(self);
1950 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1951 INTPTR_FORMAT ", type='%s'", p2i(object),
1952 object->mark().value(), object->klass()->external_name());
1953 }
1954 if (event.should_commit()) {
1955 post_monitor_inflate_event(&event, object, cause);
1956 }
2090 ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
2091 const markWord dmw = mid->header();
2092 guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
2093
2094 if (mid->is_busy()) {
2095 // Easy checks are first - the ObjectMonitor is busy so no deflation.
2096 deflated = false;
2097 } else {
2098 // Deflate the monitor if it is no longer being used.
2099 // It's idle - scavenge it and return it to the global free list:
2100 // plain old deflation ...
2101 if (log_is_enabled(Trace, monitorinflation)) {
2102 ResourceMark rm;
2103 log_trace(monitorinflation)("deflate_monitor: "
2104 "object=" INTPTR_FORMAT ", mark="
2105 INTPTR_FORMAT ", type='%s'", p2i(obj),
2106 mark.value(), obj->klass()->external_name());
2107 }
2108
2109 // Restore the header back to obj
2110 // XXX - I have no rationale for this "release", but it's been here forever.
2111 obj->release_set_mark(dmw);
2112 if (AsyncDeflateIdleMonitors) {
2113 // clear() expects the owner field to be NULL.
2114 // DEFLATER_MARKER is the only non-NULL value we should see here.
2115 mid->try_set_owner_from(DEFLATER_MARKER, NULL);
2116 }
2117 mid->clear();
2118
2119 assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
2120 p2i(mid->object()));
2121 assert(mid->is_free(), "invariant");
2122
2123 // Move the deflated ObjectMonitor to the working free list
2124 // defined by free_head_p and free_tail_p.
2125 if (*free_head_p == NULL) *free_head_p = mid;
2126 if (*free_tail_p != NULL) {
2127 // We append to the list so the caller can use mid->_next_om
2128 // to fix the linkages in its context.
2129 ObjectMonitor* prevtail = *free_tail_p;
2130 // Should have been cleaned up by the caller:
2293 // This list walk executes at a safepoint and does not race with any
2294 // other list walkers.
2295
2296 for (mid = Atomic::load(list_p); mid != NULL; mid = next) {
2297 next = unmarked_next(mid);
2298 oop obj = (oop) mid->object();
2299 if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
2300 // Deflation succeeded and already updated free_head_p and
2301 // free_tail_p as needed. Finish the move to the local free list
2302 // by unlinking mid from the global or per-thread in-use list.
2303 if (cur_mid_in_use == NULL) {
2304 // mid is the list head so switch the list head to next:
2305 Atomic::store(list_p, next);
2306 } else {
2307 // Switch cur_mid_in_use's next field to next:
2308 cur_mid_in_use->set_next_om(next);
2309 }
2310 // At this point mid is disconnected from the in-use list.
2311 deflated_count++;
2312 Atomic::dec(count_p);
2313 // mid is current tail in the free_head_p list so NULL terminate it.
2314 // No release semantics needed since Atomic::dec() already provides it.
2315 mid->set_next_om(NULL);
2316 } else {
2317 cur_mid_in_use = mid;
2318 }
2319 }
2320 return deflated_count;
2321 }
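// Note: because the walk above runs at a safepoint, nodes can be
// unlinked with plain stores; the deflate_monitor_list_using_JT()
// variant below runs concurrently with other threads and so must
// lock nodes hand-over-hand instead.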
2322
2323 // Walk a given ObjectMonitor list and deflate idle ObjectMonitors using
2324 // a JavaThread. Returns the number of deflated ObjectMonitors. The given
2325 // list could be a per-thread in-use list or the global in-use list.
2326 // If a safepoint has started, then we save state via saved_mid_in_use_p
2327 // and return to the caller to honor the safepoint.
2328 //
2329 int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p,
2330 int* count_p,
2331 ObjectMonitor** free_head_p,
2332 ObjectMonitor** free_tail_p,
2333 ObjectMonitor** saved_mid_in_use_p) {
2334 assert(AsyncDeflateIdleMonitors, "sanity check");
2375 // The current mid is locked at this point. If we have a
2376 // cur_mid_in_use, then it is also locked at this point.
2377
2378 if (next != NULL) {
2379 // We lock next so that an om_flush() thread that is behind us
2380 // cannot pass us when we unlock the current mid.
2381 om_lock(next);
2382 next_next = unmarked_next(next);
2383 }
2384
2385 // Only try to deflate if there is an associated Java object and if
2386 // mid is old (is not newly allocated and is not newly freed).
2387 if (mid->object() != NULL && mid->is_old() &&
2388 deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) {
2389 // Deflation succeeded and already updated free_head_p and
2390 // free_tail_p as needed. Finish the move to the local free list
2391 // by unlinking mid from the global or per-thread in-use list.
2392 if (cur_mid_in_use == NULL) {
2393 // mid is the list head and it is locked. Switch the list head
2394 // to next which is also locked (if not NULL) and also leave
2395 // mid locked. Release semantics needed since not all code paths
2396 // in deflate_monitor_using_JT() ensure memory consistency.
2397 Atomic::release_store(list_p, next);
2398 } else {
2399 ObjectMonitor* locked_next = mark_om_ptr(next);
2400 // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's
2401 // next field to locked_next and also leave mid locked.
2402 // Release semantics needed since not all code paths in
2403 // deflate_monitor_using_JT() ensure memory consistency.
2404 cur_mid_in_use->release_set_next_om(locked_next);
2405 }
2406 // At this point mid is disconnected from the in-use list so
2407 // its lock no longer has any effect on the in-use list.
2408 deflated_count++;
2409 Atomic::dec(count_p);
2410 // mid is current tail in the free_head_p list so NULL terminate
2411 // it (which also unlocks it). No release semantics needed since
2412 // Atomic::dec() already provides it.
2413 mid->set_next_om(NULL);
2414
2415 // All the list management is done so move on to the next one:
2416 mid = next; // mid keeps non-NULL next's locked state
2417 next = next_next;
2418 } else {
2419 // mid is considered in-use if it does not have an associated
2420 // Java object or mid is not old or deflation did not succeed.
2421 // A mid->is_new() node can be seen here when it is freshly
2422 // returned by om_alloc() (and skips the deflation code path).
2423 // A mid->is_old() node can be seen here when deflation failed.
2424 // A mid->is_free() node can be seen here when a fresh node from
2425 // om_alloc() is released by om_release() due to losing the race
2426 // in inflate().
2427
2428 // All the list management is done so move on to the next one:
2429 if (cur_mid_in_use != NULL) {
2430 om_unlock(cur_mid_in_use);
2431 }
2432 // The next cur_mid_in_use keeps mid's lock state so
2433 // that it is stable for a possible next field change. It
2434 // cannot be modified by om_release() while it is locked.
2435 cur_mid_in_use = mid;
2436 mid = next; // mid keeps non-NULL next's locked state
2437 next = next_next;
2438
2439 if (SafepointMechanism::should_block(self) &&
2440 // Acquire semantics are not needed on this list load since
2441 // it is not dependent on the following load which does have
2442 // acquire semantics.
2443 cur_mid_in_use != Atomic::load(list_p) && cur_mid_in_use->is_old()) {
2444 // If a safepoint has started and cur_mid_in_use is not the list
2445 // head and is old, then it is safe to use as saved state. Return
2446 // to the caller before blocking.
2447 *saved_mid_in_use_p = cur_mid_in_use;
2448 om_unlock(cur_mid_in_use);
2449 if (mid != NULL) {
2450 om_unlock(mid);
2451 }
2452 return deflated_count;
2453 }
2454 }
2455 if (mid == NULL) {
2456 if (cur_mid_in_use != NULL) {
2457 om_unlock(cur_mid_in_use);
2458 }
2459 break; // Reached end of the list so nothing more to deflate.
2460 }
2461
2462 // The current mid's next field is locked at this point. If we have
2485 if (!is_special_deflation_requested()) {
2486 return;
2487 }
2488 }
2489
2490 bool deflated = false;
2491
2492 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors
2493 ObjectMonitor* free_tail_p = NULL;
2494 elapsedTimer timer;
2495
2496 if (log_is_enabled(Info, monitorinflation)) {
2497 timer.start();
2498 }
2499
2500 // Note: the thread-local monitors lists get deflated in
2501 // a separate pass. See deflate_thread_local_monitors().
2502
2503 // For moribund threads, scan om_list_globals._in_use_list
2504 int deflated_count = 0;
2505 // Acquire semantics not needed since we are at a safepoint.
2506 if (Atomic::load(&om_list_globals._in_use_list) != NULL) {
2507 // Update n_in_circulation before om_list_globals._in_use_count is
2508 // updated by deflation.
2509 Atomic::add(&counters->n_in_circulation,
2510 Atomic::load(&om_list_globals._in_use_count));
2511
2512 deflated_count = deflate_monitor_list(&om_list_globals._in_use_list,
2513 &om_list_globals._in_use_count,
2514 &free_head_p, &free_tail_p);
2515 Atomic::add(&counters->n_in_use, Atomic::load(&om_list_globals._in_use_count));
2516 }
2517
2518 if (free_head_p != NULL) {
2519 // Move the deflated ObjectMonitors back to the global free list.
2520 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
2521 #ifdef ASSERT
2522 ObjectMonitor* l_next_om = free_tail_p->next_om();
2523 #endif
2524 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
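// l_next_om is declared under #ifdef ASSERT because assert() compiles
// to nothing in product builds; referencing it only inside assert()
// keeps product builds free of an unused-variable warning.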
2525 prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
2571 }
2572
2573 log_info(monitorinflation)("async global_population=%d, global_in_use_count=%d, "
2574 "global_free_count=%d, global_wait_count=%d",
2575 Atomic::load(&om_list_globals._population),
2576 Atomic::load(&om_list_globals._in_use_count),
2577 Atomic::load(&om_list_globals._free_count),
2578 Atomic::load(&om_list_globals._wait_count));
2579
2580 // The ServiceThread's async deflation request has been processed.
2581 set_is_async_deflation_requested(false);
2582
2583 if (Atomic::load(&om_list_globals._wait_count) > 0) {
2584 // There are deflated ObjectMonitors waiting for a handshake
2585 // (or a safepoint) for safety.
2586
2587 ObjectMonitor* list = Atomic::load(&om_list_globals._wait_list);
2588 ADIM_guarantee(list != NULL, "om_list_globals._wait_list must not be NULL");
2589 int count = Atomic::load(&om_list_globals._wait_count);
2590 Atomic::store(&om_list_globals._wait_count, 0);
2591 OrderAccess::storestore(); // Make sure counter update is seen first.
2592 Atomic::store(&om_list_globals._wait_list, (ObjectMonitor*)NULL);
2593
2594 // Find the tail for prepend_list_to_common(). No need to mark
2595 // ObjectMonitors for this list walk since only the deflater
2596 // thread manages the wait list.
2597 int l_count = 0;
2598 ObjectMonitor* tail = NULL;
2599 for (ObjectMonitor* n = list; n != NULL; n = unmarked_next(n)) {
2600 tail = n;
2601 l_count++;
2602 }
2603 ADIM_guarantee(count == l_count, "count=%d != l_count=%d", count, l_count);
2604
2605 // Will execute a safepoint if !ThreadLocalHandshakes:
2606 HandshakeForDeflation hfd_hc;
2607 Handshake::execute(&hfd_hc);
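// Once every JavaThread has executed the handshake (or the fallback
// safepoint), no thread can still hold a stale reference to one of
// these deflated ObjectMonitors, so they are safe to recycle.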
2608
2609 prepend_list_to_common(list, tail, count, &om_list_globals._free_list,
2610 &om_list_globals._free_count);
2611