132 // Global ObjectMonitor wait list. If HandshakeAfterDeflateIdleMonitors
133 // is true, deflated ObjectMonitors wait on this list until after a
134 // handshake or a safepoint for platforms that don't support handshakes.
135 // After the handshake or safepoint, the deflated ObjectMonitors are
136 // prepended to g_free_list.
137 static ObjectMonitor* volatile g_wait_list = NULL;
138
139 static volatile int g_om_free_count = 0; // # on g_free_list
140 static volatile int g_om_in_use_count = 0; // # on g_om_in_use_list
141 static volatile int g_om_population = 0; // # Extant -- in circulation
142 static volatile int g_om_wait_count = 0; // # on g_wait_list
143
144 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
145
146
147 // =====================> List Management functions
148
149 // Returns true if the ObjectMonitor's next field is marked.
150 // Otherwise returns false.
151 static bool is_next_marked(ObjectMonitor* om) {
152 return ((intptr_t)OrderAccess::load_acquire(&om->_next_om) & 0x1) != 0;
153 }
154
155 // Mark an ObjectMonitor* and return it. Note: the om parameter
156 // may or may not have been marked originally.
157 static ObjectMonitor* mark_om_ptr(ObjectMonitor* om) {
158 return (ObjectMonitor*)((intptr_t)om | 0x1);
159 }
160
161 // Mark the next field in an ObjectMonitor. If marking was successful,
162 // then the unmarked next field is returned via parameter and true is
163 // returned. Otherwise false is returned.
164 static bool mark_next(ObjectMonitor* om, ObjectMonitor** next_p) {
165 // Get current next field without any marking value.
166 ObjectMonitor* next = (ObjectMonitor*)
167 ((intptr_t)OrderAccess::load_acquire(&om->_next_om) & ~0x1);
168 if (Atomic::cmpxchg(mark_om_ptr(next), &om->_next_om, next) != next) {
169 return false; // Could not mark the next field or it was already marked.
170 }
171 *next_p = next;
172 return true;
173 }
174
175 // Loop until we mark the next field in an ObjectMonitor. The unmarked
176 // next field is returned.
177 static ObjectMonitor* mark_next_loop(ObjectMonitor* om) {
178 ObjectMonitor* next;
179 while (true) {
180 if (mark_next(om, &next)) {
181 // Marked om's next field so return the unmarked value.
182 return next;
183 }
184 }
185 }
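
// For illustration: a minimal, self-contained sketch of the low-bit
// next-field marking above, written with std::atomic rather than
// HotSpot's OrderAccess/Atomic wrappers. Node, with_mark() and
// try_mark_next() are illustrative names, not HotSpot APIs.
#include <atomic>
#include <cstdint>

struct Node {
  std::atomic<Node*> next{nullptr};
};

static Node* with_mark(Node* p) {    // set the low bit
  return reinterpret_cast<Node*>(reinterpret_cast<uintptr_t>(p) | 0x1);
}

static Node* without_mark(Node* p) { // clear the low bit
  return reinterpret_cast<Node*>(reinterpret_cast<uintptr_t>(p) &
                                 ~static_cast<uintptr_t>(0x1));
}

// One attempt to mark n->next, mirroring mark_next(): on success the
// unmarked next value is passed back via next_p and true is returned.
static bool try_mark_next(Node* n, Node** next_p) {
  Node* next = without_mark(n->next.load(std::memory_order_acquire));
  if (!n->next.compare_exchange_strong(next, with_mark(next),
                                       std::memory_order_acq_rel)) {
    return false;  // Already marked or a racing thread changed the field.
  }
  *next_p = next;
  return true;
}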
186
187 // Set the next field in an ObjectMonitor to the specified value.
188 // The caller of set_next() must be the same thread that marked the
189 // ObjectMonitor.
190 static void set_next(ObjectMonitor* om, ObjectMonitor* value) {
191 OrderAccess::release_store(&om->_next_om, value);
192 }
193
194 // Mark the next field in the list head ObjectMonitor. If marking was
195 // successful, then the mid and the unmarked next field are returned
196 // via parameter and true is returned. Otherwise false is returned.
197 static bool mark_list_head(ObjectMonitor* volatile * list_p,
198 ObjectMonitor** mid_p, ObjectMonitor** next_p) {
199 while (true) {
200 ObjectMonitor* mid = OrderAccess::load_acquire(list_p);
201 if (mid == NULL) {
202 return false; // The list is empty so nothing to mark.
203 }
204 if (mark_next(mid, next_p)) {
205 if (OrderAccess::load_acquire(list_p) != mid) {
206 // The list head changed so we have to retry.
207 set_next(mid, *next_p); // unmark mid
208 continue;
209 }
210 // We marked the next field to guard against races.
211 *mid_p = mid;
212 return true;
213 }
214 }
215 }
216
217 // Return the unmarked next field in an ObjectMonitor. Note: the next
218 // field may or may not have been marked originally.
219 static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
220 return (ObjectMonitor*)((intptr_t)OrderAccess::load_acquire(&om->_next_om) & ~0x1);
221 }
222
223 // Prepend a list of ObjectMonitors to the specified *list_p. 'tail' is
224 // the last ObjectMonitor in the list and there are 'count' on the list.
225 // Also updates the specified *count_p.
226 static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail,
227 int count, ObjectMonitor* volatile* list_p,
228 volatile int* count_p) {
229 while (true) {
230 ObjectMonitor* cur = OrderAccess::load_acquire(list_p);
231 // Prepend list to *list_p.
232 ObjectMonitor* next = NULL;
233 if (!mark_next(tail, &next)) {
234 continue; // failed to mark next field so try it all again
235 }
236 set_next(tail, cur); // tail now points to cur (and unmarks tail)
237 if (cur == NULL) {
238 // No potential race with takers or other prependers since
239 // *list_p is empty.
240 if (Atomic::cmpxchg(list, list_p, cur) == cur) {
241 // Successfully switched *list_p to the list value.
242 Atomic::add(count, count_p);
243 break;
244 }
245 // Implied else: try it all again
246 } else {
247 // Try to mark next field to guard against races:
248 if (!mark_next(cur, &next)) {
249 continue; // failed to mark next field so try it all again
250 }
303 // Prepend a list of ObjectMonitors to g_om_in_use_list. 'tail' is the last
304 // ObjectMonitor in the list and there are 'count' on the list. Also
305 // updates g_om_in_use_count.
306 static void prepend_list_to_g_om_in_use_list(ObjectMonitor* list,
307 ObjectMonitor* tail, int count) {
308 prepend_list_to_common(list, tail, count, &g_om_in_use_list, &g_om_in_use_count);
309 }
310
311 // Prepend an ObjectMonitor to the specified list. Also updates
312 // the specified counter.
313 static void prepend_to_common(ObjectMonitor* m, ObjectMonitor* volatile * list_p,
314 int volatile * count_p) {
315 while (true) {
316 (void)mark_next_loop(m); // mark m so we can safely update its next field
317 ObjectMonitor* cur = NULL;
318 ObjectMonitor* next = NULL;
319 // Mark the list head to guard against A-B-A race:
320 if (mark_list_head(list_p, &cur, &next)) {
321 // List head is now marked so we can safely switch it.
322 set_next(m, cur); // m now points to cur (and unmarks m)
323 OrderAccess::release_store(list_p, m); // Switch list head to unmarked m.
324 set_next(cur, next); // Unmark the previous list head.
325 break;
326 }
327 // The list is empty so try to set the list head.
328 assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
329 set_next(m, cur); // m now points to NULL (and unmarks m)
330 if (Atomic::cmpxchg(m, list_p, cur) == cur) {
331 // List head is now unmarked m.
332 break;
333 }
334 // Implied else: try it all again
335 }
336 Atomic::inc(count_p);
337 }
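
// For illustration: the prepend protocol reduced to a plain
// Treiber-stack push (std::atomic, illustrative names). It omits the
// next-field marking the code above needs because takers and async
// deflaters also unlink nodes concurrently; a push-only CAS loop like
// this one is not, by itself, exposed to A-B-A.
#include <atomic>

struct Node {
  std::atomic<Node*> next{nullptr};
};

static void push(Node* m, std::atomic<Node*>* head_p,
                 std::atomic<int>* count_p) {
  Node* cur = head_p->load(std::memory_order_acquire);
  do {
    m->next.store(cur, std::memory_order_relaxed);  // link m in front of cur
  } while (!head_p->compare_exchange_weak(cur, m,
                                          std::memory_order_release,
                                          std::memory_order_acquire));
  count_p->fetch_add(1, std::memory_order_relaxed);  // counter may lag briefly
}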
338
339 // Prepend an ObjectMonitor to a per-thread om_free_list.
340 // Also updates the per-thread om_free_count.
341 static void prepend_to_om_free_list(Thread* self, ObjectMonitor* m) {
342 prepend_to_common(m, &self->om_free_list, &self->om_free_count);
343 }
344
345 // Prepend an ObjectMonitor to a per-thread om_in_use_list.
346 // Also updates the per-thread om_in_use_count.
347 static void prepend_to_om_in_use_list(Thread* self, ObjectMonitor* m) {
348 prepend_to_common(m, &self->om_in_use_list, &self->om_in_use_count);
349 }
350
351 // Take an ObjectMonitor from the start of the specified list. Also
352 // decrements the specified counter. Returns NULL if none are available.
353 static ObjectMonitor* take_from_start_of_common(ObjectMonitor* volatile * list_p,
354 int volatile * count_p) {
355 ObjectMonitor* next = NULL;
356 ObjectMonitor* take = NULL;
357 // Mark the list head to guard against A-B-A race:
358 if (!mark_list_head(list_p, &take, &next)) {
359 return NULL; // None are available.
360 }
361 // Switch marked list head to next (which unmarks the list head, but
362 // leaves take marked):
363 OrderAccess::release_store(list_p, next);
364 Atomic::dec(count_p);
365 // Unmark take, but leave the next value for any lagging list
366 // walkers. It will get cleaned up when take is prepended to
367 // the in-use list:
368 set_next(take, next);
369 return take;
370 }
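
// For contrast, a naive lock-free pop (std::atomic, illustrative
// names). If another thread pops 'take', recycles it, and the same
// address becomes the head again, the CAS below can succeed against a
// stale next pointer -- the A-B-A hazard that mark_list_head() closes
// above by pinning the head's next field before switching the head.
#include <atomic>

struct Node {
  std::atomic<Node*> next{nullptr};
};

static Node* naive_pop(std::atomic<Node*>* head_p) {
  Node* take = head_p->load(std::memory_order_acquire);
  while (take != nullptr) {
    Node* next = take->next.load(std::memory_order_acquire);
    if (head_p->compare_exchange_weak(take, next,
                                      std::memory_order_acq_rel)) {
      return take;  // Unsafe if nodes can be freed or reused concurrently.
    }
    // compare_exchange_weak refreshed 'take' on failure; retry.
  }
  return nullptr;  // The list was empty.
}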
371
372 // Take an ObjectMonitor from the start of the global free-list. Also
373 // updates g_om_free_count. Returns NULL if none are available.
374 static ObjectMonitor* take_from_start_of_g_free_list() {
375 return take_from_start_of_common(&g_free_list, &g_om_free_count);
376 }
377
378 // Take an ObjectMonitor from the start of a per-thread free-list.
379 // Also updates om_free_count. Returns NULL if none are available.
380 static ObjectMonitor* take_from_start_of_om_free_list(Thread* self) {
381 return take_from_start_of_common(&self->om_free_list, &self->om_free_count);
382 }
383
384
1194 owner = (address) monitor->owner();
1195 }
1196
1197 if (owner != NULL) {
1198 // owning_thread_from_monitor_owner() may also return NULL here
1199 return Threads::owning_thread_from_monitor_owner(t_list, owner);
1200 }
1201
1202 // Unlocked case, header in place
1203 // Cannot have assertion since this object may have been
1204 // locked by another thread when reaching here.
1205 // assert(mark.is_neutral(), "sanity check");
1206
1207 return NULL;
1208 }
1209 }
1210
1211 // Visitors ...
1212
1213 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
1214 PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
1215 while (block != NULL) {
1216 assert(block->object() == CHAINMARKER, "must be a block header");
1217 for (int i = _BLOCKSIZE - 1; i > 0; i--) {
1218 ObjectMonitor* mid = (ObjectMonitor *)(block + i);
1219 if (mid->is_active()) {
1220 ObjectMonitorHandle omh(mid);
1221
1222 if (mid->object() == NULL ||
1223 (AsyncDeflateIdleMonitors && mid->ref_count() < 0)) {
1224 // Only process with closure if the object is set.
1225 // For async deflation, race here if monitor is not owned!
1226 // The above ref_count bump (in the ObjectMonitorHandle ctor)
1227 // will cause subsequent async deflation to skip it.
1228 // However, previous or concurrent async deflation is a race
1229 // so skip this ObjectMonitor if it is being async deflated.
1230 continue;
1231 }
1232 closure->do_monitor(mid);
1233 }
1234 }
1235 // unmarked_next() is not needed with g_block_list (no next field marking).
1236 block = (PaddedObjectMonitor*)OrderAccess::load_acquire(&block->_next_om);
1237 }
1238 }
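
// For illustration: the block layout that monitors_iterate() walks,
// reduced to a standalone sketch. BLOCKSIZE, Slot and 'active' are
// stand-ins: slot 0 of each block doubles as the link to the next
// block (hence CHAINMARKER and the 'i > 0' loop bound above) and
// slots 1..BLOCKSIZE-1 are the usable monitors.
enum { BLOCKSIZE = 128 };  // stand-in for _BLOCKSIZE

struct Slot {
  Slot* next_block;  // Meaningful only in slot 0, the block header.
  bool  active;
};

template <typename F>
static void for_each_active(Slot* block_list, F f) {
  for (Slot* block = block_list; block != nullptr; block = block[0].next_block) {
    for (int i = BLOCKSIZE - 1; i > 0; i--) {  // Slot 0 is linkage; skip it.
      if (block[i].active) {
        f(&block[i]);
      }
    }
  }
}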
1239
1240 static bool monitors_used_above_threshold() {
1241 if (OrderAccess::load_acquire(&g_om_population) == 0) {
1242 return false;
1243 }
1244 if (MonitorUsedDeflationThreshold > 0) {
1245 int monitors_used = OrderAccess::load_acquire(&g_om_population) -
1246 OrderAccess::load_acquire(&g_om_free_count);
1247 if (HandshakeAfterDeflateIdleMonitors) {
1248 monitors_used -= OrderAccess::load_acquire(&g_om_wait_count);
1249 }
1250 int monitor_usage = (monitors_used * 100LL) /
1251 OrderAccess::load_acquire(&g_om_population);
1252 return monitor_usage > MonitorUsedDeflationThreshold;
1253 }
1254 return false;
1255 }
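
// A worked standalone version of the threshold check, for
// illustration. The 100LL literal forces the multiply into 64 bits;
// with a plain int multiply, populations above INT_MAX/100 (about
// 21.4M monitors) would overflow. Example: population=1000, free=50,
// wait=0 gives usage = 950 * 100 / 1000 = 95, so threshold=90 is true.
static bool used_above_threshold(int population, int free_count,
                                 int wait_count, int threshold) {
  if (population == 0 || threshold <= 0) {
    return false;
  }
  int used = population - free_count - wait_count;
  int usage = (int)((used * 100LL) / population);  // 64-bit multiply
  return usage > threshold;
}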
1256
1257 // Returns true if MonitorBound is set (> 0) and if the specified
1258 // cnt is > MonitorBound. Otherwise returns false.
1259 static bool is_MonitorBound_exceeded(const int cnt) {
1260 const int mx = MonitorBound;
1261 return mx > 0 && cnt > mx;
1262 }
1263
1264 bool ObjectSynchronizer::is_async_deflation_needed() {
1265 if (!AsyncDeflateIdleMonitors) {
1266 return false;
1267 }
1268 if (is_async_deflation_requested()) {
1269 // Async deflation request.
1270 return true;
1271 }
1272 if (AsyncDeflationInterval > 0 &&
1273 time_since_last_async_deflation_ms() > AsyncDeflationInterval &&
1274 monitors_used_above_threshold()) {
1275 // It's been longer than our specified deflate interval and there
1276 // are too many monitors in use. We don't deflate more frequently
1277 // than AsyncDeflationInterval (unless is_async_deflation_requested)
1278 // in order to not swamp the ServiceThread.
1279 _last_async_deflation_time_ns = os::javaTimeNanos();
1280 return true;
1281 }
1282 int monitors_used = OrderAccess::load_acquire(&g_om_population) -
1283 OrderAccess::load_acquire(&g_om_free_count);
1284 if (HandshakeAfterDeflateIdleMonitors) {
1285 monitors_used -= OrderAccess::load_acquire(&g_om_wait_count);
1286 }
1287 if (is_MonitorBound_exceeded(monitors_used)) {
1288 // Not enough ObjectMonitors on the global free list.
1289 return true;
1290 }
1291 return false;
1292 }
1293
1294 bool ObjectSynchronizer::is_safepoint_deflation_needed() {
1295 if (!AsyncDeflateIdleMonitors) {
1296 if (monitors_used_above_threshold()) {
1297 // Too many monitors in use.
1298 return true;
1299 }
1300 return false;
1301 }
1302 if (is_special_deflation_requested()) {
1303 // For AsyncDeflateIdleMonitors only do a safepoint deflation
1304 // if there is a special deflation request.
1305 return true;
1306 }
1307 return false;
1308 }
1309
1310 jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
1311 return (os::javaTimeNanos() - _last_async_deflation_time_ns) / (NANOUNITS / MILLIUNITS);
1312 }
1313
1314 void ObjectSynchronizer::oops_do(OopClosure* f) {
1315 // We only scan the global used list here (for moribund threads), and
1316 // the thread-local monitors in Thread::oops_do().
1317 global_used_oops_do(f);
1318 }
1319
1320 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
1321 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1322 list_oops_do(OrderAccess::load_acquire(&g_om_in_use_list), OrderAccess::load_acquire(&g_om_in_use_count), f);
1323 }
1324
1325 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
1326 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1327 list_oops_do(OrderAccess::load_acquire(&thread->om_in_use_list), OrderAccess::load_acquire(&thread->om_in_use_count), f);
1328 }
1329
1330 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, int count, OopClosure* f) {
1331 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1332 // The oops_do() phase does not overlap with monitor deflation
1333 // so no need to update the ObjectMonitor's ref_count for this
1334 // ObjectMonitor* use.
1335 for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
1336 if (mid->object() != NULL) {
1337 f->do_oop((oop*)mid->object_addr());
1338 }
1339 }
1340 }
1341
1342
1343 // -----------------------------------------------------------------------------
1344 // ObjectMonitor Lifecycle
1345 // -----------------------
1346 // Inflation unlinks monitors from the global g_free_list and
1347 // associates them with objects. Deflation -- which occurs at
1348 // STW-time -- disassociates idle monitors from objects. Such
1349 // scavenged monitors are returned to the g_free_list.
1350 //
1351 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
1352 //
1353 // Lifecycle:
1354 // -- unassigned and on the global free list
1426
1427 // 1: try to allocate from the thread's local om_free_list.
1428 // Threads will attempt to allocate first from their local list, then
1429 // from the global list, and only after those attempts fail will the
1430 // thread attempt to instantiate new monitors. Thread-local free lists
1431 // improve allocation latency, as well as reducing coherency traffic
1432 // on the shared global list.
1433 m = take_from_start_of_om_free_list(self);
1434 if (m != NULL) {
1435 guarantee(m->object() == NULL, "invariant");
1436 m->set_allocation_state(ObjectMonitor::New);
1437 prepend_to_om_in_use_list(self, m);
1438 return m;
1439 }
1440
1441 // 2: try to allocate from the global g_free_list
1442 // CONSIDER: use muxTry() instead of muxAcquire().
1443 // If the muxTry() fails then drop immediately into case 3.
1444 // If we're using thread-local free lists then try
1445 // to reprovision the caller's free list.
1446 if (OrderAccess::load_acquire(&g_free_list) != NULL) {
1447 // Reprovision the thread's om_free_list.
1448 // Use bulk transfers to reduce the allocation rate and heat
1449 // on various locks.
1450 for (int i = self->om_free_provision; --i >= 0;) {
1451 ObjectMonitor* take = take_from_start_of_g_free_list();
1452 if (take == NULL) {
1453 break; // No more are available.
1454 }
1455 guarantee(take->object() == NULL, "invariant");
1456 if (AsyncDeflateIdleMonitors) {
1457 // We allowed 3 field values to linger during async deflation.
1458 // We clear header and restore ref_count here, but we leave
1459 // owner == DEFLATER_MARKER so the simple C2 ObjectMonitor
1460 // enter optimization can no longer race with async deflation
1461 // and reuse.
1462 take->set_header(markWord::zero());
1463 if (take->ref_count() < 0) {
1464 // Add back max_jint to restore the ref_count field to its
1465 // proper value.
1466 Atomic::add(max_jint, &take->_ref_count);
1467
1468 assert(take->ref_count() >= 0, "must not be negative: ref_count=%d",
1469 take->ref_count());
1470 }
1471 }
1472 take->Recycle();
1473 // Since we're taking from the global free-list, take must be Free.
1474 // om_release() also sets the allocation state to Free because it
1475 // is called from other code paths.
1476 assert(take->is_free(), "invariant");
1477 om_release(self, take, false);
1478 }
1479 self->om_free_provision += 1 + (self->om_free_provision/2);
1480 if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
1481
1482 if (!AsyncDeflateIdleMonitors &&
1483 is_MonitorBound_exceeded(OrderAccess::load_acquire(&g_om_population) -
1484 OrderAccess::load_acquire(&g_om_free_count))) {
1485 // Not enough ObjectMonitors on the global free list.
1486 // We can't safely induce a STW safepoint from om_alloc() as our thread
1487 // state may not be appropriate for such activities and callers may hold
1488 // naked oops, so instead we defer the action.
1489 InduceScavenge(self, "om_alloc");
1490 }
1491 continue;
1492 }
1493
1494 // 3: allocate a block of new ObjectMonitors
1495 // Both the local and global free lists are empty -- resort to malloc().
1496 // In the current implementation ObjectMonitors are TSM - immortal.
1497 // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
1498 // each ObjectMonitor to start at the beginning of a cache line,
1499 // so we use align_up().
1500 // A better solution would be to use C++ placement-new.
1501 // BEWARE: As it stands currently, we don't run the ctors!
1502 assert(_BLOCKSIZE > 1, "invariant");
1503 size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
1504 PaddedObjectMonitor* temp;
1505 size_t aligned_size = neededsize + (OM_CACHE_LINE_SIZE - 1);
1506 void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size, mtInternal);
1507 temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, OM_CACHE_LINE_SIZE);
1508 (void)memset((void *) temp, 0, neededsize);
1509
1510 // Format the block.
1511 // Initialize the linked list; each monitor points to its next,
1512 // forming the singly linked free list. The very first monitor
1513 // will point to the next block, which forms the block list.
1514 // The trick of using the 1st element in the block as g_block_list
1515 // linkage should be reconsidered. A better implementation would
1516 // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
1517
1518 for (int i = 1; i < _BLOCKSIZE; i++) {
1519 OrderAccess::release_store(&temp[i]._next_om, (ObjectMonitor*)&temp[i+1]);
1520 assert(temp[i].is_free(), "invariant");
1521 }
1522
1523 // Terminate the last monitor as the end of the list.
1524 OrderAccess::release_store(&temp[_BLOCKSIZE - 1]._next_om, (ObjectMonitor*)NULL);
1525
1526 // Element [0] is reserved for global list linkage
1527 temp[0].set_object(CHAINMARKER);
1528
1529 // Consider carving out this thread's current request from the
1530 // block in hand. This avoids some lock traffic and redundant
1531 // list activity.
1532
1533 prepend_block_to_lists(temp);
1534 }
1535 }
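
// For illustration: the align_up() dance in om_alloc() as a standalone
// sketch -- over-allocate by (alignment - 1) bytes and round the raw
// pointer up to the next cache-line boundary. CACHE_LINE is a stand-in
// for OM_CACHE_LINE_SIZE; the raw pointer is handed back too, although
// the blocks above are immortal (TSM) and never freed.
#include <cstdint>
#include <cstdlib>
#include <cstring>

static const size_t CACHE_LINE = 64;  // assumption: 64-byte cache lines

static void* align_up_ptr(void* p, size_t align) {  // align: power of two
  uintptr_t v = (reinterpret_cast<uintptr_t>(p) + align - 1) &
                ~(static_cast<uintptr_t>(align) - 1);
  return reinterpret_cast<void*>(v);
}

static void* alloc_cache_aligned(size_t size, void** raw_out) {
  void* raw = malloc(size + CACHE_LINE - 1);
  if (raw == nullptr) return nullptr;
  void* aligned = align_up_ptr(raw, CACHE_LINE);
  memset(aligned, 0, size);  // Zero only the aligned payload, as above.
  *raw_out = raw;
  return aligned;
}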
1536
1537 // Place "m" on the caller's private per-thread om_free_list.
1538 // In practice there's no need to clamp or limit the number of
1539 // monitors on a thread's om_free_list as the only non-allocation time
1540 // we'll call om_release() is to return a monitor to the free list after
1541 // a CAS attempt failed. This doesn't allow unbounded #s of monitors to
1542 // accumulate on a thread's free list.
1543 //
1544 // Key constraint: all ObjectMonitors on a thread's free list and the global
1558 // _next_om is used for both per-thread in-use and free lists so
1559 // we have to remove 'm' from the in-use list first (as needed).
1560 if (from_per_thread_alloc) {
1561 // Need to remove 'm' from om_in_use_list.
1562 // We use the more complicated mark-cur_mid_in_use-and-mid-as-we-go
1563 // protocol because async deflation can do list deletions in parallel.
1564 ObjectMonitor* cur_mid_in_use = NULL;
1565 ObjectMonitor* mid = NULL;
1566 ObjectMonitor* next = NULL;
1567 bool extracted = false;
1568
1569 if (!mark_list_head(&self->om_in_use_list, &mid, &next)) {
1570 fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self));
1571 }
1572 while (true) {
1573 if (m == mid) {
1574 // We found 'm' on the per-thread in-use list so try to extract it.
1575 if (cur_mid_in_use == NULL) {
1576 // mid is the list head and it is marked. Switch the list head
1577 // to next which unmarks the list head, but leaves mid marked:
1578 OrderAccess::release_store(&self->om_in_use_list, next);
1579 } else {
1580 // mid and cur_mid_in_use are marked. Switch cur_mid_in_use's
1581 // next field to next which unmarks cur_mid_in_use, but leaves
1582 // mid marked:
1583 OrderAccess::release_store(&cur_mid_in_use->_next_om, next);
1584 }
1585 extracted = true;
1586 Atomic::dec(&self->om_in_use_count);
1587 // Unmark mid, but leave the next value for any lagging list
1588 // walkers. It will get cleaned up when mid is prepended to
1589 // the thread's free list:
1590 set_next(mid, next);
1591 break;
1592 }
1593 if (cur_mid_in_use != NULL) {
1594 set_next(cur_mid_in_use, mid); // unmark cur_mid_in_use
1595 }
1596 // The next cur_mid_in_use keeps mid's marked next field so
1597 // that it is stable for a possible next field change. It
1598 // cannot be deflated while it is marked.
1653 // The thread is going away, however the ObjectMonitors on the
1654 // om_in_use_list may still be in-use by other threads. Link
1655 // them to in_use_tail, which will be linked into the global
1656 // in-use list g_om_in_use_list below.
1657 //
1658 // Account for the in-use list head before the loop since it is
1659 // already marked (by this thread):
1660 in_use_tail = in_use_list;
1661 in_use_count++;
1662 for (ObjectMonitor* cur_om = unmarked_next(in_use_list); cur_om != NULL;) {
1663 if (is_next_marked(cur_om)) {
1664 // This next field is marked so there must be an async deflater
1665 // thread ahead of us so we'll give it a chance to finish.
1666 while (is_next_marked(cur_om)) {
1667 os::naked_short_sleep(1);
1668 }
1669 // Refetch the possibly changed next field and try again.
1670 cur_om = unmarked_next(in_use_tail);
1671 continue;
1672 }
1673 if (!cur_om->is_active()) {
1674 // cur_om was deflated and the allocation state was changed
1675 // to Free while it was marked. We happened to see it just
1676 // after it was unmarked (and added to the free list).
1677 // Refetch the possibly changed next field and try again.
1678 cur_om = unmarked_next(in_use_tail);
1679 continue;
1680 }
1681 in_use_tail = cur_om;
1682 in_use_count++;
1683 cur_om = unmarked_next(cur_om);
1684 }
1685 guarantee(in_use_tail != NULL, "invariant");
1686 int l_om_in_use_count = OrderAccess::load_acquire(&self->om_in_use_count);
1687 ADIM_guarantee(l_om_in_use_count == in_use_count, "in-use counts don't "
1688 "match: l_om_in_use_count=%d, in_use_count=%d",
1689 l_om_in_use_count, in_use_count);
1690 // Clear the in-use count before unmarking the in-use list head
1691 // to avoid races:
1692 OrderAccess::release_store(&self->om_in_use_count, 0);
1693 // Clear the in-use list head (which also unmarks it):
1694 OrderAccess::release_store(&self->om_in_use_list, (ObjectMonitor*)NULL);
1695 // Unmark the disconnected list head:
1696 set_next(in_use_list, next);
1697 }
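
// For illustration: the wait-for-the-deflater step from the loop
// above, sketched with std::atomic and std::this_thread::sleep_for
// standing in for OrderAccess and os::naked_short_sleep(). Same
// low-bit convention as mark_next(); names are illustrative.
#include <atomic>
#include <chrono>
#include <cstdint>
#include <thread>

struct Node {
  std::atomic<Node*> next{nullptr};
};

static bool next_is_marked(Node* n) {
  return (reinterpret_cast<uintptr_t>(
              n->next.load(std::memory_order_acquire)) & 0x1) != 0;
}

// Sleep politely until the racing thread that marked n->next is done,
// then return the refreshed, unmarked next pointer.
static Node* wait_for_unmarked_next(Node* n) {
  while (next_is_marked(n)) {
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
  return reinterpret_cast<Node*>(
      reinterpret_cast<uintptr_t>(n->next.load(std::memory_order_acquire)) &
      ~static_cast<uintptr_t>(0x1));
}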
1698
1699 int free_count = 0;
1700 ObjectMonitor* free_list = OrderAccess::load_acquire(&self->om_free_list);
1701 ObjectMonitor* free_tail = NULL;
1702 if (free_list != NULL) {
1703 // The thread is going away. Set 'free_tail' to the last per-thread free
1704 // monitor which will be linked to g_free_list below.
1705 stringStream ss;
1706 for (ObjectMonitor* s = free_list; s != NULL; s = unmarked_next(s)) {
1707 free_count++;
1708 free_tail = s;
1709 guarantee(s->object() == NULL, "invariant");
1710 guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
1711 }
1712 guarantee(free_tail != NULL, "invariant");
1713 int l_om_free_count = OrderAccess::load_acquire(&self->om_free_count);
1714 ADIM_guarantee(l_om_free_count == free_count, "free counts don't match: "
1715 "l_om_free_count=%d, free_count=%d", l_om_free_count,
1716 free_count);
1717 OrderAccess::release_store(&self->om_free_list, (ObjectMonitor*)NULL);
1718 OrderAccess::release_store(&self->om_free_count, 0);
1719 }
1720
1721 if (free_tail != NULL) {
1722 prepend_list_to_g_free_list(free_list, free_tail, free_count);
1723 }
1724
1725 if (in_use_tail != NULL) {
1726 prepend_list_to_g_om_in_use_list(in_use_list, in_use_tail, in_use_count);
1727 }
1728
1729 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1730 LogStreamHandle(Info, monitorinflation) lsh_info;
1731 LogStream* ls = NULL;
1732 if (log_is_enabled(Debug, monitorinflation)) {
1733 ls = &lsh_debug;
1734 } else if ((free_count != 0 || in_use_count != 0) &&
1735 log_is_enabled(Info, monitorinflation)) {
1736 ls = &lsh_info;
1737 }
1738 if (ls != NULL) {
1884
1885
1886 // fetch the displaced mark from the owner's stack.
1887 // The owner can't die or unwind past the lock while our INFLATING
1888 // object is in the mark. Furthermore the owner can't complete
1889 // an unlock on the object, either.
1890 markWord dmw = mark.displaced_mark_helper();
1891 // Catch if the object's header is not neutral (not locked and
1892 // not marked is what we care about here).
1893 ADIM_guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1894
1895 // Setup monitor fields to proper values -- prepare the monitor
1896 m->set_header(dmw);
1897
1898 // Optimization: if the mark.locker stack address is associated
1899 // with this thread we could simply set m->_owner = self.
1900 // Note that a thread can inflate an object
1901 // that it has stack-locked -- as might happen in wait() -- directly
1902 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
1903 if (AsyncDeflateIdleMonitors) {
1904 m->set_owner_from(mark.locker(), NULL, DEFLATER_MARKER);
1905 } else {
1906 m->set_owner_from(mark.locker(), NULL);
1907 }
1908 m->set_object(object);
1909 // TODO-FIXME: assert BasicLock->dhw != 0.
1910
1911 omh_p->set_om_ptr(m);
1912
1913 // Must preserve store ordering. The monitor state must
1914 // be stable at the time of publishing the monitor address.
1915 guarantee(object->mark() == markWord::INFLATING(), "invariant");
1916 object->release_set_mark(markWord::encode(m));
1917
1918 // Once ObjectMonitor is configured and the object is associated
1919 // with the ObjectMonitor, it is safe to allow async deflation:
1920 assert(m->is_new(), "freshly allocated monitor must be new");
1921 m->set_allocation_state(ObjectMonitor::Old);
1922
1923 // Hopefully the performance counters are allocated on distinct cache lines
1924 // to avoid false sharing on MP systems ...
1925 OM_PERFDATA_OP(Inflations, inc());
1926 if (log_is_enabled(Trace, monitorinflation)) {
2026 // is registered with the ServiceThread and it is notified.
2027
2028 void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* counters) {
2029 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2030
2031 // The per-thread in-use lists are handled in
2032 // ParallelSPCleanupThreadClosure::do_thread().
2033
2034 if (!AsyncDeflateIdleMonitors || is_special_deflation_requested()) {
2035 // Use the older mechanism for the global in-use list or if a
2036 // special deflation has been requested before the safepoint.
2037 ObjectSynchronizer::deflate_idle_monitors(counters);
2038 return;
2039 }
2040
2041 log_debug(monitorinflation)("requesting async deflation of idle monitors.");
2042 // Request deflation of idle monitors by the ServiceThread:
2043 set_is_async_deflation_requested(true);
2044 MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
2045 ml.notify_all();
2046 }
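
// For illustration: the request-and-notify handoff to the
// ServiceThread, sketched with std::mutex and std::condition_variable
// standing in for MonitorLocker on Service_lock. Names are illustrative.
#include <atomic>
#include <condition_variable>
#include <mutex>

static std::mutex              service_lock;
static std::condition_variable service_cv;
static std::atomic<bool>       deflation_requested{false};

// Requester side (what do_safepoint_work() does above): publish the
// flag, then wake the worker.
static void request_async_deflation() {
  deflation_requested.store(true, std::memory_order_release);
  std::lock_guard<std::mutex> lg(service_lock);
  service_cv.notify_all();
}

// Worker side (the ServiceThread loop): sleep until a request
// arrives, then consume it and go deflate.
static void wait_for_deflation_request() {
  std::unique_lock<std::mutex> ul(service_lock);
  service_cv.wait(ul, [] {
    return deflation_requested.load(std::memory_order_acquire);
  });
  deflation_requested.store(false, std::memory_order_relaxed);
}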
2047
2048 // Deflate a single monitor if not in-use
2049 // Return true if deflated, false if in-use
2050 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
2051 ObjectMonitor** free_head_p,
2052 ObjectMonitor** free_tail_p) {
2053 bool deflated;
2054 // Normal case ... The monitor is associated with obj.
2055 const markWord mark = obj->mark();
2056 guarantee(mark == markWord::encode(mid), "should match: mark="
2057 INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
2058 markWord::encode(mid).value());
2059 // Make sure that mark.monitor() and markWord::encode() agree:
2060 guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
2061 ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
2062 const markWord dmw = mid->header();
2063 guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
2064
2065 if (mid->is_busy() || mid->ref_count() != 0) {
2228 }
2229 *free_tail_p = mid;
2230
2231 // At this point, mid->_next_om still refers to its current
2232 // value and another ObjectMonitor's _next_om field still
2233 // refers to this ObjectMonitor. Those linkages have to be
2234 // cleaned up by the caller who has the complete context.
2235
2236 // We leave owner == DEFLATER_MARKER and ref_count < 0
2237 // to force any racing threads to retry.
2238 return true; // Success, ObjectMonitor has been deflated.
2239 }
2240
2241 // The owner was changed from DEFLATER_MARKER so we lost the
2242 // race since the ObjectMonitor is now busy.
2243
2244 // Add back max_jint to restore the ref_count field to its
2245 // proper value (which may not be what we saw above):
2246 Atomic::add(max_jint, &mid->_ref_count);
2247
2248 assert(mid->ref_count() >= 0, "must not be negative: ref_count=%d",
2249 mid->ref_count());
2250 return false;
2251 }
2252
2253 // The ref_count was no longer 0 so we lost the race since the
2254 // ObjectMonitor is now busy or the ObjectMonitor* is now in use.
2255 // Restore owner to NULL if it is still DEFLATER_MARKER:
2256 mid->try_set_owner_from(NULL, DEFLATER_MARKER);
2257 }
2258
2259 // The owner field is no longer NULL so we lost the race since the
2260 // ObjectMonitor is now busy.
2261 return false;
2262 }
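
// For illustration: the "subtract max_jint" ref_count protocol in
// isolation, with std::atomic<int> and INT_MAX standing in for
// max_jint. The deflater swings the count negative in one atomic
// step; readers that increment-then-check see a negative value and
// back off, and adding INT_MAX back preserves any increments that
// raced in while the count was negative.
#include <atomic>
#include <climits>

static std::atomic<int> ref_count{0};

// Deflater: claim iff no references are held. Any later increment by
// a reader leaves the count negative, which the reader will notice.
static bool try_claim_for_deflation() {
  int expected = 0;
  return ref_count.compare_exchange_strong(expected, -INT_MAX);
}

// Deflater losing a later race and backing out: restore the count.
static void unclaim() {
  ref_count.fetch_add(INT_MAX);
}

// Reader: bump first, check second. A negative old value means a
// deflater owns the monitor; undo the bump and bail out.
static bool try_acquire_reference() {
  if (ref_count.fetch_add(1) < 0) {
    ref_count.fetch_sub(1);
    return false;
  }
  return true;
}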
2263
2264 // Walk a given monitor list, and deflate idle monitors.
2265 // The given list could be a per-thread list or a global list.
2266 //
2267 // In the case of parallel processing of thread local monitor lists,
2268 // work is done by Threads::parallel_threads_do() which ensures that
2269 // each Java thread is processed by exactly one worker thread, and
2280 ObjectMonitor* cur_mid_in_use = NULL;
2281 ObjectMonitor* mid = NULL;
2282 ObjectMonitor* next = NULL;
2283 int deflated_count = 0;
2284
2285 // We use the simpler mark-mid-as-we-go protocol because there are
2286 // no parallel list deletions while we are at a safepoint.
2287 if (!mark_list_head(list_p, &mid, &next)) {
2288 return 0; // The list is empty so nothing to deflate.
2289 }
2290
2291 while (true) {
2292 oop obj = (oop) mid->object();
2293 if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
2294 // Deflation succeeded and already updated free_head_p and
2295 // free_tail_p as needed. Finish the move to the local free list
2296 // by unlinking mid from the global or per-thread in-use list.
2297 if (cur_mid_in_use == NULL) {
2298 // mid is the list head and it is marked. Switch the list head
2299 // to next which unmarks the list head, but leaves mid marked:
2300 OrderAccess::release_store(list_p, next);
2301 } else {
2302 // mid is marked. Switch cur_mid_in_use's next field to next
2303 // which is safe because we have no parallel list deletions,
2304 // but we leave mid marked:
2305 OrderAccess::release_store(&cur_mid_in_use->_next_om, next);
2306 }
2307 // At this point mid is disconnected from the in-use list so
2308 // its marked next field no longer has any effect.
2309 deflated_count++;
2310 Atomic::dec(count_p);
2311 // mid is current tail in the free_head_p list so NULL terminate it
2312 // (which also unmarks it):
2313 set_next(mid, NULL);
2314
2315 // All the list management is done so move on to the next one:
2316 mid = next;
2317 } else {
2318 set_next(mid, next); // unmark next field
2319
2320 // All the list management is done so move on to the next one:
2381 // a cur_mid_in_use, then its next field is also marked at this point.
2382
2383 if (next != NULL) {
2384 // We mark next's next field so that an om_flush()
2385 // thread that is behind us cannot pass us when we
2386 // unmark the current mid's next field.
2387 next_next = mark_next_loop(next);
2388 }
2389
2390 // Only try to deflate if there is an associated Java object and if
2391 // mid is old (is not newly allocated and is not newly freed).
2392 if (mid->object() != NULL && mid->is_old() &&
2393 deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) {
2394 // Deflation succeeded and already updated free_head_p and
2395 // free_tail_p as needed. Finish the move to the local free list
2396 // by unlinking mid from the global or per-thread in-use list.
2397 if (cur_mid_in_use == NULL) {
2398 // mid is the list head and it is marked. Switch the list head
2399 // to next which is also marked (if not NULL) and also leave
2400 // mid marked:
2401 OrderAccess::release_store(list_p, next);
2402 } else {
2403 ObjectMonitor* marked_next = mark_om_ptr(next);
2404 // mid and cur_mid_in_use are marked. Switch cur_mid_in_use's
2405 // next field to marked_next and also leave mid marked:
2406 OrderAccess::release_store(&cur_mid_in_use->_next_om, marked_next);
2407 }
2408 // At this point mid is disconnected from the in-use list so
2409 // its marked next field no longer has any effect.
2410 deflated_count++;
2411 Atomic::dec(count_p);
2412 // mid is current tail in the free_head_p list so NULL terminate it
2413 // (which also unmarks it):
2414 set_next(mid, NULL);
2415
2416 // All the list management is done so move on to the next one:
2417 mid = next; // mid keeps non-NULL next's marked next field
2418 next = next_next;
2419 } else {
2420 // mid is considered in-use if it does not have an associated
2421 // Java object or mid is not old or deflation did not succeed.
2422 // A mid->is_new() node can be seen here when it is freshly
2423 // returned by om_alloc() (and skips the deflation code path).
2424 // A mid->is_old() node can be seen here when deflation failed.
2425 // A mid->is_free() node can be seen here when a fresh node from
2426 // om_alloc() is released by om_release() due to losing the race
2427 // in inflate().
2428
2429 // All the list management is done so move on to the next one:
2430 if (cur_mid_in_use != NULL) {
2431 set_next(cur_mid_in_use, mid); // unmark cur_mid_in_use
2432 }
2433 // The next cur_mid_in_use keeps mid's marked next field so
2434 // that it is stable for a possible next field change. It
2435 // cannot be modified by om_release() while it is marked.
2436 cur_mid_in_use = mid;
2437 mid = next; // mid keeps non-NULL next's marked next field
2438 next = next_next;
2439
2440 if (SafepointSynchronize::is_synchronizing() &&
2441 cur_mid_in_use != OrderAccess::load_acquire(list_p) &&
2442 cur_mid_in_use->is_old()) {
2443 // If a safepoint has started and cur_mid_in_use is not the list
2444 // head and is old, then it is safe to use as saved state. Return
2445 // to the caller before blocking.
2446 *saved_mid_in_use_p = cur_mid_in_use;
2447 set_next(cur_mid_in_use, mid); // unmark cur_mid_in_use
2448 if (mid != NULL) {
2449 set_next(mid, next); // unmark mid
2450 }
2451 return deflated_count;
2452 }
2453 }
2454 if (mid == NULL) {
2455 if (cur_mid_in_use != NULL) {
2456 set_next(cur_mid_in_use, mid); // unmark cur_mid_in_use
2457 }
2458 break; // Reached end of the list so nothing more to deflate.
2459 }
2460
2461 // The current mid's next field is marked at this point. If we have
2462 // a cur_mid_in_use, then its next field is also marked at this point.
2463 }
2464 // We finished the list without a safepoint starting so there's
2465 // no need to save state.
2466 *saved_mid_in_use_p = NULL;
2467 return deflated_count;
2468 }
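
// For illustration: the saved_mid_in_use_p idea reduced to a
// resumable scan over a plain singly linked list (no marking,
// illustrative names): process nodes until a stop condition (a
// starting safepoint above) fires, hand the caller a cursor, and
// continue from that cursor's successor on the next call.
struct Node {
  Node* next;
  bool  idle;
};

template <typename ShouldStop, typename Handle>
static int scan_resumable(Node* head, Node* saved, Node** saved_p,
                          ShouldStop should_stop, Handle handle) {
  int handled = 0;
  Node* cur = (saved != nullptr) ? saved->next : head;
  for (; cur != nullptr; cur = cur->next) {
    if (cur->idle) {
      handle(cur);
      handled++;
    }
    if (should_stop() && cur->next != nullptr) {
      *saved_p = cur;   // Resume from cur->next on the next call.
      return handled;
    }
  }
  *saved_p = nullptr;   // Reached the end; nothing to resume from.
  return handled;
}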
2469
2470 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2471 OrderAccess::release_store(&counters->n_in_use, 0); // currently associated with objects
2472 OrderAccess::release_store(&counters->n_in_circulation, 0); // extant
2473 OrderAccess::release_store(&counters->n_scavenged, 0); // reclaimed (global and per-thread)
2474 OrderAccess::release_store(&counters->per_thread_scavenged, 0); // per-thread scavenge total
2475 counters->per_thread_times = 0.0; // per-thread scavenge times
2476 }
2477
2478 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
2479 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2480
2481 if (AsyncDeflateIdleMonitors) {
2482 // Nothing to do when global idle ObjectMonitors are deflated using
2483 // a JavaThread unless a special deflation has been requested.
2484 if (!is_special_deflation_requested()) {
2485 return;
2486 }
2487 }
2488
2489 bool deflated = false;
2490
2491 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors
2492 ObjectMonitor* free_tail_p = NULL;
2493 elapsedTimer timer;
2494
2495 if (log_is_enabled(Info, monitorinflation)) {
2496 timer.start();
2497 }
2498
2499 // Note: the thread-local monitors lists get deflated in
2500 // a separate pass. See deflate_thread_local_monitors().
2501
2502 // For moribund threads, scan g_om_in_use_list
2503 int deflated_count = 0;
2504 if (OrderAccess::load_acquire(&g_om_in_use_list) != NULL) {
2505 // Update n_in_circulation before g_om_in_use_count is updated by deflation.
2506 Atomic::add(OrderAccess::load_acquire(&g_om_in_use_count), &counters->n_in_circulation);
2507
2508 deflated_count = deflate_monitor_list(&g_om_in_use_list, &g_om_in_use_count, &free_head_p, &free_tail_p);
2509 Atomic::add(OrderAccess::load_acquire(&g_om_in_use_count), &counters->n_in_use);
2510 }
2511
2512 if (free_head_p != NULL) {
2513 // Move the deflated ObjectMonitors back to the global free list.
2514 // No races on the working free list so no need for load_acquire().
2515 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
2516 assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
2517 INTPTR_FORMAT, p2i(free_tail_p->_next_om));
2518 prepend_list_to_g_free_list(free_head_p, free_tail_p, deflated_count);
2519 Atomic::add(deflated_count, &counters->n_scavenged);
2520 }
2521 timer.stop();
2522
2523 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2524 LogStreamHandle(Info, monitorinflation) lsh_info;
2525 LogStream* ls = NULL;
2526 if (log_is_enabled(Debug, monitorinflation)) {
2527 ls = &lsh_debug;
2528 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2529 ls = &lsh_info;
2543
2544 void ObjectSynchronizer::deflate_idle_monitors_using_JT() {
2545 assert(AsyncDeflateIdleMonitors, "sanity check");
2546
2547 // Deflate any global idle monitors.
2548 deflate_global_idle_monitors_using_JT();
2549
2550 int count = 0;
2551 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2552 if (jt->om_in_use_count > 0 && !jt->is_exiting()) {
2553 // This JavaThread is using ObjectMonitors so deflate any that
2554 // are idle unless this JavaThread is exiting; do not race with
2555 // ObjectSynchronizer::om_flush().
2556 deflate_per_thread_idle_monitors_using_JT(jt);
2557 count++;
2558 }
2559 }
2560 if (count > 0) {
2561 log_debug(monitorinflation)("did async deflation of idle monitors for %d thread(s).", count);
2562 }
2563 // The ServiceThread's async deflation request has been processed.
2564 set_is_async_deflation_requested(false);
2565
2566 if (HandshakeAfterDeflateIdleMonitors && g_om_wait_count > 0) {
2567 // There are deflated ObjectMonitors waiting for a handshake
2568 // (or a safepoint) for safety.
2569
2570 // g_wait_list and g_om_wait_count are only updated by the calling
2571 // thread so no need for load_acquire() or release_store().
2572 ObjectMonitor* list = g_wait_list;
2573 ADIM_guarantee(list != NULL, "g_wait_list must not be NULL");
2574 int count = g_om_wait_count;
2575 g_wait_list = NULL;
2576 g_om_wait_count = 0;
2577
2578 // Find the tail for prepend_list_to_common().
2579 int l_count = 0;
2580 ObjectMonitor* tail = NULL;
2581 for (ObjectMonitor* n = list; n != NULL; n = unmarked_next(n)) {
2582 tail = n;
2583 l_count++;
2584 }
2585 ADIM_guarantee(count == l_count, "count=%d != l_count=%d", count, l_count);
2586
2587 // Will execute a safepoint if !ThreadLocalHandshakes:
2588 HandshakeForDeflation hfd_tc;
2589 Handshake::execute(&hfd_tc);
2590
2591 prepend_list_to_common(list, tail, count, &g_free_list, &g_om_free_count);
2592
2593 log_info(monitorinflation)("moved %d idle monitors from global waiting list to global free list", count);
2594 }
2595 }
2596
2597 // Deflate global idle ObjectMonitors using a JavaThread.
2598 //
2612
2613 deflate_common_idle_monitors_using_JT(false /* !is_global */, target);
2614 }
2615
2616 // Deflate global or per-thread idle ObjectMonitors using a JavaThread.
2617 //
2618 void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* target) {
2619 JavaThread* self = JavaThread::current();
2620
2621 int deflated_count = 0;
2622 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged ObjectMonitors
2623 ObjectMonitor* free_tail_p = NULL;
2624 ObjectMonitor* saved_mid_in_use_p = NULL;
2625 elapsedTimer timer;
2626
2627 if (log_is_enabled(Info, monitorinflation)) {
2628 timer.start();
2629 }
2630
2631 if (is_global) {
2632 OM_PERFDATA_OP(MonExtant, set_value(OrderAccess::load_acquire(&g_om_in_use_count)));
2633 } else {
2634 OM_PERFDATA_OP(MonExtant, inc(OrderAccess::load_acquire(&target->om_in_use_count)));
2635 }
2636
2637 do {
2638 int local_deflated_count;
2639 if (is_global) {
2640 local_deflated_count = deflate_monitor_list_using_JT(&g_om_in_use_list, &g_om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
2641 } else {
2642 local_deflated_count = deflate_monitor_list_using_JT(&target->om_in_use_list, &target->om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
2643 }
2644 deflated_count += local_deflated_count;
2645
2646 if (free_head_p != NULL) {
2647 // Move the deflated ObjectMonitors to the global free list.
2648 // No races on the working list so no need for load_acquire().
2649 guarantee(free_tail_p != NULL && local_deflated_count > 0, "free_tail_p=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(free_tail_p), local_deflated_count);
2650 // Note: The target thread can be doing an om_alloc() that
2651 // is trying to prepend an ObjectMonitor on its in-use list
2652 // at the same time that we have deflated the current in-use
2653 // list head and put it on the local free list. prepend_to_common()
2654 // will detect the race and retry which avoids list corruption,
2694 LogStream* ls = NULL;
2695 if (log_is_enabled(Debug, monitorinflation)) {
2696 ls = &lsh_debug;
2697 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2698 ls = &lsh_info;
2699 }
2700 if (ls != NULL) {
2701 if (is_global) {
2702 ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
2703 } else {
2704 ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(target), timer.seconds(), deflated_count);
2705 }
2706 }
2707 }
2708
2709 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2710 // Report the cumulative time for deflating each thread's idle
2711 // monitors. Note: if the work is split among more than one
2712 // worker thread, then the reported time will likely be more
2713 // than a beginning to end measurement of the phase.
2714 // Note: AsyncDeflateIdleMonitors only deflates per-thread idle
2715 // monitors at a safepoint when a special deflation has been requested.
2716 log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d",
2717 counters->per_thread_times,
2718 OrderAccess::load_acquire(&counters->per_thread_scavenged));
2719
2720 bool needs_special_deflation = is_special_deflation_requested();
2721 if (!AsyncDeflateIdleMonitors || needs_special_deflation) {
2722 // AsyncDeflateIdleMonitors does not use these counters unless
2723 // there is a special deflation request.
2724
2725 OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
2726 OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
2727 }
2728
2729 if (log_is_enabled(Debug, monitorinflation)) {
2730 // exit_globals()'s call to audit_and_print_stats() is done
2731 // at the Info level.
2732 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
2733 } else if (log_is_enabled(Info, monitorinflation)) {
2734 log_info(monitorinflation)("g_om_population=%d, g_om_in_use_count=%d, "
2735 "g_om_free_count=%d, g_om_wait_count=%d",
2736 OrderAccess::load_acquire(&g_om_population),
2737 OrderAccess::load_acquire(&g_om_in_use_count),
2738 OrderAccess::load_acquire(&g_om_free_count),
2739 OrderAccess::load_acquire(&g_om_wait_count));
2740 }
2741
2742 ForceMonitorScavenge = 0; // Reset
2743 GVars.stw_random = os::random();
2744 GVars.stw_cycle++;
2745 if (needs_special_deflation) {
2746 set_is_special_deflation_requested(false); // special deflation is done
2747 }
2748 }
2749
2750 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
2751 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2752
2753 if (AsyncDeflateIdleMonitors && !is_special_deflation_requested()) {
2754 // Nothing to do if a special deflation has NOT been requested.
2755 return;
2756 }
2757
2758 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors
2759 ObjectMonitor* free_tail_p = NULL;
2760 elapsedTimer timer;
2761
2762 if (log_is_enabled(Info, safepoint, cleanup) ||
2763 log_is_enabled(Info, monitorinflation)) {
2764 timer.start();
2765 }
2766
2767 // Update n_in_circulation before om_in_use_count is updated by deflation.
2768 Atomic::add(OrderAccess::load_acquire(&thread->om_in_use_count), &counters->n_in_circulation);
2769
2770 int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
2771 Atomic::add(OrderAccess::load_acquire(&thread->om_in_use_count), &counters->n_in_use);
2772
2773 if (free_head_p != NULL) {
2774 // Move the deflated ObjectMonitors back to the global free list.
2775 // No races on the working list so no need for load_acquire().
2776 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
2777 assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
2778 INTPTR_FORMAT, p2i(free_tail_p->_next_om));
2779 prepend_list_to_g_free_list(free_head_p, free_tail_p, deflated_count);
2780 Atomic::add(deflated_count, &counters->n_scavenged);
2781 Atomic::add(deflated_count, &counters->per_thread_scavenged);
2782 }
2783
2784 timer.stop();
2785 // Safepoint logging cares about cumulative per_thread_times and
2786 // we'll capture most of the cost, but not the muxRelease() which
2787 // should be cheap.
2788 counters->per_thread_times += timer.seconds();
2789
2790 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2791 LogStreamHandle(Info, monitorinflation) lsh_info;
2881 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2882 LogStreamHandle(Info, monitorinflation) lsh_info;
2883 LogStreamHandle(Trace, monitorinflation) lsh_trace;
2884 LogStream* ls = NULL;
2885 if (log_is_enabled(Trace, monitorinflation)) {
2886 ls = &lsh_trace;
2887 } else if (log_is_enabled(Debug, monitorinflation)) {
2888 ls = &lsh_debug;
2889 } else if (log_is_enabled(Info, monitorinflation)) {
2890 ls = &lsh_info;
2891 }
2892 assert(ls != NULL, "sanity check");
2893
2894 // Log counts for the global and per-thread monitor lists:
2895 int chk_om_population = log_monitor_list_counts(ls);
2896 int error_cnt = 0;
2897
2898 ls->print_cr("Checking global lists:");
2899
2900 // Check g_om_population:
2901 if (OrderAccess::load_acquire(&g_om_population) == chk_om_population) {
2902 ls->print_cr("g_om_population=%d equals chk_om_population=%d",
2903 OrderAccess::load_acquire(&g_om_population),
2904 chk_om_population);
2905 } else {
2906 // With lock free access to the monitor lists, it is possible for
2907 // log_monitor_list_counts() to return a value that doesn't match
2908 // g_om_population. So far a higher value has been seen in testing
2909 // so something is being double counted by log_monitor_list_counts().
2910 ls->print_cr("WARNING: g_om_population=%d is not equal to "
2911 "chk_om_population=%d",
2912 OrderAccess::load_acquire(&g_om_population),
2913 chk_om_population);
2914 }
2915
2916 // Check g_om_in_use_list and g_om_in_use_count:
2917 chk_global_in_use_list_and_count(ls, &error_cnt);
2918
2919 // Check g_free_list and g_om_free_count:
2920 chk_global_free_list_and_count(ls, &error_cnt);
2921
2922 if (HandshakeAfterDeflateIdleMonitors) {
2923 // Check g_wait_list and g_om_wait_count:
2924 chk_global_wait_list_and_count(ls, &error_cnt);
2925 }
2926
2927 ls->print_cr("Checking per-thread lists:");
2928
2929 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2930 // Check om_in_use_list and om_in_use_count:
2931 chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);
2932
2933 // Check om_free_list and om_free_count:
2984 }
2985 if (n->object() != NULL) {
2986 if (jt != NULL) {
2987 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2988 ": free per-thread monitor must have NULL _object "
2989 "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
2990 p2i(n->object()));
2991 } else {
2992 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
2993 "must have NULL _object field: _object=" INTPTR_FORMAT,
2994 p2i(n), p2i(n->object()));
2995 }
2996 *error_cnt_p = *error_cnt_p + 1;
2997 }
2998 }
2999
3000 // Check the global free list and count; log the results of the checks.
3001 void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
3002 int *error_cnt_p) {
3003 int chk_om_free_count = 0;
3004 for (ObjectMonitor* n = OrderAccess::load_acquire(&g_free_list); n != NULL; n = unmarked_next(n)) {
3005 chk_free_entry(NULL /* jt */, n, out, error_cnt_p);
3006 chk_om_free_count++;
3007 }
3008 if (OrderAccess::load_acquire(&g_om_free_count) == chk_om_free_count) {
3009 out->print_cr("g_om_free_count=%d equals chk_om_free_count=%d",
3010 OrderAccess::load_acquire(&g_om_free_count),
3011 chk_om_free_count);
3012 } else {
3013 // With lock free access to g_free_list, it is possible for an
3014 // ObjectMonitor to be prepended to g_free_list after we started
3015 // calculating chk_om_free_count so g_om_free_count may not
3016 // match anymore.
3017 out->print_cr("WARNING: g_om_free_count=%d is not equal to "
3018 "chk_om_free_count=%d",
3019 OrderAccess::load_acquire(&g_om_free_count),
3020 chk_om_free_count);
3021 }
3022 }
3023
3024 // Check the global wait list and count; log the results of the checks.
3025 void ObjectSynchronizer::chk_global_wait_list_and_count(outputStream * out,
3026 int *error_cnt_p) {
3027 int chk_om_wait_count = 0;
3028 for (ObjectMonitor* n = OrderAccess::load_acquire(&g_wait_list); n != NULL; n = unmarked_next(n)) {
3029 // Rules for g_wait_list are the same as for g_free_list:
3030 chk_free_entry(NULL /* jt */, n, out, error_cnt_p);
3031 chk_om_wait_count++;
3032 }
3033 if (OrderAccess::load_acquire(&g_om_wait_count) == chk_om_wait_count) {
3034 out->print_cr("g_om_wait_count=%d equals chk_om_wait_count=%d",
3035 OrderAccess::load_acquire(&g_om_wait_count),
3036 chk_om_wait_count);
3037 } else {
3038 out->print_cr("ERROR: g_om_wait_count=%d is not equal to "
3039 "chk_om_wait_count=%d",
3040 OrderAccess::load_acquire(&g_om_wait_count),
3041 chk_om_wait_count);
3042 *error_cnt_p = *error_cnt_p + 1;
3043 }
3044 }
3045
3046 // Check the global in-use list and count; log the results of the checks.
3047 void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
3048 int *error_cnt_p) {
3049 int chk_om_in_use_count = 0;
3050 for (ObjectMonitor* n = OrderAccess::load_acquire(&g_om_in_use_list); n != NULL; n = unmarked_next(n)) {
3051 chk_in_use_entry(NULL /* jt */, n, out, error_cnt_p);
3052 chk_om_in_use_count++;
3053 }
3054 if (OrderAccess::load_acquire(&g_om_in_use_count) == chk_om_in_use_count) {
3055 out->print_cr("g_om_in_use_count=%d equals chk_om_in_use_count=%d",
3056 OrderAccess::load_acquire(&g_om_in_use_count),
3057 chk_om_in_use_count);
3058 } else {
3059 // With lock free access to the monitor lists, it is possible for
3060 // an exiting JavaThread to put its in-use ObjectMonitors on the
3061 // global in-use list after chk_om_in_use_count is calculated above.
3062 out->print_cr("WARNING: g_om_in_use_count=%d is not equal to chk_om_in_use_count=%d",
3063 OrderAccess::load_acquire(&g_om_in_use_count),
3064 chk_om_in_use_count);
3065 }
3066 }
3067
3068 // Check an in-use monitor entry; log any errors.
3069 void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
3070 outputStream * out, int *error_cnt_p) {
3071 if (n->header().value() == 0) {
3072 if (jt != NULL) {
3073 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3074 ": in-use per-thread monitor must have non-NULL _header "
3075 "field.", p2i(jt), p2i(n));
3076 } else {
3077 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
3078 "must have non-NULL _header field.", p2i(n));
3079 }
3080 *error_cnt_p = *error_cnt_p + 1;
3081 }
3082 if (n->object() == NULL) {
3083 if (jt != NULL) {
3084 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3112 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3113 ": in-use per-thread monitor's object does not refer "
3114 "to the same monitor: obj=" INTPTR_FORMAT ", mark="
3115 INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
3116 p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
3117 } else {
3118 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
3119 "monitor's object does not refer to the same monitor: obj="
3120 INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
3121 INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
3122 }
3123 *error_cnt_p = *error_cnt_p + 1;
3124 }
3125 }
3126
3127 // Check the thread's free list and count; log the results of the checks.
3128 void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
3129 outputStream * out,
3130 int *error_cnt_p) {
3131 int chk_om_free_count = 0;
3132 for (ObjectMonitor* n = OrderAccess::load_acquire(&jt->om_free_list); n != NULL; n = unmarked_next(n)) {
3133 chk_free_entry(jt, n, out, error_cnt_p);
3134 chk_om_free_count++;
3135 }
3136 if (OrderAccess::load_acquire(&jt->om_free_count) == chk_om_free_count) {
3137 out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals "
3138 "chk_om_free_count=%d", p2i(jt),
3139 OrderAccess::load_acquire(&jt->om_free_count),
3140 chk_om_free_count);
3141 } else {
3142 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not "
3143 "equal to chk_om_free_count=%d", p2i(jt),
3144 OrderAccess::load_acquire(&jt->om_free_count),
3145 chk_om_free_count);
3146 *error_cnt_p = *error_cnt_p + 1;
3147 }
3148 }
3149
3150 // Check the thread's in-use list and count; log the results of the checks.
3151 void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
3152 outputStream * out,
3153 int *error_cnt_p) {
3154 int chk_om_in_use_count = 0;
3155 for (ObjectMonitor* n = OrderAccess::load_acquire(&jt->om_in_use_list); n != NULL; n = unmarked_next(n)) {
3156 chk_in_use_entry(jt, n, out, error_cnt_p);
3157 chk_om_in_use_count++;
3158 }
3159 if (OrderAccess::load_acquire(&jt->om_in_use_count) == chk_om_in_use_count) {
3160 out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
3161 "chk_om_in_use_count=%d", p2i(jt),
3162 OrderAccess::load_acquire(&jt->om_in_use_count),
3163 chk_om_in_use_count);
3164 } else {
3165 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
3166 "equal to chk_om_in_use_count=%d", p2i(jt),
3167 OrderAccess::load_acquire(&jt->om_in_use_count),
3168 chk_om_in_use_count);
3169 *error_cnt_p = *error_cnt_p + 1;
3170 }
3171 }
3172
3173 // Log details about ObjectMonitors on the in-use lists. The 'BHL'
3174 // flags indicate why the entry is in-use, 'object' and 'object type'
3175 // indicate the associated object and its type.
3176 void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) {
3177 stringStream ss;
3178 if (OrderAccess::load_acquire(&g_om_in_use_count) > 0) {
3179 out->print_cr("In-use global monitor info:");
3180 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
3181 out->print_cr("%18s %s %7s %18s %18s",
3182 "monitor", "BHL", "ref_cnt", "object", "object type");
3183 out->print_cr("================== === ======= ================== ==================");
3184 for (ObjectMonitor* n = OrderAccess::load_acquire(&g_om_in_use_list); n != NULL; n = unmarked_next(n)) {
3185 const oop obj = (oop) n->object();
3186 const markWord mark = n->header();
3187 ResourceMark rm;
3188 out->print(INTPTR_FORMAT " %d%d%d %7d " INTPTR_FORMAT " %s",
3189 p2i(n), n->is_busy() != 0, mark.hash() != 0,
3190 n->owner() != NULL, (int)n->ref_count(), p2i(obj),
3191 obj->klass()->external_name());
3192 if (n->is_busy() != 0) {
3193 out->print(" (%s)", n->is_busy_to_string(&ss));
3194 ss.reset();
3195 }
3196 out->cr();
3197 }
3198 }
3199
3200 out->print_cr("In-use per-thread monitor info:");
3201 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
3202 out->print_cr("%18s %18s %s %7s %18s %18s",
3203 "jt", "monitor", "BHL", "ref_cnt", "object", "object type");
3204 out->print_cr("================== ================== === ======= ================== ==================");
3205 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
3206 for (ObjectMonitor* n = OrderAccess::load_acquire(&jt->om_in_use_list); n != NULL; n = unmarked_next(n)) {
3207 const oop obj = (oop) n->object();
3208 const markWord mark = n->header();
3209 ResourceMark rm;
3210 out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d %7d "
3211 INTPTR_FORMAT " %s", p2i(jt), p2i(n), n->is_busy() != 0,
3212 mark.hash() != 0, n->owner() != NULL, (int)n->ref_count(),
3213 p2i(obj), obj->klass()->external_name());
3214 if (n->is_busy() != 0) {
3215 out->print(" (%s)", n->is_busy_to_string(&ss));
3216 ss.reset();
3217 }
3218 out->cr();
3219 }
3220 }
3221
3222 out->flush();
3223 }
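// An illustrative line of the per-thread output above (all values are
// invented for exposition):
//
//   0x00007f30cc018800 0x00007f30cc251000 110       2 0x00000000e1b2c3d0 java.lang.String (is_busy: ...)
//
// would read as: the monitor is busy (B=1), its header has a hash
// (H=1), and it currently has no owner (L=0), with a ref_count of 2.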
3224
3225 // Log counts for the global and per-thread monitor lists and return
3226 // the population count.
3227 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
3228 int pop_count = 0;
3229 out->print_cr("%18s %10s %10s %10s %10s",
3230 "Global Lists:", "InUse", "Free", "Wait", "Total");
3231 out->print_cr("================== ========== ========== ========== ==========");
3232 out->print_cr("%18s %10d %10d %10d %10d", "",
3233 OrderAccess::load_acquire(&g_om_in_use_count),
3234 OrderAccess::load_acquire(&g_om_free_count),
3235 OrderAccess::load_acquire(&g_om_wait_count),
3236 OrderAccess::load_acquire(&g_om_population));
3237 pop_count += OrderAccess::load_acquire(&g_om_in_use_count) +
3238 OrderAccess::load_acquire(&g_om_free_count);
3239 if (HandshakeAfterDeflateIdleMonitors) {
3240 pop_count += OrderAccess::load_acquire(&g_om_wait_count);
3241 }
3242
3243 out->print_cr("%18s %10s %10s %10s",
3244 "Per-Thread Lists:", "InUse", "Free", "Provision");
3245 out->print_cr("================== ========== ========== ==========");
3246
3247 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
3248 out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
3249 OrderAccess::load_acquire(&jt->om_in_use_count),
3250 OrderAccess::load_acquire(&jt->om_free_count),
3251 jt->om_free_provision);
3252 pop_count += OrderAccess::load_acquire(&jt->om_in_use_count) +
3253 OrderAccess::load_acquire(&jt->om_free_count);
3254 }
3255 return pop_count;
3256 }
3257
3258 #ifndef PRODUCT
3259
3260 // Check if monitor belongs to the monitor cache
3261 // The list is grow-only so it's *relatively* safe to traverse
3262 // the list of extant blocks without taking a lock.
3263
3264 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
3265 PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
3266 while (block != NULL) {
3267 assert(block->object() == CHAINMARKER, "must be a block header");
3268 if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
3269 address mon = (address)monitor;
3270 address blk = (address)block;
3271 size_t diff = mon - blk;
3272 assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
3273 return 1;
3274 }
3275 // unmarked_next() is not needed with g_block_list (no next field marking).
3276 block = (PaddedObjectMonitor*)OrderAccess::load_acquire(&block->_next_om);
3277 }
3278 return 0;
3279 }
3280
3281 #endif
225
226 // Mark the next ObjectMonitor for traversal. The current ObjectMonitor
227 // is unmarked after the next ObjectMonitor is marked. *cur_p and *next_p
228 // are updated to their next values in the list traversal. *cur_p is set
229 // to NULL when the end of the list is reached.
230 static void mark_next_for_traversal(ObjectMonitor** cur_p, ObjectMonitor** next_p) {
231 ObjectMonitor* prev = *cur_p; // Save current for unmarking.
232 if (*next_p == NULL) { // Reached the end of the list.
233 set_next(prev, NULL); // Unmark previous.
234 *cur_p = NULL; // Tell the caller we are done.
235 return;
236 }
237 (void)mark_next_loop(*next_p); // Mark next.
238 set_next(prev, *next_p); // Unmark previous.
239 *cur_p = *next_p; // Update current.
240 *next_p = unmarked_next(*cur_p); // Update next.
241 }
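// A sketch of the walking protocol these helpers implement; it mirrors
// the audit code later in this file ('process' stands for any
// hypothetical per-node action):
//
//   ObjectMonitor* cur = NULL;
//   ObjectMonitor* next = NULL;
//   if (mark_list_head(&g_free_list, &cur, &next)) {
//     while (true) {
//       process(cur);  // cur's next field is marked so cur is stable
//       mark_next_for_traversal(&cur, &next);
//       if (cur == NULL) {
//         break;  // reached the end of the list
//       }
//     }
//   }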
242
243 // Prepend a list of ObjectMonitors to the specified *list_p. 'tail' is
244 // the last ObjectMonitor in the list and there are 'count' on the list.
245 // Also updates the specified *count_p.
246 static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail,
247 int count, ObjectMonitor* volatile* list_p,
248 volatile int* count_p) {
249 while (true) {
250 ObjectMonitor* cur = *list_p;
251 // Prepend list to *list_p.
252 ObjectMonitor* next = NULL;
253 if (!mark_next(tail, &next)) {
254 continue; // failed to mark next field so try it all again
255 }
256 set_next(tail, cur); // tail now points to cur (and unmarks tail)
257 if (cur == NULL) {
258 // No potential race with takers or other prependers since
259 // *list_p is empty.
260 if (Atomic::cmpxchg(list, list_p, cur) == cur) {
261 // Successfully switched *list_p to the list value.
262 Atomic::add(count, count_p);
263 break;
264 }
265 // Implied else: try it all again
266 } else {
267 // Try to mark next field to guard against races:
268 if (!mark_next(cur, &next)) {
269 continue; // failed to mark next field so try it all again
270 }
323 // Prepend a list of ObjectMonitors to g_om_in_use_list. 'tail' is the last
324 // ObjectMonitor in the list and there are 'count' on the list. Also
325 // updates g_om_in_use_list.
326 static void prepend_list_to_g_om_in_use_list(ObjectMonitor* list,
327 ObjectMonitor* tail, int count) {
328 prepend_list_to_common(list, tail, count, &g_om_in_use_list, &g_om_in_use_count);
329 }
330
331 // Prepend an ObjectMonitor to the specified list. Also updates
332 // the specified counter.
333 static void prepend_to_common(ObjectMonitor* m, ObjectMonitor* volatile * list_p,
334 int volatile * count_p) {
335 while (true) {
336 (void)mark_next_loop(m); // mark m so we can safely update its next field
337 ObjectMonitor* cur = NULL;
338 ObjectMonitor* next = NULL;
339 // Mark the list head to guard against A-B-A race:
340 if (mark_list_head(list_p, &cur, &next)) {
341 // List head is now marked so we can safely switch it.
342 set_next(m, cur); // m now points to cur (and unmarks m)
343 *list_p = m; // Switch list head to unmarked m.
344 // mark_list_head() used cmpxchg() above, switching list head can be lazier:
345 OrderAccess::storestore();
346 set_next(cur, next); // Unmark the previous list head.
347 break;
348 }
349 // The list is empty so try to set the list head.
350 assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
351 set_next(m, cur); // m now points to NULL (and unmarks m)
352 if (Atomic::cmpxchg(m, list_p, cur) == cur) {
353 // List head is now unmarked m.
354 break;
355 }
356 // Implied else: try it all again
357 }
358 Atomic::inc(count_p);
359 }
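// Worked example of the mark bit protocol used above (illustration
// only): ObjectMonitors are at least word aligned, so bit 0 of a real
// ObjectMonitor* is always 0. If a next field holds 0x2000, then
// mark_om_ptr() encodes it as 0x2001 and masking with ~0x1 (as
// unmarked_next() does) recovers 0x2000. A set low bit tells other
// threads that this node's next field is being updated so they must
// retry or wait.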
360
361 // Prepend an ObjectMonitor to a per-thread om_free_list.
362 // Also updates the per-thread om_free_count.
363 static void prepend_to_om_free_list(Thread* self, ObjectMonitor* m) {
364 prepend_to_common(m, &self->om_free_list, &self->om_free_count);
365 }
366
367 // Prepend an ObjectMonitor to a per-thread om_in_use_list.
368 // Also updates the per-thread om_in_use_count.
369 static void prepend_to_om_in_use_list(Thread* self, ObjectMonitor* m) {
370 prepend_to_common(m, &self->om_in_use_list, &self->om_in_use_count);
371 }
372
373 // Take an ObjectMonitor from the start of the specified list. Also
374 // decrements the specified counter. Returns NULL if none are available.
375 static ObjectMonitor* take_from_start_of_common(ObjectMonitor* volatile * list_p,
376 int volatile * count_p) {
377 ObjectMonitor* next = NULL;
378 ObjectMonitor* take = NULL;
379 // Mark the list head to guard against A-B-A race:
380 if (!mark_list_head(list_p, &take, &next)) {
381 return NULL; // None are available.
382 }
383 // Switch marked list head to next (which unmarks the list head, but
384 // leaves take marked):
385 *list_p = next;
386 Atomic::dec(count_p);
387 // mark_list_head() used cmpxchg() above, switching list head can be lazier:
388 OrderAccess::storestore();
389 // Unmark take, but leave the next value for any lagging list
390 // walkers. It will get cleaned up when take is prepended to
391 // the in-use list:
392 set_next(take, next);
393 return take;
394 }
395
396 // Take an ObjectMonitor from the start of the global free-list. Also
397 // updates g_om_free_count. Returns NULL if none are available.
398 static ObjectMonitor* take_from_start_of_g_free_list() {
399 return take_from_start_of_common(&g_free_list, &g_om_free_count);
400 }
401
402 // Take an ObjectMonitor from the start of a per-thread free-list.
403 // Also updates om_free_count. Returns NULL if none are available.
404 static ObjectMonitor* take_from_start_of_om_free_list(Thread* self) {
405 return take_from_start_of_common(&self->om_free_list, &self->om_free_count);
406 }
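// A condensed sketch of the allocation fast path built from the
// helpers above (it mirrors om_alloc() step 1 below; not a new API):
//
//   ObjectMonitor* m = take_from_start_of_om_free_list(self);
//   if (m != NULL) {
//     m->set_allocation_state(ObjectMonitor::New);
//     prepend_to_om_in_use_list(self, m);
//     return m;
//   }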
407
408
1218 owner = (address) monitor->owner();
1219 }
1220
1221 if (owner != NULL) {
1222 // owning_thread_from_monitor_owner() may also return NULL here
1223 return Threads::owning_thread_from_monitor_owner(t_list, owner);
1224 }
1225
1226 // Unlocked case, header in place
1227 // Cannot have assertion since this object may have been
1228 // locked by another thread when reaching here.
1229 // assert(mark.is_neutral(), "sanity check");
1230
1231 return NULL;
1232 }
1233 }
1234
1235 // Visitors ...
1236
1237 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
1238 PaddedObjectMonitor* block = g_block_list;
1239 while (block != NULL) {
1240 assert(block->object() == CHAINMARKER, "must be a block header");
1241 for (int i = _BLOCKSIZE - 1; i > 0; i--) {
1242 ObjectMonitor* mid = (ObjectMonitor *)(block + i);
1243 ObjectMonitorHandle omh;
1244 if (!mid->is_free() && omh.set_om_ptr_if_safe(mid)) {
1245 // The ObjectMonitor* is not free and it has been made safe.
1246 if (mid->object() == NULL) {
1247 // Only process with the closure if the object is set.
1248 continue;
1249 }
1250 closure->do_monitor(mid);
1251 }
1252 }
1253 // unmarked_next() is not needed with g_block_list (no next field
1254 // marking) and no load_acquire() needed because _next_om is
1255 // updated before g_block_list is changed with cmpxchg().
1256 block = (PaddedObjectMonitor*)block->_next_om;
1257 }
1258 }
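// A minimal MonitorClosure sketch (hypothetical diagnostic closure,
// not part of this change):
//
//   class PrintMonitorClosure : public MonitorClosure {
//    public:
//     void do_monitor(ObjectMonitor* mid) {
//       tty->print_cr("monitor=" INTPTR_FORMAT ", object=" INTPTR_FORMAT,
//                     p2i(mid), p2i(mid->object()));
//     }
//   };
//
//   PrintMonitorClosure pmc;
//   ObjectSynchronizer::monitors_iterate(&pmc);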
1259
1260 static bool monitors_used_above_threshold() {
1261 if (g_om_population == 0) {
1262 return false;
1263 }
1264 if (MonitorUsedDeflationThreshold > 0) {
1265 int monitors_used = g_om_population - g_om_free_count;
1266 if (HandshakeAfterDeflateIdleMonitors) {
1267 monitors_used -= g_om_wait_count;
1268 }
1269 int monitor_usage = (monitors_used * 100LL) / g_om_population;
1270 return monitor_usage > MonitorUsedDeflationThreshold;
1271 }
1272 return false;
1273 }
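// Worked example (made-up counts): with g_om_population=1000,
// g_om_free_count=300, g_om_wait_count=100 and
// HandshakeAfterDeflateIdleMonitors enabled, monitors_used is
// 1000 - 300 - 100 = 600 and monitor_usage is (600 * 100) / 1000 = 60,
// so the function returns true for any MonitorUsedDeflationThreshold
// below 60.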
1274
1275 // Returns true if MonitorBound is set (> 0) and if the specified
1276 // cnt is > MonitorBound. Otherwise returns false.
1277 static bool is_MonitorBound_exceeded(const int cnt) {
1278 const int mx = MonitorBound;
1279 return mx > 0 && cnt > mx;
1280 }
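// For example: with -XX:MonitorBound=1000, a cnt of 1500 returns true;
// with the default MonitorBound=0 this check is disabled.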
1281
1282 bool ObjectSynchronizer::is_async_deflation_needed() {
1283 if (!AsyncDeflateIdleMonitors) {
1284 return false;
1285 }
1286 if (is_async_deflation_requested()) {
1287 // Async deflation request.
1288 return true;
1289 }
1290 if (AsyncDeflationInterval > 0 &&
1291 time_since_last_async_deflation_ms() > AsyncDeflationInterval &&
1292 monitors_used_above_threshold()) {
1293 // It's been longer than our specified deflate interval and there
1294 // are too many monitors in use. We don't deflate more frequently
1295 // than AsyncDeflationInterval (unless is_async_deflation_requested)
1296 // in order to not swamp the ServiceThread.
1297 _last_async_deflation_time_ns = os::javaTimeNanos();
1298 return true;
1299 }
1300 int monitors_used = g_om_population - g_om_free_count;
1301 if (HandshakeAfterDeflateIdleMonitors) {
1302 monitors_used -= g_om_wait_count;
1303 }
1304 if (is_MonitorBound_exceeded(monitors_used)) {
1305 // Not enough ObjectMonitors on the global free list.
1306 return true;
1307 }
1308 return false;
1309 }
1310
1311 bool ObjectSynchronizer::is_safepoint_deflation_needed() {
1312 if (!AsyncDeflateIdleMonitors) {
1313 if (monitors_used_above_threshold()) {
1314 // Too many monitors in use.
1315 return true;
1316 }
1317 return false;
1318 }
1319 if (is_special_deflation_requested()) {
1320 // For AsyncDeflateIdleMonitors only do a safepoint deflation
1321 // if there is a special deflation request.
1322 return true;
1323 }
1324 return false;
1325 }
1326
1327 jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
1328 return (os::javaTimeNanos() - _last_async_deflation_time_ns) / (NANOUNITS / MILLIUNITS);
1329 }
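// Note: NANOUNITS / MILLIUNITS == 1000000000 / 1000 == 1000000, so the
// difference in javaTimeNanos() values is converted to milliseconds.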
1330
1331 void ObjectSynchronizer::oops_do(OopClosure* f) {
1332 // We only scan the global used list here (for moribund threads), and
1333 // the thread-local monitors in Thread::oops_do().
1334 global_used_oops_do(f);
1335 }
1336
1337 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
1338 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1339 list_oops_do(g_om_in_use_list, g_om_in_use_count, f);
1340 }
1341
1342 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
1343 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1344 list_oops_do(thread->om_in_use_list, thread->om_in_use_count, f);
1345 }
1346
1347 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, int count, OopClosure* f) {
1348 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1349 // The oops_do() phase does not overlap with monitor deflation
1350 // so no need to update the ObjectMonitor's ref_count for this
1351 // ObjectMonitor* use and no need to mark ObjectMonitors for the
1352 // list traversal.
1353 for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
1354 if (mid->object() != NULL) {
1355 f->do_oop((oop*)mid->object_addr());
1356 }
1357 }
1358 }
1359
1360
1361 // -----------------------------------------------------------------------------
1362 // ObjectMonitor Lifecycle
1363 // -----------------------
1364 // Inflation unlinks monitors from the global g_free_list and
1365 // associates them with objects. Deflation -- which occurs at
1366 // STW-time -- disassociates idle monitors from objects. Such
1367 // scavenged monitors are returned to the g_free_list.
1368 //
1369 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
1370 //
1371 // Lifecycle:
1372 // -- unassigned and on the global free list
1444
1445 // 1: try to allocate from the thread's local om_free_list.
1446 // Threads will attempt to allocate first from their local list, then
1447 // from the global list, and only after those attempts fail will the
1448 // thread attempt to instantiate new monitors. Thread-local free lists
1449 // improve allocation latency, as well as reducing coherency traffic
1450 // on the shared global list.
1451 m = take_from_start_of_om_free_list(self);
1452 if (m != NULL) {
1453 guarantee(m->object() == NULL, "invariant");
1454 m->set_allocation_state(ObjectMonitor::New);
1455 prepend_to_om_in_use_list(self, m);
1456 return m;
1457 }
1458
1459 // 2: try to allocate from the global g_free_list
1460 // CONSIDER: use muxTry() instead of muxAcquire().
1461 // If the muxTry() fails then drop immediately into case 3.
1462 // If we're using thread-local free lists then try
1463 // to reprovision the caller's free list.
1464 if (g_free_list != NULL) {
1465 // Reprovision the thread's om_free_list.
1466 // Use bulk transfers to reduce the allocation rate and heat
1467 // on various locks.
1468 for (int i = self->om_free_provision; --i >= 0;) {
1469 ObjectMonitor* take = take_from_start_of_g_free_list();
1470 if (take == NULL) {
1471 break; // No more are available.
1472 }
1473 guarantee(take->object() == NULL, "invariant");
1474 if (AsyncDeflateIdleMonitors) {
1475 // We allowed 3 field values to linger during async deflation.
1476 // We clear header and restore ref_count here, but we leave
1477 // owner == DEFLATER_MARKER so the simple C2 ObjectMonitor
1478 // enter optimization can no longer race with async deflation
1479 // and reuse.
1480 take->set_header(markWord::zero());
1481 if (take->ref_count() < 0) {
1482 // Add back max_jint to restore the ref_count field to its
1483 // proper value.
1484 Atomic::add(max_jint, &take->_ref_count);
1485
1486 DEBUG_ONLY(jint l_ref_count = take->ref_count();)
1487 assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d",
1488 l_ref_count, take->ref_count());
1489 }
1490 }
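// Worked example (illustrative values only): the async deflater moves
// ref_count from 0 to -max_jint; if k racing threads then increment
// it, om_alloc() sees -max_jint + k here and the add above restores
// the true count k.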
1491 take->Recycle();
1492 // Since we're taking from the global free-list, take must be Free.
1493 // om_release() also sets the allocation state to Free because it
1494 // is called from other code paths.
1495 assert(take->is_free(), "invariant");
1496 om_release(self, take, false);
1497 }
1498 self->om_free_provision += 1 + (self->om_free_provision / 2);
1499 if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
1500
1501 if (!AsyncDeflateIdleMonitors &&
1502 is_MonitorBound_exceeded(g_om_population - g_om_free_count)) {
1503 // Not enough ObjectMonitors on the global free list.
1504 // We can't safely induce a STW safepoint from om_alloc() as our thread
1505 // state may not be appropriate for such activities and callers may hold
1506 // naked oops, so instead we defer the action.
1507 InduceScavenge(self, "om_alloc");
1508 }
1509 continue;
1510 }
1511
1512 // 3: allocate a block of new ObjectMonitors
1513 // Both the local and global free lists are empty -- resort to malloc().
1514 // In the current implementation ObjectMonitors are TSM - immortal.
1515 // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
1516 // each ObjectMonitor to start at the beginning of a cache line,
1517 // so we use align_up().
1518 // A better solution would be to use C++ placement-new.
1519 // BEWARE: As it stands currently, we don't run the ctors!
1520 assert(_BLOCKSIZE > 1, "invariant");
1521 size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
1522 PaddedObjectMonitor* temp;
1523 size_t aligned_size = neededsize + (OM_CACHE_LINE_SIZE - 1);
1524 void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size, mtInternal);
1525 temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, OM_CACHE_LINE_SIZE);
1526 (void)memset((void *) temp, 0, neededsize);
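// Alignment arithmetic (sketch, assuming OM_CACHE_LINE_SIZE is 64):
// allocating neededsize + 63 bytes guarantees a 64-byte-aligned
// address within the first 64 bytes of the raw allocation, and
// align_up() rounds real_malloc_addr up to it, so every
// PaddedObjectMonitor starts on its own cache line.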
1527
1528 // Format the block.
1529 // Initialize the linked list; each monitor points to its next,
1530 // forming the singly linked free list. The very first monitor
1531 // will point to the next block, which forms the block list.
1532 // The trick of using the 1st element in the block as g_block_list
1533 // linkage should be reconsidered. A better implementation would
1534 // look like: class Block { Block* next; int N; ObjectMonitor Body[N]; }
1535
1536 for (int i = 1; i < _BLOCKSIZE; i++) {
1537 temp[i]._next_om = (ObjectMonitor*)&temp[i + 1];
1538 assert(temp[i].is_free(), "invariant");
1539 }
1540
1541 // terminate the last monitor as the end of list
1542 temp[_BLOCKSIZE - 1]._next_om = (ObjectMonitor*)NULL;
1543
1544 // Element [0] is reserved for global list linkage
1545 temp[0].set_object(CHAINMARKER);
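// Resulting block layout (sketch):
//
//   temp[0]   object == CHAINMARKER; its _next_om will chain the
//             blocks once this block is prepended to g_block_list
//   temp[1] -> temp[2] -> ... -> temp[_BLOCKSIZE - 1] -> NULL
//             the new segment for the global free list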
1546
1547 // Consider carving out this thread's current request from the
1548 // block in hand. This avoids some lock traffic and redundant
1549 // list activity.
1550
1551 prepend_block_to_lists(temp);
1552 }
1553 }
1554
1555 // Place "m" on the caller's private per-thread om_free_list.
1556 // In practice there's no need to clamp or limit the number of
1557 // monitors on a thread's om_free_list as the only non-allocation time
1558 // we'll call om_release() is to return a monitor to the free list after
1559 // a CAS attempt failed. This doesn't allow unbounded #s of monitors to
1560 // accumulate on a thread's free list.
1561 //
1562 // Key constraint: all ObjectMonitors on a thread's free list and the global
1576 // _next_om is used for both per-thread in-use and free lists so
1577 // we have to remove 'm' from the in-use list first (as needed).
1578 if (from_per_thread_alloc) {
1579 // Need to remove 'm' from om_in_use_list.
1580 // We use the more complicated mark-cur_mid_in_use-and-mid-as-we-go
1581 // protocol because async deflation can do list deletions in parallel.
1582 ObjectMonitor* cur_mid_in_use = NULL;
1583 ObjectMonitor* mid = NULL;
1584 ObjectMonitor* next = NULL;
1585 bool extracted = false;
1586
1587 if (!mark_list_head(&self->om_in_use_list, &mid, &next)) {
1588 fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self));
1589 }
1590 while (true) {
1591 if (m == mid) {
1592 // We found 'm' on the per-thread in-use list so try to extract it.
1593 if (cur_mid_in_use == NULL) {
1594 // mid is the list head and it is marked. Switch the list head
1595 // to next which unmarks the list head, but leaves mid marked:
1596 self->om_in_use_list = next;
1597 // mark_list_head() used cmpxchg() above, switching list head can be lazier:
1598 OrderAccess::storestore();
1599 } else {
1600 // mid and cur_mid_in_use are marked. Switch cur_mid_in_use's
1601 // next field to next which unmarks cur_mid_in_use, but leaves
1602 // mid marked:
1603 OrderAccess::release_store(&cur_mid_in_use->_next_om, next);
1604 }
1605 extracted = true;
1606 Atomic::dec(&self->om_in_use_count);
1607 // Unmark mid, but leave the next value for any lagging list
1608 // walkers. It will get cleaned up when mid is prepended to
1609 // the thread's free list:
1610 set_next(mid, next);
1611 break;
1612 }
1613 if (cur_mid_in_use != NULL) {
1614 set_next(cur_mid_in_use, mid); // unmark cur_mid_in_use
1615 }
1616 // The next cur_mid_in_use keeps mid's marked next field so
1617 // that it is stable for a possible next field change. It
1618 // cannot be deflated while it is marked.
1673 // The thread is going away, however the ObjectMonitors on the
1674 // om_in_use_list may still be in-use by other threads. Link
1675 // them to in_use_tail, which will be linked into the global
1676 // in-use list g_om_in_use_list below.
1677 //
1678 // Account for the in-use list head before the loop since it is
1679 // already marked (by this thread):
1680 in_use_tail = in_use_list;
1681 in_use_count++;
1682 for (ObjectMonitor* cur_om = unmarked_next(in_use_list); cur_om != NULL;) {
1683 if (is_next_marked(cur_om)) {
1684 // This next field is marked so there must be an async deflater
1685 // thread ahead of us so we'll give it a chance to finish.
1686 while (is_next_marked(cur_om)) {
1687 os::naked_short_sleep(1);
1688 }
1689 // Refetch the possibly changed next field and try again.
1690 cur_om = unmarked_next(in_use_tail);
1691 continue;
1692 }
1693 if (cur_om->is_free()) {
1694 // cur_om was deflated and the allocation state was changed
1695 // to Free while it was marked. We happened to see it just
1696 // after it was unmarked (and added to the free list).
1697 // Refetch the possibly changed next field and try again.
1698 cur_om = unmarked_next(in_use_tail);
1699 continue;
1700 }
1701 in_use_tail = cur_om;
1702 in_use_count++;
1703 cur_om = unmarked_next(cur_om);
1704 }
1705 guarantee(in_use_tail != NULL, "invariant");
1706 int l_om_in_use_count = self->om_in_use_count;
1707 ADIM_guarantee(l_om_in_use_count == in_use_count, "in-use counts don't "
1708 "match: l_om_in_use_count=%d, in_use_count=%d",
1709 l_om_in_use_count, in_use_count);
1710 self->om_in_use_count = 0;
1711 // Clear the in-use list head (which also unmarks it):
1712 self->om_in_use_list = (ObjectMonitor*)NULL;
1713 // mark_list_head() used cmpxchg() above, clearing the disconnected list head can be lazier:
1714 OrderAccess::storestore();
1715 set_next(in_use_list, next);
1716 }
1717
1718 int free_count = 0;
1719 ObjectMonitor* free_list = self->om_free_list;
1720 ObjectMonitor* free_tail = NULL;
1721 if (free_list != NULL) {
1722 // The thread is going away. Set 'free_tail' to the last per-thread free
1723 // monitor which will be linked to g_free_list below.
1724 stringStream ss;
1725 for (ObjectMonitor* s = free_list; s != NULL; s = unmarked_next(s)) {
1726 free_count++;
1727 free_tail = s;
1728 guarantee(s->object() == NULL, "invariant");
1729 guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
1730 }
1731 guarantee(free_tail != NULL, "invariant");
1732 int l_om_free_count = self->om_free_count;
1733 ADIM_guarantee(l_om_free_count == free_count, "free counts don't match: "
1734 "l_om_free_count=%d, free_count=%d", l_om_free_count,
1735 free_count);
1736 self->om_free_count = 0;
1737 self->om_free_list = NULL;
1738 OrderAccess::storestore(); // Lazier memory is okay for list walkers.
1739 }
1740
1741 if (free_tail != NULL) {
1742 prepend_list_to_g_free_list(free_list, free_tail, free_count);
1743 }
1744
1745 if (in_use_tail != NULL) {
1746 prepend_list_to_g_om_in_use_list(in_use_list, in_use_tail, in_use_count);
1747 }
1748
1749 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1750 LogStreamHandle(Info, monitorinflation) lsh_info;
1751 LogStream* ls = NULL;
1752 if (log_is_enabled(Debug, monitorinflation)) {
1753 ls = &lsh_debug;
1754 } else if ((free_count != 0 || in_use_count != 0) &&
1755 log_is_enabled(Info, monitorinflation)) {
1756 ls = &lsh_info;
1757 }
1758 if (ls != NULL) {
1904
1905
1906 // fetch the displaced mark from the owner's stack.
1907 // The owner can't die or unwind past the lock while our INFLATING
1908 // object is in the mark. Furthermore the owner can't complete
1909 // an unlock on the object, either.
1910 markWord dmw = mark.displaced_mark_helper();
1911 // Catch if the object's header is not neutral (not locked and
1912 // not marked is what we care about here).
1913 ADIM_guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1914
1915 // Setup monitor fields to proper values -- prepare the monitor
1916 m->set_header(dmw);
1917
1918 // Optimization: if the mark.locker stack address is associated
1919 // with this thread we could simply set m->_owner = self.
1920 // Note that a thread can inflate an object
1921 // that it has stack-locked -- as might happen in wait() -- directly
1922 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
1923 if (AsyncDeflateIdleMonitors) {
1924 m->simply_set_owner_from(mark.locker(), NULL, DEFLATER_MARKER);
1925 } else {
1926 m->simply_set_owner_from(mark.locker(), NULL);
1927 }
1928 m->set_object(object);
1929 // TODO-FIXME: assert BasicLock->dhw != 0.
1930
1931 omh_p->set_om_ptr(m);
1932
1933 // Must preserve store ordering. The monitor state must
1934 // be stable at the time of publishing the monitor address.
1935 guarantee(object->mark() == markWord::INFLATING(), "invariant");
1936 object->release_set_mark(markWord::encode(m));
1937
1938 // Once ObjectMonitor is configured and the object is associated
1939 // with the ObjectMonitor, it is safe to allow async deflation:
1940 assert(m->is_new(), "freshly allocated monitor must be new");
1941 m->set_allocation_state(ObjectMonitor::Old);
1942
1943 // Hopefully the performance counters are allocated on distinct cache lines
1944 // to avoid false sharing on MP systems ...
1945 OM_PERFDATA_OP(Inflations, inc());
1946 if (log_is_enabled(Trace, monitorinflation)) {
2046 // is registered with the ServiceThread and it is notified.
2047
2048 void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* counters) {
2049 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2050
2051 // The per-thread in-use lists are handled in
2052 // ParallelSPCleanupThreadClosure::do_thread().
2053
2054 if (!AsyncDeflateIdleMonitors || is_special_deflation_requested()) {
2055 // Use the older mechanism for the global in-use list or if a
2056 // special deflation has been requested before the safepoint.
2057 ObjectSynchronizer::deflate_idle_monitors(counters);
2058 return;
2059 }
2060
2061 log_debug(monitorinflation)("requesting async deflation of idle monitors.");
2062 // Request deflation of idle monitors by the ServiceThread:
2063 set_is_async_deflation_requested(true);
2064 MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
2065 ml.notify_all();
2066
2067 if (log_is_enabled(Debug, monitorinflation)) {
2068 // exit_globals()'s call to audit_and_print_stats() is done
2069 // at the Info level and not at a safepoint.
2070 // For safepoint based deflation, audit_and_print_stats() is called
2071 // in ObjectSynchronizer::finish_deflate_idle_monitors() at the
2072 // Debug level at a safepoint.
2073 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
2074 }
2075 }
2076
2077 // Deflate a single monitor if not in-use
2078 // Return true if deflated, false if in-use
2079 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
2080 ObjectMonitor** free_head_p,
2081 ObjectMonitor** free_tail_p) {
2082 bool deflated;
2083 // Normal case ... The monitor is associated with obj.
2084 const markWord mark = obj->mark();
2085 guarantee(mark == markWord::encode(mid), "should match: mark="
2086 INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
2087 markWord::encode(mid).value());
2088 // Make sure that mark.monitor() and markWord::encode() agree:
2089 guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
2090 ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
2091 const markWord dmw = mid->header();
2092 guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
2093
2094 if (mid->is_busy() || mid->ref_count() != 0) {
2257 }
2258 *free_tail_p = mid;
2259
2260 // At this point, mid->_next_om still refers to its current
2261 // value and another ObjectMonitor's _next_om field still
2262 // refers to this ObjectMonitor. Those linkages have to be
2263 // cleaned up by the caller who has the complete context.
2264
2265 // We leave owner == DEFLATER_MARKER and ref_count < 0
2266 // to force any racing threads to retry.
2267 return true; // Success, ObjectMonitor has been deflated.
2268 }
2269
2270 // The owner was changed from DEFLATER_MARKER so we lost the
2271 // race since the ObjectMonitor is now busy.
2272
2273 // Add back max_jint to restore the ref_count field to its
2274 // proper value (which may not be what we saw above):
2275 Atomic::add(max_jint, &mid->_ref_count);
2276
2277 DEBUG_ONLY(jint l_ref_count = mid->ref_count();)
2278 assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d",
2279 l_ref_count, mid->ref_count());
2280 return false;
2281 }
2282
2283 // The ref_count was no longer 0 so we lost the race since the
2284 // ObjectMonitor is now busy or the ObjectMonitor* is now in use.
2285 // Restore owner to NULL if it is still DEFLATER_MARKER:
2286 mid->try_set_owner_from(NULL, DEFLATER_MARKER);
2287 }
2288
2289 // The owner field is no longer NULL so we lost the race since the
2290 // ObjectMonitor is now busy.
2291 return false;
2292 }
2293
2294 // Walk a given monitor list, and deflate idle monitors.
2295 // The given list could be a per-thread list or a global list.
2296 //
2297 // In the case of parallel processing of thread local monitor lists,
2298 // work is done by Threads::parallel_threads_do() which ensures that
2299 // each Java thread is processed by exactly one worker thread, and
2310 ObjectMonitor* cur_mid_in_use = NULL;
2311 ObjectMonitor* mid = NULL;
2312 ObjectMonitor* next = NULL;
2313 int deflated_count = 0;
2314
2315 // We use the simpler mark-mid-as-we-go protocol since there are no
2316 // parallel list deletions while we are at a safepoint.
2317 if (!mark_list_head(list_p, &mid, &next)) {
2318 return 0; // The list is empty so nothing to deflate.
2319 }
2320
2321 while (true) {
2322 oop obj = (oop) mid->object();
2323 if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
2324 // Deflation succeeded and already updated free_head_p and
2325 // free_tail_p as needed. Finish the move to the local free list
2326 // by unlinking mid from the global or per-thread in-use list.
2327 if (cur_mid_in_use == NULL) {
2328 // mid is the list head and it is marked. Switch the list head
2329 // to next which unmarks the list head, but leaves mid marked:
2330 *list_p = next;
2331 // mark_list_head() used cmpxchg() above, switching list head can be lazier:
2332 OrderAccess::storestore();
2333 } else {
2334 // mid is marked. Switch cur_mid_in_use's next field to next
2335 // which is safe because we have no parallel list deletions,
2336 // but we leave mid marked:
2337 OrderAccess::release_store(&cur_mid_in_use->_next_om, next);
2338 }
2339 // At this point mid is disconnected from the in-use list so
2340 // its marked next field no longer has any effects.
2341 deflated_count++;
2342 Atomic::dec(count_p);
2343 // mid is current tail in the free_head_p list so NULL terminate it
2344 // (which also unmarks it):
2345 set_next(mid, NULL);
2346
2347 // All the list management is done so move on to the next one:
2348 mid = next;
2349 } else {
2350 set_next(mid, next); // unmark next field
2351
2352 // All the list management is done so move on to the next one:
2413 // a cur_mid_in_use, then its next field is also marked at this point.
2414
2415 if (next != NULL) {
2416 // We mark next's next field so that an om_flush()
2417 // thread that is behind us cannot pass us when we
2418 // unmark the current mid's next field.
2419 next_next = mark_next_loop(next);
2420 }
2421
2422 // Only try to deflate if there is an associated Java object and if
2423 // mid is old (is not newly allocated and is not newly freed).
2424 if (mid->object() != NULL && mid->is_old() &&
2425 deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) {
2426 // Deflation succeeded and already updated free_head_p and
2427 // free_tail_p as needed. Finish the move to the local free list
2428 // by unlinking mid from the global or per-thread in-use list.
2429 if (cur_mid_in_use == NULL) {
2430 // mid is the list head and it is marked. Switch the list head
2431 // to next which is also marked (if not NULL) and also leave
2432 // mid marked:
2433 *list_p = next;
2434 // mark_list_head() used cmpxchg() above, switching list head can be lazier:
2435 OrderAccess::storestore();
2436 } else {
2437 ObjectMonitor* marked_next = mark_om_ptr(next);
2438 // mid and cur_mid_in_use are marked. Switch cur_mid_in_use's
2439 // next field to marked_next and also leave mid marked:
2440 OrderAccess::release_store(&cur_mid_in_use->_next_om, marked_next);
2441 }
2442 // At this point mid is disconnected from the in-use list so
2443 // its marked next field no longer has any effects.
2444 deflated_count++;
2445 Atomic::dec(count_p);
2446 // mid is current tail in the free_head_p list so NULL terminate it
2447 // (which also unmarks it):
2448 set_next(mid, NULL);
2449
2450 // All the list management is done so move on to the next one:
2451 mid = next; // mid keeps non-NULL next's marked next field
2452 next = next_next;
2453 } else {
2454 // mid is considered in-use if it does not have an associated
2455 // Java object or mid is not old or deflation did not succeed.
2456 // A mid->is_new() node can be seen here when it is freshly
2457 // returned by om_alloc() (and skips the deflation code path).
2458 // A mid->is_old() node can be seen here when deflation failed.
2459 // A mid->is_free() node can be seen here when a fresh node from
2460 // om_alloc() is released by om_release() due to losing the race
2461 // in inflate().
2462
2463 // All the list management is done so move on to the next one:
2464 if (cur_mid_in_use != NULL) {
2465 set_next(cur_mid_in_use, mid); // unmark cur_mid_in_use
2466 }
2467 // The next cur_mid_in_use keeps mid's marked next field so
2468 // that it is stable for a possible next field change. It
2469 // cannot be modified by om_release() while it is marked.
2470 cur_mid_in_use = mid;
2471 mid = next; // mid keeps non-NULL next's marked next field
2472 next = next_next;
2473
2474 if (SafepointSynchronize::is_synchronizing() &&
2475 cur_mid_in_use != *list_p && cur_mid_in_use->is_old()) {
2476 // If a safepoint has started and cur_mid_in_use is not the list
2477 // head and is old, then it is safe to use as saved state. Return
2478 // to the caller before blocking.
2479 *saved_mid_in_use_p = cur_mid_in_use;
2480 set_next(cur_mid_in_use, mid); // unmark cur_mid_in_use
2481 if (mid != NULL) {
2482 set_next(mid, next); // unmark mid
2483 }
2484 return deflated_count;
2485 }
2486 }
2487 if (mid == NULL) {
2488 if (cur_mid_in_use != NULL) {
2489 set_next(cur_mid_in_use, mid); // unmark cur_mid_in_use
2490 }
2491 break; // Reached end of the list so nothing more to deflate.
2492 }
2493
2494 // The current mid's next field is marked at this point. If we have
2495 // a cur_mid_in_use, then its next field is also marked at this point.
2496 }
2497 // We finished the list without a safepoint starting so there's
2498 // no need to save state.
2499 *saved_mid_in_use_p = NULL;
2500 return deflated_count;
2501 }
2502
2503 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2504 counters->n_in_use = 0; // currently associated with objects
2505 counters->n_in_circulation = 0; // extant
2506 counters->n_scavenged = 0; // reclaimed (global and per-thread)
2507 counters->per_thread_scavenged = 0; // per-thread scavenge total
2508 counters->per_thread_times = 0.0; // per-thread scavenge times
2509 OrderAccess::storestore(); // flush inits for worker threads
2510 }
2511
2512 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
2513 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2514
2515 if (AsyncDeflateIdleMonitors) {
2516 // Nothing to do when global idle ObjectMonitors are deflated using
2517 // a JavaThread unless a special deflation has been requested.
2518 if (!is_special_deflation_requested()) {
2519 return;
2520 }
2521 }
2522
2523 bool deflated = false;
2524
2525 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors
2526 ObjectMonitor* free_tail_p = NULL;
2527 elapsedTimer timer;
2528
2529 if (log_is_enabled(Info, monitorinflation)) {
2530 timer.start();
2531 }
2532
2533 // Note: the thread-local monitors lists get deflated in
2534 // a separate pass. See deflate_thread_local_monitors().
2535
2536 // For moribund threads, scan g_om_in_use_list
2537 int deflated_count = 0;
2538 if (g_om_in_use_list != NULL) {
2539 // Update n_in_circulation before g_om_in_use_count is updated by deflation.
2540 Atomic::add(g_om_in_use_count, &counters->n_in_circulation);
2541
2542 deflated_count = deflate_monitor_list(&g_om_in_use_list, &g_om_in_use_count, &free_head_p, &free_tail_p);
2543 Atomic::add(g_om_in_use_count, &counters->n_in_use);
2544 }
2545
2546 if (free_head_p != NULL) {
2547 // Move the deflated ObjectMonitors back to the global free list.
2548 // No races on the working free list so no need for load_acquire().
2549 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
2550 assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
2551 INTPTR_FORMAT, p2i(free_tail_p->_next_om));
2552 prepend_list_to_g_free_list(free_head_p, free_tail_p, deflated_count);
2553 Atomic::add(deflated_count, &counters->n_scavenged);
2554 }
2555 timer.stop();
2556
2557 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2558 LogStreamHandle(Info, monitorinflation) lsh_info;
2559 LogStream* ls = NULL;
2560 if (log_is_enabled(Debug, monitorinflation)) {
2561 ls = &lsh_debug;
2562 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2563 ls = &lsh_info;
2577
2578 void ObjectSynchronizer::deflate_idle_monitors_using_JT() {
2579 assert(AsyncDeflateIdleMonitors, "sanity check");
2580
2581 // Deflate any global idle monitors.
2582 deflate_global_idle_monitors_using_JT();
2583
2584 int count = 0;
2585 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2586 if (jt->om_in_use_count > 0 && !jt->is_exiting()) {
2587 // This JavaThread is using ObjectMonitors so deflate any that
2588 // are idle unless this JavaThread is exiting; do not race with
2589 // ObjectSynchronizer::om_flush().
2590 deflate_per_thread_idle_monitors_using_JT(jt);
2591 count++;
2592 }
2593 }
2594 if (count > 0) {
2595 log_debug(monitorinflation)("did async deflation of idle monitors for %d thread(s).", count);
2596 }
2597
2598 log_info(monitorinflation)("async g_om_population=%d, g_om_in_use_count=%d, "
2599 "g_om_free_count=%d, g_om_wait_count=%d",
2600 g_om_population, g_om_in_use_count,
2601 g_om_free_count, g_om_wait_count);
2602
2603 // The ServiceThread's async deflation request has been processed.
2604 set_is_async_deflation_requested(false);
2605
2606 if (HandshakeAfterDeflateIdleMonitors && g_om_wait_count > 0) {
2607 // There are deflated ObjectMonitors waiting for a handshake
2608 // (or a safepoint) for safety.
2609
2610 // g_wait_list and g_om_wait_count are only updated by the calling
2611 // thread so no need for load_acquire() or release_store().
2612 ObjectMonitor* list = g_wait_list;
2613 ADIM_guarantee(list != NULL, "g_wait_list must not be NULL");
2614 int count = g_om_wait_count;
2615 g_om_wait_count = 0;
2616 g_wait_list = NULL;
2617 OrderAccess::storestore(); // Lazier memory sync is okay for list walkers.
2618
2619 // Find the tail for prepend_list_to_common(). No need to mark
2620 // ObjectMonitors for this list walk since only the deflater
2621 // thread manages the wait list.
2622 int l_count = 0;
2623 ObjectMonitor* tail = NULL;
2624 for (ObjectMonitor* n = list; n != NULL; n = unmarked_next(n)) {
2625 tail = n;
2626 l_count++;
2627 }
2628 ADIM_guarantee(count == l_count, "count=%d != l_count=%d", count, l_count);
2629
2630 // Will execute a safepoint if !ThreadLocalHandshakes:
2631 HandshakeForDeflation hfd_tc;
2632 Handshake::execute(&hfd_tc);
2633
2634 prepend_list_to_common(list, tail, count, &g_free_list, &g_om_free_count);
2635
2636 log_info(monitorinflation)("moved %d idle monitors from global waiting list to global free list", count);
2637 }
2638 }
2639
2640 // Deflate global idle ObjectMonitors using a JavaThread.
2641 //
2655
2656 deflate_common_idle_monitors_using_JT(false /* !is_global */, target);
2657 }
2658
2659 // Deflate global or per-thread idle ObjectMonitors using a JavaThread.
2660 //
2661 void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* target) {
2662 JavaThread* self = JavaThread::current();
2663
2664 int deflated_count = 0;
2665 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged ObjectMonitors
2666 ObjectMonitor* free_tail_p = NULL;
2667 ObjectMonitor* saved_mid_in_use_p = NULL;
2668 elapsedTimer timer;
2669
2670 if (log_is_enabled(Info, monitorinflation)) {
2671 timer.start();
2672 }
2673
2674 if (is_global) {
2675 OM_PERFDATA_OP(MonExtant, set_value(g_om_in_use_count));
2676 } else {
2677 OM_PERFDATA_OP(MonExtant, inc(target->om_in_use_count));
2678 }
2679
2680 do {
2681 int local_deflated_count;
2682 if (is_global) {
2683 local_deflated_count = deflate_monitor_list_using_JT(&g_om_in_use_list, &g_om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
2684 } else {
2685 local_deflated_count = deflate_monitor_list_using_JT(&target->om_in_use_list, &target->om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
2686 }
2687 deflated_count += local_deflated_count;
2688
2689 if (free_head_p != NULL) {
2690 // Move the deflated ObjectMonitors to the global free list.
2691 // No races on the working list so no need for load_acquire().
2692 guarantee(free_tail_p != NULL && local_deflated_count > 0, "free_tail_p=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(free_tail_p), local_deflated_count);
2693 // Note: The target thread can be doing an om_alloc() that
2694 // is trying to prepend an ObjectMonitor on its in-use list
2695 // at the same time that we have deflated the current in-use
2696 // list head and put it on the local free list. prepend_to_common()
2697 // will detect the race and retry which avoids list corruption,
2737 LogStream* ls = NULL;
2738 if (log_is_enabled(Debug, monitorinflation)) {
2739 ls = &lsh_debug;
2740 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2741 ls = &lsh_info;
2742 }
2743 if (ls != NULL) {
2744 if (is_global) {
2745 ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
2746 } else {
2747 ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(target), timer.seconds(), deflated_count);
2748 }
2749 }
2750 }
2751
2752 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2753 // Report the cumulative time for deflating each thread's idle
2754 // monitors. Note: if the work is split among more than one
2755 // worker thread, then the reported time will likely be more
2756 // than a beginning to end measurement of the phase.
2757 log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged);
2758
2759 bool needs_special_deflation = is_special_deflation_requested();
2760 if (AsyncDeflateIdleMonitors && !needs_special_deflation) {
2761 // Nothing to do when idle ObjectMonitors are deflated using
2762 // a JavaThread unless a special deflation has been requested.
2763 return;
2764 }
2765
2766 if (log_is_enabled(Debug, monitorinflation)) {
2767 // exit_globals()'s call to audit_and_print_stats() is done
2768 // at the Info level and not at a safepoint.
2769 // For async deflation, audit_and_print_stats() is called in
2770 // ObjectSynchronizer::do_safepoint_work() at the Debug level
2771 // at a safepoint.
2772 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
2773 } else if (log_is_enabled(Info, monitorinflation)) {
2774 log_info(monitorinflation)("g_om_population=%d, g_om_in_use_count=%d, "
2775 "g_om_free_count=%d, g_om_wait_count=%d",
2776 g_om_population, g_om_in_use_count,
2777 g_om_free_count, g_om_wait_count);
2778 }
2779
2780 ForceMonitorScavenge = 0; // Reset
2781
2782 OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
2783 OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
2784
2785 GVars.stw_random = os::random();
2786 GVars.stw_cycle++;
2787
2788 if (needs_special_deflation) {
2789 set_is_special_deflation_requested(false); // special deflation is done
2790 }
2791 }
2792
2793 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
2794 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2795
2796 if (AsyncDeflateIdleMonitors && !is_special_deflation_requested()) {
2797 // Nothing to do if a special deflation has NOT been requested.
2798 return;
2799 }
2800
2801 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors
2802 ObjectMonitor* free_tail_p = NULL;
2803 elapsedTimer timer;
2804
2805 if (log_is_enabled(Info, safepoint, cleanup) ||
2806 log_is_enabled(Info, monitorinflation)) {
2807 timer.start();
2808 }
2809
2810 // Update n_in_circulation before om_in_use_count is updated by deflation.
2811 Atomic::add(thread->om_in_use_count, &counters->n_in_circulation);
2812
2813 int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
2814 Atomic::add(thread->om_in_use_count, &counters->n_in_use);
2815
2816 if (free_head_p != NULL) {
2817 // Move the deflated ObjectMonitors back to the global free list.
2818 // No races on the working list so no need for load_acquire().
2819 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
2820 assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
2821 INTPTR_FORMAT, p2i(free_tail_p->_next_om));
2822 prepend_list_to_g_free_list(free_head_p, free_tail_p, deflated_count);
2823 Atomic::add(deflated_count, &counters->n_scavenged);
2824 Atomic::add(deflated_count, &counters->per_thread_scavenged);
2825 }
2826
2827 timer.stop();
2828 // Safepoint logging cares about cumulative per_thread_times and
2829 // we'll capture most of the cost, but not the muxRelease() which
2830 // should be cheap.
2831 counters->per_thread_times += timer.seconds();
2832
2833 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2834 LogStreamHandle(Info, monitorinflation) lsh_info;
2924 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2925 LogStreamHandle(Info, monitorinflation) lsh_info;
2926 LogStreamHandle(Trace, monitorinflation) lsh_trace;
2927 LogStream* ls = NULL;
2928 if (log_is_enabled(Trace, monitorinflation)) {
2929 ls = &lsh_trace;
2930 } else if (log_is_enabled(Debug, monitorinflation)) {
2931 ls = &lsh_debug;
2932 } else if (log_is_enabled(Info, monitorinflation)) {
2933 ls = &lsh_info;
2934 }
2935 assert(ls != NULL, "sanity check");
2936
2937 // Log counts for the global and per-thread monitor lists:
2938 int chk_om_population = log_monitor_list_counts(ls);
2939 int error_cnt = 0;
2940
2941 ls->print_cr("Checking global lists:");
2942
2943 // Check g_om_population:
2944 if (g_om_population == chk_om_population) {
2945 ls->print_cr("g_om_population=%d equals chk_om_population=%d",
2946 g_om_population, chk_om_population);
2947 } else {
2948 // With lock free access to the monitor lists, it is possible for
2949 // log_monitor_list_counts() to return a value that doesn't match
2950 // g_om_population. So far a higher value has been seen in testing,
2951 // so something is being double counted by log_monitor_list_counts().
2952 ls->print_cr("WARNING: g_om_population=%d is not equal to "
2953 "chk_om_population=%d", g_om_population, chk_om_population);
2954 }
2955
2956 // Check g_om_in_use_list and g_om_in_use_count:
2957 chk_global_in_use_list_and_count(ls, &error_cnt);
2958
2959 // Check g_free_list and g_om_free_count:
2960 chk_global_free_list_and_count(ls, &error_cnt);
2961
2962 if (HandshakeAfterDeflateIdleMonitors) {
2963 // Check g_wait_list and g_om_wait_count:
2964 chk_global_wait_list_and_count(ls, &error_cnt);
2965 }
2966
2967 ls->print_cr("Checking per-thread lists:");
2968
2969 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2970 // Check om_in_use_list and om_in_use_count:
2971 chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);
2972
2973 // Check om_free_list and om_free_count:
3024 }
3025 if (n->object() != NULL) {
3026 if (jt != NULL) {
3027 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3028 ": free per-thread monitor must have NULL _object "
3029 "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
3030 p2i(n->object()));
3031 } else {
3032 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
3033 "must have NULL _object field: _object=" INTPTR_FORMAT,
3034 p2i(n), p2i(n->object()));
3035 }
3036 *error_cnt_p = *error_cnt_p + 1;
3037 }
3038 }
3039
3040 // Check the global free list and count; log the results of the checks.
3041 void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
3042 int *error_cnt_p) {
3043 int chk_om_free_count = 0;
3044 ObjectMonitor* cur = NULL;
3045 ObjectMonitor* next = NULL;
3046 if (mark_list_head(&g_free_list, &cur, &next)) {
3047 // Marked the global free list head so process the list.
3048 while (true) {
3049 chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
3050 chk_om_free_count++;
3051
3052 mark_next_for_traversal(&cur, &next);
3053 if (cur == NULL) {
3054 break;
3055 }
3056 }
3057 }
3058 if (g_om_free_count == chk_om_free_count) {
3059 out->print_cr("g_om_free_count=%d equals chk_om_free_count=%d",
3060 g_om_free_count, chk_om_free_count);
3061 } else {
3062 // With lock free access to g_free_list, it is possible for an
3063 // ObjectMonitor to be prepended to g_free_list after we started
3064 // calculating chk_om_free_count so g_om_free_count may not
3065 // match anymore.
3066 out->print_cr("WARNING: g_om_free_count=%d is not equal to "
3067 "chk_om_free_count=%d", g_om_free_count, chk_om_free_count);
3068 }
3069 }
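// The checkers in this file all advance with mark_next_for_traversal(),
// which is defined with the other list management functions earlier in
// this file. As a minimal sketch of the contract the loops above rely on
// (an illustration built from the marking primitives, not the real
// implementation): advance cur to its unmarked successor, pin that
// successor by marking its next field so it cannot be unlinked mid-visit,
// then unmark the node being left behind. *cur_p == NULL signals the end
// of the list.
//
// static void sketch_mark_next_for_traversal(ObjectMonitor** cur_p,
//                                            ObjectMonitor** next_p) {
//   ObjectMonitor* cur = *cur_p;
//   ObjectMonitor* next = *next_p;  // cur's unmarked successor
//   if (next != NULL) {
//     // Pin the successor before releasing the current node:
//     *next_p = mark_next_loop(next);
//   }
//   set_next(cur, next);  // unmark the node we are leaving
//   *cur_p = next;        // advance; NULL => end of list
// }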
3070
3071 // Check the global wait list and count; log the results of the checks.
3072 void ObjectSynchronizer::chk_global_wait_list_and_count(outputStream * out,
3073 int *error_cnt_p) {
3074 int chk_om_wait_count = 0;
3075 ObjectMonitor* cur = NULL;
3076 ObjectMonitor* next = NULL;
3077 if (mark_list_head(&g_wait_list, &cur, &next)) {
3078 // Marked the global wait list head so process the list.
3079 while (true) {
3080       // Rules for g_wait_list are the same as for g_free_list:
3081 chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
3082 chk_om_wait_count++;
3083
3084 mark_next_for_traversal(&cur, &next);
3085 if (cur == NULL) {
3086 break;
3087 }
3088 }
3089 }
3090 if (g_om_wait_count == chk_om_wait_count) {
3091 out->print_cr("g_om_wait_count=%d equals chk_om_wait_count=%d",
3092 g_om_wait_count, chk_om_wait_count);
3093 } else {
3094 out->print_cr("ERROR: g_om_wait_count=%d is not equal to "
3095 "chk_om_wait_count=%d", g_om_wait_count, chk_om_wait_count);
3096 *error_cnt_p = *error_cnt_p + 1;
3097 }
3098 }
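// Note on severity (an inference, not stated in this file): a wait list
// count mismatch is an ERROR while the free list mismatch above is only
// a WARNING. g_wait_list is presumably only updated by the deflation
// code at controlled points, so the audit should never observe it
// mid-update the way it can the lock free free and in-use lists.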
3099
3100 // Check the global in-use list and count; log the results of the checks.
3101 void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
3102 int *error_cnt_p) {
3103 int chk_om_in_use_count = 0;
3104 ObjectMonitor* cur = NULL;
3105 ObjectMonitor* next = NULL;
3106 if (mark_list_head(&g_om_in_use_list, &cur, &next)) {
3107 // Marked the global in-use list head so process the list.
3108 while (true) {
3109 chk_in_use_entry(NULL /* jt */, cur, out, error_cnt_p);
3110 chk_om_in_use_count++;
3111
3112 mark_next_for_traversal(&cur, &next);
3113 if (cur == NULL) {
3114 break;
3115 }
3116 }
3117 }
3118 if (g_om_in_use_count == chk_om_in_use_count) {
3119 out->print_cr("g_om_in_use_count=%d equals chk_om_in_use_count=%d",
3120 g_om_in_use_count, chk_om_in_use_count);
3121 } else {
3122 // With lock free access to the monitor lists, it is possible for
3123 // an exiting JavaThread to put its in-use ObjectMonitors on the
3124 // global in-use list after chk_om_in_use_count is calculated above.
3125 out->print_cr("WARNING: g_om_in_use_count=%d is not equal to chk_om_in_use_count=%d",
3126 g_om_in_use_count, chk_om_in_use_count);
3127 }
3128 }
3129
3130 // Check an in-use monitor entry; log any errors.
3131 void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
3132 outputStream * out, int *error_cnt_p) {
3133 if (n->header().value() == 0) {
3134 if (jt != NULL) {
3135 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3136 ": in-use per-thread monitor must have non-NULL _header "
3137 "field.", p2i(jt), p2i(n));
3138 } else {
3139 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
3140 "must have non-NULL _header field.", p2i(n));
3141 }
3142 *error_cnt_p = *error_cnt_p + 1;
3143 }
3144 if (n->object() == NULL) {
3145 if (jt != NULL) {
3146 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3174 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3175 ": in-use per-thread monitor's object does not refer "
3176 "to the same monitor: obj=" INTPTR_FORMAT ", mark="
3177 INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
3178 p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
3179 } else {
3180 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
3181 "monitor's object does not refer to the same monitor: obj="
3182 INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
3183 INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
3184 }
3185 *error_cnt_p = *error_cnt_p + 1;
3186 }
3187 }
3188
3189 // Check the thread's free list and count; log the results of the checks.
3190 void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
3191 outputStream * out,
3192 int *error_cnt_p) {
3193 int chk_om_free_count = 0;
3194 ObjectMonitor* cur = NULL;
3195 ObjectMonitor* next = NULL;
3196 if (mark_list_head(&jt->om_free_list, &cur, &next)) {
3197 // Marked the per-thread free list head so process the list.
3198 while (true) {
3199 chk_free_entry(jt, cur, out, error_cnt_p);
3200 chk_om_free_count++;
3201
3202 mark_next_for_traversal(&cur, &next);
3203 if (cur == NULL) {
3204 break;
3205 }
3206 }
3207 }
3208 if (jt->om_free_count == chk_om_free_count) {
3209 out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals "
3210 "chk_om_free_count=%d", p2i(jt), jt->om_free_count,
3211 chk_om_free_count);
3212 } else {
3213 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not "
3214 "equal to chk_om_free_count=%d", p2i(jt), jt->om_free_count,
3215 chk_om_free_count);
3216 *error_cnt_p = *error_cnt_p + 1;
3217 }
3218 }
3219
3220 // Check the thread's in-use list and count; log the results of the checks.
3221 void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
3222 outputStream * out,
3223 int *error_cnt_p) {
3224 int chk_om_in_use_count = 0;
3225 ObjectMonitor* cur = NULL;
3226 ObjectMonitor* next = NULL;
3227 if (mark_list_head(&jt->om_in_use_list, &cur, &next)) {
3228 // Marked the per-thread in-use list head so process the list.
3229 while (true) {
3230 chk_in_use_entry(jt, cur, out, error_cnt_p);
3231 chk_om_in_use_count++;
3232
3233 mark_next_for_traversal(&cur, &next);
3234 if (cur == NULL) {
3235 break;
3236 }
3237 }
3238 }
3239 if (jt->om_in_use_count == chk_om_in_use_count) {
3240 out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
3241 "chk_om_in_use_count=%d", p2i(jt),
3242 jt->om_in_use_count, chk_om_in_use_count);
3243 } else {
3244 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
3245 "equal to chk_om_in_use_count=%d", p2i(jt),
3246 jt->om_in_use_count, chk_om_in_use_count);
3247 *error_cnt_p = *error_cnt_p + 1;
3248 }
3249 }
3250
3251 // Log details about ObjectMonitors on the in-use lists. The 'BHL'
3252 // flags indicate why the entry is in-use, 'object' and 'object type'
3253 // indicate the associated object and its type.
3254 void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) {
3255 stringStream ss;
3256 if (g_om_in_use_count > 0) {
3257 out->print_cr("In-use global monitor info:");
3258 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
3259 out->print_cr("%18s %s %7s %18s %18s",
3260 "monitor", "BHL", "ref_cnt", "object", "object type");
3261 out->print_cr("================== === ======= ================== ==================");
3262 ObjectMonitor* cur = NULL;
3263 ObjectMonitor* next = NULL;
3264 if (mark_list_head(&g_om_in_use_list, &cur, &next)) {
3265 // Marked the global in-use list head so process the list.
3266 while (true) {
3267 const oop obj = (oop) cur->object();
3268 const markWord mark = cur->header();
3269 ResourceMark rm;
3270 out->print(INTPTR_FORMAT " %d%d%d %7d " INTPTR_FORMAT " %s",
3271 p2i(cur), cur->is_busy() != 0, mark.hash() != 0,
3272 cur->owner() != NULL, (int)cur->ref_count(), p2i(obj),
3273 obj->klass()->external_name());
3274 if (cur->is_busy() != 0) {
3275 out->print(" (%s)", cur->is_busy_to_string(&ss));
3276 ss.reset();
3277 }
3278 out->cr();
3279
3280 mark_next_for_traversal(&cur, &next);
3281 if (cur == NULL) {
3282 break;
3283 }
3284 }
3285 }
3286 }
3287
3288 out->print_cr("In-use per-thread monitor info:");
3289 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
3290 out->print_cr("%18s %18s %s %7s %18s %18s",
3291 "jt", "monitor", "BHL", "ref_cnt", "object", "object type");
3292 out->print_cr("================== ================== === ======= ================== ==================");
3293 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
3294 ObjectMonitor* cur = NULL;
3295 ObjectMonitor* next = NULL;
3296 if (mark_list_head(&jt->om_in_use_list, &cur, &next)) {
3297       // Marked the per-thread in-use list head so process the list.
3298 while (true) {
3299 const oop obj = (oop) cur->object();
3300 const markWord mark = cur->header();
3301 ResourceMark rm;
3302 out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d %7d "
3303 INTPTR_FORMAT " %s", p2i(jt), p2i(cur), cur->is_busy() != 0,
3304 mark.hash() != 0, cur->owner() != NULL, (int)cur->ref_count(),
3305 p2i(obj), obj->klass()->external_name());
3306 if (cur->is_busy() != 0) {
3307 out->print(" (%s)", cur->is_busy_to_string(&ss));
3308 ss.reset();
3309 }
3310 out->cr();
3311
3312 mark_next_for_traversal(&cur, &next);
3313 if (cur == NULL) {
3314 break;
3315 }
3316 }
3317 }
3318 }
3319
3320 out->flush();
3321 }
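// Sample of the output produced above, with made-up values for
// illustration (the trailing "(is_busy: ...)" detail comes from
// is_busy_to_string()):
//
//   In-use global monitor info:
//   (B -> is_busy, H -> has hash code, L -> lock status)
//              monitor BHL ref_cnt             object object type
//   ================== === ======= ================== ==================
//   0x00007f10cc070c00 101       1 0x00000000fe8f1d20 java.lang.Object (is_busy: ...)
//
// The BHL digits read left to right: busy (B=1), no cached hash (H=0),
// owner is non-NULL (L=1).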
3322
3323 // Log counts for the global and per-thread monitor lists and return
3324 // the population count.
3325 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
3326 int pop_count = 0;
3327 out->print_cr("%18s %10s %10s %10s %10s",
3328 "Global Lists:", "InUse", "Free", "Wait", "Total");
3329 out->print_cr("================== ========== ========== ========== ==========");
3330 out->print_cr("%18s %10d %10d %10d %10d", "", g_om_in_use_count,
3331 g_om_free_count, g_om_wait_count, g_om_population);
3332 pop_count += g_om_in_use_count + g_om_free_count;
3333 if (HandshakeAfterDeflateIdleMonitors) {
3334 pop_count += g_om_wait_count;
3335 }
3336
3337 out->print_cr("%18s %10s %10s %10s",
3338 "Per-Thread Lists:", "InUse", "Free", "Provision");
3339 out->print_cr("================== ========== ========== ==========");
3340
3341 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
3342 out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
3343 jt->om_in_use_count, jt->om_free_count, jt->om_free_provision);
3344 pop_count += jt->om_in_use_count + jt->om_free_count;
3345 }
3346 return pop_count;
3347 }
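// Worked example of the pop_count arithmetic above, with made-up counts:
// g_om_in_use_count=5, g_om_free_count=3, g_om_wait_count=2 (with
// HandshakeAfterDeflateIdleMonitors enabled), and two JavaThreads with
// in-use/free counts of 1/2 and 0/4:
//   pop_count = (5 + 3 + 2) + (1 + 2) + (0 + 4) = 17
// In a quiescent VM that sum should equal g_om_population; the caller
// compares the two and warns on a mismatch (see the g_om_population
// check earlier in this file).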
3348
3349 #ifndef PRODUCT
3350
3351 // Check if monitor belongs to the monitor cache
3352 // The list is grow-only so it's *relatively* safe to traverse
3353 // the list of extant blocks without taking a lock.
3354
3355 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
3356 PaddedObjectMonitor* block = g_block_list;
3357 while (block != NULL) {
3358 assert(block->object() == CHAINMARKER, "must be a block header");
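    // Index 0 of each block is the header monitor (its object field is
    // CHAINMARKER, as asserted above), so the strict '>' below
    // deliberately excludes it; allocatable monitors occupy indices 1
    // through _BLOCKSIZE - 1.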
3359 if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
3360 address mon = (address)monitor;
3361 address blk = (address)block;
3362 size_t diff = mon - blk;
3363 assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
3364 return 1;
3365 }
3366 // unmarked_next() is not needed with g_block_list (no next field
3367 // marking) and no load_acquire() needed because _next_om is
3368 // updated before g_block_list is changed with cmpxchg().
3369 block = (PaddedObjectMonitor*)block->_next_om;
3370 }
3371 return 0;
3372 }
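// Typical use is from assertion checks elsewhere in the VM, e.g.
// (illustrative):
//   assert(ObjectSynchronizer::verify_objmon_isinpool(mid), "monitor is invalid");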
3373
3374 #endif
|