112 DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
113 return 0;
114 }
115
116 #define NINFLATIONLOCKS 256
117 static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];
118
119 // global list of blocks of monitors
120 PaddedObjectMonitor* volatile ObjectSynchronizer::g_block_list = NULL;
121 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
122 bool volatile ObjectSynchronizer::_is_special_deflation_requested = false;
123 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
124
125 // Global ObjectMonitor free list. Newly allocated and deflated
126 // ObjectMonitors are prepended here.
127 static ObjectMonitor* volatile g_free_list = NULL;
128 // Global ObjectMonitor in-use list. When a JavaThread is exiting,
129 // ObjectMonitors on its per-thread in-use list are prepended here.
130 static ObjectMonitor* volatile g_om_in_use_list = NULL;
131
132 static volatile intptr_t gListLock = 0; // protects global monitor lists
133 static volatile int g_om_free_count = 0; // # on g_free_list
134 static volatile int g_om_in_use_count = 0; // # on g_om_in_use_list
135 static volatile int g_om_population = 0; // # Extant -- in circulation
136
137 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
138
139
140 // =====================> Quick functions
141
142 // The quick_* forms are special fast-path variants used to improve
143 // performance. In the simplest case, a "quick_*" implementation could
144 // simply return false, in which case the caller will perform the necessary
145 // state transitions and call the slow-path form.
146 // The fast-path is designed to handle frequently arising cases in an efficient
147 // manner and is just a degenerate "optimistic" variant of the slow-path.
148 // returns true -- to indicate the call was satisfied.
149 // returns false -- to indicate the call needs the services of the slow-path.
150 // A no-loitering ordinance is in effect for code in the quick_* family
151 // of operators: safepoints or indefinite blocking (blocking that might span a
152 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
153 // entry.
154 //
155 // Consider: An interesting optimization is to have the JIT recognize the
156 // following common idiom:
157 // synchronized (someobj) { .... ; notify(); }
158 // That is, we find a notify() or notifyAll() call that immediately precedes
159 // the monitorexit operation. In that case the JIT could fuse the operations
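
// Illustrative sketch (not part of this file): the quick_* contract in
// miniature. The fast path must not safepoint or block; it either completes
// the operation and returns true, or returns false so the caller performs
// the state transitions and invokes the slow path. All names below are
// hypothetical.
#if 0
bool quick_op_sketch(JavaThread* self, oop obj) {
  // Handle only the common, uncontended shape; no loitering allowed here.
  if (fast_path_applies(self, obj)) {   // hypothetical predicate
    do_the_cheap_thing(self, obj);      // hypothetical; must not block
    return true;                        // call satisfied
  }
  return false;                         // caller falls back to the slow path
}
#endif
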
994 while (block != NULL) {
995 assert(block->object() == CHAINMARKER, "must be a block header");
996 for (int i = _BLOCKSIZE - 1; i > 0; i--) {
997 ObjectMonitor* mid = (ObjectMonitor *)(block + i);
998 if (mid->is_active()) {
999 ObjectMonitorHandle omh(mid);
1000
1001 if (mid->object() == NULL ||
1002 (AsyncDeflateIdleMonitors && mid->ref_count() < 0)) {
1003 // Only process with the closure if the object is set.
1004 // For async deflation, we can race here if the monitor is not owned!
1005 // The ref_count bump above (in the ObjectMonitorHandle ctor)
1006 // will cause subsequent async deflation to skip this monitor.
1007 // However, a previous or concurrent async deflation is a race,
1008 // so skip this ObjectMonitor if it is being async deflated.
1009 continue;
1010 }
1011 closure->do_monitor(mid);
1012 }
1013 }
1014 block = (PaddedObjectMonitor*)block->_next_om;
1015 }
1016 }
1017
1018 static bool monitors_used_above_threshold() {
1019 if (g_om_population == 0) {
1020 return false;
1021 }
1022 if (MonitorUsedDeflationThreshold > 0) {
1023 int monitors_used = g_om_population - g_om_free_count;
1024 int monitor_usage = (monitors_used * 100LL) / g_om_population;
1025 return monitor_usage > MonitorUsedDeflationThreshold;
1026 }
1027 return false;
1028 }
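
// Worked example (illustrative, made-up numbers): the 100LL factor widens
// the multiply to 64 bits so a large g_om_population cannot overflow the
// int arithmetic before the divide.
#if 0
#include <cstdio>
int main() {
  int population = 50000;                              // extant monitors
  int free_count = 4000;                               // on free lists
  int used       = population - free_count;            // 46000
  int usage      = (int)((used * 100LL) / population); // 92 (percent)
  // With MonitorUsedDeflationThreshold == 90: 92 > 90 => deflation needed.
  std::printf("usage=%d%%\n", usage);
  return 0;
}
#endif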
1029
1030 // Returns true if MonitorBound is set (> 0) and if the specified
1031 // cnt is > MonitorBound. Otherwise returns false.
1032 static bool is_MonitorBound_exceeded(const int cnt) {
1033 const int mx = MonitorBound;
1034 return mx > 0 && cnt > mx;
1035 }
1036
1037 bool ObjectSynchronizer::is_async_deflation_needed() {
1038 if (!AsyncDeflateIdleMonitors) {
1039 return false;
1040 }
1041 if (is_async_deflation_requested()) {
1042 // Async deflation request.
1043 return true;
1044 }
1045 if (AsyncDeflationInterval > 0 &&
1046 time_since_last_async_deflation_ms() > AsyncDeflationInterval &&
1047 monitors_used_above_threshold()) {
1048 // It's been longer than our specified deflate interval and there
1049 // are too many monitors in use. We don't deflate more frequently
1050 // than AsyncDeflationInterval (unless is_async_deflation_requested)
1051 // in order to not swamp the ServiceThread.
1052 _last_async_deflation_time_ns = os::javaTimeNanos();
1053 return true;
1054 }
1055 if (is_MonitorBound_exceeded(g_om_population - g_om_free_count)) {
1056 // Not enough ObjectMonitors on the global free list.
1057 return true;
1058 }
1059 return false;
1060 }
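
// Simplified sketch (assumed shape, not the actual ServiceThread code) of
// how this predicate is consumed: a periodic thread wakes on Service_lock,
// checks the predicate, and runs the async deflation pass.
#if 0
void service_thread_loop_sketch() {
  for (;;) {
    wait_on_Service_lock_with_timeout();   // hypothetical helper
    if (ObjectSynchronizer::is_async_deflation_needed()) {
      ObjectSynchronizer::deflate_global_idle_monitors_using_JT();
    }
  }
}
#endif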
1061
1062 bool ObjectSynchronizer::is_safepoint_deflation_needed() {
1063 if (!AsyncDeflateIdleMonitors) {
1064 if (monitors_used_above_threshold()) {
1065 // Too many monitors in use.
1066 return true;
1067 }
1068 return false;
1069 }
1070 if (is_special_deflation_requested()) {
1071 // For AsyncDeflateIdleMonitors only do a safepoint deflation
1072 // if there is a special deflation request.
1073 return true;
1074 }
1075 return false;
1076 }
1077
1078 jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
1079 return (os::javaTimeNanos() - _last_async_deflation_time_ns) / (NANOUNITS / MILLIUNITS);
1080 }
1081
1082 void ObjectSynchronizer::oops_do(OopClosure* f) {
1083 // We only scan the global used list here (for moribund threads), and
1084 // the thread-local monitors in Thread::oops_do().
1085 global_used_oops_do(f);
1086 }
1087
1088 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
1089 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1090 list_oops_do(g_om_in_use_list, f);
1091 }
1092
1093 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
1094 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1095 list_oops_do(thread->om_in_use_list, f);
1096 }
1097
1098 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
1099 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1100 // The oops_do() phase does not overlap with monitor deflation
1101 // so no need to update the ObjectMonitor's ref_count for this
1102 // ObjectMonitor* use.
1103 for (ObjectMonitor* mid = list; mid != NULL; mid = mid->_next_om) {
1104 if (mid->object() != NULL) {
1105 f->do_oop((oop*)mid->object_addr());
1106 }
1107 }
1108 }
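
// Illustrative sketch: a minimal OopClosure that counts the oops kept alive
// via in-use monitors. Real closures are supplied by the GC; this one is
// hypothetical.
#if 0
class CountMonitorOopsClosure : public OopClosure {
 public:
  int _count;
  CountMonitorOopsClosure() : _count(0) {}
  virtual void do_oop(oop* p)       { if (*p != NULL) _count++; }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
// At a safepoint: CountMonitorOopsClosure cl;
//                 ObjectSynchronizer::oops_do(&cl);
#endif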
1109
1110
1111 // -----------------------------------------------------------------------------
1112 // ObjectMonitor Lifecycle
1113 // -----------------------
1114 // Inflation unlinks monitors from the global g_free_list and
1115 // associates them with objects. Deflation -- which occurs at
1116 // STW-time -- disassociates idle monitors from objects. Such
1117 // scavenged monitors are returned to the g_free_list.
1118 //
1119 // The global list is protected by gListLock. All the critical sections
1120 // are short and operate in constant-time.
1121 //
1122 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
1123 //
1124 // Lifecycle:
1125 // -- unassigned and on the global free list
1126 // -- unassigned and on a thread's private om_free_list
1127 // -- assigned to an object. The object is inflated and the mark refers
1128 // to the objectmonitor.
1129
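// The allocation states referenced in this file (ObjectMonitor::Free, New
// and Old, via set_allocation_state()) track that lifecycle. Illustrative
// transition summary:
//   Free -> New : om_alloc() hands the monitor to a thread
//   New  -> Old : inflate() publishes the monitor in an object's mark word
//   Old  -> Free: deflation returns the idle monitor to a free list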
1130
1131 // Constraining monitor pool growth via MonitorBound ...
1132 //
1133 // If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
1134 //
1135 // When safepoint deflation is being used (!AsyncDeflateIdleMonitors):
1136 // The monitor pool is grow-only. We scavenge at STW safepoint-time, but
1137 // the rate of scavenging is driven primarily by GC. As such, we can find
1138 // an inordinate number of monitors in circulation.
1139 // To avoid that scenario we can artificially induce a STW safepoint
1140 // if the pool appears to be growing past some reasonable bound.
1141 // Generally we favor time in space-time tradeoffs, but as there's no
1142 // natural back-pressure on the # of extant monitors we need to impose some
1143 // type of limit. Beware that if MonitorBound is set to too low a value
1144 // we could just loop. In addition, if MonitorBound is set to a low value
1145 // we'll incur more safepoints, which are harmful to performance.
1146 // See also: GuaranteedSafepointInterval
1147 //
1148 // The current implementation uses asynchronous VM operations.
1149 //
1150 // When safepoint deflation is being used and MonitorBound is set, the
1151 // boundary applies to
1152 // (g_om_population - g_om_free_count)
1153 // i.e., if there are not enough ObjectMonitors on the global free list,
1154 // then a safepoint deflation is induced. Picking a good MonitorBound value
1155 // is non-trivial.
1156 //
1157 // When async deflation is being used:
1158 // The monitor pool is still grow-only. Async deflation is requested
1159 // by a safepoint's cleanup phase or by the ServiceThread at periodic
1160 // intervals when is_async_deflation_needed() returns true. In
1161 // addition to other policies that are checked, if there are not
1162 // enough ObjectMonitors on the global free list, then
1163 // is_async_deflation_needed() will return true. The ServiceThread
1164 // calls deflate_global_idle_monitors_using_JT() and also sets the
1165 // per-thread om_request_deflation flag as needed.
1166
1167 static void InduceScavenge(Thread* self, const char * Whence) {
1168 assert(!AsyncDeflateIdleMonitors, "is not used by async deflation");
1169
1170 // Induce STW safepoint to trim monitors
1171 // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
1172 // More precisely, trigger an asynchronous STW safepoint as the number
1173 // of active monitors passes the specified threshold.
1174 // TODO: assert thread state is reasonable
1175
1176 if (ForceMonitorScavenge == 0 && Atomic::xchg(1, &ForceMonitorScavenge) == 0) {
1177 // Induce a 'null' safepoint to scavenge monitors.
1178 // The VM_Operation instance must be heap allocated as the op will be enqueued
1179 // and posted to the VMThread and have a lifespan longer than that of this
1180 // activation record. The VMThread will delete the op when completed.
1181 VMThread::execute(new VM_ScavengeMonitors());
1182 }
1183 }
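
// The ForceMonitorScavenge handshake above is a one-shot latch: the first
// thread to flip 0 -> 1 enqueues the VM op; later callers see 1 and skip.
// (finish_deflate_idle_monitors() resets the latch to 0.) A standalone
// analogue (illustrative, std::atomic):
#if 0
#include <atomic>
static std::atomic<int> latch{0};
void maybe_trigger_once() {
  if (latch.load() == 0 && latch.exchange(1) == 0) {
    // Exactly one caller reaches this point until the latch is reset to 0.
  }
}
#endif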
1184
1185 ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self,
1186 const InflateCause cause) {
1187 // A large MAXPRIVATE value reduces both list lock contention
1188 // and list coherency traffic, but also tends to increase the
1189 // number of ObjectMonitors in circulation as well as the STW
1190 // scavenge costs. As usual, we lean toward time in space-time
1191 // tradeoffs.
1192 const int MAXPRIVATE = 1024;
1193
1194 if (AsyncDeflateIdleMonitors) {
1195 JavaThread* jt = (JavaThread *)self;
1196 if (jt->om_request_deflation && jt->om_in_use_count > 0 &&
1197 cause != inflate_cause_vm_internal) {
1198 // Deflate any per-thread idle monitors for this JavaThread if
1199 // this is not an internal inflation; internal inflations can
1200 // occur in places where it is not safe to pause for a safepoint.
1201 // Clean up your own mess (Gibbs Rule 45). Otherwise, skip this
1202 // deflation. deflate_global_idle_monitors_using_JT() is called
1203 // by the ServiceThread. Per-thread async deflation is triggered
1204 // by the ServiceThread via om_request_deflation.
1205 debug_only(jt->check_for_valid_safepoint_state(false);)
1206 ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT(jt);
1207 }
1208 }
1209
1210 stringStream ss;
1211 for (;;) {
1212 ObjectMonitor* m;
1213
1214 // 1: try to allocate from the thread's local om_free_list.
1215 // Threads will attempt to allocate first from their local list, then
1216 // from the global list, and only after those attempts fail will the thread
1217 // attempt to instantiate new monitors. Thread-local free lists take
1218 // heat off the gListLock and improve allocation latency, as well as reducing
1219 // coherency traffic on the shared global list.
1220 m = self->om_free_list;
1221 if (m != NULL) {
1222 self->om_free_list = m->_next_om;
1223 self->om_free_count--;
1224 guarantee(m->object() == NULL, "invariant");
1225 m->set_allocation_state(ObjectMonitor::New);
1226 m->_next_om = self->om_in_use_list;
1227 self->om_in_use_list = m;
1228 self->om_in_use_count++;
1229 return m;
1230 }
1231
1232 // 2: try to allocate from the global g_free_list
1233 // CONSIDER: use muxTry() instead of muxAcquire().
1234 // If the muxTry() fails then drop immediately into case 3.
1235 // If we're using thread-local free lists then try
1236 // to reprovision the caller's free list.
1237 if (g_free_list != NULL) {
1238 // Reprovision the thread's om_free_list.
1239 // Use bulk transfers to reduce the allocation rate and heat
1240 // on various locks.
1241 Thread::muxAcquire(&gListLock, "om_alloc(1)");
1242 for (int i = self->om_free_provision; --i >= 0 && g_free_list != NULL;) {
1243 g_om_free_count--;
1244 ObjectMonitor* take = g_free_list;
1245 g_free_list = take->_next_om;
1246 guarantee(take->object() == NULL, "invariant");
1247 if (AsyncDeflateIdleMonitors) {
1248 // We allowed 3 field values to linger during async deflation.
1249 // We clear header and restore ref_count here, but we leave
1250 // owner == DEFLATER_MARKER so the simple C2 ObjectMonitor
1251 // enter optimization can no longer race with async deflation
1252 // and reuse.
1253 take->set_header(markWord::zero());
1254 if (take->ref_count() < 0) {
1255 // Add back max_jint to restore the ref_count field to its
1256 // proper value.
1257 Atomic::add(max_jint, &take->_ref_count);
1258
1259 assert(take->ref_count() >= 0, "must not be negative: ref_count=%d",
1260 take->ref_count());
1261 }
1262 }
1263 take->Recycle();
1264 assert(take->is_free(), "invariant");
1265 om_release(self, take, false);
1266 }
1267 Thread::muxRelease(&gListLock);
1268 self->om_free_provision += 1 + (self->om_free_provision/2);
1269 if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
1270
1271 if (!AsyncDeflateIdleMonitors &&
1272 is_MonitorBound_exceeded(g_om_population - g_om_free_count)) {
1273 // Not enough ObjectMonitors on the global free list.
1274 // We can't safely induce a STW safepoint from om_alloc() as our thread
1275 // state may not be appropriate for such activities and callers may hold
1276 // naked oops, so instead we defer the action.
1277 InduceScavenge(self, "om_alloc");
1278 }
1279 continue;
1280 }
1281
1282 // 3: allocate a block of new ObjectMonitors
1283 // Both the local and global free lists are empty -- resort to malloc().
1284 // In the current implementation ObjectMonitors are TSM - immortal.
1285 // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
1286 // each ObjectMonitor to start at the beginning of a cache line,
1287 // so we use align_up().
1288 // A better solution would be to use C++ placement-new.
1289 // BEWARE: As it stands currently, we don't run the ctors!
1290 assert(_BLOCKSIZE > 1, "invariant");
1291 size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
1292 PaddedObjectMonitor* temp;
1293 size_t aligned_size = neededsize + (OM_CACHE_LINE_SIZE - 1);
1294 void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size,
1295 mtInternal);
1296 temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, OM_CACHE_LINE_SIZE);
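// (Illustrative arithmetic: over-allocating by OM_CACHE_LINE_SIZE - 1 bytes
// guarantees the buffer contains a cache-line-aligned address; e.g. with a
// 64-byte line, a raw pointer ending in 0x...1010 rounds up to 0x...1040.)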
1297
1298 // NOTE: (almost) no way to recover if allocation failed.
1299 // We might be able to induce a STW safepoint and scavenge enough
1300 // ObjectMonitors to permit progress.
1301 if (temp == NULL) {
1302 vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
1303 "Allocate ObjectMonitors");
1304 }
1305 (void)memset((void *) temp, 0, neededsize);
1306
1307 // Format the block.
1308 // Initialize the linked list; each monitor points to its next,
1309 // forming the singly-linked free list. The very first monitor
1310 // will point to the next block, which forms the block list.
1311 // The trick of using the 1st element in the block as g_block_list
1312 // linkage should be reconsidered. A better implementation would
1313 // look like: class Block { Block* next; int N; ObjectMonitor Body[N]; }
1314
1315 for (int i = 1; i < _BLOCKSIZE; i++) {
1316 temp[i]._next_om = (ObjectMonitor *)&temp[i+1];
1317 assert(temp[i].is_free(), "invariant");
1318 }
1319
1320 // Terminate the last monitor as the end of the list.
1321 temp[_BLOCKSIZE - 1]._next_om = NULL;
1322
1323 // Element [0] is reserved for global list linkage
1324 temp[0].set_object(CHAINMARKER);
1325
1326 // Consider carving out this thread's current request from the
1327 // block in hand. This avoids some lock traffic and redundant
1328 // list activity.
1329
1330 // Acquire the gListLock to manipulate g_block_list and g_free_list.
1331 // An Oyama-Taura-Yonezawa scheme might be more efficient.
1332 Thread::muxAcquire(&gListLock, "om_alloc(2)");
1333 g_om_population += _BLOCKSIZE-1;
1334 g_om_free_count += _BLOCKSIZE-1;
1335
1336 // Add the new block to the list of extant blocks (g_block_list).
1337 // The very first ObjectMonitor in a block is reserved and dedicated.
1338 // It serves as blocklist "next" linkage.
1339 temp[0]._next_om = g_block_list;
1340 // There are lock-free uses of g_block_list so make sure that
1341 // the previous stores happen before we update g_block_list.
1342 OrderAccess::release_store(&g_block_list, temp);
1343
1344 // Add the new string of ObjectMonitors to the global free list
1345 temp[_BLOCKSIZE - 1]._next_om = g_free_list;
1346 g_free_list = temp + 1;
1347 Thread::muxRelease(&gListLock);
1348 }
1349 }
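
// om_alloc() above is a classic three-tier allocator. A standalone analogue
// (illustrative; Node, LocalCache and GlobalPool are hypothetical types):
#if 0
Node* tiered_alloc(LocalCache* tlc, GlobalPool* pool) {
  for (;;) {
    if (Node* n = tlc->pop()) return n;    // 1: thread-local free list
    if (pool->bulk_refill(tlc)) continue;  // 2: en-masse transfer, retry
    pool->grow_by_new_block(tlc);          // 3: malloc a block, retry
  }
}
#endif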
1350
1351 // Place "m" on the caller's private per-thread om_free_list.
1352 // In practice there's no need to clamp or limit the number of
1353 // monitors on a thread's om_free_list as the only non-allocation time
1354 // we'll call om_release() is to return a monitor to the free list after
1355 // a CAS attempt failed. This doesn't allow unbounded #s of monitors to
1356 // accumulate on a thread's free list.
1357 //
1358 // Key constraint: all ObjectMonitors on a thread's free list and the global
1359 // free list must have their object field set to null. This prevents the
1360 // scavenger -- deflate_monitor_list() or deflate_monitor_list_using_JT()
1361 // -- from reclaiming them while we are trying to release them.
1362
1363 void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
1364 bool from_per_thread_alloc) {
1365 guarantee(m->header().value() == 0, "invariant");
1366 guarantee(m->object() == NULL, "invariant");
1367 stringStream ss;
1368 guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
1369 "%s, recursions=" INTPTR_FORMAT, m->is_busy_to_string(&ss),
1370 m->_recursions);
1371 m->set_allocation_state(ObjectMonitor::Free);
1372 // _next_om is used for both per-thread in-use and free lists so
1373 // we have to remove 'm' from the in-use list first (as needed).
1374 if (from_per_thread_alloc) {
1375 // Need to remove 'm' from om_in_use_list.
1376 ObjectMonitor* cur_mid_in_use = NULL;
1377 bool extracted = false;
1378 for (ObjectMonitor* mid = self->om_in_use_list; mid != NULL; cur_mid_in_use = mid, mid = mid->_next_om) {
1379 if (m == mid) {
1380 // extract from per-thread in-use list
1381 if (mid == self->om_in_use_list) {
1382 self->om_in_use_list = mid->_next_om;
1383 } else if (cur_mid_in_use != NULL) {
1384 cur_mid_in_use->_next_om = mid->_next_om; // maintain the current thread in-use list
1385 }
1386 extracted = true;
1387 self->om_in_use_count--;
1388 break;
1389 }
1390 }
1391 assert(extracted, "Should have extracted from in-use list");
1392 }
1393
1394 m->_next_om = self->om_free_list;
1395 guarantee(m->is_free(), "invariant");
1396 self->om_free_list = m;
1397 self->om_free_count++;
1398 }
1399
1400 // Return ObjectMonitors on a moribund thread's free and in-use
1401 // lists to the appropriate global lists. The ObjectMonitors on the
1402 // per-thread in-use list may still be in use by other threads.
1403 //
1404 // We currently call om_flush() from Threads::remove() before the
1405 // thread has been excised from the thread list and is no longer a
1406 // mutator. This means that om_flush() cannot run concurrently with
1407 // a safepoint and interleave with deflate_idle_monitors(). In
1408 // particular, this ensures that the thread's in-use monitors are
1409 // scanned by a GC safepoint, either via Thread::oops_do() (before
1410 // om_flush() is called) or via ObjectSynchronizer::oops_do() (after
1411 // om_flush() is called).
1412 //
1413 // With AsyncDeflateIdleMonitors, deflate_global_idle_monitors_using_JT()
1414 // and deflate_per_thread_idle_monitors_using_JT() (in another thread) can
1415 // run at the same time as om_flush() so we have to be careful.
1416
1417 void ObjectSynchronizer::om_flush(Thread* self) {
1418 int in_use_count = 0;
1419 ObjectMonitor* in_use_list = self->om_in_use_list;
1420 ObjectMonitor* in_use_tail = NULL;
1421 if (in_use_list != NULL) {
1422 // The thread is going away, however the ObjectMonitors on the
1423 // om_in_use_list may still be in-use by other threads. Link
1424 // them to in_use_tail, which will be linked into the global
1425 // in-use list g_om_in_use_list below, under the gListLock.
1426 for (ObjectMonitor* cur_om = in_use_list; cur_om != NULL; cur_om = cur_om->_next_om) {
1427 in_use_tail = cur_om;
1428 in_use_count++;
1429 ADIM_guarantee(cur_om->is_active(), "invariant");
1430 }
1431 guarantee(in_use_tail != NULL, "invariant");
1432 ADIM_guarantee(self->om_in_use_count == in_use_count, "in-use count off");
1433 self->om_in_use_list = NULL;
1434 self->om_in_use_count = 0;
1435 }
1436
1437 int free_count = 0;
1438 ObjectMonitor* free_list = self->om_free_list;
1439 ObjectMonitor* free_tail = NULL;
1440 if (free_list != NULL) {
1441 // The thread is going away. Set 'free_tail' to the last per-thread free
1442 // monitor which will be linked to g_free_list below under the gListLock.
1443 stringStream ss;
1444 for (ObjectMonitor* s = free_list; s != NULL; s = s->_next_om) {
1445 free_count++;
1446 free_tail = s;
1447 guarantee(s->object() == NULL, "invariant");
1448 guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
1449 }
1450 guarantee(free_tail != NULL, "invariant");
1451 ADIM_guarantee(self->om_free_count == free_count, "free-count off");
1452 self->om_free_list = NULL;
1453 self->om_free_count = 0;
1454 }
1455
1456 Thread::muxAcquire(&gListLock, "om_flush");
1457 if (free_tail != NULL) {
1458 free_tail->_next_om = g_free_list;
1459 g_free_list = free_list;
1460 g_om_free_count += free_count;
1461 }
1462
1463 if (in_use_tail != NULL) {
1464 in_use_tail->_next_om = g_om_in_use_list;
1465 g_om_in_use_list = in_use_list;
1466 g_om_in_use_count += in_use_count;
1467 }
1468
1469 Thread::muxRelease(&gListLock);
1470
1471 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1472 LogStreamHandle(Info, monitorinflation) lsh_info;
1473 LogStream* ls = NULL;
1474 if (log_is_enabled(Debug, monitorinflation)) {
1475 ls = &lsh_debug;
1476 } else if ((free_count != 0 || in_use_count != 0) &&
1477 log_is_enabled(Info, monitorinflation)) {
1478 ls = &lsh_info;
1479 }
1480 if (ls != NULL) {
1481 ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d"
1482 ", in_use_count=%d" ", om_free_provision=%d",
1483 p2i(self), free_count, in_use_count, self->om_free_provision);
1484 }
1485 }
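
// The splices in om_flush() are O(1): the list walks (needed anyway for the
// sanity counts) find the tails, so publishing a whole chain is two pointer
// stores under gListLock. Standalone shape (illustrative):
#if 0
struct N { N* next; };
void prepend_chain(N** global_head, N* chain_head, N* chain_tail) {
  chain_tail->next = *global_head;  // link local chain ahead of global list
  *global_head = chain_head;        // publish the new head (lock held here)
}
#endif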
1486
1487 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
1488 const oop obj,
1489 ObjectSynchronizer::InflateCause cause) {
1490 assert(event != NULL, "invariant");
1567 //
1568 // Note that we allocate the objectmonitor speculatively, _before_ attempting
1569 // to install INFLATING into the mark word. We originally installed INFLATING,
1570 // allocated the objectmonitor, and then finally STed the address of the
1571 // objectmonitor into the mark. This was correct, but artificially lengthened
1572 // the interval in which INFLATING appeared in the mark, thus increasing
1573 // the odds of inflation contention.
1574 //
1575 // We now use per-thread private objectmonitor free lists.
1576 // These lists are reprovisioned from the global free list outside the
1577 // critical INFLATING...ST interval. A thread can transfer
1578 // multiple objectmonitors en masse from the global free list to its local free list.
1579 // This reduces coherency traffic and lock contention on the global free list.
1580 // Using such local free lists, it doesn't matter if the om_alloc() call appears
1581 // before or after the CAS(INFLATING) operation.
1582 // See the comments in om_alloc().
1583
1584 LogStreamHandle(Trace, monitorinflation) lsh;
1585
1586 if (mark.has_locker()) {
1587 ObjectMonitor* m;
1588 if (!AsyncDeflateIdleMonitors || cause == inflate_cause_vm_internal) {
1589 // If !AsyncDeflateIdleMonitors or if an internal inflation, then
1590 // we won't stop for a potential safepoint in om_alloc.
1591 m = om_alloc(self, cause);
1592 } else {
1593 // If AsyncDeflateIdleMonitors and not an internal inflation, then
1594 // we may stop for a safepoint in om_alloc() so protect object.
1595 Handle h_obj(self, object);
1596 m = om_alloc(self, cause);
1597 object = h_obj(); // Refresh object.
1598 }
1599 // Optimistically prepare the objectmonitor - anticipate successful CAS
1600 // We do this before the CAS in order to minimize the length of time
1601 // in which INFLATING appears in the mark.
1602 m->Recycle();
1603 m->_Responsible = NULL;
1604 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // Consider: maintain by type/class
1605
1606 markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1607 if (cmp != mark) {
1608 om_release(self, m, true);
1609 continue; // Interference -- just retry
1610 }
1611
1612 // We've successfully installed INFLATING (0) into the mark-word.
1613 // This is the only case where 0 will appear in a mark-word.
1614 // Only the singular thread that successfully swings the mark-word
1615 // to 0 can perform (or more precisely, complete) inflation.
1616 //
1617 // Why do we CAS a 0 into the mark-word instead of just CASing the
1618 // mark-word from the stack-locked value directly to the new inflated state?
1676 }
1677 if (event.should_commit()) {
1678 post_monitor_inflate_event(&event, object, cause);
1679 }
1680 ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
1681 return;
1682 }
1683
1684 // CASE: neutral
1685 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1686 // If we know we're inflating for entry it's better to inflate by swinging a
1687 // pre-locked ObjectMonitor pointer into the object header. A successful
1688 // CAS inflates the object *and* confers ownership to the inflating thread.
1689 // In the current implementation we use a 2-step mechanism where we CAS()
1690 // to inflate and then CAS() again to try to swing _owner from NULL to self.
1691 // An inflateTry() method that we could call from fast_enter() and slow_enter()
1692 // would be useful.
1693
1694 // Catch if the object's header is not neutral (not locked and
1695 // not marked is what we care about here).
1696 ADIM_guarantee(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
1697 ObjectMonitor* m;
1698 if (!AsyncDeflateIdleMonitors || cause == inflate_cause_vm_internal) {
1699 // If !AsyncDeflateIdleMonitors or if an internal inflation, then
1700 // we won't stop for a potential safepoint in om_alloc.
1701 m = om_alloc(self, cause);
1702 } else {
1703 // If AsyncDeflateIdleMonitors and not an internal inflation, then
1704 // we may stop for a safepoint in om_alloc() so protect object.
1705 Handle h_obj(self, object);
1706 m = om_alloc(self, cause);
1707 object = h_obj(); // Refresh object.
1708 }
1709 // prepare m for installation - set monitor to initial state
1710 m->Recycle();
1711 m->set_header(mark);
1712 // If we leave _owner == DEFLATER_MARKER here, then the simple C2
1713 // ObjectMonitor enter optimization can no longer race with async
1714 // deflation and reuse.
1715 m->set_object(object);
1716 m->_Responsible = NULL;
1717 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class
1718
1719 omh_p->set_om_ptr(m);
1720 assert(m->is_new(), "freshly allocated monitor must be new");
1721 m->set_allocation_state(ObjectMonitor::Old);
1722
1723 if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
1724 m->set_header(markWord::zero());
1725 m->set_object(NULL);
1726 m->Recycle();
1727 omh_p->set_om_ptr(NULL);
1728 // om_release() will reset the allocation state
1729 om_release(self, m, true);
1730 m = NULL;
1731 continue;
1732 // interference - the markword changed - just retry.
1733 // The state-transitions are one-way, so there's no chance of
1734 // live-lock -- "Inflated" is an absorbing state.
1735 }
1736
1737 // Hopefully the performance counters are allocated on distinct
1738 // cache lines to avoid false sharing on MP systems ...
1739 OM_PERFDATA_OP(Inflations, inc());
1740 if (log_is_enabled(Trace, monitorinflation)) {
1741 ResourceMark rm(self);
1742 lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
1743 INTPTR_FORMAT ", type='%s'", p2i(object),
1762 // These operations are called at all safepoints, immediately after mutators
1763 // are stopped, but before any objects have moved. Collectively they traverse
1764 // the population of in-use monitors, deflating where possible. The scavenged
1765 // monitors are returned to the global monitor free list.
1766 //
1767 // Beware that we scavenge at *every* stop-the-world point. Having a large
1768 // number of monitors in-use could negatively impact performance. We also want
1769 // to minimize the total # of monitors in circulation, as they incur a small
1770 // footprint penalty.
1771 //
1772 // Perversely, the heap size -- and thus the STW safepoint rate --
1773 // typically drives the scavenge rate. Large heaps can mean infrequent GC,
1774 // which in turn can mean large(r) numbers of ObjectMonitors in circulation.
1775 // This is an unfortunate aspect of this design.
1776 //
1777 // For async deflation:
1778 // If a special deflation request is made, then the safepoint based
1779 // deflation mechanism is used. Otherwise, an async deflation request
1780 // is registered with the ServiceThread and it is notified.
1781
1782 void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* _counters) {
1783 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1784
1785 // The per-thread in-use lists are handled in
1786 // ParallelSPCleanupThreadClosure::do_thread().
1787
1788 if (!AsyncDeflateIdleMonitors || is_special_deflation_requested()) {
1789 // Use the older mechanism for the global in-use list or if a
1790 // special deflation has been requested before the safepoint.
1791 ObjectSynchronizer::deflate_idle_monitors(_counters);
1792 return;
1793 }
1794
1795 log_debug(monitorinflation)("requesting async deflation of idle monitors.");
1796 // Request deflation of idle monitors by the ServiceThread:
1797 set_is_async_deflation_requested(true);
1798 MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
1799 ml.notify_all();
1800 }
1801
1802 // Deflate a single monitor if not in-use
1803 // Return true if deflated, false if in-use
1804 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1805 ObjectMonitor** free_head_p,
1806 ObjectMonitor** free_tail_p) {
1807 bool deflated;
1808 // Normal case ... The monitor is associated with obj.
1809 const markWord mark = obj->mark();
1810 guarantee(mark == markWord::encode(mid), "should match: mark="
1811 INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
1830 "object=" INTPTR_FORMAT ", mark="
1831 INTPTR_FORMAT ", type='%s'", p2i(obj),
1832 mark.value(), obj->klass()->external_name());
1833 }
1834
1835 // Restore the header back to obj
1836 obj->release_set_mark(dmw);
1837 if (AsyncDeflateIdleMonitors) {
1838 // clear() expects the owner field to be NULL and we won't race
1839 // with the simple C2 ObjectMonitor enter optimization since
1840 // we're at a safepoint.
1841 mid->set_owner(NULL);
1842 }
1843 mid->clear();
1844
1845 assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
1846 p2i(mid->object()));
1847 assert(mid->is_free(), "invariant");
1848
1849 // Move the deflated ObjectMonitor to the working free list
1850 // defined by free_head_p and free_tail_p.
1851 if (*free_head_p == NULL) *free_head_p = mid;
1852 if (*free_tail_p != NULL) {
1853 // We append to the list so the caller can use mid->_next_om
1854 // to fix the linkages in its context.
1855 ObjectMonitor* prevtail = *free_tail_p;
1856 // Should have been cleaned up by the caller:
1857 assert(prevtail->_next_om == NULL, "must be NULL: _next_om="
1858 INTPTR_FORMAT, p2i(prevtail->_next_om));
1859 prevtail->_next_om = mid;
1860 }
1861 *free_tail_p = mid;
1862 // At this point, mid->_next_om still refers to its current
1863 // value and another ObjectMonitor's _next_om field still
1864 // refers to this ObjectMonitor. Those linkages have to be
1865 // cleaned up by the caller who has the complete context.
1866 deflated = true;
1867 }
1868 return deflated;
1869 }
1870
1871 // Deflate the specified ObjectMonitor if not in-use using a JavaThread.
1872 // Returns true if it was deflated and false otherwise.
1873 //
1874 // The async deflation protocol sets owner to DEFLATER_MARKER and
1875 // makes ref_count negative as signals to contending threads that
1876 // an async deflation is in progress. There are a number of checks
1877 // as part of the protocol to make sure that the calling thread has
1878 // not lost the race to a contending thread or to a thread that just
1879 // wants to use the ObjectMonitor*.
1942 const oop obj = (oop) mid->object();
1943 if (log_is_enabled(Trace, monitorinflation)) {
1944 ResourceMark rm;
1945 log_trace(monitorinflation)("deflate_monitor_using_JT: "
1946 "object=" INTPTR_FORMAT ", mark="
1947 INTPTR_FORMAT ", type='%s'",
1948 p2i(obj), obj->mark().value(),
1949 obj->klass()->external_name());
1950 }
1951
1952 // Install the old mark word if nobody else has already done it.
1953 mid->install_displaced_markword_in_object(obj);
1954 mid->clear_using_JT();
1955
1956 assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT,
1957 p2i(mid->object()));
1958 assert(mid->is_free(), "must be free: allocation_state=%d",
1959 (int) mid->allocation_state());
1960
1961 // Move the deflated ObjectMonitor to the working free list
1962 // defined by free_head_p and free_tail_p.
1963 if (*free_head_p == NULL) {
1964 // First one on the list.
1965 *free_head_p = mid;
1966 }
1967 if (*free_tail_p != NULL) {
1968 // We append to the list so the caller can use mid->_next_om
1969 // to fix the linkages in its context.
1970 ObjectMonitor* prevtail = *free_tail_p;
1971 // Should have been cleaned up by the caller:
1972 assert(prevtail->_next_om == NULL, "must be NULL: _next_om="
1973 INTPTR_FORMAT, p2i(prevtail->_next_om));
1974 prevtail->_next_om = mid;
1975 }
1976 *free_tail_p = mid;
1977
1978 // At this point, mid->_next_om still refers to its current
1979 // value and another ObjectMonitor's _next_om field still
1980 // refers to this ObjectMonitor. Those linkages have to be
1981 // cleaned up by the caller who has the complete context.
1982
1983 // We leave owner == DEFLATER_MARKER and ref_count < 0
1984 // to force any racing threads to retry.
1985 return true; // Success, ObjectMonitor has been deflated.
1986 }
1987
1988 // The owner was changed from DEFLATER_MARKER so we lost the
1989 // race since the ObjectMonitor is now busy.
1990
1991 // Add back max_jint to restore the ref_count field to its
1992 // proper value (which may not be what we saw above):
1993 Atomic::add(max_jint, &mid->_ref_count);
1994
1995 assert(mid->ref_count() >= 0, "must not be negative: ref_count=%d",
1996 mid->ref_count());
1997 return false;
1998 }
1999
2000 // The ref_count was no longer 0 so we lost the race since the
2001 // ObjectMonitor is now busy or the ObjectMonitor* is now in use.
2002 // Restore owner to NULL if it is still DEFLATER_MARKER:
2003 Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER);
2004 }
2005
2006 // The owner field is no longer NULL so we lost the race since the
2007 // ObjectMonitor is now busy.
2008 return false;
2009 }
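
// The protocol above in miniature: claim the owner field, then poison the
// ref_count, backing out of either step on failure. Standalone analogue
// (illustrative, std::atomic; DEFLATER stands in for DEFLATER_MARKER, and
// the real code uses Atomic::add rather than a CAS on ref_count):
#if 0
#include <atomic>
#include <climits>
static void* const DEFLATER = (void*)-1;
bool try_claim_for_deflation(std::atomic<void*>& owner,
                             std::atomic<int>& ref_count) {
  void* exp = nullptr;
  if (!owner.compare_exchange_strong(exp, DEFLATER)) {
    return false;                                 // monitor is busy
  }
  int rc = 0;                                     // only an unreferenced
  if (!ref_count.compare_exchange_strong(rc, -INT_MAX)) {  // monitor may be
    exp = DEFLATER;                               // deflated; otherwise
    owner.compare_exchange_strong(exp, nullptr);  // restore and retreat
    return false;
  }
  return true;  // owner == DEFLATER and ref_count < 0 fence out racers
}
#endif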
2010
2011 // Walk a given monitor list, and deflate idle monitors
2012 // The given list could be a per-thread list or a global list
2013 // Caller acquires gListLock as needed.
2014 //
2015 // In the case of parallel processing of thread local monitor lists,
2016 // work is done by Threads::parallel_threads_do() which ensures that
2017 // each Java thread is processed by exactly one worker thread, and
2018 // thus avoids conflicts that would arise if worker threads
2019 // processed the same monitor lists concurrently.
2020 //
2021 // See also ParallelSPCleanupTask and
2022 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
2023 // Threads::parallel_java_threads_do() in thread.cpp.
2024 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
2025 int* count_p,
2026 ObjectMonitor** free_head_p,
2027 ObjectMonitor** free_tail_p) {
2028 ObjectMonitor* cur_mid_in_use = NULL;
2029 ObjectMonitor* mid;
2030 ObjectMonitor* next;
2031 int deflated_count = 0;
2032
2033 for (mid = *list_p; mid != NULL;) {
2034 oop obj = (oop) mid->object();
2035 if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
2036 // Deflation succeeded and already updated free_head_p and
2037 // free_tail_p as needed. Finish the move to the local free list
2038 // by unlinking mid from the global or per-thread in-use list.
2039 if (mid == *list_p) {
2040 *list_p = mid->_next_om;
2041 } else if (cur_mid_in_use != NULL) {
2042 cur_mid_in_use->_next_om = mid->_next_om; // maintain the current thread in-use list
2043 }
2044 next = mid->_next_om;
2045 mid->_next_om = NULL; // This mid is current tail in the free_head_p list
2046 mid = next;
2047 deflated_count++;
2048 *count_p = *count_p - 1;
2049 } else {
2050 cur_mid_in_use = mid;
2051 mid = mid->_next_om;
2052 }
2053 }
2054 return deflated_count;
2055 }
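
// The unlink step above is the standard singly-linked splice-out, kept
// separate from the free-list append so the caller controls both lists.
// Standalone shape (illustrative):
#if 0
struct N { N* next; };
void unlink(N** head, N* prev, N* mid) {
  if (mid == *head)      *head = mid->next;       // removing the list head
  else if (prev != NULL) prev->next = mid->next;  // bypass mid
  mid->next = NULL;                               // mid is now a chain tail
}
#endif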
2056
2057 // Walk a given ObjectMonitor list and deflate idle ObjectMonitors using
2058 // a JavaThread. Returns the number of deflated ObjectMonitors. The given
2059 // list could be a per-thread in-use list or the global in-use list.
2060 // Caller acquires gListLock as appropriate. If a safepoint has started,
2061 // then we save state via saved_mid_in_use_p and return to the caller to
2062 // honor the safepoint.
2063 //
2064 int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p,
2065 int* count_p,
2066 ObjectMonitor** free_head_p,
2067 ObjectMonitor** free_tail_p,
2068 ObjectMonitor** saved_mid_in_use_p) {
2069 assert(AsyncDeflateIdleMonitors, "sanity check");
2070 assert(Thread::current()->is_Java_thread(), "precondition");
2071
2072 ObjectMonitor* cur_mid_in_use = NULL;
2073 ObjectMonitor* mid;
2074 ObjectMonitor* next;
2075 int deflated_count = 0;
2076
2077 if (*saved_mid_in_use_p == NULL) {
2078 // No saved state so start at the beginning.
2079 mid = *list_p;
2080 } else {
2081 // We're restarting after a safepoint so restore the necessary state
2082 // before we resume.
2083 cur_mid_in_use = *saved_mid_in_use_p;
2084 mid = cur_mid_in_use->_next_om;
2085 }
2086 while (mid != NULL) {
2087 // Only try to deflate if there is an associated Java object and if
2088 // mid is old (is not newly allocated and is not newly freed).
2089 if (mid->object() != NULL && mid->is_old() &&
2090 deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) {
2091 // Deflation succeeded and already updated free_head_p and
2092 // free_tail_p as needed. Finish the move to the local free list
2093 // by unlinking mid from the global or per-thread in-use list.
2094 if (mid == *list_p) {
2095 *list_p = mid->_next_om;
2096 } else if (cur_mid_in_use != NULL) {
2097 // Maintain the current in-use list.
2098 cur_mid_in_use->_next_om = mid->_next_om;
2099 }
2100 next = mid->_next_om;
2101 mid->_next_om = NULL;
2102 // At this point mid is disconnected from the in-use list
2103 // and is the current tail in the free_head_p list.
2104 mid = next;
2105 deflated_count++;
2106 *count_p = *count_p - 1;
2107 } else {
2108 // mid is considered in-use if it does not have an associated
2109 // Java object or mid is not old or deflation did not succeed.
2110 // A mid->is_new() node can be seen here when it is freshly
2111 // returned by om_alloc() (and skips the deflation code path).
2112 // A mid->is_old() node can be seen here when deflation failed.
2113 // A mid->is_free() node can be seen here when a fresh node from
2114 // om_alloc() is released by om_release() due to losing the race
2115 // in inflate().
2116
2117 cur_mid_in_use = mid;
2118 mid = mid->_next_om;
2119
2120 if (SafepointSynchronize::is_synchronizing() &&
2121 cur_mid_in_use != *list_p && cur_mid_in_use->is_old()) {
2122 // If a safepoint has started and cur_mid_in_use is not the list
2123 // head and is old, then it is safe to use as saved state. Return
2124 // to the caller so gListLock can be dropped as appropriate
2125 // before blocking.
2126 *saved_mid_in_use_p = cur_mid_in_use;
2127 return deflated_count;
2128 }
2129 }
2130 }
2131 // We finished the list without a safepoint starting so there's
2132 // no need to save state.
2133 *saved_mid_in_use_p = NULL;
2134 return deflated_count;
2135 }
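
// saved_mid_in_use_p implements a resumable cursor: when a safepoint starts
// mid-walk, the walker parks on a stable node, the caller drops gListLock
// and blocks, and the walk later resumes after the cursor. Caller-side
// shape (illustrative; see deflate_common_idle_monitors_using_JT() below):
#if 0
ObjectMonitor* cursor = NULL;
do {
  deflated += deflate_monitor_list_using_JT(&list, &count,
                                            &head, &tail, &cursor);
  if (cursor != NULL) {
    // drop gListLock, ThreadBlockInVM to honor the safepoint, re-acquire
  }
} while (cursor != NULL);
#endif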
2136
2137 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2138 counters->n_in_use = 0; // currently associated with objects
2139 counters->n_in_circulation = 0; // extant
2140 counters->n_scavenged = 0; // reclaimed (global and per-thread)
2141 counters->per_thread_scavenged = 0; // per-thread scavenge total
2142 counters->per_thread_times = 0.0; // per-thread scavenge times
2143 }
2144
2145 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
2146 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2147
2148 if (AsyncDeflateIdleMonitors) {
2149 // Nothing to do when global idle ObjectMonitors are deflated using
2150 // a JavaThread unless a special deflation has been requested.
2151 if (!is_special_deflation_requested()) {
2152 return;
2153 }
2154 }
2155
2156 bool deflated = false;
2157
2158 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors
2159 ObjectMonitor* free_tail_p = NULL;
2160 elapsedTimer timer;
2161
2162 if (log_is_enabled(Info, monitorinflation)) {
2163 timer.start();
2164 }
2165
2166 // Prevent om_flush() from changing mids in Thread dtors during deflation,
2167 // and in case the VM thread is acquiring a lock during a safepoint.
2168 // See e.g. 6320749.
2169 Thread::muxAcquire(&gListLock, "deflate_idle_monitors");
2170
2171 // Note: the thread-local monitors lists get deflated in
2172 // a separate pass. See deflate_thread_local_monitors().
2173
2174 // For moribund threads, scan g_om_in_use_list
2175 int deflated_count = 0;
2176 if (g_om_in_use_list != NULL) {
2177 // Update n_in_circulation before g_om_in_use_count is updated by deflation.
2178 counters->n_in_circulation += g_om_in_use_count;
2179 deflated_count = deflate_monitor_list((ObjectMonitor**)&g_om_in_use_list, (int*)&g_om_in_use_count, &free_head_p, &free_tail_p);
2180 counters->n_in_use += g_om_in_use_count;
2181 }
2182
2183 if (free_head_p != NULL) {
2184 // Move the deflated ObjectMonitors back to the global free list.
2185 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
2186 assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
2187 INTPTR_FORMAT, p2i(free_tail_p->_next_om));
2188 // constant-time list splice - prepend scavenged segment to g_free_list
2189 free_tail_p->_next_om = g_free_list;
2190 g_free_list = free_head_p;
2191 counters->n_scavenged += deflated_count;
2192 }
2193 Thread::muxRelease(&gListLock);
2194 timer.stop();
2195
2196 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2197 LogStreamHandle(Info, monitorinflation) lsh_info;
2198 LogStream* ls = NULL;
2199 if (log_is_enabled(Debug, monitorinflation)) {
2200 ls = &lsh_debug;
2201 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2202 ls = &lsh_info;
2203 }
2204 if (ls != NULL) {
2205 ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
2206 }
2207 }
2208
2209 // Deflate global idle ObjectMonitors using a JavaThread.
2210 //
2211 void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() {
2212 assert(AsyncDeflateIdleMonitors, "sanity check");
2213 assert(Thread::current()->is_Java_thread(), "precondition");
2214 JavaThread* self = JavaThread::current();
2215
2216 deflate_common_idle_monitors_using_JT(true /* is_global */, self);
2217 }
2218
2219 // Deflate the specified JavaThread's idle ObjectMonitors using a JavaThread.
2220 //
2221 void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT(JavaThread* target) {
2222 assert(AsyncDeflateIdleMonitors, "sanity check");
2223 assert(Thread::current()->is_Java_thread(), "precondition");
2224
2225 target->om_request_deflation = false;
2226
2227 deflate_common_idle_monitors_using_JT(false /* !is_global */, target);
2228 }
2229
2230 // Deflate global or per-thread idle ObjectMonitors using a JavaThread.
2231 //
2232 void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* target) {
2233 JavaThread* self = JavaThread::current();
2234
2235 int deflated_count = 0;
2236 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged ObjectMonitors
2237 ObjectMonitor* free_tail_p = NULL;
2238 ObjectMonitor* saved_mid_in_use_p = NULL;
2239 elapsedTimer timer;
2240
2241 if (log_is_enabled(Info, monitorinflation)) {
2242 timer.start();
2243 }
2244
2245 if (is_global) {
2246 Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(1)");
2247 OM_PERFDATA_OP(MonExtant, set_value(g_om_in_use_count));
2248 } else {
2249 OM_PERFDATA_OP(MonExtant, inc(target->om_in_use_count));
2250 }
2251
2252 do {
2253 int local_deflated_count;
2254 if (is_global) {
2255 local_deflated_count = deflate_monitor_list_using_JT((ObjectMonitor**)&g_om_in_use_list, (int*)&g_om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
2256 } else {
2257 local_deflated_count = deflate_monitor_list_using_JT(&target->om_in_use_list, &target->om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
2258 }
2259 deflated_count += local_deflated_count;
2260
2261 if (free_head_p != NULL) {
2262 // Move the deflated ObjectMonitors to the global free list.
2263 guarantee(free_tail_p != NULL && local_deflated_count > 0, "free_tail_p=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(free_tail_p), local_deflated_count);
2264 assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
2265 INTPTR_FORMAT, p2i(free_tail_p->_next_om));
2266
2267 if (!is_global) {
2268 Thread::muxAcquire(&gListLock, "deflate_per_thread_idle_monitors_using_JT(2)");
2269 }
2270 // Constant-time list splice - prepend scavenged segment to g_free_list.
2271 free_tail_p->_next_om = g_free_list;
2272 g_free_list = free_head_p;
2273
2274 OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
2275 if (!is_global) {
2276 Thread::muxRelease(&gListLock);
2277 }
2278 }
2279
2280 if (saved_mid_in_use_p != NULL) {
2281 // deflate_monitor_list_using_JT() detected a safepoint starting.
2282 if (is_global) {
2283 Thread::muxRelease(&gListLock);
2284 }
2285 timer.stop();
2286 {
2287 if (is_global) {
2288 log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
2289 } else {
2290 log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(target));
2291 }
2292 assert(SafepointSynchronize::is_synchronizing(), "sanity check");
2293 ThreadBlockInVM blocker(self);
2294 }
2295 // Prepare for another loop after the safepoint.
2296 free_head_p = NULL;
2297 free_tail_p = NULL;
2298 if (log_is_enabled(Info, monitorinflation)) {
2299 timer.start();
2300 }
2301 if (is_global) {
2302 Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(3)");
2303 }
2304 }
2305 } while (saved_mid_in_use_p != NULL);
2306 if (is_global) {
2307 Thread::muxRelease(&gListLock);
2308 }
2309 timer.stop();
2310
2311 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2312 LogStreamHandle(Info, monitorinflation) lsh_info;
2313 LogStream* ls = NULL;
2314 if (log_is_enabled(Debug, monitorinflation)) {
2315 ls = &lsh_debug;
2316 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2317 ls = &lsh_info;
2318 }
2319 if (ls != NULL) {
2320 if (is_global) {
2321 ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
2322 } else {
2323 ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(target), timer.seconds(), deflated_count);
2324 }
2325 }
2326 }
2327
2328 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2329 // Report the cumulative time for deflating each thread's idle
2330 // monitors. Note: if the work is split among more than one
2331 // worker thread, then the reported time will likely be more
2332 // than a beginning to end measurement of the phase.
2333 // Note: AsyncDeflateIdleMonitors only deflates per-thread idle
2334 // monitors at a safepoint when a special deflation has been requested.
2335 log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged);
2336
2337 bool needs_special_deflation = is_special_deflation_requested();
2338 if (!AsyncDeflateIdleMonitors || needs_special_deflation) {
2339 // AsyncDeflateIdleMonitors does not use these counters unless
2340 // there is a special deflation request.
2341
2342 OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
2343 OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
2344 }
2345
2346 if (log_is_enabled(Debug, monitorinflation)) {
2347 // exit_globals()'s call to audit_and_print_stats() is done
2348 // at the Info level.
2349 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
2350 } else if (log_is_enabled(Info, monitorinflation)) {
2351 Thread::muxAcquire(&gListLock, "finish_deflate_idle_monitors");
2352 log_info(monitorinflation)("g_om_population=%d, g_om_in_use_count=%d, "
2353 "g_om_free_count=%d", g_om_population,
2354 g_om_in_use_count, g_om_free_count);
2355 Thread::muxRelease(&gListLock);
2356 }
2357
2358 ForceMonitorScavenge = 0; // Reset
2359 GVars.stw_random = os::random();
2360 GVars.stw_cycle++;
2361 if (needs_special_deflation) {
2362 set_is_special_deflation_requested(false); // special deflation is done
2363 }
2364 }
2365
2366 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
2367 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2368
2369 if (AsyncDeflateIdleMonitors) {
2370 if (!is_special_deflation_requested()) {
2371 // Mark the JavaThread for idle monitor deflation if a special
2372 // deflation has NOT been requested.
2373 if (thread->om_in_use_count > 0) {
2374 // This JavaThread is using monitors so mark it.
2375 thread->om_request_deflation = true;
2376 }
2377 return;
2378 }
2379 }
2380
2381 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors
2382 ObjectMonitor* free_tail_p = NULL;
2383 elapsedTimer timer;
2384
2385 if (log_is_enabled(Info, safepoint, cleanup) ||
2386 log_is_enabled(Info, monitorinflation)) {
2387 timer.start();
2388 }
2389
2390 // Update n_in_circulation before om_in_use_count is updated by deflation.
2391 counters->n_in_circulation += thread->om_in_use_count;
2392
2393 int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
2394 counters->n_in_use += thread->om_in_use_count;
2395
2396 Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors");
2397
2398 if (free_head_p != NULL) {
2399 // Move the deflated ObjectMonitors back to the global free list.
2400 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
2401 assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
2402 INTPTR_FORMAT, p2i(free_tail_p->_next_om));
2403
2404 // constant-time list splice - prepend scavenged segment to g_free_list
2405 free_tail_p->_next_om = g_free_list;
2406 g_free_list = free_head_p;
2407 counters->n_scavenged += deflated_count;
2408 counters->per_thread_scavenged += deflated_count;
2409 }
2410
2411 timer.stop();
2412 // Safepoint logging cares about cumulative per_thread_times and
2413 // we'll capture most of the cost, but not the muxRelease() which
2414 // should be cheap.
2415 counters->per_thread_times += timer.seconds();
2416
2417 Thread::muxRelease(&gListLock);
2418
2419 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2420 LogStreamHandle(Info, monitorinflation) lsh_info;
2421 LogStream* ls = NULL;
2422 if (log_is_enabled(Debug, monitorinflation)) {
2423 ls = &lsh_debug;
2424 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2425 ls = &lsh_info;
2426 }
2427 if (ls != NULL) {
2428 ls->print_cr("jt=" INTPTR_FORMAT ": deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(thread), timer.seconds(), deflated_count);
2429 }
2430 }
2431
2432 // Monitor cleanup on JavaThread::exit
2433
2434 // Iterate through monitor cache and attempt to release thread's monitors
2435 // Gives up on a particular monitor if an exception occurs, but continues
2436 // the overall iteration, swallowing the exception.
2437 class ReleaseJavaMonitorsClosure: public MonitorClosure {
2438 private:
2449
2450 // Release all inflated monitors owned by THREAD. Lightweight monitors are
2451 // ignored. This is meant to be called during JNI thread detach which assumes
2452 // all remaining monitors are heavyweight. All exceptions are swallowed.
2453 // Scanning the extant monitor list can be time consuming.
2454 // A simple optimization is to add a per-thread flag that indicates a thread
2455 // called jni_monitorenter() during its lifetime.
2456 //
2457 // Instead of NoSafepointVerifier it might be cheaper to
2458 // use an idiom of the form:
2459 // auto int tmp = SafepointSynchronize::_safepoint_counter ;
2460 // <code that must not run at safepoint>
2461 // guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
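// (An odd counter value means a safepoint is in progress; the guarantee
// fails if a safepoint started or completed inside the region.)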
2462 // Since the tests are extremely cheap we could leave them enabled
2463 // for normal product builds.
2464
2465 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
2466 assert(THREAD == JavaThread::current(), "must be current Java thread");
2467 NoSafepointVerifier nsv;
2468 ReleaseJavaMonitorsClosure rjmc(THREAD);
2469 Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
2470 ObjectSynchronizer::monitors_iterate(&rjmc);
2471 Thread::muxRelease(&gListLock);
2472 THREAD->clear_pending_exception();
2473 }
2474
2475 const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
2476 switch (cause) {
2477 case inflate_cause_vm_internal: return "VM Internal";
2478 case inflate_cause_monitor_enter: return "Monitor Enter";
2479 case inflate_cause_wait: return "Monitor Wait";
2480 case inflate_cause_notify: return "Monitor Notify";
2481 case inflate_cause_hash_code: return "Monitor Hash Code";
2482 case inflate_cause_jni_enter: return "JNI Monitor Enter";
2483 case inflate_cause_jni_exit: return "JNI Monitor Exit";
2484 default:
2485 ShouldNotReachHere();
2486 }
2487 return "Unknown";
2488 }
2489
2490 //------------------------------------------------------------------------------
2491 // Debugging code
2505 u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
2506 return (u_char*)&GVars.stw_random;
2507 }
2508
2509 void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
2510 assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");
2511
2512 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2513 LogStreamHandle(Info, monitorinflation) lsh_info;
2514 LogStreamHandle(Trace, monitorinflation) lsh_trace;
2515 LogStream* ls = NULL;
2516 if (log_is_enabled(Trace, monitorinflation)) {
2517 ls = &lsh_trace;
2518 } else if (log_is_enabled(Debug, monitorinflation)) {
2519 ls = &lsh_debug;
2520 } else if (log_is_enabled(Info, monitorinflation)) {
2521 ls = &lsh_info;
2522 }
2523 assert(ls != NULL, "sanity check");
2524
2525 if (!on_exit) {
2526 // Not at VM exit so grab the global list lock.
2527 Thread::muxAcquire(&gListLock, "audit_and_print_stats");
2528 }
2529
2530 // Log counts for the global and per-thread monitor lists:
2531 int chk_om_population = log_monitor_list_counts(ls);
2532 int error_cnt = 0;
2533
2534 ls->print_cr("Checking global lists:");
2535
2536 // Check g_om_population:
2537 if (g_om_population == chk_om_population) {
2538 ls->print_cr("g_om_population=%d equals chk_om_population=%d",
2539 g_om_population, chk_om_population);
2540 } else {
2541 ls->print_cr("ERROR: g_om_population=%d is not equal to "
2542 "chk_om_population=%d", g_om_population,
2543 chk_om_population);
2544 error_cnt++;
2545 }
2546
2547 // Check g_om_in_use_list and g_om_in_use_count:
2548 chk_global_in_use_list_and_count(ls, &error_cnt);
2549
2550 // Check g_free_list and g_om_free_count:
2551 chk_global_free_list_and_count(ls, &error_cnt);
2552
2553 if (!on_exit) {
2554 Thread::muxRelease(&gListLock);
2555 }
2556
2557 ls->print_cr("Checking per-thread lists:");
2558
2559 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2560 // Check om_in_use_list and om_in_use_count:
2561 chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);
2562
2563 // Check om_free_list and om_free_count:
2564 chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
2565 }
2566
2567 if (error_cnt == 0) {
2568 ls->print_cr("No errors found in monitor list checks.");
2569 } else {
2570 log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
2571 }
2572
2573 if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
2574 (!on_exit && log_is_enabled(Trace, monitorinflation))) {
2575 // When exiting this log output is at the Info level. When called
2576 // at a safepoint, this log output is at the Trace level since
2577 // there can be a lot of it.
2578 log_in_use_monitor_details(ls, on_exit);
2579 }
2580
2581 ls->flush();
2582
2583 guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
2584 }
2585
2586 // Check a free monitor entry; log any errors.
2587 void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n,
2588 outputStream * out, int *error_cnt_p) {
2589 stringStream ss;
2590 if (n->is_busy()) {
2591 if (jt != NULL) {
2592 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2593 ": free per-thread monitor must not be busy: %s", p2i(jt),
2594 p2i(n), n->is_busy_to_string(&ss));
2595 } else {
2596 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
2597 "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
2598 }
2614 }
2615 if (n->object() != NULL) {
2616 if (jt != NULL) {
2617 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2618 ": free per-thread monitor must have NULL _object "
2619 "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
2620 p2i(n->object()));
2621 } else {
2622 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
2623 "must have NULL _object field: _object=" INTPTR_FORMAT,
2624 p2i(n), p2i(n->object()));
2625 }
2626 *error_cnt_p = *error_cnt_p + 1;
2627 }
2628 }
2629
2630 // Check the global free list and count; log the results of the checks.
2631 void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
2632 int *error_cnt_p) {
2633 int chk_om_free_count = 0;
2634 for (ObjectMonitor* n = g_free_list; n != NULL; n = n->_next_om) {
2635 chk_free_entry(NULL /* jt */, n, out, error_cnt_p);
2636 chk_om_free_count++;
2637 }
2638 if (g_om_free_count == chk_om_free_count) {
2639 out->print_cr("g_om_free_count=%d equals chk_om_free_count=%d",
2640 g_om_free_count, chk_om_free_count);
2641 } else {
2642 out->print_cr("ERROR: g_om_free_count=%d is not equal to "
2643 "chk_om_free_count=%d", g_om_free_count,
2644 chk_om_free_count);
2645 *error_cnt_p = *error_cnt_p + 1;
2646 }
2647 }
2648
2649 // Check the global in-use list and count; log the results of the checks.
2650 void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
2651 int *error_cnt_p) {
2652 int chk_om_in_use_count = 0;
2653 for (ObjectMonitor* n = g_om_in_use_list; n != NULL; n = n->_next_om) {
2654 chk_in_use_entry(NULL /* jt */, n, out, error_cnt_p);
2655 chk_om_in_use_count++;
2656 }
2657 if (g_om_in_use_count == chk_om_in_use_count) {
2658 out->print_cr("g_om_in_use_count=%d equals chk_om_in_use_count=%d", g_om_in_use_count,
2659 chk_om_in_use_count);
2660 } else {
2661 out->print_cr("ERROR: g_om_in_use_count=%d is not equal to chk_om_in_use_count=%d",
2662 g_om_in_use_count, chk_om_in_use_count);
2663 *error_cnt_p = *error_cnt_p + 1;
2664 }
2665 }
2666
2667 // Check an in-use monitor entry; log any errors.
2668 void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
2669 outputStream * out, int *error_cnt_p) {
2670 if (n->header().value() == 0) {
2671 if (jt != NULL) {
2672 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2673 ": in-use per-thread monitor must have non-NULL _header "
2674 "field.", p2i(jt), p2i(n));
2675 } else {
2676 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
2677 "must have non-NULL _header field.", p2i(n));
2678 }
2679 *error_cnt_p = *error_cnt_p + 1;
2680 }
2681 if (n->object() == NULL) {
2682 if (jt != NULL) {
2711 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2712 ": in-use per-thread monitor's object does not refer "
2713 "to the same monitor: obj=" INTPTR_FORMAT ", mark="
2714 INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
2715 p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
2716 } else {
2717 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
2718 "monitor's object does not refer to the same monitor: obj="
2719 INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
2720 INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
2721 }
2722 *error_cnt_p = *error_cnt_p + 1;
2723 }
2724 }
2725
2726 // Check the thread's free list and count; log the results of the checks.
2727 void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
2728 outputStream * out,
2729 int *error_cnt_p) {
2730 int chk_om_free_count = 0;
2731 for (ObjectMonitor* n = jt->om_free_list; n != NULL; n = n->_next_om) {
2732 chk_free_entry(jt, n, out, error_cnt_p);
2733 chk_om_free_count++;
2734 }
2735 if (jt->om_free_count == chk_om_free_count) {
2736 out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals "
2737 "chk_om_free_count=%d", p2i(jt), jt->om_free_count, chk_om_free_count);
2738 } else {
2739 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not "
2740 "equal to chk_om_free_count=%d", p2i(jt), jt->om_free_count,
2741 chk_om_free_count);
2742 *error_cnt_p = *error_cnt_p + 1;
2743 }
2744 }
2745
2746 // Check the thread's in-use list and count; log the results of the checks.
2747 void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
2748 outputStream * out,
2749 int *error_cnt_p) {
2750 int chk_om_in_use_count = 0;
2751 for (ObjectMonitor* n = jt->om_in_use_list; n != NULL; n = n->_next_om) {
2752 chk_in_use_entry(jt, n, out, error_cnt_p);
2753 chk_om_in_use_count++;
2754 }
2755 if (jt->om_in_use_count == chk_om_in_use_count) {
2756 out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
2757 "chk_om_in_use_count=%d", p2i(jt), jt->om_in_use_count,
2758 chk_om_in_use_count);
2759 } else {
2760 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
2761 "equal to chk_om_in_use_count=%d", p2i(jt), jt->om_in_use_count,
2762 chk_om_in_use_count);
2763 *error_cnt_p = *error_cnt_p + 1;
2764 }
2765 }
2766
2767 // Log details about ObjectMonitors on the in-use lists. The 'BHL'
2768 // flags indicate why the entry is in-use, 'object' and 'object type'
2769 // indicate the associated object and its type.
2770 void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out,
2771 bool on_exit) {
2772 if (!on_exit) {
2773 // Not at VM exit so grab the global list lock.
2774 Thread::muxAcquire(&gListLock, "log_in_use_monitor_details");
2775 }
2776
2777 stringStream ss;
2778 if (g_om_in_use_count > 0) {
2779 out->print_cr("In-use global monitor info:");
2780 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
2781 out->print_cr("%18s %s %7s %18s %18s",
2782 "monitor", "BHL", "ref_cnt", "object", "object type");
2783 out->print_cr("================== === ======= ================== ==================");
2784 for (ObjectMonitor* n = g_om_in_use_list; n != NULL; n = n->_next_om) {
2785 const oop obj = (oop) n->object();
2786 const markWord mark = n->header();
2787 ResourceMark rm;
2788 out->print(INTPTR_FORMAT " %d%d%d %7d " INTPTR_FORMAT " %s",
2789 p2i(n), n->is_busy() != 0, mark.hash() != 0,
2790 n->owner() != NULL, (int)n->ref_count(), p2i(obj),
2791 obj->klass()->external_name());
2792 if (n->is_busy() != 0) {
2793 out->print(" (%s)", n->is_busy_to_string(&ss));
2794 ss.reset();
2795 }
2796 out->cr();
2797 }
2798 }
2799
2800 if (!on_exit) {
2801 Thread::muxRelease(&gListLock);
2802 }
2803
2804 out->print_cr("In-use per-thread monitor info:");
2805 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
2806 out->print_cr("%18s %18s %s %7s %18s %18s",
2807 "jt", "monitor", "BHL", "ref_cnt", "object", "object type");
2808 out->print_cr("================== ================== === ======= ================== ==================");
2809 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2810 for (ObjectMonitor* n = jt->om_in_use_list; n != NULL; n = n->_next_om) {
2811 const oop obj = (oop) n->object();
2812 const markWord mark = n->header();
2813 ResourceMark rm;
2814 out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d %7d "
2815 INTPTR_FORMAT " %s", p2i(jt), p2i(n), n->is_busy() != 0,
2816 mark.hash() != 0, n->owner() != NULL, (int)n->ref_count(),
2817 p2i(obj), obj->klass()->external_name());
2818 if (n->is_busy() != 0) {
2819 out->print(" (%s)", n->is_busy_to_string(&ss));
2820 ss.reset();
2821 }
2822 out->cr();
2823 }
2824 }
2825
2826 out->flush();
2827 }
2828
2829 // Log counts for the global and per-thread monitor lists and return
2830 // the population count.
2831 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
2832 int pop_count = 0;
2833 out->print_cr("%18s %10s %10s %10s",
2834 "Global Lists:", "InUse", "Free", "Total");
2835 out->print_cr("================== ========== ========== ==========");
2836 out->print_cr("%18s %10d %10d %10d", "",
2837 g_om_in_use_count, g_om_free_count, g_om_population);
2838 pop_count += g_om_in_use_count + g_om_free_count;
2839
2840 out->print_cr("%18s %10s %10s %10s",
2841 "Per-Thread Lists:", "InUse", "Free", "Provision");
2842 out->print_cr("================== ========== ========== ==========");
2843
2844 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2845 out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
2846 jt->om_in_use_count, jt->om_free_count, jt->om_free_provision);
2847 pop_count += jt->om_in_use_count + jt->om_free_count;
2848 }
2849 return pop_count;
2850 }
2851
2852 #ifndef PRODUCT
2853
2854 // Check if monitor belongs to the monitor cache
2855 // The list is grow-only so it's *relatively* safe to traverse
2856 // the list of extant blocks without taking a lock.
2857
2858 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
2859 PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
2860 while (block != NULL) {
2861 assert(block->object() == CHAINMARKER, "must be a block header");
2862 if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
2863 address mon = (address)monitor;
2864 address blk = (address)block;
2865 size_t diff = mon - blk;
2866 assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
2867 return 1;
2868 }
2869 block = (PaddedObjectMonitor*)block->_next_om;
2870 }
2871 return 0;
2872 }
2873
2874 #endif
112 DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
113 return 0;
114 }
115
116 #define NINFLATIONLOCKS 256
117 static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];
118
119 // global list of blocks of monitors
120 PaddedObjectMonitor* volatile ObjectSynchronizer::g_block_list = NULL;
121 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
122 bool volatile ObjectSynchronizer::_is_special_deflation_requested = false;
123 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
124
125 // Global ObjectMonitor free list. Newly allocated and deflated
126 // ObjectMonitors are prepended here.
127 static ObjectMonitor* volatile g_free_list = NULL;
128 // Global ObjectMonitor in-use list. When a JavaThread is exiting,
129 // ObjectMonitors on its per-thread in-use list are prepended here.
130 static ObjectMonitor* volatile g_om_in_use_list = NULL;
131
132 static volatile int g_om_free_count = 0; // # on g_free_list
133 static volatile int g_om_in_use_count = 0; // # on g_om_in_use_list
134 static volatile int g_om_population = 0; // # Extant -- in circulation
135
136 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
137
138
139 // =====================> List Management functions
140
141 // Return true if the ObjectMonitor's next field is marked.
142 // Otherwise returns false.
143 static bool is_next_marked(ObjectMonitor* om) {
144 return ((intptr_t)OrderAccess::load_acquire(&om->_next_om) & 0x1) != 0;
145 }
146
147 // Mark an ObjectMonitor* and return it. Note: the om parameter
148 // may or may not have been marked originally.
149 static ObjectMonitor* mark_om_ptr(ObjectMonitor* om) {
150 return (ObjectMonitor*)((intptr_t)om | 0x1);
151 }
152
153 // Mark the next field in an ObjectMonitor. If marking was successful,
154 // then the unmarked next field is returned via parameter and true is
155 // returned. Otherwise false is returned.
156 static bool mark_next(ObjectMonitor* om, ObjectMonitor** next_p) {
157 // Get current next field without any marking value.
158 ObjectMonitor* next = (ObjectMonitor*)
159 ((intptr_t)OrderAccess::load_acquire(&om->_next_om) & ~0x1);
160 if (Atomic::cmpxchg(mark_om_ptr(next), &om->_next_om, next) != next) {
161 return false; // Could not mark the next field or it was already marked.
162 }
163 *next_p = next;
164 return true;
165 }
166
167 // Loop until we mark the next field in an ObjectMonitor. The unmarked
168 // next field is returned.
169 static ObjectMonitor* mark_next_loop(ObjectMonitor* om) {
170 ObjectMonitor* next;
171 while (true) {
172 if (mark_next(om, &next)) {
173 // Marked om's next field so return the unmarked value.
174 return next;
175 }
176 }
177 }
178
179 // Set the next field in an ObjectMonitor to the specified value.
180 // The caller of set_next() must be the same thread that marked the
181 // ObjectMonitor.
182 static void set_next(ObjectMonitor* om, ObjectMonitor* value) {
183 OrderAccess::release_store(&om->_next_om, value);
184 }
185
186 // Mark the next field in the list head ObjectMonitor. If marking was
187 // successful, then the mid and the unmarked next field are returned
188 // via parameter and true is returned. Otherwise false is returned.
189 static bool mark_list_head(ObjectMonitor* volatile * list_p,
190 ObjectMonitor** mid_p, ObjectMonitor** next_p) {
191 while (true) {
192 ObjectMonitor* mid = OrderAccess::load_acquire(list_p);
193 if (mid == NULL) {
194 return false; // The list is empty so nothing to mark.
195 }
196 if (mark_next(mid, next_p)) {
197 if (OrderAccess::load_acquire(list_p) != mid) {
198 // The list head changed so we have to retry.
199 set_next(mid, *next_p); // unmark mid
200 continue;
201 }
202       // We marked the next field to guard against races.
203 *mid_p = mid;
204 return true;
205 }
206 }
207 }
208
209 // Return the unmarked next field in an ObjectMonitor. Note: the next
210 // field may or may not have been marked originally.
211 static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
212 return (ObjectMonitor*)((intptr_t)OrderAccess::load_acquire(&om->_next_om) & ~0x1);
213 }
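
// Illustrative sketch (standalone and hypothetical; not this file's code):
// the same low-order-bit "mark the next pointer" idiom expressed with
// portable std::atomic instead of OrderAccess/Atomic. A successful mark
// gives the marking thread temporary ownership of the next field until it
// stores an unmarked value again (cf. mark_next() and set_next() above).

#include <atomic>
#include <cstdint>

struct Node {
  std::atomic<Node*> next;
};

// Strip the mark bit from a possibly-marked pointer.
static Node* unmarked(Node* p) {
  return reinterpret_cast<Node*>(reinterpret_cast<intptr_t>(p) & ~intptr_t(1));
}

// Try to set the low bit of n->next. On success the unmarked next value is
// returned via *next_p; on failure the field was concurrently changed or
// was already marked by another thread.
static bool try_mark_next(Node* n, Node** next_p) {
  Node* cur = unmarked(n->next.load(std::memory_order_acquire));
  Node* marked = reinterpret_cast<Node*>(reinterpret_cast<intptr_t>(cur) | 1);
  if (!n->next.compare_exchange_strong(cur, marked)) {
    return false;
  }
  *next_p = cur;
  return true;
}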
214
215 #if 0
216 // XXX - this is unused
217 // Unmark the next field in an ObjectMonitor. Requires that the next
218 // field be marked.
219 static void unmark_next(ObjectMonitor* om) {
220 ADIM_guarantee(is_next_marked(om), "next field must be marked: next=" INTPTR_FORMAT, p2i(om->_next_om));
221
222 ObjectMonitor* next = unmarked_next(om);
223 set_next(om, next);
224 }
225 #endif
226
227 volatile int visit_counter = 42;
228 static void chk_for_list_loop(ObjectMonitor* list, int count) {
229 if (!CheckMonitorLists) {
230 return;
231 }
232 int l_visit_counter = Atomic::add(1, &visit_counter);
233 int l_count = 0;
234 ObjectMonitor* prev = NULL;
235 for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
236 if (mid->visit_marker == l_visit_counter) {
237 log_error(monitorinflation)("ERROR: prev=" INTPTR_FORMAT ", l_count=%d"
238 " refers to an ObjectMonitor that has"
239 " already been visited: mid=" INTPTR_FORMAT,
240 p2i(prev), l_count, p2i(mid));
241 fatal("list=" INTPTR_FORMAT " of %d items has a loop.", p2i(list), count);
242 }
243 mid->visit_marker = l_visit_counter;
244 prev = mid;
245 if (++l_count > count + 1024 * 1024) {
246 fatal("list=" INTPTR_FORMAT " of %d items may have a loop; l_count=%d",
247 p2i(list), count, l_count);
248 }
249 }
250 }
251
252 static void chk_om_not_on_list(ObjectMonitor* om, ObjectMonitor* list, int count) {
253 if (!CheckMonitorLists) {
254 return;
255 }
256 guarantee(list != om, "ERROR: om=" INTPTR_FORMAT " must not be head of the "
257 "list=" INTPTR_FORMAT ", count=%d", p2i(om), p2i(list), count);
258 int l_count = 0;
259 for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
260 if (unmarked_next(mid) == om) {
261 log_error(monitorinflation)("ERROR: mid=" INTPTR_FORMAT ", l_count=%d"
262 " next_om refers to om=" INTPTR_FORMAT,
263 p2i(mid), l_count, p2i(om));
264 fatal("list=" INTPTR_FORMAT " of %d items has bad next_om value.",
265 p2i(list), count);
266 }
267 if (++l_count > count + 1024 * 1024) {
268 fatal("list=" INTPTR_FORMAT " of %d items may have a loop; l_count=%d",
269 p2i(list), count, l_count);
270 }
271 }
272 }
273
274 static void chk_om_elems_not_on_list(ObjectMonitor* elems, int elems_count,
275 ObjectMonitor* list, int list_count) {
276 if (!CheckMonitorLists) {
277 return;
278 }
279 chk_for_list_loop(elems, elems_count);
280 for (ObjectMonitor* mid = elems; mid != NULL; mid = unmarked_next(mid)) {
281 chk_om_not_on_list(mid, list, list_count);
282 }
283 }
284
285 // Prepend a list of ObjectMonitors to the specified *list_p. 'tail' is
286 // the last ObjectMonitor in the list and there are 'count' on the list.
287 // Also updates the specified *count_p.
288 static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail,
289 int count, ObjectMonitor* volatile* list_p,
290 volatile int* count_p) {
291 chk_for_list_loop(OrderAccess::load_acquire(list_p),
292 OrderAccess::load_acquire(count_p));
293 chk_om_elems_not_on_list(list, count, OrderAccess::load_acquire(list_p),
294 OrderAccess::load_acquire(count_p));
295 while (true) {
296 ObjectMonitor* cur = OrderAccess::load_acquire(list_p);
297 // Prepend list to *list_p.
298 ObjectMonitor* next = NULL;
299 if (!mark_next(tail, &next)) {
300 continue; // failed to mark next field so try it all again
301 }
302 set_next(tail, cur); // tail now points to cur (and unmarks tail)
303 if (cur == NULL) {
304 // No potential race with takers or other prependers since
305 // *list_p is empty.
306 if (Atomic::cmpxchg(list, list_p, cur) == cur) {
307 // Successfully switched *list_p to the list value.
308 Atomic::add(count, count_p);
309 break;
310 }
311 // Implied else: try it all again
312 } else {
313 // Try to mark next field to guard against races:
314 if (!mark_next(cur, &next)) {
315 continue; // failed to mark next field so try it all again
316 }
317 // We marked the next field so try to switch *list_p to the list value.
318 if (Atomic::cmpxchg(list, list_p, cur) != cur) {
319 // The list head has changed so unmark the next field and try again:
320 set_next(cur, next);
321 continue;
322 }
323 Atomic::add(count, count_p);
324 set_next(cur, next); // unmark next field
325 break;
326 }
327 }
328 }
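
// Illustrative sketch (standalone and hypothetical; not this file's code):
// the bare CAS-prepend loop at the core of prepend_list_to_common(), with
// the next-field marking omitted. Without that marking this is only safe
// when no thread concurrently extracts nodes; the A-B-A hazard it guards
// against is sketched after take_from_start_of_common() below.

#include <atomic>

struct Node { std::atomic<Node*> next{nullptr}; };

// Prepend the chain [list .. tail] onto *head_p, retrying on contention.
static void prepend_chain(std::atomic<Node*>* head_p, Node* list, Node* tail) {
  Node* cur = head_p->load(std::memory_order_acquire);
  do {
    // Point the chain's tail at the current head before publishing.
    tail->next.store(cur, std::memory_order_relaxed);
  } while (!head_p->compare_exchange_weak(cur, list,
                                          std::memory_order_release,
                                          std::memory_order_acquire));
}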
329
330 // Prepend a newly allocated block of ObjectMonitors to g_block_list and
331 // g_free_list. Also updates g_om_population and g_om_free_count.
332 void ObjectSynchronizer::prepend_block_to_lists(PaddedObjectMonitor* new_blk) {
333 // First we handle g_block_list:
334 while (true) {
335 PaddedObjectMonitor* cur = OrderAccess::load_acquire(&g_block_list);
336 // Prepend new_blk to g_block_list. The first ObjectMonitor in
337 // a block is reserved for use as linkage to the next block.
338 OrderAccess::release_store(&new_blk[0]._next_om, cur);
339 if (Atomic::cmpxchg(new_blk, &g_block_list, cur) == cur) {
340 // Successfully switched g_block_list to the new_blk value.
341 Atomic::add(_BLOCKSIZE - 1, &g_om_population);
342 break;
343 }
344 // Implied else: try it all again
345 }
346
347 // Second we handle g_free_list:
348 prepend_list_to_common(new_blk + 1, &new_blk[_BLOCKSIZE - 1], _BLOCKSIZE - 1,
349 &g_free_list, &g_om_free_count);
350 }
351
352 // Prepend a list of ObjectMonitors to g_free_list. 'tail' is the last
353 // ObjectMonitor in the list and there are 'count' on the list. Also
354 // updates g_om_free_count.
355 static void prepend_list_to_g_free_list(ObjectMonitor* list,
356 ObjectMonitor* tail, int count) {
357 prepend_list_to_common(list, tail, count, &g_free_list, &g_om_free_count);
358 }
359
360 // Prepend a list of ObjectMonitors to g_om_in_use_list. 'tail' is the last
361 // ObjectMonitor in the list and there are 'count' on the list. Also
362 // updates g_om_in_use_count.
363 static void prepend_list_to_g_om_in_use_list(ObjectMonitor* list,
364 ObjectMonitor* tail, int count) {
365 prepend_list_to_common(list, tail, count, &g_om_in_use_list, &g_om_in_use_count);
366 }
367
368 // Prepend an ObjectMonitor to the specified list. Also updates
369 // the specified counter.
370 static void prepend_to_common(ObjectMonitor* m, ObjectMonitor* volatile * list_p,
371 int volatile * count_p) {
372 chk_for_list_loop(OrderAccess::load_acquire(list_p),
373 OrderAccess::load_acquire(count_p));
374 chk_om_not_on_list(m, OrderAccess::load_acquire(list_p),
375 OrderAccess::load_acquire(count_p));
376
377 while (true) {
378 ObjectMonitor* cur = OrderAccess::load_acquire(list_p);
379 // Prepend ObjectMonitor to *list_p.
380 ObjectMonitor* next = NULL;
381 if (!mark_next(m, &next)) {
382 continue; // failed to mark next field so try it all again
383 }
384 set_next(m, cur); // m now points to cur (and unmarks m)
385 if (cur == NULL) {
386 // No potential race with other prependers since *list_p is empty.
387 if (Atomic::cmpxchg(m, list_p, cur) == cur) {
388 // Successfully switched *list_p to 'm'.
389 Atomic::inc(count_p);
390 break;
391 }
392 // Implied else: try it all again
393 } else {
394 // Try to mark next field to guard against races:
395 if (!mark_next(cur, &next)) {
396 continue; // failed to mark next field so try it all again
397 }
398 // We marked the next field so try to switch *list_p to 'm'.
399 if (Atomic::cmpxchg(m, list_p, cur) != cur) {
400 // The list head has changed so unmark the next field and try again:
401 set_next(cur, next);
402 continue;
403 }
404 Atomic::inc(count_p);
405 set_next(cur, next); // unmark next field
406 break;
407 }
408 }
409 }
410
411 // Prepend an ObjectMonitor to a per-thread om_free_list.
412 // Also updates the per-thread om_free_count.
413 static void prepend_to_om_free_list(Thread* self, ObjectMonitor* m) {
414 prepend_to_common(m, &self->om_free_list, &self->om_free_count);
415 }
416
417 // Prepend an ObjectMonitor to a per-thread om_in_use_list.
418 // Also updates the per-thread om_in_use_count.
419 static void prepend_to_om_in_use_list(Thread* self, ObjectMonitor* m) {
420 prepend_to_common(m, &self->om_in_use_list, &self->om_in_use_count);
421 }
422
423 // Take an ObjectMonitor from the start of the specified list. Also
424 // decrements the specified counter. Returns NULL if none are available.
425 static ObjectMonitor* take_from_start_of_common(ObjectMonitor* volatile * list_p,
426 int volatile * count_p) {
427 chk_for_list_loop(OrderAccess::load_acquire(list_p),
428 OrderAccess::load_acquire(count_p));
429
430 ObjectMonitor* next = NULL;
431 ObjectMonitor* take = NULL;
432 // Mark the list head to guard against A-B-A race:
433 if (!mark_list_head(list_p, &take, &next)) {
434 return NULL; // None are available.
435 }
436 // Switch marked list head to next (which unmarks the list head, but
437 // leaves take marked):
438 OrderAccess::release_store(list_p, next);
439 Atomic::dec(count_p);
440 // Unmark take, but leave the next value for any lagging list
441 // walkers. It will get cleaned up when take is prepended to
442 // the in-use list:
443 set_next(take, next);
444 return take;
445 }
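
// Illustrative sketch (standalone and hypothetical): the classic A-B-A
// failure that marking the list head above defends against. With an
// unguarded CAS pop, a thread can read head == A and next == B, stall
// while other threads pop A, pop B, and push A back, and then have its
// CAS(head: A -> B) succeed -- resurrecting the already-removed B.

#include <atomic>

struct Node { std::atomic<Node*> next{nullptr}; };

static Node* unsafe_pop(std::atomic<Node*>* head_p) {
  Node* h = head_p->load(std::memory_order_acquire);
  while (h != nullptr) {
    Node* n = h->next.load(std::memory_order_acquire);
    // BUG (A-B-A): 'n' may be stale even though 'h' matches again here.
    if (head_p->compare_exchange_weak(h, n)) {
      break;
    }
  }
  return h;
}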
446
447 // Take an ObjectMonitor from the start of the global free-list. Also
448 // updates g_om_free_count. Returns NULL if none are available.
449 static ObjectMonitor* take_from_start_of_g_free_list() {
450 return take_from_start_of_common(&g_free_list, &g_om_free_count);
451 }
452
453 // Take an ObjectMonitor from the start of a per-thread free-list.
454 // Also updates om_free_count. Returns NULL if none are available.
455 static ObjectMonitor* take_from_start_of_om_free_list(Thread* self) {
456 return take_from_start_of_common(&self->om_free_list, &self->om_free_count);
457 }
458
459
460 // =====================> Quick functions
461
462 // The quick_* forms are special fast-path variants used to improve
463 // performance. In the simplest case, a "quick_*" implementation could
464 // simply return false, in which case the caller will perform the necessary
465 // state transitions and call the slow-path form.
466 // The fast-path is designed to handle frequently arising cases in an efficient
467 // manner and is just a degenerate "optimistic" variant of the slow-path.
468 // returns true -- to indicate the call was satisfied.
469 // returns false -- to indicate the call needs the services of the slow-path.
470 // A no-loitering ordinance is in effect for code in the quick_* family
471 // operators: safepoints or indefinite blocking (blocking that might span a
472 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
473 // entry.
474 //
475 // Consider: An interesting optimization is to have the JIT recognize the
476 // following common idiom:
477 // synchronized (someobj) { .... ; notify(); }
478 // That is, we find a notify() or notifyAll() call that immediately precedes
479 // the monitorexit operation. In that case the JIT could fuse the operations
1314 while (block != NULL) {
1315 assert(block->object() == CHAINMARKER, "must be a block header");
1316 for (int i = _BLOCKSIZE - 1; i > 0; i--) {
1317 ObjectMonitor* mid = (ObjectMonitor *)(block + i);
1318 if (mid->is_active()) {
1319 ObjectMonitorHandle omh(mid);
1320
1321 if (mid->object() == NULL ||
1322 (AsyncDeflateIdleMonitors && mid->ref_count() < 0)) {
1323 // Only process with closure if the object is set.
1324 // For async deflation, race here if monitor is not owned!
1325           // The above ref_count bump (in ObjectMonitorHandle ctor)
1326 // will cause subsequent async deflation to skip it.
1327 // However, previous or concurrent async deflation is a race
1328 // so skip this ObjectMonitor if it is being async deflated.
1329 continue;
1330 }
1331 closure->do_monitor(mid);
1332 }
1333 }
1334 // unmarked_next() is not needed with g_block_list (no next field marking).
1335 block = (PaddedObjectMonitor*)OrderAccess::load_acquire(&block->_next_om);
1336 }
1337 }
1338
1339 static bool monitors_used_above_threshold() {
1340 if (OrderAccess::load_acquire(&g_om_population) == 0) {
1341 return false;
1342 }
1343 if (MonitorUsedDeflationThreshold > 0) {
1344 int monitors_used = OrderAccess::load_acquire(&g_om_population) -
1345 OrderAccess::load_acquire(&g_om_free_count);
1346 int monitor_usage = (monitors_used * 100LL) /
1347 OrderAccess::load_acquire(&g_om_population);
1348 return monitor_usage > MonitorUsedDeflationThreshold;
1349 }
1350 return false;
1351 }
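
// Worked example with illustrative numbers: if g_om_population == 10000 and
// g_om_free_count == 4000, then monitors_used == 6000 and monitor_usage ==
// (6000 * 100) / 10000 == 60, so this returns true only if
// MonitorUsedDeflationThreshold (default 90) has been set to 59 or lower.
// The 100LL widens the multiply to 64 bits to avoid int overflow when the
// population is very large.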
1352
1353 // Returns true if MonitorBound is set (> 0) and if the specified
1354 // cnt is > MonitorBound. Otherwise returns false.
1355 static bool is_MonitorBound_exceeded(const int cnt) {
1356 const int mx = MonitorBound;
1357 return mx > 0 && cnt > mx;
1358 }
1359
1360 bool ObjectSynchronizer::is_async_deflation_needed() {
1361 if (!AsyncDeflateIdleMonitors) {
1362 return false;
1363 }
1364 if (is_async_deflation_requested()) {
1365 // Async deflation request.
1366 return true;
1367 }
1368 if (AsyncDeflationInterval > 0 &&
1369 time_since_last_async_deflation_ms() > AsyncDeflationInterval &&
1370 monitors_used_above_threshold()) {
1371 // It's been longer than our specified deflate interval and there
1372 // are too many monitors in use. We don't deflate more frequently
1373 // than AsyncDeflationInterval (unless is_async_deflation_requested)
1374 // in order to not swamp the ServiceThread.
1375 _last_async_deflation_time_ns = os::javaTimeNanos();
1376 return true;
1377 }
1378 if (is_MonitorBound_exceeded(OrderAccess::load_acquire(&g_om_population) -
1379 OrderAccess::load_acquire(&g_om_free_count))) {
1380 // Not enough ObjectMonitors on the global free list.
1381 return true;
1382 }
1383 return false;
1384 }
1385
1386 bool ObjectSynchronizer::is_safepoint_deflation_needed() {
1387 if (!AsyncDeflateIdleMonitors) {
1388 if (monitors_used_above_threshold()) {
1389 // Too many monitors in use.
1390 return true;
1391 }
1392 return false;
1393 }
1394 if (is_special_deflation_requested()) {
1395 // For AsyncDeflateIdleMonitors only do a safepoint deflation
1396 // if there is a special deflation request.
1397 return true;
1398 }
1399 return false;
1400 }
1401
1402 jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
1403 return (os::javaTimeNanos() - _last_async_deflation_time_ns) / (NANOUNITS / MILLIUNITS);
1404 }
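
// For reference: NANOUNITS / MILLIUNITS == 1000000000 / 1000 == 1000000,
// i.e., the elapsed nanoseconds are scaled down to milliseconds.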
1405
1406 void ObjectSynchronizer::oops_do(OopClosure* f) {
1407 // We only scan the global used list here (for moribund threads), and
1408 // the thread-local monitors in Thread::oops_do().
1409 global_used_oops_do(f);
1410 }
1411
1412 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
1413 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1414 list_oops_do(OrderAccess::load_acquire(&g_om_in_use_list), OrderAccess::load_acquire(&g_om_in_use_count), f);
1415 }
1416
1417 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
1418 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1419 list_oops_do(OrderAccess::load_acquire(&thread->om_in_use_list), OrderAccess::load_acquire(&thread->om_in_use_count), f);
1420 }
1421
1422 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, int count, OopClosure* f) {
1423 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1424 chk_for_list_loop(list, count);
1425 // The oops_do() phase does not overlap with monitor deflation
1426 // so no need to update the ObjectMonitor's ref_count for this
1427 // ObjectMonitor* use.
1428 for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
1429 if (mid->object() != NULL) {
1430 f->do_oop((oop*)mid->object_addr());
1431 }
1432 }
1433 }
1434
1435
1436 // -----------------------------------------------------------------------------
1437 // ObjectMonitor Lifecycle
1438 // -----------------------
1439 // Inflation unlinks monitors from the global g_free_list and
1440 // associates them with objects. Deflation -- which occurs at
1441 // STW-time -- disassociates idle monitors from objects. Such
1442 // scavenged monitors are returned to the g_free_list.
1443 //
1444 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
1445 //
1446 // Lifecycle:
1447 // -- unassigned and on the global free list
1448 // -- unassigned and on a thread's private om_free_list
1449 // -- assigned to an object. The object is inflated and the mark refers
1450 // to the objectmonitor.
1451
1452
1453 // Constraining monitor pool growth via MonitorBound ...
1454 //
1455 // If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
1456 //
1457 // When safepoint deflation is being used (!AsyncDeflateIdleMonitors):
1458 // The monitor pool is grow-only. We scavenge at STW safepoint-time, but
1459 // the rate of scavenging is driven primarily by GC. As such, we can find
1460 // an inordinate number of monitors in circulation.
1461 // To avoid that scenario we can artificially induce a STW safepoint
1462 // if the pool appears to be growing past some reasonable bound.
1463 // Generally we favor time in space-time tradeoffs, but as there's no
1466 // we could just loop. In addition, if MonitorBound is set to a low value
1467 // we'll incur more safepoints, which are harmful to performance.
1468 // See also: GuaranteedSafepointInterval
1469 //
1470 // The current implementation uses asynchronous VM operations.
1471 //
1472 // When safepoint deflation is being used and MonitorBound is set, the
1473 // boundary applies to
1474 // (g_om_population - g_om_free_count)
1475 // i.e., if there are not enough ObjectMonitors on the global free list,
1476 // then a safepoint deflation is induced. Picking a good MonitorBound value
1477 // is non-trivial.
1478 //
1479 // When async deflation is being used:
1480 // The monitor pool is still grow-only. Async deflation is requested
1481 // by a safepoint's cleanup phase or by the ServiceThread at periodic
1482 // intervals when is_async_deflation_needed() returns true. In
1483 // addition to other policies that are checked, if there are not
1484 // enough ObjectMonitors on the global free list, then
1485 // is_async_deflation_needed() will return true. The ServiceThread
1486 // calls deflate_global_idle_monitors_using_JT() and also calls
1487 // deflate_per_thread_idle_monitors_using_JT() as needed.
1488
1489 static void InduceScavenge(Thread* self, const char * Whence) {
1490 assert(!AsyncDeflateIdleMonitors, "is not used by async deflation");
1491
1492 // Induce STW safepoint to trim monitors
1493 // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
1494 // More precisely, trigger an asynchronous STW safepoint as the number
1495 // of active monitors passes the specified threshold.
1496 // TODO: assert thread state is reasonable
1497
1498 if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
1499 // Induce a 'null' safepoint to scavenge monitors
1500     // The VM_Operation instance must be heap allocated as the op will be enqueued
1501     // and posted to the VMThread and have a lifespan longer than that of this activation record.
1502 // The VMThread will delete the op when completed.
1503 VMThread::execute(new VM_ScavengeMonitors());
1504 }
1505 }
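
// Illustrative sketch (standalone and hypothetical): the xchg-based
// one-shot latch used above. exchange() returns the previous value, so
// exactly one of any number of racing threads observes 0 and gets to
// enqueue the scavenge; the flag is assumed to be reset elsewhere once
// the scavenge completes.

#include <atomic>

static std::atomic<int> force_scavenge_flag{0};

static bool try_claim_scavenge() {
  // Only the thread that transitions the flag 0 -> 1 returns true.
  return force_scavenge_flag.exchange(1) == 0;
}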
1506
1507 ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self,
1508 const InflateCause cause) {
1509 // A large MAXPRIVATE value reduces both list lock contention
1510 // and list coherency traffic, but also tends to increase the
1511 // number of ObjectMonitors in circulation as well as the STW
1512 // scavenge costs. As usual, we lean toward time in space-time
1513 // tradeoffs.
1514 const int MAXPRIVATE = 1024;
1515
1516 stringStream ss;
1517 for (;;) {
1518 ObjectMonitor* m;
1519
1520 // 1: try to allocate from the thread's local om_free_list.
1521 // Threads will attempt to allocate first from their local list, then
1522 // from the global list, and only after those attempts fail will the
1523 // thread attempt to instantiate new monitors. Thread-local free lists
1524 // improve allocation latency, as well as reducing coherency traffic
1525 // on the shared global list.
1526 m = take_from_start_of_om_free_list(self);
1527 if (m != NULL) {
1528 guarantee(m->object() == NULL, "invariant");
1529 m->set_allocation_state(ObjectMonitor::New);
1530 prepend_to_om_in_use_list(self, m);
1531 return m;
1532 }
1533
1534 // 2: try to allocate from the global g_free_list
1535 // CONSIDER: use muxTry() instead of muxAcquire().
1536 // If the muxTry() fails then drop immediately into case 3.
1537 // If we're using thread-local free lists then try
1538 // to reprovision the caller's free list.
1539 if (OrderAccess::load_acquire(&g_free_list) != NULL) {
1540 // Reprovision the thread's om_free_list.
1541 // Use bulk transfers to reduce the allocation rate and heat
1542 // on various locks.
1543 for (int i = self->om_free_provision; --i >= 0;) {
1544 ObjectMonitor* take = take_from_start_of_g_free_list();
1545 if (take == NULL) {
1546 break; // No more are available.
1547 }
1548 guarantee(take->object() == NULL, "invariant");
1549 if (AsyncDeflateIdleMonitors) {
1550 // We allowed 3 field values to linger during async deflation.
1551 // We clear header and restore ref_count here, but we leave
1552 // owner == DEFLATER_MARKER so the simple C2 ObjectMonitor
1553 // enter optimization can no longer race with async deflation
1554 // and reuse.
1555 take->set_header(markWord::zero());
1556 if (take->ref_count() < 0) {
1557 // Add back max_jint to restore the ref_count field to its
1558 // proper value.
1559 Atomic::add(max_jint, &take->_ref_count);
1560
1561 assert(take->ref_count() >= 0, "must not be negative: ref_count=%d",
1562 take->ref_count());
1563 }
1564 }
1565 take->Recycle();
1566 assert(take->is_free(), "invariant");
1567 om_release(self, take, false);
1568 }
1569 self->om_free_provision += 1 + (self->om_free_provision/2);
1570 if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
1571
1572 if (!AsyncDeflateIdleMonitors &&
1573 is_MonitorBound_exceeded(OrderAccess::load_acquire(&g_om_population) -
1574 OrderAccess::load_acquire(&g_om_free_count))) {
1575 // Not enough ObjectMonitors on the global free list.
1576 // We can't safely induce a STW safepoint from om_alloc() as our thread
1577 // state may not be appropriate for such activities and callers may hold
1578 // naked oops, so instead we defer the action.
1579 InduceScavenge(self, "om_alloc");
1580 }
1581 continue;
1582 }
1583
1584 // 3: allocate a block of new ObjectMonitors
1585 // Both the local and global free lists are empty -- resort to malloc().
1586 // In the current implementation ObjectMonitors are TSM - immortal.
1587     // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
1588 // each ObjectMonitor to start at the beginning of a cache line,
1589 // so we use align_up().
1590 // A better solution would be to use C++ placement-new.
1591 // BEWARE: As it stands currently, we don't run the ctors!
1592 assert(_BLOCKSIZE > 1, "invariant");
1593 size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
1594 PaddedObjectMonitor* temp;
1595     size_t aligned_size = neededsize + (OM_CACHE_LINE_SIZE - 1);
1596     void* real_malloc_addr = (void*)NEW_C_HEAP_ARRAY(char, aligned_size,
1597     mtInternal);
1598     temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, OM_CACHE_LINE_SIZE);
1599
1600 // NOTE: (almost) no way to recover if allocation failed.
1601 // We might be able to induce a STW safepoint and scavenge enough
1602 // ObjectMonitors to permit progress.
1603 if (temp == NULL) {
1604 vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
1605 "Allocate ObjectMonitors");
1606 }
1607 (void)memset((void *) temp, 0, neededsize);
1608
1609 // Format the block.
1610     // Initialize the linked list; each monitor points to its next,
1611     // forming the singly linked free list. The very first monitor
1612     // will point to the next block, which forms the block list.
1613 // The trick of using the 1st element in the block as g_block_list
1614 // linkage should be reconsidered. A better implementation would
1615 // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
1616
1617 for (int i = 1; i < _BLOCKSIZE; i++) {
1618 OrderAccess::release_store(&temp[i]._next_om, (ObjectMonitor*)&temp[i+1]);
1619 assert(temp[i].is_free(), "invariant");
1620 }
1621
1622 // terminate the last monitor as the end of list
1623 OrderAccess::release_store(&temp[_BLOCKSIZE - 1]._next_om, (ObjectMonitor*)NULL);
1624
1625 // Element [0] is reserved for global list linkage
1626 temp[0].set_object(CHAINMARKER);
1627
1628 // Consider carving out this thread's current request from the
1629 // block in hand. This avoids some lock traffic and redundant
1630 // list activity.
1631
1632 prepend_block_to_lists(temp);
1633 }
1634 }
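
// Illustrative sketch (standalone and hypothetical): om_alloc()'s
// three-level strategy in miniature -- per-thread cache first, bulk refill
// from a shared pool second, and a freshly allocated block only as a last
// resort. As in the real code, objects are immortal and constructors are
// never run on the zeroed block.

#include <atomic>
#include <cstdlib>

struct Obj { Obj* next; };

struct ThreadCache {
  Obj* free_list = nullptr;
  int provision = 32;  // the real code grows this value over time
};

static std::atomic<Obj*> g_pool{nullptr};

static Obj* pool_take() {
  Obj* h = g_pool.load(std::memory_order_acquire);
  while (h != nullptr && !g_pool.compare_exchange_weak(h, h->next)) {
    // 'h' was refreshed by the failed CAS; retry.
  }
  return h;  // NOTE: the real code must also guard against A-B-A here.
}

static Obj* cache_alloc(ThreadCache* tc) {
  for (;;) {
    // 1: thread-local free list -- no contention, no atomics.
    if (tc->free_list != nullptr) {
      Obj* m = tc->free_list;
      tc->free_list = m->next;
      return m;
    }
    // 2: bulk refill from the shared pool to amortize coherency traffic.
    for (int i = 0; i < tc->provision; i++) {
      Obj* t = pool_take();
      if (t == nullptr) break;
      t->next = tc->free_list;
      tc->free_list = t;
    }
    if (tc->free_list != nullptr) continue;
    // 3: both lists empty -- carve a new zeroed block into the local list.
    Obj* blk = static_cast<Obj*>(calloc((size_t)tc->provision, sizeof(Obj)));
    if (blk == nullptr) return nullptr;  // the real code exits the VM instead
    for (int i = 0; i < tc->provision; i++) {
      blk[i].next = tc->free_list;
      tc->free_list = &blk[i];
    }
  }
}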
1635
1636 // Place "m" on the caller's private per-thread om_free_list.
1637 // In practice there's no need to clamp or limit the number of
1638 // monitors on a thread's om_free_list as the only non-allocation time
1639 // we'll call om_release() is to return a monitor to the free list after
1640 // a CAS attempt failed. This doesn't allow unbounded #s of monitors to
1641 // accumulate on a thread's free list.
1642 //
1643 // Key constraint: all ObjectMonitors on a thread's free list and the global
1644 // free list must have their object field set to null. This prevents the
1645 // scavenger -- deflate_monitor_list() or deflate_monitor_list_using_JT()
1646 // -- from reclaiming them while we are trying to release them.
1647
1648 void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
1649 bool from_per_thread_alloc) {
1650 guarantee(m->header().value() == 0, "invariant");
1651 guarantee(m->object() == NULL, "invariant");
1652 stringStream ss;
1653 guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
1654 "%s, recursions=" INTPTR_FORMAT, m->is_busy_to_string(&ss),
1655 m->_recursions);
1656 m->set_allocation_state(ObjectMonitor::Free);
1657 // _next_om is used for both per-thread in-use and free lists so
1658 // we have to remove 'm' from the in-use list first (as needed).
1659 if (from_per_thread_alloc) {
1660 // Need to remove 'm' from om_in_use_list.
1661 // We use the more complicated mark-cur_mid_in_use-and-mid-as-we-go
1662 // protocol because async deflation can do list deletions in parallel.
1663 ObjectMonitor* cur_mid_in_use = NULL;
1664 ObjectMonitor* mid = NULL;
1665 ObjectMonitor* next = NULL;
1666 bool extracted = false;
1667
1668 if (!mark_list_head(&self->om_in_use_list, &mid, &next)) {
1669 fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self));
1670 }
1671 while (true) {
1672 if (m == mid) {
1673 // We found 'm' on the per-thread in-use list so try to extract it.
1674 // First try the list head:
1675 if (Atomic::cmpxchg(next, &self->om_in_use_list, mid) != mid) {
1676 // We could not switch the list head to next.
1677 ObjectMonitor* marked_mid = mark_om_ptr(mid);
1678 // Switch cur_mid_in_use's next field to next (which also
1679 // unmarks cur_mid_in_use):
1680 ADIM_guarantee(cur_mid_in_use != NULL, "must not be NULL");
1681 if (Atomic::cmpxchg(next, &cur_mid_in_use->_next_om, marked_mid)
1682 != marked_mid) {
1683 // We could not switch cur_mid_in_use's next field. This
1684 // should not be possible since it was marked so we:
1685 fatal("mid=" INTPTR_FORMAT " must be referred to by the list "
1686 "head: &om_in_use_list=" INTPTR_FORMAT " or by "
1687 "cur_mid_in_use's next field: cur_mid_in_use=" INTPTR_FORMAT
1688 ", next_om=" INTPTR_FORMAT, p2i(mid),
1689 p2i((ObjectMonitor**)&self->om_in_use_list),
1690 p2i(cur_mid_in_use), p2i(cur_mid_in_use->_next_om));
1691 }
1692 }
1693 extracted = true;
1694 Atomic::dec(&self->om_in_use_count);
1695 // Unmark mid, but leave the next value for any lagging list
1696 // walkers. It will get cleaned up when mid is prepended to
1697 // the thread's free list:
1698 set_next(mid, next);
1699 break;
1700 }
1701 if (cur_mid_in_use != NULL) {
1702       set_next(cur_mid_in_use, mid); // unmark cur_mid_in_use
1703 }
1704 // The next cur_mid_in_use keeps mid's marked next field so
1705 // that it is stable for a possible next field change. It
1706 // cannot be deflated while it is marked.
1707 cur_mid_in_use = mid;
1708 mid = next;
1709 if (mid == NULL) {
1710 // Reached end of the list and didn't find m so:
1711         fatal("must find m=" INTPTR_FORMAT " on om_in_use_list=" INTPTR_FORMAT,
1712 p2i(m), p2i(self->om_in_use_list));
1713 }
1714 // Mark mid's next field so we can possibly extract it:
1715 next = mark_next_loop(mid);
1716 }
1717 }
1718
1719 prepend_to_om_free_list(self, m);
1720 guarantee(m->is_free(), "invariant");
1721 }
1722
1723 // Return ObjectMonitors on a moribund thread's free and in-use
1724 // lists to the appropriate global lists. The ObjectMonitors on the
1725 // per-thread in-use list may still be in use by other threads.
1726 //
1727 // We currently call om_flush() from Threads::remove() before the
1728 // thread has been excised from the thread list and is no longer a
1729 // mutator. This means that om_flush() cannot run concurrently with
1730 // a safepoint and interleave with deflate_idle_monitors(). In
1731 // particular, this ensures that the thread's in-use monitors are
1732 // scanned by a GC safepoint, either via Thread::oops_do() (before
1733 // om_flush() is called) or via ObjectSynchronizer::oops_do() (after
1734 // om_flush() is called).
1735 //
1736 // With AsyncDeflateIdleMonitors, deflate_global_idle_monitors_using_JT()
1737 // and deflate_per_thread_idle_monitors_using_JT() (in another thread) can
1738 // run at the same time as om_flush() so we have to follow a careful
1739 // protocol to prevent list corruption.
1740
1741 void ObjectSynchronizer::om_flush(Thread* self) {
1742 // This function can race with an async deflater thread. Since
1743 // deflation has to process the per-thread in-use list before
1744 // prepending the deflated ObjectMonitors to the global free list,
1745 // we process the per-thread lists in the same order to prevent
1746 // ordering races.
1747 int in_use_count = 0;
1748 ObjectMonitor* in_use_list = NULL;
1749 ObjectMonitor* in_use_tail = NULL;
1750 ObjectMonitor* next = NULL;
1751
1752 // An async deflation thread checks to see if the target thread
1753 // is exiting, but if it has made it past that check before we
1754 // started exiting, then it is racing to get to the in-use list.
1755 if (mark_list_head(&self->om_in_use_list, &in_use_list, &next)) {
1756 chk_for_list_loop(in_use_list, OrderAccess::load_acquire(&self->om_in_use_count));
1757 // At this point, we have marked the in-use list head so an
1758 // async deflation thread cannot come in after us. If an async
1759 // deflation thread is ahead of us, then we'll detect that and
1760 // wait for it to finish its work.
1761 //
1762 // The thread is going away, however the ObjectMonitors on the
1763 // om_in_use_list may still be in-use by other threads. Link
1764 // them to in_use_tail, which will be linked into the global
1765 // in-use list g_om_in_use_list below.
1766 //
1767 // Account for the in-use list head before the loop since it is
1768 // already marked (by this thread):
1769 in_use_tail = in_use_list;
1770 in_use_count++;
1771 for (ObjectMonitor* cur_om = unmarked_next(in_use_list); cur_om != NULL;) {
1772 if (is_next_marked(cur_om)) {
1773 // This next field is marked so there must be an async deflater
1774 // thread ahead of us so we'll give it a chance to finish.
1775 while (is_next_marked(cur_om)) {
1776 os::naked_short_sleep(1);
1777 }
1778 // Refetch the possibly changed next field and try again.
1779 cur_om = unmarked_next(in_use_tail);
1780 continue;
1781 }
1782 if (!cur_om->is_active()) {
1783 // cur_om was deflated and the allocation state was changed
1784 // to Free while it was marked. We happened to see it just
1785 // after it was unmarked (and added to the free list).
1786 // Refetch the possibly changed next field and try again.
1787 cur_om = unmarked_next(in_use_tail);
1788 continue;
1789 }
1790 in_use_tail = cur_om;
1791 in_use_count++;
1792 cur_om = unmarked_next(cur_om);
1793 }
1794 guarantee(in_use_tail != NULL, "invariant");
1795 int l_om_in_use_count = OrderAccess::load_acquire(&self->om_in_use_count);
1796 ADIM_guarantee(l_om_in_use_count == in_use_count, "in-use counts don't "
1797 "match: l_om_in_use_count=%d, in_use_count=%d",
1798 l_om_in_use_count, in_use_count);
1799 // Clear the in-use count before unmarking the in-use list head
1800 // to avoid races:
1801 OrderAccess::release_store(&self->om_in_use_count, 0);
1802 // Clear the in-use list head (which also unmarks it):
1803 OrderAccess::release_store(&self->om_in_use_list, (ObjectMonitor*)NULL);
1804 // Unmark the disconnected list head:
1805 set_next(in_use_list, next);
1806 }
1807
1808 int free_count = 0;
1809 ObjectMonitor* free_list = OrderAccess::load_acquire(&self->om_free_list);
1810 ObjectMonitor* free_tail = NULL;
1811 if (free_list != NULL) {
1812 chk_for_list_loop(free_list, OrderAccess::load_acquire(&self->om_free_count));
1813 // The thread is going away. Set 'free_tail' to the last per-thread free
1814 // monitor which will be linked to g_free_list below.
1815 stringStream ss;
1816 for (ObjectMonitor* s = free_list; s != NULL; s = unmarked_next(s)) {
1817 free_count++;
1818 free_tail = s;
1819 guarantee(s->object() == NULL, "invariant");
1820 guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
1821 }
1822 guarantee(free_tail != NULL, "invariant");
1823 int l_om_free_count = OrderAccess::load_acquire(&self->om_free_count);
1824 ADIM_guarantee(l_om_free_count == free_count, "free counts don't match: "
1825 "l_om_free_count=%d, free_count=%d", l_om_free_count,
1826 free_count);
1827 OrderAccess::release_store(&self->om_free_list, (ObjectMonitor*)NULL);
1828 OrderAccess::release_store(&self->om_free_count, 0);
1829 }
1830
1831 if (free_tail != NULL) {
1832 prepend_list_to_g_free_list(free_list, free_tail, free_count);
1833 }
1834
1835 if (in_use_tail != NULL) {
1836 prepend_list_to_g_om_in_use_list(in_use_list, in_use_tail, in_use_count);
1837 }
1838
1839 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1840 LogStreamHandle(Info, monitorinflation) lsh_info;
1841 LogStream* ls = NULL;
1842 if (log_is_enabled(Debug, monitorinflation)) {
1843 ls = &lsh_debug;
1844 } else if ((free_count != 0 || in_use_count != 0) &&
1845 log_is_enabled(Info, monitorinflation)) {
1846 ls = &lsh_info;
1847 }
1848 if (ls != NULL) {
1849 ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d"
1850 ", in_use_count=%d" ", om_free_provision=%d",
1851 p2i(self), free_count, in_use_count, self->om_free_provision);
1852 }
1853 }
1854
1855 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
1856 const oop obj,
1857 ObjectSynchronizer::InflateCause cause) {
1858 assert(event != NULL, "invariant");
1935 //
1936 // Note that we allocate the objectmonitor speculatively, _before_ attempting
1937 // to install INFLATING into the mark word. We originally installed INFLATING,
1938 // allocated the objectmonitor, and then finally STed the address of the
1939 // objectmonitor into the mark. This was correct, but artificially lengthened
1940 // the interval in which INFLATING appeared in the mark, thus increasing
1941 // the odds of inflation contention.
1942 //
1943 // We now use per-thread private objectmonitor free lists.
1944 // These lists are reprovisioned from the global free list outside the
1945 // critical INFLATING...ST interval. A thread can transfer
1946 // multiple objectmonitors en masse from the global free list to its local free list.
1947 // This reduces coherency traffic and lock contention on the global free list.
1948 // Using such local free lists, it doesn't matter if the om_alloc() call appears
1949 // before or after the CAS(INFLATING) operation.
1950 // See the comments in om_alloc().
1951
1952 LogStreamHandle(Trace, monitorinflation) lsh;
1953
1954 if (mark.has_locker()) {
1955 ObjectMonitor* m = om_alloc(self, cause);
1956 // Optimistically prepare the objectmonitor - anticipate successful CAS
1957 // We do this before the CAS in order to minimize the length of time
1958 // in which INFLATING appears in the mark.
1959 m->Recycle();
1960 m->_Responsible = NULL;
1961 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // Consider: maintain by type/class
1962
1963 markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1964 if (cmp != mark) {
1965 om_release(self, m, true);
1966 continue; // Interference -- just retry
1967 }
1968
1969 // We've successfully installed INFLATING (0) into the mark-word.
1970 // This is the only case where 0 will appear in a mark-word.
1971 // Only the singular thread that successfully swings the mark-word
1972 // to 0 can perform (or more precisely, complete) inflation.
1973 //
1974 // Why do we CAS a 0 into the mark-word instead of just CASing the
1975 // mark-word from the stack-locked value directly to the new inflated state?
2033 }
2034 if (event.should_commit()) {
2035 post_monitor_inflate_event(&event, object, cause);
2036 }
2037 ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
2038 return;
2039 }
2040
2041 // CASE: neutral
2042 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
2043 // If we know we're inflating for entry it's better to inflate by swinging a
2044 // pre-locked ObjectMonitor pointer into the object header. A successful
2045 // CAS inflates the object *and* confers ownership to the inflating thread.
2046 // In the current implementation we use a 2-step mechanism where we CAS()
2047 // to inflate and then CAS() again to try to swing _owner from NULL to self.
2048 // An inflateTry() method that we could call from fast_enter() and slow_enter()
2049 // would be useful.
2050
2051 // Catch if the object's header is not neutral (not locked and
2052 // not marked is what we care about here).
2053   ADIM_guarantee(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
2054 ObjectMonitor* m = om_alloc(self, cause);
2055 // prepare m for installation - set monitor to initial state
2056 m->Recycle();
2057 m->set_header(mark);
2058 // If we leave _owner == DEFLATER_MARKER here, then the simple C2
2059 // ObjectMonitor enter optimization can no longer race with async
2060 // deflation and reuse.
2061 m->set_object(object);
2062 m->_Responsible = NULL;
2063 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class
2064
2065 omh_p->set_om_ptr(m);
2066 assert(m->is_new(), "freshly allocated monitor must be new");
2067 m->set_allocation_state(ObjectMonitor::Old);
2068
2069 if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
2070 guarantee(!m->owner_is_DEFLATER_MARKER() || m->ref_count() >= 0,
2071 "race between deflation and om_release() with m=" INTPTR_FORMAT
2072 ", _owner=" INTPTR_FORMAT ", ref_count=%d", p2i(m),
2073 p2i(m->_owner), m->ref_count());
2074 m->set_header(markWord::zero());
2075 m->set_object(NULL);
2076 m->Recycle();
2077 omh_p->set_om_ptr(NULL);
2078 // om_release() will reset the allocation state
2079 om_release(self, m, true);
2080 m = NULL;
2081 continue;
2082 // interference - the markword changed - just retry.
2083 // The state-transitions are one-way, so there's no chance of
2084 // live-lock -- "Inflated" is an absorbing state.
2085 }
2086
2087 // Hopefully the performance counters are allocated on distinct
2088 // cache lines to avoid false sharing on MP systems ...
2089 OM_PERFDATA_OP(Inflations, inc());
2090 if (log_is_enabled(Trace, monitorinflation)) {
2091 ResourceMark rm(self);
2092 lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
2093 INTPTR_FORMAT ", type='%s'", p2i(object),
2112 // These operations are called at all safepoints, immediately after mutators
2113 // are stopped, but before any objects have moved. Collectively they traverse
2114 // the population of in-use monitors, deflating where possible. The scavenged
2115 // monitors are returned to the global monitor free list.
2116 //
2117 // Beware that we scavenge at *every* stop-the-world point. Having a large
2118 // number of monitors in-use could negatively impact performance. We also want
2119 // to minimize the total # of monitors in circulation, as they incur a small
2120 // footprint penalty.
2121 //
2122 // Perversely, the heap size -- and thus the STW safepoint rate --
2123 // typically drives the scavenge rate. Large heaps can mean infrequent GC,
2124 // which in turn can mean large(r) numbers of ObjectMonitors in circulation.
2125 // This is an unfortunate aspect of this design.
2126 //
2127 // For async deflation:
2128 // If a special deflation request is made, then the safepoint based
2129 // deflation mechanism is used. Otherwise, an async deflation request
2130 // is registered with the ServiceThread and it is notified.
2131
2132 void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* counters) {
2133 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2134
2135 // The per-thread in-use lists are handled in
2136 // ParallelSPCleanupThreadClosure::do_thread().
2137
2138 if (!AsyncDeflateIdleMonitors || is_special_deflation_requested()) {
2139 // Use the older mechanism for the global in-use list or if a
2140 // special deflation has been requested before the safepoint.
2141 ObjectSynchronizer::deflate_idle_monitors(counters);
2142 return;
2143 }
2144
2145 log_debug(monitorinflation)("requesting async deflation of idle monitors.");
2146 // Request deflation of idle monitors by the ServiceThread:
2147 set_is_async_deflation_requested(true);
2148 MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
2149 ml.notify_all();
2150 }
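
// The request/notify handshake above, reduced to a standalone sketch.
// This models the shape of the interaction with std::condition_variable
// rather than the VM's Service_lock/MonitorLocker; request_deflation()
// and service_loop_iteration() are illustrative names, not HotSpot APIs.

#include <atomic>
#include <condition_variable>
#include <mutex>

static std::mutex toy_service_mutex;              // stands in for Service_lock
static std::condition_variable toy_service_cv;
static std::atomic<bool> toy_deflation_requested{false};

// Requesting side (here: the safepoint): record the request, then wake
// the sleeping service thread.
static void request_deflation() {
  std::lock_guard<std::mutex> guard(toy_service_mutex);
  toy_deflation_requested.store(true);
  toy_service_cv.notify_all();
}

// Service-thread side: sleep until a request arrives, then consume it.
static void service_loop_iteration() {
  std::unique_lock<std::mutex> lock(toy_service_mutex);
  toy_service_cv.wait(lock, [] { return toy_deflation_requested.load(); });
  toy_deflation_requested.store(false);
  // ... deflate idle monitors here ...
}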
2151
2152 // Deflate a single monitor if not in-use
2153 // Return true if deflated, false if in-use
2154 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
2155 ObjectMonitor** free_head_p,
2156 ObjectMonitor** free_tail_p) {
2157 bool deflated;
2158 // Normal case ... The monitor is associated with obj.
2159 const markWord mark = obj->mark();
2160 guarantee(mark == markWord::encode(mid), "should match: mark="
2161 INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
2180 "object=" INTPTR_FORMAT ", mark="
2181 INTPTR_FORMAT ", type='%s'", p2i(obj),
2182 mark.value(), obj->klass()->external_name());
2183 }
2184
2185 // Restore the header back to obj
2186 obj->release_set_mark(dmw);
2187 if (AsyncDeflateIdleMonitors) {
2188 // clear() expects the owner field to be NULL and we won't race
2189 // with the simple C2 ObjectMonitor enter optimization since
2190 // we're at a safepoint.
2191 mid->set_owner(NULL);
2192 }
2193 mid->clear();
2194
2195 assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
2196 p2i(mid->object()));
2197 assert(mid->is_free(), "invariant");
2198
2199 // Move the deflated ObjectMonitor to the working free list
2200 // defined by free_head_p and free_tail_p. No races on this list
2201 // so no need for load_acquire() or store_release().
2202 if (*free_head_p == NULL) *free_head_p = mid;
2203 if (*free_tail_p != NULL) {
2204 // We append to the list so the caller can use mid->_next_om
2205 // to fix the linkages in its context.
2206 ObjectMonitor* prevtail = *free_tail_p;
2207 // Should have been cleaned up by the caller:
2208 // Note: Should not have to mark prevtail here since we're at a
2209 // safepoint and ObjectMonitors on the local free list should
2210 // not be accessed in parallel.
2211 assert(prevtail->_next_om == NULL, "must be NULL: _next_om="
2212 INTPTR_FORMAT, p2i(prevtail->_next_om));
2213 set_next(prevtail, mid);
2214 }
2215 *free_tail_p = mid;
2216 // At this point, mid->_next_om still refers to its current
2217 // value and another ObjectMonitor's _next_om field still
2218 // refers to this ObjectMonitor. Those linkages have to be
2219 // cleaned up by the caller who has the complete context.
2220 deflated = true;
2221 }
2222 return deflated;
2223 }
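
// A toy model of the deflation step above: an idle monitor gives its
// displaced header back to the object and is appended to the caller's
// working free list. ToyMonitor and toy_deflate() are illustrative
// stand-ins, single-threaded by construction to match the at-a-safepoint
// assumption; they are not the real ObjectMonitor types.

#include <cstdint>

struct ToyMonitor {
  uintptr_t   displaced_header;   // saved mark word (dmw analogue)
  void*       owner;              // nullptr when unowned
  int         waiters;
  ToyMonitor* next;               // free-list link (_next_om analogue)
};

static bool toy_deflate(ToyMonitor* mid, uintptr_t* obj_mark,
                        ToyMonitor** free_head_p, ToyMonitor** free_tail_p) {
  if (mid->owner != nullptr || mid->waiters != 0) {
    return false;                           // in-use: leave it inflated
  }
  *obj_mark = mid->displaced_header;        // restore the header to the object
  mid->displaced_header = 0;
  mid->next = nullptr;
  if (*free_head_p == nullptr) *free_head_p = mid;
  if (*free_tail_p != nullptr) (*free_tail_p)->next = mid;
  *free_tail_p = mid;                       // append, as deflate_monitor() does
  return true;
}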
2224
2225 // Deflate the specified ObjectMonitor if not in-use using a JavaThread.
2226 // Returns true if it was deflated and false otherwise.
2227 //
2228 // The async deflation protocol sets owner to DEFLATER_MARKER and
2229 // makes ref_count negative as signals to contending threads that
2230 // an async deflation is in progress. There are a number of checks
2231 // as part of the protocol to make sure that the calling thread has
2232 // not lost the race to a contending thread or to a thread that just
2233 // wants to use the ObjectMonitor*.
2296 const oop obj = (oop) mid->object();
2297 if (log_is_enabled(Trace, monitorinflation)) {
2298 ResourceMark rm;
2299 log_trace(monitorinflation)("deflate_monitor_using_JT: "
2300 "object=" INTPTR_FORMAT ", mark="
2301 INTPTR_FORMAT ", type='%s'",
2302 p2i(obj), obj->mark().value(),
2303 obj->klass()->external_name());
2304 }
2305
2306 // Install the old mark word if nobody else has already done it.
2307 mid->install_displaced_markword_in_object(obj);
2308 mid->clear_using_JT();
2309
2310 assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT,
2311 p2i(mid->object()));
2312 assert(mid->is_free(), "must be free: allocation_state=%d",
2313 (int) mid->allocation_state());
2314
2315 // Move the deflated ObjectMonitor to the working free list
2316 // defined by free_head_p and free_tail_p. No races on this list
2317 // so no need for load_acquire() or store_release().
2318 if (*free_head_p == NULL) {
2319 // First one on the list.
2320 *free_head_p = mid;
2321 }
2322 if (*free_tail_p != NULL) {
2323 // We append to the list so the caller can use mid->_next_om
2324 // to fix the linkages in its context.
2325 ObjectMonitor* prevtail = *free_tail_p;
2326 // Should have been cleaned up by the caller:
2327 ObjectMonitor* next = mark_next_loop(prevtail);
2328 assert(unmarked_next(prevtail) == NULL, "must be NULL: _next_om="
2329 INTPTR_FORMAT, p2i(unmarked_next(prevtail)));
2330 set_next(prevtail, mid); // prevtail now points to mid (and is unmarked)
2331 }
2332 *free_tail_p = mid;
2333
2334 // At this point, mid->_next_om still refers to its current
2335 // value and another ObjectMonitor's _next_om field still
2336 // refers to this ObjectMonitor. Those linkages have to be
2337 // cleaned up by the caller who has the complete context.
2338
2339 // We leave owner == DEFLATER_MARKER and ref_count < 0
2340 // to force any racing threads to retry.
2341 return true; // Success, ObjectMonitor has been deflated.
2342 }
2343
2344 // The owner was changed from DEFLATER_MARKER so we lost the
2345 // race since the ObjectMonitor is now busy.
2346
2347 // Add back max_jint to restore the ref_count field to its
2348 // proper value (which may not be what we saw above):
2349 Atomic::add(max_jint, &mid->_ref_count);
2350
2351 assert(mid->ref_count() >= 0, "must not be negative: ref_count=%d",
2352 mid->ref_count());
2353 return false;
2354 }
2355
2356 // The ref_count was no longer 0 so we lost the race since the
2357 // ObjectMonitor is now busy or the ObjectMonitor* is now in use.
2358 // Restore owner to NULL if it is still DEFLATER_MARKER:
2359 Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER);
2360 }
2361
2362 // The owner field is no longer NULL so we lost the race since the
2363 // ObjectMonitor is now busy.
2364 return false;
2365 }
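
// The failure paths above are easier to see in a reduced model. This
// sketch keeps only the owner and ref_count steps of the async protocol
// (the waiters check and the re-verification of the owner field are
// omitted); ToyAsyncMonitor and toy_async_deflate() are illustrative,
// not the real code.

#include <atomic>
#include <climits>

static void* const TOY_DEFLATER = reinterpret_cast<void*>(-1);

struct ToyAsyncMonitor {
  std::atomic<void*> owner{nullptr};
  std::atomic<int>   ref_count{0};
};

static bool toy_async_deflate(ToyAsyncMonitor* m) {
  // Step 1: claim an unowned monitor by installing the deflater marker.
  void* expected = nullptr;
  if (!m->owner.compare_exchange_strong(expected, TOY_DEFLATER)) {
    return false;                      // owner was not nullptr: busy
  }
  // Step 2: force ref_count negative so racing users back off and retry.
  if (m->ref_count.fetch_sub(INT_MAX) != 0) {
    // Lost the race: someone held a reference. Undo both steps.
    m->ref_count.fetch_add(INT_MAX);   // "add back max_jint", as above
    expected = TOY_DEFLATER;
    m->owner.compare_exchange_strong(expected, nullptr);
    return false;
  }
  // Success: owner stays TOY_DEFLATER and ref_count stays negative to
  // force any racing threads to retry.
  return true;
}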
2366
2367 // Walk a given monitor list, and deflate idle monitors.
2368 // The given list could be a per-thread list or a global list.
2369 //
2370 // In the case of parallel processing of thread-local monitor lists,
2371 // work is done by Threads::parallel_threads_do(), which ensures that
2372 // each Java thread is processed by exactly one worker thread, and
2373 // thus avoids conflicts that would arise if worker threads processed
2374 // the same monitor lists concurrently.
2375 //
2376 // See also ParallelSPCleanupTask and
2377 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
2378 // Threads::parallel_java_threads_do() in thread.cpp.
2379 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor* volatile * list_p,
2380 int volatile * count_p,
2381 ObjectMonitor** free_head_p,
2382 ObjectMonitor** free_tail_p) {
2383 ObjectMonitor* cur_mid_in_use = NULL;
2384 ObjectMonitor* mid = NULL;
2385 ObjectMonitor* next = NULL;
2386 int deflated_count = 0;
2387
2388 // We use the simpler mark-mid-as-we-go protocol since there are no
2389 // parallel list deletions since we are at a safepoint.
2390 if (!mark_list_head(list_p, &mid, &next)) {
2391 return 0; // The list is empty so nothing to deflate.
2392 }
2393
2394 while (true) {
2395 oop obj = (oop) mid->object();
2396 if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
2397 // Deflation succeeded and already updated free_head_p and
2398 // free_tail_p as needed. Finish the move to the local free list
2399 // by unlinking mid from the global or per-thread in-use list.
2400 if (Atomic::cmpxchg(next, list_p, mid) != mid) {
2401 // We could not switch the list head to next.
2402 ADIM_guarantee(cur_mid_in_use != NULL, "must not be NULL");
2403 if (Atomic::cmpxchg(next, &cur_mid_in_use->_next_om, mid) != mid) {
2404 // deflate_monitor_list() is called at a safepoint so the
2405 // global or per-thread in-use list should not be modified
2406 // in parallel so we:
2407 fatal("mid=" INTPTR_FORMAT " must be referred to by the list head: "
2408 "list_p=" INTPTR_FORMAT " or by cur_mid_in_use's next field: "
2409 "cur_mid_in_use=" INTPTR_FORMAT ", next_om=" INTPTR_FORMAT,
2410 p2i(mid), p2i((ObjectMonitor**)list_p), p2i(cur_mid_in_use),
2411 p2i(cur_mid_in_use->_next_om));
2412 }
2413 }
2414 // At this point mid is disconnected from the in-use list so
2415 // its marked next field no longer has any effects.
2416 deflated_count++;
2417 Atomic::dec(count_p);
2418 chk_for_list_loop(OrderAccess::load_acquire(list_p),
2419 OrderAccess::load_acquire(count_p));
2420 chk_om_not_on_list(mid, OrderAccess::load_acquire(list_p),
2421 OrderAccess::load_acquire(count_p));
2422 // mid is current tail in the free_head_p list so NULL terminate it
2423 // (which also unmarks it):
2424 set_next(mid, NULL);
2425
2426 // All the list management is done so move on to the next one:
2427 mid = next;
2428 } else {
2429 set_next(mid, next); // unmark next field
2430
2431 // All the list management is done so move on to the next one:
2432 cur_mid_in_use = mid;
2433 mid = next;
2434 }
2435 if (mid == NULL) {
2436 break; // Reached end of the list so nothing more to deflate.
2437 }
2438 // Mark mid's next field so we can possibly deflate it:
2439 next = mark_next_loop(mid);
2440 }
2441 return deflated_count;
2442 }
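
// The unlink logic above in isolation: first try to swing the list head
// from mid to next; if mid was not the head, swing the predecessor's
// next field instead. ToyListNode and toy_unlink() are illustrative
// names; at a safepoint exactly one of the two CASes must succeed,
// which is what the fatal() above enforces.

#include <atomic>

struct ToyListNode { std::atomic<ToyListNode*> next{nullptr}; };

static bool toy_unlink(std::atomic<ToyListNode*>* list_head,
                       ToyListNode* prev, ToyListNode* mid,
                       ToyListNode* next) {
  ToyListNode* expected = mid;
  if (list_head->compare_exchange_strong(expected, next)) {
    return true;                               // mid was the list head
  }
  expected = mid;
  return prev != nullptr &&
         prev->next.compare_exchange_strong(expected, next);
}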
2443
2444 // Walk a given ObjectMonitor list and deflate idle ObjectMonitors using
2445 // a JavaThread. Returns the number of deflated ObjectMonitors. The given
2446 // list could be a per-thread in-use list or the global in-use list.
2447 // If a safepoint has started, then we save state via saved_mid_in_use_p
2448 // and return to the caller to honor the safepoint.
2449 //
2450 int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor* volatile * list_p,
2451 int volatile * count_p,
2452 ObjectMonitor** free_head_p,
2453 ObjectMonitor** free_tail_p,
2454 ObjectMonitor** saved_mid_in_use_p) {
2455 assert(AsyncDeflateIdleMonitors, "sanity check");
2456 assert(Thread::current()->is_Java_thread(), "precondition");
2457
2458 ObjectMonitor* cur_mid_in_use = NULL;
2459 ObjectMonitor* mid = NULL;
2460 ObjectMonitor* next = NULL;
2461 ObjectMonitor* next_next = NULL;
2462 int deflated_count = 0;
2463
2464 // We use the more complicated mark-cur_mid_in_use-and-mid-as-we-go
2465 // protocol because om_release() can do list deletions in parallel.
2466 // We also mark-next-next-as-we-go to prevent an om_flush() that is
2467 // behind this thread from passing us.
2468 if (*saved_mid_in_use_p == NULL) {
2469 // No saved state so start at the beginning.
2470 // Mark the list head's next field so we can possibly deflate it:
2471 if (!mark_list_head(list_p, &mid, &next)) {
2472 return 0; // The list is empty so nothing to deflate.
2473 }
2474 } else {
2475 // We're restarting after a safepoint so restore the necessary state
2476 // before we resume.
2477 cur_mid_in_use = *saved_mid_in_use_p;
2478 // Mark cur_mid_in_use's next field so we can possibly update its
2479 // next field to extract a deflated ObjectMonitor.
2480 mid = mark_next_loop(cur_mid_in_use);
2481 if (mid == NULL) {
2482 set_next(cur_mid_in_use, NULL); // unmark next field
2483 *saved_mid_in_use_p = NULL;
2484 return 0; // The remainder is empty so nothing more to deflate.
2485 }
2486 // Mark mid's next field so we can possibly deflate it:
2487 next = mark_next_loop(mid);
2488 }
2489
2490 while (true) {
2491 // The current mid's next field is marked at this point. If we have
2492 // a cur_mid_in_use, then its next field is also marked at this point.
2493
2494 if (next != NULL) {
2495 // We mark the next -> next field so that an om_flush()
2496 // thread that is behind us cannot pass us when we
2497 // unmark the current mid's next field.
2498 next_next = mark_next_loop(next);
2499 }
2500
2501 // Only try to deflate if there is an associated Java object and if
2502 // mid is old (is not newly allocated and is not newly freed).
2503 if (mid->object() != NULL && mid->is_old() &&
2504 deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) {
2505 // Deflation succeeded and already updated free_head_p and
2506 // free_tail_p as needed. Finish the move to the local free list
2507 // by unlinking mid from the global or per-thread in-use list.
2508 if (Atomic::cmpxchg(next, list_p, mid) != mid) {
2509 // We could not switch the list head to next.
2510 ObjectMonitor* marked_mid = mark_om_ptr(mid);
2511 ObjectMonitor* marked_next = mark_om_ptr(next);
2512 // Switch cur_mid_in_use's next field to marked next:
2513 ADIM_guarantee(cur_mid_in_use != NULL, "must not be NULL");
2514 if (Atomic::cmpxchg(marked_next, &cur_mid_in_use->_next_om,
2515 marked_mid) != marked_mid) {
2516 // We could not switch cur_mid_in_use's next field. This
2517 // should not be possible since it was marked so we:
2518 fatal("mid=" INTPTR_FORMAT " must be referred to by the list head: "
2519 "&list_p=" INTPTR_FORMAT " or by cur_mid_in_use's next field: "
2520 "cur_mid_in_use=" INTPTR_FORMAT ", next_om=" INTPTR_FORMAT,
2521 p2i(mid), p2i((ObjectMonitor**)list_p), p2i(cur_mid_in_use),
2522 p2i(cur_mid_in_use->_next_om));
2523 }
2524 }
2525 // At this point mid is disconnected from the in-use list so
2526 // its marked next field no longer has any effects.
2527 deflated_count++;
2528 Atomic::dec(count_p);
2529 chk_for_list_loop(OrderAccess::load_acquire(list_p),
2530 OrderAccess::load_acquire(count_p));
2531 chk_om_not_on_list(mid, OrderAccess::load_acquire(list_p),
2532 OrderAccess::load_acquire(count_p));
2533 // mid is current tail in the free_head_p list so NULL terminate it
2534 // (which also unmarks it):
2535 set_next(mid, NULL);
2536
2537 // All the list management is done so move on to the next one:
2538 mid = next; // mid keeps non-NULL next's marked next field
2539 next = next_next;
2540 } else {
2541 // mid is considered in-use if it does not have an associated
2542 // Java object or mid is not old or deflation did not succeed.
2543 // A mid->is_new() node can be seen here when it is freshly
2544 // returned by om_alloc() (and skips the deflation code path).
2545 // A mid->is_old() node can be seen here when deflation failed.
2546 // A mid->is_free() node can be seen here when a fresh node from
2547 // om_alloc() is released by om_release() due to losing the race
2548 // in inflate().
2549
2550 // All the list management is done so move on to the next one:
2551 if (cur_mid_in_use != NULL) {
2552 set_next(cur_mid_in_use, mid); // unmark cur_mid_in_use
2553 }
2554 // The next cur_mid_in_use keeps mid's marked next field so
2555 // that it is stable for a possible next field change. It
2556 // cannot be modified by om_release() while it is marked.
2557 cur_mid_in_use = mid;
2558 mid = next; // mid keeps non-NULL next's marked next field
2559 next = next_next;
2560
2561 if (SafepointSynchronize::is_synchronizing() &&
2562 cur_mid_in_use != OrderAccess::load_acquire(list_p) &&
2563 cur_mid_in_use->is_old()) {
2564 // If a safepoint has started and cur_mid_in_use is not the list
2565 // head and is old, then it is safe to use as saved state. Return
2566 // to the caller before blocking.
2567 *saved_mid_in_use_p = cur_mid_in_use;
2568 set_next(cur_mid_in_use, mid); // unmark cur_mid_in_use
2569 if (mid != NULL) {
2570 set_next(mid, next); // unmark mid
2571 }
2572 return deflated_count;
2573 }
2574 }
2575 if (mid == NULL) {
2576 if (cur_mid_in_use != NULL) {
2577 set_next(cur_mid_in_use, mid); // unmark cur_mid_in_use
2578 }
2579 break; // Reached end of the list so nothing more to deflate.
2580 }
2581
2582 // The current mid's next field is marked at this point. If we have
2583 // a cur_mid_in_use, then its next field is also marked at this point.
2584 }
2585 // We finished the list without a safepoint starting so there's
2586 // no need to save state.
2587 *saved_mid_in_use_p = NULL;
2588 return deflated_count;
2589 }
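
// The mark/unmark helpers used throughout the list code can be modeled
// as low-bit pointer tagging, sketched here under the assumption that
// ObjectMonitors are at least 2-byte aligned so bit 0 of a next pointer
// is always free. toy_mark/toy_unmark/toy_is_marked are illustrative
// names, not the actual HotSpot helpers.

#include <cstdint>

template <typename T>
static T* toy_mark(T* p) {
  return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(p) | uintptr_t(1));
}

template <typename T>
static T* toy_unmark(T* p) {
  return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(p) & ~uintptr_t(1));
}

template <typename T>
static bool toy_is_marked(T* p) {
  return (reinterpret_cast<uintptr_t>(p) & uintptr_t(1)) != 0;
}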
2590
2591 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2592 OrderAccess::release_store(&counters->n_in_use, 0); // currently associated with objects
2593 OrderAccess::release_store(&counters->n_in_circulation, 0); // extant
2594 OrderAccess::release_store(&counters->n_scavenged, 0); // reclaimed (global and per-thread)
2595 OrderAccess::release_store(&counters->per_thread_scavenged, 0); // per-thread scavenge total
2596 counters->per_thread_times = 0.0; // per-thread scavenge times
2597 }
2598
2599 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
2600 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2601
2602 if (AsyncDeflateIdleMonitors) {
2603 // Nothing to do when global idle ObjectMonitors are deflated using
2604 // a JavaThread unless a special deflation has been requested.
2605 if (!is_special_deflation_requested()) {
2606 return;
2607 }
2608 }
2609
2610 bool deflated = false;
2611
2612 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors
2613 ObjectMonitor* free_tail_p = NULL;
2614 elapsedTimer timer;
2615
2616 if (log_is_enabled(Info, monitorinflation)) {
2617 timer.start();
2618 }
2619
2620 // Note: the thread-local monitors lists get deflated in
2621 // a separate pass. See deflate_thread_local_monitors().
2622
2623 // For moribund threads, scan g_om_in_use_list
2624 int deflated_count = 0;
2625 if (OrderAccess::load_acquire(&g_om_in_use_list) != NULL) {
2626 // Update n_in_circulation before g_om_in_use_count is updated by deflation.
2627 Atomic::add(OrderAccess::load_acquire(&g_om_in_use_count), &counters->n_in_circulation);
2628
2629 deflated_count = deflate_monitor_list(&g_om_in_use_list, &g_om_in_use_count, &free_head_p, &free_tail_p);
2630 Atomic::add(OrderAccess::load_acquire(&g_om_in_use_count), &counters->n_in_use);
2631 }
2632
2633 if (free_head_p != NULL) {
2634 // Move the deflated ObjectMonitors back to the global free list.
2635 // No races on the working free list so no need for load_acquire().
2636 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
2637 assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
2638 INTPTR_FORMAT, p2i(free_tail_p->_next_om));
2639 prepend_list_to_g_free_list(free_head_p, free_tail_p, deflated_count);
2640 Atomic::add(deflated_count, &counters->n_scavenged);
2641 }
2642 timer.stop();
2643
2644 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2645 LogStreamHandle(Info, monitorinflation) lsh_info;
2646 LogStream* ls = NULL;
2647 if (log_is_enabled(Debug, monitorinflation)) {
2648 ls = &lsh_debug;
2649 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2650 ls = &lsh_info;
2651 }
2652 if (ls != NULL) {
2653 ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
2654 }
2655 }
2656
2657 // Deflate global idle ObjectMonitors using a JavaThread.
2658 //
2659 void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() {
2660 assert(AsyncDeflateIdleMonitors, "sanity check");
2661 assert(Thread::current()->is_Java_thread(), "precondition");
2662 JavaThread* self = JavaThread::current();
2663
2664 deflate_common_idle_monitors_using_JT(true /* is_global */, self);
2665 }
2666
2667 // Deflate the specified JavaThread's idle ObjectMonitors using a JavaThread.
2668 //
2669 void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT(JavaThread* target) {
2670 assert(AsyncDeflateIdleMonitors, "sanity check");
2671 assert(Thread::current()->is_Java_thread(), "precondition");
2672
2673 deflate_common_idle_monitors_using_JT(false /* !is_global */, target);
2674 }
2675
2676 // Deflate global or per-thread idle ObjectMonitors using a JavaThread.
2677 //
2678 void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* target) {
2679 JavaThread* self = JavaThread::current();
2680
2681 int deflated_count = 0;
2682 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged ObjectMonitors
2683 ObjectMonitor* free_tail_p = NULL;
2684 ObjectMonitor* saved_mid_in_use_p = NULL;
2685 elapsedTimer timer;
2686
2687 if (log_is_enabled(Info, monitorinflation)) {
2688 timer.start();
2689 }
2690
2691 if (is_global) {
2692 OM_PERFDATA_OP(MonExtant, set_value(OrderAccess::load_acquire(&g_om_in_use_count)));
2693 } else {
2694 OM_PERFDATA_OP(MonExtant, inc(OrderAccess::load_acquire(&target->om_in_use_count)));
2695 }
2696
2697 do {
2698 int local_deflated_count;
2699 if (is_global) {
2700 local_deflated_count = deflate_monitor_list_using_JT(&g_om_in_use_list, &g_om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
2701 } else {
2702 local_deflated_count = deflate_monitor_list_using_JT(&target->om_in_use_list, &target->om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
2703 }
2704 deflated_count += local_deflated_count;
2705
2706 if (free_head_p != NULL) {
2707 // Move the deflated ObjectMonitors to the global free list.
2708 // No races on the working list so no need for load_acquire().
2709 guarantee(free_tail_p != NULL && local_deflated_count > 0, "free_tail_p=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(free_tail_p), local_deflated_count);
2710 // Note: The target thread can be doing an om_alloc() that
2711 // is trying to prepend an ObjectMonitor on its in-use list
2712 // at the same time that we have deflated the current in-use
2713 // list head and put it on the local free list. prepend_to_common()
2714 // will detect the race and retry which avoids list corruption,
2715 // but the next field in free_tail_p can flicker to marked
2716 // and then unmarked while prepend_to_common() is sorting it
2717 // all out.
2718 assert(unmarked_next(free_tail_p) == NULL, "must be NULL: _next_om="
2719 INTPTR_FORMAT, p2i(unmarked_next(free_tail_p)));
2720
2721 prepend_list_to_g_free_list(free_head_p, free_tail_p, local_deflated_count);
2722
2723 OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
2724 }
2725
2726 if (saved_mid_in_use_p != NULL) {
2727 // deflate_monitor_list_using_JT() detected a safepoint starting.
2728 timer.stop();
2729 {
2730 if (is_global) {
2731 log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
2732 } else {
2733 log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(target));
2734 }
2735 assert(SafepointSynchronize::is_synchronizing(), "sanity check");
2736 ThreadBlockInVM blocker(self);
2737 }
2738 // Prepare for another loop after the safepoint.
2739 free_head_p = NULL;
2740 free_tail_p = NULL;
2741 if (log_is_enabled(Info, monitorinflation)) {
2742 timer.start();
2743 }
2744 }
2745 } while (saved_mid_in_use_p != NULL);
2746 timer.stop();
2747
2748 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2749 LogStreamHandle(Info, monitorinflation) lsh_info;
2750 LogStream* ls = NULL;
2751 if (log_is_enabled(Debug, monitorinflation)) {
2752 ls = &lsh_debug;
2753 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2754 ls = &lsh_info;
2755 }
2756 if (ls != NULL) {
2757 if (is_global) {
2758 ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
2759 } else {
2760 ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(target), timer.seconds(), deflated_count);
2761 }
2762 }
2763 }
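
// The pause/resume shape of the loop above, reduced to a generic sketch.
// do_some_work() stands in for deflate_monitor_list_using_JT() (it sets
// cursor->resume_point when it bails out for a safepoint) and
// block_briefly() for the ThreadBlockInVM scope; all names here are
// illustrative.

struct ToyCursor { void* resume_point = nullptr; };

template <typename WorkFn, typename BlockFn>
static int toy_drain_with_pauses(ToyCursor* cursor, WorkFn do_some_work,
                                 BlockFn block_briefly) {
  int total = 0;
  do {
    total += do_some_work(cursor);     // sets resume_point if it bailed out
    if (cursor->resume_point != nullptr) {
      block_briefly();                 // honor the pending safepoint
    }
  } while (cursor->resume_point != nullptr);
  return total;
}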
2764
2765 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2766 // Report the cumulative time for deflating each thread's idle
2767 // monitors. Note: if the work is split among more than one
2768 // worker thread, then the reported time will likely be more
2769 // than a beginning to end measurement of the phase.
2770 // Note: AsyncDeflateIdleMonitors only deflates per-thread idle
2771 // monitors at a safepoint when a special deflation has been requested.
2772 log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d",
2773 counters->per_thread_times,
2774 OrderAccess::load_acquire(&counters->per_thread_scavenged));
2775
2776 bool needs_special_deflation = is_special_deflation_requested();
2777 if (!AsyncDeflateIdleMonitors || needs_special_deflation) {
2778 // AsyncDeflateIdleMonitors does not use these counters unless
2779 // there is a special deflation request.
2780
2781 OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
2782 OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
2783 }
2784
2785 if (log_is_enabled(Debug, monitorinflation)) {
2786 // exit_globals()'s call to audit_and_print_stats() is done
2787 // at the Info level.
2788 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
2789 } else if (log_is_enabled(Info, monitorinflation)) {
2790 log_info(monitorinflation)("g_om_population=%d, g_om_in_use_count=%d, "
2791 "g_om_free_count=%d",
2792 OrderAccess::load_acquire(&g_om_population),
2793 OrderAccess::load_acquire(&g_om_in_use_count),
2794 OrderAccess::load_acquire(&g_om_free_count));
2795 }
2796
2797 ForceMonitorScavenge = 0; // Reset
2798 GVars.stw_random = os::random();
2799 GVars.stw_cycle++;
2800 if (needs_special_deflation) {
2801 set_is_special_deflation_requested(false); // special deflation is done
2802 }
2803 }
2804
2805 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
2806 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2807
2808 if (AsyncDeflateIdleMonitors && !is_special_deflation_requested()) {
2809 // Nothing to do if a special deflation has NOT been requested.
2810 return;
2811 }
2812
2813 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors
2814 ObjectMonitor* free_tail_p = NULL;
2815 elapsedTimer timer;
2816
2817 if (log_is_enabled(Info, safepoint, cleanup) ||
2818 log_is_enabled(Info, monitorinflation)) {
2819 timer.start();
2820 }
2821
2822 // Update n_in_circulation before om_in_use_count is updated by deflation.
2823 Atomic::add(OrderAccess::load_acquire(&thread->om_in_use_count), &counters->n_in_circulation);
2824
2825 int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
2826 Atomic::add(OrderAccess::load_acquire(&thread->om_in_use_count), &counters->n_in_use);
2827
2828 if (free_head_p != NULL) {
2829 // Move the deflated ObjectMonitors back to the global free list.
2830 // No races on the working list so no need for load_acquire().
2831 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
2832 assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
2833 INTPTR_FORMAT, p2i(free_tail_p->_next_om));
2834 prepend_list_to_g_free_list(free_head_p, free_tail_p, deflated_count);
2835 Atomic::add(deflated_count, &counters->n_scavenged);
2836 Atomic::add(deflated_count, &counters->per_thread_scavenged);
2837 }
2838
2839 timer.stop();
2840 // Safepoint logging cares about cumulative per_thread_times and
2841 // we'll capture most of the cost, but not the muxRelease() which
2842 // should be cheap.
2843 counters->per_thread_times += timer.seconds();
2844
2845 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2846 LogStreamHandle(Info, monitorinflation) lsh_info;
2847 LogStream* ls = NULL;
2848 if (log_is_enabled(Debug, monitorinflation)) {
2849 ls = &lsh_debug;
2850 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2851 ls = &lsh_info;
2852 }
2853 if (ls != NULL) {
2854 ls->print_cr("jt=" INTPTR_FORMAT ": deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(thread), timer.seconds(), deflated_count);
2855 }
2856 }
2857
2858 // Monitor cleanup on JavaThread::exit
2859
2860 // Iterate through monitor cache and attempt to release thread's monitors
2861 // Gives up on a particular monitor if an exception occurs, but continues
2862 // the overall iteration, swallowing the exception.
2863 class ReleaseJavaMonitorsClosure: public MonitorClosure {
2864 private:
2875
2876 // Release all inflated monitors owned by THREAD. Lightweight monitors are
2877 // ignored. This is meant to be called during JNI thread detach which assumes
2878 // all remaining monitors are heavyweight. All exceptions are swallowed.
2879 // Scanning the extant monitor list can be time consuming.
2880 // A simple optimization is to add a per-thread flag that indicates a thread
2881 // called jni_monitorenter() during its lifetime.
2882 //
2883 // Instead of NoSafepointVerifier it might be cheaper to
2884 // use an idiom of the form:
2885 // auto int tmp = SafepointSynchronize::_safepoint_counter ;
2886 // <code that must not run at safepoint>
2887 // guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
2888 // Since the tests are extremely cheap we could leave them enabled
2889 // for normal product builds.
2890
2891 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
2892 assert(THREAD == JavaThread::current(), "must be current Java thread");
2893 NoSafepointVerifier nsv;
2894 ReleaseJavaMonitorsClosure rjmc(THREAD);
2895 ObjectSynchronizer::monitors_iterate(&rjmc);
2896 THREAD->clear_pending_exception();
2897 }
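
// The counter idiom from the comment above, written out. It assumes the
// convention that the safepoint counter is incremented both when a
// safepoint begins and when it ends, so it is odd exactly while a
// safepoint is in progress. The check then passes only if the counter
// was even at the start (not at a safepoint) and unchanged at the end
// (no safepoint ran in between). toy_no_safepoint_check() is an
// illustrative sketch, not a HotSpot facility.

#include <cassert>

static void toy_no_safepoint_check(const volatile unsigned* counter_addr) {
  unsigned tmp = *counter_addr;
  // ... code that must not run across a safepoint ...
  assert(((tmp ^ *counter_addr) | (tmp & 1)) == 0,
         "code unexpectedly ran across a safepoint");
}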
2898
2899 const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
2900 switch (cause) {
2901 case inflate_cause_vm_internal: return "VM Internal";
2902 case inflate_cause_monitor_enter: return "Monitor Enter";
2903 case inflate_cause_wait: return "Monitor Wait";
2904 case inflate_cause_notify: return "Monitor Notify";
2905 case inflate_cause_hash_code: return "Monitor Hash Code";
2906 case inflate_cause_jni_enter: return "JNI Monitor Enter";
2907 case inflate_cause_jni_exit: return "JNI Monitor Exit";
2908 default:
2909 ShouldNotReachHere();
2910 }
2911 return "Unknown";
2912 }
2913
2914 //------------------------------------------------------------------------------
2915 // Debugging code
2929 u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
2930 return (u_char*)&GVars.stw_random;
2931 }
2932
2933 void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
2934 assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");
2935
2936 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2937 LogStreamHandle(Info, monitorinflation) lsh_info;
2938 LogStreamHandle(Trace, monitorinflation) lsh_trace;
2939 LogStream* ls = NULL;
2940 if (log_is_enabled(Trace, monitorinflation)) {
2941 ls = &lsh_trace;
2942 } else if (log_is_enabled(Debug, monitorinflation)) {
2943 ls = &lsh_debug;
2944 } else if (log_is_enabled(Info, monitorinflation)) {
2945 ls = &lsh_info;
2946 }
2947 assert(ls != NULL, "sanity check");
2948
2949 // Log counts for the global and per-thread monitor lists:
2950 int chk_om_population = log_monitor_list_counts(ls);
2951 int error_cnt = 0;
2952
2953 ls->print_cr("Checking global lists:");
2954
2955 // Check g_om_population:
2956 if (OrderAccess::load_acquire(&g_om_population) == chk_om_population) {
2957 ls->print_cr("g_om_population=%d equals chk_om_population=%d",
2958 OrderAccess::load_acquire(&g_om_population),
2959 chk_om_population);
2960 } else {
2961 ls->print_cr("ERROR: g_om_population=%d is not equal to "
2962 "chk_om_population=%d",
2963 OrderAccess::load_acquire(&g_om_population),
2964 chk_om_population);
2965 error_cnt++;
2966 }
2967
2968 // Check g_om_in_use_list and g_om_in_use_count:
2969 chk_global_in_use_list_and_count(ls, &error_cnt);
2970
2971 // Check g_free_list and g_om_free_count:
2972 chk_global_free_list_and_count(ls, &error_cnt);
2973
2974 ls->print_cr("Checking per-thread lists:");
2975
2976 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2977 // Check om_in_use_list and om_in_use_count:
2978 chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);
2979
2980 // Check om_free_list and om_free_count:
2981 chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
2982 }
2983
2984 if (error_cnt == 0) {
2985 ls->print_cr("No errors found in monitor list checks.");
2986 } else {
2987 log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
2988 }
2989
2990 if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
2991 (!on_exit && log_is_enabled(Trace, monitorinflation))) {
2992 // When exiting this log output is at the Info level. When called
2993 // at a safepoint, this log output is at the Trace level since
2994 // there can be a lot of it.
2995 log_in_use_monitor_details(ls);
2996 }
2997
2998 ls->flush();
2999
3000 guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
3001 }
3002
3003 // Check a free monitor entry; log any errors.
3004 void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n,
3005 outputStream * out, int *error_cnt_p) {
3006 stringStream ss;
3007 if (n->is_busy()) {
3008 if (jt != NULL) {
3009 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3010 ": free per-thread monitor must not be busy: %s", p2i(jt),
3011 p2i(n), n->is_busy_to_string(&ss));
3012 } else {
3013 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
3014 "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
3015 }
3031 }
3032 if (n->object() != NULL) {
3033 if (jt != NULL) {
3034 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3035 ": free per-thread monitor must have NULL _object "
3036 "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
3037 p2i(n->object()));
3038 } else {
3039 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
3040 "must have NULL _object field: _object=" INTPTR_FORMAT,
3041 p2i(n), p2i(n->object()));
3042 }
3043 *error_cnt_p = *error_cnt_p + 1;
3044 }
3045 }
3046
3047 // Check the global free list and count; log the results of the checks.
3048 void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
3049 int *error_cnt_p) {
3050 int chk_om_free_count = 0;
3051 for (ObjectMonitor* n = OrderAccess::load_acquire(&g_free_list); n != NULL; n = unmarked_next(n)) {
3052 chk_free_entry(NULL /* jt */, n, out, error_cnt_p);
3053 chk_om_free_count++;
3054 }
3055 if (OrderAccess::load_acquire(&g_om_free_count) == chk_om_free_count) {
3056 out->print_cr("g_om_free_count=%d equals chk_om_free_count=%d",
3057 OrderAccess::load_acquire(&g_om_free_count),
3058 chk_om_free_count);
3059 } else {
3060 // With lock free access to g_free_list, it is possible for an
3061 // ObjectMonitor to be prepended to g_free_list after we started
3062 // calculating chk_om_free_count so g_om_free_count may not
3063 // match anymore.
3064 out->print_cr("WARNING: g_om_free_count=%d is not equal to "
3065 "chk_om_free_count=%d",
3066 OrderAccess::load_acquire(&g_om_free_count),
3067 chk_om_free_count);
3068 }
3069 }
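
// Why the mismatch above is only a WARNING: prepends to the free list
// are lock-free, and if the count is updated after the list link (an
// assumption of this sketch), a checker can observe the new node while
// still reading the old count. Toy names below, not the actual
// prepend_list_to_g_free_list() code:

#include <atomic>

struct ToyFreeNode { std::atomic<ToyFreeNode*> next{nullptr}; };

static std::atomic<ToyFreeNode*> toy_free_list{nullptr};
static std::atomic<int>          toy_free_count{0};

static void toy_prepend(ToyFreeNode* n) {
  ToyFreeNode* head = toy_free_list.load();
  do {
    n->next.store(head);               // head is refreshed by the CAS below
  } while (!toy_free_list.compare_exchange_weak(head, n));
  toy_free_count.fetch_add(1);         // count trails the list: a concurrent
                                       // walker can see n but the old count
}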
3070
3071 // Check the global in-use list and count; log the results of the checks.
3072 void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
3073 int *error_cnt_p) {
3074 int chk_om_in_use_count = 0;
3075 for (ObjectMonitor* n = OrderAccess::load_acquire(&g_om_in_use_list); n != NULL; n = unmarked_next(n)) {
3076 chk_in_use_entry(NULL /* jt */, n, out, error_cnt_p);
3077 chk_om_in_use_count++;
3078 }
3079 if (OrderAccess::load_acquire(&g_om_in_use_count) == chk_om_in_use_count) {
3080 out->print_cr("g_om_in_use_count=%d equals chk_om_in_use_count=%d",
3081 OrderAccess::load_acquire(&g_om_in_use_count),
3082 chk_om_in_use_count);
3083 } else {
3084 out->print_cr("ERROR: g_om_in_use_count=%d is not equal to chk_om_in_use_count=%d",
3085 OrderAccess::load_acquire(&g_om_in_use_count),
3086 chk_om_in_use_count);
3087 *error_cnt_p = *error_cnt_p + 1;
3088 }
3089 }
3090
3091 // Check an in-use monitor entry; log any errors.
3092 void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
3093 outputStream * out, int *error_cnt_p) {
3094 if (n->header().value() == 0) {
3095 if (jt != NULL) {
3096 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3097 ": in-use per-thread monitor must have non-NULL _header "
3098 "field.", p2i(jt), p2i(n));
3099 } else {
3100 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
3101 "must have non-NULL _header field.", p2i(n));
3102 }
3103 *error_cnt_p = *error_cnt_p + 1;
3104 }
3105 if (n->object() == NULL) {
3106 if (jt != NULL) {
3135 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3136 ": in-use per-thread monitor's object does not refer "
3137 "to the same monitor: obj=" INTPTR_FORMAT ", mark="
3138 INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
3139 p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
3140 } else {
3141 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
3142 "monitor's object does not refer to the same monitor: obj="
3143 INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
3144 INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
3145 }
3146 *error_cnt_p = *error_cnt_p + 1;
3147 }
3148 }
3149
3150 // Check the thread's free list and count; log the results of the checks.
3151 void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
3152 outputStream * out,
3153 int *error_cnt_p) {
3154 int chk_om_free_count = 0;
3155 for (ObjectMonitor* n = OrderAccess::load_acquire(&jt->om_free_list); n != NULL; n = unmarked_next(n)) {
3156 chk_free_entry(jt, n, out, error_cnt_p);
3157 chk_om_free_count++;
3158 }
3159 if (OrderAccess::load_acquire(&jt->om_free_count) == chk_om_free_count) {
3160 out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals "
3161 "chk_om_free_count=%d", p2i(jt),
3162 OrderAccess::load_acquire(&jt->om_free_count),
3163 chk_om_free_count);
3164 } else {
3165 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not "
3166 "equal to chk_om_free_count=%d", p2i(jt),
3167 OrderAccess::load_acquire(&jt->om_free_count),
3168 chk_om_free_count);
3169 *error_cnt_p = *error_cnt_p + 1;
3170 }
3171 }
3172
3173 // Check the thread's in-use list and count; log the results of the checks.
3174 void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
3175 outputStream * out,
3176 int *error_cnt_p) {
3177 int chk_om_in_use_count = 0;
3178 for (ObjectMonitor* n = OrderAccess::load_acquire(&jt->om_in_use_list); n != NULL; n = unmarked_next(n)) {
3179 chk_in_use_entry(jt, n, out, error_cnt_p);
3180 chk_om_in_use_count++;
3181 }
3182 if (OrderAccess::load_acquire(&jt->om_in_use_count) == chk_om_in_use_count) {
3183 out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
3184 "chk_om_in_use_count=%d", p2i(jt),
3185 OrderAccess::load_acquire(&jt->om_in_use_count),
3186 chk_om_in_use_count);
3187 } else {
3188 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
3189 "equal to chk_om_in_use_count=%d", p2i(jt),
3190 OrderAccess::load_acquire(&jt->om_in_use_count),
3191 chk_om_in_use_count);
3192 *error_cnt_p = *error_cnt_p + 1;
3193 }
3194 }
3195
3196 // Log details about ObjectMonitors on the in-use lists. The 'BHL'
3197 // flags indicate why the entry is in-use, 'object' and 'object type'
3198 // indicate the associated object and its type.
3199 void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) {
3200 stringStream ss;
3201 if (OrderAccess::load_acquire(&g_om_in_use_count) > 0) {
3202 out->print_cr("In-use global monitor info:");
3203 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
3204 out->print_cr("%18s %s %7s %18s %18s",
3205 "monitor", "BHL", "ref_cnt", "object", "object type");
3206 out->print_cr("================== === ======= ================== ==================");
3207 for (ObjectMonitor* n = OrderAccess::load_acquire(&g_om_in_use_list); n != NULL; n = unmarked_next(n)) {
3208 const oop obj = (oop) n->object();
3209 const markWord mark = n->header();
3210 ResourceMark rm;
3211 out->print(INTPTR_FORMAT " %d%d%d %7d " INTPTR_FORMAT " %s",
3212 p2i(n), n->is_busy() != 0, mark.hash() != 0,
3213 n->owner() != NULL, (int)n->ref_count(), p2i(obj),
3214 obj->klass()->external_name());
3215 if (n->is_busy() != 0) {
3216 out->print(" (%s)", n->is_busy_to_string(&ss));
3217 ss.reset();
3218 }
3219 out->cr();
3220 }
3221 }
3222
3223 out->print_cr("In-use per-thread monitor info:");
3224 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
3225 out->print_cr("%18s %18s %s %7s %18s %18s",
3226 "jt", "monitor", "BHL", "ref_cnt", "object", "object type");
3227 out->print_cr("================== ================== === ======= ================== ==================");
3228 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
3229 for (ObjectMonitor* n = OrderAccess::load_acquire(&jt->om_in_use_list); n != NULL; n = unmarked_next(n)) {
3230 const oop obj = (oop) n->object();
3231 const markWord mark = n->header();
3232 ResourceMark rm;
3233 out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d %7d "
3234 INTPTR_FORMAT " %s", p2i(jt), p2i(n), n->is_busy() != 0,
3235 mark.hash() != 0, n->owner() != NULL, (int)n->ref_count(),
3236 p2i(obj), obj->klass()->external_name());
3237 if (n->is_busy() != 0) {
3238 out->print(" (%s)", n->is_busy_to_string(&ss));
3239 ss.reset();
3240 }
3241 out->cr();
3242 }
3243 }
3244
3245 out->flush();
3246 }
3247
3248 // Log counts for the global and per-thread monitor lists and return
3249 // the population count.
3250 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
3251 int pop_count = 0;
3252 out->print_cr("%18s %10s %10s %10s",
3253 "Global Lists:", "InUse", "Free", "Total");
3254 out->print_cr("================== ========== ========== ==========");
3255 out->print_cr("%18s %10d %10d %10d", "",
3256 OrderAccess::load_acquire(&g_om_in_use_count),
3257 OrderAccess::load_acquire(&g_om_free_count),
3258 OrderAccess::load_acquire(&g_om_population));
3259 pop_count += OrderAccess::load_acquire(&g_om_in_use_count) +
3260 OrderAccess::load_acquire(&g_om_free_count);
3261
3262 out->print_cr("%18s %10s %10s %10s",
3263 "Per-Thread Lists:", "InUse", "Free", "Provision");
3264 out->print_cr("================== ========== ========== ==========");
3265
3266 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
3267 out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
3268 OrderAccess::load_acquire(&jt->om_in_use_count),
3269 OrderAccess::load_acquire(&jt->om_free_count),
3270 jt->om_free_provision);
3271 pop_count += OrderAccess::load_acquire(&jt->om_in_use_count) +
3272 OrderAccess::load_acquire(&jt->om_free_count);
3273 }
3274 return pop_count;
3275 }
3276
3277 #ifndef PRODUCT
3278
3279 // Check if monitor belongs to the monitor cache
3280 // The list is grow-only so it's *relatively* safe to traverse
3281 // the list of extant blocks without taking a lock.
3282
3283 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
3284 PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
3285 while (block != NULL) {
3286 assert(block->object() == CHAINMARKER, "must be a block header");
3287 if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
3288 address mon = (address)monitor;
3289 address blk = (address)block;
3290 size_t diff = mon - blk;
3291 assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
3292 return 1;
3293 }
3294 // unmarked_next() is not needed with g_block_list (no next field marking).
3295 block = (PaddedObjectMonitor*)OrderAccess::load_acquire(&block->_next_om);
3296 }
3297 return 0;
3298 }
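
// The membership test above in standalone form: a monitor belongs to a
// block iff its address falls strictly between slot 0 (the CHAINMARKER
// header) and one-past-the-last slot, and lands on a slot boundary.
// BLOCK_SLOTS stands in for _BLOCKSIZE; toy_in_block() is illustrative.

#include <cstddef>

template <typename M, size_t BLOCK_SLOTS>
static bool toy_in_block(const M* block, const M* monitor) {
  if (monitor <= &block[0] || monitor >= &block[BLOCK_SLOTS]) {
    return false;                          // outside this block entirely
  }
  size_t diff = (const char*)monitor - (const char*)block;
  return (diff % sizeof(M)) == 0;          // must be slot-aligned
}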
3299
3300 #endif