  }                                                          \
}

#else // ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon) {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround for dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedObjectMonitor* volatile ObjectSynchronizer::g_block_list = NULL;
// Global ObjectMonitor free list. Newly allocated and deflated
// ObjectMonitors are prepended here.
ObjectMonitor* volatile ObjectSynchronizer::g_free_list = NULL;
// Global ObjectMonitor in-use list. When a JavaThread is exiting,
// ObjectMonitors on its per-thread in-use list are prepended here.
ObjectMonitor* volatile ObjectSynchronizer::g_om_in_use_list = NULL;
int ObjectSynchronizer::g_om_in_use_count = 0;  // # on g_om_in_use_list

static volatile intptr_t gListLock = 0;         // protects global monitor lists
static volatile int g_om_free_count = 0;        // # on g_free_list
static volatile int g_om_population = 0;        // # Extant -- in circulation

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))
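
// CHAINMARKER tags the reserved first element of each block of monitors
// (see om_alloc() below, e.g. temp[0].set_object(CHAINMARKER)); list
// walkers such as monitors_iterate() assert on it to recognize block
// headers.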


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
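// A minimal sketch of the resulting calling convention (illustrative only,
// not a quote of any particular call site):
//
//   if (quick_notify(obj, self, all)) {
//     return;                      // fast path satisfied the request
//   }
//   // ... transition thread state as needed ...
//   slow_notify(obj, self, all);   // fall back to the slow-path form
//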
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(t_list, owner);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark.is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedObjectMonitor* block = Atomic::load_acquire(&g_block_list);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        // Only process with closure if the object is set.
        closure->do_monitor(mid);
      }
    }
    block = (PaddedObjectMonitor*)block->_next_om;
  }
}

static bool monitors_used_above_threshold() {
  if (g_om_population == 0) {
    return false;
  }
  int monitors_used = g_om_population - g_om_free_count;
  int monitor_usage = (monitors_used * 100LL) / g_om_population;
  return monitor_usage > MonitorUsedDeflationThreshold;
}
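
// Worked example (illustrative numbers): with g_om_population == 1000 and
// g_om_free_count == 50, monitors_used is 950 and monitor_usage is
// (950 * 100) / 1000 == 95, so any MonitorUsedDeflationThreshold below 95
// reports that monitors are used above the threshold.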

bool ObjectSynchronizer::is_cleanup_needed() {
  if (MonitorUsedDeflationThreshold > 0) {
    if (monitors_used_above_threshold()) {
      return true;
    }
  }
  return needs_monitor_scavenge();
}

bool ObjectSynchronizer::needs_monitor_scavenge() {
  if (Atomic::load(&_forceMonitorScavenge) == 1) {
    log_info(monitorinflation)("Monitor scavenge needed, triggering safepoint cleanup.");
    return true;
  }
  return false;
}

void ObjectSynchronizer::oops_do(OopClosure* f) {
  // We only scan the global used list here (for moribund threads), and
  // the thread-local monitors in Thread::oops_do().
  global_used_oops_do(f);
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(g_om_in_use_list, f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->om_in_use_list, f);
}

void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  ObjectMonitor* mid;
  for (mid = list; mid != NULL; mid = mid->_next_om) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global g_free_list and
// associates them with objects. Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects. Such
// scavenged monitors are returned to the g_free_list.
//
// The global list is protected by gListLock. All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// -- unassigned and on the global free list
// -- unassigned and on a thread's private om_free_list
// -- assigned to an object. The object is inflated and the mark refers
//    to the ObjectMonitor.
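
// Sketch of the typical flow between those states (illustrative, derived
// from om_alloc(), om_release() and deflate_monitor() below):
//
//   g_free_list --om_alloc()--> per-thread om_in_use_list
//     --inflate()--> referenced by an object's mark word
//     --deflate_monitor() at a STW safepoint--> back on g_free_list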


// Constraining monitor pool growth via MonitorBound ...
//
// If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
//
// The monitor pool is grow-only. We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC. As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit. Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// If MonitorBound is set, the boundary applies to
//     (g_om_population - g_om_free_count)
// i.e., if there are not enough ObjectMonitors on the global free list,
// then a safepoint deflation is induced. Picking a good MonitorBound value
// is non-trivial.
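
// Illustrative form of the check (this mirrors the test in om_alloc()
// below):
//
//   if (MonitorBound > 0 && (g_om_population - g_om_free_count) > MonitorBound) {
//     InduceScavenge(self, "om_alloc");
//   }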

static void InduceScavenge(Thread* self, const char* Whence) {
  // Induce a STW safepoint to trim monitors.
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger a cleanup safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (Atomic::xchg(&_forceMonitorScavenge, 1) == 0) {
    VMThread::check_for_forced_cleanup();
  }
}

ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of ObjectMonitors in circulation as well as the STW
  // scavenge costs. As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  stringStream ss;
  for (;;) {
    ObjectMonitor* m;

    // 1: try to allocate from the thread's local om_free_list.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors. Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = self->om_free_list;
    if (m != NULL) {
      self->om_free_list = m->_next_om;
      self->om_free_count--;
      guarantee(m->object() == NULL, "invariant");
      m->_next_om = self->om_in_use_list;
      self->om_in_use_list = m;
      self->om_in_use_count++;
      return m;
    }

    // 2: try to allocate from the global g_free_list.
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (g_free_list != NULL) {
      // Reprovision the thread's om_free_list.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "om_alloc(1)");
      for (int i = self->om_free_provision; --i >= 0 && g_free_list != NULL;) {
        g_om_free_count--;
        ObjectMonitor* take = g_free_list;
        g_free_list = take->_next_om;
        guarantee(take->object() == NULL, "invariant");
        take->Recycle();
        om_release(self, take, false);
      }
      Thread::muxRelease(&gListLock);
      self->om_free_provision += 1 + (self->om_free_provision / 2);
      if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
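
      // Example growth of om_free_provision (illustrative): a value of 32
      // becomes 32 + 1 + 16 = 49, then 49 + 1 + 24 = 74, and so on, until
      // the MAXPRIVATE (1024) clamp above takes effect.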

      const int mx = MonitorBound;
      if (mx > 0 && (g_om_population - g_om_free_count) > mx) {
        // Not enough ObjectMonitors on the global free list.
        // We can't safely induce a STW safepoint from om_alloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(self, "om_alloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors.
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation ObjectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
    PaddedObjectMonitor* temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size, mtInternal);
    temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, DEFAULT_CACHE_LINE_SIZE);
    (void)memset((void *) temp, 0, neededsize);
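
    // align_up() example (illustrative): with DEFAULT_CACHE_LINE_SIZE == 64,
    // a raw malloc address ending in 0x08 is rounded up to the next address
    // ending in 0x40, so every PaddedObjectMonitor starts on a cache line
    // boundary. The extra DEFAULT_CACHE_LINE_SIZE - 1 bytes requested above
    // guarantee there is room for this adjustment.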

    // Format the block.
    // Initialize the linked list: each monitor points to its next,
    // forming the singly linked free list; the very first monitor
    // points to the next block, which forms the block list.
    // The trick of using the 1st element in the block as g_block_list
    // linkage should be reconsidered. A better implementation would
    // look like: class Block { Block* next; int N; ObjectMonitor Body[N]; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i]._next_om = (ObjectMonitor *)&temp[i + 1];
    }

    // Terminate the last monitor as the end of the list.
    temp[_BLOCKSIZE - 1]._next_om = NULL;

    // Element [0] is reserved for global list linkage.
    temp[0].set_object(CHAINMARKER);
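
    // Resulting block layout (illustrative):
    //
    //   temp[0]             object == CHAINMARKER, _next_om -> next block
    //   temp[1]             _next_om -> temp[2]
    //   ...
    //   temp[_BLOCKSIZE-1]  _next_om -> NULL (spliced onto g_free_list below)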

    // Consider carving out this thread's current request from the
    // block in hand. This avoids some lock traffic and redundant
    // list activity.

    // Acquire the gListLock to manipulate g_block_list and g_free_list.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&gListLock, "om_alloc(2)");
    g_om_population += _BLOCKSIZE - 1;
    g_om_free_count += _BLOCKSIZE - 1;

    // Add the new block to the list of extant blocks (g_block_list).
    // The very first ObjectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0]._next_om = g_block_list;
    // There are lock-free uses of g_block_list so make sure that
    // the previous stores happen before we update g_block_list.
    Atomic::release_store(&g_block_list, temp);

    // Add the new string of ObjectMonitors to the global free list.
    temp[_BLOCKSIZE - 1]._next_om = g_free_list;
    g_free_list = temp + 1;
    Thread::muxRelease(&gListLock);
  }
}

// Place "m" on the caller's private per-thread om_free_list.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's om_free_list as the only non-allocation time
// we'll call om_release() is to return a monitor to the free list after
// a CAS attempt failed. This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_monitor_list() -- from reclaiming them while we
// are trying to release them.
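//
// For example, deflate_monitor_list() (below) only considers monitors whose
// object field is non-NULL:
//
//   if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) ...
//
// so a NULL object field keeps a released monitor out of the scavenger's reach.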

void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
                                    bool from_per_thread_alloc) {
  guarantee(m->header().value() == 0, "invariant");
  guarantee(m->object() == NULL, "invariant");
  stringStream ss;
  guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
            "%s, recursions=" INTX_FORMAT, m->is_busy_to_string(&ss),
            m->_recursions);
  // _next_om is used for both per-thread in-use and free lists so
  // we have to remove 'm' from the in-use list first (as needed).
  if (from_per_thread_alloc) {
    // Need to remove 'm' from om_in_use_list.
    ObjectMonitor* cur_mid_in_use = NULL;
    bool extracted = false;
    for (ObjectMonitor* mid = self->om_in_use_list; mid != NULL; cur_mid_in_use = mid, mid = mid->_next_om) {
      if (m == mid) {
        // extract from per-thread in-use list
        if (mid == self->om_in_use_list) {
          self->om_in_use_list = mid->_next_om;
        } else if (cur_mid_in_use != NULL) {
          cur_mid_in_use->_next_om = mid->_next_om; // maintain the current thread in-use list
        }
        extracted = true;
        self->om_in_use_count--;
        break;
      }
    }
    assert(extracted, "Should have extracted from in-use list");
  }

  m->_next_om = self->om_free_list;
  self->om_free_list = m;
  self->om_free_count++;
}

// Return ObjectMonitors on a moribund thread's free and in-use
// lists to the appropriate global lists. The ObjectMonitors on the
// per-thread in-use list may still be in use by other threads.
//
// We currently call om_flush() from Threads::remove() before the
// thread has been excised from the thread list and is no longer a
// mutator. This means that om_flush() cannot run concurrently with
// a safepoint and interleave with deflate_idle_monitors(). In
// particular, this ensures that the thread's in-use monitors are
// scanned by a GC safepoint, either via Thread::oops_do() (before
// om_flush() is called) or via ObjectSynchronizer::oops_do() (after
// om_flush() is called).

void ObjectSynchronizer::om_flush(Thread* self) {
  ObjectMonitor* free_list = self->om_free_list;
  ObjectMonitor* free_tail = NULL;
  int free_count = 0;
  if (free_list != NULL) {
    ObjectMonitor* s;
    // The thread is going away. Set 'free_tail' to the last per-thread free
    // monitor which will be linked to g_free_list below under the gListLock.
    stringStream ss;
    for (s = free_list; s != NULL; s = s->_next_om) {
      free_count++;
      free_tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
    }
    guarantee(free_tail != NULL, "invariant");
    assert(self->om_free_count == free_count, "free-count off");
    self->om_free_list = NULL;
    self->om_free_count = 0;
  }

  ObjectMonitor* in_use_list = self->om_in_use_list;
  ObjectMonitor* in_use_tail = NULL;
  int in_use_count = 0;
  if (in_use_list != NULL) {
    // The thread is going away, however the ObjectMonitors on the
    // om_in_use_list may still be in-use by other threads. Link
    // them to in_use_tail, which will be linked into the global
    // in-use list g_om_in_use_list below, under the gListLock.
    ObjectMonitor* cur_om;
    for (cur_om = in_use_list; cur_om != NULL; cur_om = cur_om->_next_om) {
      in_use_tail = cur_om;
      in_use_count++;
    }
    guarantee(in_use_tail != NULL, "invariant");
    assert(self->om_in_use_count == in_use_count, "in-use count off");
    self->om_in_use_list = NULL;
    self->om_in_use_count = 0;
  }

  Thread::muxAcquire(&gListLock, "om_flush");
  if (free_tail != NULL) {
    free_tail->_next_om = g_free_list;
    g_free_list = free_list;
    g_om_free_count += free_count;
  }

  if (in_use_tail != NULL) {
    in_use_tail->_next_om = g_om_in_use_list;
    g_om_in_use_list = in_use_list;
    g_om_in_use_count += in_use_count;
  }

  Thread::muxRelease(&gListLock);

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if ((free_count != 0 || in_use_count != 0) &&
             log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d"
                 ", in_use_count=%d" ", om_free_provision=%d",
                 p2i(self), free_count, in_use_count, self->om_free_provision);
  }
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

// Fast path code shared by multiple functions
void ObjectSynchronizer::inflate_helper(oop obj) {
  markWord mark = obj->mark();
  if (mark.has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark.monitor()), "monitor is invalid");
    assert(mark.monitor()->header().is_neutral(), "monitor must record a good object header");
    return;
  }
  inflate(Thread::current(), obj, inflate_cause_vm_internal);
}

ObjectMonitor* ObjectSynchronizer::inflate(Thread* self,
                                           oop object,
                                           const InflateCause cause) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark();
    assert(!mark.has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // * Inflated     - just return
    // * Stack-locked - coerce it to inflated
    // * INFLATING    - busy wait for conversion to complete
    // * Neutral      - aggressively inflate the object.
    // * BIASED       - illegal. We should never see this.

    // CASE: inflated
    if (mark.has_monitor()) {
    // This is an unfortunate aspect of this design.

// Deflate a single monitor if not in-use.
// Return true if deflated, false if in-use.
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** free_head_p,
                                         ObjectMonitor** free_tail_p) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  const markWord mark = obj->mark();
  guarantee(mark == markWord::encode(mid), "should match: mark="
            INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
            markWord::encode(mid).value());
  // Make sure that mark.monitor() and markWord::encode() agree:
  guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
            ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
  const markWord dmw = mid->header();
  guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());

  if (mid->is_busy()) {
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used.
    // It's idle - scavenge and return to the global free list.
    // Plain old deflation ...
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm;
      log_trace(monitorinflation)("deflate_monitor: "
                                  "object=" INTPTR_FORMAT ", mark="
                                  INTPTR_FORMAT ", type='%s'", p2i(obj),
                                  mark.value(), obj->klass()->external_name());
    }

    // Restore the header back to obj
    obj->release_set_mark(dmw);
    mid->clear();

    assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
           p2i(mid->object()));

    // Move the deflated ObjectMonitor to the working free list
    // defined by free_head_p and free_tail_p.
    if (*free_head_p == NULL) *free_head_p = mid;
    if (*free_tail_p != NULL) {
      // We append to the list so the caller can use mid->_next_om
      // to fix the linkages in its context.
      ObjectMonitor* prevtail = *free_tail_p;
      // Should have been cleaned up by the caller:
      assert(prevtail->_next_om == NULL, "cleaned up deflated?");
      prevtail->_next_om = mid;
    }
    *free_tail_p = mid;
    // At this point, mid->_next_om still refers to its current
    // value and another ObjectMonitor's _next_om field still
    // refers to this ObjectMonitor. Those linkages have to be
    // cleaned up by the caller who has the complete context.
    deflated = true;
  }
  return deflated;
}

// Walk a given monitor list, and deflate idle monitors.
// The given list could be a per-thread list or a global list.
// Caller acquires gListLock as needed.
//
// In the case of parallel processing of thread-local monitor lists,
// work is done by Threads::parallel_threads_do() which ensures that
// each Java thread is processed by exactly one worker thread, and
// thus avoids conflicts that would arise when worker threads process
// the same monitor lists concurrently.
//
// See also ParallelSPCleanupTask and
// SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
// Threads::parallel_java_threads_do() in thread.cpp.
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
                                             ObjectMonitor** free_head_p,
                                             ObjectMonitor** free_tail_p) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* cur_mid_in_use = NULL;
  int deflated_count = 0;

  for (mid = *list_p; mid != NULL;) {
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
      // Deflation succeeded and already updated free_head_p and
      // free_tail_p as needed. Finish the move to the local free list
      // by unlinking mid from the global or per-thread in-use list.
      if (mid == *list_p) {
        *list_p = mid->_next_om;
      } else if (cur_mid_in_use != NULL) {
        cur_mid_in_use->_next_om = mid->_next_om; // maintain the current thread in-use list
      }
      next = mid->_next_om;
      mid->_next_om = NULL; // This mid is current tail in the free_head_p list
      mid = next;
      deflated_count++;
    } else {
      cur_mid_in_use = mid;
      mid = mid->_next_om;
    }
  }
  return deflated_count;
}
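
// Callers later splice the collected segment onto g_free_list in constant
// time, as deflate_idle_monitors() below does (illustrative excerpt):
//
//   free_tail_p->_next_om = g_free_list;
//   g_free_list = free_head_p;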

void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  counters->n_in_use = 0;              // currently associated with objects
  counters->n_in_circulation = 0;      // extant
  counters->n_scavenged = 0;           // reclaimed (global and per-thread)
  counters->per_thread_scavenged = 0;  // per-thread scavenge total
  counters->per_thread_times = 0.0;    // per-thread scavenge times
}

void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  bool deflated = false;

  ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor* free_tail_p = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  // Prevent om_flush from changing mids in Thread dtors during deflation,
  // and in case the VM thread is acquiring a lock during a safepoint.
  // See e.g. 6320749.
  Thread::muxAcquire(&gListLock, "deflate_idle_monitors");

  // Note: the thread-local monitors lists get deflated in
  // a separate pass. See deflate_thread_local_monitors().

  // For moribund threads, scan g_om_in_use_list
  int deflated_count = 0;
  if (g_om_in_use_list) {
    counters->n_in_circulation += g_om_in_use_count;
    deflated_count = deflate_monitor_list((ObjectMonitor **)&g_om_in_use_list, &free_head_p, &free_tail_p);
    g_om_in_use_count -= deflated_count;
    counters->n_scavenged += deflated_count;
    counters->n_in_use += g_om_in_use_count;
  }

  if (free_head_p != NULL) {
    // Move the deflated ObjectMonitors back to the global free list.
    guarantee(free_tail_p != NULL && counters->n_scavenged > 0, "invariant");
    assert(free_tail_p->_next_om == NULL, "invariant");
    // constant-time list splice - prepend scavenged segment to g_free_list
    free_tail_p->_next_om = g_free_list;
    g_free_list = free_head_p;
  }
  Thread::muxRelease(&gListLock);
  timer.stop();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
  }
}

void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  // Report the cumulative time for deflating each thread's idle
  // monitors. Note: if the work is split among more than one
  // worker thread, then the reported time will likely be more
  // than a beginning to end measurement of the phase.
  log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged);

  g_om_free_count += counters->n_scavenged;

  if (log_is_enabled(Debug, monitorinflation)) {
    // exit_globals()'s call to audit_and_print_stats() is done
    // at the Info level.
    ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
  } else if (log_is_enabled(Info, monitorinflation)) {
    Thread::muxAcquire(&gListLock, "finish_deflate_idle_monitors");
    log_info(monitorinflation)("g_om_population=%d, g_om_in_use_count=%d, "
                               "g_om_free_count=%d", g_om_population,
                               g_om_in_use_count, g_om_free_count);
    Thread::muxRelease(&gListLock);
  }

  Atomic::store(&_forceMonitorScavenge, 0);  // Reset

  OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
  OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));

  GVars.stw_random = os::random();
  GVars.stw_cycle++;
}

void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor* free_tail_p = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, safepoint, cleanup) ||
      log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  int deflated_count = deflate_monitor_list(thread->om_in_use_list_addr(), &free_head_p, &free_tail_p);

  Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors");

  // Adjust counters
  counters->n_in_circulation += thread->om_in_use_count;
  thread->om_in_use_count -= deflated_count;
  counters->n_scavenged += deflated_count;
  counters->n_in_use += thread->om_in_use_count;
  counters->per_thread_scavenged += deflated_count;

  if (free_head_p != NULL) {
    // Move the deflated ObjectMonitors back to the global free list.
    guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
    assert(free_tail_p->_next_om == NULL, "invariant");

    // constant-time list splice - prepend scavenged segment to g_free_list
    free_tail_p->_next_om = g_free_list;
    g_free_list = free_head_p;
  }

  timer.stop();
  // Safepoint logging cares about cumulative per_thread_times and
  // we'll capture most of the cost, but not the muxRelease() which
  // should be cheap.
  counters->per_thread_times += timer.seconds();

  Thread::muxRelease(&gListLock);

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("jt=" INTPTR_FORMAT ": deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(thread), timer.seconds(), deflated_count);
  }
}

// Monitor cleanup on JavaThread::exit

// Iterate through monitor cache and attempt to release thread's monitors.
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD. Lightweight monitors are
// ignored. This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time-consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   int tmp = SafepointSynchronize::_safepoint_counter;
//   <code that must not run at safepoint>
//   guarantee(((tmp ^ _safepoint_counter) | (tmp & 1)) == 0);
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&gListLock);
  THREAD->clear_pending_exception();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:    return "VM Internal";
    case inflate_cause_monitor_enter:  return "Monitor Enter";
    case inflate_cause_wait:           return "Monitor Wait";
    case inflate_cause_notify:         return "Monitor Notify";
    case inflate_cause_hash_code:      return "Monitor Hash Code";
    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

//------------------------------------------------------------------------------
// Debugging code

u_char* ObjectSynchronizer::get_gvars_addr() {
  return (u_char*)&GVars;
}

u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
  return (u_char*)&GVars.hc_sequence;
}

size_t ObjectSynchronizer::get_gvars_size() {
  return sizeof(SharedGlobals);
}

u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
  return (u_char*)&GVars.stw_random;
}

void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
  assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStreamHandle(Trace, monitorinflation) lsh_trace;
  LogStream* ls = NULL;
  if (log_is_enabled(Trace, monitorinflation)) {
    ls = &lsh_trace;
  } else if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  assert(ls != NULL, "sanity check");

  if (!on_exit) {
    // Not at VM exit so grab the global list lock.
    Thread::muxAcquire(&gListLock, "audit_and_print_stats");
  }

  // Log counts for the global and per-thread monitor lists:
  int chk_om_population = log_monitor_list_counts(ls);
  int error_cnt = 0;

  ls->print_cr("Checking global lists:");

  // Check g_om_population:
  if (g_om_population == chk_om_population) {
    ls->print_cr("g_om_population=%d equals chk_om_population=%d",
                 g_om_population, chk_om_population);
  } else {
    ls->print_cr("ERROR: g_om_population=%d is not equal to "
                 "chk_om_population=%d", g_om_population,
                 chk_om_population);
    error_cnt++;
  }

  // Check g_om_in_use_list and g_om_in_use_count:
  chk_global_in_use_list_and_count(ls, &error_cnt);

  // Check g_free_list and g_om_free_count:
  chk_global_free_list_and_count(ls, &error_cnt);

  if (!on_exit) {
    Thread::muxRelease(&gListLock);
  }

  ls->print_cr("Checking per-thread lists:");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // Check om_in_use_list and om_in_use_count:
    chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);

    // Check om_free_list and om_free_count:
    chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
  }

  if (error_cnt == 0) {
    ls->print_cr("No errors found in monitor list checks.");
  } else {
    log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
  }

  if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
      (!on_exit && log_is_enabled(Trace, monitorinflation))) {
    // When exiting this log output is at the Info level. When called
    // at a safepoint, this log output is at the Trace level since
    // there can be a lot of it.
    log_in_use_monitor_details(ls, on_exit);
  }

  ls->flush();

  guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
}

// Check a free monitor entry; log any errors.
void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n,
                                        outputStream * out, int *error_cnt_p) {
  stringStream ss;
  if (n->is_busy()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must not be busy: %s", p2i(jt),
                    p2i(n), n->is_busy_to_string(&ss));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->header().value() != 0) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _header "
                    "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    n->header().value());
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _header field: _header=" INTPTR_FORMAT,
                    p2i(n), n->header().value());
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->object() != NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _object "
                    "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    p2i(n->object()));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _object field: _object=" INTPTR_FORMAT,
                    p2i(n), p2i(n->object()));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the global free list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
                                                        int *error_cnt_p) {
  int chk_om_free_count = 0;
  for (ObjectMonitor* n = g_free_list; n != NULL; n = n->_next_om) {
    chk_free_entry(NULL /* jt */, n, out, error_cnt_p);
    chk_om_free_count++;
  }
  if (g_om_free_count == chk_om_free_count) {
    out->print_cr("g_om_free_count=%d equals chk_om_free_count=%d",
                  g_om_free_count, chk_om_free_count);
  } else {
    out->print_cr("ERROR: g_om_free_count=%d is not equal to "
                  "chk_om_free_count=%d", g_om_free_count,
                  chk_om_free_count);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the global in-use list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
                                                          int *error_cnt_p) {
  int chk_om_in_use_count = 0;
  for (ObjectMonitor* n = g_om_in_use_list; n != NULL; n = n->_next_om) {
    chk_in_use_entry(NULL /* jt */, n, out, error_cnt_p);
    chk_om_in_use_count++;
  }
  if (g_om_in_use_count == chk_om_in_use_count) {
    out->print_cr("g_om_in_use_count=%d equals chk_om_in_use_count=%d", g_om_in_use_count,
                  chk_om_in_use_count);
  } else {
    out->print_cr("ERROR: g_om_in_use_count=%d is not equal to chk_om_in_use_count=%d",
                  g_om_in_use_count, chk_om_in_use_count);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check an in-use monitor entry; log any errors.
void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
                                          outputStream * out, int *error_cnt_p) {
  if (n->header().value() == 0) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _header "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _header field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->object() == NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _object "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _object field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  const oop obj = (oop)n->object();
  const markWord mark = obj->mark();
  if (!mark.has_monitor()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not think "
                    "it has a monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT, p2i(jt), p2i(n), p2i(obj), mark.value());
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not think it has a monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
                    p2i(obj), mark.value());
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  ObjectMonitor* const obj_mon = mark.monitor();
  if (n != obj_mon) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not refer "
                    "to the same monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
                    p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not refer to the same monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
                    INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's free list and count; log the results of the checks.
void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
                                                            outputStream * out,
                                                            int *error_cnt_p) {
  int chk_om_free_count = 0;
  for (ObjectMonitor* n = jt->om_free_list; n != NULL; n = n->_next_om) {
    chk_free_entry(jt, n, out, error_cnt_p);
    chk_om_free_count++;
  }
  if (jt->om_free_count == chk_om_free_count) {
    out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals "
                  "chk_om_free_count=%d", p2i(jt), jt->om_free_count, chk_om_free_count);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not "
                  "equal to chk_om_free_count=%d", p2i(jt), jt->om_free_count,
                  chk_om_free_count);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's in-use list and count; log the results of the checks.
void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
                                                              outputStream * out,
                                                              int *error_cnt_p) {
  int chk_om_in_use_count = 0;
  for (ObjectMonitor* n = jt->om_in_use_list; n != NULL; n = n->_next_om) {
    chk_in_use_entry(jt, n, out, error_cnt_p);
    chk_om_in_use_count++;
  }
  if (jt->om_in_use_count == chk_om_in_use_count) {
    out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
                  "chk_om_in_use_count=%d", p2i(jt), jt->om_in_use_count,
                  chk_om_in_use_count);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
                  "equal to chk_om_in_use_count=%d", p2i(jt), jt->om_in_use_count,
                  chk_om_in_use_count);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Log details about ObjectMonitors on the in-use lists. The 'BHL'
// flags indicate why the entry is in-use, 'object' and 'object type'
// indicate the associated object and its type.
void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out,
                                                    bool on_exit) {
  if (!on_exit) {
    // Not at VM exit so grab the global list lock.
    Thread::muxAcquire(&gListLock, "log_in_use_monitor_details");
  }

  stringStream ss;
  if (g_om_in_use_count > 0) {
    out->print_cr("In-use global monitor info:");
    out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
    out->print_cr("%18s %s %18s %18s",
                  "monitor", "BHL", "object", "object type");
    out->print_cr("================== === ================== ==================");
    for (ObjectMonitor* n = g_om_in_use_list; n != NULL; n = n->_next_om) {
      const oop obj = (oop) n->object();
      const markWord mark = n->header();
      ResourceMark rm;
      out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(n),
                 n->is_busy() != 0, mark.hash() != 0, n->owner() != NULL,
                 p2i(obj), obj->klass()->external_name());
      if (n->is_busy() != 0) {
        out->print(" (%s)", n->is_busy_to_string(&ss));
        ss.reset();
      }
      out->cr();
    }
  }

  if (!on_exit) {
    Thread::muxRelease(&gListLock);
  }

  out->print_cr("In-use per-thread monitor info:");
  out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
  out->print_cr("%18s %18s %s %18s %18s",
                "jt", "monitor", "BHL", "object", "object type");
  out->print_cr("================== ================== === ================== ==================");
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    for (ObjectMonitor* n = jt->om_in_use_list; n != NULL; n = n->_next_om) {
      const oop obj = (oop) n->object();
      const markWord mark = n->header();
      ResourceMark rm;
      out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT
                 " %s", p2i(jt), p2i(n), n->is_busy() != 0,
                 mark.hash() != 0, n->owner() != NULL, p2i(obj),
                 obj->klass()->external_name());
      if (n->is_busy() != 0) {
        out->print(" (%s)", n->is_busy_to_string(&ss));
        ss.reset();
      }
      out->cr();
    }
  }

  out->flush();
}

// Log counts for the global and per-thread monitor lists and return
// the population count.
int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
  int pop_count = 0;
  out->print_cr("%18s %10s %10s %10s",
                "Global Lists:", "InUse", "Free", "Total");
  out->print_cr("================== ========== ========== ==========");
  out->print_cr("%18s %10d %10d %10d", "",
                g_om_in_use_count, g_om_free_count, g_om_population);
  pop_count += g_om_in_use_count + g_om_free_count;

  out->print_cr("%18s %10s %10s %10s",
                "Per-Thread Lists:", "InUse", "Free", "Provision");
  out->print_cr("================== ========== ========== ==========");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
                  jt->om_in_use_count, jt->om_free_count, jt->om_free_provision);
    pop_count += jt->om_in_use_count + jt->om_free_count;
  }
  return pop_count;
}

#ifndef PRODUCT

// Check if monitor belongs to the monitor cache.
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedObjectMonitor* block = Atomic::load_acquire(&g_block_list);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
      return 1;
    }
    block = (PaddedObjectMonitor*)block->_next_om;
  }
  return 0;
}

#endif
|
100 } \
101 }
102
103 #else // ndef DTRACE_ENABLED
104
105 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) {;}
106 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon) {;}
107
108 #endif // ndef DTRACE_ENABLED
109
110 // This exists only as a workaround of dtrace bug 6254741
111 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
112 DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
113 return 0;
114 }
115
116 #define NINFLATIONLOCKS 256
117 static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];
118
119 // global list of blocks of monitors
120 PaddedObjectMonitor* ObjectSynchronizer::g_block_list = NULL;
121
122 struct ListGlobals {
123 char _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
124 // These are highly shared list related variables.
125 // To avoid false-sharing they need to be the sole occupants of a cache line.
126
127 // Global ObjectMonitor free list. Newly allocated and deflated
128 // ObjectMonitors are prepended here.
129 ObjectMonitor* free_list;
130 DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
131
132 // Global ObjectMonitor in-use list. When a JavaThread is exiting,
133 // ObjectMonitors on its per-thread in-use list are prepended here.
134 ObjectMonitor* in_use_list;
135 DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
136
137 int free_count; // # on free_list
138 DEFINE_PAD_MINUS_SIZE(3, DEFAULT_CACHE_LINE_SIZE, sizeof(int));
139
140 int in_use_count; // # on in_use_list
141 DEFINE_PAD_MINUS_SIZE(4, DEFAULT_CACHE_LINE_SIZE, sizeof(int));
142
143 int population; // # Extant -- in circulation
144 DEFINE_PAD_MINUS_SIZE(5, DEFAULT_CACHE_LINE_SIZE, sizeof(int));
145 };
146 static ListGlobals LVars;
147
148 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
149
150
151 // =====================> Spin-lock functions
152
153 // ObjectMonitors are not lockable outside of this file. We use spin-locks
154 // implemented using a bit in the _next_om field instead of the heavier
155 // weight locking mechanisms for faster list management.
156
157 #define OM_LOCK_BIT 0x1
158
159 // Return true if the ObjectMonitor is locked.
160 // Otherwise returns false.
161 static bool is_locked(ObjectMonitor* om) {
162 return ((intptr_t)Atomic::load(&om->_next_om) & OM_LOCK_BIT) == OM_LOCK_BIT;
163 }
164
165 // Mark an ObjectMonitor* with OM_LOCK_BIT and return it.
166 static ObjectMonitor* mark_om_ptr(ObjectMonitor* om) {
167 return (ObjectMonitor*)((intptr_t)om | OM_LOCK_BIT);
168 }
169
170 // Return the unmarked next field in an ObjectMonitor. Note: the next
171 // field may or may not have been marked with OM_LOCK_BIT originally.
172 static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
173 return (ObjectMonitor*)((intptr_t)Atomic::load(&om->_next_om) & ~OM_LOCK_BIT);
174 }
175
176 // Try to lock an ObjectMonitor. Returns true if locking was successful.
177 // Otherwise returns false.
178 static bool try_om_lock(ObjectMonitor* om) {
179 // Get current next field without any OM_LOCK_BIT value.
180 ObjectMonitor* next = unmarked_next(om);
181 if (Atomic::cmpxchg(&om->_next_om, next, mark_om_ptr(next)) != next) {
182 return false; // Cannot lock the ObjectMonitor.
183 }
184 return true;
185 }
186
187 // Lock an ObjectMonitor.
188 static void om_lock(ObjectMonitor* om) {
189 while (true) {
190 if (try_om_lock(om)) {
191 return;
192 }
193 }
194 }
195
196 // Unlock an ObjectMonitor.
197 static void om_unlock(ObjectMonitor* om) {
198 ObjectMonitor* next = Atomic::load(&om->_next_om);
199 guarantee(((intptr_t)next & OM_LOCK_BIT) == OM_LOCK_BIT, "next=" INTPTR_FORMAT
200 " must have OM_LOCK_BIT=%x set.", p2i(next), OM_LOCK_BIT);
201
202 next = (ObjectMonitor*)((intptr_t)next & ~OM_LOCK_BIT); // Clear OM_LOCK_BIT.
203 Atomic::store(&om->_next_om, next);
204 }
205
206 // Get the list head after locking it. Returns the list head or NULL
207 // if the list is empty.
208 static ObjectMonitor* get_list_head_locked(ObjectMonitor** list_p) {
209 while (true) {
210 ObjectMonitor* mid = Atomic::load(list_p);
211 if (mid == NULL) {
212 return NULL; // The list is empty.
213 }
214 if (try_om_lock(mid)) {
215 if (Atomic::load(list_p) != mid) {
216 // The list head changed before we could lock it so we have to retry.
217 om_unlock(mid);
218 continue;
219 }
220 return mid;
221 }
222 }
223 }
224
225 #undef OM_LOCK_BIT
226
227
228 // =====================> List Management functions
229
230 // Set the next field in an ObjectMonitor to the specified value.
231 static void set_next(ObjectMonitor* om, ObjectMonitor* value) {
232 Atomic::store(&om->_next_om, value);
233 }
234
235 // Prepend a list of ObjectMonitors to the specified *list_p. 'tail' is
236 // the last ObjectMonitor in the list and there are 'count' on the list.
237 // Also updates the specified *count_p.
238 static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail,
239 int count, ObjectMonitor** list_p,
240 int* count_p) {
241 while (true) {
242 ObjectMonitor* cur = Atomic::load(list_p);
243 // Prepend list to *list_p.
244 if (!try_om_lock(tail)) {
245 continue; // failed to lock tail so try it all again
246 }
247 set_next(tail, cur); // tail now points to cur (and unlocks tail)
248 if (cur == NULL) {
249 // No potential race with takers or other prependers since
250 // *list_p is empty.
251 if (Atomic::cmpxchg(list_p, cur, list) == cur) {
252 // Successfully switched *list_p to the list value.
253 Atomic::add(count_p, count);
254 break;
255 }
256 // Implied else: try it all again
257 } else {
258 if (!try_om_lock(cur)) {
259 continue; // failed to lock cur so try it all again
260 }
261 // We locked cur so try to switch *list_p to the list value.
262 if (Atomic::cmpxchg(list_p, cur, list) != cur) {
263 // The list head has changed so unlock cur and try again:
264 om_unlock(cur);
265 continue;
266 }
267 Atomic::add(count_p, count);
268 om_unlock(cur);
269 break;
270 }
271 }
272 }
273
274 // Prepend a newly allocated block of ObjectMonitors to g_block_list and
275 // LVars.free_list. Also updates LVars.population and LVars.free_count.
276 void ObjectSynchronizer::prepend_block_to_lists(PaddedObjectMonitor* new_blk) {
277 // First we handle g_block_list:
278 while (true) {
279 PaddedObjectMonitor* cur = Atomic::load(&g_block_list);
280 // Prepend new_blk to g_block_list. The first ObjectMonitor in
281 // a block is reserved for use as linkage to the next block.
282 Atomic::store(&new_blk[0]._next_om, cur);
283 if (Atomic::cmpxchg(&g_block_list, cur, new_blk) == cur) {
284 // Successfully switched g_block_list to the new_blk value.
285 Atomic::add(&LVars.population, _BLOCKSIZE - 1);
286 break;
287 }
288 // Implied else: try it all again
289 }
290
291 // Second we handle LVars.free_list:
292 prepend_list_to_common(new_blk + 1, &new_blk[_BLOCKSIZE - 1], _BLOCKSIZE - 1,
293 &LVars.free_list, &LVars.free_count);
294 }
295
296 // Prepend a list of ObjectMonitors to LVars.free_list. 'tail' is the last
297 // ObjectMonitor in the list and there are 'count' on the list. Also
298 // updates LVars.free_count.
299 static void prepend_list_to_global_free_list(ObjectMonitor* list,
300 ObjectMonitor* tail, int count) {
301 prepend_list_to_common(list, tail, count, &LVars.free_list, &LVars.free_count);
302 }
303
304 // Prepend a list of ObjectMonitors to LVars.in_use_list. 'tail' is the last
305 // ObjectMonitor in the list and there are 'count' on the list. Also
306 // updates LVars.in_use_list.
307 static void prepend_list_to_global_in_use_list(ObjectMonitor* list,
308 ObjectMonitor* tail, int count) {
309 prepend_list_to_common(list, tail, count, &LVars.in_use_list, &LVars.in_use_count);
310 }
311
312 // Prepend an ObjectMonitor to the specified list. Also updates
313 // the specified counter.
314 static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
315 int* count_p) {
316 while (true) {
317 om_lock(m); // Lock m so we can safely update its next field.
318 ObjectMonitor* cur = NULL;
319 // Lock the list head to guard against races with a list walker
320 // thread:
321 if ((cur = get_list_head_locked(list_p)) != NULL) {
322 // List head is now locked so we can safely switch it.
323 set_next(m, cur); // m now points to cur (and unlocks m)
324 Atomic::store(list_p, m); // Switch list head to unlocked m.
325 om_unlock(cur);
326 break;
327 }
328 // The list is empty so try to set the list head.
329 assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
330 set_next(m, cur); // m now points to NULL (and unlocks m)
331 if (Atomic::cmpxchg(list_p, cur, m) == cur) {
332 // List head is now unlocked m.
333 break;
334 }
335 // Implied else: try it all again
336 }
337 Atomic::inc(count_p);
338 }
339
340 // Prepend an ObjectMonitor to a per-thread om_free_list.
341 // Also updates the per-thread om_free_count.
342 static void prepend_to_om_free_list(Thread* self, ObjectMonitor* m) {
343 prepend_to_common(m, &self->om_free_list, &self->om_free_count);
344 }
345
346 // Prepend an ObjectMonitor to a per-thread om_in_use_list.
347 // Also updates the per-thread om_in_use_count.
348 static void prepend_to_om_in_use_list(Thread* self, ObjectMonitor* m) {
349 prepend_to_common(m, &self->om_in_use_list, &self->om_in_use_count);
350 }
351
352 // Take an ObjectMonitor from the start of the specified list. Also
353 // decrements the specified counter. Returns NULL if none are available.
354 static ObjectMonitor* take_from_start_of_common(ObjectMonitor** list_p,
355 int* count_p) {
356 ObjectMonitor* take = NULL;
357 // Lock the list head to guard against races with a list walker
358 // thread:
359 if ((take = get_list_head_locked(list_p)) == NULL) {
360 return NULL; // None are available.
361 }
362 ObjectMonitor* next = unmarked_next(take);
363 // Switch locked list head to next (which unlocks the list head, but
364 // leaves take locked):
365 Atomic::store(list_p, next);
366 Atomic::dec(count_p);
367 // Unlock take, but leave the next value for any lagging list
368 // walkers. It will get cleaned up when take is prepended to
369 // the in-use list:
370 om_unlock(take);
371 return take;
372 }
373
374 // Take an ObjectMonitor from the start of the LVars.free_list. Also
375 // updates LVars.free_count. Returns NULL if none are available.
376 static ObjectMonitor* take_from_start_of_global_free_list() {
377 return take_from_start_of_common(&LVars.free_list, &LVars.free_count);
378 }
379
380 // Take an ObjectMonitor from the start of a per-thread free-list.
381 // Also updates om_free_count. Returns NULL if none are available.
382 static ObjectMonitor* take_from_start_of_om_free_list(Thread* self) {
383 return take_from_start_of_common(&self->om_free_list, &self->om_free_count);
384 }
385
386
387 // =====================> Quick functions
388
389 // The quick_* forms are special fast-path variants used to improve
390 // performance. In the simplest case, a "quick_*" implementation could
391 // simply return false, in which case the caller will perform the necessary
392 // state transitions and call the slow-path form.
393 // The fast-path is designed to handle frequently arising cases in an efficient
394 // manner and is just a degenerate "optimistic" variant of the slow-path.
395 // returns true -- to indicate the call was satisfied.
396 // returns false -- to indicate the call needs the services of the slow-path.
397 // A no-loitering ordinance is in effect for code in the quick_* family
398 // operators: safepoints or indefinite blocking (blocking that might span a
399 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
400 // entry.
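//
// A minimal sketch of the caller contract (illustrative only, not the
// actual interpreter/compiler entry code):
//   if (quick_notify(obj, self, all)) {
//     return;    // fast path satisfied the request
//   }
//   // ... transition thread state, then call the slow-path form ...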
401 //
402 // Consider: An interesting optimization is to have the JIT recognize the
403 // following common idiom:
404 // synchronized (someobj) { .... ; notify(); }
405 // That is, we find a notify() or notifyAll() call that immediately precedes
406 // the monitorexit operation. In that case the JIT could fuse the operations
1122 assert(monitor != NULL, "monitor should be non-null");
1123 owner = (address) monitor->owner();
1124 }
1125
1126 if (owner != NULL) {
1127 // owning_thread_from_monitor_owner() may also return NULL here
1128 return Threads::owning_thread_from_monitor_owner(t_list, owner);
1129 }
1130
1131 // Unlocked case, header in place
1132 // Cannot have assertion since this object may have been
1133 // locked by another thread when reaching here.
1134 // assert(mark.is_neutral(), "sanity check");
1135
1136 return NULL;
1137 }
1138
1139 // Visitors ...
1140
1141 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
1142 PaddedObjectMonitor* block = Atomic::load(&g_block_list);
1143 while (block != NULL) {
1144 assert(block->object() == CHAINMARKER, "must be a block header");
1145 for (int i = _BLOCKSIZE - 1; i > 0; i--) {
1146 ObjectMonitor* mid = (ObjectMonitor *)(block + i);
1147 oop object = (oop)mid->object();
1148 if (object != NULL) {
1149 // Only process with closure if the object is set.
1150 closure->do_monitor(mid);
1151 }
1152 }
1153 // unmarked_next() is not needed with g_block_list (no locking
1154 // used with block linkage _next_om fields).
1155 block = (PaddedObjectMonitor*)Atomic::load(&block->_next_om);
1156 }
1157 }
1158
1159 static bool monitors_used_above_threshold() {
1160 int population = Atomic::load(&LVars.population);
1161 if (population == 0) {
1162 return false;
1163 }
1164 if (MonitorUsedDeflationThreshold > 0) {
1165 int monitors_used = population - Atomic::load(&LVars.free_count);
1166 int monitor_usage = (monitors_used * 100LL) / population;
1167 return monitor_usage > MonitorUsedDeflationThreshold;
1168 }
1169 return false;
1170 }
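
// For example (illustrative numbers): with LVars.population == 10000
// and LVars.free_count == 500, monitors_used == 9500 and
// monitor_usage == 95, so with MonitorUsedDeflationThreshold == 90 a
// cleanup is requested.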
1171
1172 // Returns true if MonitorBound is set (> 0) and if the specified
1173 // cnt is > MonitorBound. Otherwise returns false.
1174 static bool is_MonitorBound_exceeded(const int cnt) {
1175 const int mx = MonitorBound;
1176 return mx > 0 && cnt > mx;
1177 }
1178
1179 bool ObjectSynchronizer::is_cleanup_needed() {
1180 if (monitors_used_above_threshold()) {
1181 // Too many monitors in use.
1182 return true;
1183 }
1184 return needs_monitor_scavenge();
1185 }
1186
1187 bool ObjectSynchronizer::needs_monitor_scavenge() {
1188 if (Atomic::load(&_forceMonitorScavenge) == 1) {
1189 log_info(monitorinflation)("Monitor scavenge needed, triggering safepoint cleanup.");
1190 return true;
1191 }
1192 return false;
1193 }
1194
1195 void ObjectSynchronizer::oops_do(OopClosure* f) {
1196 // We only scan the global used list here (for moribund threads), and
1197 // the thread-local monitors in Thread::oops_do().
1198 global_used_oops_do(f);
1199 }
1200
1201 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
1202 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1203 list_oops_do(Atomic::load(&LVars.in_use_list), f);
1204 }
1205
1206 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
1207 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1208 list_oops_do(thread->om_in_use_list, f);
1209 }
1210
1211 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
1212 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1213 // The oops_do() phase does not overlap with monitor deflation
1214 // so no need to lock ObjectMonitors for the list traversal.
1215 for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
1216 if (mid->object() != NULL) {
1217 f->do_oop((oop*)mid->object_addr());
1218 }
1219 }
1220 }
1221
1222
1223 // -----------------------------------------------------------------------------
1224 // ObjectMonitor Lifecycle
1225 // -----------------------
1226 // Inflation unlinks monitors from LVars.free_list or a per-thread free
1227 // list and associates them with objects. Deflation -- which occurs at
1228 // STW-time -- disassociates idle monitors from objects.
1229 // Such scavenged monitors are returned to the LVars.free_list.
1230 //
1231 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
1232 //
1233 // Lifecycle:
1234 // -- unassigned and on the LVars.free_list
1235 // -- unassigned and on a per-thread free list
1236 // -- assigned to an object. The object is inflated and the mark refers
1237 // to the ObjectMonitor.
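//
// A minimal sketch of the transitions (using the functions in this file):
//   om_alloc()  -- free list (global or per-thread) -> per-thread in-use list
//   inflate()   -- associates the monitor with an object (mark -> monitor)
//   deflation   -- idle in-use monitors -> LVars.free_list (at a safepoint)
//   om_flush()  -- a moribund thread's lists -> the corresponding global lists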
1238
1239
1240 // Constraining monitor pool growth via MonitorBound ...
1241 //
1242 // If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
1243 //
1244 // The monitor pool is grow-only. We scavenge at STW safepoint-time, but
1245 // the rate of scavenging is driven primarily by GC. As such, we can find
1246 // an inordinate number of monitors in circulation.
1247 // To avoid that scenario we can artificially induce a STW safepoint
1248 // if the pool appears to be growing past some reasonable bound.
1249 // Generally we favor time in space-time tradeoffs, but as there's no
1250 // natural back-pressure on the # of extant monitors we need to impose some
1251 // type of limit. Beware that if MonitorBound is set to too low a value
1252 // we could just loop. In addition, if MonitorBound is set to a low value
1253 // we'll incur more safepoints, which are harmful to performance.
1254 // See also: GuaranteedSafepointInterval
1255 //
1256 // If MonitorBound is set, the boundary applies to
1257 // (LVars.population - LVars.free_count)
1258 // i.e., if there are not enough ObjectMonitors on the global free list,
1259 // then a safepoint deflation is induced. Picking a good MonitorBound value
1260 // is non-trivial.
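//
// For example (illustrative numbers): with MonitorBound == 32768,
// LVars.population == 40000 and LVars.free_count == 2000, the in-use
// count is 38000 > 32768, so om_alloc() requests a scavenge via
// InduceScavenge().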
1261
1262 static void InduceScavenge(Thread* self, const char * Whence) {
1263 // Induce STW safepoint to trim monitors
1264 // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
1265 // More precisely, trigger a cleanup safepoint as the number
1266 // of active monitors passes the specified threshold.
1267 // TODO: assert thread state is reasonable
1268
1269 if (Atomic::xchg(&_forceMonitorScavenge, 1) == 0) {
1270 VMThread::check_for_forced_cleanup();
1271 }
1272 }
1273
1274 ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
1275 // A large MAXPRIVATE value reduces both list lock contention
1276 // and list coherency traffic, but also tends to increase the
1277 // number of ObjectMonitors in circulation as well as the STW
1278 // scavenge costs. As usual, we lean toward time in space-time
1279 // tradeoffs.
1280 const int MAXPRIVATE = 1024;
1281 NoSafepointVerifier nsv;
1282
1283 stringStream ss;
1284 for (;;) {
1285 ObjectMonitor* m;
1286
1287 // 1: try to allocate from the thread's local om_free_list.
1288 // Threads will attempt to allocate first from their local list, then
1289 // from the global list, and only after those attempts fail will the
1290 // thread attempt to instantiate new monitors. Thread-local free lists
1291 // improve allocation latency, as well as reducing coherency traffic
1292 // on the shared global list.
1293 m = take_from_start_of_om_free_list(self);
1294 if (m != NULL) {
1295 guarantee(m->object() == NULL, "invariant");
1296 prepend_to_om_in_use_list(self, m);
1297 return m;
1298 }
1299
1300 // 2: try to allocate from the global LVars.free_list
1303 // If we're using thread-local free lists then try
1304 // to reprovision the caller's free list.
1305 if (Atomic::load(&LVars.free_list) != NULL) {
1306 // Reprovision the thread's om_free_list.
1307 // Use bulk transfers to reduce the allocation rate and heat
1308 // on various locks.
1309 for (int i = self->om_free_provision; --i >= 0;) {
1310 ObjectMonitor* take = take_from_start_of_global_free_list();
1311 if (take == NULL) {
1312 break; // No more are available.
1313 }
1314 guarantee(take->object() == NULL, "invariant");
1315 take->Recycle();
1316 om_release(self, take, false);
1317 }
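// The provision grows roughly 1.5x per refill: e.g., assuming an
// initial om_free_provision of 32, the sequence is 32, 49, 74, 112,
// ..., capped at MAXPRIVATE below.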
1318 self->om_free_provision += 1 + (self->om_free_provision / 2);
1319 if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
1320
1321 if (is_MonitorBound_exceeded(Atomic::load(&LVars.population) - Atomic::load(&LVars.free_count))) {
1322 // Not enough ObjectMonitors on the global free list.
1323 // We can't safely induce a STW safepoint from om_alloc() as our thread
1324 // state may not be appropriate for such activities and callers may hold
1325 // naked oops, so instead we defer the action.
1326 InduceScavenge(self, "om_alloc");
1327 }
1328 continue;
1329 }
1330
1331 // 3: allocate a block of new ObjectMonitors
1332 // Both the local and global free lists are empty -- resort to malloc().
1333 // In the current implementation ObjectMonitors are TSM - immortal.
1334 // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
1335 // each ObjectMonitor to start at the beginning of a cache line,
1336 // so we use align_up().
1337 // A better solution would be to use C++ placement-new.
1338 // BEWARE: As it stands currently, we don't run the ctors!
1339 assert(_BLOCKSIZE > 1, "invariant");
1340 size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
1341 PaddedObjectMonitor* temp;
1342 size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
1343 void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size, mtInternal);
1344 temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, DEFAULT_CACHE_LINE_SIZE);
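// For example (illustrative addresses, assuming a 64-byte cache
// line): a raw malloc() result of 0x1008 is rounded up to 0x1040.
// The extra DEFAULT_CACHE_LINE_SIZE - 1 bytes in aligned_size
// guarantee that the aligned block of neededsize bytes still fits.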
1345 (void)memset((void *) temp, 0, neededsize);
1346
1347 // Format the block.
1348 // Initialize the linked list; each monitor points to its next,
1349 // forming the singly-linked free list. The very first monitor
1350 // will point to the next block, which forms the block list.
1351 // The trick of using the 1st element in the block as g_block_list
1352 // linkage should be reconsidered. A better implementation would
1353 // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
1354
1355 for (int i = 1; i < _BLOCKSIZE; i++) {
1356 Atomic::store(&temp[i]._next_om, (ObjectMonitor*)&temp[i + 1]);
1357 }
1358
1359 // Terminate the last monitor as the end of the list.
1360 Atomic::store(&temp[_BLOCKSIZE - 1]._next_om, (ObjectMonitor*)NULL);
1361
1362 // Element [0] is reserved for global list linkage
1363 temp[0].set_object(CHAINMARKER);
1364
1365 // Consider carving out this thread's current request from the
1366 // block in hand. This avoids some lock traffic and redundant
1367 // list activity.
1368
1369 prepend_block_to_lists(temp);
1370 }
1371 }
1372
1373 // Place "m" on the caller's private per-thread om_free_list.
1374 // In practice there's no need to clamp or limit the number of
1375 // monitors on a thread's om_free_list: the only time om_release() is
1376 // called outside of allocation is to return a monitor to the free list
1377 // after a failed CAS attempt, so unbounded numbers of monitors cannot
1378 // accumulate on a thread's free list.
1379 //
1380 // Key constraint: all ObjectMonitors on a thread's free list and the global
1381 // free list must have their object field set to null. This prevents the
1382 // scavenger -- deflate_monitor_list() -- from reclaiming them while we
1383 // are trying to release them.
1384
1385 void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
1386 bool from_per_thread_alloc) {
1387 guarantee(m->header().value() == 0, "invariant");
1388 guarantee(m->object() == NULL, "invariant");
1389 NoSafepointVerifier nsv;
1390
1391 stringStream ss;
1392 guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
1393 "%s, recursions=" INTX_FORMAT, m->is_busy_to_string(&ss),
1394 m->_recursions);
1395 // _next_om is used for both per-thread in-use and free lists so
1396 // we have to remove 'm' from the in-use list first (as needed).
1397 if (from_per_thread_alloc) {
1398 // Need to remove 'm' from om_in_use_list.
1399 ObjectMonitor* mid = NULL;
1400 ObjectMonitor* next = NULL;
1401
1402 // This list walk can only race with another list walker since
1403 // deflation can only happen at a safepoint so we don't have to
1404 // worry about an ObjectMonitor being removed from this list
1405 // while we are walking it.
1406
1407 // Lock the list head to avoid racing with another list walker.
1408 if ((mid = get_list_head_locked(&self->om_in_use_list)) == NULL) {
1409 fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self));
1410 }
1411 next = unmarked_next(mid);
1412 if (m == mid) {
1413 // First special case:
1414 // 'm' matches mid, is the list head and is locked. Switch the list
1415 // head to next which unlocks the list head, but leaves the extracted
1416 // mid locked:
1417 Atomic::store(&self->om_in_use_list, next);
1418 } else if (m == next) {
1419 // Second special case:
1420 // 'm' matches next after the list head and we already have the list
1421 // head locked so set mid to what we are extracting:
1422 mid = next;
1423 // Lock mid to prevent races with a list walker:
1424 om_lock(mid);
1425 // Update next to what follows mid (if anything):
1426 next = unmarked_next(mid);
1427 // Switch next after the list head to new next which unlocks the
1428 // list head, but leaves the extracted mid locked:
1429 set_next(self->om_in_use_list, next);
1430 } else {
1431 // We have to search the list to find 'm'.
1432 om_unlock(mid); // unlock the list head
1433 guarantee(next != NULL, "thread=" INTPTR_FORMAT ": om_in_use_list=" INTPTR_FORMAT
1434 " is too short.", p2i(self), p2i(self->om_in_use_list));
1435 // Our starting anchor is next after the list head which is the
1436 // last ObjectMonitor we checked:
1437 ObjectMonitor* anchor = next;
1438 while ((mid = unmarked_next(anchor)) != NULL) {
1439 if (m == mid) {
1440 // We found 'm' on the per-thread in-use list so extract it.
1441 om_lock(anchor); // Lock the anchor so we can safely modify it.
1442 // Update next to what follows mid (if anything):
1443 next = unmarked_next(mid);
1444 // Switch next after the anchor to new next which unlocks the
1445 // anchor, but leaves the extracted mid locked:
1446 set_next(anchor, next);
1447 break;
1448 } else {
1449 anchor = mid;
1450 }
1451 }
1452 }
1453
1454 if (mid == NULL) {
1455 // Reached end of the list and didn't find 'm' so:
1456 fatal("thread=" INTPTR_FORMAT " must find m=" INTPTR_FORMAT " on om_in_use_list="
1457 INTPTR_FORMAT, p2i(self), p2i(m), p2i(self->om_in_use_list));
1458 }
1459
1460 // At this point mid is disconnected from the in-use list so
1461 // its lock no longer has any effects on the in-use list.
1462 Atomic::dec(&self->om_in_use_count);
1463 // Unlock mid, but leave the next value for any lagging list
1464 // walkers. It will get cleaned up when mid is prepended to
1465 // the thread's free list:
1466 om_unlock(mid);
1467 }
1468
1469 prepend_to_om_free_list(self, m);
1470 }
1471
1472 // Return ObjectMonitors on a moribund thread's free and in-use
1473 // lists to the appropriate global lists. The ObjectMonitors on the
1474 // per-thread in-use list may still be in use by other threads.
1475 //
1476 // We currently call om_flush() from Threads::remove() before the
1477 // thread has been excised from the thread list and is no longer a
1478 // mutator. This means that om_flush() cannot run concurrently with
1479 // a safepoint and interleave with deflate_idle_monitors(). In
1480 // particular, this ensures that the thread's in-use monitors are
1481 // scanned by a GC safepoint, either via Thread::oops_do() (before
1482 // om_flush() is called) or via ObjectSynchronizer::oops_do() (after
1483 // om_flush() is called).
1484
1485 void ObjectSynchronizer::om_flush(Thread* self) {
1486 // Process the per-thread in-use list first to be consistent.
1487 int in_use_count = 0;
1488 ObjectMonitor* in_use_list = NULL;
1489 ObjectMonitor* in_use_tail = NULL;
1490 NoSafepointVerifier nsv;
1491
1492 // This function can race with a list walker thread so we lock the
1493 // list head to prevent confusion.
1494 if ((in_use_list = get_list_head_locked(&self->om_in_use_list)) != NULL) {
1495 // At this point, we have locked the in-use list head so a racing
1496 // thread cannot come in after us. However, a racing thread could
1497 // be ahead of us; we'll detect that and delay to let it finish.
1498 //
1499 // The thread is going away, however the ObjectMonitors on the
1500 // om_in_use_list may still be in-use by other threads. Link
1501 // them to in_use_tail, which will be linked into the global
1502 // in-use list (LVars.in_use_list) below.
1503 //
1504 // Account for the in-use list head before the loop since it is
1505 // already locked (by this thread):
1506 in_use_tail = in_use_list;
1507 in_use_count++;
1508 for (ObjectMonitor* cur_om = unmarked_next(in_use_list); cur_om != NULL; cur_om = unmarked_next(cur_om)) {
1509 if (is_locked(cur_om)) {
1510 // cur_om is locked so there must be a racing walker thread ahead
1511 // of us so we'll give it a chance to finish.
1512 while (is_locked(cur_om)) {
1513 os::naked_short_sleep(1);
1514 }
1515 }
1516 in_use_tail = cur_om;
1517 in_use_count++;
1518 }
1519 guarantee(in_use_tail != NULL, "invariant");
1520 int l_om_in_use_count = Atomic::load(&self->om_in_use_count);
1521 assert(l_om_in_use_count == in_use_count, "in-use counts don't match: "
1522 "l_om_in_use_count=%d, in_use_count=%d", l_om_in_use_count, in_use_count);
1523 Atomic::store(&self->om_in_use_count, 0);
1524 // Clear the in-use list head (which also unlocks it):
1525 Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL);
1526 om_unlock(in_use_list);
1527 }
1528
1529 int free_count = 0;
1530 ObjectMonitor* free_list = NULL;
1531 ObjectMonitor* free_tail = NULL;
1532 // This function can race with a list walker thread so we lock the
1533 // list head to prevent confusion.
1534 if ((free_list = get_list_head_locked(&self->om_free_list)) != NULL) {
1535 // At this point, we have locked the free list head so a racing
1536 // thread cannot come in after us. However, a racing thread could
1537 // be ahead of us; we'll detect that and delay to let it finish.
1538 //
1539 // The thread is going away. Set 'free_tail' to the last per-thread free
1540 // monitor which will be linked to LVars.free_list below.
1541 //
1542 // Account for the free list head before the loop since it is
1543 // already locked (by this thread):
1544 free_tail = free_list;
1545 free_count++;
1546 for (ObjectMonitor* s = unmarked_next(free_list); s != NULL; s = unmarked_next(s)) {
1547 if (is_locked(s)) {
1548 // s is locked so there must be a racing walker thread ahead
1549 // of us so we'll give it a chance to finish.
1550 while (is_locked(s)) {
1551 os::naked_short_sleep(1);
1552 }
1553 }
1554 free_tail = s;
1555 free_count++;
1556 guarantee(s->object() == NULL, "invariant");
1557 stringStream ss;
1558 guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
1559 }
1560 guarantee(free_tail != NULL, "invariant");
1561 int l_om_free_count = Atomic::load(&self->om_free_count);
1562 assert(l_om_free_count == free_count, "free counts don't match: "
1563 "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count);
1564 Atomic::store(&self->om_free_count, 0);
1565 Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL);
1566 om_unlock(free_list);
1567 }
1568
1569 if (free_tail != NULL) {
1570 prepend_list_to_global_free_list(free_list, free_tail, free_count);
1571 }
1572
1573 if (in_use_tail != NULL) {
1574 prepend_list_to_global_in_use_list(in_use_list, in_use_tail, in_use_count);
1575 }
1576
1577 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1578 LogStreamHandle(Info, monitorinflation) lsh_info;
1579 LogStream* ls = NULL;
1580 if (log_is_enabled(Debug, monitorinflation)) {
1581 ls = &lsh_debug;
1582 } else if ((free_count != 0 || in_use_count != 0) &&
1583 log_is_enabled(Info, monitorinflation)) {
1584 ls = &lsh_info;
1585 }
1586 if (ls != NULL) {
1587 ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d"
1588 ", in_use_count=%d" ", om_free_provision=%d",
1589 p2i(self), free_count, in_use_count, self->om_free_provision);
1590 }
1591 }
1592
1593 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
1594 const oop obj,
1595 ObjectSynchronizer::InflateCause cause) {
1596 assert(event != NULL, "invariant");
1597 assert(event->should_commit(), "invariant");
1598 event->set_monitorClass(obj->klass());
1599 event->set_address((uintptr_t)(void*)obj);
1600 event->set_cause((u1)cause);
1601 event->commit();
1602 }
1603
1604 // Fast path code shared by multiple functions
1605 void ObjectSynchronizer::inflate_helper(oop obj) {
1606 markWord mark = obj->mark();
1607 if (mark.has_monitor()) {
1608 assert(ObjectSynchronizer::verify_objmon_isinpool(mark.monitor()), "monitor is invalid");
1609 assert(mark.monitor()->header().is_neutral(), "monitor must record a good object header");
1610 return;
1611 }
1612 inflate(Thread::current(), obj, inflate_cause_vm_internal);
1613 }
1614
1615 ObjectMonitor* ObjectSynchronizer::inflate(Thread* self,
1616 oop object, const InflateCause cause) {
1617 // Inflate mutates the heap ...
1618 // Relaxing assertion for bug 6320749.
1619 assert(Universe::verify_in_progress() ||
1620 !SafepointSynchronize::is_at_safepoint(), "invariant");
1621
1622 EventJavaMonitorInflate event;
1623
1624 for (;;) {
1625 const markWord mark = object->mark();
1626 assert(!mark.has_bias_pattern(), "invariant");
1627
1628 // The mark can be in one of the following states:
1629 // * Inflated - just return
1630 // * Stack-locked - coerce it to inflated
1631 // * INFLATING - busy wait for conversion to complete
1632 // * Neutral - aggressively inflate the object.
1633 // * BIASED - Illegal. We should never see this
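//
// (For reference, in the mark word's low-order bits: 00 denotes
// stack-locked, 01 neutral/unlocked, 10 inflated -- the mark encodes
// the ObjectMonitor* -- and an all-zero mark word is the transient
// INFLATING sentinel.)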
1634
1635 // CASE: inflated
1636 if (mark.has_monitor()) {
1827 // This is an unfortunate aspect of this design.
1828
1829 // Deflate a single monitor if not in-use
1830 // Return true if deflated, false if in-use
1831 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1832 ObjectMonitor** free_head_p,
1833 ObjectMonitor** free_tail_p) {
1834 bool deflated;
1835 // Normal case ... The monitor is associated with obj.
1836 const markWord mark = obj->mark();
1837 guarantee(mark == markWord::encode(mid), "should match: mark="
1838 INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
1839 markWord::encode(mid).value());
1840 // Make sure that mark.monitor() and markWord::encode() agree:
1841 guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
1842 ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
1843 const markWord dmw = mid->header();
1844 guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1845
1846 if (mid->is_busy()) {
1847 // Easy checks are first - the ObjectMonitor is busy so no deflation.
1848 deflated = false;
1849 } else {
1850 // Deflate the monitor if it is no longer being used
1851 // It's idle - scavenge and return to the global free list
1852 // plain old deflation ...
1853 if (log_is_enabled(Trace, monitorinflation)) {
1854 ResourceMark rm;
1855 log_trace(monitorinflation)("deflate_monitor: "
1856 "object=" INTPTR_FORMAT ", mark="
1857 INTPTR_FORMAT ", type='%s'", p2i(obj),
1858 mark.value(), obj->klass()->external_name());
1859 }
1860
1861 // Restore the header back to obj
1862 obj->release_set_mark(dmw);
1863 mid->clear();
1864
1865 assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
1866 p2i(mid->object()));
1867
1868 // Move the deflated ObjectMonitor to the working free list
1869 // defined by free_head_p and free_tail_p.
1870 if (*free_head_p == NULL) *free_head_p = mid;
1871 if (*free_tail_p != NULL) {
1872 // We append to the list so the caller can use mid->_next_om
1873 // to fix the linkages in its context.
1874 ObjectMonitor* prevtail = *free_tail_p;
1875 // Should have been cleaned up by the caller:
1876 // Note: Should not have to lock prevtail here since we're at a
1877 // safepoint and ObjectMonitors on the local free list should
1878 // not be accessed in parallel.
1879 #ifdef ASSERT
1880 ObjectMonitor* l_next_om = Atomic::load(&prevtail->_next_om);
1881 #endif
1882 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
1883 set_next(prevtail, mid);
1884 }
1885 *free_tail_p = mid;
1886 // At this point, mid->_next_om still refers to its current
1887 // value and another ObjectMonitor's _next_om field still
1888 // refers to this ObjectMonitor. Those linkages have to be
1889 // cleaned up by the caller who has the complete context.
1890 deflated = true;
1891 }
1892 return deflated;
1893 }
1894
1895 // Walk a given monitor list, and deflate idle monitors.
1896 // The given list could be a per-thread list or a global list.
1897 //
1898 // In the case of parallel processing of thread local monitor lists,
1899 // work is done by Threads::parallel_threads_do() which ensures that
1900 // each Java thread is processed by exactly one worker thread, and
1901 // thus avoids conflicts that would arise if worker threads were to
1902 // process the same monitor lists concurrently.
1903 //
1904 // See also ParallelSPCleanupTask and
1905 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
1906 // Threads::parallel_java_threads_do() in thread.cpp.
1907 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
1908 int* count_p,
1909 ObjectMonitor** free_head_p,
1910 ObjectMonitor** free_tail_p) {
1911 ObjectMonitor* cur_mid_in_use = NULL;
1912 ObjectMonitor* mid = NULL;
1913 ObjectMonitor* next = NULL;
1914 int deflated_count = 0;
1915
1916 // This list walk executes at a safepoint and does not race with any
1917 // other list walkers.
1918
1919 for (mid = Atomic::load(list_p); mid != NULL; mid = next) {
1920 next = unmarked_next(mid);
1921 oop obj = (oop) mid->object();
1922 if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
1923 // Deflation succeeded and already updated free_head_p and
1924 // free_tail_p as needed. Finish the move to the local free list
1925 // by unlinking mid from the global or per-thread in-use list.
1926 if (cur_mid_in_use == NULL) {
1927 // mid is the list head so switch the list head to next:
1928 Atomic::store(list_p, next);
1929 } else {
1930 // Switch cur_mid_in_use's next field to next:
1931 set_next(cur_mid_in_use, next);
1932 }
1933 // At this point mid is disconnected from the in-use list.
1934 deflated_count++;
1935 Atomic::dec(count_p);
1936 // mid is current tail in the free_head_p list so NULL terminate it:
1937 set_next(mid, NULL);
1938 } else {
1939 cur_mid_in_use = mid;
1940 }
1941 }
1942 return deflated_count;
1943 }
1944
1945 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
1946 counters->n_in_use = 0; // currently associated with objects
1947 counters->n_in_circulation = 0; // extant
1948 counters->n_scavenged = 0; // reclaimed (global and per-thread)
1949 counters->per_thread_scavenged = 0; // per-thread scavenge total
1950 counters->per_thread_times = 0.0; // per-thread scavenge times
1951 }
1952
1953 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
1954 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1955 bool deflated = false;
1956
1957 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors
1958 ObjectMonitor* free_tail_p = NULL;
1959 elapsedTimer timer;
1960
1961 if (log_is_enabled(Info, monitorinflation)) {
1962 timer.start();
1963 }
1964
1965 // Note: the thread-local monitors lists get deflated in
1966 // a separate pass. See deflate_thread_local_monitors().
1967
1968 // For moribund threads, scan LVars.in_use_list
1969 int deflated_count = 0;
1970 if (Atomic::load(&LVars.in_use_list) != NULL) {
1971 // Update n_in_circulation before LVars.in_use_count is updated by deflation.
1972 Atomic::add(&counters->n_in_circulation, Atomic::load(&LVars.in_use_count));
1973
1974 deflated_count = deflate_monitor_list(&LVars.in_use_list, &LVars.in_use_count, &free_head_p, &free_tail_p);
1975 Atomic::add(&counters->n_in_use, Atomic::load(&LVars.in_use_count));
1976 }
1977
1978 if (free_head_p != NULL) {
1979 // Move the deflated ObjectMonitors back to the global free list.
1980 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
1981 #ifdef ASSERT
1982 ObjectMonitor* l_next_om = Atomic::load(&free_tail_p->_next_om);
1983 #endif
1984 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
1985 prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
1986 Atomic::add(&counters->n_scavenged, deflated_count);
1987 }
1988 timer.stop();
1989
1990 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1991 LogStreamHandle(Info, monitorinflation) lsh_info;
1992 LogStream* ls = NULL;
1993 if (log_is_enabled(Debug, monitorinflation)) {
1994 ls = &lsh_debug;
1995 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
1996 ls = &lsh_info;
1997 }
1998 if (ls != NULL) {
1999 ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
2000 }
2001 }
2002
2003 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2004 // Report the cumulative time for deflating each thread's idle
2005 // monitors. Note: if the work is split among more than one
2006 // worker thread, then the reported time will likely be more
2007 // than a beginning to end measurement of the phase.
2008 log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged);
2009
2010 if (log_is_enabled(Debug, monitorinflation)) {
2011 // exit_globals()'s call to audit_and_print_stats() is done
2012 // at the Info level and not at a safepoint.
2013 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
2014 } else if (log_is_enabled(Info, monitorinflation)) {
2015 log_info(monitorinflation)("global_population=%d, global_in_use_count=%d, "
2016 "global_free_count=%d", Atomic::load(&LVars.population),
2017 Atomic::load(&LVars.in_use_count), Atomic::load(&LVars.free_count));
2018 }
2019
2020 Atomic::store(&_forceMonitorScavenge, 0); // Reset
2021
2022 OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
2023 OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
2024
2025 GVars.stw_random = os::random();
2026 GVars.stw_cycle++;
2027 }
2028
2029 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
2030 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2031
2032 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors
2033 ObjectMonitor* free_tail_p = NULL;
2034 elapsedTimer timer;
2035
2036 if (log_is_enabled(Info, safepoint, cleanup) ||
2037 log_is_enabled(Info, monitorinflation)) {
2038 timer.start();
2039 }
2040
2041 // Update n_in_circulation before om_in_use_count is updated by deflation.
2042 Atomic::add(&counters->n_in_circulation, Atomic::load(&thread->om_in_use_count));
2043
2044 int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
2045 Atomic::add(&counters->n_in_use, Atomic::load(&thread->om_in_use_count));
2046
2047 if (free_head_p != NULL) {
2048 // Move the deflated ObjectMonitors back to the global free list.
2049 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
2050 #ifdef ASSERT
2051 ObjectMonitor* l_next_om = Atomic::load(&free_tail_p->_next_om);
2052 #endif
2053 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
2054 prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
2055 Atomic::add(&counters->n_scavenged, deflated_count);
2056 Atomic::add(&counters->per_thread_scavenged, deflated_count);
2057 }
2058
2059 timer.stop();
2060 // Safepoint logging cares about cumulative per_thread_times and
2061 // we'll capture most of the cost, but not the trailing bookkeeping,
2062 // which should be cheap.
2063 counters->per_thread_times += timer.seconds();
2064
2065 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2066 LogStreamHandle(Info, monitorinflation) lsh_info;
2067 LogStream* ls = NULL;
2068 if (log_is_enabled(Debug, monitorinflation)) {
2069 ls = &lsh_debug;
2070 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2071 ls = &lsh_info;
2072 }
2073 if (ls != NULL) {
2074 ls->print_cr("jt=" INTPTR_FORMAT ": deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(thread), timer.seconds(), deflated_count);
2075 }
2076 }
2077
2078 // Monitor cleanup on JavaThread::exit
2079
2080 // Iterate through monitor cache and attempt to release thread's monitors
2081 // Gives up on a particular monitor if an exception occurs, but continues
2082 // the overall iteration, swallowing the exception.
2083 class ReleaseJavaMonitorsClosure: public MonitorClosure {
2084 private:
2095
2096 // Release all inflated monitors owned by THREAD. Lightweight monitors are
2097 // ignored. This is meant to be called during JNI thread detach which assumes
2098 // all remaining monitors are heavyweight. All exceptions are swallowed.
2099 // Scanning the extant monitor list can be time consuming.
2100 // A simple optimization is to add a per-thread flag that indicates a thread
2101 // called jni_monitorenter() during its lifetime.
2102 //
2103 // Instead of NoSafepointVerifier it might be cheaper to
2104 // use an idiom of the form:
2105 //   intptr_t tmp = SafepointSynchronize::_safepoint_counter;
2106 //   <code that must not run at safepoint>
2107 //   guarantee(((tmp ^ _safepoint_counter) | (tmp & 1)) == 0);
2108 // Since the tests are extremely cheap we could leave them enabled
2109 // for normal product builds.
2110
2111 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
2112 assert(THREAD == JavaThread::current(), "must be current Java thread");
2113 NoSafepointVerifier nsv;
2114 ReleaseJavaMonitorsClosure rjmc(THREAD);
2115 ObjectSynchronizer::monitors_iterate(&rjmc);
2116 THREAD->clear_pending_exception();
2117 }
2118
2119 const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
2120 switch (cause) {
2121 case inflate_cause_vm_internal: return "VM Internal";
2122 case inflate_cause_monitor_enter: return "Monitor Enter";
2123 case inflate_cause_wait: return "Monitor Wait";
2124 case inflate_cause_notify: return "Monitor Notify";
2125 case inflate_cause_hash_code: return "Monitor Hash Code";
2126 case inflate_cause_jni_enter: return "JNI Monitor Enter";
2127 case inflate_cause_jni_exit: return "JNI Monitor Exit";
2128 default:
2129 ShouldNotReachHere();
2130 }
2131 return "Unknown";
2132 }
2133
2134 //------------------------------------------------------------------------------
2135 // Debugging code
2136
2137 u_char* ObjectSynchronizer::get_gvars_addr() {
2138 return (u_char*)&GVars;
2139 }
2140
2141 u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
2142 return (u_char*)&GVars.hc_sequence;
2143 }
2144
2145 size_t ObjectSynchronizer::get_gvars_size() {
2146 return sizeof(SharedGlobals);
2147 }
2148
2149 u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
2150 return (u_char*)&GVars.stw_random;
2151 }
2152
2153 // This function can be called at a safepoint or it can be called when
2154 // we are trying to exit the VM. When we are trying to exit the VM, the
2155 // list walker functions can run in parallel with the other list
2156 // operations so spin-locking is used for safety.
2157 //
2158 // Calls to this function can be added in various places as a debugging
2159 // aid; pass 'true' for the 'on_exit' parameter to have in-use monitor
2160 // details logged at the Info level and 'false' for the 'on_exit'
2161 // parameter to have in-use monitor details logged at the Trace level.
2162 //
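// For example, a debugging call site could be:
//   ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
//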
2163 void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
2164 assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");
2165
2166 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2167 LogStreamHandle(Info, monitorinflation) lsh_info;
2168 LogStreamHandle(Trace, monitorinflation) lsh_trace;
2169 LogStream* ls = NULL;
2170 if (log_is_enabled(Trace, monitorinflation)) {
2171 ls = &lsh_trace;
2172 } else if (log_is_enabled(Debug, monitorinflation)) {
2173 ls = &lsh_debug;
2174 } else if (log_is_enabled(Info, monitorinflation)) {
2175 ls = &lsh_info;
2176 }
2177 assert(ls != NULL, "sanity check");
2178
2179 // Log counts for the global and per-thread monitor lists:
2180 int chk_om_population = log_monitor_list_counts(ls);
2181 int error_cnt = 0;
2182
2183 ls->print_cr("Checking global lists:");
2184
2185 // Check LVars.population:
2186 if (Atomic::load(&LVars.population) == chk_om_population) {
2187 ls->print_cr("global_population=%d equals chk_om_population=%d",
2188 Atomic::load(&LVars.population), chk_om_population);
2189 } else {
2190 // With fine grained locks on the monitor lists, it is possible for
2191 // log_monitor_list_counts() to return a value that doesn't match
2192 // LVars.population. So far a higher value has been seen in testing
2193 // so something is being double counted by log_monitor_list_counts().
2194 ls->print_cr("WARNING: global_population=%d is not equal to "
2195 "chk_om_population=%d", Atomic::load(&LVars.population), chk_om_population);
2196 }
2197
2198 // Check LVars.in_use_list and LVars.in_use_count:
2199 chk_global_in_use_list_and_count(ls, &error_cnt);
2200
2201 // Check LVars.free_list and LVars.free_count:
2202 chk_global_free_list_and_count(ls, &error_cnt);
2203
2204 ls->print_cr("Checking per-thread lists:");
2205
2206 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2207 // Check om_in_use_list and om_in_use_count:
2208 chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);
2209
2210 // Check om_free_list and om_free_count:
2211 chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
2212 }
2213
2214 if (error_cnt == 0) {
2215 ls->print_cr("No errors found in monitor list checks.");
2216 } else {
2217 log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
2218 }
2219
2220 if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
2221 (!on_exit && log_is_enabled(Trace, monitorinflation))) {
2222 // When exiting this log output is at the Info level. When called
2223 // at a safepoint, this log output is at the Trace level since
2224 // there can be a lot of it.
2225 log_in_use_monitor_details(ls);
2226 }
2227
2228 ls->flush();
2229
2230 guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
2231 }
2232
2233 // Check a free monitor entry; log any errors.
2234 void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n,
2235 outputStream * out, int *error_cnt_p) {
2236 stringStream ss;
2237 if (n->is_busy()) {
2238 if (jt != NULL) {
2239 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2240 ": free per-thread monitor must not be busy: %s", p2i(jt),
2241 p2i(n), n->is_busy_to_string(&ss));
2242 } else {
2243 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
2244 "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
2245 }
2256 "must have NULL _header field: _header=" INTPTR_FORMAT,
2257 p2i(n), n->header().value());
2258 }
2259 *error_cnt_p = *error_cnt_p + 1;
2260 }
2261 if (n->object() != NULL) {
2262 if (jt != NULL) {
2263 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2264 ": free per-thread monitor must have NULL _object "
2265 "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
2266 p2i(n->object()));
2267 } else {
2268 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
2269 "must have NULL _object field: _object=" INTPTR_FORMAT,
2270 p2i(n), p2i(n->object()));
2271 }
2272 *error_cnt_p = *error_cnt_p + 1;
2273 }
2274 }
2275
2276 // Lock the next ObjectMonitor for traversal and unlock the current
2277 // ObjectMonitor. Returns the next ObjectMonitor if there is one.
2278 // Otherwise returns NULL (after unlocking the current ObjectMonitor).
2279 // This function is used by the various list walker functions to
2280 // safely walk a list without allowing an ObjectMonitor to be moved
2281 // to another list in the middle of a walk.
2282 static ObjectMonitor* lock_next_for_traversal(ObjectMonitor* cur) {
2283 assert(is_locked(cur), "cur=" INTPTR_FORMAT " must be locked", p2i(cur));
2284 ObjectMonitor* next = unmarked_next(cur);
2285 if (next == NULL) { // Reached the end of the list.
2286 om_unlock(cur);
2287 return NULL;
2288 }
2289 om_lock(next); // Lock next before unlocking current to keep
2290 om_unlock(cur); // from being by-passed by another thread.
2291 return next;
2292 }
2293
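// A typical hand-over-hand walk, as used by the check functions below
// (sketch only):
//   ObjectMonitor* cur;
//   if ((cur = get_list_head_locked(&some_list)) != NULL) {
//     while (true) {
//       // examine cur; it is locked so it cannot be moved elsewhere
//       cur = lock_next_for_traversal(cur);
//       if (cur == NULL) break;
//     }
//   }
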
2294 // Check the global free list and count; log the results of the checks.
2295 void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
2296 int *error_cnt_p) {
2297 int chk_om_free_count = 0;
2298 ObjectMonitor* cur = NULL;
2299 if ((cur = get_list_head_locked(&LVars.free_list)) != NULL) {
2300 // Marked the global free list head so process the list.
2301 while (true) {
2302 chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
2303 chk_om_free_count++;
2304
2305 cur = lock_next_for_traversal(cur);
2306 if (cur == NULL) {
2307 break;
2308 }
2309 }
2310 }
2311 int l_free_count = Atomic::load(&LVars.free_count);
2312 if (l_free_count == chk_om_free_count) {
2313 out->print_cr("global_free_count=%d equals chk_om_free_count=%d",
2314 l_free_count, chk_om_free_count);
2315 } else {
2316 // With fine grained locks on LVars.free_list, it is possible for an
2317 // ObjectMonitor to be prepended to LVars.free_list after we started
2318 // calculating chk_om_free_count so LVars.free_count may not
2319 // match anymore.
2320 out->print_cr("WARNING: global_free_count=%d is not equal to "
2321 "chk_om_free_count=%d", l_free_count, chk_om_free_count);
2322 }
2323 }
2324
2325 // Check the global in-use list and count; log the results of the checks.
2326 void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
2327 int *error_cnt_p) {
2328 int chk_om_in_use_count = 0;
2329 ObjectMonitor* cur = NULL;
2330 if ((cur = get_list_head_locked(&LVars.in_use_list)) != NULL) {
2331 // Marked the global in-use list head so process the list.
2332 while (true) {
2333 chk_in_use_entry(NULL /* jt */, cur, out, error_cnt_p);
2334 chk_om_in_use_count++;
2335
2336 cur = lock_next_for_traversal(cur);
2337 if (cur == NULL) {
2338 break;
2339 }
2340 }
2341 }
2342 int l_in_use_count = Atomic::load(&LVars.in_use_count);
2343 if (l_in_use_count == chk_om_in_use_count) {
2344 out->print_cr("global_in_use_count=%d equals chk_om_in_use_count=%d",
2345 l_in_use_count, chk_om_in_use_count);
2346 } else {
2347 // With fine grained locks on the monitor lists, it is possible for
2348 // an exiting JavaThread to put its in-use ObjectMonitors on the
2349 // global in-use list after chk_om_in_use_count is calculated above.
2350 out->print_cr("WARNING: global_in_use_count=%d is not equal to chk_om_in_use_count=%d",
2351 l_in_use_count, chk_om_in_use_count);
2352 }
2353 }
2354
2355 // Check an in-use monitor entry; log any errors.
2356 void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
2357 outputStream * out, int *error_cnt_p) {
2358 if (n->header().value() == 0) {
2359 if (jt != NULL) {
2360 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2361 ": in-use per-thread monitor must have non-NULL _header "
2362 "field.", p2i(jt), p2i(n));
2363 } else {
2364 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
2365 "must have non-NULL _header field.", p2i(n));
2366 }
2367 *error_cnt_p = *error_cnt_p + 1;
2368 }
2369 if (n->object() == NULL) {
2370 if (jt != NULL) {
2371 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2399 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2400 ": in-use per-thread monitor's object does not refer "
2401 "to the same monitor: obj=" INTPTR_FORMAT ", mark="
2402 INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
2403 p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
2404 } else {
2405 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
2406 "monitor's object does not refer to the same monitor: obj="
2407 INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
2408 INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
2409 }
2410 *error_cnt_p = *error_cnt_p + 1;
2411 }
2412 }
2413
2414 // Check the thread's free list and count; log the results of the checks.
2415 void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
2416 outputStream * out,
2417 int *error_cnt_p) {
2418 int chk_om_free_count = 0;
2419 ObjectMonitor* cur = NULL;
2420 if ((cur = get_list_head_locked(&jt->om_free_list)) != NULL) {
2421 // Marked the per-thread free list head so process the list.
2422 while (true) {
2423 chk_free_entry(jt, cur, out, error_cnt_p);
2424 chk_om_free_count++;
2425
2426 cur = lock_next_for_traversal(cur);
2427 if (cur == NULL) {
2428 break;
2429 }
2430 }
2431 }
2432 int l_om_free_count = Atomic::load(&jt->om_free_count);
2433 if (l_om_free_count == chk_om_free_count) {
2434 out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals "
2435 "chk_om_free_count=%d", p2i(jt), l_om_free_count, chk_om_free_count);
2436 } else {
2437 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not "
2438 "equal to chk_om_free_count=%d", p2i(jt), l_om_free_count,
2439 chk_om_free_count);
2440 *error_cnt_p = *error_cnt_p + 1;
2441 }
2442 }
2443
2444 // Check the thread's in-use list and count; log the results of the checks.
2445 void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
2446 outputStream * out,
2447 int *error_cnt_p) {
2448 int chk_om_in_use_count = 0;
2449 ObjectMonitor* cur = NULL;
2450 if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
2451 // Marked the per-thread in-use list head so process the list.
2452 while (true) {
2453 chk_in_use_entry(jt, cur, out, error_cnt_p);
2454 chk_om_in_use_count++;
2455
2456 cur = lock_next_for_traversal(cur);
2457 if (cur == NULL) {
2458 break;
2459 }
2460 }
2461 }
2462 int l_om_in_use_count = Atomic::load(&jt->om_in_use_count);
2463 if (l_om_in_use_count == chk_om_in_use_count) {
2464 out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
2465 "chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count,
2466 chk_om_in_use_count);
2467 } else {
2468 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
2469 "equal to chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count,
2470 chk_om_in_use_count);
2471 *error_cnt_p = *error_cnt_p + 1;
2472 }
2473 }
2474
2475 // Log details about ObjectMonitors on the in-use lists. The 'BHL'
2476 // flags indicate why the entry is in-use, 'object' and 'object type'
2477 // indicate the associated object and its type.
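// An in-use row might look like (illustrative values only):
//   0x00007f30cc008800 101 0x00000007c0011938 java.lang.Object
// i.e., busy, no hash code installed, and the owner field is set.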
2478 void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) {
2479 stringStream ss;
2480 if (Atomic::load(&LVars.in_use_count) > 0) {
2481 out->print_cr("In-use global monitor info:");
2482 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
2483 out->print_cr("%18s %s %18s %18s",
2484 "monitor", "BHL", "object", "object type");
2485 out->print_cr("================== === ================== ==================");
2486 ObjectMonitor* cur = NULL;
2487 if ((cur = get_list_head_locked(&LVars.in_use_list)) != NULL) {
2488 // Marked the global in-use list head so process the list.
2489 while (true) {
2490 const oop obj = (oop) cur->object();
2491 const markWord mark = cur->header();
2492 ResourceMark rm;
2493 out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(cur),
2494 cur->is_busy() != 0, mark.hash() != 0, cur->owner() != NULL,
2495 p2i(obj), obj->klass()->external_name());
2496 if (cur->is_busy() != 0) {
2497 out->print(" (%s)", cur->is_busy_to_string(&ss));
2498 ss.reset();
2499 }
2500 out->cr();
2501
2502 cur = lock_next_for_traversal(cur);
2503 if (cur == NULL) {
2504 break;
2505 }
2506 }
2507 }
2508 }
2509
2510 out->print_cr("In-use per-thread monitor info:");
2511 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
2512 out->print_cr("%18s %18s %s %18s %18s",
2513 "jt", "monitor", "BHL", "object", "object type");
2514 out->print_cr("================== ================== === ================== ==================");
2515 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2516 ObjectMonitor* cur = NULL;
2517 if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
2518 // Marked the per-thread in-use list head so process the list.
2519 while (true) {
2520 const oop obj = (oop) cur->object();
2521 const markWord mark = cur->header();
2522 ResourceMark rm;
2523 out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT
2524 " %s", p2i(jt), p2i(cur), cur->is_busy() != 0,
2525 mark.hash() != 0, cur->owner() != NULL, p2i(obj),
2526 obj->klass()->external_name());
2527 if (cur->is_busy() != 0) {
2528 out->print(" (%s)", cur->is_busy_to_string(&ss));
2529 ss.reset();
2530 }
2531 out->cr();
2532
2533 cur = lock_next_for_traversal(cur);
2534 if (cur == NULL) {
2535 break;
2536 }
2537 }
2538 }
2539 }
2540
2541 out->flush();
2542 }
2543
2544 // Log counts for the global and per-thread monitor lists and return
2545 // the population count.
2546 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
2547 int pop_count = 0;
2548 out->print_cr("%18s %10s %10s %10s",
2549 "Global Lists:", "InUse", "Free", "Total");
2550 out->print_cr("================== ========== ========== ==========");
2551 int l_in_use_count = Atomic::load(&LVars.in_use_count);
2552 int l_free_count = Atomic::load(&LVars.free_count);
2553 out->print_cr("%18s %10d %10d %10d", "", l_in_use_count,
2554 l_free_count, Atomic::load(&LVars.population));
2555 pop_count += l_in_use_count + l_free_count;
2556
2557 out->print_cr("%18s %10s %10s %10s",
2558 "Per-Thread Lists:", "InUse", "Free", "Provision");
2559 out->print_cr("================== ========== ========== ==========");
2560
2561 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2562 int l_om_in_use_count = Atomic::load(&jt->om_in_use_count);
2563 int l_om_free_count = Atomic::load(&jt->om_free_count);
2564 out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
2565 l_om_in_use_count, l_om_free_count, jt->om_free_provision);
2566 pop_count += l_om_in_use_count + l_om_free_count;
2567 }
2568 return pop_count;
2569 }
2570
2571 #ifndef PRODUCT
2572
2573 // Check if monitor belongs to the monitor cache
2574 // The list is grow-only so it's *relatively* safe to traverse
2575 // the list of extant blocks without taking a lock.
2576
2577 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
2578 PaddedObjectMonitor* block = Atomic::load(&g_block_list);
2579 while (block != NULL) {
2580 assert(block->object() == CHAINMARKER, "must be a block header");
2581 if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
2582 address mon = (address)monitor;
2583 address blk = (address)block;
2584 size_t diff = mon - blk;
2585 assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
2586 return 1;
2587 }
2588 // unmarked_next() is not needed with g_block_list (no locking
2589 // used with block linkage _next_om fields).
2590 block = (PaddedObjectMonitor*)Atomic::load(&block->_next_om);
2591 }
2592 return 0;
2593 }
2594
2595 #endif
|