108 #endif // ndef DTRACE_ENABLED
109
// This exists only as a workaround of dtrace bug 6254741
// Forwards to the DTRACE_MONITOR_PROBE(waited) macro and always
// returns 0; callers use it purely for the probe side effect.
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}
115
#define NINFLATIONLOCKS 256
// Striped lock words (presumably used to serialize inflation attempts;
// the use sites are outside this chunk -- confirm before relying on this).
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedEnd<ObjectMonitor> * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;
// Set at a safepoint (do_safepoint_work) to ask the ServiceThread to
// deflate global idle monitors; cleared by
// deflate_global_idle_monitors_using_JT() before doing the work.
bool ObjectSynchronizer::_gOmShouldDeflateIdleMonitors = false;
// Set when a special cleanup has been requested; cleared by
// finish_deflate_idle_monitors() once that cleanup is done.
bool volatile ObjectSynchronizer::_is_cleanup_requested = false;

static volatile intptr_t gListLock = 0; // protects global monitor lists
static volatile int gMonitorFreeCount = 0; // # on gFreeList
static volatile int gMonitorPopulation = 0; // # Extant -- in circulation

// Sentinel stored in a block header's object field so list walkers can
// tell headers apart from real monitors (see the asserts in next()).
#define CHAINMARKER (cast_to_oop<intptr_t>(-1))
136
137
138 // =====================> Quick functions
139
140 // The quick_* forms are special fast-path variants used to improve
141 // performance. In the simplest case, a "quick_*" implementation could
142 // simply return false, in which case the caller will perform the necessary
143 // state transitions and call the slow-path form.
144 // The fast-path is designed to handle frequently arising cases in an efficient
145 // manner and is just a degenerate "optimistic" variant of the slow-path.
146 // returns true -- to indicate the call was satisfied.
147 // returns false -- to indicate the call needs the services of the slow-path.
148 // A no-loitering ordinance is in effect for code in the quick_* family
149 // operators: safepoints or indefinite blocking (blocking that might span a
996 }
997 closure->do_monitor(mid);
998 }
999 }
1000 block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
1001 }
1002 }
1003
1004 // Get the next block in the block list.
1005 static inline PaddedEnd<ObjectMonitor>* next(PaddedEnd<ObjectMonitor>* block) {
1006 assert(block->object() == CHAINMARKER, "must be a block header");
1007 block = (PaddedEnd<ObjectMonitor>*) block->FreeNext;
1008 assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
1009 return block;
1010 }
1011
1012 static bool monitors_used_above_threshold() {
1013 if (gMonitorPopulation == 0) {
1014 return false;
1015 }
1016 int monitors_used = gMonitorPopulation - gMonitorFreeCount;
1017 int monitor_usage = (monitors_used * 100LL) / gMonitorPopulation;
1018 return monitor_usage > MonitorUsedDeflationThreshold;
1019 }
1020
1021 bool ObjectSynchronizer::is_cleanup_needed() {
1022 if (MonitorUsedDeflationThreshold > 0) {
1023 return monitors_used_above_threshold();
1024 }
1025 return false;
1026 }
1027
// Visit the oops referenced by the global in-use monitor list.
void ObjectSynchronizer::oops_do(OopClosure* f) {
  // We only scan the global used list here (for moribund threads), and
  // the thread-local monitors in Thread::oops_do().
  global_used_oops_do(f);
}
1033
// Apply the closure to each oop on the global in-use monitor list.
// Must run at a safepoint so the list cannot change underneath us.
void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(gOmInUseList, f);
}
1038
// Apply the closure to each oop on the given thread's in-use monitor
// list. Must run at a safepoint.
void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->omInUseList, f);
}
1043
1044 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
1045 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1046 ObjectMonitor* mid;
1047 for (mid = list; mid != NULL; mid = mid->FreeNext) {
1101 // to the VMthread and have a lifespan longer than that of this activation record.
1102 // The VMThread will delete the op when completed.
1103 VMThread::execute(new VM_ScavengeMonitors());
1104 }
1105 }
1106
1107 ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self,
1108 const InflateCause cause) {
1109 // A large MAXPRIVATE value reduces both list lock contention
1110 // and list coherency traffic, but also tends to increase the
1111 // number of objectMonitors in circulation as well as the STW
1112 // scavenge costs. As usual, we lean toward time in space-time
1113 // tradeoffs.
1114 const int MAXPRIVATE = 1024;
1115
1116 if (AsyncDeflateIdleMonitors) {
1117 JavaThread * jt = (JavaThread *)Self;
1118 if (jt->omShouldDeflateIdleMonitors && jt->omInUseCount > 0 &&
1119 cause != inflate_cause_vm_internal) {
1120 // Deflate any per-thread idle monitors for this JavaThread if
1121 // this is not an internal inflation. Clean up your own mess.
1122 // (Gibbs Rule 45) Otherwise, skip this cleanup.
1123 // deflate_global_idle_monitors_using_JT() is called by the ServiceThread.
1124 debug_only(jt->check_for_valid_safepoint_state(false);)
1125 ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT();
1126 }
1127 }
1128
1129 for (;;) {
1130 ObjectMonitor * m;
1131
1132 // 1: try to allocate from the thread's local omFreeList.
1133 // Threads will attempt to allocate first from their local list, then
1134 // from the global list, and only after those attempts fail will the thread
1135 // attempt to instantiate new monitors. Thread-local free lists take
1136 // heat off the gListLock and improve allocation latency, as well as reducing
1137 // coherency traffic on the shared global list.
1138 m = Self->omFreeList;
1139 if (m != NULL) {
1140 Self->omFreeList = m->FreeNext;
1141 Self->omFreeCount--;
1142 guarantee(m->object() == NULL, "invariant");
1143 m->set_allocation_state(ObjectMonitor::New);
1686 // are stopped, but before any objects have moved. Collectively they traverse
1687 // the population of in-use monitors, deflating where possible. The scavenged
1688 // monitors are returned to the global monitor free list.
1689 //
1690 // Beware that we scavenge at *every* stop-the-world point. Having a large
1691 // number of monitors in-use could negatively impact performance. We also want
1692 // to minimize the total # of monitors in circulation, as they incur a small
1693 // footprint penalty.
1694 //
1695 // Perversely, the heap size -- and thus the STW safepoint rate --
1696 // typically drives the scavenge rate. Large heaps can mean infrequent GC,
1697 // which in turn can mean large(r) numbers of ObjectMonitors in circulation.
1698 // This is an unfortunate aspect of this design.
1699
// Safepoint cleanup entry point for idle monitor deflation. Either
// deflates idle monitors right here (legacy mode, or when a special
// cleanup has been requested) or hands the work to the ServiceThread.
void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* _counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // The per-thread in-use lists are handled in
  // ParallelSPCleanupThreadClosure::do_thread().

  if (!AsyncDeflateIdleMonitors || is_cleanup_requested()) {
    // Use the older mechanism for the global in-use list or
    // if a special cleanup has been requested.
    ObjectSynchronizer::deflate_idle_monitors(_counters);
    return;
  }

  log_debug(monitorinflation)("requesting deflation of idle monitors.");
  // Request deflation of global idle monitors by the ServiceThread:
  // the request flag is set before any waiter on Service_lock is woken.
  _gOmShouldDeflateIdleMonitors = true;
  MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
  ml.notify_all();
}
1719
1720 // Deflate a single monitor if not in-use
1721 // Return true if deflated, false if in-use
1722 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1723 ObjectMonitor** freeHeadp,
1724 ObjectMonitor** freeTailp) {
1725 bool deflated;
1726 // Normal case ... The monitor is associated with obj.
1727 const markOop mark = obj->mark();
1728 guarantee(mark == markOopDesc::encode(mid), "should match: mark="
1729 INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, p2i(mark),
1730 p2i(markOopDesc::encode(mid)));
1731 // Make sure that mark->monitor() and markOopDesc::encode() agree:
1732 guarantee(mark->monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
1733 ", mid=" INTPTR_FORMAT, p2i(mark->monitor()), p2i(mid));
1734 const markOop dmw = mid->header();
1735 guarantee(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
2024 }
2025 // We finished the list without a safepoint starting so there's
2026 // no need to save state.
2027 *savedMidInUsep = NULL;
2028 return deflated_count;
2029 }
2030
2031 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2032 counters->nInuse = 0; // currently associated with objects
2033 counters->nInCirculation = 0; // extant
2034 counters->nScavenged = 0; // reclaimed (global and per-thread)
2035 counters->perThreadScavenged = 0; // per-thread scavenge total
2036 counters->perThreadTimes = 0.0; // per-thread scavenge times
2037 }
2038
2039 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
2040 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2041
2042 if (AsyncDeflateIdleMonitors) {
2043 // Nothing to do when global idle ObjectMonitors are deflated using
2044 // a JavaThread unless a special cleanup has been requested.
2045 if (!is_cleanup_requested()) {
2046 return;
2047 }
2048 }
2049
2050 bool deflated = false;
2051
2052 ObjectMonitor * freeHeadp = NULL; // Local SLL of scavenged monitors
2053 ObjectMonitor * freeTailp = NULL;
2054 elapsedTimer timer;
2055
2056 if (log_is_enabled(Info, monitorinflation)) {
2057 timer.start();
2058 }
2059
2060 // Prevent omFlush from changing mids in Thread dtor's during deflation
2061 // And in case the vm thread is acquiring a lock during a safepoint
2062 // See e.g. 6320749
2063 Thread::muxAcquire(&gListLock, "deflate_idle_monitors");
2064
2065 // Note: the thread-local monitors lists get deflated in
2089 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2090 LogStreamHandle(Info, monitorinflation) lsh_info;
2091 LogStream * ls = NULL;
2092 if (log_is_enabled(Debug, monitorinflation)) {
2093 ls = &lsh_debug;
2094 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2095 ls = &lsh_info;
2096 }
2097 if (ls != NULL) {
2098 ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
2099 }
2100 }
2101
2102 // Deflate global idle ObjectMonitors using a JavaThread.
2103 //
2104 void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() {
2105 assert(AsyncDeflateIdleMonitors, "sanity check");
2106 assert(Thread::current()->is_Java_thread(), "precondition");
2107 JavaThread * self = JavaThread::current();
2108
2109 _gOmShouldDeflateIdleMonitors = false;
2110
2111 deflate_common_idle_monitors_using_JT(true /* is_global */, self);
2112 }
2113
2114 // Deflate per-thread idle ObjectMonitors using a JavaThread.
2115 //
2116 void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT() {
2117 assert(AsyncDeflateIdleMonitors, "sanity check");
2118 assert(Thread::current()->is_Java_thread(), "precondition");
2119 JavaThread * self = JavaThread::current();
2120
2121 self->omShouldDeflateIdleMonitors = false;
2122
2123 deflate_common_idle_monitors_using_JT(false /* !is_global */, self);
2124 }
2125
2126 // Deflate global or per-thread idle ObjectMonitors using a JavaThread.
2127 //
2128 void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread * self) {
2129 int deflated_count = 0;
2130 ObjectMonitor * freeHeadp = NULL; // Local SLL of scavenged ObjectMonitors
2210 if (log_is_enabled(Debug, monitorinflation)) {
2211 ls = &lsh_debug;
2212 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2213 ls = &lsh_info;
2214 }
2215 if (ls != NULL) {
2216 if (is_global) {
2217 ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
2218 } else {
2219 ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(self), timer.seconds(), deflated_count);
2220 }
2221 }
2222 }
2223
// Post-deflation bookkeeping: logs per-thread deflation stats, folds the
// scavenged count into the global free count (legacy / special-cleanup
// modes only), emits optional diagnostics, and resets per-cycle state.
void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  // Report the cumulative time for deflating each thread's idle
  // monitors. Note: if the work is split among more than one
  // worker thread, then the reported time will likely be more
  // than a beginning to end measurement of the phase.
  // Note: AsyncDeflateIdleMonitors only deflates per-thread idle
  // monitors at a safepoint when a special cleanup has been requested.
  log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->perThreadTimes, counters->perThreadScavenged);

  bool needs_special_cleanup = is_cleanup_requested();
  if (!AsyncDeflateIdleMonitors || needs_special_cleanup) {
    // AsyncDeflateIdleMonitors does not use these counters unless
    // there is a special cleanup request.

    gMonitorFreeCount += counters->nScavenged;

    OM_PERFDATA_OP(Deflations, inc(counters->nScavenged));
    OM_PERFDATA_OP(MonExtant, set_value(counters->nInCirculation));
  }

  if (log_is_enabled(Debug, monitorinflation)) {
    // exit_globals()'s call to audit_and_print_stats() is done
    // at the Info level.
    ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
  } else if (log_is_enabled(Info, monitorinflation)) {
    // Take gListLock so the three counters are logged consistently.
    Thread::muxAcquire(&gListLock, "finish_deflate_idle_monitors");
    log_info(monitorinflation)("gMonitorPopulation=%d, gOmInUseCount=%d, "
                               "gMonitorFreeCount=%d", gMonitorPopulation,
                               gOmInUseCount, gMonitorFreeCount);
    Thread::muxRelease(&gListLock);
  }

  ForceMonitorScavenge = 0; // Reset
  GVars.stwRandom = os::random();
  GVars.stwCycle++;
  if (needs_special_cleanup) {
    set_is_cleanup_requested(false); // special clean up is done
  }
}
2263
2264 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
2265 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2266
2267 if (AsyncDeflateIdleMonitors) {
2268 if (!is_cleanup_requested()) {
2269 // Mark the JavaThread for idle monitor cleanup if a special
2270 // cleanup has NOT been requested.
2271 if (thread->omInUseCount > 0) {
2272 // This JavaThread is using monitors so mark it.
2273 thread->omShouldDeflateIdleMonitors = true;
2274 }
2275 return;
2276 }
2277 }
2278
2279 ObjectMonitor * freeHeadp = NULL; // Local SLL of scavenged monitors
2280 ObjectMonitor * freeTailp = NULL;
2281 elapsedTimer timer;
2282
2283 if (log_is_enabled(Info, safepoint, cleanup) ||
2284 log_is_enabled(Info, monitorinflation)) {
2285 timer.start();
2286 }
2287
2288 int deflated_count = deflate_monitor_list(thread->omInUseList_addr(), &freeHeadp, &freeTailp);
2289
2290 Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors");
|
108 #endif // ndef DTRACE_ENABLED
109
// This exists only as a workaround of dtrace bug 6254741
// Forwards to the DTRACE_MONITOR_PROBE(waited) macro and always
// returns 0; callers use it purely for the probe side effect.
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}
115
#define NINFLATIONLOCKS 256
// Striped lock words (presumably used to serialize inflation attempts;
// the use sites are outside this chunk -- confirm before relying on this).
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedEnd<ObjectMonitor> * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;
// Set at a safepoint (do_safepoint_work) to ask the ServiceThread to
// deflate idle monitors asynchronously; see is_async_deflation_needed().
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
// Forces safepoint-time deflation even with AsyncDeflateIdleMonitors;
// cleared by finish_deflate_idle_monitors() once the work is done.
bool volatile ObjectSynchronizer::_is_special_deflation_requested = false;
// os::javaTimeNanos() timestamp of the last async deflation trigger;
// read by time_since_last_async_deflation_ms() to rate-limit deflation.
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;

static volatile intptr_t gListLock = 0; // protects global monitor lists
static volatile int gMonitorFreeCount = 0; // # on gFreeList
static volatile int gMonitorPopulation = 0; // # Extant -- in circulation

// Sentinel stored in a block header's object field so list walkers can
// tell headers apart from real monitors (see the asserts in next()).
#define CHAINMARKER (cast_to_oop<intptr_t>(-1))
137
138
139 // =====================> Quick functions
140
141 // The quick_* forms are special fast-path variants used to improve
142 // performance. In the simplest case, a "quick_*" implementation could
143 // simply return false, in which case the caller will perform the necessary
144 // state transitions and call the slow-path form.
145 // The fast-path is designed to handle frequently arising cases in an efficient
146 // manner and is just a degenerate "optimistic" variant of the slow-path.
147 // returns true -- to indicate the call was satisfied.
148 // returns false -- to indicate the call needs the services of the slow-path.
149 // A no-loitering ordinance is in effect for code in the quick_* family
150 // operators: safepoints or indefinite blocking (blocking that might span a
997 }
998 closure->do_monitor(mid);
999 }
1000 }
1001 block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
1002 }
1003 }
1004
1005 // Get the next block in the block list.
1006 static inline PaddedEnd<ObjectMonitor>* next(PaddedEnd<ObjectMonitor>* block) {
1007 assert(block->object() == CHAINMARKER, "must be a block header");
1008 block = (PaddedEnd<ObjectMonitor>*) block->FreeNext;
1009 assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
1010 return block;
1011 }
1012
1013 static bool monitors_used_above_threshold() {
1014 if (gMonitorPopulation == 0) {
1015 return false;
1016 }
1017 if (MonitorUsedDeflationThreshold > 0) {
1018 int monitors_used = gMonitorPopulation - gMonitorFreeCount;
1019 int monitor_usage = (monitors_used * 100LL) / gMonitorPopulation;
1020 return monitor_usage > MonitorUsedDeflationThreshold;
1021 }
1022 return false;
1023 }
1024
// Decides whether the ServiceThread should run an async deflation cycle:
// either one was explicitly requested, or the deflation interval has
// elapsed and monitor usage is above threshold. Note: the interval case
// has a side effect -- it stamps _last_async_deflation_time_ns.
bool ObjectSynchronizer::is_async_deflation_needed() {
  if (!AsyncDeflateIdleMonitors) {
    // Async deflation is disabled entirely.
    return false;
  }
  if (is_async_deflation_requested()) {
    // Async deflation request.
    return true;
  }
  if (AsyncDeflationInterval > 0 &&
      time_since_last_async_deflation_ms() > AsyncDeflationInterval &&
      monitors_used_above_threshold()) {
    // It's been longer than our specified deflate interval and there
    // are too many monitors in use. We don't deflate more frequently
    // than AsyncDeflationInterval (unless is_async_deflation_requested)
    // in order to not swamp the ServiceThread.
    _last_async_deflation_time_ns = os::javaTimeNanos();
    return true;
  }
  return false;
}
1045
1046 bool ObjectSynchronizer::is_safepoint_deflation_needed() {
1047 if (!AsyncDeflateIdleMonitors) {
1048 if (monitors_used_above_threshold()) {
1049 // Too many monitors in use.
1050 return true;
1051 }
1052 return false;
1053 }
1054 if (is_special_deflation_requested()) {
1055 // For AsyncDeflateIdleMonitors only do a safepoint deflation
1056 // if there is a special deflation request.
1057 return true;
1058 }
1059 return false;
1060 }
1061
1062 jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
1063 return (os::javaTimeNanos() - _last_async_deflation_time_ns) / (NANOUNITS / MILLIUNITS);
1064 }
1065
// Visit the oops referenced by the global in-use monitor list.
void ObjectSynchronizer::oops_do(OopClosure* f) {
  // We only scan the global used list here (for moribund threads), and
  // the thread-local monitors in Thread::oops_do().
  global_used_oops_do(f);
}
1071
// Apply the closure to each oop on the global in-use monitor list.
// Must run at a safepoint so the list cannot change underneath us.
void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(gOmInUseList, f);
}
1076
// Apply the closure to each oop on the given thread's in-use monitor
// list. Must run at a safepoint.
void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->omInUseList, f);
}
1081
1082 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
1083 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1084 ObjectMonitor* mid;
1085 for (mid = list; mid != NULL; mid = mid->FreeNext) {
1139 // to the VMthread and have a lifespan longer than that of this activation record.
1140 // The VMThread will delete the op when completed.
1141 VMThread::execute(new VM_ScavengeMonitors());
1142 }
1143 }
1144
1145 ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self,
1146 const InflateCause cause) {
1147 // A large MAXPRIVATE value reduces both list lock contention
1148 // and list coherency traffic, but also tends to increase the
1149 // number of objectMonitors in circulation as well as the STW
1150 // scavenge costs. As usual, we lean toward time in space-time
1151 // tradeoffs.
1152 const int MAXPRIVATE = 1024;
1153
1154 if (AsyncDeflateIdleMonitors) {
1155 JavaThread * jt = (JavaThread *)Self;
1156 if (jt->omShouldDeflateIdleMonitors && jt->omInUseCount > 0 &&
1157 cause != inflate_cause_vm_internal) {
1158 // Deflate any per-thread idle monitors for this JavaThread if
1159 // this is not an internal inflation; internal inflations can
1160 // occur in places where it is not safe to pause for a safepoint.
1161 // Clean up your own mess. (Gibbs Rule 45) Otherwise, skip this
1162 // deflation. deflate_global_idle_monitors_using_JT() is called
1163 // by the ServiceThread.
1164 debug_only(jt->check_for_valid_safepoint_state(false);)
1165 ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT();
1166 }
1167 }
1168
1169 for (;;) {
1170 ObjectMonitor * m;
1171
1172 // 1: try to allocate from the thread's local omFreeList.
1173 // Threads will attempt to allocate first from their local list, then
1174 // from the global list, and only after those attempts fail will the thread
1175 // attempt to instantiate new monitors. Thread-local free lists take
1176 // heat off the gListLock and improve allocation latency, as well as reducing
1177 // coherency traffic on the shared global list.
1178 m = Self->omFreeList;
1179 if (m != NULL) {
1180 Self->omFreeList = m->FreeNext;
1181 Self->omFreeCount--;
1182 guarantee(m->object() == NULL, "invariant");
1183 m->set_allocation_state(ObjectMonitor::New);
1726 // are stopped, but before any objects have moved. Collectively they traverse
1727 // the population of in-use monitors, deflating where possible. The scavenged
1728 // monitors are returned to the global monitor free list.
1729 //
1730 // Beware that we scavenge at *every* stop-the-world point. Having a large
1731 // number of monitors in-use could negatively impact performance. We also want
1732 // to minimize the total # of monitors in circulation, as they incur a small
1733 // footprint penalty.
1734 //
1735 // Perversely, the heap size -- and thus the STW safepoint rate --
1736 // typically drives the scavenge rate. Large heaps can mean infrequent GC,
1737 // which in turn can mean large(r) numbers of ObjectMonitors in circulation.
1738 // This is an unfortunate aspect of this design.
1739
// Safepoint cleanup entry point for idle monitor deflation. Either
// deflates idle monitors right here (legacy mode, or when a special
// deflation has been requested) or hands the work to the ServiceThread.
void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* _counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // The per-thread in-use lists are handled in
  // ParallelSPCleanupThreadClosure::do_thread().

  if (!AsyncDeflateIdleMonitors || is_special_deflation_requested()) {
    // Use the older mechanism for the global in-use list or if a
    // special deflation has been requested before the safepoint.
    ObjectSynchronizer::deflate_idle_monitors(_counters);
    return;
  }

  log_debug(monitorinflation)("requesting async deflation of idle monitors.");
  // Request deflation of idle monitors by the ServiceThread:
  // the request flag is set before any waiter on Service_lock is woken.
  set_is_async_deflation_requested(true);
  MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
  ml.notify_all();
}
1759
1760 // Deflate a single monitor if not in-use
1761 // Return true if deflated, false if in-use
1762 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1763 ObjectMonitor** freeHeadp,
1764 ObjectMonitor** freeTailp) {
1765 bool deflated;
1766 // Normal case ... The monitor is associated with obj.
1767 const markOop mark = obj->mark();
1768 guarantee(mark == markOopDesc::encode(mid), "should match: mark="
1769 INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, p2i(mark),
1770 p2i(markOopDesc::encode(mid)));
1771 // Make sure that mark->monitor() and markOopDesc::encode() agree:
1772 guarantee(mark->monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
1773 ", mid=" INTPTR_FORMAT, p2i(mark->monitor()), p2i(mid));
1774 const markOop dmw = mid->header();
1775 guarantee(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
2064 }
2065 // We finished the list without a safepoint starting so there's
2066 // no need to save state.
2067 *savedMidInUsep = NULL;
2068 return deflated_count;
2069 }
2070
2071 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2072 counters->nInuse = 0; // currently associated with objects
2073 counters->nInCirculation = 0; // extant
2074 counters->nScavenged = 0; // reclaimed (global and per-thread)
2075 counters->perThreadScavenged = 0; // per-thread scavenge total
2076 counters->perThreadTimes = 0.0; // per-thread scavenge times
2077 }
2078
2079 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
2080 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2081
2082 if (AsyncDeflateIdleMonitors) {
2083 // Nothing to do when global idle ObjectMonitors are deflated using
2084 // a JavaThread unless a special deflation has been requested.
2085 if (!is_special_deflation_requested()) {
2086 return;
2087 }
2088 }
2089
2090 bool deflated = false;
2091
2092 ObjectMonitor * freeHeadp = NULL; // Local SLL of scavenged monitors
2093 ObjectMonitor * freeTailp = NULL;
2094 elapsedTimer timer;
2095
2096 if (log_is_enabled(Info, monitorinflation)) {
2097 timer.start();
2098 }
2099
2100 // Prevent omFlush from changing mids in Thread dtor's during deflation
2101 // And in case the vm thread is acquiring a lock during a safepoint
2102 // See e.g. 6320749
2103 Thread::muxAcquire(&gListLock, "deflate_idle_monitors");
2104
2105 // Note: the thread-local monitors lists get deflated in
2129 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2130 LogStreamHandle(Info, monitorinflation) lsh_info;
2131 LogStream * ls = NULL;
2132 if (log_is_enabled(Debug, monitorinflation)) {
2133 ls = &lsh_debug;
2134 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2135 ls = &lsh_info;
2136 }
2137 if (ls != NULL) {
2138 ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
2139 }
2140 }
2141
2142 // Deflate global idle ObjectMonitors using a JavaThread.
2143 //
2144 void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() {
2145 assert(AsyncDeflateIdleMonitors, "sanity check");
2146 assert(Thread::current()->is_Java_thread(), "precondition");
2147 JavaThread * self = JavaThread::current();
2148
2149 deflate_common_idle_monitors_using_JT(true /* is_global */, self);
2150 }
2151
2152 // Deflate per-thread idle ObjectMonitors using a JavaThread.
2153 //
2154 void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT() {
2155 assert(AsyncDeflateIdleMonitors, "sanity check");
2156 assert(Thread::current()->is_Java_thread(), "precondition");
2157 JavaThread * self = JavaThread::current();
2158
2159 self->omShouldDeflateIdleMonitors = false;
2160
2161 deflate_common_idle_monitors_using_JT(false /* !is_global */, self);
2162 }
2163
2164 // Deflate global or per-thread idle ObjectMonitors using a JavaThread.
2165 //
2166 void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread * self) {
2167 int deflated_count = 0;
2168 ObjectMonitor * freeHeadp = NULL; // Local SLL of scavenged ObjectMonitors
2248 if (log_is_enabled(Debug, monitorinflation)) {
2249 ls = &lsh_debug;
2250 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2251 ls = &lsh_info;
2252 }
2253 if (ls != NULL) {
2254 if (is_global) {
2255 ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
2256 } else {
2257 ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(self), timer.seconds(), deflated_count);
2258 }
2259 }
2260 }
2261
// Post-deflation bookkeeping: logs per-thread deflation stats, folds the
// scavenged count into the global free count (legacy / special-deflation
// modes only), emits optional diagnostics, and resets per-cycle state.
void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  // Report the cumulative time for deflating each thread's idle
  // monitors. Note: if the work is split among more than one
  // worker thread, then the reported time will likely be more
  // than a beginning to end measurement of the phase.
  // Note: AsyncDeflateIdleMonitors only deflates per-thread idle
  // monitors at a safepoint when a special deflation has been requested.
  log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->perThreadTimes, counters->perThreadScavenged);

  bool needs_special_deflation = is_special_deflation_requested();
  if (!AsyncDeflateIdleMonitors || needs_special_deflation) {
    // AsyncDeflateIdleMonitors does not use these counters unless
    // there is a special deflation request.

    gMonitorFreeCount += counters->nScavenged;

    OM_PERFDATA_OP(Deflations, inc(counters->nScavenged));
    OM_PERFDATA_OP(MonExtant, set_value(counters->nInCirculation));
  }

  if (log_is_enabled(Debug, monitorinflation)) {
    // exit_globals()'s call to audit_and_print_stats() is done
    // at the Info level.
    ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
  } else if (log_is_enabled(Info, monitorinflation)) {
    // Take gListLock so the three counters are logged consistently.
    Thread::muxAcquire(&gListLock, "finish_deflate_idle_monitors");
    log_info(monitorinflation)("gMonitorPopulation=%d, gOmInUseCount=%d, "
                               "gMonitorFreeCount=%d", gMonitorPopulation,
                               gOmInUseCount, gMonitorFreeCount);
    Thread::muxRelease(&gListLock);
  }

  ForceMonitorScavenge = 0; // Reset
  GVars.stwRandom = os::random();
  GVars.stwCycle++;
  if (needs_special_deflation) {
    set_is_special_deflation_requested(false); // special deflation is done
  }
}
2301
2302 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
2303 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2304
2305 if (AsyncDeflateIdleMonitors) {
2306 if (!is_special_deflation_requested()) {
2307 // Mark the JavaThread for idle monitor deflation if a special
2308 // deflation has NOT been requested.
2309 if (thread->omInUseCount > 0) {
2310 // This JavaThread is using monitors so mark it.
2311 thread->omShouldDeflateIdleMonitors = true;
2312 }
2313 return;
2314 }
2315 }
2316
2317 ObjectMonitor * freeHeadp = NULL; // Local SLL of scavenged monitors
2318 ObjectMonitor * freeTailp = NULL;
2319 elapsedTimer timer;
2320
2321 if (log_is_enabled(Info, safepoint, cleanup) ||
2322 log_is_enabled(Info, monitorinflation)) {
2323 timer.start();
2324 }
2325
2326 int deflated_count = deflate_monitor_list(thread->omInUseList_addr(), &freeHeadp, &freeTailp);
2327
2328 Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors");
|