68
// Common prologue for the DTrace monitor probe macros below: captures the
// current thread's Java tid and the monitored object's class name (UTF-8
// bytes + length) into locals consumed by the probe invocation. Expands to
// declarations, so it must appear before the probe call in the same block.
69 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread) \
70 char* bytes = NULL; \
71 int len = 0; \
72 jlong jtid = SharedRuntime::get_java_tid(thread); \
73 Symbol* klassname = ((oop)(obj))->klass()->name(); \
74 if (klassname != NULL) { \
75 bytes = (char*)klassname->bytes(); \
76 len = klassname->utf8_length(); \
77 }
78
// Fires the HOTSPOT_MONITOR_WAIT probe (tid, monitor address, class name,
// timeout) when the DTraceMonitorProbes flag is on; otherwise only the
// cheap runtime flag check is executed.
79 #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) \
80 { \
81 if (DTraceMonitorProbes) { \
82 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \
83 HOTSPOT_MONITOR_WAIT(jtid, \
84 (uintptr_t)(monitor), bytes, len, (millis)); \
85 } \
86 }
87
88 #define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED
89
// Generic monitor probe dispatcher: DTRACE_MONITOR_PROBE(waited, ...)
// expands to HOTSPOT_MONITOR_PROBE_waited(...), i.e. HOTSPOT_MONITOR_WAITED.
// Guarded by the DTraceMonitorProbes runtime flag.
90 #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) \
91 { \
92 if (DTraceMonitorProbes) { \
93 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \
94 HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */ \
95 (uintptr_t)(monitor), bytes, len); \
96 } \
97 }
98
99 #else // ndef DTRACE_ENABLED
100
// No-op fallbacks when DTrace support is compiled out. The arguments are
// ignored, but the parameter names and order are kept identical to the
// DTRACE_ENABLED variants above so call sites read consistently in both
// build configurations (the old stubs declared (obj, thread, millis, mon),
// which did not match the enabled signatures).
#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) {;}
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) {;}
103
104 #endif // ndef DTRACE_ENABLED
105
106 // This exists only as a workaround of dtrace bug 6254741
107 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
// Global ObjectMonitor bookkeeping counters (volatile: updated concurrently).
128 static volatile int gMonitorFreeCount = 0; // # on gFreeList
129 static volatile int gMonitorPopulation = 0; // # Extant -- in circulation
130
// Sentinel oop value (-1) -- presumably marks ObjectMonitor blocks chained on
// a monitor block list rather than referencing a real object; its use is not
// visible in this chunk -- TODO(review): confirm against omAlloc/omFlush.
131 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
132
133
134 // =====================> Quick functions
135
136 // The quick_* forms are special fast-path variants used to improve
137 // performance. In the simplest case, a "quick_*" implementation could
138 // simply return false, in which case the caller will perform the necessary
139 // state transitions and call the slow-path form.
140 // The fast-path is designed to handle frequently arising cases in an efficient
141 // manner and is just a degenerate "optimistic" variant of the slow-path.
142 // returns true -- to indicate the call was satisfied.
143 // returns false -- to indicate the call needs the services of the slow-path.
144 // A no-loitering ordinance is in effect for code in the quick_* family
145 // operators: safepoints or indefinite blocking (blocking that might span a
146 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
147 // entry.
148
149 // The LockNode emitted directly at the synchronization site would have
150 // been too big if it were to have included support for the cases of inflated
151 // recursive enter and exit, so they go here instead.
152 // Note that we can't safely call AsyncPrintJavaStack() from within
153 // quick_enter() as our thread state remains _in_Java.
154
155 bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
156 BasicLock * Lock) {
157 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
158 assert(Self->is_Java_thread(), "invariant");
159 assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
160 No_Safepoint_Verifier nsv;
161 if (obj == NULL) return false; // Need to throw NPE
162 const markOop mark = obj->mark();
163
164 if (mark->has_monitor()) {
165 ObjectMonitor * const m = mark->monitor();
166 assert(m->object() == obj, "invariant");
167 Thread * const owner = (Thread *) m->_owner;
168
1431 //
1432 // We have added a flag, MonitorInUseLists, which creates a list
1433 // of active monitors for each thread. deflate_idle_monitors()
1434 // only scans the per-thread in-use lists. omAlloc() puts all
1435 // assigned monitors on the per-thread list. deflate_idle_monitors()
1436 // returns the non-busy monitors to the global free list.
1437 // When a thread dies, omFlush() adds the list of active monitors for
1438 // that thread to a global gOmInUseList acquiring the
1439 // global list lock. deflate_idle_monitors() acquires the global
1440 // list lock to scan for non-busy monitors and move them to the global free list.
1441 // An alternative could have used a single global in-use list. The
1442 // downside would have been the additional cost of acquiring the global list lock
1443 // for every omAlloc().
1444 //
1445 // Perversely, the heap size -- and thus the STW safepoint rate --
1446 // typically drives the scavenge rate. Large heaps can mean infrequent GC,
1447 // which in turn can mean large(r) numbers of objectmonitors in circulation.
1448 // This is an unfortunate aspect of this design.
1449
// Compile-time knobs for the monitor deflation code.
1450 enum ManifestConstants {
    // When nonzero, clear _Responsible on busy monitors during STW deflation
    // (see the is_busy() branch in deflate_monitor below).
1451 ClearResponsibleAtSTW = 0,
    // Not referenced in the visible code -- TODO(review): confirm where the
    // recheck interval is consumed elsewhere in this file.
1452 MaximumRecheckInterval = 1000
1453 };
1454
1455 // Deflate a single monitor if not in-use
1456 // Return true if deflated, false if in-use
1457 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1458 ObjectMonitor** freeHeadp,
1459 ObjectMonitor** freeTailp) {
1460 bool deflated;
1461 // Normal case ... The monitor is associated with obj.
1462 guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
1463 guarantee(mid == obj->mark()->monitor(), "invariant");
1464 guarantee(mid->header()->is_neutral(), "invariant");
1465
1466 if (mid->is_busy()) {
1467 if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
1468 deflated = false;
1469 } else {
1470 // Deflate the monitor if it is no longer being used
1471 // It's idle - scavenge and return to the global free list
1472 // plain old deflation ...
|
68
// Common prologue for the DTrace monitor probe macros below: captures the
// current thread's Java tid and the monitored object's class name (UTF-8
// bytes + length) into locals consumed by the probe invocation. Expands to
// declarations, so it must appear before the probe call in the same block.
69 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread) \
70 char* bytes = NULL; \
71 int len = 0; \
72 jlong jtid = SharedRuntime::get_java_tid(thread); \
73 Symbol* klassname = ((oop)(obj))->klass()->name(); \
74 if (klassname != NULL) { \
75 bytes = (char*)klassname->bytes(); \
76 len = klassname->utf8_length(); \
77 }
78
// Fires the HOTSPOT_MONITOR_WAIT probe (tid, monitor address, class name,
// timeout) when the DTraceMonitorProbes flag is on; otherwise only the
// cheap runtime flag check is executed.
79 #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) \
80 { \
81 if (DTraceMonitorProbes) { \
82 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \
83 HOTSPOT_MONITOR_WAIT(jtid, \
84 (uintptr_t)(monitor), bytes, len, (millis)); \
85 } \
86 }
87
// Map the generic probe tokens used by DTRACE_MONITOR_PROBE(probe, ...) to
// the concrete HOTSPOT_MONITOR_* probe macros via ## token pasting.
88 #define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
89 #define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
90 #define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED
91
// Generic monitor probe dispatcher: DTRACE_MONITOR_PROBE(notify, ...)
// expands to HOTSPOT_MONITOR_PROBE_notify(...), i.e. HOTSPOT_MONITOR_NOTIFY.
// Guarded by the DTraceMonitorProbes runtime flag.
92 #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) \
93 { \
94 if (DTraceMonitorProbes) { \
95 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \
96 HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = notify, notifyAll or waited */ \
97 (uintptr_t)(monitor), bytes, len); \
98 } \
99 }
100
101 #else // ndef DTRACE_ENABLED
102
// No-op fallbacks when DTrace support is compiled out. The arguments are
// ignored, but the parameter names and order are kept identical to the
// DTRACE_ENABLED variants above so call sites read consistently in both
// build configurations (the old stubs declared (obj, thread, millis, mon),
// which did not match the enabled signatures).
#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) {;}
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) {;}
105
106 #endif // ndef DTRACE_ENABLED
107
108 // This exists only as a workaround of dtrace bug 6254741
109 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
// Global ObjectMonitor bookkeeping counters (volatile: updated concurrently).
130 static volatile int gMonitorFreeCount = 0; // # on gFreeList
131 static volatile int gMonitorPopulation = 0; // # Extant -- in circulation
132
// Sentinel oop value (-1) -- presumably marks ObjectMonitor blocks chained on
// a monitor block list rather than referencing a real object; its use is not
// visible in this chunk -- TODO(review): confirm against omAlloc/omFlush.
133 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
134
135
136 // =====================> Quick functions
137
138 // The quick_* forms are special fast-path variants used to improve
139 // performance. In the simplest case, a "quick_*" implementation could
140 // simply return false, in which case the caller will perform the necessary
141 // state transitions and call the slow-path form.
142 // The fast-path is designed to handle frequently arising cases in an efficient
143 // manner and is just a degenerate "optimistic" variant of the slow-path.
144 // returns true -- to indicate the call was satisfied.
145 // returns false -- to indicate the call needs the services of the slow-path.
146 // A no-loitering ordinance is in effect for code in the quick_* family
147 // operators: safepoints or indefinite blocking (blocking that might span a
148 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
149 // entry.
150 //
151 // Consider: An interesting optimization is to have the JIT recognize the
152 // following common idiom:
153 // synchronized (someobj) { .... ; notify(); }
154 // That is, we find a notify() or notifyAll() call that immediately precedes
155 // the monitorexit operation. In that case the JIT could fuse the operations
156 // into a single notifyAndExit() runtime primitive.
157
// Fast-path variant of Object.notify()/notifyAll() (all == true means
// notifyAll). Returns true when the notification has been fully handled
// here; returns false to send the caller to the slow path (invalid object,
// monitor owned by another thread, biased locking, etc.).
// Executes with thread_state == _thread_in_Java and must not reach a
// safepoint -- enforced by the No_Safepoint_Verifier below.
158 bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
159 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
160 assert(self->is_Java_thread(), "invariant");
161 assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
162 No_Safepoint_Verifier nsv;
// NULL receiver: let the slow path raise the appropriate exception.
163 if (obj == NULL) return false; // slow-path for invalid obj
164 const markOop mark = obj->mark();
165
166 if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
167 // Degenerate notify
168 // stack-locked by caller so by definition the implied waitset is empty.
169 return true;
170 }
171
172 if (mark->has_monitor()) {
173 ObjectMonitor * const mon = mark->monitor();
174 assert(mon->object() == obj, "invariant");
// Caller does not own the inflated monitor; the slow path will raise the
// IllegalMonitorStateException.
175 if (mon->owner() != self) return false; // slow-path for IMS exception
176
177 if (mon->first_waiter() != NULL) {
178 // We have one or more waiters. Since this is an inflated monitor
179 // that we own, we can transfer one or more threads from the waitset
180 // to the entrylist here and now, avoiding the slow-path.
181 if (all) {
182 DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
183 } else {
184 DTRACE_MONITOR_PROBE(notify, mon, obj, self);
185 }
// Each INotify() transfers a single waiter; keep draining the waitset
// when notifying all.
186 int tally = 0;
187 do {
188 mon->INotify(self);
189 ++tally;
190 } while (mon->first_waiter() != NULL && all);
// Bump the global notification counter by the number of threads
// notified (presumably a perf statistic -- confirm against the
// declaration of ObjectMonitor::_sync_Notifications).
191 if (ObjectMonitor::_sync_Notifications != NULL) {
192 ObjectMonitor::_sync_Notifications->inc(tally);
193 }
194 }
195 return true;
196 }
197
198 // biased locking and any other IMS exception states take the slow-path
199 return false;
200 }
201
202
203 // The LockNode emitted directly at the synchronization site would have
204 // been too big if it were to have included support for the cases of inflated
205 // recursive enter and exit, so they go here instead.
206 // Note that we can't safely call AsyncPrintJavaStack() from within
207 // quick_enter() as our thread state remains _in_Java.
208
209 bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
210 BasicLock * Lock) {
211 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
212 assert(Self->is_Java_thread(), "invariant");
213 assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
214 No_Safepoint_Verifier nsv;
215 if (obj == NULL) return false; // Need to throw NPE
216 const markOop mark = obj->mark();
217
218 if (mark->has_monitor()) {
219 ObjectMonitor * const m = mark->monitor();
220 assert(m->object() == obj, "invariant");
221 Thread * const owner = (Thread *) m->_owner;
222
1485 //
1486 // We have added a flag, MonitorInUseLists, which creates a list
1487 // of active monitors for each thread. deflate_idle_monitors()
1488 // only scans the per-thread in-use lists. omAlloc() puts all
1489 // assigned monitors on the per-thread list. deflate_idle_monitors()
1490 // returns the non-busy monitors to the global free list.
1491 // When a thread dies, omFlush() adds the list of active monitors for
1492 // that thread to a global gOmInUseList acquiring the
1493 // global list lock. deflate_idle_monitors() acquires the global
1494 // list lock to scan for non-busy monitors and move them to the global free list.
1495 // An alternative could have used a single global in-use list. The
1496 // downside would have been the additional cost of acquiring the global list lock
1497 // for every omAlloc().
1498 //
1499 // Perversely, the heap size -- and thus the STW safepoint rate --
1500 // typically drives the scavenge rate. Large heaps can mean infrequent GC,
1501 // which in turn can mean large(r) numbers of objectmonitors in circulation.
1502 // This is an unfortunate aspect of this design.
1503
// Compile-time knob for the monitor deflation code: when nonzero, clear
// _Responsible on busy monitors during STW deflation (see the is_busy()
// branch in deflate_monitor below).
1504 enum ManifestConstants {
1505 ClearResponsibleAtSTW = 0
1506 };
1507
1508 // Deflate a single monitor if not in-use
1509 // Return true if deflated, false if in-use
1510 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1511 ObjectMonitor** freeHeadp,
1512 ObjectMonitor** freeTailp) {
1513 bool deflated;
1514 // Normal case ... The monitor is associated with obj.
1515 guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
1516 guarantee(mid == obj->mark()->monitor(), "invariant");
1517 guarantee(mid->header()->is_neutral(), "invariant");
1518
1519 if (mid->is_busy()) {
1520 if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
1521 deflated = false;
1522 } else {
1523 // Deflate the monitor if it is no longer being used
1524 // It's idle - scavenge and return to the global free list
1525 // plain old deflation ...
|