src/share/vm/runtime/synchronizer.cpp
Print this page
*** 82,91 ****
--- 82,93 ----
HOTSPOT_MONITOR_WAIT(jtid, \
(uintptr_t)(monitor), bytes, len, (millis)); \
} \
}
+ #define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
+ #define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) \
{ \
if (DTraceMonitorProbes) { \
*** 142,152 ****
--- 144,209 ----
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
+ //
+ // An interesting optimization is to have the JIT recognize the following
+ // common idiom:
+ // synchronized (someobj) { .... ; notify(); }
+ // That is, we find a notify() or notifyAll() call that immediately precedes
+ // the monitorexit operation. In that case the JIT could fuse the operations
+ // into a single notifyAndExit() runtime primitive.
+ //
+ // quick_notify() attempts to perform a notify/notifyAll entirely on the
+ // fast path, while the caller remains _thread_in_Java (see the no-loitering
+ // ordinance above: no safepoints or indefinite blocking are permitted here).
+ // obj  - the object being notified; NULL forces the slow path.
+ // Self - the calling thread; asserted to be a JavaThread in _thread_in_Java.
+ // All  - true for notifyAll(), false for notify().
+ // Returns true if the notification was fully handled here; false means the
+ // caller must fall back to the slow path (e.g. the mark word is neither
+ // stack-locked by the caller nor an inflated monitor owned by the caller --
+ // which covers the biased-locking and IMSX-exception cases noted below).
+ bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * Self, bool All) {
+ assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
+ assert(Self->is_Java_thread(), "invariant");
+ assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
+ // Enforce (in debug builds) that nothing below can reach a safepoint.
+ No_Safepoint_Verifier nsv;
+ if (obj == NULL) return false;
+ const markOop mark = obj->mark();
+
+ if (mark->has_locker() && Self->is_lock_owned((address)mark->locker())) {
+ // Degenerate notify
+ // stack-locked by caller so by definition the implied waitset is empty.
+ return true;
+ }
+
+ if (mark->has_monitor()) {
+ ObjectMonitor * const mon = mark->monitor();
+ assert(mon->object() == obj, "invariant");
+ // Caller does not own the inflated monitor: slow path must throw IMSX.
+ if (mon->owner() != Self) return false;
+
+ // As long as the object is unbiased and doesn't require safepoint revocation
+ // and is owned by the caller we can transfer a thread or threads from
+ // the waitset to the entrylist here and now, avoiding the slow-path.
+ // That is, the only case where the slow-path is mandatory is
+ // when the object is biased or we need to throw IMSX exceptions.
+ if (mon->first_waiter() != NULL) {
+ if (All) {
+ DTRACE_MONITOR_PROBE(notifyAll, mon, obj, Self);
+ } else {
+ DTRACE_MONITOR_PROBE(notify, mon, obj, Self);
+ }
+ // Transfer one waiter (notify) or every waiter (notifyAll) via INotify;
+ // tally counts the transfers for the perf counter update below.
+ int tally = 0;
+ for (;;) {
+ if (mon->first_waiter() == NULL) break;
+ mon->INotify(Self);
+ ++tally;
+ if (!All) break;
+ }
+ if (ObjectMonitor::_sync_Notifications != NULL) {
+ ObjectMonitor::_sync_Notifications->inc(tally);
+ }
+ }
+ return true;
+ }
+
+ return false; // revert to slow-path
+ }
+
+
// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.