src/share/vm/runtime/synchronizer.cpp

*** 83,92 ****
--- 83,94 ----
        HOTSPOT_MONITOR_WAIT(jtid,                                         \
                             (uintptr_t)(monitor), bytes, len, (millis));  \
      }                                                                    \
    }
  
+ #define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
+ #define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
  #define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED
  
  #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                \
    {                                                                      \
      if (DTraceMonitorProbes) {                                           \
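The two #define aliases added above exist so that DTRACE_MONITOR_PROBE(probe, ...) can be invoked with a bare probe name (notify, notifyAll, waited) and token-paste it onto the HOTSPOT_MONITOR_PROBE_ prefix, which then resolves to the generated HOTSPOT_MONITOR_* probe macro. A minimal standalone sketch of that token-pasting indirection, using illustrative names rather than HotSpot's:

#include <cstdio>

// Stand-ins for the generated DTrace probe macros (HOTSPOT_MONITOR_NOTIFY, etc.).
#define PROBE_NOTIFY()    printf("notify probe fired\n")
#define PROBE_NOTIFYALL() printf("notifyAll probe fired\n")

// Aliases mapping lower-case probe names onto the probe macros, mirroring
// HOTSPOT_MONITOR_PROBE_notify -> HOTSPOT_MONITOR_NOTIFY in the hunk above.
#define PROBE_notify    PROBE_NOTIFY
#define PROBE_notifyAll PROBE_NOTIFYALL

// The dispatcher pastes the bare probe name onto the alias prefix.
#define FIRE_PROBE(probe) PROBE_##probe()

int main() {
  FIRE_PROBE(notify);     // expands to PROBE_notify() and thus PROBE_NOTIFY()
  FIRE_PROBE(notifyAll);  // expands to PROBE_notifyAll() and thus PROBE_NOTIFYALL()
  return 0;
}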
*** 143,153 ****
--- 145,207 ----
  // returns false -- to indicate the call needs the services of the slow-path.
  // A no-loitering ordinance is in effect for code in the quick_* family
  // operators: safepoints or indefinite blocking (blocking that might span a
  // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
  // entry.
+ //
+ // Consider: An interesting optimization is to have the JIT recognize the
+ // following common idiom:
+ //   synchronized (someobj) { .... ; notify(); }
+ // That is, we find a notify() or notifyAll() call that immediately precedes
+ // the monitorexit operation. In that case the JIT could fuse the operations
+ // into a single notifyAndExit() runtime primitive.
+ bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
+   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
+   assert(self->is_Java_thread(), "invariant");
+   assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
+   No_Safepoint_Verifier nsv;
+   if (obj == NULL) return false;  // slow-path for invalid obj
+   const markOop mark = obj->mark();
+ 
+   if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
+     // Degenerate notify
+     // stack-locked by caller so by definition the implied waitset is empty.
+     return true;
+   }
+ 
+   if (mark->has_monitor()) {
+     ObjectMonitor * const mon = mark->monitor();
+     assert(mon->object() == obj, "invariant");
+     if (mon->owner() != self) return false;  // slow-path for IMS exception
+ 
+     if (mon->first_waiter() != NULL) {
+       // We have one or more waiters. Since this is an inflated monitor
+       // that we own, we can transfer one or more threads from the waitset
+       // to the entrylist here and now, avoiding the slow-path.
+       if (all) {
+         DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
+       } else {
+         DTRACE_MONITOR_PROBE(notify, mon, obj, self);
+       }
+       int tally = 0;
+       do {
+         mon->INotify(self);
+         ++tally;
+       } while (mon->first_waiter() != NULL && all);
+       if (ObjectMonitor::_sync_Notifications != NULL) {
+         ObjectMonitor::_sync_Notifications->inc(tally);
+       }
+     }
+     return true;
+   }
+ 
+   // biased locking and any other IMS exception states take the slow-path
+   return false;
+ }
+ 
+ 
  
  // The LockNode emitted directly at the synchronization site would have
  // been too big if it were to have included support for the cases of inflated
  // recursive enter and exit, so they go here instead.
  // Note that we can't safely call AsyncPrintJavaStack() from within
  // quick_enter() as our thread state remains _in_Java.
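quick_notify() above is the constrained fast path: it handles only the cases it can complete without reaching a safepoint (an object stack-locked by the caller, whose waitset is by definition empty, or an inflated monitor the caller already owns) and returns false for everything else, deferring to the full slow path. A minimal standalone sketch of that fast-path/slow-path dispatch shape, with stub types (Monitor, Thread) and stub functions standing in for HotSpot's internals:

#include <iostream>

struct Thread  { int id; };
struct Monitor {
  Thread* owner   = nullptr;  // owning thread, if any
  int     waiters = 0;        // threads parked in wait()
};

// Fast path: succeed only in the simple cases; false means "take the slow path".
static bool quick_notify_sketch(Monitor* mon, Thread* self, bool all) {
  if (mon == nullptr)     return false;   // invalid monitor
  if (mon->owner != self) return false;   // not the owner: slow path raises IMSE
  if (mon->waiters == 0)  return true;    // degenerate notify: nothing to move
  mon->waiters -= all ? mon->waiters : 1; // transfer waiter(s) to the entry list
  return true;
}

// Slow path: the full-service version (exceptions, inflation, blocking, ...).
static void slow_notify_sketch(Monitor* mon, Thread* self, bool all) {
  std::cout << "slow path taken\n";
}

static void runtime_notify(Monitor* mon, Thread* self, bool all) {
  if (!quick_notify_sketch(mon, self, all)) {
    slow_notify_sketch(mon, self, all);
  }
}

int main() {
  Thread t{1};
  Monitor m;
  m.owner = &t;
  m.waiters = 2;
  runtime_notify(&m, &t, false);      // fast path: one waiter transferred
  runtime_notify(&m, &t, true);       // fast path: remaining waiter transferred
  runtime_notify(&m, nullptr, false); // owner mismatch: falls back to slow path
  return 0;
}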
*** 1446,1457 ****
  // typically drives the scavenge rate. Large heaps can mean infrequent GC,
  // which in turn can mean large(r) numbers of objectmonitors in circulation.
  // This is an unfortunate aspect of this design.
  
  enum ManifestConstants {
!     ClearResponsibleAtSTW = 0,
!     MaximumRecheckInterval = 1000
  };
  
  // Deflate a single monitor if not in-use
  // Return true if deflated, false if in-use
  bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
--- 1500,1510 ----
  // typically drives the scavenge rate. Large heaps can mean infrequent GC,
  // which in turn can mean large(r) numbers of objectmonitors in circulation.
  // This is an unfortunate aspect of this design.
  
  enum ManifestConstants {
!     ClearResponsibleAtSTW = 0
  };
  
  // Deflate a single monitor if not in-use
  // Return true if deflated, false if in-use
  bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
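For context on the "Deflate a single monitor if not in-use" contract in the hunk above: deflation back to a plain mark word is only legal when no thread depends on the monitor's state. A standalone sketch of that in-use test, with illustrative fields rather than ObjectMonitor's actual layout:

#include <cstdio>

struct MonitorSketch {
  void* owner       = nullptr;  // owning thread, if any
  int   waiters     = 0;        // threads parked in wait()
  int   contentions = 0;        // threads contending to enter
};

// Mirrors deflate_monitor()'s contract: true if deflated, false if in-use.
static bool try_deflate(MonitorSketch* mon) {
  if (mon->owner != nullptr) return false;  // in use: owned
  if (mon->waiters != 0)     return false;  // in use: waiters present
  if (mon->contentions != 0) return false;  // in use: contended
  // (Here the real code would restore the object's displaced mark word
  //  and return the monitor to the free list.)
  return true;
}

int main() {
  MonitorSketch idle;               // nothing touching it: deflatable
  MonitorSketch busy;
  busy.waiters = 1;                 // still has a waiter: must stay inflated
  printf("idle deflated: %d\n", try_deflate(&idle));  // prints 1
  printf("busy deflated: %d\n", try_deflate(&busy));  // prints 0
  return 0;
}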