src/share/vm/runtime/mutex.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File
*** old/src/share/vm/runtime/mutex.cpp	Wed Jan 15 01:42:33 2014
--- new/src/share/vm/runtime/mutex.cpp	Wed Jan 15 01:42:33 2014

*** 505,515 **** --- 505,515 ----
   assert (_OnDeck == ESelf, "invariant") ;
   _OnDeck = NULL ;

   // Note that we current drop the inner lock (clear OnDeck) in the slow-path
!  // epilogue immediately after having acquired the outer lock.
   // But instead we could consider the following optimizations:
   // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
   //    This might avoid potential reacquisition of the inner lock in IUlock().
   // B. While still holding the inner lock, attempt to opportunistically select
   //    and unlink the next ONDECK thread from the EntryList.
*** 929,939 **** --- 929,939 ----
   // Try a brief spin to avoid passing thru thread state transition ...
   if (TrySpin (Self)) goto Exeunt ;

   check_block_state(Self);
   if (Self->is_Java_thread()) {
!    // Horribile dictu - we suffer through a state transition
!    // Horrible dictu - we suffer through a state transition
     assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
     ThreadBlockInVM tbivm ((JavaThread *) Self) ;
     ILock (Self) ;
   } else {
     // Mirabile dictu
*** 961,971 **** --- 961,971 ----
 void Monitor::lock_without_safepoint_check () {
   lock_without_safepoint_check (Thread::current()) ;
 }

! // Returns true if thread succeceed [sic] in grabbing the lock, otherwise false.
! // Returns true if thread succeeds in grabbing the lock, otherwise false.

 bool Monitor::try_lock() {
   Thread * const Self = Thread::current();
   debug_only(check_prelock_state(Self));
   // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");

src/share/vm/runtime/mutex.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File