src/share/vm/runtime/mutex.cpp
rev 5732 : [mq]: comments2



  // Self is now in the ONDECK position and will remain so until it
  // manages to acquire the lock.
 OnDeck_LOOP:
  for (;;) {
    assert (_OnDeck == ESelf, "invariant") ;
    if (TrySpin (Self)) break ;
    // CONSIDER: if ESelf->TryPark() && TryLock() break ...
    // It's probably wise to spin only if we *actually* blocked
    // CONSIDER: check the lockbyte, if it remains set then
    // preemptively drain the cxq into the EntryList.
    // The best place and time to perform queue operations -- lock metadata --
    // is _before having acquired the outer lock, while waiting for the lock to drop.
    ParkCommon (ESelf, 0) ;
  }
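The loop above alternates a bounded spin (TrySpin) with a park (ParkCommon). Below is a minimal, self-contained sketch of that spin-then-park shape, using a standard-library timed wait in place of HotSpot's ParkEvent; all names here (ToyLock and friends) are illustrative assumptions, not HotSpot code:

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <mutex>

// Toy spin-then-park lock -- an illustration of the shape, not of mutex.cpp.
class ToyLock {
  std::atomic<int> _word{0};              // 0 = free, 1 = held
  std::mutex _m;                          // backs the "park" below
  std::condition_variable _cv;
 public:
  bool try_lock() { int f = 0; return _word.compare_exchange_strong(f, 1); }
  void lock() {
    for (;;) {
      for (int i = 0; i < 100; ++i) {     // brief spin, akin to TrySpin()
        if (try_lock()) return;
      }
      std::unique_lock<std::mutex> g(_m); // then park, akin to ParkCommon()
      _cv.wait_for(g, std::chrono::milliseconds(1));  // timed, so a missed
    }                                     // notify cannot strand the waiter
  }
  void unlock() {
    _word.store(0, std::memory_order_release);
    _cv.notify_one();                     // wake one parked waiter
  }
};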

  assert (_OnDeck == ESelf, "invariant") ;
  _OnDeck = NULL ;

  // Note that we currently drop the inner lock (clear OnDeck) in the slow-path
  // epilogue immediately after having acquired the outer lock.
  // But instead we could consider the following optimizations:
  // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
  //    This might avoid potential reacquisition of the inner lock in IUnlock().
  // B. While still holding the inner lock, attempt to opportunistically select
  //    and unlink the next ONDECK thread from the EntryList.
  //    If successful, set ONDECK to refer to that thread, otherwise clear ONDECK.
  //    It's critical that the select-and-unlink operation run in constant time, as
  //    it executes when holding the outer lock and may artificially increase the
  //    effective length of the critical section.  (See the sketch after this function.)
  // Note that (A) and (B) are tantamount to succession by direct handoff for
  // the inner lock.
  goto Exeunt ;
}
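Option (B) above hinges on the select-and-unlink step being constant-time. Popping the head of a singly-linked EntryList is O(1); the node type and names below are assumptions for illustration, not the actual HotSpot structures:

#include <cstddef>

struct WaiterNode {
  WaiterNode* _next;
};

// Pop the head in O(1) -- no traversal while the outer lock is held.
// Returns NULL when the list is empty (the caller would clear ONDECK).
static WaiterNode* select_and_unlink(WaiterNode** entry_list) {
  WaiterNode* head = *entry_list;
  if (head != NULL) {
    *entry_list = head->_next;
    head->_next = NULL;
  }
  return head;
}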

void Monitor::IUnlock (bool RelaxAssert) {
  assert (ILocked(), "invariant") ;
  // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately
  // before the store that releases the lock.  Crucially, all the stores and loads in the
  // critical section must be globally visible before the store of 0 into the lock-word
  // that releases the lock becomes globally visible.  That is, memory accesses in the


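The #storestore|#loadstore requirement described at the top of IUnlock is exactly what release semantics provide. A portable sketch, assuming a simple lock-word layout (the layout and names are illustrative, not HotSpot's):

#include <atomic>

static std::atomic<int> lock_word{1};   // 1 = held; layout assumed for illustration

void release_with_fence() {
  // All critical-section loads/stores become visible before the releasing store.
  std::atomic_thread_fence(std::memory_order_release);   // #storestore|#loadstore
  lock_word.store(0, std::memory_order_relaxed);
}

void release_with_release_store() {
  lock_word.store(0, std::memory_order_release);         // same guarantee, fused
}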
    return ;
  }

  // The lock is contended ...

  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    // A Java thread has locked the lock but has not entered the
    // critical region -- let's just pretend we've locked the lock
    // and go on.  We note this with _snuck so we can also
    // pretend to unlock when the time comes.
    _snuck = true;
    goto Exeunt ;
  }
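A sneaked acquire must later be matched by a pretend release, which is what _snuck records. A toy analogue of that handshake -- the type, fields, and methods below are assumptions for illustration, not the actual Monitor unlock path:

// Toy sketch of the _snuck handshake (illustrative only).
struct ToyMonitor {
  void* _owner = nullptr;
  bool  _snuck = false;

  void sneak_acquire(void* self) {            // legal only at a safepoint
    _owner = self;
    _snuck = true;
  }
  void release() {
    _owner = nullptr;
    if (_snuck) { _snuck = false; return; }   // pretend-unlock: no real release
    // ... the ordinary unlock path would run here ...
  }
};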

  // Try a brief spin to avoid passing through a thread state transition ...
  if (TrySpin (Self)) goto Exeunt ;

  check_block_state(Self);
  if (Self->is_Java_thread()) {
    // Horribile dictu - we suffer through a state transition
    assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
    ThreadBlockInVM tbivm ((JavaThread *) Self) ;
    ILock (Self) ;
  } else {
    // Mirabile dictu
    ILock (Self) ;
  }
  goto Exeunt ;
}
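ThreadBlockInVM above is an RAII state transition: constructing it marks the thread blocked, and the destructor restores the previous state on every exit path, including early returns. A reduced analogue with made-up state names (not the actual ThreadBlockInVM implementation):

enum ToyThreadState { IN_VM, BLOCKED };

struct ToyThread {
  ToyThreadState _state;
};

// RAII guard: blocked for the scope's duration, restored on destruction.
class ToyBlockInVM {
  ToyThread* const _t;
 public:
  explicit ToyBlockInVM(ToyThread* t) : _t(t) { _t->_state = BLOCKED; }
  ~ToyBlockInVM() { _t->_state = IN_VM; }
};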

void Monitor::lock() {
  this->lock(Thread::current());
}

// Lock without safepoint check - a degenerate variant of lock().
// Should ONLY be used by safepoint code and other code
// that is guaranteed not to block while running inside the VM. If this is called with
// thread state set to be in VM, the safepoint synchronization code will deadlock!

void Monitor::lock_without_safepoint_check (Thread * Self) {
  assert (_owner != Self, "invariant") ;
  ILock (Self) ;
  assert (_owner == NULL, "invariant");
  set_owner (Self);
}

void Monitor::lock_without_safepoint_check () {
  lock_without_safepoint_check (Thread::current()) ;
}
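A hedged usage sketch of the contract stated above: SomeGlobal_lock is a hypothetical Monitor used only for illustration, and the caller is safe only because its critical section cannot block inside the VM:

// SomeGlobal_lock is a hypothetical Monitor* for illustration.
extern Monitor* SomeGlobal_lock;

void update_shared_state_at_safepoint() {
  SomeGlobal_lock->lock_without_safepoint_check();
  // ... short, non-blocking critical section ...
  SomeGlobal_lock->unlock();
}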


// Returns true if the thread succeeds in grabbing the lock, otherwise false.

bool Monitor::try_lock() {
  Thread * const Self = Thread::current();
  debug_only(check_prelock_state(Self));
  // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");

  // Special case, where all Java threads are stopped.
  // The lock may have been acquired but _owner is not yet set.
  // In that case the VM thread can safely grab the lock.
  // It strikes me this should appear _after the TryLock() fails, below.
  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    set_owner(Self); // Do not need to be atomic, since we are at a safepoint
    _snuck = true;
    return true;
  }

  if (TryLock()) {
    // We got the lock
    assert (_owner == NULL, "invariant");