
src/hotspot/share/runtime/thread.cpp

rev 51675 : imported patch 8210514


4719 // is common.  If we implement ListLock as a simple SpinLock it's common
4720 // for the JVM to devolve to yielding with little progress.  This is true
4721 // despite the fact that the critical sections protected by ListLock are
4722 // extremely short.
4723 //
4724 // TODO-FIXME: ListLock should be of type SpinLock.
4725 // We should make this a 1st-class type, integrated into the lock
4726 // hierarchy as leaf-locks.  Critically, the SpinLock structure
4727 // should have sufficient padding to avoid false-sharing and excessive
4728 // cache-coherency traffic.
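As a rough sketch of the padded, leaf-level spin lock the TODO above asks for, here is a version in portable C++ with std::atomic; the 128-byte line-size bound and the PaddedSpinLock name are illustrative assumptions, not part of this patch or of HotSpot's lock hierarchy.

// Sketch only (not HotSpot code): a leaf-level spin lock padded out to a
// cache line so neighbouring hot data cannot share its line and generate
// coherency traffic under contention.  128 bytes is an assumed upper bound
// on the line size.
#include <atomic>

struct alignas(128) PaddedSpinLock {
  std::atomic<int> _lock{0};

  void acquire() {
    for (;;) {
      int expected = 0;
      if (_lock.compare_exchange_weak(expected, 1, std::memory_order_acquire)) {
        return;                       // lock was free; we now own it
      }
      // A production version would back off (spin/yield/sleep) the way
      // SpinAcquire below does; this sketch simply retries.
    }
  }

  void release() {
    _lock.store(0, std::memory_order_release);
  }
};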
4729 
4730 
4731 typedef volatile int SpinLockT;
4732 
4733 void Thread::SpinAcquire(volatile int * adr, const char * LockName) {
4734   if (Atomic::cmpxchg(1, adr, 0) == 0) {
4735     return;   // normal fast-path return
4736   }
4737 
4738   // Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
4739   TEVENT(SpinAcquire - ctx);
4740   int ctr = 0;
4741   int Yields = 0;
4742   for (;;) {
4743     while (*adr != 0) {
4744       ++ctr;
4745       if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
4746         if (Yields > 5) {
4747           os::naked_short_sleep(1);
4748         } else {
4749           os::naked_yield();
4750           ++Yields;
4751         }
4752       } else {
4753         SpinPause();
4754       }
4755     }
4756     if (Atomic::cmpxchg(1, adr, 0) == 0) return;
4757   }
4758 }
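A typical call site pairs SpinAcquire with Thread::SpinRelease around a critical section kept as short as the comment above demands. In the sketch below only the SpinAcquire/SpinRelease calls reflect the real interface; the FreeNode type and the gFreeList/gFreeListLock names are invented for illustration.

// Hypothetical caller sketch -- the list and its lock word are made up,
// only Thread::SpinAcquire / Thread::SpinRelease mirror the real interface.
struct FreeNode { FreeNode* _next; };
static FreeNode*    gFreeList     = NULL;
static volatile int gFreeListLock = 0;

void push_free_node(FreeNode* n) {
  Thread::SpinAcquire(&gFreeListLock, "gFreeListLock");
  n->_next  = gFreeList;          // critical section: two stores, nothing more
  gFreeList = n;
  Thread::SpinRelease(&gFreeListLock);
}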
4759 


4814 //    Pictorially:  LockWord -> T1 <-> T2 <-> T3 <-> ... <-> Tn <-> Owner.
4815 //    The validity of the backlinks must be ratified before we trust the value.
4816 //    If the backlinks are invalid the exiting thread must back-track through
4817 //    the forward links, which are always trustworthy.
4818 // *  Add a successor indication.  The LockWord is currently encoded as
4819 //    (List, LOCKBIT:1).  We could also add a SUCCBIT or an explicit _succ variable
4820 //    to provide the usual futile-wakeup optimization.
4821 //    See RTStt for details.
4822 //
4823 
4824 
4825 const intptr_t LOCKBIT = 1;
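Concretely, the (List, LOCKBIT:1) word can be decoded with helpers like the following. These are a sketch written for this description, not functions that exist in thread.cpp; they rely on the same assumption the asserts below make, namely that ParkEvent pointers always have a zero low bit.

// Illustrative helpers for the lock-word encoding described above (sketch only).
inline bool       mux_is_locked(intptr_t w) { return (w & LOCKBIT) != 0; }
inline ParkEvent* mux_list_head(intptr_t w) { return (ParkEvent*)(w & ~LOCKBIT); }
inline intptr_t   mux_make_word(ParkEvent* head, bool locked) {
  return intptr_t(head) | (locked ? LOCKBIT : 0);   // low bit free due to alignment
}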
4826 
4827 void Thread::muxAcquire(volatile intptr_t * Lock, const char * LockName) {
4828   intptr_t w = Atomic::cmpxchg(LOCKBIT, Lock, (intptr_t)0);
4829   if (w == 0) return;
4830   if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
4831     return;
4832   }
4833 
4834   TEVENT(muxAcquire - Contention);
4835   ParkEvent * const Self = Thread::current()->_MuxEvent;
4836   assert((intptr_t(Self) & LOCKBIT) == 0, "invariant");
4837   for (;;) {
4838     int its = (os::is_MP() ? 100 : 0) + 1;
4839 
4840     // Optional spin phase: spin-then-park strategy
4841     while (--its >= 0) {
4842       w = *Lock;
4843       if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
4844         return;
4845       }
4846     }
4847 
4848     Self->reset();
4849     Self->OnList = intptr_t(Lock);
4850     // The following fence() isn't _strictly necessary as the subsequent
4851     // CAS() both serializes execution and ratifies the fetched *Lock value.
4852     OrderAccess::fence();
4853     for (;;) {
4854       w = *Lock;
4855       if ((w & LOCKBIT) == 0) {
4856         if (Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
4857           Self->OnList = 0;   // hygiene - allows stronger asserts
4858           return;
4859         }
4860         continue;      // Interference -- *Lock changed -- Just retry
4861       }
4862       assert(w & LOCKBIT, "invariant");
4863       Self->ListNext = (ParkEvent *) (w & ~LOCKBIT);
4864       if (Atomic::cmpxchg(intptr_t(Self)|LOCKBIT, Lock, w) == w) break;
4865     }
4866 
4867     while (Self->OnList != 0) {
4868       Self->park();
4869     }
4870   }
4871 }
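The park loop above only exits once some other thread pops this ParkEvent off the chain, clears its OnList field, and unparks it. A much-simplified sketch of that release-side handshake follows; the real muxRelease in this file is the authoritative version, and this rewrite exists only to make the OnList/park protocol concrete.

// Simplified release-side sketch (not the real muxRelease): release the lock,
// pop the head waiter in the same CAS, then clear OnList and unpark it.
void mux_release_sketch(volatile intptr_t * Lock) {
  for (;;) {
    intptr_t w = *Lock;
    assert(w & LOCKBIT, "invariant: must be held");
    if (w == LOCKBIT) {                         // no waiters: just drop the bit
      if (Atomic::cmpxchg((intptr_t)0, Lock, w) == w) return;
      continue;                                 // interference -- retry
    }
    ParkEvent * head = (ParkEvent *)(w & ~LOCKBIT);
    ParkEvent * nxt  = head->ListNext;
    // One CAS both releases the lock and unlinks the head waiter.
    if (Atomic::cmpxchg(intptr_t(nxt), Lock, w) != w) continue;
    head->OnList = 0;                           // lets the waiter's park loop exit
    OrderAccess::fence();
    head->unpark();                             // wake the popped waiter
    return;
  }
}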
4872 
4873 void Thread::muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev) {
4874   intptr_t w = Atomic::cmpxchg(LOCKBIT, Lock, (intptr_t)0);
4875   if (w == 0) return;
4876   if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
4877     return;
4878   }
4879 
4880   TEVENT(muxAcquire - Contention);
4881   ParkEvent * ReleaseAfter = NULL;
4882   if (ev == NULL) {
4883     ev = ReleaseAfter = ParkEvent::Allocate(NULL);
4884   }
4885   assert((intptr_t(ev) & LOCKBIT) == 0, "invariant");
4886   for (;;) {
4887     guarantee(ev->OnList == 0, "invariant");
4888     int its = (os::is_MP() ? 100 : 0) + 1;
4889 
4890     // Optional spin phase: spin-then-park strategy
4891     while (--its >= 0) {
4892       w = *Lock;
4893       if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
4894         if (ReleaseAfter != NULL) {
4895           ParkEvent::Release(ReleaseAfter);
4896         }
4897         return;
4898       }
4899     }
4900 




4719 // is common.  If we implement ListLock as a simple SpinLock it's common
4720 // for the JVM to devolve to yielding with little progress.  This is true
4721 // despite the fact that the critical sections protected by ListLock are
4722 // extremely short.
4723 //
4724 // TODO-FIXME: ListLock should be of type SpinLock.
4725 // We should make this a 1st-class type, integrated into the lock
4726 // hierarchy as leaf-locks.  Critically, the SpinLock structure
4727 // should have sufficient padding to avoid false-sharing and excessive
4728 // cache-coherency traffic.
4729 
4730 
4731 typedef volatile int SpinLockT;
4732 
4733 void Thread::SpinAcquire(volatile int * adr, const char * LockName) {
4734   if (Atomic::cmpxchg(1, adr, 0) == 0) {
4735     return;   // normal fast-path return
4736   }
4737 
4738   // Slow-path : We've encountered contention -- Spin/Yield/Block strategy.

4739   int ctr = 0;
4740   int Yields = 0;
4741   for (;;) {
4742     while (*adr != 0) {
4743       ++ctr;
4744       if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
4745         if (Yields > 5) {
4746           os::naked_short_sleep(1);
4747         } else {
4748           os::naked_yield();
4749           ++Yields;
4750         }
4751       } else {
4752         SpinPause();
4753       }
4754     }
4755     if (Atomic::cmpxchg(1, adr, 0) == 0) return;
4756   }
4757 }
4758 


4813 //    Pictorially:  LockWord -> T1 <-> T2 <-> T3 <-> ... <-> Tn <-> Owner.
4814 //    The validity of the backlinks must be ratified before we trust the value.
4815 //    If the backlinks are invalid the exiting thread must back-track through
4816 //    the forward links, which are always trustworthy.
4817 // *  Add a successor indication.  The LockWord is currently encoded as
4818 //    (List, LOCKBIT:1).  We could also add a SUCCBIT or an explicit _succ variable
4819 //    to provide the usual futile-wakeup optimization.
4820 //    See RTStt for details.
4821 //
4822 
4823 
4824 const intptr_t LOCKBIT = 1;
4825 
4826 void Thread::muxAcquire(volatile intptr_t * Lock, const char * LockName) {
4827   intptr_t w = Atomic::cmpxchg(LOCKBIT, Lock, (intptr_t)0);
4828   if (w == 0) return;
4829   if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
4830     return;
4831   }
4832 

4833   ParkEvent * const Self = Thread::current()->_MuxEvent;
4834   assert((intptr_t(Self) & LOCKBIT) == 0, "invariant");
4835   for (;;) {
4836     int its = (os::is_MP() ? 100 : 0) + 1;
4837 
4838     // Optional spin phase: spin-then-park strategy
4839     while (--its >= 0) {
4840       w = *Lock;
4841       if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
4842         return;
4843       }
4844     }
4845 
4846     Self->reset();
4847     Self->OnList = intptr_t(Lock);
4848     // The following fence() isn't _strictly necessary as the subsequent
4849     // CAS() both serializes execution and ratifies the fetched *Lock value.
4850     OrderAccess::fence();
4851     for (;;) {
4852       w = *Lock;
4853       if ((w & LOCKBIT) == 0) {
4854         if (Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
4855           Self->OnList = 0;   // hygiene - allows stronger asserts
4856           return;
4857         }
4858         continue;      // Interference -- *Lock changed -- Just retry
4859       }
4860       assert(w & LOCKBIT, "invariant");
4861       Self->ListNext = (ParkEvent *) (w & ~LOCKBIT);
4862       if (Atomic::cmpxchg(intptr_t(Self)|LOCKBIT, Lock, w) == w) break;
4863     }
4864 
4865     while (Self->OnList != 0) {
4866       Self->park();
4867     }
4868   }
4869 }
4870 
4871 void Thread::muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev) {
4872   intptr_t w = Atomic::cmpxchg(LOCKBIT, Lock, (intptr_t)0);
4873   if (w == 0) return;
4874   if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
4875     return;
4876   }
4877 

4878   ParkEvent * ReleaseAfter = NULL;
4879   if (ev == NULL) {
4880     ev = ReleaseAfter = ParkEvent::Allocate(NULL);
4881   }
4882   assert((intptr_t(ev) & LOCKBIT) == 0, "invariant");
4883   for (;;) {
4884     guarantee(ev->OnList == 0, "invariant");
4885     int its = (os::is_MP() ? 100 : 0) + 1;
4886 
4887     // Optional spin phase: spin-then-park strategy
4888     while (--its >= 0) {
4889       w = *Lock;
4890       if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
4891         if (ReleaseAfter != NULL) {
4892           ParkEvent::Release(ReleaseAfter);
4893         }
4894         return;
4895       }
4896     }
4897 

