
src/hotspot/share/runtime/mutex.cpp

@@ -346,25 +346,21 @@
   if (TryLock())    return 1;
   if (!os::is_MP()) return 0;
 
   int Probes  = 0;
   int Delay   = 0;
-  int Steps   = 0;
-  int SpinMax = NativeMonitorSpinLimit;
-  int flgs    = NativeMonitorFlags;
+  int SpinMax = 20;
   for (;;) {
     intptr_t v = _LockWord.FullWord;
     if ((v & _LBIT) == 0) {
       if (Atomic::cmpxchg (v|_LBIT, &_LockWord.FullWord, v) == v) {
         return 1;
       }
       continue;
     }
 
-    if ((flgs & 8) == 0) {
-      SpinPause();
-    }
+    SpinPause();
 
     // Periodically increase Delay -- variable Delay form
     // conceptually: delay *= 1 + 1/Exponent
     ++Probes;
     if (Probes > SpinMax) return 0;
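
The loop above, with the flag gating removed, is a bounded test-and-test-and-set spin: read the lock word, CAS only when the lock bit is clear, otherwise pause. A minimal standalone sketch of that shape (not HotSpot code; std::atomic and _mm_pause stand in for _LockWord and SpinPause, and _mm_pause is x86-specific):

    #include <atomic>
    #include <cstdint>
    #include <immintrin.h>   // _mm_pause(), an x86-specific stand-in for SpinPause()

    static const intptr_t LBIT = 1;                  // illustrative lock-bit position

    static bool try_spin_sketch(std::atomic<intptr_t>& lock_word) {
      const int spin_max = 20;                       // the hard-coded SpinMax above
      for (int probes = 0; probes < spin_max; ++probes) {
        intptr_t v = lock_word.load(std::memory_order_relaxed);
        if ((v & LBIT) == 0) {
          // Lock bit clear: try to set it while preserving the other bits.
          if (lock_word.compare_exchange_weak(v, v | LBIT,
                                              std::memory_order_acquire)) {
            return true;                             // acquired by spinning
          }
          continue;                                  // lost the race; re-read and retry
        }
        _mm_pause();                                 // unconditional pause, as in the new code
      }
      return false;                                  // budget exhausted; caller enqueues/parks
    }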

@@ -372,12 +368,10 @@
     if ((Probes & 0x7) == 0) {
       Delay = ((Delay << 1)|1) & 0x7FF;
       // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
     }
 
-    if (flgs & 2) continue;
-
     // Consider checking _owner's schedctl state, if OFFPROC abort spin.
     // If the owner is OFFPROC then it's unlikely that the lock will be dropped
     // in a timely fashion, which suggests that spinning would not be fruitful
     // or profitable.
 

@@ -388,30 +382,25 @@
     //   wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
     // Note that on Niagara-class systems we want to minimize STs in the
     // spin loop.  N1 and brethren write-around the L1$ over the xbar into the L2$.
     // Furthermore, they don't have a W$ like traditional SPARC processors.
     // We currently use a Marsaglia Shift-Xor RNG loop.
-    Steps += Delay;
     if (Self != NULL) {
       jint rv = Self->rng[0];
       for (int k = Delay; --k >= 0;) {
         rv = MarsagliaXORV(rv);
-        if ((flgs & 4) == 0 && SafepointMechanism::poll(Self)) return 0;
+        if (SafepointMechanism::poll(Self)) return 0;
       }
       Self->rng[0] = rv;
     } else {
       Stall(Delay);
     }
   }
 }
 
 static int ParkCommon(ParkEvent * ev, jlong timo) {
   // Diagnostic support - periodically unwedge blocked threads
-  intx nmt = NativeMonitorTimeout;
-  if (nmt > 0 && (nmt < timo || timo <= 0)) {
-    timo = nmt;
-  }
   int err = OS_OK;
   if (0 == timo) {
     ev->park();
   } else {
     err = ev->park(timo);
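
The comment in the hunk above motivates a register-only Marsaglia shift-xor RNG to burn the Delay iterations: no loads or stores, so the spinning thread generates no coherence traffic. A minimal sketch of that structure; the 13/17/5 shifts are Marsaglia's canonical xorshift32 triple and may not be the exact constants MarsagliaXORV uses:

    #include <cstdint>

    static inline uint32_t xorshift32(uint32_t x) {
      // One Marsaglia shift-xor step: a few register ops, no memory traffic.
      x ^= x << 13;
      x ^= x >> 17;
      x ^= x << 5;
      return x;
    }

    // Burn 'delay' iterations; the caller stores the advanced seed back once,
    // mirroring Self->rng[0] in the loop above.
    static uint32_t spin_delay(uint32_t seed, int delay) {
      uint32_t rv = (seed != 0) ? seed : 1;          // xorshift must not start from 0
      for (int k = delay; --k >= 0;) {
        rv = xorshift32(rv);
      }
      return rv;
    }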

@@ -464,15 +453,10 @@
   // Either Enqueue Self on cxq or acquire the outer lock.
   // LockWord encoding = (cxq,LOCKBYTE)
   ESelf->reset();
   OrderAccess::fence();
 
-  // Optional optimization ... try barging on the inner lock
-  if ((NativeMonitorFlags & 32) && Atomic::replace_if_null(ESelf, &_OnDeck)) {
-    goto OnDeck_LOOP;
-  }
-
   if (AcquireOrPush(ESelf)) goto Exeunt;
 
   // At any given time there is at most one ondeck thread.
   // ondeck implies not resident on cxq and not resident on EntryList
   // Only the OnDeck thread can try to acquire -- contend for -- the lock.
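
The AcquireOrPush(ESelf) call above folds two outcomes into one CAS loop on the lock word, whose encoding the comment gives as (cxq, LOCKBYTE): either take the free lock, or atomically push the caller onto the cxq while the lock stays held. A simplified sketch with an illustrative one-bit encoding (the real LockWord uses a full lock byte):

    #include <atomic>
    #include <cstdint>

    struct Node { Node* next; };                 // stands in for the ParkEvent
    static const uintptr_t LBIT = 1;             // low bit = lock, upper bits = cxq head
    // Nodes must be at least 2-byte aligned so the low bit stays free for the lock.

    // Returns true if the lock was acquired, false if 'self' was pushed onto
    // the cxq and the caller must go on to park.
    static bool acquire_or_push(std::atomic<uintptr_t>& lock_word, Node* self) {
      uintptr_t v = lock_word.load(std::memory_order_relaxed);
      for (;;) {
        if ((v & LBIT) == 0) {
          // Lock free: try to take it without enqueueing at all.
          if (lock_word.compare_exchange_weak(v, v | LBIT,
                                              std::memory_order_acquire)) {
            return true;
          }
        } else {
          // Lock held: splice ourselves in front of the current cxq head.
          self->next = reinterpret_cast<Node*>(v & ~LBIT);
          uintptr_t desired = reinterpret_cast<uintptr_t>(self) | LBIT;
          if (lock_word.compare_exchange_weak(v, desired,
                                              std::memory_order_release)) {
            return false;
          }
        }
        // A failed CAS leaves the refreshed word in v; just retry.
      }
    }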

@@ -482,11 +466,10 @@
     ParkCommon(ESelf, 0);
   }
 
   // Self is now in the OnDeck position and will remain so until it
   // manages to acquire the lock.
- OnDeck_LOOP:
   for (;;) {
     assert(_OnDeck == ESelf, "invariant");
     if (TrySpin(Self)) break;
     // It's probably wise to spin only if we *actually* blocked
     // CONSIDER: check the lockbyte, if it remains set then
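
The hunk above is the OnDeck contention loop: with the barging shortcut gone (previous hunk), only the single OnDeck thread contends for the lock, alternating a bounded spin (TrySpin) with parking (ParkCommon) until it succeeds. The shape of that loop, sketched with C++20 atomic wait/notify standing in for ParkEvent park/unpark (illustrative only):

    #include <atomic>

    // Acquire as the OnDeck thread: spin a little, then block, then retry.
    // The releasing thread is assumed to store 0 and call lock_word.notify_one().
    static void ondeck_acquire(std::atomic<int>& lock_word) {
      const int spin_budget = 20;
      for (;;) {
        // Bounded spin first -- cheap when the owner is about to release.
        for (int i = 0; i < spin_budget; ++i) {
          int expected = 0;
          if (lock_word.compare_exchange_weak(expected, 1,
                                              std::memory_order_acquire)) {
            return;                              // lock acquired
          }
        }
        // Spin budget exhausted: block while the lock word still reads 1.
        // Wakeups may be spurious, so loop back and spin again.
        lock_word.wait(1, std::memory_order_relaxed);
      }
    }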

@@ -704,15 +687,10 @@
     // subtle, so for the sake of code stewardship ...
     OrderAccess::fence();
     nfy->Notified = 1;
   }
   Thread::muxRelease(_WaitLock);
-  if (nfy != NULL && (NativeMonitorFlags & 16)) {
-    // Experimental code ... light up the wakee in the hope that this thread (the owner)
-    // will drop the lock just about the time the wakee comes ONPROC.
-    nfy->unpark();
-  }
   assert(ILocked(), "invariant");
   return true;
 }
 
 // Currently notifyAll() transfers the waiters one-at-a-time from the waitset
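
In the notify path above, the fence ensures the wait-set unlinking and relinking of nfy are visible before the wakee can observe Notified == 1; dropping the experimental eager unpark (NativeMonitorFlags & 16) is the only behavioral change. A minimal sketch of that publication ordering, using a release store in place of the explicit fence:

    #include <atomic>

    struct Waiter {
      Waiter* ListNext;                 // wait-set / entry-list linkage (illustrative)
      std::atomic<int> Notified{0};
    };

    static void notify_publish(Waiter* nfy) {
      // All earlier writes (dequeue from the WaitSet, relink onto the
      // EntryList or cxq) must not be reordered past this store, so a wakee
      // that sees Notified == 1 also sees consistent list state.
      nfy->Notified.store(1, std::memory_order_release);
      // With the experimental flag gone, no unpark happens here; the wakee
      // is woken by the normal lock-release path instead.
    }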

@@ -792,11 +770,11 @@
   // spurious wakeups back to the caller.
 
   for (;;) {
     if (ESelf->Notified) break;
     int err = ParkCommon(ESelf, timo);
-    if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break;
+    if (err == OS_TIMEOUT) break;
   }
 
   // Prepare for reentry - if necessary, remove ESelf from WaitSet
   // ESelf can be:
   // 1. Still on the WaitSet.  This can happen if we exited the loop by timeout.
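
With the NativeMonitorFlags & 1 escape removed in the hunk above, the wait loop exits only on a notification or a timed-out park; any other wakeup is treated as spurious and simply parks again. A self-contained sketch of that loop, with illustrative constants and a trivial stand-in for ParkCommon:

    #include <atomic>

    enum { OS_OK = 0, OS_TIMEOUT = -99 };        // illustrative values, not HotSpot's

    // Stand-in for ParkCommon(ESelf, timo); the real call blocks on the ParkEvent.
    static int park_common_stub(long /*timo*/) { return OS_TIMEOUT; }

    static void wait_loop(std::atomic<int>& notified, long timo) {
      for (;;) {
        if (notified.load(std::memory_order_acquire)) break;   // woken by notify
        int err = park_common_stub(timo);
        if (err == OS_TIMEOUT) break;                          // timed wait expired
        // Any other return is a spurious wakeup: re-check and park again.
      }
    }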