/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "services/threadService.hpp"
#include "trace/tracing.hpp"
#include "trace/traceMacros.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"

#if defined(__GNUC__) && !defined(IA64) && !defined(PPC64)
// Need to inhibit inlining for older versions of GCC to avoid build-time failures
  #define NOINLINE __attribute__((noinline))
#else
  #define NOINLINE
#endif


#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.


#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)obj)->klass()->name();                         \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (monitor), bytes, len, (millis));               \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_contended__enter HOTSPOT_MONITOR_CONTENDED_ENTER
#define HOTSPOT_MONITOR_contended__entered HOTSPOT_MONITOR_CONTENDED_ENTERED
#define HOTSPOT_MONITOR_contended__exit HOTSPOT_MONITOR_CONTENDED_EXIT
#define HOTSPOT_MONITOR_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_notifyAll HOTSPOT_MONITOR_NOTIFYALL

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_##probe(jtid,                                        \
                              (uintptr_t)(monitor), bytes, len);           \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)    {;}
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)          {;}

#endif // ndef DTRACE_ENABLED
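
// Typical usage, as in ::enter() below -- the probe argument selects one of
// the lower-case aliases defined in the DTRACE_ENABLED branch above:
//
//   DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);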

// Tunables ...
// The knob* variables are effectively final.  Once set they should
// never be modified thereafter.  Consider using __read_mostly with GCC.

int ObjectMonitor::Knob_Verbose     = 0;
int ObjectMonitor::Knob_VerifyInUse = 0;
int ObjectMonitor::Knob_SpinLimit   = 5000;    // derived by an external tool
static int Knob_LogSpins            = 0;       // enable jvmstat tally for spins
static int Knob_HandOff             = 0;
static int Knob_ReportSettings      = 0;

static int Knob_SpinBase            = 0;       // Floor AKA SpinMin
static int Knob_SpinBackOff         = 0;       // spin-loop backoff
static int Knob_CASPenalty          = -1;      // Penalty for failed CAS
static int Knob_OXPenalty           = -1;      // Penalty for observed _owner change
static int Knob_SpinSetSucc         = 1;       // spinners set the _succ field
static int Knob_SpinEarly           = 1;
static int Knob_SuccEnabled         = 1;       // futile wake throttling
static int Knob_SuccRestrict        = 0;       // Limit successors + spinners to at-most-one
static int Knob_MaxSpinners         = -1;      // Should be a function of # CPUs
static int Knob_Bonus               = 100;     // spin success bonus
static int Knob_BonusB              = 100;     // spin success bonus
static int Knob_Penalty             = 200;     // spin failure penalty
static int Knob_Poverty             = 1000;
static int Knob_SpinAfterFutile     = 1;       // Spin after returning from park()
static int Knob_FixedSpin           = 0;
static int Knob_OState              = 3;       // Spinner checks thread state of _owner
static int Knob_UsePause            = 1;
static int Knob_ExitPolicy          = 0;
static int Knob_PreSpin             = 10;      // 20-100 likely better
static int Knob_ResetEvent          = 0;
static int BackOffMask              = 0;

static int Knob_FastHSSEC           = 0;
static int Knob_MoveNotifyee        = 2;       // notify() - disposition of notifyee
static int Knob_QMode               = 0;       // EntryList-cxq policy - queue discipline
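// In brief, the QMode dispositions implemented in ::exit() below:
//   0 : (default) at drain-time, move the detached cxq chain to the
//       EntryList in cxq (LIFO push) order
//   1 : as 0, but reverse the chain while draining
//   2 : cxq takes precedence -- wake the head of cxq directly
//   3 : drain cxq and append it to the tail of the EntryList
//   4 : drain cxq and prepend it to the head of the EntryList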
static volatile int InitDone        = 0;

#define TrySpin TrySpin_VaryDuration

// -----------------------------------------------------------------------------
// Theory of operations -- Monitors lists, thread residency, etc:
//
// * A thread acquires ownership of a monitor by successfully
//   CAS()ing the _owner field from null to non-null.
//
// * Invariant: A thread appears on at most one monitor list --
//   cxq, EntryList or WaitSet -- at any one time.
//
// * Contending threads "push" themselves onto the cxq with CAS
//   and then spin/park.
//
// * After a contending thread eventually acquires the lock it must
//   dequeue itself from either the EntryList or the cxq.
//
// * The exiting thread identifies and unparks an "heir presumptive"
//   tentative successor thread on the EntryList.  Critically, the
//   exiting thread doesn't unlink the successor thread from the EntryList.
//   After having been unparked, the wakee will recontend for ownership of
//   the monitor.  The successor (wakee) will either acquire the lock or
//   re-park itself.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread.  (This is also referred to as "handoff succession".)
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//   If the EntryList is empty but the cxq is populated the exiting
//   thread will drain the cxq into the EntryList.  It does so by
//   detaching the cxq (installing null with CAS) and folding
//   the threads from the cxq into the EntryList.  The EntryList is
//   doubly linked, while the cxq is singly linked because of the
//   CAS-based "push" used to enqueue recently arrived threads (RATs).
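//
//   In code form, the CAS-based push is the classic lock-free prepend; a
//   condensed copy of the enqueue loop in EnterI(), below:
//
//     ObjectWaiter * nxt;
//     for (;;) {
//       node._next = nxt = _cxq;                                  // speculate
//       if (Atomic::cmpxchg_ptr(&node, &_cxq, nxt) == nxt) break; // published
//     }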
//
// * Concurrency invariants:
//
//   -- only the monitor owner may access or mutate the EntryList.
//      The mutex property of the monitor itself protects the EntryList
//      from concurrent interference.
//   -- Only the monitor owner may detach the cxq.
//
// * The monitor entry list operations avoid locks, but strictly speaking
//   they're not lock-free.  Enter is lock-free, exit is not.
//   See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread.  This mechanism is immune to ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//
// * Taken together, the cxq and the EntryList constitute a single
//   logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to improve the odds of a constant-time
//   dequeue operation after acquisition (in the ::enter() epilogue) and
//   to reduce heat on the list ends.  (cf. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the monitor lock -- that is, we want to
//   minimize monitor lock hold times.  Note that even a small amount of
//   fixed spinning will greatly reduce the # of enqueue-dequeue operations
//   on EntryList|cxq.  That is, spinning relieves contention on the "inner"
//   locks and monitor metadata.
//
//   Cxq points to the set of Recently Arrived Threads attempting entry.
//   Because we push threads onto _cxq with CAS, the RATs must take the form of
//   a singly-linked LIFO.  We drain _cxq into EntryList at unlock-time when
//   the unlocking thread notices that EntryList is null but _cxq is non-null.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list.  Critically, we want insert and delete operations
//   to operate in constant-time.  If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely.  Viz.,
//   http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   Queue discipline is enforced at ::exit() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark.  Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark.  The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet.
//
// * notify() or notifyAll() simply transfers threads from the WaitSet to
//   either the EntryList or cxq.  Subsequent exit() operations will
//   unpark the notifyee.  Unparking a notifyee in notify() is inefficient -
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * An interesting alternative is to encode cxq as (List,LockByte) where
//   the LockByte is 0 iff the monitor is unowned.  _owner is simply an auxiliary
//   variable, like _recursions, in the scheme.  The threads or Events that form
//   the list would have to be aligned on 256-byte address boundaries.  A thread
//   would try to acquire the lock or enqueue itself with CAS, but exiting threads
//   could use a 1-0 protocol and simply STB to set the LockByte to 0.
//   Note that this is *not* word-tearing, but it does presume that full-word
//   CAS operations remain coherent when intermixed with STB operations.  That's
//   true on most common processors.
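//
//   A purely illustrative sketch of that alternative -- not implemented
//   here; the _LockWord name and the little-endian byte placement are
//   assumptions of the example:
//
//     uintptr_t head = _LockWord & ~(uintptr_t)0xFF;  // list, 256-byte aligned
//     CAS(&_LockWord, head, head | 1);                // enter: set LockByte
//     *((volatile jbyte *) &_LockWord) = 0;           // 1-0 exit: plain STB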
//
// * See also http://blogs.sun.com/dave


// -----------------------------------------------------------------------------
// Enter support

bool ObjectMonitor::try_enter(Thread* THREAD) {
  if (THREAD != _owner) {
    if (THREAD->is_lock_owned ((address)_owner)) {
      assert(_recursions == 0, "internal state error");
      _owner = THREAD;
      _recursions = 1;
      return true;
    }
    if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
      return false;
    }
    return true;
  } else {
    _recursions++;
    return true;
  }
}

void NOINLINE ObjectMonitor::enter(TRAPS) {
  // The following code is ordered to check the most common cases first
  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
  Thread * const Self = THREAD;

  void * cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL);
  if (cur == NULL) {
    // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
    assert(_recursions == 0, "invariant");
    assert(_owner == Self, "invariant");
    return;
  }

  if (cur == Self) {
    // TODO-FIXME: check for integer overflow!  BUGID 6557169.
    _recursions++;
    return;
  }

  if (Self->is_lock_owned ((address)cur)) {
    assert(_recursions == 0, "internal state error");
    _recursions = 1;
    // Commute owner from a thread-specific on-stack BasicLock address to
    // a full-fledged "Thread *".
    _owner = Self;
    return;
  }

  // We've encountered genuine contention.
  assert(Self->_Stalled == 0, "invariant");
  Self->_Stalled = intptr_t(this);

  // Try one round of spinning *before* enqueueing Self
  // and before going through the awkward and expensive state
  // transitions.  The following spin is strictly optional ...
  // Note that if we acquire the monitor from an initial spin
  // we forgo posting JVMTI events and firing DTRACE probes.
  if (Knob_SpinEarly && TrySpin (Self) > 0) {
    assert(_owner == Self, "invariant");
    assert(_recursions == 0, "invariant");
    assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
    Self->_Stalled = 0;
    return;
  }

  assert(_owner != Self, "invariant");
  assert(_succ != Self, "invariant");
  assert(Self->is_Java_thread(), "invariant");
  JavaThread * jt = (JavaThread *) Self;
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(jt->thread_state() != _thread_blocked, "invariant");
  assert(this->object() != NULL, "invariant");
  assert(_count >= 0, "invariant");

  // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
  // Ensure the object-monitor relationship remains stable while there's contention.
  Atomic::inc(&_count);

  EventJavaMonitorEnter event;

  { // Change java thread status to indicate blocked on monitor enter.
    JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);

    DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
    if (JvmtiExport::should_post_monitor_contended_enter()) {
      JvmtiExport::post_monitor_contended_enter(jt, this);

      // The current thread does not yet own the monitor and does not
      // yet appear on any queues that could cause it to be made the successor.
      // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
      // handler cannot accidentally consume an unpark() meant for the
      // ParkEvent associated with this ObjectMonitor.
    }

    OSThreadContendState osts(Self->osthread());
    ThreadBlockInVM tbivm(jt);

    Self->set_current_pending_monitor(this);

    // TODO-FIXME: change the following for(;;) loop to straight-line code.
    for (;;) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition()
      // or java_suspend_self()

      EnterI(THREAD);

      if (!ExitSuspendEquivalent(jt)) break;

      // We have acquired the contended monitor, but while we were
      // waiting another thread suspended us. We don't want to enter
      // the monitor while suspended because that would surprise the
      // thread that suspended us.
      //
      _recursions = 0;
      _succ = NULL;
      exit(false, Self);

      jt->java_suspend_self();
    }
    Self->set_current_pending_monitor(NULL);

    // We cleared the pending monitor info since we've just gotten past
    // the enter-check-for-suspend dance and we now own the monitor free
    // and clear, i.e., it is no longer pending. The ThreadBlockInVM
    // destructor can go to a safepoint at the end of this block. If we
    // do a thread dump during that safepoint, then this thread will show
    // as having "-locked" the monitor, but the OS and java.lang.Thread
    // states will still report that the thread is blocked trying to
    // acquire it.
  }

  Atomic::dec(&_count);
  assert(_count >= 0, "invariant");
  Self->_Stalled = 0;

  // Must either set _recursions = 0 or ASSERT _recursions == 0.
  assert(_recursions == 0, "invariant");
  assert(_owner == Self, "invariant");
  assert(_succ != Self, "invariant");
  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");

  // The thread -- now the owner -- is back in vm mode.
  // Report the glorious news via JVMTI, DTrace and jvmstat.
  // The probe effect is non-trivial.  All the reportage occurs
  // while we hold the monitor, increasing the length of the critical
  // section.  Amdahl's parallel speedup law comes vividly into play.
  //
  // Another option might be to aggregate the events (thread local or
  // per-monitor aggregation) and defer reporting until a more opportune
  // time -- such as next time some thread encounters contention but has
  // yet to acquire the lock.  While spinning, that thread could increment
  // JVMStat counters, etc.

  DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
  if (JvmtiExport::should_post_monitor_contended_entered()) {
    JvmtiExport::post_monitor_contended_entered(jt, this);

    // The current thread already owns the monitor and is not going to
    // call park() for the remainder of the monitor enter protocol. So
    // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
    // event handler consumed an unpark() issued by the thread that
    // just exited the monitor.
  }

  if (event.should_commit()) {
    event.set_klass(((oop)this->object())->klass());
    event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid);
    event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
    event.commit();
  }

  if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
    ObjectMonitor::_sync_ContendedLockAttempts->inc();
  }
}


// Caveat: TryLock() is not necessarily serializing if it returns failure.
// Callers must compensate as needed.

int ObjectMonitor::TryLock(Thread * Self) {
  void * own = _owner;
  if (own != NULL) return 0;
  if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
    // Either guarantee _recursions == 0 or set _recursions = 0.
    assert(_recursions == 0, "invariant");
    assert(_owner == Self, "invariant");
    return 1;
  }
  // The lock had been free momentarily, but we lost the race to the lock.
  // Interference -- the CAS failed.
  // We can either return -1 or retry.
  // Retry doesn't make as much sense because the lock was just acquired.
  return -1;
}
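
// Callers interpret TryLock()'s tri-state result as follows: > 0 means the
// lock was acquired, 0 means it was observed held by another thread, and
// < 0 means the CAS failed due to interference.  Hence the idiom used
// throughout this file:
//
//   if (TryLock(Self) > 0) { /* Self now owns the monitor */ }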

#define MAX_RECHECK_INTERVAL 1000

void NOINLINE ObjectMonitor::EnterI(TRAPS) {
  Thread * const Self = THREAD;
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");

  // Try the lock - TATAS
  if (TryLock (Self) > 0) {
    assert(_succ != Self, "invariant");
    assert(_owner == Self, "invariant");
    assert(_Responsible != Self, "invariant");
    return;
  }

  DeferredInitialize();

  // We try one round of spinning *before* enqueueing Self.
  //
  // If the _owner is ready but OFFPROC we could use a YieldTo()
  // operation to donate the remainder of this thread's quantum
  // to the owner.  This has subtle but beneficial affinity
  // effects.

  if (TrySpin (Self) > 0) {
    assert(_owner == Self, "invariant");
    assert(_succ != Self, "invariant");
    assert(_Responsible != Self, "invariant");
    return;
  }

  // The Spin failed -- Enqueue and park the thread ...
  assert(_succ != Self, "invariant");
  assert(_owner != Self, "invariant");
  assert(_Responsible != Self, "invariant");

  // Enqueue "Self" on ObjectMonitor's _cxq.
  //
  // Node acts as a proxy for Self.
  // As an aside, if we were ever to rewrite the synchronization code mostly
  // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
  // Java objects.  This would avoid awkward lifecycle and liveness issues,
  // as well as eliminate a subset of ABA issues.
  // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.

  ObjectWaiter node(Self);
  Self->_ParkEvent->reset();
  node._prev   = (ObjectWaiter *) 0xBAD;
  node.TState  = ObjectWaiter::TS_CXQ;

  // Push "Self" onto the front of the _cxq.
  // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
  // Note that spinning tends to reduce the rate at which threads
  // enqueue and dequeue on EntryList|cxq.
  ObjectWaiter * nxt;
  for (;;) {
    node._next = nxt = _cxq;
    if (Atomic::cmpxchg_ptr(&node, &_cxq, nxt) == nxt) break;

    // Interference - the CAS failed because _cxq changed.  Just retry.
    // As an optional optimization we retry the lock.
    if (TryLock (Self) > 0) {
      assert(_succ != Self, "invariant");
      assert(_owner == Self, "invariant");
      assert(_Responsible != Self, "invariant");
      return;
    }
  }

  // Check for cxq|EntryList edge transition to non-null.  This indicates
  // the onset of contention.  While contention persists exiting threads
  // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
  // operations revert to the faster 1-0 mode.  This enter operation may interleave
  // with (race) a concurrent 1-0 exit operation, resulting in stranding, so we
  // arrange for one of the contending threads to use a timed park() operation
  // to detect and recover from the race.  (Stranding is a form of progress failure
  // where the monitor is unlocked but all the contending threads remain parked).
  // That is, at least one of the contended threads will periodically poll _owner.
  // One of the contending threads will become the designated "Responsible" thread.
  // The Responsible thread uses a timed park instead of a normal indefinite park
  // operation -- it periodically wakes and checks for and recovers from potential
  // strandings admitted by 1-0 exit operations.  We need at most one Responsible
  // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
  // be responsible for a monitor.
  //
  // Currently, one of the contended threads takes on the added role of "Responsible".
  // A viable alternative would be to use a dedicated "stranding checker" thread
  // that periodically iterated over all the threads (or active monitors) and unparked
  // successors where there was risk of stranding.  This would help eliminate the
  // timer scalability issues we see on some platforms as we'd only have one thread
  // -- the checker -- parked on a timer.

  if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
    // Try to assume the role of responsible thread for the monitor.
    // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
    Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
  }

  // The lock might have been released while this thread was occupied queueing
  // itself onto _cxq.  To close the race and avoid "stranding" and
  // progress-liveness failure we must resample-retry _owner before parking.
  // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
  // In this case the ST-MEMBAR is accomplished with CAS().
  //
  // TODO: Defer all thread state transitions until park-time.
  // Since state transitions are heavy and inefficient we'd like
  // to defer the state transitions until absolutely necessary,
  // and in doing so avoid some transitions ...

  TEVENT(Inflated enter - Contention);
  int nWakeups = 0;
  int recheckInterval = 1;

  for (;;) {

    if (TryLock(Self) > 0) break;
    assert(_owner != Self, "invariant");

    if ((SyncFlags & 2) && _Responsible == NULL) {
      Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
    }

    // park self
    if (_Responsible == Self || (SyncFlags & 1)) {
      TEVENT(Inflated enter - park TIMED);
      Self->_ParkEvent->park((jlong) recheckInterval);
      // Increase the recheckInterval, but clamp the value.
      recheckInterval *= 8;
      if (recheckInterval > MAX_RECHECK_INTERVAL) {
        recheckInterval = MAX_RECHECK_INTERVAL;
      }
    } else {
      TEVENT(Inflated enter - park UNTIMED);
      Self->_ParkEvent->park();
    }

    if (TryLock(Self) > 0) break;

    // The lock is still contested.
    // Keep a tally of the # of futile wakeups.
    // Note that the counter is not protected by a lock or updated by atomics.
    // That is by design - we trade "lossy" counters which are exposed to
    // races during updates for a lower probe effect.
    TEVENT(Inflated enter - Futile wakeup);
    if (ObjectMonitor::_sync_FutileWakeups != NULL) {
      ObjectMonitor::_sync_FutileWakeups->inc();
    }
    ++nWakeups;

    // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
    // We can defer clearing _succ until after the spin completes.
    // TrySpin() must tolerate being called with _succ == Self.
    // Try yet another round of adaptive spinning.
    if ((Knob_SpinAfterFutile & 1) && TrySpin(Self) > 0) break;

    // We can find that we were unpark()ed and redesignated _succ while
    // we were spinning.  That's harmless.  If we iterate and call park(),
    // park() will consume the event and return immediately and we'll
    // just spin again.  This pattern can repeat, leaving _succ to simply
    // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
    // Alternately, we can sample fired() here, and if set, forgo spinning
    // in the next iteration.

    if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
      Self->_ParkEvent->reset();
      OrderAccess::fence();
    }
    if (_succ == Self) _succ = NULL;

    // Invariant: after clearing _succ a thread *must* retry _owner before parking.
    OrderAccess::fence();
  }

  // Egress:
  // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
  // Normally we'll find Self on the EntryList.
  // From the perspective of the lock owner (this thread), the
  // EntryList is stable and cxq is prepend-only.
  // The head of cxq is volatile but the interior is stable.
  // In addition, Self.TState is stable.

  assert(_owner == Self, "invariant");
  assert(object() != NULL, "invariant");
  // I'd like to write:
  //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
  // but as we're at a safepoint that's not safe.

  UnlinkAfterAcquire(Self, &node);
  if (_succ == Self) _succ = NULL;

  assert(_succ != Self, "invariant");
  if (_Responsible == Self) {
    _Responsible = NULL;
    OrderAccess::fence(); // Dekker pivot-point

    // We may leave threads on cxq|EntryList without a designated
    // "Responsible" thread.  This is benign.  When this thread subsequently
    // exits the monitor it can "see" such preexisting "old" threads --
    // threads that arrived on the cxq|EntryList before the fence, above --
    // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
    // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
    // non-null and elect a new "Responsible" timer thread.
    //
    // This thread executes:
    //    ST Responsible=null; MEMBAR    (in enter epilogue - here)
    //    LD cxq|EntryList               (in subsequent exit)
    //
    // Entering threads in the slow/contended path execute:
    //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
    //    The (ST cxq; MEMBAR) is accomplished with CAS().
    //
    // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
    // exit operation from floating above the ST Responsible=null.
  }

  // We've acquired ownership with CAS().
  // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
  // But since the CAS() this thread may have also stored into _succ,
  // EntryList, cxq or Responsible.  These meta-data updates must be
  // visible *before* this thread subsequently drops the lock.
  // Consider what could occur if we didn't enforce this constraint --
  // STs to monitor meta-data and user-data could reorder with (become
  // visible after) the ST in exit that drops ownership of the lock.
  // Some other thread could then acquire the lock, but observe inconsistent
  // or old monitor meta-data and heap data.  That violates the JMM.
  // To that end, the 1-0 exit() operation must have at least STST|LDST
  // "release" barrier semantics.  Specifically, there must be at least a
  // STST|LDST barrier in exit() before the ST of null into _owner that drops
  // the lock.  The barrier ensures that changes to monitor meta-data and data
  // protected by the lock will be visible before we release the lock, and
  // therefore before some other thread (CPU) has a chance to acquire the lock.
  // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
  //
  // Critically, any prior STs to _succ or EntryList must be visible before
  // the ST of null into _owner in the *subsequent* (following) corresponding
  // monitorexit.  Recall, too, that in 1-0 mode monitorexit does not necessarily
  // execute a serializing instruction.

  if (SyncFlags & 8) {
    OrderAccess::fence();
  }
  return;
}

// ReenterI() is a specialized inline form of the latter half of the
// contended slow-path from EnterI().  We use ReenterI() only for
// monitor reentry in wait().
//
// In the future we should reconcile EnterI() and ReenterI(), adding
// Knob_Reset and Knob_SpinAfterFutile support and restructuring the
// loop accordingly.

void NOINLINE ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
  assert(Self != NULL, "invariant");
  assert(SelfNode != NULL, "invariant");
  assert(SelfNode->_thread == Self, "invariant");
  assert(_waiters > 0, "invariant");
  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
  assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
  JavaThread * jt = (JavaThread *) Self;

  int nWakeups = 0;
  for (;;) {
    ObjectWaiter::TStates v = SelfNode->TState;
    guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
    assert(_owner != Self, "invariant");

    if (TryLock(Self) > 0) break;
    if (TrySpin(Self) > 0) break;

    TEVENT(Wait Reentry - parking);

    // State transition wrappers around park() ...
    // ReenterI() wisely defers state transitions until
    // it's clear we must park the thread.
    {
      OSThreadContendState osts(Self->osthread());
      ThreadBlockInVM tbivm(jt);

      // cleared by handle_special_suspend_equivalent_condition()
      // or java_suspend_self()
      jt->set_suspend_equivalent();
      if (SyncFlags & 1) {
        Self->_ParkEvent->park((jlong)MAX_RECHECK_INTERVAL);
      } else {
        Self->_ParkEvent->park();
      }

      // were we externally suspended while we were waiting?
      for (;;) {
        if (!ExitSuspendEquivalent(jt)) break;
        if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
        jt->java_suspend_self();
        jt->set_suspend_equivalent();
      }
    }

    // Try again, but just so we distinguish between futile wakeups and
    // successful wakeups.  The following test isn't algorithmically
    // necessary, but it helps us maintain sensible statistics.
    if (TryLock(Self) > 0) break;

    // The lock is still contested.
    // Keep a tally of the # of futile wakeups.
    // Note that the counter is not protected by a lock or updated by atomics.
    // That is by design - we trade "lossy" counters which are exposed to
    // races during updates for a lower probe effect.
    TEVENT(Wait Reentry - futile wakeup);
    ++nWakeups;

    // Assuming this is not a spurious wakeup we'll normally
    // find that _succ == Self.
    if (_succ == Self) _succ = NULL;

    // Invariant: after clearing _succ a contending thread
    // *must* retry _owner before parking.
    OrderAccess::fence();

    if (ObjectMonitor::_sync_FutileWakeups != NULL) {
      ObjectMonitor::_sync_FutileWakeups->inc();
    }
  }

  // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
  // Normally we'll find Self on the EntryList.
  // Unlinking from the EntryList is constant-time and atomic-free.
  // From the perspective of the lock owner (this thread), the
  // EntryList is stable and cxq is prepend-only.
  // The head of cxq is volatile but the interior is stable.
  // In addition, Self.TState is stable.

  assert(_owner == Self, "invariant");
  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
  UnlinkAfterAcquire(Self, SelfNode);
  if (_succ == Self) _succ = NULL;
  assert(_succ != Self, "invariant");
  SelfNode->TState = ObjectWaiter::TS_RUN;
  OrderAccess::fence();      // see comments at the end of EnterI()
}

// By convention we unlink a contending thread from EntryList|cxq immediately
// after the thread acquires the lock in ::enter().  Equally, we could defer
// unlinking the thread until ::exit()-time.

void ObjectMonitor::UnlinkAfterAcquire(Thread *Self, ObjectWaiter *SelfNode) {
  assert(_owner == Self, "invariant");
  assert(SelfNode->_thread == Self, "invariant");

  if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
    // Normal case: remove Self from the DLL EntryList.
    // This is a constant-time operation.
    ObjectWaiter * nxt = SelfNode->_next;
    ObjectWaiter * prv = SelfNode->_prev;
    if (nxt != NULL) nxt->_prev = prv;
    if (prv != NULL) prv->_next = nxt;
    if (SelfNode == _EntryList) _EntryList = nxt;
    assert(nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant");
    assert(prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant");
    TEVENT(Unlink from EntryList);
  } else {
    assert(SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant");
    // Inopportune interleaving -- Self is still on the cxq.
    // This usually means the enqueue of self raced an exiting thread.
    // Normally we'll find Self near the front of the cxq, so
    // dequeueing is typically fast.  If need be we can accelerate
    // this with some MCS/CHL-like bidirectional list hints and advisory
    // back-links so dequeueing from the interior will normally operate
    // in constant-time.
    // Dequeue Self from either the head (with CAS) or from the interior
    // with a linear-time scan and normal non-atomic memory operations.
    // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
    // and then unlink Self from EntryList.  We have to drain eventually,
    // so it might as well be now.

    ObjectWaiter * v = _cxq;
    assert(v != NULL, "invariant");
    if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
      // The CAS above can fail from interference IFF a "RAT" arrived.
      // In that case Self must be in the interior and can no longer be
      // at the head of cxq.
      if (v == SelfNode) {
        assert(_cxq != v, "invariant");
        v = _cxq;          // CAS above failed - start scan at head of list
      }
      ObjectWaiter * p;
      ObjectWaiter * q = NULL;
      for (p = v; p != NULL && p != SelfNode; p = p->_next) {
        q = p;
        assert(p->TState == ObjectWaiter::TS_CXQ, "invariant");
      }
      assert(v != SelfNode, "invariant");
      assert(p == SelfNode, "Node not found on cxq");
      assert(p != _cxq, "invariant");
      assert(q != NULL, "invariant");
      assert(q->_next == p, "invariant");
      q->_next = p->_next;
    }
    TEVENT(Unlink from cxq);
  }

#ifdef ASSERT
  // Diagnostic hygiene ...
  SelfNode->_prev  = (ObjectWaiter *) 0xBAD;
  SelfNode->_next  = (ObjectWaiter *) 0xBAD;
  SelfNode->TState = ObjectWaiter::TS_RUN;
#endif
}

// -----------------------------------------------------------------------------
// Exit support
//
// exit()
// ~~~~~~
// Note that the collector can't reclaim the objectMonitor or deflate
// the object out from underneath the thread calling ::exit() as the
// thread calling ::exit() never transitions to a stable state.
// This inhibits GC, which in turn inhibits asynchronous (and
// inopportune) reclamation of "this".
//
// We'd like to assert that: (THREAD->thread_state() != _thread_blocked);
// There's one exception to the claim above, however.  EnterI() can call
// exit() to drop a lock if the acquirer has been externally suspended.
// In that case exit() is called with _thread_state as _thread_blocked,
// but the monitor's _count field is > 0, which inhibits reclamation.
//
// 1-0 exit
// ~~~~~~~~
// ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
// the fast-path operators have been optimized so the common ::exit()
// operation is 1-0.  See i486.ad fast_unlock(), for instance.
// The code emitted by fast_unlock() elides the usual MEMBAR.  This
// greatly improves latency -- MEMBAR and CAS having considerable local
// latency on modern processors -- but at the cost of "stranding".  Absent the
// MEMBAR, a thread in fast_unlock() can race a thread in the slow
// ::enter() path, resulting in the entering thread being stranded
// and a progress-liveness failure.  Stranding is extremely rare.
// We use timers (timed park operations) & periodic polling to detect
// and recover from stranding.  Potentially stranded threads periodically
// wake up and poll the lock.  See the usage of the _Responsible variable.
//
// The CAS() in enter provides for safety and exclusion, while the CAS or
// MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
// eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
// We detect and recover from stranding with timers.
//
// If a thread transiently strands it'll park until (a) another
// thread acquires the lock and then drops the lock, at which time the
// exiting thread will notice and unpark the stranded thread, or, (b)
// the timer expires.  If the lock is high traffic then the stranding latency
// will be low due to (a).  If the lock is low traffic then the odds of
// stranding are lower, although the worst-case stranding latency
// is longer.  Critically, we don't want to put excessive load in the
// platform's timer subsystem.  We want to minimize both the timer injection
// rate (timers created/sec) as well as the number of timers active at
// any one time.  (More precisely, we want to minimize timer-seconds, which is
// the integral of the # of active timers at any instant over time).
// Both impinge on OS scalability.  Given that, at most one thread parked on
// a monitor will use a timer.
//
// There is also the risk of a futile wake-up. If we drop the lock
// another thread can reacquire the lock immediately, and we can
// then wake a thread unnecessarily. This is benign, and we've
// structured the code so the windows are short and the frequency
// of such futile wakeups is low.
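//
// Schematically, the stranding race admitted by 1-0 exit (a simplified
// sketch; each column is one thread, time flows downward):
//
//   exiting thread                    entering thread
//   --------------                    ---------------
//   ST  _owner = NULL                 CAS push Self onto _cxq
//   LD  _cxq, _EntryList              LD  _owner
//
// Without a MEMBAR between its ST and LD, the exiting thread's LD can be
// satisfied before the enterer's push becomes visible, while the enterer
// sees the pre-release _owner value: the exiter walks away from an
// apparently empty queue and the enterer parks.  The timed park taken by
// the "Responsible" thread bounds how long such a stranding can persist.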

void NOINLINE ObjectMonitor::exit(bool not_suspended, TRAPS) {
  Thread * const Self = THREAD;
  if (THREAD != _owner) {
    if (THREAD->is_lock_owned((address) _owner)) {
      // Transmute _owner from a BasicLock pointer to a Thread address.
      // We don't need to hold _mutex for this transition.
      // Non-null to non-null is safe as long as all readers can
      // tolerate either flavor.
      assert(_recursions == 0, "invariant");
      _owner = THREAD;
      _recursions = 0;
    } else {
      // Apparent unbalanced locking ...
      // Naively we'd like to throw IllegalMonitorStateException.
      // As a practical matter we can neither allocate nor throw an
      // exception as ::exit() can be called from leaf routines.
      // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
      // Upon deeper reflection, however, in a properly run JVM the only
      // way we should encounter this situation is in the presence of
      // unbalanced JNI locking. TODO: CheckJNICalls.
      // See also: CR4414101
      TEVENT(Exit - Throw IMSX);
      assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");
      return;
    }
  }

  if (_recursions != 0) {
    _recursions--;        // this is simple recursive enter
    TEVENT(Inflated exit - recursive);
    return;
  }

  // Invariant: after setting Responsible=null a thread must execute
  // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
  if ((SyncFlags & 4) == 0) {
    _Responsible = NULL;
  }

#if INCLUDE_TRACE
  // get the owner's thread id for the MonitorEnter event
  // if it is enabled and the thread isn't suspended
  if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
    _previous_owner_tid = SharedRuntime::get_java_tid(Self);
  }
#endif

  for (;;) {
    assert(THREAD == _owner, "invariant");


    if (Knob_ExitPolicy == 0) {
      // release semantics: prior loads and stores from within the critical section
      // must not float (reorder) past the following store that drops the lock.
      // On SPARC that requires MEMBAR #loadstore|#storestore.
      // But of course in TSO #loadstore|#storestore is not required.
      // I'd like to write one of the following:
      // A.  OrderAccess::release() ; _owner = NULL
      // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
      // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
      // store into a _dummy variable.  That store is not needed, but can result
      // in massive wasteful coherency traffic on classic SMP systems.
      // Instead, I use release_store(), which is implemented as just a simple
      // ST on x64, x86 and SPARC.
      OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
      OrderAccess::storeload();                        // See if we need to wake a successor
      if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
        TEVENT(Inflated exit - simple egress);
        return;
      }
      TEVENT(Inflated exit - complex egress);
      // Other threads are blocked trying to acquire the lock.

      // Normally the exiting thread is responsible for ensuring succession,
      // but if other successors are ready or other entering threads are spinning
      // then this thread can simply store NULL into _owner and exit without
      // waking a successor.  The existence of spinners or ready successors
      // guarantees proper succession (liveness).  Responsibility passes to the
      // ready or running successors.  The exiting thread delegates the duty.
      // More precisely, if a successor already exists this thread is absolved
      // of the responsibility of waking (unparking) one.
      //
      // The _succ variable is critical to reducing futile wakeup frequency.
      // _succ identifies the "heir presumptive" thread that has been made
      // ready (unparked) but that has not yet run.  We need only one such
      // successor thread to guarantee progress.
      // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
      // section 3.3 "Futile Wakeup Throttling" for details.
      //
      // Note that spinners in Enter() also set _succ non-null.
      // In the current implementation spinners opportunistically set
      // _succ so that exiting threads might avoid waking a successor.
      // Another less appealing alternative would be for the exiting thread
      // to drop the lock and then spin briefly to see if a spinner managed
      // to acquire the lock.  If so, the exiting thread could exit
      // immediately without waking a successor, otherwise the exiting
      // thread would need to dequeue and wake a successor.
      // (Note that we'd need to make the post-drop spin short, but no
      // shorter than the worst-case round-trip cache-line migration time.
      // The dropped lock needs to become visible to the spinner, and then
      // the acquisition of the lock by the spinner must become visible to
      // the exiting thread).

      // It appears that an heir-presumptive (successor) must be made ready.
      // Only the current lock owner can manipulate the EntryList or
      // drain _cxq, so we need to reacquire the lock.  If we fail
      // to reacquire the lock the responsibility for ensuring succession
      // falls to the new owner.
      //
      if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
        return;
      }
      TEVENT(Exit - Reacquired);
    } else {
      if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
        OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
        OrderAccess::storeload();
        // Ratify the previously observed values.
        if (_cxq == NULL || _succ != NULL) {
          TEVENT(Inflated exit - simple egress);
          return;
        }

        // inopportune interleaving -- the exiting thread (this thread)
        // in the fast-exit path raced an entering thread in the slow-enter
        // path.
        // We have two choices:
        // A.  Try to reacquire the lock.
        //     If the CAS() fails return immediately, otherwise
        //     we either restart/rerun the exit operation, or simply
        //     fall-through into the code below which wakes a successor.
        // B.  If the elements forming the EntryList|cxq are TSM
        //     we could simply unpark() the lead thread and return
        //     without having set _succ.
        if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
          TEVENT(Inflated exit - reacquired succeeded);
          return;
        }
        TEVENT(Inflated exit - reacquired failed);
      } else {
        TEVENT(Inflated exit - complex egress);
      }
    }

    guarantee(_owner == THREAD, "invariant");

    ObjectWaiter * w = NULL;
    int QMode = Knob_QMode;

    if (QMode == 2 && _cxq != NULL) {
      // QMode == 2 : cxq has precedence over EntryList.
      // Try to directly wake a successor from the cxq.
      // If successful, the successor will need to unlink itself from cxq.
      w = _cxq;
      assert(w != NULL, "invariant");
      assert(w->TState == ObjectWaiter::TS_CXQ, "Invariant");
      ExitEpilog(Self, w);
      return;
    }

    if (QMode == 3 && _cxq != NULL) {
      // Aggressively drain cxq into EntryList at the first opportunity.
      // This policy ensures that recently-run threads live at the head of EntryList.
      // Drain _cxq into EntryList - bulk transfer.
      // First, detach _cxq.
      // The following loop is tantamount to: w = swap(&cxq, NULL)
      w = _cxq;
      for (;;) {
        assert(w != NULL, "Invariant");
        ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
        if (u == w) break;
        w = u;
      }
      assert(w != NULL, "invariant");

      ObjectWaiter * q = NULL;
      ObjectWaiter * p;
      for (p = w; p != NULL; p = p->_next) {
        guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
        p->TState = ObjectWaiter::TS_ENTER;
        p->_prev = q;
        q = p;
      }

      // Append the RATs to the EntryList
      // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
      ObjectWaiter * Tail;
      for (Tail = _EntryList; Tail != NULL && Tail->_next != NULL;
           Tail = Tail->_next)
        /* empty */;
      if (Tail == NULL) {
        _EntryList = w;
      } else {
        Tail->_next = w;
        w->_prev = Tail;
      }

      // Fall thru into code that tries to wake a successor from EntryList
    }

    if (QMode == 4 && _cxq != NULL) {
      // Aggressively drain cxq into EntryList at the first opportunity.
      // This policy ensures that recently-run threads live at the head of EntryList.

      // Drain _cxq into EntryList - bulk transfer.
      // First, detach _cxq.
      // The following loop is tantamount to: w = swap(&cxq, NULL)
      w = _cxq;
      for (;;) {
        assert(w != NULL, "Invariant");
        ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
        if (u == w) break;
        w = u;
      }
      assert(w != NULL, "invariant");

      ObjectWaiter * q = NULL;
      ObjectWaiter * p;
      for (p = w; p != NULL; p = p->_next) {
        guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
        p->TState = ObjectWaiter::TS_ENTER;
        p->_prev = q;
        q = p;
      }

      // Prepend the RATs to the EntryList
      if (_EntryList != NULL) {
        q->_next = _EntryList;
        _EntryList->_prev = q;
      }
      _EntryList = w;

      // Fall thru into code that tries to wake a successor from EntryList
    }

    w = _EntryList;
    if (w != NULL) {
      // I'd like to write: guarantee (w->_thread != Self).
      // But in practice an exiting thread may find itself on the EntryList.
      // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
      // then calls exit().  Exit releases the lock by setting O._owner to NULL.
      // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
      // notify() operation moves T1 from O's waitset to O's EntryList.  T2 then
      // releases the lock "O".  T1 resumes immediately after the ST of null into
      // _owner, above.  T1 notices that the EntryList is populated, so it
      // reacquires the lock and then finds itself on the EntryList.
      // Given all that, we have to tolerate the circumstance where "w" is
      // associated with Self.
      assert(w->TState == ObjectWaiter::TS_ENTER, "invariant");
      ExitEpilog(Self, w);
      return;
    }
1166 
1167     // If we find that both _cxq and EntryList are null then just
1168     // re-run the exit protocol from the top.
1169     w = _cxq;
1170     if (w == NULL) continue;
1171 
1172     // Drain _cxq into EntryList - bulk transfer.
1173     // First, detach _cxq.
1174     // The following loop is tantamount to: w = swap(&cxq, NULL)
1175     for (;;) {
1176       assert(w != NULL, "Invariant");
1177       ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
1178       if (u == w) break;
1179       w = u;
1180     }
1181     TEVENT(Inflated exit - drain cxq into EntryList);
1182 
1183     assert(w != NULL, "invariant");
1184     assert(_EntryList == NULL, "invariant");
1185 
1186     // Convert the LIFO SLL anchored by _cxq into a DLL.
1187     // The list reorganization step operates in O(LENGTH(w)) time.
1188     // It's critical that this step operate quickly as
1189     // "Self" still holds the outer-lock, restricting parallelism
1190     // and effectively lengthening the critical section.
1191     // Invariant: s chases t chases u.
1192     // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
1193     // we have faster access to the tail.
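    //
    // An illustrative sketch (commentary only): suppose threads pushed
    // themselves onto _cxq in the order A, then B, then C.  The detached
    // SLL is then
    //   w: C -> B -> A              (LIFO: most recent pusher at the head)
    // The QMode == 1 arm below reverses the order as it forms the DLL:
    //   _EntryList: A <-> B <-> C   (oldest waiter at the head)
    // whereas the QMode == 0/2 arm preserves the LIFO order:
    //   _EntryList: C <-> B <-> A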
1194 
1195     if (QMode == 1) {
      // QMode == 1 : drain cxq to EntryList, reversing the order of the
      // list as we transfer it.
1198       ObjectWaiter * s = NULL;
1199       ObjectWaiter * t = w;
1200       ObjectWaiter * u = NULL;
1201       while (t != NULL) {
1202         guarantee(t->TState == ObjectWaiter::TS_CXQ, "invariant");
1203         t->TState = ObjectWaiter::TS_ENTER;
1204         u = t->_next;
1205         t->_prev = u;
1206         t->_next = s;
1207         s = t;
1208         t = u;
1209       }
1210       _EntryList  = s;
1211       assert(s != NULL, "invariant");
1212     } else {
1213       // QMode == 0 or QMode == 2
1214       _EntryList = w;
1215       ObjectWaiter * q = NULL;
1216       ObjectWaiter * p;
1217       for (p = w; p != NULL; p = p->_next) {
1218         guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
1219         p->TState = ObjectWaiter::TS_ENTER;
1220         p->_prev = q;
1221         q = p;
1222       }
1223     }
1224 
1225     // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
1226     // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
1227 
1228     // See if we can abdicate to a spinner instead of waking a thread.
1229     // A primary goal of the implementation is to reduce the
1230     // context-switch rate.
1231     if (_succ != NULL) continue;
1232 
1233     w = _EntryList;
1234     if (w != NULL) {
1235       guarantee(w->TState == ObjectWaiter::TS_ENTER, "invariant");
1236       ExitEpilog(Self, w);
1237       return;
1238     }
1239   }
1240 }
1241 
1242 // ExitSuspendEquivalent:
// A faster alternative to handle_special_suspend_equivalent_condition()
1244 //
1245 // handle_special_suspend_equivalent_condition() unconditionally
1246 // acquires the SR_lock.  On some platforms uncontended MutexLocker()
1247 // operations have high latency.  Note that in ::enter() we call HSSEC
1248 // while holding the monitor, so we effectively lengthen the critical sections.
1249 //
1250 // There are a number of possible solutions:
1251 //
1252 // A.  To ameliorate the problem we might also defer state transitions
1253 //     to as late as possible -- just prior to parking.
1254 //     Given that, we'd call HSSEC after having returned from park(),
1255 //     but before attempting to acquire the monitor.  This is only a
1256 //     partial solution.  It avoids calling HSSEC while holding the
1257 //     monitor (good), but it still increases successor reacquisition latency --
1258 //     the interval between unparking a successor and the time the successor
1259 //     resumes and retries the lock.  See ReenterI(), which defers state transitions.
//     If we use this technique we can also avoid the EnterI()-exit() loop
1261 //     in ::enter() where we iteratively drop the lock and then attempt
1262 //     to reacquire it after suspending.
1263 //
1264 // B.  In the future we might fold all the suspend bits into a
1265 //     composite per-thread suspend flag and then update it with CAS().
1266 //     Alternately, a Dekker-like mechanism with multiple variables
1267 //     would suffice:
1268 //       ST Self->_suspend_equivalent = false
1269 //       MEMBAR
//       LD Self->_suspend_flags
1271 //
1272 // UPDATE 2007-10-6: since I've replaced the native Mutex/Monitor subsystem
1273 // with a more efficient implementation, the need to use "FastHSSEC" has
1274 // decreased. - Dave
1275 
1276 
1277 bool ObjectMonitor::ExitSuspendEquivalent(JavaThread * jSelf) {
1278   const int Mode = Knob_FastHSSEC;
1279   if (Mode && !jSelf->is_external_suspend()) {
1280     assert(jSelf->is_suspend_equivalent(), "invariant");
1281     jSelf->clear_suspend_equivalent();
1282     if (2 == Mode) OrderAccess::storeload();
1283     if (!jSelf->is_external_suspend()) return false;
1284     // We raced a suspension -- fall thru into the slow path
1285     TEVENT(ExitSuspendEquivalent - raced);
1286     jSelf->set_suspend_equivalent();
1287   }
1288   return jSelf->handle_special_suspend_equivalent_condition();
1289 }
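
// Mapping the fast path above onto the Dekker-like sketch in the preceding
// block comment: clear_suspend_equivalent() is the ST, the optional
// storeload() (Mode == 2) is the MEMBAR, and the recheck of
// is_external_suspend() is the LD of the suspend flags.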
1290 
1291 
1292 void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
1293   assert(_owner == Self, "invariant");
1294 
  // Exit protocol:
  // 1. ST _succ = wakee
  // 2. membar #loadstore|#storestore;
  // 3. ST _owner = NULL
  // 4. unpark(wakee)
1300 
1301   _succ = Knob_SuccEnabled ? Wakee->_thread : NULL;
1302   ParkEvent * Trigger = Wakee->_event;
1303 
1304   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
1305   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
1306   // out-of-scope (non-extant).
1307   Wakee  = NULL;
1308 
1309   // Drop the lock
1310   OrderAccess::release_store_ptr(&_owner, NULL);
1311   OrderAccess::fence();                               // ST _owner vs LD in unpark()
1312 
1313   if (SafepointSynchronize::do_call_back()) {
1314     TEVENT(unpark before SAFEPOINT);
1315   }
1316 
1317   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
1318   Trigger->unpark();
1319 
1320   // Maintain stats and report events to JVMTI
1321   if (ObjectMonitor::_sync_Parks != NULL) {
1322     ObjectMonitor::_sync_Parks->inc();
1323   }
1324 }
1325 
1326 
1327 // -----------------------------------------------------------------------------
1328 // Class Loader deadlock handling.
1329 //
// complete_exit exits a lock, returning the recursion count
1331 // complete_exit/reenter operate as a wait without waiting
1332 // complete_exit requires an inflated monitor
1333 // The _owner field is not always the Thread addr even with an
1334 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1335 // thread due to contention.
1336 intptr_t ObjectMonitor::complete_exit(TRAPS) {
1337   Thread * const Self = THREAD;
1338   assert(Self->is_Java_thread(), "Must be Java thread!");
1339   JavaThread *jt = (JavaThread *)THREAD;
1340 
1341   DeferredInitialize();
1342 
1343   if (THREAD != _owner) {
1344     if (THREAD->is_lock_owned ((address)_owner)) {
1345       assert(_recursions == 0, "internal state error");
1346       _owner = THREAD;   // Convert from basiclock addr to Thread addr
1347       _recursions = 0;
1348     }
1349   }
1350 
1351   guarantee(Self == _owner, "complete_exit not owner");
1352   intptr_t save = _recursions; // record the old recursion count
1353   _recursions = 0;        // set the recursion level to be 0
1354   exit(true, Self);           // exit the monitor
1355   guarantee(_owner != Self, "invariant");
1356   return save;
1357 }
1358 
1359 // reenter() enters a lock and sets recursion count
1360 // complete_exit/reenter operate as a wait without waiting
1361 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1362   Thread * const Self = THREAD;
1363   assert(Self->is_Java_thread(), "Must be Java thread!");
1364   JavaThread *jt = (JavaThread *)THREAD;
1365 
1366   guarantee(_owner != Self, "reenter already owner");
1367   enter(THREAD);       // enter the monitor
1368   guarantee(_recursions == 0, "reenter recursion");
1369   _recursions = recursions;
1370   return;
1371 }
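
// An illustrative (hypothetical) caller pattern for the complete_exit/reenter
// pair -- a sketch for clarity, not an actual call site:
//
//   intptr_t saved = monitor->complete_exit(THREAD);  // fully release; remember count
//   ... perform work that must not hold this monitor ...
//   monitor->reenter(saved, THREAD);                   // reacquire; restore count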
1372 
1373 
1374 // -----------------------------------------------------------------------------
1375 // A macro is used below because there may already be a pending
1376 // exception which should not abort the execution of the routines
1377 // which use this (which is why we don't put this into check_slow and
1378 // call it with a CHECK argument).
1379 
1380 #define CHECK_OWNER()                                                       \
1381   do {                                                                      \
1382     if (THREAD != _owner) {                                                 \
1383       if (THREAD->is_lock_owned((address) _owner)) {                        \
1384         _owner = THREAD;  /* Convert from basiclock addr to Thread addr */  \
1385         _recursions = 0;                                                    \
1386       } else {                                                              \
1387         TEVENT(Throw IMSX);                                                 \
1388         THROW(vmSymbols::java_lang_IllegalMonitorStateException());         \
1389       }                                                                     \
1390     }                                                                       \
1391   } while (false)
1392 
// check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
1394 // TODO-FIXME: remove check_slow() -- it's likely dead.
1395 
1396 void ObjectMonitor::check_slow(TRAPS) {
1397   TEVENT(check_slow - throw IMSX);
1398   assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
1399   THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
1400 }
1401 
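// Adjust() atomically adds "dx" to "*adr" with a CAS retry loop and returns
// the value observed immediately before the successful update.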
1402 static int Adjust(volatile int * adr, int dx) {
1403   int v;
1404   for (v = *adr; Atomic::cmpxchg(v + dx, adr, v) != v; v = *adr) /* empty */;
1405   return v;
1406 }
1407 
1408 // helper method for posting a monitor wait event
1409 void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event,
1410                                             jlong notifier_tid,
1411                                             jlong timeout,
1412                                             bool timedout) {
1413   event->set_klass(((oop)this->object())->klass());
1414   event->set_timeout((TYPE_ULONG)timeout);
1415   event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
1416   event->set_notifier((TYPE_OSTHREAD)notifier_tid);
1417   event->set_timedOut((TYPE_BOOLEAN)timedout);
1418   event->commit();
1419 }
1420 
1421 // -----------------------------------------------------------------------------
1422 // Wait/Notify/NotifyAll
1423 //
1424 // Note: a subset of changes to ObjectMonitor::wait()
1425 // will need to be replicated in complete_exit
1426 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
1427   Thread * const Self = THREAD;
1428   assert(Self->is_Java_thread(), "Must be Java thread!");
1429   JavaThread *jt = (JavaThread *)THREAD;
1430 
1431   DeferredInitialize();
1432 
1433   // Throw IMSX or IEX.
1434   CHECK_OWNER();
1435 
1436   EventJavaMonitorWait event;
1437 
1438   // check for a pending interrupt
1439   if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1440     // post monitor waited event.  Note that this is past-tense, we are done waiting.
1441     if (JvmtiExport::should_post_monitor_waited()) {
      // Note: 'false' is passed here because the wait did not time
      // out; it ended due to a thread interrupt.
1444       JvmtiExport::post_monitor_waited(jt, this, false);
1445 
1446       // In this short circuit of the monitor wait protocol, the
1447       // current thread never drops ownership of the monitor and
1448       // never gets added to the wait queue so the current thread
1449       // cannot be made the successor. This means that the
1450       // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
1451       // consume an unpark() meant for the ParkEvent associated with
1452       // this ObjectMonitor.
1453     }
1454     if (event.should_commit()) {
1455       post_monitor_wait_event(&event, 0, millis, false);
1456     }
1457     TEVENT(Wait - Throw IEX);
1458     THROW(vmSymbols::java_lang_InterruptedException());
1459     return;
1460   }
1461 
1462   TEVENT(Wait);
1463 
1464   assert(Self->_Stalled == 0, "invariant");
1465   Self->_Stalled = intptr_t(this);
1466   jt->set_current_waiting_monitor(this);
1467 
1468   // create a node to be put into the queue
1469   // Critically, after we reset() the event but prior to park(), we must check
1470   // for a pending interrupt.
1471   ObjectWaiter node(Self);
1472   node.TState = ObjectWaiter::TS_WAIT;
1473   Self->_ParkEvent->reset();
1474   OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
1475 
  // Enter the waiting queue, which is a circular doubly linked list in this case
  // but it could be a priority queue or any other data structure.
  // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
  // by the owner of the monitor *except* in the case where park()
  // returns because of a timeout or interrupt.  Contention is exceptionally rare
  // so we use a simple spin-lock instead of a heavier-weight blocking lock.
1482 
1483   Thread::SpinAcquire(&_WaitSetLock, "WaitSet - add");
1484   AddWaiter(&node);
1485   Thread::SpinRelease(&_WaitSetLock);
1486 
1487   if ((SyncFlags & 4) == 0) {
1488     _Responsible = NULL;
1489   }
1490   intptr_t save = _recursions; // record the old recursion count
1491   _waiters++;                  // increment the number of waiters
  _recursions = 0;             // set the recursion level to be 0
1493   exit(true, Self);                    // exit the monitor
1494   guarantee(_owner != Self, "invariant");
1495 
1496   // The thread is on the WaitSet list - now park() it.
1497   // On MP systems it's conceivable that a brief spin before we park
1498   // could be profitable.
1499   //
1500   // TODO-FIXME: change the following logic to a loop of the form
1501   //   while (!timeout && !interrupted && _notified == 0) park()
1502 
1503   int ret = OS_OK;
1504   int WasNotified = 0;
1505   { // State transition wrappers
1506     OSThread* osthread = Self->osthread();
1507     OSThreadWaitState osts(osthread, true);
1508     {
1509       ThreadBlockInVM tbivm(jt);
1510       // Thread is in thread_blocked state and oop access is unsafe.
1511       jt->set_suspend_equivalent();
1512 
1513       if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
1514         // Intentionally empty
1515       } else if (node._notified == 0) {
1516         if (millis <= 0) {
1517           Self->_ParkEvent->park();
1518         } else {
1519           ret = Self->_ParkEvent->park(millis);
1520         }
1521       }
1522 
1523       // were we externally suspended while we were waiting?
1524       if (ExitSuspendEquivalent (jt)) {
1525         // TODO-FIXME: add -- if succ == Self then succ = null.
1526         jt->java_suspend_self();
1527       }
1528 
1529     } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
1530 
1531     // Node may be on the WaitSet, the EntryList (or cxq), or in transition
1532     // from the WaitSet to the EntryList.
1533     // See if we need to remove Node from the WaitSet.
1534     // We use double-checked locking to avoid grabbing _WaitSetLock
1535     // if the thread is not on the wait queue.
1536     //
1537     // Note that we don't need a fence before the fetch of TState.
    // In the worst case we'll fetch an old, stale value of TS_WAIT previously
    // written by this thread. (perhaps the fetch might even be satisfied
1540     // by a look-aside into the processor's own store buffer, although given
1541     // the length of the code path between the prior ST and this load that's
1542     // highly unlikely).  If the following LD fetches a stale TS_WAIT value
1543     // then we'll acquire the lock and then re-fetch a fresh TState value.
1544     // That is, we fail toward safety.
1545 
1546     if (node.TState == ObjectWaiter::TS_WAIT) {
1547       Thread::SpinAcquire(&_WaitSetLock, "WaitSet - unlink");
1548       if (node.TState == ObjectWaiter::TS_WAIT) {
1549         DequeueSpecificWaiter(&node);       // unlink from WaitSet
1550         assert(node._notified == 0, "invariant");
1551         node.TState = ObjectWaiter::TS_RUN;
1552       }
1553       Thread::SpinRelease(&_WaitSetLock);
1554     }
1555 
    // The thread is now either off-list (TS_RUN),
1557     // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
1558     // The Node's TState variable is stable from the perspective of this thread.
1559     // No other threads will asynchronously modify TState.
1560     guarantee(node.TState != ObjectWaiter::TS_WAIT, "invariant");
1561     OrderAccess::loadload();
1562     if (_succ == Self) _succ = NULL;
1563     WasNotified = node._notified;
1564 
1565     // Reentry phase -- reacquire the monitor.
1566     // re-enter contended monitor after object.wait().
1567     // retain OBJECT_WAIT state until re-enter successfully completes
1568     // Thread state is thread_in_vm and oop access is again safe,
1569     // although the raw address of the object may have changed.
1570     // (Don't cache naked oops over safepoints, of course).
1571 
1572     // post monitor waited event. Note that this is past-tense, we are done waiting.
1573     if (JvmtiExport::should_post_monitor_waited()) {
1574       JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
1575 
1576       if (node._notified != 0 && _succ == Self) {
1577         // In this part of the monitor wait-notify-reenter protocol it
1578         // is possible (and normal) for another thread to do a fastpath
1579         // monitor enter-exit while this thread is still trying to get
1580         // to the reenter portion of the protocol.
1581         //
1582         // The ObjectMonitor was notified and the current thread is
1583         // the successor which also means that an unpark() has already
1584         // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
1585         // consume the unpark() that was done when the successor was
1586         // set because the same ParkEvent is shared between Java
1587         // monitors and JVM/TI RawMonitors (for now).
1588         //
1589         // We redo the unpark() to ensure forward progress, i.e., we
1590         // don't want all pending threads hanging (parked) with none
1591         // entering the unlocked monitor.
1592         node._event->unpark();
1593       }
1594     }
1595 
1596     if (event.should_commit()) {
1597       post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
1598     }
1599 
1600     OrderAccess::fence();
1601 
1602     assert(Self->_Stalled != 0, "invariant");
1603     Self->_Stalled = 0;
1604 
1605     assert(_owner != Self, "invariant");
1606     ObjectWaiter::TStates v = node.TState;
1607     if (v == ObjectWaiter::TS_RUN) {
1608       enter(Self);
1609     } else {
1610       guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
1611       ReenterI(Self, &node);
1612       node.wait_reenter_end(this);
1613     }
1614 
1615     // Self has reacquired the lock.
1616     // Lifecycle - the node representing Self must not appear on any queues.
1617     // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1618     // want residual elements associated with this thread left on any lists.
1619     guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
1620     assert(_owner == Self, "invariant");
1621     assert(_succ != Self, "invariant");
1622   } // OSThreadWaitState()
1623 
1624   jt->set_current_waiting_monitor(NULL);
1625 
1626   guarantee(_recursions == 0, "invariant");
1627   _recursions = save;     // restore the old recursion count
1628   _waiters--;             // decrement the number of waiters
1629 
1630   // Verify a few postconditions
1631   assert(_owner == Self, "invariant");
1632   assert(_succ != Self, "invariant");
1633   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
1634 
1635   if (SyncFlags & 32) {
1636     OrderAccess::fence();
1637   }
1638 
1639   // check if the notification happened
1640   if (!WasNotified) {
1641     // no, it could be timeout or Thread.interrupt() or both
1642     // check for interrupt event, otherwise it is timeout
1643     if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1644       TEVENT(Wait - throw IEX from epilog);
1645       THROW(vmSymbols::java_lang_InterruptedException());
1646     }
1647   }
1648 
  // NOTE: A spurious wakeup will be treated as a timeout.
1650   // Monitor notify has precedence over thread interrupt.
1651 }
1652 
1653 
1654 // Consider:
1655 // If the lock is cool (cxq == null && succ == null) and we're on an MP system
1656 // then instead of transferring a thread from the WaitSet to the EntryList
1657 // we might just dequeue a thread from the WaitSet and directly unpark() it.
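//
// Note that the final arm of INotify() below (policy values outside 0..3)
// already implements a close variant of this idea: the notifyee is marked
// TS_RUN and unparked directly instead of being transferred to the
// EntryList or cxq.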
1658 
1659 void ObjectMonitor::INotify(Thread * Self) {
1660   const int policy = Knob_MoveNotifyee;
1661 
1662   Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notify");
1663   ObjectWaiter * iterator = DequeueWaiter();
1664   if (iterator != NULL) {
1665     TEVENT(Notify1 - Transfer);
1666     guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
1667     guarantee(iterator->_notified == 0, "invariant");
1668     // Disposition - what might we do with iterator ?
1669     // a.  add it directly to the EntryList - either tail or head.
1670     // b.  push it onto the front of the _cxq.
1671     // For now we use (a).
1672     if (policy != 4) {
1673       iterator->TState = ObjectWaiter::TS_ENTER;
1674     }
1675     iterator->_notified = 1;
1676     iterator->_notifier_tid = Self->osthread()->thread_id();
1677 
1678     ObjectWaiter * list = _EntryList;
1679     if (list != NULL) {
1680       assert(list->_prev == NULL, "invariant");
1681       assert(list->TState == ObjectWaiter::TS_ENTER, "invariant");
1682       assert(list != iterator, "invariant");
1683     }
1684 
1685     if (policy == 0) {       // prepend to EntryList
1686       if (list == NULL) {
1687         iterator->_next = iterator->_prev = NULL;
1688         _EntryList = iterator;
1689       } else {
1690         list->_prev = iterator;
1691         iterator->_next = list;
1692         iterator->_prev = NULL;
1693         _EntryList = iterator;
1694       }
1695     } else if (policy == 1) {      // append to EntryList
1696       if (list == NULL) {
1697         iterator->_next = iterator->_prev = NULL;
1698         _EntryList = iterator;
1699       } else {
1700         // CONSIDER:  finding the tail currently requires a linear-time walk of
1701         // the EntryList.  We can make tail access constant-time by converting to
1702         // a CDLL instead of using our current DLL.
1703         ObjectWaiter * tail;
1704         for (tail = list; tail->_next != NULL; tail = tail->_next) /* empty */;
1705         assert(tail != NULL && tail->_next == NULL, "invariant");
1706         tail->_next = iterator;
1707         iterator->_prev = tail;
1708         iterator->_next = NULL;
1709       }
1710     } else if (policy == 2) {      // prepend to cxq
1711       if (list == NULL) {
1712         iterator->_next = iterator->_prev = NULL;
1713         _EntryList = iterator;
1714       } else {
1715         iterator->TState = ObjectWaiter::TS_CXQ;
1716         for (;;) {
1717           ObjectWaiter * front = _cxq;
1718           iterator->_next = front;
1719           if (Atomic::cmpxchg_ptr(iterator, &_cxq, front) == front) {
1720             break;
1721           }
1722         }
1723       }
1724     } else if (policy == 3) {      // append to cxq
1725       iterator->TState = ObjectWaiter::TS_CXQ;
1726       for (;;) {
1727         ObjectWaiter * tail = _cxq;
1728         if (tail == NULL) {
1729           iterator->_next = NULL;
1730           if (Atomic::cmpxchg_ptr(iterator, &_cxq, NULL) == NULL) {
1731             break;
1732           }
1733         } else {
1734           while (tail->_next != NULL) tail = tail->_next;
1735           tail->_next = iterator;
1736           iterator->_prev = tail;
1737           iterator->_next = NULL;
1738           break;
1739         }
1740       }
1741     } else {
1742       ParkEvent * ev = iterator->_event;
1743       iterator->TState = ObjectWaiter::TS_RUN;
1744       OrderAccess::fence();
1745       ev->unpark();
1746     }
1747 
1748     // _WaitSetLock protects the wait queue, not the EntryList.  We could
1749     // move the add-to-EntryList operation, above, outside the critical section
1750     // protected by _WaitSetLock.  In practice that's not useful.  With the
    // exception of wait() timeouts and interrupts, the monitor owner
1752     // is the only thread that grabs _WaitSetLock.  There's almost no contention
1753     // on _WaitSetLock so it's not profitable to reduce the length of the
1754     // critical section.
1755 
1756     if (policy < 4) {
1757       iterator->wait_reenter_begin(this);
1758     }
1759   }
1760   Thread::SpinRelease(&_WaitSetLock);
1761 }
1762 
1763 // Consider: a not-uncommon synchronization bug is to use notify() when
1764 // notifyAll() is more appropriate, potentially resulting in stranded
1765 // threads; this is one example of a lost wakeup. A useful diagnostic
1766 // option is to force all notify() operations to behave as notifyAll().
1767 //
1768 // Note: We can also detect many such problems with a "minimum wait".
1769 // When the "minimum wait" is set to a small non-zero timeout value
1770 // and the program does not hang whereas it did absent "minimum wait",
1771 // that suggests a lost wakeup bug. The '-XX:SyncFlags=1' option uses
1772 // a "minimum wait" for all park() operations; see the recheckInterval
1773 // variable and MAX_RECHECK_INTERVAL.
1774 
1775 void ObjectMonitor::notify(TRAPS) {
1776   CHECK_OWNER();
1777   if (_WaitSet == NULL) {
1778     TEVENT(Empty-Notify);
1779     return;
1780   }
1781   DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
1782   INotify(THREAD);
1783   if (ObjectMonitor::_sync_Notifications != NULL) {
1784     ObjectMonitor::_sync_Notifications->inc(1);
1785   }
1786 }
1787 
1788 
1789 // The current implementation of notifyAll() transfers the waiters one-at-a-time
1790 // from the waitset to the EntryList. This could be done more efficiently with a
1791 // single bulk transfer but in practice it's not time-critical. Beware too,
1792 // that in prepend-mode we invert the order of the waiters. Let's say that the
1793 // waitset is "ABCD" and the EntryList is "XYZ". After a notifyAll() in prepend
1794 // mode the waitset will be empty and the EntryList will be "DCBAXYZ".
1795 
1796 void ObjectMonitor::notifyAll(TRAPS) {
1797   CHECK_OWNER();
1798   if (_WaitSet == NULL) {
1799     TEVENT(Empty-NotifyAll);
1800     return;
1801   }
1802 
1803   DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
1804   int tally = 0;
1805   while (_WaitSet != NULL) {
1806     tally++;
1807     INotify(THREAD);
1808   }
1809 
1810   if (tally != 0 && ObjectMonitor::_sync_Notifications != NULL) {
1811     ObjectMonitor::_sync_Notifications->inc(tally);
1812   }
1813 }
1814 
1815 // -----------------------------------------------------------------------------
1816 // Adaptive Spinning Support
1817 //
1818 // Adaptive spin-then-block - rational spinning
1819 //
1820 // Note that we spin "globally" on _owner with a classic SMP-polite TATAS
1821 // algorithm.  On high order SMP systems it would be better to start with
1822 // a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
1823 // a contending thread could enqueue itself on the cxq and then spin locally
1824 // on a thread-specific variable such as its ParkEvent._Event flag.
1825 // That's left as an exercise for the reader.  Note that global spinning is
1826 // not problematic on Niagara, as the L2 cache serves the interconnect and
1827 // has both low latency and massive bandwidth.
1828 //
1829 // Broadly, we can fix the spin frequency -- that is, the % of contended lock
1830 // acquisition attempts where we opt to spin --  at 100% and vary the spin count
1831 // (duration) or we can fix the count at approximately the duration of
1832 // a context switch and vary the frequency.   Of course we could also
1833 // vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
1834 // See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html.
1835 //
1836 // This implementation varies the duration "D", where D varies with
1837 // the success rate of recent spin attempts. (D is capped at approximately
// the length of a round-trip context switch).  The success rate for recent
1839 // spin attempts is a good predictor of the success rate of future spin
1840 // attempts.  The mechanism adapts automatically to varying critical
1841 // section length (lock modality), system load and degree of parallelism.
1842 // D is maintained per-monitor in _SpinDuration and is initialized
1843 // optimistically.  Spin frequency is fixed at 100%.
1844 //
1845 // Note that _SpinDuration is volatile, but we update it without locks
1846 // or atomics.  The code is designed so that _SpinDuration stays within
1847 // a reasonable range even in the presence of races.  The arithmetic
1848 // operations on _SpinDuration are closed over the domain of legal values,
// so at worst a race will install an older but still legal value.
// At the very worst this introduces some apparent non-determinism.
// We might spin when we shouldn't or vice-versa, but since the spin
// counts are relatively short, even in the worst case, the effect is harmless.
1853 //
// Care must be taken that a low "D" value does not become an
// absorbing state.  Transient spinning failures -- when spinning
1856 // is overall profitable -- should not cause the system to converge
1857 // on low "D" values.  We want spinning to be stable and predictable
1858 // and fairly responsive to change and at the same time we don't want
1859 // it to oscillate, become metastable, be "too" non-deterministic,
1860 // or converge on or enter undesirable stable absorbing states.
1861 //
1862 // We implement a feedback-based control system -- using past behavior
1863 // to predict future behavior.  We face two issues: (a) if the
1864 // input signal is random then the spin predictor won't provide optimal
1865 // results, and (b) if the signal frequency is too high then the control
1866 // system, which has some natural response lag, will "chase" the signal.
1867 // (b) can arise from multimodal lock hold times.  Transient preemption
1868 // can also result in apparent bimodal lock hold times.
1869 // Although sub-optimal, neither condition is particularly harmful, as
1870 // in the worst-case we'll spin when we shouldn't or vice-versa.
1871 // The maximum spin duration is rather short so the failure modes aren't bad.
// To be conservative, I've tuned the gain in the system to bias toward
// _not spinning.  Relatedly, the system can sometimes enter a mode where it
1874 // "rings" or oscillates between spinning and not spinning.  This happens
1875 // when spinning is just on the cusp of profitability, however, so the
1876 // situation is not dire.  The state is benign -- there's no need to add
1877 // hysteresis control to damp the transition rate between spinning and
1878 // not spinning.
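//
// Concretely, the duration update rule implemented in TrySpin_VaryDuration(),
// below, is, in sketch form:
//
//   on spin success:  if (D < Knob_SpinLimit)
//                       D = MAX2(D, Knob_Poverty) + Knob_Bonus;  // grow D
//                     (the pre-spin path uses Knob_BonusB)
//   on spin failure:  D = D - Knob_Penalty, floored at 0;        // shrink D
//
// The Poverty floor on the success path is what keeps low "D" values from
// becoming absorbing states.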
1879 
1880 intptr_t ObjectMonitor::SpinCallbackArgument = 0;
1881 int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL;
1882 
1883 // Spinning: Fixed frequency (100%), vary duration
1884 
1885 
1886 int ObjectMonitor::TrySpin_VaryDuration(Thread * Self) {
1887   // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
1888   int ctr = Knob_FixedSpin;
1889   if (ctr != 0) {
1890     while (--ctr >= 0) {
1891       if (TryLock(Self) > 0) return 1;
1892       SpinPause();
1893     }
1894     return 0;
1895   }
1896 
1897   for (ctr = Knob_PreSpin + 1; --ctr >= 0;) {
1898     if (TryLock(Self) > 0) {
1899       // Increase _SpinDuration ...
1900       // Note that we don't clamp SpinDuration precisely at SpinLimit.
      // Raising _SpinDuration to the poverty line is key.
1902       int x = _SpinDuration;
1903       if (x < Knob_SpinLimit) {
1904         if (x < Knob_Poverty) x = Knob_Poverty;
1905         _SpinDuration = x + Knob_BonusB;
1906       }
1907       return 1;
1908     }
1909     SpinPause();
1910   }
1911 
1912   // Admission control - verify preconditions for spinning
1913   //
1914   // We always spin a little bit, just to prevent _SpinDuration == 0 from
1915   // becoming an absorbing state.  Put another way, we spin briefly to
1916   // sample, just in case the system load, parallelism, contention, or lock
1917   // modality changed.
1918   //
1919   // Consider the following alternative:
1920   // Periodically set _SpinDuration = _SpinLimit and try a long/full
1921   // spin attempt.  "Periodically" might mean after a tally of
1922   // the # of failed spin attempts (or iterations) reaches some threshold.
1923   // This takes us into the realm of 1-out-of-N spinning, where we
1924   // hold the duration constant but vary the frequency.
1925 
1926   ctr = _SpinDuration;
1927   if (ctr < Knob_SpinBase) ctr = Knob_SpinBase;
1928   if (ctr <= 0) return 0;
1929 
1930   if (Knob_SuccRestrict && _succ != NULL) return 0;
1931   if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
1932     TEVENT(Spin abort - notrunnable [TOP]);
1933     return 0;
1934   }
1935 
1936   int MaxSpin = Knob_MaxSpinners;
1937   if (MaxSpin >= 0) {
1938     if (_Spinner > MaxSpin) {
1939       TEVENT(Spin abort -- too many spinners);
1940       return 0;
1941     }
1942     // Slightly racy, but benign ...
1943     Adjust(&_Spinner, 1);
1944   }
1945 
1946   // We're good to spin ... spin ingress.
1947   // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
1948   // when preparing to LD...CAS _owner, etc and the CAS is likely
1949   // to succeed.
1950   int hits    = 0;
1951   int msk     = 0;
1952   int caspty  = Knob_CASPenalty;
1953   int oxpty   = Knob_OXPenalty;
1954   int sss     = Knob_SpinSetSucc;
1955   if (sss && _succ == NULL) _succ = Self;
1956   Thread * prv = NULL;
1957 
1958   // There are three ways to exit the following loop:
1959   // 1.  A successful spin where this thread has acquired the lock.
1960   // 2.  Spin failure with prejudice
1961   // 3.  Spin failure without prejudice
1962 
1963   while (--ctr >= 0) {
1964 
1965     // Periodic polling -- Check for pending GC
1966     // Threads may spin while they're unsafe.
1967     // We don't want spinning threads to delay the JVM from reaching
1968     // a stop-the-world safepoint or to steal cycles from GC.
1969     // If we detect a pending safepoint we abort in order that
1970     // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
1971     // this thread, if safe, doesn't steal cycles from GC.
1972     // This is in keeping with the "no loitering in runtime" rule.
1973     // We periodically check to see if there's a safepoint pending.
1974     if ((ctr & 0xFF) == 0) {
1975       if (SafepointSynchronize::do_call_back()) {
1976         TEVENT(Spin: safepoint);
1977         goto Abort;           // abrupt spin egress
1978       }
1979       if (Knob_UsePause & 1) SpinPause();
1980 
1981       int (*scb)(intptr_t,int) = SpinCallbackFunction;
1982       if (hits > 50 && scb != NULL) {
1983         int abend = (*scb)(SpinCallbackArgument, 0);
1984       }
1985     }
1986 
1987     if (Knob_UsePause & 2) SpinPause();
1988 
1989     // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
1990     // This is useful on classic SMP systems, but is of less utility on
1991     // N1-style CMT platforms.
1992     //
1993     // Trade-off: lock acquisition latency vs coherency bandwidth.
1994     // Lock hold times are typically short.  A histogram
1995     // of successful spin attempts shows that we usually acquire
1996     // the lock early in the spin.  That suggests we want to
1997     // sample _owner frequently in the early phase of the spin,
1998     // but then back-off and sample less frequently as the spin
    // progresses.  The back-off makes us a good citizen on big
    // SMP systems.  Oversampling _owner can consume excessive
    // coherency bandwidth.  Relatedly, if we oversample _owner we
    // can inadvertently interfere with the ST m->owner=null
    // executed by the lock owner.
2004     if (ctr & msk) continue;
2005     ++hits;
2006     if ((hits & 0xF) == 0) {
2007       // The 0xF, above, corresponds to the exponent.
2008       // Consider: (msk+1)|msk
2009       msk = ((msk << 2)|3) & BackOffMask;
2010     }
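
    // Example of the resulting schedule (assuming msk starts at 0 and
    // BackOffMask is large enough not to clip): after each group of 16
    // probe hits, msk steps 0 -> 3 -> 0xF -> 0x3F -> ..., so the
    // "if (ctr & msk) continue" filter above samples _owner progressively
    // less often as the spin progresses.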
2011 
2012     // Probe _owner with TATAS
2013     // If this thread observes the monitor transition or flicker
2014     // from locked to unlocked to locked, then the odds that this
2015     // thread will acquire the lock in this spin attempt go down
2016     // considerably.  The same argument applies if the CAS fails
2017     // or if we observe _owner change from one non-null value to
2018     // another non-null value.   In such cases we might abort
2019     // the spin without prejudice or apply a "penalty" to the
2020     // spin count-down variable "ctr", reducing it by 100, say.
2021 
2022     Thread * ox = (Thread *) _owner;
2023     if (ox == NULL) {
2024       ox = (Thread *) Atomic::cmpxchg_ptr(Self, &_owner, NULL);
2025       if (ox == NULL) {
2026         // The CAS succeeded -- this thread acquired ownership
2027         // Take care of some bookkeeping to exit spin state.
2028         if (sss && _succ == Self) {
2029           _succ = NULL;
2030         }
2031         if (MaxSpin > 0) Adjust(&_Spinner, -1);
2032 
2033         // Increase _SpinDuration :
2034         // The spin was successful (profitable) so we tend toward
2035         // longer spin attempts in the future.
2036         // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
2037         // If we acquired the lock early in the spin cycle it
2038         // makes sense to increase _SpinDuration proportionally.
2039         // Note that we don't clamp SpinDuration precisely at SpinLimit.
2040         int x = _SpinDuration;
2041         if (x < Knob_SpinLimit) {
2042           if (x < Knob_Poverty) x = Knob_Poverty;
2043           _SpinDuration = x + Knob_Bonus;
2044         }
2045         return 1;
2046       }
2047 
2048       // The CAS failed ... we can take any of the following actions:
2049       // * penalize: ctr -= Knob_CASPenalty
2050       // * exit spin with prejudice -- goto Abort;
2051       // * exit spin without prejudice.
2052       // * Since CAS is high-latency, retry again immediately.
2053       prv = ox;
2054       TEVENT(Spin: cas failed);
2055       if (caspty == -2) break;
2056       if (caspty == -1) goto Abort;
2057       ctr -= caspty;
2058       continue;
2059     }
2060 
2061     // Did lock ownership change hands ?
2062     if (ox != prv && prv != NULL) {
      TEVENT(spin: Owner changed);
2064       if (oxpty == -2) break;
2065       if (oxpty == -1) goto Abort;
2066       ctr -= oxpty;
2067     }
2068     prv = ox;
2069 
2070     // Abort the spin if the owner is not executing.
2071     // The owner must be executing in order to drop the lock.
2072     // Spinning while the owner is OFFPROC is idiocy.
2073     // Consider: ctr -= RunnablePenalty ;
2074     if (Knob_OState && NotRunnable (Self, ox)) {
2075       TEVENT(Spin abort - notrunnable);
2076       goto Abort;
2077     }
2078     if (sss && _succ == NULL) _succ = Self;
2079   }
2080 
2081   // Spin failed with prejudice -- reduce _SpinDuration.
2082   // TODO: Use an AIMD-like policy to adjust _SpinDuration.
2083   // AIMD is globally stable.
2084   TEVENT(Spin failure);
2085   {
2086     int x = _SpinDuration;
2087     if (x > 0) {
2088       // Consider an AIMD scheme like: x -= (x >> 3) + 100
      // This is globally stable and tends to damp the response.
2090       x -= Knob_Penalty;
2091       if (x < 0) x = 0;
2092       _SpinDuration = x;
2093     }
2094   }
2095 
2096  Abort:
2097   if (MaxSpin >= 0) Adjust(&_Spinner, -1);
2098   if (sss && _succ == Self) {
2099     _succ = NULL;
2100     // Invariant: after setting succ=null a contending thread
2101     // must recheck-retry _owner before parking.  This usually happens
2102     // in the normal usage of TrySpin(), but it's safest
2103     // to make TrySpin() as foolproof as possible.
2104     OrderAccess::fence();
2105     if (TryLock(Self) > 0) return 1;
2106   }
2107   return 0;
2108 }
2109 
2110 // NotRunnable() -- informed spinning
2111 //
2112 // Don't bother spinning if the owner is not eligible to drop the lock.
// Peek at the owner's schedctl.sc_state and Thread._thread_state and
2114 // spin only if the owner thread is _thread_in_Java or _thread_in_vm.
2115 // The thread must be runnable in order to drop the lock in timely fashion.
2116 // If the _owner is not runnable then spinning will not likely be
2117 // successful (profitable).
2118 //
2119 // Beware -- the thread referenced by _owner could have died
// so a simple fetch from _owner->_thread_state might trap.
2121 // Instead, we use SafeFetchXX() to safely LD _owner->_thread_state.
2122 // Because of the lifecycle issues the schedctl and _thread_state values
2123 // observed by NotRunnable() might be garbage.  NotRunnable must
2124 // tolerate this and consider the observed _thread_state value
2125 // as advisory.
2126 //
2127 // Beware too, that _owner is sometimes a BasicLock address and sometimes
2128 // a thread pointer.
2129 // Alternately, we might tag the type (thread pointer vs basiclock pointer)
// with the LSB of _owner.  Another option would be to probabilistically probe
2131 // the putative _owner->TypeTag value.
2132 //
2133 // Checking _thread_state isn't perfect.  Even if the thread is
2134 // in_java it might be blocked on a page-fault or have been preempted
// and sitting on a ready/dispatch queue.  _thread_state in conjunction
2136 // with schedctl.sc_state gives us a good picture of what the
2137 // thread is doing, however.
2138 //
2139 // TODO: check schedctl.sc_state.
2140 // We'll need to use SafeFetch32() to read from the schedctl block.
2141 // See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/
2142 //
2143 // The return value from NotRunnable() is *advisory* -- the
2144 // result is based on sampling and is not necessarily coherent.
2145 // The caller must tolerate false-negative and false-positive errors.
2146 // Spinning, in general, is probabilistic anyway.
2147 
2148 
2149 int ObjectMonitor::NotRunnable(Thread * Self, Thread * ox) {
2150   // Check ox->TypeTag == 2BAD.
2151   if (ox == NULL) return 0;
2152 
2153   // Avoid transitive spinning ...
2154   // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
2155   // Immediately after T1 acquires L it's possible that T2, also
2156   // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
2157   // This occurs transiently after T1 acquired L but before
2158   // T1 managed to clear T1.Stalled.  T2 does not need to abort
2159   // its spin in this circumstance.
2160   intptr_t BlockedOn = SafeFetchN((intptr_t *) &ox->_Stalled, intptr_t(1));
2161 
2162   if (BlockedOn == 1) return 1;
2163   if (BlockedOn != 0) {
2164     return BlockedOn != intptr_t(this) && _owner == ox;
2165   }
2166 
  assert(sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant");
  int jst = SafeFetch32((int *) &((JavaThread *) ox)->_thread_state, -1);
2169   // consider also: jst != _thread_in_Java -- but that's overspecific.
2170   return jst == _thread_blocked || jst == _thread_in_native;
2171 }
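
// NotRunnable() is consulted twice in TrySpin_VaryDuration(), above: once as
// admission control (under Knob_OState) before the spin begins, and again
// inside the spin loop to abort when the owner appears to go off-processor.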
2172 
2173 
2174 // -----------------------------------------------------------------------------
2175 // WaitSet management ...
2176 
2177 ObjectWaiter::ObjectWaiter(Thread* thread) {
2178   _next     = NULL;
2179   _prev     = NULL;
2180   _notified = 0;
2181   TState    = TS_RUN;
2182   _thread   = thread;
2183   _event    = thread->_ParkEvent;
2184   _active   = false;
2185   assert(_event != NULL, "invariant");
2186 }
2187 
2188 void ObjectWaiter::wait_reenter_begin(ObjectMonitor * const mon) {
2189   JavaThread *jt = (JavaThread *)this->_thread;
2190   _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
2191 }
2192 
2193 void ObjectWaiter::wait_reenter_end(ObjectMonitor * const mon) {
2194   JavaThread *jt = (JavaThread *)this->_thread;
2195   JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
2196 }
2197 
2198 inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
  assert(node != NULL, "should not add NULL node");
2200   assert(node->_prev == NULL, "node already in list");
2201   assert(node->_next == NULL, "node already in list");
2202   // put node at end of queue (circular doubly linked list)
2203   if (_WaitSet == NULL) {
2204     _WaitSet = node;
2205     node->_prev = node;
2206     node->_next = node;
2207   } else {
2208     ObjectWaiter* head = _WaitSet;
2209     ObjectWaiter* tail = head->_prev;
2210     assert(tail->_next == head, "invariant check");
2211     tail->_next = node;
2212     head->_prev = node;
2213     node->_next = head;
2214     node->_prev = tail;
2215   }
2216 }
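
// Resulting shape (sketch): with waiters A, B, C enqueued in that order,
//   _WaitSet -> A <-> B <-> C,  with C->_next == A and A->_prev == C,
// so the head's _prev is always the tail and appending is O(1).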
2217 
2218 inline ObjectWaiter* ObjectMonitor::DequeueWaiter() {
2219   // dequeue the very first waiter
2220   ObjectWaiter* waiter = _WaitSet;
2221   if (waiter) {
2222     DequeueSpecificWaiter(waiter);
2223   }
2224   return waiter;
2225 }
2226 
2227 inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
2228   assert(node != NULL, "should not dequeue NULL node");
2229   assert(node->_prev != NULL, "node already removed from list");
2230   assert(node->_next != NULL, "node already removed from list");
  // when the waiter has woken up because of interrupt,
  // timeout or other spurious wake-up, dequeue the
  // waiter from the waiting list
2234   ObjectWaiter* next = node->_next;
2235   if (next == node) {
2236     assert(node->_prev == node, "invariant check");
2237     _WaitSet = NULL;
2238   } else {
2239     ObjectWaiter* prev = node->_prev;
2240     assert(prev->_next == node, "invariant check");
2241     assert(next->_prev == node, "invariant check");
2242     next->_prev = prev;
2243     prev->_next = next;
2244     if (_WaitSet == node) {
2245       _WaitSet = next;
2246     }
2247   }
2248   node->_next = NULL;
2249   node->_prev = NULL;
2250 }
2251 
2252 // -----------------------------------------------------------------------------
2253 // PerfData support
2254 PerfCounter * ObjectMonitor::_sync_ContendedLockAttempts       = NULL;
2255 PerfCounter * ObjectMonitor::_sync_FutileWakeups               = NULL;
2256 PerfCounter * ObjectMonitor::_sync_Parks                       = NULL;
2257 PerfCounter * ObjectMonitor::_sync_EmptyNotifications          = NULL;
2258 PerfCounter * ObjectMonitor::_sync_Notifications               = NULL;
2259 PerfCounter * ObjectMonitor::_sync_PrivateA                    = NULL;
2260 PerfCounter * ObjectMonitor::_sync_PrivateB                    = NULL;
2261 PerfCounter * ObjectMonitor::_sync_SlowExit                    = NULL;
2262 PerfCounter * ObjectMonitor::_sync_SlowEnter                   = NULL;
2263 PerfCounter * ObjectMonitor::_sync_SlowNotify                  = NULL;
2264 PerfCounter * ObjectMonitor::_sync_SlowNotifyAll               = NULL;
2265 PerfCounter * ObjectMonitor::_sync_FailedSpins                 = NULL;
2266 PerfCounter * ObjectMonitor::_sync_SuccessfulSpins             = NULL;
2267 PerfCounter * ObjectMonitor::_sync_MonInCirculation            = NULL;
2268 PerfCounter * ObjectMonitor::_sync_MonScavenged                = NULL;
2269 PerfCounter * ObjectMonitor::_sync_Inflations                  = NULL;
2270 PerfCounter * ObjectMonitor::_sync_Deflations                  = NULL;
2271 PerfLongVariable * ObjectMonitor::_sync_MonExtant              = NULL;
2272 
2273 // One-shot global initialization for the sync subsystem.
2274 // We could also defer initialization and initialize on-demand
2275 // the first time we call inflate().  Initialization would
2276 // be protected - like so many things - by the MonitorCache_lock.
2277 
2278 void ObjectMonitor::Initialize() {
2279   static int InitializationCompleted = 0;
2280   assert(InitializationCompleted == 0, "invariant");
2281   InitializationCompleted = 1;
2282   if (UsePerfData) {
2283     EXCEPTION_MARK;
2284 #define NEWPERFCOUNTER(n)                                                \
2285   {                                                                      \
2286     n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,  \
2287                                         CHECK);                          \
2288   }
2289 #define NEWPERFVARIABLE(n)                                                \
2290   {                                                                       \
2291     n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,  \
2292                                          CHECK);                          \
2293   }
2294     NEWPERFCOUNTER(_sync_Inflations);
2295     NEWPERFCOUNTER(_sync_Deflations);
2296     NEWPERFCOUNTER(_sync_ContendedLockAttempts);
2297     NEWPERFCOUNTER(_sync_FutileWakeups);
2298     NEWPERFCOUNTER(_sync_Parks);
2299     NEWPERFCOUNTER(_sync_EmptyNotifications);
2300     NEWPERFCOUNTER(_sync_Notifications);
2301     NEWPERFCOUNTER(_sync_SlowEnter);
2302     NEWPERFCOUNTER(_sync_SlowExit);
2303     NEWPERFCOUNTER(_sync_SlowNotify);
2304     NEWPERFCOUNTER(_sync_SlowNotifyAll);
2305     NEWPERFCOUNTER(_sync_FailedSpins);
2306     NEWPERFCOUNTER(_sync_SuccessfulSpins);
2307     NEWPERFCOUNTER(_sync_PrivateA);
2308     NEWPERFCOUNTER(_sync_PrivateB);
2309     NEWPERFCOUNTER(_sync_MonInCirculation);
2310     NEWPERFCOUNTER(_sync_MonScavenged);
2311     NEWPERFVARIABLE(_sync_MonExtant);
2312 #undef NEWPERFCOUNTER
2313 #undef NEWPERFVARIABLE
2314   }
2315 }
2316 
2317 static char * kvGet(char * kvList, const char * Key) {
2318   if (kvList == NULL) return NULL;
2319   size_t n = strlen(Key);
2320   char * Search;
2321   for (Search = kvList; *Search; Search += strlen(Search) + 1) {
2322     if (strncmp (Search, Key, n) == 0) {
2323       if (Search[n] == '=') return Search + n + 1;
      if (Search[n] == 0)   return (char *) "1";
2325     }
2326   }
2327   return NULL;
2328 }
2329 
2330 static int kvGetInt(char * kvList, const char * Key, int Default) {
2331   char * v = kvGet(kvList, Key);
2332   int rslt = v ? ::strtol(v, NULL, 0) : Default;
2333   if (Knob_ReportSettings && v != NULL) {
    ::printf("  SyncKnob: %s %d(%d)\n", Key, rslt, Default);
2335     ::fflush(stdout);
2336   }
2337   return rslt;
2338 }
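
// Illustrative usage (hypothetical input, sketched for clarity): given a
// buffer holding "SpinLimit=5000\0Verbose\0\0" -- the doubly-NUL-terminated
// form that DeferredInitialize(), below, produces from a ':'-separated
// SyncKnobs string -- then:
//   kvGet(buf, "SpinLimit")          returns "5000"
//   kvGet(buf, "Verbose")            returns "1"    (bare key, no '=')
//   kvGetInt(buf, "SpinLimit", 100)  returns 5000
//   kvGetInt(buf, "PreSpin", 10)     returns 10     (key absent -> Default)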
2339 
2340 void ObjectMonitor::DeferredInitialize() {
2341   if (InitDone > 0) return;
2342   if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
2343     while (InitDone != 1) /* empty */;
2344     return;
2345   }
2346 
2347   // One-shot global initialization ...
2348   // The initialization is idempotent, so we don't need locks.
2349   // In the future consider doing this via os::init_2().
2350   // SyncKnobs consist of <Key>=<Value> pairs in the style
2351   // of environment variables.  Start by converting ':' to NUL.
2352 
2353   if (SyncKnobs == NULL) SyncKnobs = "";
2354 
2355   size_t sz = strlen(SyncKnobs);
2356   char * knobs = (char *) malloc(sz + 2);
2357   if (knobs == NULL) {
2358     vm_exit_out_of_memory(sz + 2, OOM_MALLOC_ERROR, "Parse SyncKnobs");
2359     guarantee(0, "invariant");
2360   }
2361   strcpy(knobs, SyncKnobs);
2362   knobs[sz+1] = 0;
2363   for (char * p = knobs; *p; p++) {
2364     if (*p == ':') *p = 0;
2365   }
2366 
2367   #define SETKNOB(x) { Knob_##x = kvGetInt(knobs, #x, Knob_##x); }
2368   SETKNOB(ReportSettings);
2369   SETKNOB(Verbose);
2370   SETKNOB(VerifyInUse);
2371   SETKNOB(FixedSpin);
2372   SETKNOB(SpinLimit);
2373   SETKNOB(SpinBase);
2374   SETKNOB(SpinBackOff);
2375   SETKNOB(CASPenalty);
2376   SETKNOB(OXPenalty);
2377   SETKNOB(LogSpins);
2378   SETKNOB(SpinSetSucc);
2379   SETKNOB(SuccEnabled);
2380   SETKNOB(SuccRestrict);
2381   SETKNOB(Penalty);
2382   SETKNOB(Bonus);
2383   SETKNOB(BonusB);
2384   SETKNOB(Poverty);
2385   SETKNOB(SpinAfterFutile);
2386   SETKNOB(UsePause);
2387   SETKNOB(SpinEarly);
2388   SETKNOB(OState);
2389   SETKNOB(MaxSpinners);
2390   SETKNOB(PreSpin);
2391   SETKNOB(ExitPolicy);
2392   SETKNOB(QMode);
2393   SETKNOB(ResetEvent);
2394   SETKNOB(MoveNotifyee);
2395   SETKNOB(FastHSSEC);
2396   #undef SETKNOB
2397 
2398   if (Knob_Verbose) {
2399     sanity_checks();
2400   }
2401 
2402   if (os::is_MP()) {
2403     BackOffMask = (1 << Knob_SpinBackOff) - 1;
2404     if (Knob_ReportSettings) ::printf("BackOffMask=%X\n", BackOffMask);
2405     // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
2406   } else {
2407     Knob_SpinLimit = 0;
2408     Knob_SpinBase  = 0;
2409     Knob_PreSpin   = 0;
2410     Knob_FixedSpin = -1;
2411   }
2412 
2413   if (Knob_LogSpins == 0) {
2414     ObjectMonitor::_sync_FailedSpins = NULL;
2415   }
2416 
2417   free(knobs);
2418   OrderAccess::fence();
2419   InitDone = 1;
2420 }
2421 
2422 void ObjectMonitor::sanity_checks() {
2423   int error_cnt = 0;
2424   int warning_cnt = 0;
2425   bool verbose = Knob_Verbose != 0 NOT_PRODUCT(|| VerboseInternalVMTests);
2426 
2427   if (verbose) {
2428     tty->print_cr("INFO: sizeof(ObjectMonitor)=" SIZE_FORMAT,
2429                   sizeof(ObjectMonitor));
2430     tty->print_cr("INFO: sizeof(PaddedEnd<ObjectMonitor>)=" SIZE_FORMAT,
2431                   sizeof(PaddedEnd<ObjectMonitor>));
2432   }
2433 
2434   uint cache_line_size = VM_Version::L1_data_cache_line_size();
2435   if (verbose) {
2436     tty->print_cr("INFO: L1_data_cache_line_size=%u", cache_line_size);
2437   }
2438 
2439   ObjectMonitor dummy;
2440   u_char *addr_begin  = (u_char*)&dummy;
2441   u_char *addr_header = (u_char*)&dummy._header;
2442   u_char *addr_owner  = (u_char*)&dummy._owner;
2443 
2444   uint offset_header = (uint)(addr_header - addr_begin);
2445   if (verbose) tty->print_cr("INFO: offset(_header)=%u", offset_header);
2446 
2447   uint offset_owner = (uint)(addr_owner - addr_begin);
2448   if (verbose) tty->print_cr("INFO: offset(_owner)=%u", offset_owner);
2449 
  if (offset_header != 0) {
2451     tty->print_cr("ERROR: offset(_header) must be zero (0).");
2452     error_cnt++;
2453   }
2454 
2455   if (cache_line_size != 0) {
2456     // We were able to determine the L1 data cache line size so
2457     // do some cache line specific sanity checks
2458 
2459     if ((offset_owner - offset_header) < cache_line_size) {
2460       tty->print_cr("WARNING: the _header and _owner fields are closer "
2461                     "than a cache line which permits false sharing.");
2462       warning_cnt++;
2463     }
2464 
2465     if ((sizeof(PaddedEnd<ObjectMonitor>) % cache_line_size) != 0) {
2466       tty->print_cr("WARNING: PaddedEnd<ObjectMonitor> size is not a "
2467                     "multiple of a cache line which permits false sharing.");
2468       warning_cnt++;
2469     }
2470   }
2471 
2472   ObjectSynchronizer::sanity_checks(verbose, cache_line_size, &error_cnt,
2473                                     &warning_cnt);
2474 
2475   if (verbose || error_cnt != 0 || warning_cnt != 0) {
2476     tty->print_cr("INFO: error_cnt=%d", error_cnt);
2477     tty->print_cr("INFO: warning_cnt=%d", warning_cnt);
2478   }
2479 
2480   guarantee(error_cnt == 0,
2481             "Fatal error(s) found in ObjectMonitor::sanity_checks()");
2482 }
2483 
2484 #ifndef PRODUCT
2485 void ObjectMonitor::verify() {
2486 }
2487 
2488 void ObjectMonitor::print() {
2489 }
2490 #endif