/*
 * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "services/threadService.hpp"
#include "trace/tracing.hpp"
#include "trace/traceMacros.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

#if defined(__GNUC__) && !defined(IA64)
  // Need to inhibit inlining for older versions of GCC to avoid build-time failures
  #define ATTR __attribute__((noinline))
#else
  #define ATTR
#endif


#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.


#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)obj)->klass()->name();                         \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#ifndef USDT2

HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit,
  jlong, uintptr_t, char*, int);

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid,                       \
                       (monitor), bytes, len, (millis));                   \
    }                                                                      \
  }

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid,                    \
                       (uintptr_t)(monitor), bytes, len);                  \
    }                                                                      \
  }

#else /* USDT2 */

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                       (monitor), bytes, len, (millis));                   \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_contended__enter HOTSPOT_MONITOR_CONTENDED_ENTER
#define HOTSPOT_MONITOR_contended__entered HOTSPOT_MONITOR_CONTENDED_ENTERED
#define HOTSPOT_MONITOR_contended__exit HOTSPOT_MONITOR_CONTENDED_EXIT
#define HOTSPOT_MONITOR_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_notifyAll HOTSPOT_MONITOR_NOTIFYALL

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_##probe(jtid,                                        \
                       (uintptr_t)(monitor), bytes, len);                  \
    }                                                                      \
  }

#endif /* USDT2 */
#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)    {;}
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)          {;}

#endif // ndef DTRACE_ENABLED

// Tunables ...
// The knob* variables are effectively final.  Once set they should
// never be modified thereafter.  Consider using __read_mostly with GCC.
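//
// A minimal sketch of what such an annotation could look like.  __read_mostly
// is not defined in this file; the section name below follows the Linux
// kernel's convention and is only an assumption for illustration:
//
//   #define __read_mostly __attribute__((__section__(".data..read_mostly")))
//   static int Knob_HandOff __read_mostly = 0;
//
// Grouping these effectively-final knobs into a read-mostly data section
// keeps them off cache lines that also see frequent stores.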

int ObjectMonitor::Knob_Verbose    = 0 ;
int ObjectMonitor::Knob_SpinLimit  = 5000 ;    // derived by an external tool -
static int Knob_LogSpins           = 0 ;       // enable jvmstat tally for spins
static int Knob_HandOff            = 0 ;
static int Knob_ReportSettings     = 0 ;

static int Knob_SpinBase           = 0 ;       // Floor AKA SpinMin
static int Knob_SpinBackOff        = 0 ;       // spin-loop backoff
static int Knob_CASPenalty         = -1 ;      // Penalty for failed CAS
static int Knob_OXPenalty          = -1 ;      // Penalty for observed _owner change
static int Knob_SpinSetSucc        = 1 ;       // spinners set the _succ field
static int Knob_SpinEarly          = 1 ;
static int Knob_SuccEnabled        = 1 ;       // futile wake throttling
static int Knob_SuccRestrict       = 0 ;       // Limit successors + spinners to at-most-one
static int Knob_MaxSpinners        = -1 ;      // Should be a function of # CPUs
static int Knob_Bonus              = 100 ;     // spin success bonus
static int Knob_BonusB             = 100 ;     // spin success bonus
static int Knob_Penalty            = 200 ;     // spin failure penalty
static int Knob_Poverty            = 1000 ;
static int Knob_SpinAfterFutile    = 1 ;       // Spin after returning from park()
static int Knob_FixedSpin          = 0 ;
static int Knob_OState             = 3 ;       // Spinner checks thread state of _owner
static int Knob_UsePause           = 1 ;
static int Knob_ExitPolicy         = 0 ;
static int Knob_PreSpin            = 10 ;      // 20-100 likely better
static int Knob_ResetEvent         = 0 ;
static int BackOffMask             = 0 ;

static int Knob_FastHSSEC          = 0 ;
static int Knob_MoveNotifyee       = 2 ;       // notify() - disposition of notifyee
static int Knob_QMode              = 0 ;       // EntryList-cxq policy - queue discipline
static volatile int InitDone       = 0 ;

#define TrySpin TrySpin_VaryDuration

// -----------------------------------------------------------------------------
// Theory of operations -- Monitors lists, thread residency, etc:
//
// * A thread acquires ownership of a monitor by successfully
//   CAS()ing the _owner field from null to non-null.
//
// * Invariant: A thread appears on at most one monitor list --
//   cxq, EntryList or WaitSet -- at any one time.
//
// * Contending threads "push" themselves onto the cxq with CAS
//   and then spin/park.
//
// * After a contending thread eventually acquires the lock it must
//   dequeue itself from either the EntryList or the cxq.
//
// * The exiting thread identifies and unparks an "heir presumptive"
//   tentative successor thread on the EntryList.  Critically, the
//   exiting thread doesn't unlink the successor thread from the EntryList.
//   After having been unparked, the wakee will recontend for ownership of
//   the monitor.  The successor (wakee) will either acquire the lock or
//   re-park itself.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread.  (This is also referred to as "handoff succession".)
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//   If the EntryList is empty but the cxq is populated the exiting
//   thread will drain the cxq into the EntryList.  It does so by
//   detaching the cxq (installing null with CAS) and folding
//   the threads from the cxq into the EntryList.  The EntryList is
//   doubly linked, while the cxq is singly linked because of the
//   CAS-based "push" used to enqueue recently arrived threads (RATs).
//
// * Concurrency invariants:
//
//   -- only the monitor owner may access or mutate the EntryList.
//      The mutex property of the monitor itself protects the EntryList
//      from concurrent interference.
//   -- Only the monitor owner may detach the cxq.
//
// * The monitor entry list operations avoid locks, but strictly speaking
//   they're not lock-free.  Enter is lock-free, exit is not.
//   See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread.  This mechanism is immune to ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//
// * Taken together, the cxq and the EntryList constitute a single
//   logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to improve the odds of a constant-time
//   dequeue operation after acquisition (in the ::enter() epilog) and
//   to reduce heat on the list ends.  (cf. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the monitor lock -- that is, we want to
//   minimize monitor lock hold times.  Note that even a small amount of
//   fixed spinning will greatly reduce the # of enqueue-dequeue operations
//   on EntryList|cxq.  That is, spinning relieves contention on the "inner"
//   locks and monitor metadata.
//
//   Cxq points to the set of Recently Arrived Threads attempting entry.
//   Because we push threads onto _cxq with CAS, the RATs must take the form of
//   a singly-linked LIFO.  We drain _cxq into EntryList at unlock-time when
//   the unlocking thread notices that EntryList is null but _cxq != null.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list.  Critically, we want insert and delete operations
//   to operate in constant-time.  If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely.  Viz.,
//   http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   Queue discipline is enforced at ::exit() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark.  Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark.  The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet.
//
// * notify() or notifyAll() simply transfers threads from the WaitSet to
//   either the EntryList or cxq.  Subsequent exit() operations will
//   unpark the notifyee.  Unparking a notifyee in notify() is inefficient -
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * An interesting alternative is to encode cxq as (List,LockByte) where
//   the LockByte is 0 iff the monitor is owned.  _owner is simply an auxiliary
//   variable, like _recursions, in the scheme.  The threads or Events that form
//   the list would have to be aligned on 256-byte boundaries.  A thread would
//   try to acquire the lock or enqueue itself with CAS, but exiting threads
//   could use a 1-0 protocol and simply STB to set the LockByte to 0.
//   Note that this is *not* word-tearing, but it does presume that full-word
//   CAS operations are coherent when intermixed with STB operations.  That's true
//   on most common processors.
//
// * See also http://blogs.sun.com/dave

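// Illustrative sketch of the enqueue and drain steps described above.  This
// is simplified pseudocode, not the implementation -- see EnterI() and
// exit() below for the real logic and the full set of invariants:
//
//   // Contended enter: CAS-push Self's node onto the singly-linked cxq.
//   do {
//     node->_next = _cxq ;
//   } while (Atomic::cmpxchg_ptr (node, &_cxq, node->_next) != node->_next) ;
//
//   // Contended exit, EntryList empty but cxq populated: detach the entire
//   // cxq with one atomic swap and fold it into the doubly-linked EntryList
//   // (only the owner may do this).
//   ObjectWaiter * list = (ObjectWaiter *) Atomic::xchg_ptr (NULL, &_cxq) ;
//   // ... walk "list", setting _prev back-links, then install as _EntryList.
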

// -----------------------------------------------------------------------------
// Enter support

bool ObjectMonitor::try_enter(Thread* THREAD) {
  if (THREAD != _owner) {
    if (THREAD->is_lock_owned ((address)_owner)) {
       assert(_recursions == 0, "internal state error");
       _owner = THREAD ;
       _recursions = 1 ;
       OwnerIsThread = 1 ;
       return true;
    }
    if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
      return false;
    }
    return true;
  } else {
    _recursions++;
    return true;
  }
}

void ATTR ObjectMonitor::enter(TRAPS) {
  // The following code is ordered to check the most common cases first
  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
  Thread * const Self = THREAD ;
  void * cur ;

  cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
  if (cur == NULL) {
     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
     assert (_recursions == 0   , "invariant") ;
     assert (_owner      == Self, "invariant") ;
     // CONSIDER: set or assert OwnerIsThread == 1
     return ;
  }

  if (cur == Self) {
     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
     _recursions ++ ;
     return ;
  }

  if (Self->is_lock_owned ((address)cur)) {
    assert (_recursions == 0, "internal state error");
    _recursions = 1 ;
    // Commute owner from a thread-specific on-stack BasicLockObject address to
    // a full-fledged "Thread *".
    _owner = Self ;
    OwnerIsThread = 1 ;
    return ;
  }

  // We've encountered genuine contention.
  assert (Self->_Stalled == 0, "invariant") ;
  Self->_Stalled = intptr_t(this) ;

  // Try one round of spinning *before* enqueueing Self
  // and before going through the awkward and expensive state
  // transitions.  The following spin is strictly optional ...
  // Note that if we acquire the monitor from an initial spin
  // we forgo posting JVMTI events and firing DTRACE probes.
  if (Knob_SpinEarly && TrySpin (Self) > 0) {
     assert (_owner == Self      , "invariant") ;
     assert (_recursions == 0    , "invariant") ;
     assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
     Self->_Stalled = 0 ;
     return ;
  }

  assert (_owner != Self          , "invariant") ;
  assert (_succ  != Self          , "invariant") ;
  assert (Self->is_Java_thread()  , "invariant") ;
  JavaThread * jt = (JavaThread *) Self ;
  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
  assert (jt->thread_state() != _thread_blocked   , "invariant") ;
  assert (this->object() != NULL  , "invariant") ;
  assert (_count >= 0, "invariant") ;

  // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
  // Ensure the object-monitor relationship remains stable while there's contention.
  Atomic::inc_ptr(&_count);

  EventJavaMonitorEnter event;

  { // Change java thread status to indicate blocked on monitor enter.
    JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);

    DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
    if (JvmtiExport::should_post_monitor_contended_enter()) {
      JvmtiExport::post_monitor_contended_enter(jt, this);

      // The current thread does not yet own the monitor and does not
      // yet appear on any queues that would get it made the successor.
      // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
      // handler cannot accidentally consume an unpark() meant for the
      // ParkEvent associated with this ObjectMonitor.
    }

    OSThreadContendState osts(Self->osthread());
    ThreadBlockInVM tbivm(jt);

    Self->set_current_pending_monitor(this);

    // TODO-FIXME: change the following for(;;) loop to straight-line code.
    for (;;) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition()
      // or java_suspend_self()

      EnterI (THREAD) ;

      if (!ExitSuspendEquivalent(jt)) break ;

      //
      // We have acquired the contended monitor, but while we were
      // waiting another thread suspended us. We don't want to enter
      // the monitor while suspended because that would surprise the
      // thread that suspended us.
      //
      _recursions = 0 ;
      _succ = NULL ;
      exit (false, Self) ;

      jt->java_suspend_self();
    }
    Self->set_current_pending_monitor(NULL);
  }

  Atomic::dec_ptr(&_count);
  assert (_count >= 0, "invariant") ;
  Self->_Stalled = 0 ;

  // Must either set _recursions = 0 or ASSERT _recursions == 0.
  assert (_recursions == 0     , "invariant") ;
  assert (_owner == Self       , "invariant") ;
  assert (_succ  != Self       , "invariant") ;
  assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;

  // The thread -- now the owner -- is back in vm mode.
  // Report the glorious news via TI,DTrace and jvmstat.
  // The probe effect is non-trivial.  All the reportage occurs
  // while we hold the monitor, increasing the length of the critical
  // section.  Amdahl's parallel speedup law comes vividly into play.
  //
  // Another option might be to aggregate the events (thread local or
  // per-monitor aggregation) and defer reporting until a more opportune
  // time -- such as next time some thread encounters contention but has
  // yet to acquire the lock.  While spinning, that thread could increment
  // JVMStat counters, etc.

  DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
  if (JvmtiExport::should_post_monitor_contended_entered()) {
    JvmtiExport::post_monitor_contended_entered(jt, this);

    // The current thread already owns the monitor and is not going to
    // call park() for the remainder of the monitor enter protocol. So
    // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
    // event handler consumed an unpark() issued by the thread that
    // just exited the monitor.
  }

  if (event.should_commit()) {
    event.set_klass(((oop)this->object())->klass());
    event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid);
    event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
    event.commit();
  }

  if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
     ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
  }
}


// Caveat: TryLock() is not necessarily serializing if it returns failure.
// Callers must compensate as needed.
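//
// Return convention (a descriptive note, inferred from the body below):
//    1 -- the CAS succeeded and Self now owns the lock
//    0 -- the lock was observed to be held by some other thread
//   -1 -- the lock appeared free but the CAS lost a race (interference)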

int ObjectMonitor::TryLock (Thread * Self) {
   for (;;) {
      void * own = _owner ;
      if (own != NULL) return 0 ;
      if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
         // Either guarantee _recursions == 0 or set _recursions = 0.
         assert (_recursions == 0, "invariant") ;
         assert (_owner == Self, "invariant") ;
         // CONSIDER: set or assert that OwnerIsThread == 1
         return 1 ;
      }
      // The lock had been free momentarily, but we lost the race to the lock.
      // Interference -- the CAS failed.
      // We can either return -1 or retry.
      // Retry doesn't make as much sense because the lock was just acquired.
      if (true) return -1 ;
   }
}

void ATTR ObjectMonitor::EnterI (TRAPS) {
    Thread * Self = THREAD ;
    assert (Self->is_Java_thread(), "invariant") ;
    assert (((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant") ;

    // Try the lock - TATAS
    if (TryLock (Self) > 0) {
        assert (_succ != Self              , "invariant") ;
        assert (_owner == Self             , "invariant") ;
        assert (_Responsible != Self       , "invariant") ;
        return ;
    }

    DeferredInitialize () ;

    // We try one round of spinning *before* enqueueing Self.
    //
    // If the _owner is ready but OFFPROC we could use a YieldTo()
    // operation to donate the remainder of this thread's quantum
    // to the owner.  This has subtle but beneficial affinity
    // effects.

    if (TrySpin (Self) > 0) {
        assert (_owner == Self        , "invariant") ;
        assert (_succ != Self         , "invariant") ;
        assert (_Responsible != Self  , "invariant") ;
        return ;
    }

    // The Spin failed -- Enqueue and park the thread ...
    assert (_succ  != Self            , "invariant") ;
    assert (_owner != Self            , "invariant") ;
    assert (_Responsible != Self      , "invariant") ;

    // Enqueue "Self" on ObjectMonitor's _cxq.
    //
    // Node acts as a proxy for Self.
    // As an aside, if we were ever to rewrite the synchronization code mostly
    // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
    // Java objects.  This would avoid awkward lifecycle and liveness issues,
    // as well as eliminate a subset of ABA issues.
    // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
    //

    ObjectWaiter node(Self) ;
    Self->_ParkEvent->reset() ;
    node._prev   = (ObjectWaiter *) 0xBAD ;
    node.TState  = ObjectWaiter::TS_CXQ ;

    // Push "Self" onto the front of the _cxq.
    // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
    // Note that spinning tends to reduce the rate at which threads
    // enqueue and dequeue on EntryList|cxq.
    ObjectWaiter * nxt ;
    for (;;) {
        node._next = nxt = _cxq ;
        if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;

        // Interference - the CAS failed because _cxq changed.  Just retry.
        // As an optional optimization we retry the lock.
        if (TryLock (Self) > 0) {
            assert (_succ != Self         , "invariant") ;
            assert (_owner == Self        , "invariant") ;
            assert (_Responsible != Self  , "invariant") ;
            return ;
        }
    }

    // Check for cxq|EntryList edge transition to non-null.  This indicates
    // the onset of contention.  While contention persists exiting threads
    // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
    // operations revert to the faster 1-0 mode.  This enter operation may
    // interleave with (race) a concurrent 1-0 exit operation, resulting in
    // stranding, so we arrange for one of the contending threads to use a
    // timed park() operation to detect and recover from the race.  (Stranding
    // is a form of progress failure where the monitor is unlocked but all the
    // contending threads remain parked).  That is, at least one of the
    // contending threads will periodically poll _owner.  One of the contending
    // threads will become the designated "Responsible" thread.  The
    // Responsible thread uses a timed park instead of a normal indefinite park
    // operation -- it periodically wakes and checks for and recovers from
    // potential strandings admitted by 1-0 exit operations.  We need at most
    // one Responsible thread per-monitor at any given moment.  Only threads
    // on cxq|EntryList may be responsible for a monitor.
    //
    // Currently, one of the contending threads takes on the added role of "Responsible".
    // A viable alternative would be to use a dedicated "stranding checker" thread
    // that periodically iterated over all the threads (or active monitors) and unparked
    // successors where there was risk of stranding.  This would help eliminate the
    // timer scalability issues we see on some platforms as we'd only have one thread
    // -- the checker -- parked on a timer.

    if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
        // Try to assume the role of responsible thread for the monitor.
        // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
        Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
    }

    // The lock may have been released while this thread was occupied queueing
    // itself onto _cxq.  To close the race and avoid "stranding" and
    // progress-liveness failure we must resample-retry _owner before parking.
    // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
    // In this case the ST-MEMBAR is accomplished with CAS().
    //
    // TODO: Defer all thread state transitions until park-time.
    // Since state transitions are heavy and inefficient we'd like
    // to defer the state transitions until absolutely necessary,
    // and in doing so avoid some transitions ...

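    // Schematically, the duality pairs up as follows (a sketch of the
    // protocol described above; either MEMBAR may be subsumed by a CAS):
    //
    //    Entering thread (here):        Exiting thread (see ::exit()):
    //      ST  _cxq = node (via CAS)      ST  _owner = NULL
    //      MEMBAR                         MEMBAR (storeload)
    //      LD  _owner (TryLock/park)      LD  _cxq|_EntryList (wake successor)
    //
    // At least one of the two threads is guaranteed to observe the other's
    // store, which closes the window that would otherwise admit stranding.
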
    TEVENT (Inflated enter - Contention) ;
    int nWakeups = 0 ;
    int RecheckInterval = 1 ;

    for (;;) {

        if (TryLock (Self) > 0) break ;
        assert (_owner != Self, "invariant") ;

        if ((SyncFlags & 2) && _Responsible == NULL) {
           Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
        }

        // park self
        if (_Responsible == Self || (SyncFlags & 1)) {
            TEVENT (Inflated enter - park TIMED) ;
            Self->_ParkEvent->park ((jlong) RecheckInterval) ;
            // Increase the RecheckInterval, but clamp the value.
            RecheckInterval *= 8 ;
            if (RecheckInterval > 1000) RecheckInterval = 1000 ;
        } else {
            TEVENT (Inflated enter - park UNTIMED) ;
            Self->_ParkEvent->park() ;
        }

        if (TryLock(Self) > 0) break ;

        // The lock is still contested.
        // Keep a tally of the # of futile wakeups.
        // Note that the counter is not protected by a lock or updated by atomics.
        // That is by design - we trade "lossy" counters which are exposed to
        // races during updates for a lower probe effect.
        TEVENT (Inflated enter - Futile wakeup) ;
        if (ObjectMonitor::_sync_FutileWakeups != NULL) {
           ObjectMonitor::_sync_FutileWakeups->inc() ;
        }
        ++ nWakeups ;

        // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
        // We can defer clearing _succ until after the spin completes.
        // TrySpin() must tolerate being called with _succ == Self.
        // Try yet another round of adaptive spinning.
        if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ;

        // We can find that we were unpark()ed and redesignated _succ while
        // we were spinning.  That's harmless.  If we iterate and call park(),
        // park() will consume the event and return immediately and we'll
        // just spin again.  This pattern can repeat, leaving _succ to simply
        // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
        // Alternately, we can sample fired() here, and if set, forgo spinning
        // in the next iteration.

        if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
           Self->_ParkEvent->reset() ;
           OrderAccess::fence() ;
        }
        if (_succ == Self) _succ = NULL ;

        // Invariant: after clearing _succ a thread *must* retry _owner before parking.
        OrderAccess::fence() ;
    }

    // Egress :
    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
    // Normally we'll find Self on the EntryList.
    // From the perspective of the lock owner (this thread), the
    // EntryList is stable and cxq is prepend-only.
    // The head of cxq is volatile but the interior is stable.
    // In addition, Self.TState is stable.

    assert (_owner == Self      , "invariant") ;
    assert (object() != NULL    , "invariant") ;
    // I'd like to write:
    //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
    // but as we're at a safepoint that's not safe.

    UnlinkAfterAcquire (Self, &node) ;
    if (_succ == Self) _succ = NULL ;

    assert (_succ != Self, "invariant") ;
    if (_Responsible == Self) {
        _Responsible = NULL ;
        OrderAccess::fence(); // Dekker pivot-point

        // We may leave threads on cxq|EntryList without a designated
        // "Responsible" thread.  This is benign.  When this thread subsequently
        // exits the monitor it can "see" such preexisting "old" threads --
        // threads that arrived on the cxq|EntryList before the fence, above --
        // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
        // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
        // non-null and elect a new "Responsible" timer thread.
        //
        // This thread executes:
        //    ST Responsible=null; MEMBAR    (in enter epilog - here)
        //    LD cxq|EntryList               (in subsequent exit)
        //
        // Entering threads in the slow/contended path execute:
        //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
        //    The (ST cxq; MEMBAR) is accomplished with CAS().
        //
        // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
        // exit operation from floating above the ST Responsible=null.
    }

    // We've acquired ownership with CAS().
    // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
    // But since the CAS(), this thread may also have stored into _succ,
    // EntryList, cxq or Responsible.  These meta-data updates must be
    // visible *before* this thread subsequently drops the lock.
    // Consider what could occur if we didn't enforce this constraint --
    // STs to monitor meta-data and user-data could reorder with (become
    // visible after) the ST in exit that drops ownership of the lock.
    // Some other thread could then acquire the lock, but observe inconsistent
    // or old monitor meta-data and heap data.  That violates the JMM.
    // To that end, the 1-0 exit() operation must have at least STST|LDST
    // "release" barrier semantics.  Specifically, there must be at least a
    // STST|LDST barrier in exit() before the ST of null into _owner that drops
    // the lock.  The barrier ensures that changes to monitor meta-data and data
    // protected by the lock will be visible before we release the lock, and
    // therefore before some other thread (CPU) has a chance to acquire the lock.
    // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
    //
    // Critically, any prior STs to _succ or EntryList must be visible before
    // the ST of null into _owner in the *subsequent* (following) corresponding
    // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
    // execute a serializing instruction.
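    //
    // As a loose analogy only -- this code predates and does not use C++11
    // atomics -- the required exit-side ordering corresponds to:
    //
    //   _owner.store(NULL, std::memory_order_release);        // 1-0 drop
    //   std::atomic_thread_fence(std::memory_order_seq_cst);  // ST:MEMBAR:LD
    //   // ... then load _EntryList|_cxq to decide on waking a successor.
    //
    // The release store supplies the STST|LDST barrier discussed above; the
    // full fence supplies the storeload ordering used while contention persists.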

    if (SyncFlags & 8) {
       OrderAccess::fence() ;
    }
    return ;
}

// ReenterI() is a specialized inline form of the latter half of the
// contended slow-path from EnterI().  We use ReenterI() only for
// monitor reentry in wait().
//
// In the future we should reconcile EnterI() and ReenterI(), adding
// Knob_ResetEvent and Knob_SpinAfterFutile support and restructuring the
// loop accordingly.

void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
    assert (Self != NULL                , "invariant") ;
    assert (SelfNode != NULL            , "invariant") ;
    assert (SelfNode->_thread == Self   , "invariant") ;
    assert (_waiters > 0                , "invariant") ;
    assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
    assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
    JavaThread * jt = (JavaThread *) Self ;

    int nWakeups = 0 ;
    for (;;) {
        ObjectWaiter::TStates v = SelfNode->TState ;
        guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
        assert    (_owner != Self, "invariant") ;

        if (TryLock (Self) > 0) break ;
        if (TrySpin (Self) > 0) break ;

        TEVENT (Wait Reentry - parking) ;

        // State transition wrappers around park() ...
        // ReenterI() wisely defers state transitions until
        // it's clear we must park the thread.
        {
           OSThreadContendState osts(Self->osthread());
           ThreadBlockInVM tbivm(jt);

           // cleared by handle_special_suspend_equivalent_condition()
           // or java_suspend_self()
           jt->set_suspend_equivalent();
           if (SyncFlags & 1) {
              Self->_ParkEvent->park ((jlong)1000) ;
           } else {
              Self->_ParkEvent->park () ;
           }

           // were we externally suspended while we were waiting?
           for (;;) {
              if (!ExitSuspendEquivalent (jt)) break ;
              if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
              jt->java_suspend_self();
              jt->set_suspend_equivalent();
           }
        }

        // Try again, but just so we distinguish between futile wakeups and
        // successful wakeups.  The following test isn't algorithmically
        // necessary, but it helps us maintain sensible statistics.
        if (TryLock(Self) > 0) break ;

        // The lock is still contested.
        // Keep a tally of the # of futile wakeups.
        // Note that the counter is not protected by a lock or updated by atomics.
        // That is by design - we trade "lossy" counters which are exposed to
        // races during updates for a lower probe effect.
        TEVENT (Wait Reentry - futile wakeup) ;
        ++ nWakeups ;

        // Assuming this is not a spurious wakeup we'll normally
        // find that _succ == Self.
        if (_succ == Self) _succ = NULL ;

        // Invariant: after clearing _succ a contending thread
        // *must* retry _owner before parking.
        OrderAccess::fence() ;

        if (ObjectMonitor::_sync_FutileWakeups != NULL) {
          ObjectMonitor::_sync_FutileWakeups->inc() ;
        }
    }

    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
    // Normally we'll find Self on the EntryList.
    // Unlinking from the EntryList is constant-time and atomic-free.
    // From the perspective of the lock owner (this thread), the
    // EntryList is stable and cxq is prepend-only.
    // The head of cxq is volatile but the interior is stable.
    // In addition, Self.TState is stable.

    assert (_owner == Self, "invariant") ;
    assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
    UnlinkAfterAcquire (Self, SelfNode) ;
    if (_succ == Self) _succ = NULL ;
    assert (_succ != Self, "invariant") ;
    SelfNode->TState = ObjectWaiter::TS_RUN ;
    OrderAccess::fence() ;      // see comments at the end of EnterI()
}

// By convention we unlink a contending thread from EntryList|cxq immediately
// after the thread acquires the lock in ::enter().  Equally, we could defer
// unlinking the thread until ::exit()-time.

void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
{
    assert (_owner == Self, "invariant") ;
    assert (SelfNode->_thread == Self, "invariant") ;

    if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
        // Normal case: remove Self from the DLL EntryList.
        // This is a constant-time operation.
        ObjectWaiter * nxt = SelfNode->_next ;
        ObjectWaiter * prv = SelfNode->_prev ;
        if (nxt != NULL) nxt->_prev = prv ;
        if (prv != NULL) prv->_next = nxt ;
        if (SelfNode == _EntryList ) _EntryList = nxt ;
        assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ;
        assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ;
        TEVENT (Unlink from EntryList) ;
    } else {
        guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ;
        // Inopportune interleaving -- Self is still on the cxq.
        // This usually means the enqueue of Self raced an exiting thread.
        // Normally we'll find Self near the front of the cxq, so
        // dequeueing is typically fast.  If need be we can accelerate
        // this with some MCS/CHL-like bidirectional list hints and advisory
        // back-links so dequeueing from the interior will normally operate
        // in constant-time.
        // Dequeue Self from either the head (with CAS) or from the interior
        // with a linear-time scan and normal non-atomic memory operations.
        // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
        // and then unlink Self from EntryList.  We have to drain eventually,
        // so it might as well be now.

        ObjectWaiter * v = _cxq ;
        assert (v != NULL, "invariant") ;
        if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
            // The CAS above can fail from interference IFF a "RAT" arrived.
            // In that case Self must be in the interior and can no longer be
            // at the head of cxq.
            if (v == SelfNode) {
                assert (_cxq != v, "invariant") ;
                v = _cxq ;          // CAS above failed - start scan at head of list
            }
            ObjectWaiter * p ;
            ObjectWaiter * q = NULL ;
            for (p = v ; p != NULL && p != SelfNode; p = p->_next) {
                q = p ;
                assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ;
            }
            assert (v != SelfNode,  "invariant") ;
            assert (p == SelfNode,  "Node not found on cxq") ;
            assert (p != _cxq,      "invariant") ;
            assert (q != NULL,      "invariant") ;
            assert (q->_next == p,  "invariant") ;
            q->_next = p->_next ;
        }
        TEVENT (Unlink from cxq) ;
    }

    // Diagnostic hygiene ...
    SelfNode->_prev  = (ObjectWaiter *) 0xBAD ;
    SelfNode->_next  = (ObjectWaiter *) 0xBAD ;
    SelfNode->TState = ObjectWaiter::TS_RUN ;
}

// -----------------------------------------------------------------------------
// Exit support
//
// exit()
// ~~~~~~
// Note that the collector can't reclaim the objectMonitor or deflate
// the object out from underneath the thread calling ::exit() as the
// thread calling ::exit() never transitions to a stable state.
// This inhibits GC, which in turn inhibits asynchronous (and
// inopportune) reclamation of "this".
//
// We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
// There's one exception to the claim above, however.  EnterI() can call
// exit() to drop a lock if the acquirer has been externally suspended.
// In that case exit() is called with _thread_state as _thread_blocked,
// but the monitor's _count field is > 0, which inhibits reclamation.
//
// 1-0 exit
// ~~~~~~~~
// ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
// the fast-path operators have been optimized so the common ::exit()
// operation is 1-0.  See i486.ad fast_unlock(), for instance.
// The code emitted by fast_unlock() elides the usual MEMBAR.  This
// greatly improves latency -- MEMBAR and CAS having considerable local
// latency on modern processors -- but at the cost of "stranding".  Absent the
// MEMBAR, a thread in fast_unlock() can race a thread in the slow
// ::enter() path, resulting in the entering thread being stranded
// and a progress-liveness failure.  Stranding is extremely rare.
// We use timers (timed park operations) & periodic polling to detect
// and recover from stranding.  Potentially stranded threads periodically
// wake up and poll the lock.  See the usage of the _Responsible variable.
//
// The CAS() in enter provides for safety and exclusion, while the CAS or
// MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
// eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
// We detect and recover from stranding with timers.
//
// If a thread transiently strands it'll park until (a) another
// thread acquires the lock and then drops the lock, at which time the
// exiting thread will notice and unpark the stranded thread, or, (b)
// the timer expires.  If the lock is high traffic then the stranding latency
// will be low due to (a).  If the lock is low traffic then the odds of
// stranding are lower, although the worst-case stranding latency
// is longer.  Critically, we don't want to put excessive load in the
// platform's timer subsystem.  We want to minimize both the timer injection
// rate (timers created/sec) as well as the number of timers active at
// any one time.  (more precisely, we want to minimize timer-seconds, which is
// the integral of the # of active timers at any instant over time).
// Both impinge on OS scalability.  Given that, at most one thread parked on
// a monitor will use a timer.
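//
// A concrete stranding schedule the timers guard against (a sketch; T1 is
// exiting via the MEMBAR-free 1-0 fast path, T2 is entering):
//
//   T1: ST  _owner = NULL              // no MEMBAR in fast_unlock()
//   T2: CAS-push self onto _cxq
//   T2: LD  _owner                     // observes a stale non-null value
//   T2: park()                         // parks although the lock is free
//   T1: LD  _cxq|_EntryList            // load may reorder before the ST: sees null
//
// Both threads are now passive even though the monitor is unowned.  The
// Responsible thread's timed park() eventually expires, repolls _owner,
// and recovers.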

void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) {
   Thread * Self = THREAD ;
   if (THREAD != _owner) {
     if (THREAD->is_lock_owned((address) _owner)) {
       // Transmute _owner from a BasicLock pointer to a Thread address.
       // We don't need to hold _mutex for this transition.
       // Non-null to Non-null is safe as long as all readers can
       // tolerate either flavor.
       assert (_recursions == 0, "invariant") ;
       _owner = THREAD ;
       _recursions = 0 ;
       OwnerIsThread = 1 ;
     } else {
       // NOTE: we need to handle unbalanced monitor enter/exit
       // in native code by throwing an exception.
       // TODO: Throw an IllegalMonitorStateException ?
       TEVENT (Exit - Throw IMSX) ;
       assert(false, "Non-balanced monitor enter/exit!");
       if (false) {
          THROW(vmSymbols::java_lang_IllegalMonitorStateException());
       }
       return;
     }
   }

   if (_recursions != 0) {
     _recursions--;        // this is simple recursive enter
     TEVENT (Inflated exit - recursive) ;
     return ;
   }

   // Invariant: after setting Responsible=null a thread must execute
   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
   if ((SyncFlags & 4) == 0) {
      _Responsible = NULL ;
   }

#if INCLUDE_TRACE
   // get the owner's thread id for the MonitorEnter event
   // if it is enabled and the thread isn't suspended
   if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
     _previous_owner_tid = SharedRuntime::get_java_tid(Self);
   }
#endif

   for (;;) {
      assert (THREAD == _owner, "invariant") ;


      if (Knob_ExitPolicy == 0) {
         // release semantics: prior loads and stores from within the critical section
         // must not float (reorder) past the following store that drops the lock.
         // On SPARC that requires MEMBAR #loadstore|#storestore.
         // But of course in TSO #loadstore|#storestore is not required.
         // I'd like to write one of the following:
         // A.  OrderAccess::release() ; _owner = NULL
         // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
         // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
         // store into a _dummy variable.  That store is not needed, but can result
         // in massive wasteful coherency traffic on classic SMP systems.
         // Instead, I use release_store(), which is implemented as just a simple
         // ST on x64, x86 and SPARC.
         OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
         OrderAccess::storeload() ;                         // See if we need to wake a successor
         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
            TEVENT (Inflated exit - simple egress) ;
            return ;
         }
         TEVENT (Inflated exit - complex egress) ;

         // Normally the exiting thread is responsible for ensuring succession,
         // but if other successors are ready or other entering threads are spinning
         // then this thread can simply store NULL into _owner and exit without
         // waking a successor.  The existence of spinners or ready successors
         // guarantees proper succession (liveness).  Responsibility passes to the
         // ready or running successors.  The exiting thread delegates the duty.
         // More precisely, if a successor already exists this thread is absolved
         // of the responsibility of waking (unparking) one.
         //
         // The _succ variable is critical to reducing futile wakeup frequency.
         // _succ identifies the "heir presumptive" thread that has been made
         // ready (unparked) but that has not yet run.  We need only one such
         // successor thread to guarantee progress.
         // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
         // section 3.3 "Futile Wakeup Throttling" for details.
         //
         // Note that spinners in Enter() also set _succ non-null.
         // In the current implementation spinners opportunistically set
         // _succ so that exiting threads might avoid waking a successor.
         // Another less appealing alternative would be for the exiting thread
         // to drop the lock and then spin briefly to see if a spinner managed
         // to acquire the lock.  If so, the exiting thread could exit
         // immediately without waking a successor, otherwise the exiting
         // thread would need to dequeue and wake a successor.
         // (Note that we'd need to make the post-drop spin short, but no
         // shorter than the worst-case round-trip cache-line migration time.
         // The dropped lock needs to become visible to the spinner, and then
         // the acquisition of the lock by the spinner must become visible to
         // the exiting thread).
         //

         // It appears that an heir-presumptive (successor) must be made ready.
         // Only the current lock owner can manipulate the EntryList or
         // drain _cxq, so we need to reacquire the lock.  If we fail
         // to reacquire the lock the responsibility for ensuring succession
         // falls to the new owner.
         //
         if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
            return ;
         }
         TEVENT (Exit - Reacquired) ;
      } else {
         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
            OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
            OrderAccess::storeload() ;
            // Ratify the previously observed values.
            if (_cxq == NULL || _succ != NULL) {
                TEVENT (Inflated exit - simple egress) ;
                return ;
            }

            // inopportune interleaving -- the exiting thread (this thread)
            // in the fast-exit path raced an entering thread in the slow-enter
            // path.
            // We have two choices:
            // A.  Try to reacquire the lock.
            //     If the CAS() fails return immediately, otherwise
            //     we either restart/rerun the exit operation, or simply
            //     fall-through into the code below which wakes a successor.
            // B.  If the elements forming the EntryList|cxq are TSM
            //     we could simply unpark() the lead thread and return
            //     without having set _succ.
            if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
               TEVENT (Inflated exit - reacquire failed) ;
               return ;
            }
            TEVENT (Inflated exit - reacquire succeeded) ;
         } else {
            TEVENT (Inflated exit - complex egress) ;
         }
      }
1086 
1087       guarantee (_owner == THREAD, "invariant") ;
1088 
1089       ObjectWaiter * w = NULL ;
1090       int QMode = Knob_QMode ;
1091 
1092       if (QMode == 2 && _cxq != NULL) {
1093           // QMode == 2 : cxq has precedence over EntryList.
1094           // Try to directly wake a successor from the cxq.
1095           // If successful, the successor will need to unlink itself from cxq.
1096           w = _cxq ;
1097           assert (w != NULL, "invariant") ;
1098           assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
1099           ExitEpilog (Self, w) ;
1100           return ;
1101       }
1102 
1103       if (QMode == 3 && _cxq != NULL) {
1104           // Aggressively drain cxq into EntryList at the first opportunity.
1105           // This policy ensure that recently-run threads live at the head of EntryList.
1106           // Drain _cxq into EntryList - bulk transfer.
1107           // First, detach _cxq.
1108           // The following loop is tantamount to: w = swap (&cxq, NULL)
1109           w = _cxq ;
1110           for (;;) {
1111              assert (w != NULL, "Invariant") ;
1112              ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
1113              if (u == w) break ;
1114              w = u ;
1115           }
1116           assert (w != NULL              , "invariant") ;
1117 
1118           ObjectWaiter * q = NULL ;
1119           ObjectWaiter * p ;
1120           for (p = w ; p != NULL ; p = p->_next) {
1121               guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
1122               p->TState = ObjectWaiter::TS_ENTER ;
1123               p->_prev = q ;
1124               q = p ;
1125           }
1126 
1127           // Append the RATs to the EntryList
1128           // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
1129           ObjectWaiter * Tail ;
1130           for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
1131           if (Tail == NULL) {
1132               _EntryList = w ;
1133           } else {
1134               Tail->_next = w ;
1135               w->_prev = Tail ;
1136           }
1137 
1138           // Fall thru into code that tries to wake a successor from EntryList
1139       }
1140 
1141       if (QMode == 4 && _cxq != NULL) {
1142           // Aggressively drain cxq into EntryList at the first opportunity.
          // This policy ensures that recently-run threads live at the head of EntryList.
1144 
1145           // Drain _cxq into EntryList - bulk transfer.
1146           // First, detach _cxq.
1147           // The following loop is tantamount to: w = swap (&cxq, NULL)
1148           w = _cxq ;
1149           for (;;) {
1150              assert (w != NULL, "Invariant") ;
1151              ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
1152              if (u == w) break ;
1153              w = u ;
1154           }
1155           assert (w != NULL              , "invariant") ;
1156 
1157           ObjectWaiter * q = NULL ;
1158           ObjectWaiter * p ;
1159           for (p = w ; p != NULL ; p = p->_next) {
1160               guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
1161               p->TState = ObjectWaiter::TS_ENTER ;
1162               p->_prev = q ;
1163               q = p ;
1164           }
1165 
1166           // Prepend the RATs to the EntryList
1167           if (_EntryList != NULL) {
1168               q->_next = _EntryList ;
1169               _EntryList->_prev = q ;
1170           }
1171           _EntryList = w ;
1172 
1173           // Fall thru into code that tries to wake a successor from EntryList
1174       }
1175 
1176       w = _EntryList  ;
1177       if (w != NULL) {
1178           // I'd like to write: guarantee (w->_thread != Self).
1179           // But in practice an exiting thread may find itself on the EntryList.
          // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
          // then calls exit().  Exit releases the lock by setting O._owner to NULL.
          // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
          // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
          // releases the lock "O".  T2 resumes immediately after the ST of null into
1185           // _owner, above.  T2 notices that the EntryList is populated, so it
1186           // reacquires the lock and then finds itself on the EntryList.
1187           // Given all that, we have to tolerate the circumstance where "w" is
1188           // associated with Self.
1189           assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1190           ExitEpilog (Self, w) ;
1191           return ;
1192       }
1193 
1194       // If we find that both _cxq and EntryList are null then just
1195       // re-run the exit protocol from the top.
1196       w = _cxq ;
1197       if (w == NULL) continue ;
1198 
1199       // Drain _cxq into EntryList - bulk transfer.
1200       // First, detach _cxq.
1201       // The following loop is tantamount to: w = swap (&cxq, NULL)
1202       for (;;) {
1203           assert (w != NULL, "Invariant") ;
1204           ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
1205           if (u == w) break ;
1206           w = u ;
1207       }
1208       TEVENT (Inflated exit - drain cxq into EntryList) ;
1209 
1210       assert (w != NULL              , "invariant") ;
1211       assert (_EntryList  == NULL    , "invariant") ;
1212 
1213       // Convert the LIFO SLL anchored by _cxq into a DLL.
1214       // The list reorganization step operates in O(LENGTH(w)) time.
1215       // It's critical that this step operate quickly as
1216       // "Self" still holds the outer-lock, restricting parallelism
1217       // and effectively lengthening the critical section.
1218       // Invariant: s chases t chases u.
1219       // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
1220       // we have faster access to the tail.
1221 
1222       if (QMode == 1) {
         // QMode == 1 : drain cxq into EntryList, reversing the order of the
         // list in the process.
1225          ObjectWaiter * s = NULL ;
1226          ObjectWaiter * t = w ;
1227          ObjectWaiter * u = NULL ;
1228          while (t != NULL) {
1229              guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
1230              t->TState = ObjectWaiter::TS_ENTER ;
1231              u = t->_next ;
1232              t->_prev = u ;
1233              t->_next = s ;
1234              s = t;
1235              t = u ;
1236          }
1237          _EntryList  = s ;
1238          assert (s != NULL, "invariant") ;
1239       } else {
1240          // QMode == 0 or QMode == 2
1241          _EntryList = w ;
1242          ObjectWaiter * q = NULL ;
1243          ObjectWaiter * p ;
1244          for (p = w ; p != NULL ; p = p->_next) {
1245              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
1246              p->TState = ObjectWaiter::TS_ENTER ;
1247              p->_prev = q ;
1248              q = p ;
1249          }
1250       }
1251 
1252       // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
1253       // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
1254 
1255       // See if we can abdicate to a spinner instead of waking a thread.
1256       // A primary goal of the implementation is to reduce the
1257       // context-switch rate.
1258       if (_succ != NULL) continue;
1259 
1260       w = _EntryList  ;
1261       if (w != NULL) {
1262           guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1263           ExitEpilog (Self, w) ;
1264           return ;
1265       }
1266    }
1267 }
1268 
1269 // ExitSuspendEquivalent:
// A faster alternative to handle_special_suspend_equivalent_condition()
1271 //
1272 // handle_special_suspend_equivalent_condition() unconditionally
1273 // acquires the SR_lock.  On some platforms uncontended MutexLocker()
1274 // operations have high latency.  Note that in ::enter() we call HSSEC
1275 // while holding the monitor, so we effectively lengthen the critical sections.
1276 //
1277 // There are a number of possible solutions:
1278 //
1279 // A.  To ameliorate the problem we might also defer state transitions
1280 //     to as late as possible -- just prior to parking.
1281 //     Given that, we'd call HSSEC after having returned from park(),
1282 //     but before attempting to acquire the monitor.  This is only a
1283 //     partial solution.  It avoids calling HSSEC while holding the
1284 //     monitor (good), but it still increases successor reacquisition latency --
1285 //     the interval between unparking a successor and the time the successor
1286 //     resumes and retries the lock.  See ReenterI(), which defers state transitions.
//     If we use this technique we can also avoid the EnterI()-exit() loop
1288 //     in ::enter() where we iteratively drop the lock and then attempt
1289 //     to reacquire it after suspending.
1290 //
1291 // B.  In the future we might fold all the suspend bits into a
1292 //     composite per-thread suspend flag and then update it with CAS().
1293 //     Alternately, a Dekker-like mechanism with multiple variables
1294 //     would suffice:
1295 //       ST Self->_suspend_equivalent = false
1296 //       MEMBAR
//       LD Self->_suspend_flags
1298 //
1299 
1300 
1301 bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
1302    int Mode = Knob_FastHSSEC ;
1303    if (Mode && !jSelf->is_external_suspend()) {
1304       assert (jSelf->is_suspend_equivalent(), "invariant") ;
1305       jSelf->clear_suspend_equivalent() ;
1306       if (2 == Mode) OrderAccess::storeload() ;
1307       if (!jSelf->is_external_suspend()) return false ;
1308       // We raced a suspension -- fall thru into the slow path
1309       TEVENT (ExitSuspendEquivalent - raced) ;
1310       jSelf->set_suspend_equivalent() ;
1311    }
1312    return jSelf->handle_special_suspend_equivalent_condition() ;
1313 }
1314 
1315 
1316 void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
1317    assert (_owner == Self, "invariant") ;
1318 
1319    // Exit protocol:
   // 1. ST _succ = wakee
   // 2. membar #loadstore|#storestore;
   // 3. ST _owner = NULL
   // 4. unpark(wakee)
1324 
1325    _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ;
1326    ParkEvent * Trigger = Wakee->_event ;
1327 
1328    // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
1329    // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
1330    // out-of-scope (non-extant).
1331    Wakee  = NULL ;
1332 
1333    // Drop the lock
1334    OrderAccess::release_store_ptr (&_owner, NULL) ;
1335    OrderAccess::fence() ;                               // ST _owner vs LD in unpark()
1336 
1337    if (SafepointSynchronize::do_call_back()) {
1338       TEVENT (unpark before SAFEPOINT) ;
1339    }
1340 
1341    DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
1342    Trigger->unpark() ;
1343 
   // Maintain stats
1345    if (ObjectMonitor::_sync_Parks != NULL) {
1346       ObjectMonitor::_sync_Parks->inc() ;
1347    }
1348 }
1349 
1350 
1351 // -----------------------------------------------------------------------------
1352 // Class Loader deadlock handling.
1353 //
1354 // complete_exit exits a lock returning recursion count
1355 // complete_exit/reenter operate as a wait without waiting
1356 // complete_exit requires an inflated monitor
1357 // The _owner field is not always the Thread addr even with an
1358 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1359 // thread due to contention.
1360 intptr_t ObjectMonitor::complete_exit(TRAPS) {
1361    Thread * const Self = THREAD;
1362    assert(Self->is_Java_thread(), "Must be Java thread!");
1363    JavaThread *jt = (JavaThread *)THREAD;
1364 
1365    DeferredInitialize();
1366 
   if (THREAD != _owner) {
      if (THREAD->is_lock_owned ((address)_owner)) {
         assert(_recursions == 0, "internal state error");
         _owner = THREAD ;   /* Convert from basiclock addr to Thread addr */
         _recursions = 0 ;
         OwnerIsThread = 1 ;
      }
   }
1375 
1376    guarantee(Self == _owner, "complete_exit not owner");
1377    intptr_t save = _recursions; // record the old recursion count
1378    _recursions = 0;        // set the recursion level to be 0
1379    exit (true, Self) ;           // exit the monitor
1380    guarantee (_owner != Self, "invariant");
1381    return save;
1382 }
1383 
1384 // reenter() enters a lock and sets recursion count
1385 // complete_exit/reenter operate as a wait without waiting
1386 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1387    Thread * const Self = THREAD;
1388    assert(Self->is_Java_thread(), "Must be Java thread!");
1389    JavaThread *jt = (JavaThread *)THREAD;
1390 
1391    guarantee(_owner != Self, "reenter already owner");
1392    enter (THREAD);       // enter the monitor
1393    guarantee (_recursions == 0, "reenter recursion");
1394    _recursions = recursions;
1395    return;
1396 }
1397 
1398 
1399 // -----------------------------------------------------------------------------
1400 // A macro is used below because there may already be a pending
1401 // exception which should not abort the execution of the routines
1402 // which use this (which is why we don't put this into check_slow and
1403 // call it with a CHECK argument).
1404 
1405 #define CHECK_OWNER()                                                             \
1406   do {                                                                            \
1407     if (THREAD != _owner) {                                                       \
1408       if (THREAD->is_lock_owned((address) _owner)) {                              \
1409         _owner = THREAD ;  /* Convert from basiclock addr to Thread addr */       \
1410         _recursions = 0;                                                          \
1411         OwnerIsThread = 1 ;                                                       \
1412       } else {                                                                    \
1413         TEVENT (Throw IMSX) ;                                                     \
1414         THROW(vmSymbols::java_lang_IllegalMonitorStateException());               \
1415       }                                                                           \
1416     }                                                                             \
1417   } while (false)
1418 
// check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
1420 // TODO-FIXME: remove check_slow() -- it's likely dead.
1421 
1422 void ObjectMonitor::check_slow(TRAPS) {
1423   TEVENT (check_slow - throw IMSX) ;
1424   assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
1425   THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
1426 }
1427 
// Adjust() atomically adds "dx" to "*adr" via a CAS loop and returns the
// value observed immediately before the successful update.
static int Adjust (volatile int * adr, int dx) {
  int v ;
  for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
  return v ;
}
1433 
1434 // helper method for posting a monitor wait event
void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event,
                                            jlong notifier_tid,
                                            jlong timeout,
                                            bool timedout) {
1439   event->set_klass(((oop)this->object())->klass());
1440   event->set_timeout((TYPE_ULONG)timeout);
1441   event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
1442   event->set_notifier((TYPE_OSTHREAD)notifier_tid);
1443   event->set_timedOut((TYPE_BOOLEAN)timedout);
1444   event->commit();
1445 }
1446 
1447 // -----------------------------------------------------------------------------
1448 // Wait/Notify/NotifyAll
1449 //
1450 // Note: a subset of changes to ObjectMonitor::wait()
1451 // will need to be replicated in complete_exit above
1452 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
1453    Thread * const Self = THREAD ;
1454    assert(Self->is_Java_thread(), "Must be Java thread!");
1455    JavaThread *jt = (JavaThread *)THREAD;
1456 
1457    DeferredInitialize () ;
1458 
1459    // Throw IMSX or IEX.
1460    CHECK_OWNER();
1461 
1462    EventJavaMonitorWait event;
1463 
1464    // check for a pending interrupt
1465    if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
     // post monitor waited event.  Note that this is past-tense; we are done waiting.
1467      if (JvmtiExport::should_post_monitor_waited()) {
1468         // Note: 'false' parameter is passed here because the
1469         // wait was not timed out due to thread interrupt.
1470         JvmtiExport::post_monitor_waited(jt, this, false);
1471 
1472         // In this short circuit of the monitor wait protocol, the
1473         // current thread never drops ownership of the monitor and
1474         // never gets added to the wait queue so the current thread
1475         // cannot be made the successor. This means that the
1476         // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
1477         // consume an unpark() meant for the ParkEvent associated with
1478         // this ObjectMonitor.
1479      }
1480      if (event.should_commit()) {
1481        post_monitor_wait_event(&event, 0, millis, false);
1482      }
1483      TEVENT (Wait - Throw IEX) ;
1484      THROW(vmSymbols::java_lang_InterruptedException());
1485      return ;
1486    }
1487 
1488    TEVENT (Wait) ;
1489 
1490    assert (Self->_Stalled == 0, "invariant") ;
1491    Self->_Stalled = intptr_t(this) ;
1492    jt->set_current_waiting_monitor(this);
1493 
1494    // create a node to be put into the queue
1495    // Critically, after we reset() the event but prior to park(), we must check
1496    // for a pending interrupt.
1497    ObjectWaiter node(Self);
1498    node.TState = ObjectWaiter::TS_WAIT ;
1499    Self->_ParkEvent->reset() ;
1500    OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
1501 
   // Enter the waiting queue, which is a circular doubly linked list in this
   // case, but it could be a priority queue or any other data structure.
   // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
   // by the owner of the monitor *except* in the case where park()
   // returns because of a timeout or interrupt.  Contention is exceptionally rare
1507    // so we use a simple spin-lock instead of a heavier-weight blocking lock.
1508 
1509    Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
1510    AddWaiter (&node) ;
1511    Thread::SpinRelease (&_WaitSetLock) ;
1512 
1513    if ((SyncFlags & 4) == 0) {
1514       _Responsible = NULL ;
1515    }
1516    intptr_t save = _recursions; // record the old recursion count
1517    _waiters++;                  // increment the number of waiters
   _recursions = 0;             // set the recursion level to be 0
1519    exit (true, Self) ;                    // exit the monitor
1520    guarantee (_owner != Self, "invariant") ;
1521 
1522    // The thread is on the WaitSet list - now park() it.
1523    // On MP systems it's conceivable that a brief spin before we park
1524    // could be profitable.
1525    //
1526    // TODO-FIXME: change the following logic to a loop of the form
1527    //   while (!timeout && !interrupted && _notified == 0) park()
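   //
   // An illustrative sketch of that loop shape (the code below remains the
   // authoritative form; "timedout" and "interrupted" are hypothetical locals):
   //
   //   while (!timedout && !interrupted && node._notified == 0) {
   //      ret = (millis <= 0) ? Self->_ParkEvent->park ()
   //                          : Self->_ParkEvent->park (millis) ;
   //      timedout    = (millis > 0 && ret == OS_TIMEOUT) ;
   //      interrupted = interruptible && Thread::is_interrupted (Self, false) ;
   //   }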
1528 
1529    int ret = OS_OK ;
1530    int WasNotified = 0 ;
1531    { // State transition wrappers
1532      OSThread* osthread = Self->osthread();
1533      OSThreadWaitState osts(osthread, true);
1534      {
1535        ThreadBlockInVM tbivm(jt);
1536        // Thread is in thread_blocked state and oop access is unsafe.
1537        jt->set_suspend_equivalent();
1538 
1539        if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
1540            // Intentionally empty
1541        } else
1542        if (node._notified == 0) {
1543          if (millis <= 0) {
1544             Self->_ParkEvent->park () ;
1545          } else {
1546             ret = Self->_ParkEvent->park (millis) ;
1547          }
1548        }
1549 
1550        // were we externally suspended while we were waiting?
1551        if (ExitSuspendEquivalent (jt)) {
1552           // TODO-FIXME: add -- if succ == Self then succ = null.
1553           jt->java_suspend_self();
1554        }
1555 
1556      } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
1557 
1558 
1559      // Node may be on the WaitSet, the EntryList (or cxq), or in transition
1560      // from the WaitSet to the EntryList.
1561      // See if we need to remove Node from the WaitSet.
1562      // We use double-checked locking to avoid grabbing _WaitSetLock
1563      // if the thread is not on the wait queue.
1564      //
1565      // Note that we don't need a fence before the fetch of TState.
     // In the worst case we'll fetch an old, stale value of TS_WAIT previously
     // written by this thread. (perhaps the fetch might even be satisfied
1568      // by a look-aside into the processor's own store buffer, although given
1569      // the length of the code path between the prior ST and this load that's
1570      // highly unlikely).  If the following LD fetches a stale TS_WAIT value
1571      // then we'll acquire the lock and then re-fetch a fresh TState value.
1572      // That is, we fail toward safety.
1573 
1574      if (node.TState == ObjectWaiter::TS_WAIT) {
1575          Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ;
1576          if (node.TState == ObjectWaiter::TS_WAIT) {
1577             DequeueSpecificWaiter (&node) ;       // unlink from WaitSet
1578             assert(node._notified == 0, "invariant");
1579             node.TState = ObjectWaiter::TS_RUN ;
1580          }
1581          Thread::SpinRelease (&_WaitSetLock) ;
1582      }
1583 
     // The thread is now either off-list (TS_RUN),
1585      // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
1586      // The Node's TState variable is stable from the perspective of this thread.
1587      // No other threads will asynchronously modify TState.
1588      guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ;
1589      OrderAccess::loadload() ;
1590      if (_succ == Self) _succ = NULL ;
1591      WasNotified = node._notified ;
1592 
1593      // Reentry phase -- reacquire the monitor.
     // Re-enter the contended monitor after Object.wait().
     // Retain OBJECT_WAIT state until the re-enter successfully completes.
1596      // Thread state is thread_in_vm and oop access is again safe,
1597      // although the raw address of the object may have changed.
1598      // (Don't cache naked oops over safepoints, of course).
1599 
     // post monitor waited event.  Note that this is past-tense; we are done waiting.
1601      if (JvmtiExport::should_post_monitor_waited()) {
1602        JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
1603 
1604        if (node._notified != 0 && _succ == Self) {
1605          // In this part of the monitor wait-notify-reenter protocol it
1606          // is possible (and normal) for another thread to do a fastpath
1607          // monitor enter-exit while this thread is still trying to get
1608          // to the reenter portion of the protocol.
1609          //
1610          // The ObjectMonitor was notified and the current thread is
1611          // the successor which also means that an unpark() has already
1612          // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
1613          // consume the unpark() that was done when the successor was
1614          // set because the same ParkEvent is shared between Java
1615          // monitors and JVM/TI RawMonitors (for now).
1616          //
1617          // We redo the unpark() to ensure forward progress, i.e., we
1618          // don't want all pending threads hanging (parked) with none
1619          // entering the unlocked monitor.
1620          node._event->unpark();
1621        }
1622      }
1623 
1624      if (event.should_commit()) {
1625        post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
1626      }
1627 
1628      OrderAccess::fence() ;
1629 
1630      assert (Self->_Stalled != 0, "invariant") ;
1631      Self->_Stalled = 0 ;
1632 
1633      assert (_owner != Self, "invariant") ;
1634      ObjectWaiter::TStates v = node.TState ;
1635      if (v == ObjectWaiter::TS_RUN) {
1636          enter (Self) ;
1637      } else {
1638          guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
1639          ReenterI (Self, &node) ;
1640          node.wait_reenter_end(this);
1641      }
1642 
1643      // Self has reacquired the lock.
1644      // Lifecycle - the node representing Self must not appear on any queues.
1645      // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1646      // want residual elements associated with this thread left on any lists.
1647      guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
1648      assert    (_owner == Self, "invariant") ;
1649      assert    (_succ != Self , "invariant") ;
1650    } // OSThreadWaitState()
1651 
1652    jt->set_current_waiting_monitor(NULL);
1653 
1654    guarantee (_recursions == 0, "invariant") ;
1655    _recursions = save;     // restore the old recursion count
1656    _waiters--;             // decrement the number of waiters
1657 
1658    // Verify a few postconditions
1659    assert (_owner == Self       , "invariant") ;
1660    assert (_succ  != Self       , "invariant") ;
1661    assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
1662 
1663    if (SyncFlags & 32) {
1664       OrderAccess::fence() ;
1665    }
1666 
1667    // check if the notification happened
1668    if (!WasNotified) {
1669      // no, it could be timeout or Thread.interrupt() or both
1670      // check for interrupt event, otherwise it is timeout
1671      if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1672        TEVENT (Wait - throw IEX from epilog) ;
1673        THROW(vmSymbols::java_lang_InterruptedException());
1674      }
1675    }
1676 
   // NOTE: A spurious wakeup will be considered a timeout.
1678    // Monitor notify has precedence over thread interrupt.
1679 }
1680 
1681 
1682 // Consider:
1683 // If the lock is cool (cxq == null && succ == null) and we're on an MP system
1684 // then instead of transferring a thread from the WaitSet to the EntryList
1685 // we might just dequeue a thread from the WaitSet and directly unpark() it.
1686 
1687 void ObjectMonitor::notify(TRAPS) {
1688   CHECK_OWNER();
1689   if (_WaitSet == NULL) {
1690      TEVENT (Empty-Notify) ;
1691      return ;
1692   }
1693   DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
1694 
1695   int Policy = Knob_MoveNotifyee ;
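  // Knob_MoveNotifyee policies, as dispatched below:
  //   Policy == 0 : prepend the notifyee to the EntryList
  //   Policy == 1 : append the notifyee to the EntryList
  //   Policy == 2 : prepend the notifyee to the cxq
  //   Policy == 3 : append the notifyee to the cxq
  //   Policy >= 4 : unpark the notifyee immediately -- no queue transfer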
1696 
1697   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
1698   ObjectWaiter * iterator = DequeueWaiter() ;
1699   if (iterator != NULL) {
1700      TEVENT (Notify1 - Transfer) ;
1701      guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
1702      guarantee (iterator->_notified == 0, "invariant") ;
1703      if (Policy != 4) {
1704         iterator->TState = ObjectWaiter::TS_ENTER ;
1705      }
1706      iterator->_notified = 1 ;
1707      Thread * Self = THREAD;
1708      iterator->_notifier_tid = Self->osthread()->thread_id();
1709 
1710      ObjectWaiter * List = _EntryList ;
1711      if (List != NULL) {
1712         assert (List->_prev == NULL, "invariant") ;
1713         assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1714         assert (List != iterator, "invariant") ;
1715      }
1716 
1717      if (Policy == 0) {       // prepend to EntryList
1718          if (List == NULL) {
1719              iterator->_next = iterator->_prev = NULL ;
1720              _EntryList = iterator ;
1721          } else {
1722              List->_prev = iterator ;
1723              iterator->_next = List ;
1724              iterator->_prev = NULL ;
1725              _EntryList = iterator ;
1726         }
1727      } else
1728      if (Policy == 1) {      // append to EntryList
1729          if (List == NULL) {
1730              iterator->_next = iterator->_prev = NULL ;
1731              _EntryList = iterator ;
1732          } else {
1733             // CONSIDER:  finding the tail currently requires a linear-time walk of
1734             // the EntryList.  We can make tail access constant-time by converting to
1735             // a CDLL instead of using our current DLL.
1736             ObjectWaiter * Tail ;
1737             for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
1738             assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
1739             Tail->_next = iterator ;
1740             iterator->_prev = Tail ;
1741             iterator->_next = NULL ;
1742         }
1743      } else
     if (Policy == 2) {      // prepend to cxq
         // If the EntryList is empty simply install the notifyee there;
         // otherwise push the notifyee onto the front of the cxq.
1746          if (List == NULL) {
1747              iterator->_next = iterator->_prev = NULL ;
1748              _EntryList = iterator ;
1749          } else {
1750             iterator->TState = ObjectWaiter::TS_CXQ ;
1751             for (;;) {
1752                 ObjectWaiter * Front = _cxq ;
1753                 iterator->_next = Front ;
1754                 if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
1755                     break ;
1756                 }
1757             }
1758          }
1759      } else
1760      if (Policy == 3) {      // append to cxq
1761         iterator->TState = ObjectWaiter::TS_CXQ ;
1762         for (;;) {
1763             ObjectWaiter * Tail ;
1764             Tail = _cxq ;
1765             if (Tail == NULL) {
1766                 iterator->_next = NULL ;
1767                 if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
1768                    break ;
1769                 }
1770             } else {
1771                 while (Tail->_next != NULL) Tail = Tail->_next ;
1772                 Tail->_next = iterator ;
1773                 iterator->_prev = Tail ;
1774                 iterator->_next = NULL ;
1775                 break ;
1776             }
1777         }
1778      } else {
1779         ParkEvent * ev = iterator->_event ;
1780         iterator->TState = ObjectWaiter::TS_RUN ;
1781         OrderAccess::fence() ;
1782         ev->unpark() ;
1783      }
1784 
1785      if (Policy < 4) {
1786        iterator->wait_reenter_begin(this);
1787      }
1788 
1789      // _WaitSetLock protects the wait queue, not the EntryList.  We could
1790      // move the add-to-EntryList operation, above, outside the critical section
1791      // protected by _WaitSetLock.  In practice that's not useful.  With the
1792      // exception of  wait() timeouts and interrupts the monitor owner
1793      // is the only thread that grabs _WaitSetLock.  There's almost no contention
1794      // on _WaitSetLock so it's not profitable to reduce the length of the
1795      // critical section.
1796   }
1797 
1798   Thread::SpinRelease (&_WaitSetLock) ;
1799 
1800   if (iterator != NULL && ObjectMonitor::_sync_Notifications != NULL) {
1801      ObjectMonitor::_sync_Notifications->inc() ;
1802   }
1803 }
1804 
1805 
1806 void ObjectMonitor::notifyAll(TRAPS) {
1807   CHECK_OWNER();
1808   ObjectWaiter* iterator;
1809   if (_WaitSet == NULL) {
1810       TEVENT (Empty-NotifyAll) ;
1811       return ;
1812   }
1813   DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
1814 
1815   int Policy = Knob_MoveNotifyee ;
1816   int Tally = 0 ;
1817   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
1818 
1819   for (;;) {
1820      iterator = DequeueWaiter () ;
1821      if (iterator == NULL) break ;
1822      TEVENT (NotifyAll - Transfer1) ;
1823      ++Tally ;
1824 
1825      // Disposition - what might we do with iterator ?
1826      // a.  add it directly to the EntryList - either tail or head.
1827      // b.  push it onto the front of the _cxq.
1828      // For now we use (a).
1829 
1830      guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
1831      guarantee (iterator->_notified == 0, "invariant") ;
1832      iterator->_notified = 1 ;
1833      Thread * Self = THREAD;
1834      iterator->_notifier_tid = Self->osthread()->thread_id();
1835      if (Policy != 4) {
1836         iterator->TState = ObjectWaiter::TS_ENTER ;
1837      }
1838 
1839      ObjectWaiter * List = _EntryList ;
1840      if (List != NULL) {
1841         assert (List->_prev == NULL, "invariant") ;
1842         assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1843         assert (List != iterator, "invariant") ;
1844      }
1845 
1846      if (Policy == 0) {       // prepend to EntryList
1847          if (List == NULL) {
1848              iterator->_next = iterator->_prev = NULL ;
1849              _EntryList = iterator ;
1850          } else {
1851              List->_prev = iterator ;
1852              iterator->_next = List ;
1853              iterator->_prev = NULL ;
1854              _EntryList = iterator ;
1855         }
1856      } else
1857      if (Policy == 1) {      // append to EntryList
1858          if (List == NULL) {
1859              iterator->_next = iterator->_prev = NULL ;
1860              _EntryList = iterator ;
1861          } else {
1862             // CONSIDER:  finding the tail currently requires a linear-time walk of
1863             // the EntryList.  We can make tail access constant-time by converting to
1864             // a CDLL instead of using our current DLL.
1865             ObjectWaiter * Tail ;
1866             for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
1867             assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
1868             Tail->_next = iterator ;
1869             iterator->_prev = Tail ;
1870             iterator->_next = NULL ;
1871         }
1872      } else
1873      if (Policy == 2) {      // prepend to cxq
1875          iterator->TState = ObjectWaiter::TS_CXQ ;
1876          for (;;) {
1877              ObjectWaiter * Front = _cxq ;
1878              iterator->_next = Front ;
1879              if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
1880                  break ;
1881              }
1882          }
1883      } else
1884      if (Policy == 3) {      // append to cxq
1885         iterator->TState = ObjectWaiter::TS_CXQ ;
1886         for (;;) {
1887             ObjectWaiter * Tail ;
1888             Tail = _cxq ;
1889             if (Tail == NULL) {
1890                 iterator->_next = NULL ;
1891                 if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
1892                    break ;
1893                 }
1894             } else {
1895                 while (Tail->_next != NULL) Tail = Tail->_next ;
1896                 Tail->_next = iterator ;
1897                 iterator->_prev = Tail ;
1898                 iterator->_next = NULL ;
1899                 break ;
1900             }
1901         }
1902      } else {
1903         ParkEvent * ev = iterator->_event ;
1904         iterator->TState = ObjectWaiter::TS_RUN ;
1905         OrderAccess::fence() ;
1906         ev->unpark() ;
1907      }
1908 
1909      if (Policy < 4) {
1910        iterator->wait_reenter_begin(this);
1911      }
1912 
1913      // _WaitSetLock protects the wait queue, not the EntryList.  We could
1914      // move the add-to-EntryList operation, above, outside the critical section
1915      // protected by _WaitSetLock.  In practice that's not useful.  With the
1916      // exception of  wait() timeouts and interrupts the monitor owner
1917      // is the only thread that grabs _WaitSetLock.  There's almost no contention
1918      // on _WaitSetLock so it's not profitable to reduce the length of the
1919      // critical section.
1920   }
1921 
1922   Thread::SpinRelease (&_WaitSetLock) ;
1923 
1924   if (Tally != 0 && ObjectMonitor::_sync_Notifications != NULL) {
1925      ObjectMonitor::_sync_Notifications->inc(Tally) ;
1926   }
1927 }
1928 
1929 // -----------------------------------------------------------------------------
1930 // Adaptive Spinning Support
1931 //
1932 // Adaptive spin-then-block - rational spinning
1933 //
1934 // Note that we spin "globally" on _owner with a classic SMP-polite TATAS
1935 // algorithm.  On high order SMP systems it would be better to start with
1936 // a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
1937 // a contending thread could enqueue itself on the cxq and then spin locally
1938 // on a thread-specific variable such as its ParkEvent._Event flag.
1939 // That's left as an exercise for the reader.  Note that global spinning is
1940 // not problematic on Niagara, as the L2$ serves the interconnect and has both
1941 // low latency and massive bandwidth.
1942 //
1943 // Broadly, we can fix the spin frequency -- that is, the % of contended lock
1944 // acquisition attempts where we opt to spin --  at 100% and vary the spin count
1945 // (duration) or we can fix the count at approximately the duration of
1946 // a context switch and vary the frequency.   Of course we could also
1947 // vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
1948 // See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html.
1949 //
1950 // This implementation varies the duration "D", where D varies with
1951 // the success rate of recent spin attempts. (D is capped at approximately
1952 // length of a round-trip context switch).  The success rate for recent
1953 // spin attempts is a good predictor of the success rate of future spin
1954 // attempts.  The mechanism adapts automatically to varying critical
1955 // section length (lock modality), system load and degree of parallelism.
1956 // D is maintained per-monitor in _SpinDuration and is initialized
1957 // optimistically.  Spin frequency is fixed at 100%.
1958 //
1959 // Note that _SpinDuration is volatile, but we update it without locks
1960 // or atomics.  The code is designed so that _SpinDuration stays within
1961 // a reasonable range even in the presence of races.  The arithmetic
1962 // operations on _SpinDuration are closed over the domain of legal values,
// so at worst a race will install an older but still legal value.
// At the very worst this introduces some apparent non-determinism.
// We might spin when we shouldn't or vice-versa, but since the spin
// counts are relatively short, even in the worst case, the effect is harmless.
1967 //
// Care must be taken that a low "D" value does not become an
// absorbing state.  Transient spinning failures -- when spinning
1970 // is overall profitable -- should not cause the system to converge
1971 // on low "D" values.  We want spinning to be stable and predictable
1972 // and fairly responsive to change and at the same time we don't want
1973 // it to oscillate, become metastable, be "too" non-deterministic,
1974 // or converge on or enter undesirable stable absorbing states.
1975 //
1976 // We implement a feedback-based control system -- using past behavior
1977 // to predict future behavior.  We face two issues: (a) if the
1978 // input signal is random then the spin predictor won't provide optimal
1979 // results, and (b) if the signal frequency is too high then the control
1980 // system, which has some natural response lag, will "chase" the signal.
1981 // (b) can arise from multimodal lock hold times.  Transient preemption
1982 // can also result in apparent bimodal lock hold times.
1983 // Although sub-optimal, neither condition is particularly harmful, as
1984 // in the worst-case we'll spin when we shouldn't or vice-versa.
1985 // The maximum spin duration is rather short so the failure modes aren't bad.
// To be conservative, I've tuned the gain in the system to bias toward
1987 // _not spinning.  Relatedly, the system can sometimes enter a mode where it
1988 // "rings" or oscillates between spinning and not spinning.  This happens
1989 // when spinning is just on the cusp of profitability, however, so the
1990 // situation is not dire.  The state is benign -- there's no need to add
1991 // hysteresis control to damp the transition rate between spinning and
1992 // not spinning.
1993 //
1994 
1995 intptr_t ObjectMonitor::SpinCallbackArgument = 0 ;
1996 int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ;
1997 
1998 // Spinning: Fixed frequency (100%), vary duration
1999 
2000 
2001 int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) {
2002 
2003     // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
2004     int ctr = Knob_FixedSpin ;
2005     if (ctr != 0) {
2006         while (--ctr >= 0) {
2007             if (TryLock (Self) > 0) return 1 ;
2008             SpinPause () ;
2009         }
2010         return 0 ;
2011     }
2012 
2013     for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) {
2014       if (TryLock(Self) > 0) {
2015         // Increase _SpinDuration ...
2016         // Note that we don't clamp SpinDuration precisely at SpinLimit.
        // Raising _SpinDuration to the poverty line is key.
2018         int x = _SpinDuration ;
2019         if (x < Knob_SpinLimit) {
2020            if (x < Knob_Poverty) x = Knob_Poverty ;
2021            _SpinDuration = x + Knob_BonusB ;
2022         }
2023         return 1 ;
2024       }
2025       SpinPause () ;
2026     }
2027 
2028     // Admission control - verify preconditions for spinning
2029     //
2030     // We always spin a little bit, just to prevent _SpinDuration == 0 from
2031     // becoming an absorbing state.  Put another way, we spin briefly to
2032     // sample, just in case the system load, parallelism, contention, or lock
2033     // modality changed.
2034     //
2035     // Consider the following alternative:
2036     // Periodically set _SpinDuration = _SpinLimit and try a long/full
2037     // spin attempt.  "Periodically" might mean after a tally of
2038     // the # of failed spin attempts (or iterations) reaches some threshold.
2039     // This takes us into the realm of 1-out-of-N spinning, where we
2040     // hold the duration constant but vary the frequency.
2041 
2042     ctr = _SpinDuration  ;
2043     if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ;
2044     if (ctr <= 0) return 0 ;
2045 
2046     if (Knob_SuccRestrict && _succ != NULL) return 0 ;
2047     if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
2048        TEVENT (Spin abort - notrunnable [TOP]);
2049        return 0 ;
2050     }
2051 
2052     int MaxSpin = Knob_MaxSpinners ;
2053     if (MaxSpin >= 0) {
2054        if (_Spinner > MaxSpin) {
2055           TEVENT (Spin abort -- too many spinners) ;
2056           return 0 ;
2057        }
       // Slightly racy, but benign ...
2059        Adjust (&_Spinner, 1) ;
2060     }
2061 
2062     // We're good to spin ... spin ingress.
2063     // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
2064     // when preparing to LD...CAS _owner, etc and the CAS is likely
2065     // to succeed.
2066     int hits    = 0 ;
2067     int msk     = 0 ;
2068     int caspty  = Knob_CASPenalty ;
2069     int oxpty   = Knob_OXPenalty ;
2070     int sss     = Knob_SpinSetSucc ;
2071     if (sss && _succ == NULL ) _succ = Self ;
2072     Thread * prv = NULL ;
2073 
2074     // There are three ways to exit the following loop:
2075     // 1.  A successful spin where this thread has acquired the lock.
2076     // 2.  Spin failure with prejudice
2077     // 3.  Spin failure without prejudice
2078 
2079     while (--ctr >= 0) {
2080 
2081       // Periodic polling -- Check for pending GC
2082       // Threads may spin while they're unsafe.
2083       // We don't want spinning threads to delay the JVM from reaching
2084       // a stop-the-world safepoint or to steal cycles from GC.
2085       // If we detect a pending safepoint we abort in order that
2086       // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
2087       // this thread, if safe, doesn't steal cycles from GC.
2088       // This is in keeping with the "no loitering in runtime" rule.
2089       // We periodically check to see if there's a safepoint pending.
2090       if ((ctr & 0xFF) == 0) {
2091          if (SafepointSynchronize::do_call_back()) {
2092             TEVENT (Spin: safepoint) ;
2093             goto Abort ;           // abrupt spin egress
2094          }
2095          if (Knob_UsePause & 1) SpinPause () ;
2096 
2097          int (*scb)(intptr_t,int) = SpinCallbackFunction ;
2098          if (hits > 50 && scb != NULL) {
2099             int abend = (*scb)(SpinCallbackArgument, 0) ;
2100          }
2101       }
2102 
2103       if (Knob_UsePause & 2) SpinPause() ;
2104 
2105       // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
2106       // This is useful on classic SMP systems, but is of less utility on
2107       // N1-style CMT platforms.
2108       //
2109       // Trade-off: lock acquisition latency vs coherency bandwidth.
2110       // Lock hold times are typically short.  A histogram
2111       // of successful spin attempts shows that we usually acquire
2112       // the lock early in the spin.  That suggests we want to
2113       // sample _owner frequently in the early phase of the spin,
2114       // but then back-off and sample less frequently as the spin
      // progresses.  The back-off makes us a good citizen on big
      // SMP systems.  Oversampling _owner can consume excessive
      // coherency bandwidth.  Relatedly, if we oversample _owner we
      // can inadvertently interfere with the ST m->owner=null
      // executed by the lock owner.
2120       if (ctr & msk) continue ;
2121       ++hits ;
2122       if ((hits & 0xF) == 0) {
2123         // The 0xF, above, corresponds to the exponent.
2124         // Consider: (msk+1)|msk
2125         msk = ((msk << 2)|3) & BackOffMask ;
2126       }
2127 
2128       // Probe _owner with TATAS
2129       // If this thread observes the monitor transition or flicker
2130       // from locked to unlocked to locked, then the odds that this
2131       // thread will acquire the lock in this spin attempt go down
2132       // considerably.  The same argument applies if the CAS fails
2133       // or if we observe _owner change from one non-null value to
2134       // another non-null value.   In such cases we might abort
2135       // the spin without prejudice or apply a "penalty" to the
2136       // spin count-down variable "ctr", reducing it by 100, say.
2137 
2138       Thread * ox = (Thread *) _owner ;
2139       if (ox == NULL) {
2140          ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
2141          if (ox == NULL) {
2142             // The CAS succeeded -- this thread acquired ownership
2143             // Take care of some bookkeeping to exit spin state.
2144             if (sss && _succ == Self) {
2145                _succ = NULL ;
2146             }
2147             if (MaxSpin > 0) Adjust (&_Spinner, -1) ;
2148 
2149             // Increase _SpinDuration :
2150             // The spin was successful (profitable) so we tend toward
2151             // longer spin attempts in the future.
2152             // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
2153             // If we acquired the lock early in the spin cycle it
2154             // makes sense to increase _SpinDuration proportionally.
2155             // Note that we don't clamp SpinDuration precisely at SpinLimit.
2156             int x = _SpinDuration ;
2157             if (x < Knob_SpinLimit) {
2158                 if (x < Knob_Poverty) x = Knob_Poverty ;
2159                 _SpinDuration = x + Knob_Bonus ;
2160             }
2161             return 1 ;
2162          }
2163 
2164          // The CAS failed ... we can take any of the following actions:
2165          // * penalize: ctr -= Knob_CASPenalty
2166          // * exit spin with prejudice -- goto Abort;
2167          // * exit spin without prejudice.
2168          // * Since CAS is high-latency, retry again immediately.
2169          prv = ox ;
2170          TEVENT (Spin: cas failed) ;
2171          if (caspty == -2) break ;
2172          if (caspty == -1) goto Abort ;
2173          ctr -= caspty ;
2174          continue ;
2175       }
2176 
2177       // Did lock ownership change hands ?
2178       if (ox != prv && prv != NULL ) {
          TEVENT (spin: Owner changed) ;
2180           if (oxpty == -2) break ;
2181           if (oxpty == -1) goto Abort ;
2182           ctr -= oxpty ;
2183       }
2184       prv = ox ;
2185 
2186       // Abort the spin if the owner is not executing.
2187       // The owner must be executing in order to drop the lock.
2188       // Spinning while the owner is OFFPROC is idiocy.
2189       // Consider: ctr -= RunnablePenalty ;
2190       if (Knob_OState && NotRunnable (Self, ox)) {
2191          TEVENT (Spin abort - notrunnable);
2192          goto Abort ;
2193       }
2194       if (sss && _succ == NULL ) _succ = Self ;
2195    }
2196 
2197    // Spin failed with prejudice -- reduce _SpinDuration.
2198    // TODO: Use an AIMD-like policy to adjust _SpinDuration.
2199    // AIMD is globally stable.
2200    TEVENT (Spin failure) ;
2201    {
2202      int x = _SpinDuration ;
2203      if (x > 0) {
2204         // Consider an AIMD scheme like: x -= (x >> 3) + 100
        // This is globally stable and tends to damp the response.
2206         x -= Knob_Penalty ;
2207         if (x < 0) x = 0 ;
2208         _SpinDuration = x ;
2209      }
2210    }
2211 
2212  Abort:
2213    if (MaxSpin >= 0) Adjust (&_Spinner, -1) ;
2214    if (sss && _succ == Self) {
2215       _succ = NULL ;
2216       // Invariant: after setting succ=null a contending thread
2217       // must recheck-retry _owner before parking.  This usually happens
2218       // in the normal usage of TrySpin(), but it's safest
2219       // to make TrySpin() as foolproof as possible.
2220       OrderAccess::fence() ;
2221       if (TryLock(Self) > 0) return 1 ;
2222    }
2223    return 0 ;
2224 }
2225 
2226 // NotRunnable() -- informed spinning
2227 //
2228 // Don't bother spinning if the owner is not eligible to drop the lock.
// Peek at the owner's schedctl.sc_state and Thread._thread_state and
2230 // spin only if the owner thread is _thread_in_Java or _thread_in_vm.
2231 // The thread must be runnable in order to drop the lock in timely fashion.
2232 // If the _owner is not runnable then spinning will not likely be
2233 // successful (profitable).
2234 //
2235 // Beware -- the thread referenced by _owner could have died
// so a simple fetch from _owner->_thread_state might trap.
2237 // Instead, we use SafeFetchXX() to safely LD _owner->_thread_state.
2238 // Because of the lifecycle issues the schedctl and _thread_state values
2239 // observed by NotRunnable() might be garbage.  NotRunnable must
2240 // tolerate this and consider the observed _thread_state value
2241 // as advisory.
2242 //
2243 // Beware too, that _owner is sometimes a BasicLock address and sometimes
2244 // a thread pointer.  We differentiate the two cases with OwnerIsThread.
2245 // Alternately, we might tag the type (thread pointer vs basiclock pointer)
// with the LSB of _owner.  Another option would be to probabilistically probe
2247 // the putative _owner->TypeTag value.
2248 //
2249 // Checking _thread_state isn't perfect.  Even if the thread is
2250 // in_java it might be blocked on a page-fault or have been preempted
// and sitting on a ready/dispatch queue.  _thread_state in conjunction
2252 // with schedctl.sc_state gives us a good picture of what the
2253 // thread is doing, however.
2254 //
2255 // TODO: check schedctl.sc_state.
2256 // We'll need to use SafeFetch32() to read from the schedctl block.
2257 // See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/
2258 //
2259 // The return value from NotRunnable() is *advisory* -- the
2260 // result is based on sampling and is not necessarily coherent.
2261 // The caller must tolerate false-negative and false-positive errors.
2262 // Spinning, in general, is probabilistic anyway.
2263 
2264 
2265 int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) {
2266     // Check either OwnerIsThread or ox->TypeTag == 2BAD.
2267     if (!OwnerIsThread) return 0 ;
2268 
2269     if (ox == NULL) return 0 ;
2270 
2271     // Avoid transitive spinning ...
2272     // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
2273     // Immediately after T1 acquires L it's possible that T2, also
2274     // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
2275     // This occurs transiently after T1 acquired L but before
2276     // T1 managed to clear T1.Stalled.  T2 does not need to abort
2277     // its spin in this circumstance.
2278     intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ;
2279 
2280     if (BlockedOn == 1) return 1 ;
2281     if (BlockedOn != 0) {
2282       return BlockedOn != intptr_t(this) && _owner == ox ;
2283     }
2284 
    assert (sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant") ;
    int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ;
2287     // consider also: jst != _thread_in_Java -- but that's overspecific.
2288     return jst == _thread_blocked || jst == _thread_in_native ;
2289 }
2290 
2291 
2292 // -----------------------------------------------------------------------------
2293 // WaitSet management ...
2294 
2295 ObjectWaiter::ObjectWaiter(Thread* thread) {
2296   _next     = NULL;
2297   _prev     = NULL;
2298   _notified = 0;
2299   TState    = TS_RUN ;
2300   _thread   = thread;
2301   _event    = thread->_ParkEvent ;
2302   _active   = false;
2303   assert (_event != NULL, "invariant") ;
2304 }
2305 
2306 void ObjectWaiter::wait_reenter_begin(ObjectMonitor *mon) {
2307   JavaThread *jt = (JavaThread *)this->_thread;
2308   _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
2309 }
2310 
2311 void ObjectWaiter::wait_reenter_end(ObjectMonitor *mon) {
2312   JavaThread *jt = (JavaThread *)this->_thread;
2313   JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
2314 }
2315 
2316 inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
  assert(node != NULL, "should not add NULL node");
2318   assert(node->_prev == NULL, "node already in list");
2319   assert(node->_next == NULL, "node already in list");
2320   // put node at end of queue (circular doubly linked list)
2321   if (_WaitSet == NULL) {
2322     _WaitSet = node;
2323     node->_prev = node;
2324     node->_next = node;
2325   } else {
2326     ObjectWaiter* head = _WaitSet ;
2327     ObjectWaiter* tail = head->_prev;
2328     assert(tail->_next == head, "invariant check");
2329     tail->_next = node;
2330     head->_prev = node;
2331     node->_next = head;
2332     node->_prev = tail;
2333   }
2334 }
2335 
2336 inline ObjectWaiter* ObjectMonitor::DequeueWaiter() {
2337   // dequeue the very first waiter
2338   ObjectWaiter* waiter = _WaitSet;
2339   if (waiter) {
2340     DequeueSpecificWaiter(waiter);
2341   }
2342   return waiter;
2343 }
2344 
2345 inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
2346   assert(node != NULL, "should not dequeue NULL node");
2347   assert(node->_prev != NULL, "node already removed from list");
2348   assert(node->_next != NULL, "node already removed from list");
  // When the waiter has woken up because of an interrupt,
  // timeout, or other spurious wake-up, dequeue the
  // waiter from the waiting list.
2352   ObjectWaiter* next = node->_next;
2353   if (next == node) {
2354     assert(node->_prev == node, "invariant check");
2355     _WaitSet = NULL;
2356   } else {
2357     ObjectWaiter* prev = node->_prev;
2358     assert(prev->_next == node, "invariant check");
2359     assert(next->_prev == node, "invariant check");
2360     next->_prev = prev;
2361     prev->_next = next;
2362     if (_WaitSet == node) {
2363       _WaitSet = next;
2364     }
2365   }
2366   node->_next = NULL;
2367   node->_prev = NULL;
2368 }
2369 
2370 // -----------------------------------------------------------------------------
2371 // PerfData support
2372 PerfCounter * ObjectMonitor::_sync_ContendedLockAttempts       = NULL ;
2373 PerfCounter * ObjectMonitor::_sync_FutileWakeups               = NULL ;
2374 PerfCounter * ObjectMonitor::_sync_Parks                       = NULL ;
2375 PerfCounter * ObjectMonitor::_sync_EmptyNotifications          = NULL ;
2376 PerfCounter * ObjectMonitor::_sync_Notifications               = NULL ;
2377 PerfCounter * ObjectMonitor::_sync_PrivateA                    = NULL ;
2378 PerfCounter * ObjectMonitor::_sync_PrivateB                    = NULL ;
2379 PerfCounter * ObjectMonitor::_sync_SlowExit                    = NULL ;
2380 PerfCounter * ObjectMonitor::_sync_SlowEnter                   = NULL ;
2381 PerfCounter * ObjectMonitor::_sync_SlowNotify                  = NULL ;
2382 PerfCounter * ObjectMonitor::_sync_SlowNotifyAll               = NULL ;
2383 PerfCounter * ObjectMonitor::_sync_FailedSpins                 = NULL ;
2384 PerfCounter * ObjectMonitor::_sync_SuccessfulSpins             = NULL ;
2385 PerfCounter * ObjectMonitor::_sync_MonInCirculation            = NULL ;
2386 PerfCounter * ObjectMonitor::_sync_MonScavenged                = NULL ;
2387 PerfCounter * ObjectMonitor::_sync_Inflations                  = NULL ;
2388 PerfCounter * ObjectMonitor::_sync_Deflations                  = NULL ;
2389 PerfLongVariable * ObjectMonitor::_sync_MonExtant              = NULL ;
2390 
2391 // One-shot global initialization for the sync subsystem.
2392 // We could also defer initialization and initialize on-demand
2393 // the first time we call inflate().  Initialization would
2394 // be protected - like so many things - by the MonitorCache_lock.
2395 
void ObjectMonitor::Initialize () {
  static int InitializationCompleted = 0 ;
  assert (InitializationCompleted == 0, "invariant") ;
  InitializationCompleted = 1 ;
  if (UsePerfData) {
      EXCEPTION_MARK ;
      #define NEWPERFCOUNTER(n)   {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); }
      #define NEWPERFVARIABLE(n)  {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); }
      NEWPERFCOUNTER(_sync_Inflations) ;
      NEWPERFCOUNTER(_sync_Deflations) ;
      NEWPERFCOUNTER(_sync_ContendedLockAttempts) ;
      NEWPERFCOUNTER(_sync_FutileWakeups) ;
      NEWPERFCOUNTER(_sync_Parks) ;
      NEWPERFCOUNTER(_sync_EmptyNotifications) ;
      NEWPERFCOUNTER(_sync_Notifications) ;
      NEWPERFCOUNTER(_sync_SlowEnter) ;
      NEWPERFCOUNTER(_sync_SlowExit) ;
      NEWPERFCOUNTER(_sync_SlowNotify) ;
      NEWPERFCOUNTER(_sync_SlowNotifyAll) ;
      NEWPERFCOUNTER(_sync_FailedSpins) ;
      NEWPERFCOUNTER(_sync_SuccessfulSpins) ;
      NEWPERFCOUNTER(_sync_PrivateA) ;
      NEWPERFCOUNTER(_sync_PrivateB) ;
      NEWPERFCOUNTER(_sync_MonInCirculation) ;
      NEWPERFCOUNTER(_sync_MonScavenged) ;
      NEWPERFVARIABLE(_sync_MonExtant) ;
      #undef NEWPERFCOUNTER
      #undef NEWPERFVARIABLE
  }
}
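
// For reference, NEWPERFCOUNTER(_sync_Inflations) above expands to:
//
//   _sync_Inflations = PerfDataManager::create_counter(
//       SUN_RT, "_sync_Inflations", PerfData::U_Events, CHECK);
//
// i.e. each counter is published under the sun.rt namespace using its
// field name, where jvmstat-based tools can read it from the PerfData
// shared memory.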


// Compile-time asserts
// When possible, it's better to catch errors deterministically at
// compile-time than at runtime.  The down-side to using compile-time
// asserts is that the error message -- often something about negative
// array indices -- is opaque.

#define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @" INTPTR_FORMAT "\n", (intptr_t)tag); }

void ObjectMonitor::ctAsserts() {
  CTASSERT(offset_of (ObjectMonitor, _header) == 0);
}
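
// To see why the diagnostic is opaque: if _header ever moved away from
// offset 0, the CTASSERT above would expand (with the condition false) to
//
//   int tag[1-(2*!(0))];     // i.e. int tag[-1];
//
// and compilation fails with a negative-array-size error that never
// mentions ObjectMonitor or _header.  The printf of tag's address merely
// keeps the compiler from discarding the otherwise-unused array.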


// kvGet scans a list of NUL-separated "Key=Value" strings, terminated by
// an empty string, and returns the value associated with Key.  A bare
// "Key" entry (no '=') is treated as a boolean flag and yields "1".
static char * kvGet (char * kvList, const char * Key) {
    if (kvList == NULL) return NULL ;
    size_t n = strlen (Key) ;
    char * Search ;
    for (Search = kvList ; *Search ; Search += strlen(Search) + 1) {
        if (strncmp (Search, Key, n) == 0) {
            if (Search[n] == '=') return Search + n + 1 ;
            if (Search[n] == 0)   return (char *) "1" ;
        }
    }
    return NULL ;
}
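
// Usage sketch with hypothetical knob settings: given the doubly
// NUL-terminated list "SpinLimit=4096\0Verbose\0\0" (the form produced
// by DeferredInitialize below),
//
//   kvGet(list, "SpinLimit")  returns "4096"
//   kvGet(list, "Verbose")    returns "1"     (bare key == boolean flag)
//   kvGet(list, "QMode")      returns NULL    (key absent)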

// kvGetInt returns Key's value parsed as an integer, or Default if the
// key is absent.  strtol's base of 0 accepts decimal, octal and hex.
static int kvGetInt (char * kvList, const char * Key, int Default) {
    char * v = kvGet (kvList, Key) ;
    int rslt = v ? (int) ::strtol (v, NULL, 0) : Default ;
    if (Knob_ReportSettings && v != NULL) {
        ::printf ("  SyncKnob: %s %d(%d)\n", Key, rslt, Default) ;
        ::fflush (stdout) ;
    }
    return rslt ;
}

void ObjectMonitor::DeferredInitialize () {
  if (InitDone > 0) return ;
  if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
      // Another thread won the race; spin until it publishes InitDone == 1.
      while (InitDone != 1) ;
      return ;
  }

  // One-shot global initialization ...
  // The initialization is idempotent, so we don't need locks.
  // In the future consider doing this via os::init_2().
  // SyncKnobs consist of <Key>=<Value> pairs in the style
  // of environment variables.  Start by converting ':' to NUL.

  if (SyncKnobs == NULL) SyncKnobs = "" ;

  size_t sz = strlen (SyncKnobs) ;
  char * knobs = (char *) malloc (sz + 2) ;
  if (knobs == NULL) {
     vm_exit_out_of_memory (sz + 2, OOM_MALLOC_ERROR, "Parse SyncKnobs") ;
     guarantee (0, "invariant") ;
  }
  strcpy (knobs, SyncKnobs) ;
  knobs[sz+1] = 0 ;
  for (char * p = knobs ; *p ; p++) {
     if (*p == ':') *p = 0 ;
  }
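
  // Worked example with a hypothetical flag value:
  // -XX:SyncKnobs=SpinLimit=4096:Verbose=1 reaches this point as
  // "SpinLimit=4096:Verbose=1".  strcpy supplied the NUL at knobs[sz],
  // knobs[sz+1] added the empty string that terminates the list, and the
  // loop above rewrote each ':' to NUL, leaving
  // "SpinLimit=4096\0Verbose=1\0\0" for kvGet/kvGetInt to walk.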

  #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
  SETKNOB(ReportSettings) ;
  SETKNOB(Verbose) ;
  SETKNOB(FixedSpin) ;
  SETKNOB(SpinLimit) ;
  SETKNOB(SpinBase) ;
  SETKNOB(SpinBackOff) ;
  SETKNOB(CASPenalty) ;
  SETKNOB(OXPenalty) ;
  SETKNOB(LogSpins) ;
  SETKNOB(SpinSetSucc) ;
  SETKNOB(SuccEnabled) ;
  SETKNOB(SuccRestrict) ;
  SETKNOB(Penalty) ;
  SETKNOB(Bonus) ;
  SETKNOB(BonusB) ;
  SETKNOB(Poverty) ;
  SETKNOB(SpinAfterFutile) ;
  SETKNOB(UsePause) ;
  SETKNOB(SpinEarly) ;
  SETKNOB(OState) ;
  SETKNOB(MaxSpinners) ;
  SETKNOB(PreSpin) ;
  SETKNOB(ExitPolicy) ;
  SETKNOB(QMode) ;
  SETKNOB(ResetEvent) ;
  SETKNOB(MoveNotifyee) ;
  SETKNOB(FastHSSEC) ;
  #undef SETKNOB

  if (os::is_MP()) {
     BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
     if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
     // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
  } else {
     // Spinning is futile on a uniprocessor, so disable it entirely.
     Knob_SpinLimit = 0 ;
     Knob_SpinBase  = 0 ;
     Knob_PreSpin   = 0 ;
     Knob_FixedSpin = -1 ;
  }

  if (Knob_LogSpins == 0) {
     // With spin logging disabled, drop the counter so updates are skipped.
     ObjectMonitor::_sync_FailedSpins = NULL ;
  }

  free (knobs) ;
  OrderAccess::fence() ;   // publish the knob values before setting InitDone
  InitDone = 1 ;
}

#ifndef PRODUCT
void ObjectMonitor::verify() {
}

void ObjectMonitor::print() {
}
#endif