1 /*
   2  * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/vmSymbols.hpp"
  27 #include "jfr/jfrEvents.hpp"
  28 #include "jfr/support/jfrThreadId.hpp"
  29 #include "logging/log.hpp"
  30 #include "logging/logStream.hpp"
  31 #include "memory/allocation.inline.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "oops/markWord.hpp"
  34 #include "oops/oop.inline.hpp"
  35 #include "runtime/atomic.hpp"
  36 #include "runtime/handles.inline.hpp"
  37 #include "runtime/interfaceSupport.inline.hpp"
  38 #include "runtime/mutexLocker.hpp"
  39 #include "runtime/objectMonitor.hpp"
  40 #include "runtime/objectMonitor.inline.hpp"
  41 #include "runtime/orderAccess.hpp"
  42 #include "runtime/osThread.hpp"
  43 #include "runtime/safepointMechanism.inline.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/stubRoutines.hpp"
  46 #include "runtime/thread.inline.hpp"
  47 #include "services/threadService.hpp"
  48 #include "utilities/dtrace.hpp"
  49 #include "utilities/macros.hpp"
  50 #include "utilities/preserveException.hpp"
  51 #if INCLUDE_JFR
  52 #include "jfr/support/jfrFlush.hpp"
  53 #endif
  54 
  55 #ifdef DTRACE_ENABLED
  56 
  57 // Only bother with this argument setup if dtrace is available
  58 // TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.
  59 
  60 
  61 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  62   char* bytes = NULL;                                                      \
  63   int len = 0;                                                             \
  64   jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  65   Symbol* klassname = ((oop)obj)->klass()->name();                         \
  66   if (klassname != NULL) {                                                 \
  67     bytes = (char*)klassname->bytes();                                     \
  68     len = klassname->utf8_length();                                        \
  69   }
  70 
  71 #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  72   {                                                                        \
  73     if (DTraceMonitorProbes) {                                             \
  74       DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
  75       HOTSPOT_MONITOR_WAIT(jtid,                                           \
  76                            (monitor), bytes, len, (millis));               \
  77     }                                                                      \
  78   }
  79 
  80 #define HOTSPOT_MONITOR_contended__enter HOTSPOT_MONITOR_CONTENDED_ENTER
  81 #define HOTSPOT_MONITOR_contended__entered HOTSPOT_MONITOR_CONTENDED_ENTERED
  82 #define HOTSPOT_MONITOR_contended__exit HOTSPOT_MONITOR_CONTENDED_EXIT
  83 #define HOTSPOT_MONITOR_notify HOTSPOT_MONITOR_NOTIFY
  84 #define HOTSPOT_MONITOR_notifyAll HOTSPOT_MONITOR_NOTIFYALL
  85 
  86 #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  87   {                                                                        \
  88     if (DTraceMonitorProbes) {                                             \
  89       DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
  90       HOTSPOT_MONITOR_##probe(jtid,                                        \
  91                               (uintptr_t)(monitor), bytes, len);           \
  92     }                                                                      \
  93   }
  94 
  95 #else //  ndef DTRACE_ENABLED
  96 
  97 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
  98 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}
  99 
 100 #endif // ndef DTRACE_ENABLED
 101 
 102 // Tunables ...
 103 // The knob* variables are effectively final.  Once set they should
 104 // never be modified hence.  Consider using __read_mostly with GCC.
 105 
 106 int ObjectMonitor::Knob_SpinLimit    = 5000;    // derived by an external tool -
 107 
 108 static int Knob_Bonus               = 100;     // spin success bonus
 109 static int Knob_BonusB              = 100;     // spin success bonus
 110 static int Knob_Penalty             = 200;     // spin failure penalty
 111 static int Knob_Poverty             = 1000;
 112 static int Knob_FixedSpin           = 0;
 113 static int Knob_PreSpin             = 10;      // 20-100 likely better
 114 
 115 DEBUG_ONLY(static volatile bool InitDone = false;)
 116 
 117 // -----------------------------------------------------------------------------
 118 // Theory of operations -- Monitors lists, thread residency, etc:
 119 //
 120 // * A thread acquires ownership of a monitor by successfully
 121 //   CAS()ing the _owner field from null to non-null.
 122 //
 123 // * Invariant: A thread appears on at most one monitor list --
 124 //   cxq, EntryList or WaitSet -- at any one time.
 125 //
 126 // * Contending threads "push" themselves onto the cxq with CAS
 127 //   and then spin/park.
 128 //
 129 // * After a contending thread eventually acquires the lock it must
 130 //   dequeue itself from either the EntryList or the cxq.
 131 //
 132 // * The exiting thread identifies and unparks an "heir presumptive"
 133 //   tentative successor thread on the EntryList.  Critically, the
 134 //   exiting thread doesn't unlink the successor thread from the EntryList.
 135 //   After having been unparked, the wakee will recontend for ownership of
 136 //   the monitor.   The successor (wakee) will either acquire the lock or
 137 //   re-park itself.
 138 //
 139 //   Succession is provided for by a policy of competitive handoff.
 140 //   The exiting thread does _not_ grant or pass ownership to the
 141 //   successor thread.  (This is also referred to as "handoff" succession").
 142 //   Instead the exiting thread releases ownership and possibly wakes
 143 //   a successor, so the successor can (re)compete for ownership of the lock.
 144 //   If the EntryList is empty but the cxq is populated the exiting
 145 //   thread will drain the cxq into the EntryList.  It does so by
 146 //   by detaching the cxq (installing null with CAS) and folding
 147 //   the threads from the cxq into the EntryList.  The EntryList is
 148 //   doubly linked, while the cxq is singly linked because of the
 149 //   CAS-based "push" used to enqueue recently arrived threads (RATs).
 150 //
 151 // * Concurrency invariants:
 152 //
 153 //   -- only the monitor owner may access or mutate the EntryList.
 154 //      The mutex property of the monitor itself protects the EntryList
 155 //      from concurrent interference.
 156 //   -- Only the monitor owner may detach the cxq.
 157 //
 158 // * The monitor entry list operations avoid locks, but strictly speaking
 159 //   they're not lock-free.  Enter is lock-free, exit is not.
 160 //   For a description of 'Methods and apparatus providing non-blocking access
 161 //   to a resource,' see U.S. Pat. No. 7844973.
 162 //
 163 // * The cxq can have multiple concurrent "pushers" but only one concurrent
 164 //   detaching thread.  This mechanism is immune from the ABA corruption.
 165 //   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
 166 //
 167 // * Taken together, the cxq and the EntryList constitute or form a
 168 //   single logical queue of threads stalled trying to acquire the lock.
 169 //   We use two distinct lists to improve the odds of a constant-time
 170 //   dequeue operation after acquisition (in the ::enter() epilogue) and
 171 //   to reduce heat on the list ends.  (c.f. Michael Scott's "2Q" algorithm).
 172 //   A key desideratum is to minimize queue & monitor metadata manipulation
 173 //   that occurs while holding the monitor lock -- that is, we want to
 174 //   minimize monitor lock holds times.  Note that even a small amount of
 175 //   fixed spinning will greatly reduce the # of enqueue-dequeue operations
 176 //   on EntryList|cxq.  That is, spinning relieves contention on the "inner"
 177 //   locks and monitor metadata.
 178 //
 179 //   Cxq points to the set of Recently Arrived Threads attempting entry.
 180 //   Because we push threads onto _cxq with CAS, the RATs must take the form of
 181 //   a singly-linked LIFO.  We drain _cxq into EntryList  at unlock-time when
 182 //   the unlocking thread notices that EntryList is null but _cxq is != null.
 183 //
 184 //   The EntryList is ordered by the prevailing queue discipline and
 185 //   can be organized in any convenient fashion, such as a doubly-linked list or
 186 //   a circular doubly-linked list.  Critically, we want insert and delete operations
 187 //   to operate in constant-time.  If we need a priority queue then something akin
 188 //   to Solaris' sleepq would work nicely.  Viz.,
 189 //   http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
 190 //   Queue discipline is enforced at ::exit() time, when the unlocking thread
 191 //   drains the cxq into the EntryList, and orders or reorders the threads on the
 192 //   EntryList accordingly.
 193 //
 194 //   Barring "lock barging", this mechanism provides fair cyclic ordering,
 195 //   somewhat similar to an elevator-scan.
 196 //
 197 // * The monitor synchronization subsystem avoids the use of native
 198 //   synchronization primitives except for the narrow platform-specific
 199 //   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
 200 //   the semantics of park-unpark.  Put another way, this monitor implementation
 201 //   depends only on atomic operations and park-unpark.  The monitor subsystem
 202 //   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
 203 //   underlying OS manages the READY<->RUN transitions.
 204 //
 205 // * Waiting threads reside on the WaitSet list -- wait() puts
 206 //   the caller onto the WaitSet.
 207 //
 208 // * notify() or notifyAll() simply transfers threads from the WaitSet to
 209 //   either the EntryList or cxq.  Subsequent exit() operations will
 210 //   unpark the notifyee.  Unparking a notifee in notify() is inefficient -
 211 //   it's likely the notifyee would simply impale itself on the lock held
 212 //   by the notifier.
 213 //
 214 // * An interesting alternative is to encode cxq as (List,LockByte) where
 215 //   the LockByte is 0 iff the monitor is owned.  _owner is simply an auxiliary
 216 //   variable, like _recursions, in the scheme.  The threads or Events that form
 217 //   the list would have to be aligned in 256-byte addresses.  A thread would
 218 //   try to acquire the lock or enqueue itself with CAS, but exiting threads
 219 //   could use a 1-0 protocol and simply STB to set the LockByte to 0.
 220 //   Note that is is *not* word-tearing, but it does presume that full-word
 221 //   CAS operations are coherent with intermix with STB operations.  That's true
 222 //   on most common processors.
 223 //
 224 // * See also http://blogs.sun.com/dave
 225 
 226 
 227 void* ObjectMonitor::operator new (size_t size) throw() {
 228   return AllocateHeap(size, mtInternal);
 229 }
 230 void* ObjectMonitor::operator new[] (size_t size) throw() {
 231   return operator new (size);
 232 }
 233 void ObjectMonitor::operator delete(void* p) {
 234   FreeHeap(p);
 235 }
 236 void ObjectMonitor::operator delete[] (void *p) {
 237   operator delete(p);
 238 }
 239 
 240 // -----------------------------------------------------------------------------
 241 // Enter support
 242 
 243 bool ObjectMonitor::enter(TRAPS) {
 244   // The following code is ordered to check the most common cases first
 245   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 246   Thread * const Self = THREAD;
 247 
 248   void* cur = try_set_owner_from(NULL, Self);
 249   if (cur == NULL) {
 250     assert(_recursions == 0, "invariant");
 251     return true;
 252   }
 253 
 254   if (cur == Self) {
 255     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 256     _recursions++;
 257     return true;
 258   }
 259 
 260   if (Self->is_lock_owned((address)cur)) {
 261     assert(_recursions == 0, "internal state error");
 262     _recursions = 1;
 263     set_owner_from_BasicLock(cur, Self);  // Convert from BasicLock* to Thread*.
 264     return true;
 265   }
 266 
 267   // We've encountered genuine contention.
 268   assert(Self->_Stalled == 0, "invariant");
 269   Self->_Stalled = intptr_t(this);
 270 
 271   // Try one round of spinning *before* enqueueing Self
 272   // and before going through the awkward and expensive state
 273   // transitions.  The following spin is strictly optional ...
 274   // Note that if we acquire the monitor from an initial spin
 275   // we forgo posting JVMTI events and firing DTRACE probes.
 276   if (TrySpin(Self) > 0) {
 277     assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
 278     assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);
 279     assert(((oop)object())->mark() == markWord::encode(this),
 280            "object mark must match encoded this: mark=" INTPTR_FORMAT
 281            ", encoded this=" INTPTR_FORMAT, ((oop)object())->mark().value(),
 282            markWord::encode(this).value());
 283     Self->_Stalled = 0;
 284     return true;
 285   }
 286 
 287   assert(_owner != Self, "invariant");
 288   assert(_succ != Self, "invariant");
 289   assert(Self->is_Java_thread(), "invariant");
 290   JavaThread * jt = (JavaThread *) Self;
 291   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 292   assert(jt->thread_state() != _thread_blocked, "invariant");
 293 
 294   // Keep track of contention for JVM/TI and M&M queries.
 295   add_to_contentions(1);
 296   if (is_being_async_deflated()) {
 297     // Async deflation is in progress and our contentions increment
 298     // above lost the race to async deflation. Undo the work and
 299     // force the caller to retry.
 300     const oop l_object = (oop)object();
 301     if (l_object != NULL) {
 302       // Attempt to restore the header/dmw to the object's header so that
 303       // we only retry once if the deflater thread happens to be slow.
 304       install_displaced_markword_in_object(l_object);
 305     }
 306     Self->_Stalled = 0;
 307     add_to_contentions(-1);
 308     return false;
 309   }
 310 
 311   JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
 312   EventJavaMonitorEnter event;
 313   if (event.should_commit()) {
 314     event.set_monitorClass(((oop)this->object())->klass());
 315     event.set_address((uintptr_t)(this->object_addr()));
 316   }
 317 
 318   { // Change java thread status to indicate blocked on monitor enter.
 319     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 320 
 321     Self->set_current_pending_monitor(this);
 322 
 323     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
 324     if (JvmtiExport::should_post_monitor_contended_enter()) {
 325       JvmtiExport::post_monitor_contended_enter(jt, this);
 326 
 327       // The current thread does not yet own the monitor and does not
 328       // yet appear on any queues that would get it made the successor.
 329       // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
 330       // handler cannot accidentally consume an unpark() meant for the
 331       // ParkEvent associated with this ObjectMonitor.
 332     }
 333 
 334     OSThreadContendState osts(Self->osthread());
 335     ThreadBlockInVM tbivm(jt);
 336 
 337     // TODO-FIXME: change the following for(;;) loop to straight-line code.
 338     for (;;) {
 339       jt->set_suspend_equivalent();
 340       // cleared by handle_special_suspend_equivalent_condition()
 341       // or java_suspend_self()
 342 
 343       EnterI(THREAD);
 344 
 345       if (!ExitSuspendEquivalent(jt)) break;
 346 
 347       // We have acquired the contended monitor, but while we were
 348       // waiting another thread suspended us. We don't want to enter
 349       // the monitor while suspended because that would surprise the
 350       // thread that suspended us.
 351       //
 352       _recursions = 0;
 353       _succ = NULL;
 354       exit(false, Self);
 355 
 356       jt->java_suspend_self();
 357     }
 358     Self->set_current_pending_monitor(NULL);
 359 
 360     // We cleared the pending monitor info since we've just gotten past
 361     // the enter-check-for-suspend dance and we now own the monitor free
 362     // and clear, i.e., it is no longer pending. The ThreadBlockInVM
 363     // destructor can go to a safepoint at the end of this block. If we
 364     // do a thread dump during that safepoint, then this thread will show
 365     // as having "-locked" the monitor, but the OS and java.lang.Thread
 366     // states will still report that the thread is blocked trying to
 367     // acquire it.
 368   }
 369 
 370   add_to_contentions(-1);
 371   assert(contentions() >= 0, "must not be negative: contentions=%d", contentions());
 372   Self->_Stalled = 0;
 373 
 374   // Must either set _recursions = 0 or ASSERT _recursions == 0.
 375   assert(_recursions == 0, "invariant");
 376   assert(_owner == Self, "invariant");
 377   assert(_succ != Self, "invariant");
 378   assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
 379 
 380   // The thread -- now the owner -- is back in vm mode.
 381   // Report the glorious news via TI,DTrace and jvmstat.
 382   // The probe effect is non-trivial.  All the reportage occurs
 383   // while we hold the monitor, increasing the length of the critical
 384   // section.  Amdahl's parallel speedup law comes vividly into play.
 385   //
 386   // Another option might be to aggregate the events (thread local or
 387   // per-monitor aggregation) and defer reporting until a more opportune
 388   // time -- such as next time some thread encounters contention but has
 389   // yet to acquire the lock.  While spinning that thread could
 390   // spinning we could increment JVMStat counters, etc.
 391 
 392   DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
 393   if (JvmtiExport::should_post_monitor_contended_entered()) {
 394     JvmtiExport::post_monitor_contended_entered(jt, this);
 395 
 396     // The current thread already owns the monitor and is not going to
 397     // call park() for the remainder of the monitor enter protocol. So
 398     // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
 399     // event handler consumed an unpark() issued by the thread that
 400     // just exited the monitor.
 401   }
 402   if (event.should_commit()) {
 403     event.set_previousOwner((uintptr_t)_previous_owner_tid);
 404     event.commit();
 405   }
 406   OM_PERFDATA_OP(ContendedLockAttempts, inc());
 407   return true;
 408 }
 409 
 410 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 411 // Callers must compensate as needed.
 412 
 413 int ObjectMonitor::TryLock(Thread * Self) {
 414   void * own = _owner;
 415   if (own != NULL) return 0;
 416   if (try_set_owner_from(NULL, Self) == NULL) {
 417     assert(_recursions == 0, "invariant");
 418     return 1;
 419   }
 420   // The lock had been free momentarily, but we lost the race to the lock.
 421   // Interference -- the CAS failed.
 422   // We can either return -1 or retry.
 423   // Retry doesn't make as much sense because the lock was just acquired.
 424   return -1;
 425 }
 426 
 427 // Install the displaced mark word (dmw) of a deflating ObjectMonitor
 428 // into the header of the object associated with the monitor. This
 429 // idempotent method is called by a thread that is deflating a
 430 // monitor and by other threads that have detected a race with the
 431 // deflation process.
 432 void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
 433   // This function must only be called when (owner == DEFLATER_MARKER
 434   // && contentions <= 0), but we can't guarantee that here because
 435   // those values could change when the ObjectMonitor gets moved from
 436   // the global free list to a per-thread free list.
 437 
 438   guarantee(obj != NULL, "must be non-NULL");
 439 
 440   // Separate loads in is_being_async_deflated(), which is almost always
 441   // called before this function, from the load of dmw/header below.
 442   if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 443     // A non-multiple copy atomic (nMCA) machine needs a bigger
 444     // hammer to separate the loads before and the load below.
 445     OrderAccess::fence();
 446   } else {
 447     OrderAccess::loadload();
 448   }
 449 
 450   const oop l_object = (oop)object();
 451   if (l_object == NULL) {
 452     // ObjectMonitor's object ref has already been cleared by async
 453     // deflation so we're done here.
 454     return;
 455   }
 456   assert(l_object == obj, "object=" INTPTR_FORMAT " must equal obj="
 457          INTPTR_FORMAT, p2i(l_object), p2i(obj));
 458 
 459   markWord dmw = header();
 460   // The dmw has to be neutral (not NULL, not locked and not marked).
 461   assert(dmw.is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, dmw.value());
 462 
 463   // Install displaced mark word if the object's header still points
 464   // to this ObjectMonitor. More than one racing caller to this function
 465   // can rarely reach this point, but only one can win.
 466   markWord res = obj->cas_set_mark(dmw, markWord::encode(this));
 467   if (res != markWord::encode(this)) {
 468     // This should be rare so log at the Info level when it happens.
 469     log_info(monitorinflation)("install_displaced_markword_in_object: "
 470                                "failed cas_set_mark: new_mark=" INTPTR_FORMAT
 471                                ", old_mark=" INTPTR_FORMAT ", res=" INTPTR_FORMAT,
 472                                dmw.value(), markWord::encode(this).value(),
 473                                res.value());
 474   }
 475 
 476   // Note: It does not matter which thread restored the header/dmw
 477   // into the object's header. The thread deflating the monitor just
 478   // wanted the object's header restored and it is. The threads that
 479   // detected a race with the deflation process also wanted the
 480   // object's header restored before they retry their operation and
 481   // because it is restored they will only retry once.
 482 }
 483 
 484 // Convert the fields used by is_busy() to a string that can be
 485 // used for diagnostic output.
 486 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
 487   ss->print("is_busy: waiters=%d, ", _waiters);
 488   if (contentions() > 0) {
 489     ss->print("contentions=%d, ", contentions());
 490   } else {
 491     ss->print("contentions=0");
 492   }
 493   if (_owner != DEFLATER_MARKER) {
 494     ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
 495   } else {
 496     // We report NULL instead of DEFLATER_MARKER here because is_busy()
 497     // ignores DEFLATER_MARKER values.
 498     ss->print("owner=" INTPTR_FORMAT, NULL);
 499   }
 500   ss->print(", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, p2i(_cxq),
 501             p2i(_EntryList));
 502   return ss->base();
 503 }
 504 
 505 #define MAX_RECHECK_INTERVAL 1000
 506 
 507 void ObjectMonitor::EnterI(TRAPS) {
 508   Thread * const Self = THREAD;
 509   assert(Self->is_Java_thread(), "invariant");
 510   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 511 
 512   // Try the lock - TATAS
 513   if (TryLock (Self) > 0) {
 514     assert(_succ != Self, "invariant");
 515     assert(_owner == Self, "invariant");
 516     assert(_Responsible != Self, "invariant");
 517     return;
 518   }
 519 
 520   if (try_set_owner_from(DEFLATER_MARKER, Self) == DEFLATER_MARKER) {
 521     // Cancelled the in-progress async deflation by changing owner from
 522     // DEFLATER_MARKER to Self. As part of the contended enter protocol,
 523     // contentions was incremented to a positive value before EnterI()
 524     // was called and that prevents the deflater thread from winning the
 525     // last part of the 2-part async deflation protocol. After EnterI()
 526     // returns to enter(), contentions is decremented because the caller
 527     // now owns the monitor. We bump contentions an extra time here to
 528     // prevent the deflater thread from winning the last part of the
 529     // 2-part async deflation protocol after the regular decrement
 530     // occurs in enter(). The deflater thread will decrement contentions
 531     // after it recognizes that the async deflation was cancelled.
 532     add_to_contentions(1);
 533     assert(_succ != Self, "invariant");
 534     assert(_Responsible != Self, "invariant");
 535     return;
 536   }
 537 
 538   assert(InitDone, "Unexpectedly not initialized");
 539 
 540   // We try one round of spinning *before* enqueueing Self.
 541   //
 542   // If the _owner is ready but OFFPROC we could use a YieldTo()
 543   // operation to donate the remainder of this thread's quantum
 544   // to the owner.  This has subtle but beneficial affinity
 545   // effects.
 546 
 547   if (TrySpin(Self) > 0) {
 548     assert(_owner == Self, "invariant");
 549     assert(_succ != Self, "invariant");
 550     assert(_Responsible != Self, "invariant");
 551     return;
 552   }
 553 
 554   // The Spin failed -- Enqueue and park the thread ...
 555   assert(_succ != Self, "invariant");
 556   assert(_owner != Self, "invariant");
 557   assert(_Responsible != Self, "invariant");
 558 
 559   // Enqueue "Self" on ObjectMonitor's _cxq.
 560   //
 561   // Node acts as a proxy for Self.
 562   // As an aside, if were to ever rewrite the synchronization code mostly
 563   // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
 564   // Java objects.  This would avoid awkward lifecycle and liveness issues,
 565   // as well as eliminate a subset of ABA issues.
 566   // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
 567 
 568   ObjectWaiter node(Self);
 569   Self->_ParkEvent->reset();
 570   node._prev   = (ObjectWaiter *) 0xBAD;
 571   node.TState  = ObjectWaiter::TS_CXQ;
 572 
 573   // Push "Self" onto the front of the _cxq.
 574   // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
 575   // Note that spinning tends to reduce the rate at which threads
 576   // enqueue and dequeue on EntryList|cxq.
 577   ObjectWaiter * nxt;
 578   for (;;) {
 579     node._next = nxt = _cxq;
 580     if (Atomic::cmpxchg(&_cxq, nxt, &node) == nxt) break;
 581 
 582     // Interference - the CAS failed because _cxq changed.  Just retry.
 583     // As an optional optimization we retry the lock.
 584     if (TryLock (Self) > 0) {
 585       assert(_succ != Self, "invariant");
 586       assert(_owner == Self, "invariant");
 587       assert(_Responsible != Self, "invariant");
 588       return;
 589     }
 590   }
 591 
 592   // Check for cxq|EntryList edge transition to non-null.  This indicates
 593   // the onset of contention.  While contention persists exiting threads
 594   // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
 595   // operations revert to the faster 1-0 mode.  This enter operation may interleave
 596   // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
 597   // arrange for one of the contending thread to use a timed park() operations
 598   // to detect and recover from the race.  (Stranding is form of progress failure
 599   // where the monitor is unlocked but all the contending threads remain parked).
 600   // That is, at least one of the contended threads will periodically poll _owner.
 601   // One of the contending threads will become the designated "Responsible" thread.
 602   // The Responsible thread uses a timed park instead of a normal indefinite park
 603   // operation -- it periodically wakes and checks for and recovers from potential
 604   // strandings admitted by 1-0 exit operations.   We need at most one Responsible
 605   // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
 606   // be responsible for a monitor.
 607   //
 608   // Currently, one of the contended threads takes on the added role of "Responsible".
 609   // A viable alternative would be to use a dedicated "stranding checker" thread
 610   // that periodically iterated over all the threads (or active monitors) and unparked
 611   // successors where there was risk of stranding.  This would help eliminate the
 612   // timer scalability issues we see on some platforms as we'd only have one thread
 613   // -- the checker -- parked on a timer.
 614 
 615   if (nxt == NULL && _EntryList == NULL) {
 616     // Try to assume the role of responsible thread for the monitor.
 617     // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
 618     Atomic::replace_if_null(&_Responsible, Self);
 619   }
 620 
 621   // The lock might have been released while this thread was occupied queueing
 622   // itself onto _cxq.  To close the race and avoid "stranding" and
 623   // progress-liveness failure we must resample-retry _owner before parking.
 624   // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
 625   // In this case the ST-MEMBAR is accomplished with CAS().
 626   //
 627   // TODO: Defer all thread state transitions until park-time.
 628   // Since state transitions are heavy and inefficient we'd like
 629   // to defer the state transitions until absolutely necessary,
 630   // and in doing so avoid some transitions ...
 631 
 632   int nWakeups = 0;
 633   int recheckInterval = 1;
 634 
 635   for (;;) {
 636 
 637     if (TryLock(Self) > 0) break;
 638     assert(_owner != Self, "invariant");
 639 
 640     // park self
 641     if (_Responsible == Self) {
 642       Self->_ParkEvent->park((jlong) recheckInterval);
 643       // Increase the recheckInterval, but clamp the value.
 644       recheckInterval *= 8;
 645       if (recheckInterval > MAX_RECHECK_INTERVAL) {
 646         recheckInterval = MAX_RECHECK_INTERVAL;
 647       }
 648     } else {
 649       Self->_ParkEvent->park();
 650     }
 651 
 652     if (TryLock(Self) > 0) break;
 653 
 654     if (try_set_owner_from(DEFLATER_MARKER, Self) == DEFLATER_MARKER) {
 655       // Cancelled the in-progress async deflation by changing owner from
 656       // DEFLATER_MARKER to Self. As part of the contended enter protocol,
 657       // contentions was incremented to a positive value before EnterI()
 658       // was called and that prevents the deflater thread from winning the
 659       // last part of the 2-part async deflation protocol. After EnterI()
 660       // returns to enter(), contentions is decremented because the caller
 661       // now owns the monitor. We bump contentions an extra time here to
 662       // prevent the deflater thread from winning the last part of the
 663       // 2-part async deflation protocol after the regular decrement
 664       // occurs in enter(). The deflater thread will decrement contentions
 665       // after it recognizes that the async deflation was cancelled.
 666       add_to_contentions(1);
 667       break;
 668     }
 669 
 670     // The lock is still contested.
 671     // Keep a tally of the # of futile wakeups.
 672     // Note that the counter is not protected by a lock or updated by atomics.
 673     // That is by design - we trade "lossy" counters which are exposed to
 674     // races during updates for a lower probe effect.
 675 
 676     // This PerfData object can be used in parallel with a safepoint.
 677     // See the work around in PerfDataManager::destroy().
 678     OM_PERFDATA_OP(FutileWakeups, inc());
 679     ++nWakeups;
 680 
 681     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 682     // We can defer clearing _succ until after the spin completes
 683     // TrySpin() must tolerate being called with _succ == Self.
 684     // Try yet another round of adaptive spinning.
 685     if (TrySpin(Self) > 0) break;
 686 
 687     // We can find that we were unpark()ed and redesignated _succ while
 688     // we were spinning.  That's harmless.  If we iterate and call park(),
 689     // park() will consume the event and return immediately and we'll
 690     // just spin again.  This pattern can repeat, leaving _succ to simply
 691     // spin on a CPU.
 692 
 693     if (_succ == Self) _succ = NULL;
 694 
 695     // Invariant: after clearing _succ a thread *must* retry _owner before parking.
 696     OrderAccess::fence();
 697   }
 698 
 699   // Egress :
 700   // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
 701   // Normally we'll find Self on the EntryList .
 702   // From the perspective of the lock owner (this thread), the
 703   // EntryList is stable and cxq is prepend-only.
 704   // The head of cxq is volatile but the interior is stable.
 705   // In addition, Self.TState is stable.
 706 
 707   assert(_owner == Self, "invariant");
 708   assert(object() != NULL, "invariant");
 709   // I'd like to write:
 710   //   guarantee (((oop)(object()))->mark() == markWord::encode(this), "invariant") ;
 711   // but as we're at a safepoint that's not safe.
 712 
 713   UnlinkAfterAcquire(Self, &node);
 714   if (_succ == Self) _succ = NULL;
 715 
 716   assert(_succ != Self, "invariant");
 717   if (_Responsible == Self) {
 718     _Responsible = NULL;
 719     OrderAccess::fence(); // Dekker pivot-point
 720 
 721     // We may leave threads on cxq|EntryList without a designated
 722     // "Responsible" thread.  This is benign.  When this thread subsequently
 723     // exits the monitor it can "see" such preexisting "old" threads --
 724     // threads that arrived on the cxq|EntryList before the fence, above --
 725     // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
 726     // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
 727     // non-null and elect a new "Responsible" timer thread.
 728     //
 729     // This thread executes:
 730     //    ST Responsible=null; MEMBAR    (in enter epilogue - here)
 731     //    LD cxq|EntryList               (in subsequent exit)
 732     //
 733     // Entering threads in the slow/contended path execute:
 734     //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
 735     //    The (ST cxq; MEMBAR) is accomplished with CAS().
 736     //
 737     // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
 738     // exit operation from floating above the ST Responsible=null.
 739   }
 740 
 741   // We've acquired ownership with CAS().
 742   // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
 743   // But since the CAS() this thread may have also stored into _succ,
 744   // EntryList, cxq or Responsible.  These meta-data updates must be
 745   // visible __before this thread subsequently drops the lock.
 746   // Consider what could occur if we didn't enforce this constraint --
 747   // STs to monitor meta-data and user-data could reorder with (become
 748   // visible after) the ST in exit that drops ownership of the lock.
 749   // Some other thread could then acquire the lock, but observe inconsistent
 750   // or old monitor meta-data and heap data.  That violates the JMM.
 751   // To that end, the 1-0 exit() operation must have at least STST|LDST
 752   // "release" barrier semantics.  Specifically, there must be at least a
 753   // STST|LDST barrier in exit() before the ST of null into _owner that drops
 754   // the lock.   The barrier ensures that changes to monitor meta-data and data
 755   // protected by the lock will be visible before we release the lock, and
 756   // therefore before some other thread (CPU) has a chance to acquire the lock.
 757   // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
 758   //
 759   // Critically, any prior STs to _succ or EntryList must be visible before
 760   // the ST of null into _owner in the *subsequent* (following) corresponding
 761   // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
 762   // execute a serializing instruction.
 763 
 764   return;
 765 }
 766 
 767 // ReenterI() is a specialized inline form of the latter half of the
 768 // contended slow-path from EnterI().  We use ReenterI() only for
 769 // monitor reentry in wait().
 770 //
 771 // In the future we should reconcile EnterI() and ReenterI().
 772 
 773 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
 774   assert(Self != NULL, "invariant");
 775   assert(SelfNode != NULL, "invariant");
 776   assert(SelfNode->_thread == Self, "invariant");
 777   assert(_waiters > 0, "invariant");
 778   assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
 779   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 780   JavaThread * jt = (JavaThread *) Self;
 781 
 782   int nWakeups = 0;
 783   for (;;) {
 784     ObjectWaiter::TStates v = SelfNode->TState;
 785     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 786     assert(_owner != Self, "invariant");
 787 
 788     if (TryLock(Self) > 0) break;
 789     if (TrySpin(Self) > 0) break;
 790 
 791     // State transition wrappers around park() ...
 792     // ReenterI() wisely defers state transitions until
 793     // it's clear we must park the thread.
 794     {
 795       OSThreadContendState osts(Self->osthread());
 796       ThreadBlockInVM tbivm(jt);
 797 
 798       // cleared by handle_special_suspend_equivalent_condition()
 799       // or java_suspend_self()
 800       jt->set_suspend_equivalent();
 801       Self->_ParkEvent->park();
 802 
 803       // were we externally suspended while we were waiting?
 804       for (;;) {
 805         if (!ExitSuspendEquivalent(jt)) break;
 806         if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
 807         jt->java_suspend_self();
 808         jt->set_suspend_equivalent();
 809       }
 810     }
 811 
 812     // Try again, but just so we distinguish between futile wakeups and
 813     // successful wakeups.  The following test isn't algorithmically
 814     // necessary, but it helps us maintain sensible statistics.
 815     if (TryLock(Self) > 0) break;
 816 
 817     // The lock is still contested.
 818     // Keep a tally of the # of futile wakeups.
 819     // Note that the counter is not protected by a lock or updated by atomics.
 820     // That is by design - we trade "lossy" counters which are exposed to
 821     // races during updates for a lower probe effect.
 822     ++nWakeups;
 823 
 824     // Assuming this is not a spurious wakeup we'll normally
 825     // find that _succ == Self.
 826     if (_succ == Self) _succ = NULL;
 827 
 828     // Invariant: after clearing _succ a contending thread
 829     // *must* retry  _owner before parking.
 830     OrderAccess::fence();
 831 
 832     // This PerfData object can be used in parallel with a safepoint.
 833     // See the work around in PerfDataManager::destroy().
 834     OM_PERFDATA_OP(FutileWakeups, inc());
 835   }
 836 
 837   // Self has acquired the lock -- Unlink Self from the cxq or EntryList .
 838   // Normally we'll find Self on the EntryList.
 839   // Unlinking from the EntryList is constant-time and atomic-free.
 840   // From the perspective of the lock owner (this thread), the
 841   // EntryList is stable and cxq is prepend-only.
 842   // The head of cxq is volatile but the interior is stable.
 843   // In addition, Self.TState is stable.
 844 
 845   assert(_owner == Self, "invariant");
 846   assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
 847   UnlinkAfterAcquire(Self, SelfNode);
 848   if (_succ == Self) _succ = NULL;
 849   assert(_succ != Self, "invariant");
 850   SelfNode->TState = ObjectWaiter::TS_RUN;
 851   OrderAccess::fence();      // see comments at the end of EnterI()
 852 }
 853 
 854 // By convention we unlink a contending thread from EntryList|cxq immediately
 855 // after the thread acquires the lock in ::enter().  Equally, we could defer
 856 // unlinking the thread until ::exit()-time.
 857 
 858 void ObjectMonitor::UnlinkAfterAcquire(Thread *Self, ObjectWaiter *SelfNode) {
 859   assert(_owner == Self, "invariant");
 860   assert(SelfNode->_thread == Self, "invariant");
 861 
 862   if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
 863     // Normal case: remove Self from the DLL EntryList .
 864     // This is a constant-time operation.
 865     ObjectWaiter * nxt = SelfNode->_next;
 866     ObjectWaiter * prv = SelfNode->_prev;
 867     if (nxt != NULL) nxt->_prev = prv;
 868     if (prv != NULL) prv->_next = nxt;
 869     if (SelfNode == _EntryList) _EntryList = nxt;
 870     assert(nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant");
 871     assert(prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant");
 872   } else {
 873     assert(SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant");
 874     // Inopportune interleaving -- Self is still on the cxq.
 875     // This usually means the enqueue of self raced an exiting thread.
 876     // Normally we'll find Self near the front of the cxq, so
 877     // dequeueing is typically fast.  If needbe we can accelerate
 878     // this with some MCS/CHL-like bidirectional list hints and advisory
 879     // back-links so dequeueing from the interior will normally operate
 880     // in constant-time.
 881     // Dequeue Self from either the head (with CAS) or from the interior
 882     // with a linear-time scan and normal non-atomic memory operations.
 883     // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
 884     // and then unlink Self from EntryList.  We have to drain eventually,
 885     // so it might as well be now.
 886 
 887     ObjectWaiter * v = _cxq;
 888     assert(v != NULL, "invariant");
 889     if (v != SelfNode || Atomic::cmpxchg(&_cxq, v, SelfNode->_next) != v) {
 890       // The CAS above can fail from interference IFF a "RAT" arrived.
 891       // In that case Self must be in the interior and can no longer be
 892       // at the head of cxq.
 893       if (v == SelfNode) {
 894         assert(_cxq != v, "invariant");
 895         v = _cxq;          // CAS above failed - start scan at head of list
 896       }
 897       ObjectWaiter * p;
 898       ObjectWaiter * q = NULL;
 899       for (p = v; p != NULL && p != SelfNode; p = p->_next) {
 900         q = p;
 901         assert(p->TState == ObjectWaiter::TS_CXQ, "invariant");
 902       }
 903       assert(v != SelfNode, "invariant");
 904       assert(p == SelfNode, "Node not found on cxq");
 905       assert(p != _cxq, "invariant");
 906       assert(q != NULL, "invariant");
 907       assert(q->_next == p, "invariant");
 908       q->_next = p->_next;
 909     }
 910   }
 911 
 912 #ifdef ASSERT
 913   // Diagnostic hygiene ...
 914   SelfNode->_prev  = (ObjectWaiter *) 0xBAD;
 915   SelfNode->_next  = (ObjectWaiter *) 0xBAD;
 916   SelfNode->TState = ObjectWaiter::TS_RUN;
 917 #endif
 918 }
 919 
 920 // -----------------------------------------------------------------------------
 921 // Exit support
 922 //
 923 // exit()
 924 // ~~~~~~
 925 // Note that the collector can't reclaim the objectMonitor or deflate
 926 // the object out from underneath the thread calling ::exit() as the
 927 // thread calling ::exit() never transitions to a stable state.
 928 // This inhibits GC, which in turn inhibits asynchronous (and
 929 // inopportune) reclamation of "this".
 930 //
 931 // We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
 932 // There's one exception to the claim above, however.  EnterI() can call
 933 // exit() to drop a lock if the acquirer has been externally suspended.
 934 // In that case exit() is called with _thread_state == _thread_blocked,
 935 // but the monitor's _contentions field is > 0, which inhibits reclamation.
 936 //
 937 // 1-0 exit
 938 // ~~~~~~~~
 939 // ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
 940 // the fast-path operators have been optimized so the common ::exit()
 941 // operation is 1-0, e.g., see macroAssembler_x86.cpp: fast_unlock().
 942 // The code emitted by fast_unlock() elides the usual MEMBAR.  This
 943 // greatly improves latency -- MEMBAR and CAS having considerable local
 944 // latency on modern processors -- but at the cost of "stranding".  Absent the
 945 // MEMBAR, a thread in fast_unlock() can race a thread in the slow
 946 // ::enter() path, resulting in the entering thread being stranding
 947 // and a progress-liveness failure.   Stranding is extremely rare.
 948 // We use timers (timed park operations) & periodic polling to detect
 949 // and recover from stranding.  Potentially stranded threads periodically
 950 // wake up and poll the lock.  See the usage of the _Responsible variable.
 951 //
 952 // The CAS() in enter provides for safety and exclusion, while the CAS or
 953 // MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
 954 // eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
 955 // We detect and recover from stranding with timers.
 956 //
 957 // If a thread transiently strands it'll park until (a) another
 958 // thread acquires the lock and then drops the lock, at which time the
 959 // exiting thread will notice and unpark the stranded thread, or, (b)
 960 // the timer expires.  If the lock is high traffic then the stranding latency
 961 // will be low due to (a).  If the lock is low traffic then the odds of
 962 // stranding are lower, although the worst-case stranding latency
 963 // is longer.  Critically, we don't want to put excessive load in the
 964 // platform's timer subsystem.  We want to minimize both the timer injection
 965 // rate (timers created/sec) as well as the number of timers active at
 966 // any one time.  (more precisely, we want to minimize timer-seconds, which is
 967 // the integral of the # of active timers at any instant over time).
 968 // Both impinge on OS scalability.  Given that, at most one thread parked on
 969 // a monitor will use a timer.
 970 //
 971 // There is also the risk of a futile wake-up. If we drop the lock
 972 // another thread can reacquire the lock immediately, and we can
 973 // then wake a thread unnecessarily. This is benign, and we've
 974 // structured the code so the windows are short and the frequency
 975 // of such futile wakups is low.
 976 
 977 void ObjectMonitor::exit(bool not_suspended, TRAPS) {
 978   Thread* const Self = THREAD;
 979   void* cur = Atomic::load(&_owner);
 980   if (THREAD != cur) {
 981     if (THREAD->is_lock_owned((address)cur)) {
 982       assert(_recursions == 0, "invariant");
 983       set_owner_from_BasicLock(cur, Self);  // Convert from BasicLock* to Thread*.
 984       _recursions = 0;
 985     } else {
 986       // Apparent unbalanced locking ...
 987       // Naively we'd like to throw IllegalMonitorStateException.
 988       // As a practical matter we can neither allocate nor throw an
 989       // exception as ::exit() can be called from leaf routines.
 990       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
 991       // Upon deeper reflection, however, in a properly run JVM the only
 992       // way we should encounter this situation is in the presence of
 993       // unbalanced JNI locking. TODO: CheckJNICalls.
 994       // See also: CR4414101
 995 #ifdef ASSERT
 996       LogStreamHandle(Error, monitorinflation) lsh;
 997       lsh.print_cr("ERROR: ObjectMonitor::exit(): thread=" INTPTR_FORMAT
 998                     " is exiting an ObjectMonitor it does not own.", p2i(THREAD));
 999       lsh.print_cr("The imbalance is possibly caused by JNI locking.");
1000       print_debug_style_on(&lsh);
1001       assert(false, "Non-balanced monitor enter/exit!");
1002 #endif
1003       return;
1004     }
1005   }
1006 
1007   if (_recursions != 0) {
1008     _recursions--;        // this is simple recursive enter
1009     return;
1010   }
1011 
1012   // Invariant: after setting Responsible=null an thread must execute
1013   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
1014   _Responsible = NULL;
1015 
1016 #if INCLUDE_JFR
1017   // get the owner's thread id for the MonitorEnter event
1018   // if it is enabled and the thread isn't suspended
1019   if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
1020     _previous_owner_tid = JFR_THREAD_ID(Self);
1021   }
1022 #endif
1023 
1024   for (;;) {
1025     assert(THREAD == _owner, "invariant");
1026 
1027     // Drop the lock.
1028     // release semantics: prior loads and stores from within the critical section
1029     // must not float (reorder) past the following store that drops the lock.
1030     // Uses a storeload to separate release_store(owner) from the
1031     // successor check. The try_set_owner() below uses cmpxchg() so
1032     // we get the fence down there.
1033     release_clear_owner(Self);
1034     OrderAccess::storeload();
1035 
1036     if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
1037       return;
1038     }
1039     // Other threads are blocked trying to acquire the lock.
1040 
1041     // Normally the exiting thread is responsible for ensuring succession,
1042     // but if other successors are ready or other entering threads are spinning
1043     // then this thread can simply store NULL into _owner and exit without
1044     // waking a successor.  The existence of spinners or ready successors
1045     // guarantees proper succession (liveness).  Responsibility passes to the
1046     // ready or running successors.  The exiting thread delegates the duty.
1047     // More precisely, if a successor already exists this thread is absolved
1048     // of the responsibility of waking (unparking) one.
1049     //
1050     // The _succ variable is critical to reducing futile wakeup frequency.
1051     // _succ identifies the "heir presumptive" thread that has been made
1052     // ready (unparked) but that has not yet run.  We need only one such
1053     // successor thread to guarantee progress.
1054     // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
1055     // section 3.3 "Futile Wakeup Throttling" for details.
1056     //
1057     // Note that spinners in Enter() also set _succ non-null.
1058     // In the current implementation spinners opportunistically set
1059     // _succ so that exiting threads might avoid waking a successor.
1060     // Another less appealing alternative would be for the exiting thread
1061     // to drop the lock and then spin briefly to see if a spinner managed
1062     // to acquire the lock.  If so, the exiting thread could exit
1063     // immediately without waking a successor, otherwise the exiting
1064     // thread would need to dequeue and wake a successor.
1065     // (Note that we'd need to make the post-drop spin short, but no
1066     // shorter than the worst-case round-trip cache-line migration time.
1067     // The dropped lock needs to become visible to the spinner, and then
1068     // the acquisition of the lock by the spinner must become visible to
1069     // the exiting thread).
1070 
1071     // It appears that an heir-presumptive (successor) must be made ready.
1072     // Only the current lock owner can manipulate the EntryList or
1073     // drain _cxq, so we need to reacquire the lock.  If we fail
1074     // to reacquire the lock the responsibility for ensuring succession
1075     // falls to the new owner.
1076     //
1077     if (try_set_owner_from(NULL, Self) != NULL) {
1078       return;
1079     }
1080 
1081     guarantee(_owner == THREAD, "invariant");
1082 
1083     ObjectWaiter * w = NULL;
1084 
1085     w = _EntryList;
1086     if (w != NULL) {
1087       // I'd like to write: guarantee (w->_thread != Self).
1088       // But in practice an exiting thread may find itself on the EntryList.
1089       // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
1090       // then calls exit().  Exit release the lock by setting O._owner to NULL.
1091       // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
1092       // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
1093       // release the lock "O".  T2 resumes immediately after the ST of null into
1094       // _owner, above.  T2 notices that the EntryList is populated, so it
1095       // reacquires the lock and then finds itself on the EntryList.
1096       // Given all that, we have to tolerate the circumstance where "w" is
1097       // associated with Self.
1098       assert(w->TState == ObjectWaiter::TS_ENTER, "invariant");
1099       ExitEpilog(Self, w);
1100       return;
1101     }
1102 
1103     // If we find that both _cxq and EntryList are null then just
1104     // re-run the exit protocol from the top.
1105     w = _cxq;
1106     if (w == NULL) continue;
1107 
1108     // Drain _cxq into EntryList - bulk transfer.
1109     // First, detach _cxq.
1110     // The following loop is tantamount to: w = swap(&cxq, NULL)
1111     for (;;) {
1112       assert(w != NULL, "Invariant");
1113       ObjectWaiter * u = Atomic::cmpxchg(&_cxq, w, (ObjectWaiter*)NULL);
1114       if (u == w) break;
1115       w = u;
1116     }
1117 
1118     assert(w != NULL, "invariant");
1119     assert(_EntryList == NULL, "invariant");
1120 
1121     // Convert the LIFO SLL anchored by _cxq into a DLL.
1122     // The list reorganization step operates in O(LENGTH(w)) time.
1123     // It's critical that this step operate quickly as
1124     // "Self" still holds the outer-lock, restricting parallelism
1125     // and effectively lengthening the critical section.
1126     // Invariant: s chases t chases u.
1127     // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
1128     // we have faster access to the tail.
1129 
1130     _EntryList = w;
1131     ObjectWaiter * q = NULL;
1132     ObjectWaiter * p;
1133     for (p = w; p != NULL; p = p->_next) {
1134       guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
1135       p->TState = ObjectWaiter::TS_ENTER;
1136       p->_prev = q;
1137       q = p;
1138     }
1139 
1140     // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
1141     // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
1142 
1143     // See if we can abdicate to a spinner instead of waking a thread.
1144     // A primary goal of the implementation is to reduce the
1145     // context-switch rate.
1146     if (_succ != NULL) continue;
1147 
1148     w = _EntryList;
1149     if (w != NULL) {
1150       guarantee(w->TState == ObjectWaiter::TS_ENTER, "invariant");
1151       ExitEpilog(Self, w);
1152       return;
1153     }
1154   }
1155 }
1156 
1157 // ExitSuspendEquivalent:
1158 // A faster alternate to handle_special_suspend_equivalent_condition()
1159 //
1160 // handle_special_suspend_equivalent_condition() unconditionally
1161 // acquires the SR_lock.  On some platforms uncontended MutexLocker()
1162 // operations have high latency.  Note that in ::enter() we call HSSEC
1163 // while holding the monitor, so we effectively lengthen the critical sections.
1164 //
1165 // There are a number of possible solutions:
1166 //
1167 // A.  To ameliorate the problem we might also defer state transitions
1168 //     to as late as possible -- just prior to parking.
1169 //     Given that, we'd call HSSEC after having returned from park(),
1170 //     but before attempting to acquire the monitor.  This is only a
1171 //     partial solution.  It avoids calling HSSEC while holding the
1172 //     monitor (good), but it still increases successor reacquisition latency --
1173 //     the interval between unparking a successor and the time the successor
1174 //     resumes and retries the lock.  See ReenterI(), which defers state transitions.
1175 //     If we use this technique we can also avoid EnterI()-exit() loop
1176 //     in ::enter() where we iteratively drop the lock and then attempt
1177 //     to reacquire it after suspending.
1178 //
1179 // B.  In the future we might fold all the suspend bits into a
1180 //     composite per-thread suspend flag and then update it with CAS().
1181 //     Alternately, a Dekker-like mechanism with multiple variables
1182 //     would suffice:
1183 //       ST Self->_suspend_equivalent = false
1184 //       MEMBAR
1185 //       LD Self_>_suspend_flags
1186 
1187 bool ObjectMonitor::ExitSuspendEquivalent(JavaThread * jSelf) {
1188   return jSelf->handle_special_suspend_equivalent_condition();
1189 }
1190 
1191 
1192 void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
1193   assert(_owner == Self, "invariant");
1194 
1195   // Exit protocol:
1196   // 1. ST _succ = wakee
1197   // 2. membar #loadstore|#storestore;
1198   // 2. ST _owner = NULL
1199   // 3. unpark(wakee)
1200 
1201   _succ = Wakee->_thread;
1202   ParkEvent * Trigger = Wakee->_event;
1203 
1204   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
1205   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
1206   // out-of-scope (non-extant).
1207   Wakee  = NULL;
1208 
1209   // Drop the lock.
1210   // Uses a fence to separate release_store(owner) from the LD in unpark().
1211   release_clear_owner(Self);
1212   OrderAccess::fence();
1213 
1214   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
1215   Trigger->unpark();
1216 
1217   // Maintain stats and report events to JVMTI
1218   OM_PERFDATA_OP(Parks, inc());
1219 }
1220 
1221 
1222 // -----------------------------------------------------------------------------
1223 // Class Loader deadlock handling.
1224 //
1225 // complete_exit exits a lock returning recursion count
1226 // complete_exit/reenter operate as a wait without waiting
1227 // complete_exit requires an inflated monitor
1228 // The _owner field is not always the Thread addr even with an
1229 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1230 // thread due to contention.
1231 intx ObjectMonitor::complete_exit(TRAPS) {
1232   Thread * const Self = THREAD;
1233   assert(Self->is_Java_thread(), "Must be Java thread!");
1234   JavaThread *jt = (JavaThread *)THREAD;
1235 
1236   assert(InitDone, "Unexpectedly not initialized");
1237 
1238   void* cur = Atomic::load(&_owner);
1239   if (THREAD != cur) {
1240     if (THREAD->is_lock_owned((address)cur)) {
1241       assert(_recursions == 0, "internal state error");
1242       set_owner_from_BasicLock(cur, Self);  // Convert from BasicLock* to Thread*.
1243       _recursions = 0;
1244     }
1245   }
1246 
1247   guarantee(Self == _owner, "complete_exit not owner");
1248   intx save = _recursions; // record the old recursion count
1249   _recursions = 0;        // set the recursion level to be 0
1250   exit(true, Self);           // exit the monitor
1251   guarantee(_owner != Self, "invariant");
1252   return save;
1253 }
1254 
1255 // reenter() enters a lock and sets recursion count
1256 // complete_exit/reenter operate as a wait without waiting
1257 bool ObjectMonitor::reenter(intx recursions, TRAPS) {
1258   Thread * const Self = THREAD;
1259   assert(Self->is_Java_thread(), "Must be Java thread!");
1260   JavaThread *jt = (JavaThread *)THREAD;
1261 
1262   guarantee(_owner != Self, "reenter already owner");
1263   if (!enter(THREAD)) {
1264     return false;
1265   }
1266   // Entered the monitor.
1267   guarantee(_recursions == 0, "reenter recursion");
1268   _recursions = recursions;
1269   return true;
1270 }
1271 
1272 // Checks that the current THREAD owns this monitor and causes an
1273 // immediate return if it doesn't. We don't use the CHECK macro
1274 // because we want the IMSE to be the only exception that is thrown
1275 // from the call site when false is returned. Any other pending
1276 // exception is ignored.
1277 #define CHECK_OWNER()                                                  \
1278   do {                                                                 \
1279     if (!check_owner(THREAD)) {                                        \
1280        assert(HAS_PENDING_EXCEPTION, "expected a pending IMSE here."); \
1281        return;                                                         \
1282      }                                                                 \
1283   } while (false)
1284 
1285 // Returns true if the specified thread owns the ObjectMonitor.
1286 // Otherwise returns false and throws IllegalMonitorStateException
1287 // (IMSE). If there is a pending exception and the specified thread
1288 // is not the owner, that exception will be replaced by the IMSE.
1289 bool ObjectMonitor::check_owner(Thread* THREAD) {
1290   void* cur = Atomic::load(&_owner);
1291   if (cur == THREAD) {
1292     return true;
1293   }
1294   if (THREAD->is_lock_owned((address)cur)) {
1295     set_owner_from_BasicLock(cur, THREAD);  // Convert from BasicLock* to Thread*.
1296     _recursions = 0;
1297     return true;
1298   }
1299   THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
1300              "current thread is not owner", false);
1301 }
1302 
1303 static void post_monitor_wait_event(EventJavaMonitorWait* event,
1304                                     ObjectMonitor* monitor,
1305                                     jlong notifier_tid,
1306                                     jlong timeout,
1307                                     bool timedout) {
1308   assert(event != NULL, "invariant");
1309   assert(monitor != NULL, "invariant");
1310   event->set_monitorClass(((oop)monitor->object())->klass());
1311   event->set_timeout(timeout);
1312   event->set_address((uintptr_t)monitor->object_addr());
1313   event->set_notifier(notifier_tid);
1314   event->set_timedOut(timedout);
1315   event->commit();
1316 }
1317 
1318 // -----------------------------------------------------------------------------
1319 // Wait/Notify/NotifyAll
1320 //
1321 // Note: a subset of changes to ObjectMonitor::wait()
1322 // will need to be replicated in complete_exit
1323 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
1324   Thread * const Self = THREAD;
1325   assert(Self->is_Java_thread(), "Must be Java thread!");
1326   JavaThread *jt = (JavaThread *)THREAD;
1327 
1328   assert(InitDone, "Unexpectedly not initialized");
1329 
1330   CHECK_OWNER();  // Throws IMSE if not owner.
1331 
1332   EventJavaMonitorWait event;
1333 
1334   // check for a pending interrupt
1335   if (interruptible && jt->is_interrupted(true) && !HAS_PENDING_EXCEPTION) {
1336     // post monitor waited event.  Note that this is past-tense, we are done waiting.
1337     if (JvmtiExport::should_post_monitor_waited()) {
1338       // Note: 'false' parameter is passed here because the
1339       // wait was not timed out due to thread interrupt.
1340       JvmtiExport::post_monitor_waited(jt, this, false);
1341 
1342       // In this short circuit of the monitor wait protocol, the
1343       // current thread never drops ownership of the monitor and
1344       // never gets added to the wait queue so the current thread
1345       // cannot be made the successor. This means that the
1346       // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
1347       // consume an unpark() meant for the ParkEvent associated with
1348       // this ObjectMonitor.
1349     }
1350     if (event.should_commit()) {
1351       post_monitor_wait_event(&event, this, 0, millis, false);
1352     }
1353     THROW(vmSymbols::java_lang_InterruptedException());
1354     return;
1355   }
1356 
1357   assert(Self->_Stalled == 0, "invariant");
1358   Self->_Stalled = intptr_t(this);
1359   jt->set_current_waiting_monitor(this);
1360 
1361   // create a node to be put into the queue
1362   // Critically, after we reset() the event but prior to park(), we must check
1363   // for a pending interrupt.
1364   ObjectWaiter node(Self);
1365   node.TState = ObjectWaiter::TS_WAIT;
1366   Self->_ParkEvent->reset();
1367   OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
1368 
1369   // Enter the waiting queue, which is a circular doubly linked list in this case
1370   // but it could be a priority queue or any data structure.
1371   // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
1372   // by the the owner of the monitor *except* in the case where park()
1373   // returns because of a timeout of interrupt.  Contention is exceptionally rare
1374   // so we use a simple spin-lock instead of a heavier-weight blocking lock.
1375 
1376   Thread::SpinAcquire(&_WaitSetLock, "WaitSet - add");
1377   AddWaiter(&node);
1378   Thread::SpinRelease(&_WaitSetLock);
1379 
1380   _Responsible = NULL;
1381 
1382   intx save = _recursions;     // record the old recursion count
1383   _waiters++;                  // increment the number of waiters
1384   _recursions = 0;             // set the recursion level to be 1
1385   exit(true, Self);                    // exit the monitor
1386   guarantee(_owner != Self, "invariant");
1387 
1388   // The thread is on the WaitSet list - now park() it.
1389   // On MP systems it's conceivable that a brief spin before we park
1390   // could be profitable.
1391   //
1392   // TODO-FIXME: change the following logic to a loop of the form
1393   //   while (!timeout && !interrupted && _notified == 0) park()
1394 
1395   int ret = OS_OK;
1396   int WasNotified = 0;
1397 
1398   // Need to check interrupt state whilst still _thread_in_vm
1399   bool interrupted = interruptible && jt->is_interrupted(false);
1400 
1401   { // State transition wrappers
1402     OSThread* osthread = Self->osthread();
1403     OSThreadWaitState osts(osthread, true);
1404     {
1405       ThreadBlockInVM tbivm(jt);
1406       // Thread is in thread_blocked state and oop access is unsafe.
1407       jt->set_suspend_equivalent();
1408 
1409       if (interrupted || HAS_PENDING_EXCEPTION) {
1410         // Intentionally empty
1411       } else if (node._notified == 0) {
1412         if (millis <= 0) {
1413           Self->_ParkEvent->park();
1414         } else {
1415           ret = Self->_ParkEvent->park(millis);
1416         }
1417       }
1418 
1419       // were we externally suspended while we were waiting?
1420       if (ExitSuspendEquivalent (jt)) {
1421         // TODO-FIXME: add -- if succ == Self then succ = null.
1422         jt->java_suspend_self();
1423       }
1424 
1425     } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
1426 
1427     // Node may be on the WaitSet, the EntryList (or cxq), or in transition
1428     // from the WaitSet to the EntryList.
1429     // See if we need to remove Node from the WaitSet.
1430     // We use double-checked locking to avoid grabbing _WaitSetLock
1431     // if the thread is not on the wait queue.
1432     //
1433     // Note that we don't need a fence before the fetch of TState.
1434     // In the worst case we'll fetch a old-stale value of TS_WAIT previously
1435     // written by the is thread. (perhaps the fetch might even be satisfied
1436     // by a look-aside into the processor's own store buffer, although given
1437     // the length of the code path between the prior ST and this load that's
1438     // highly unlikely).  If the following LD fetches a stale TS_WAIT value
1439     // then we'll acquire the lock and then re-fetch a fresh TState value.
1440     // That is, we fail toward safety.
1441 
1442     if (node.TState == ObjectWaiter::TS_WAIT) {
1443       Thread::SpinAcquire(&_WaitSetLock, "WaitSet - unlink");
1444       if (node.TState == ObjectWaiter::TS_WAIT) {
1445         DequeueSpecificWaiter(&node);       // unlink from WaitSet
1446         assert(node._notified == 0, "invariant");
1447         node.TState = ObjectWaiter::TS_RUN;
1448       }
1449       Thread::SpinRelease(&_WaitSetLock);
1450     }
1451 
1452     // The thread is now either on off-list (TS_RUN),
1453     // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
1454     // The Node's TState variable is stable from the perspective of this thread.
1455     // No other threads will asynchronously modify TState.
1456     guarantee(node.TState != ObjectWaiter::TS_WAIT, "invariant");
1457     OrderAccess::loadload();
1458     if (_succ == Self) _succ = NULL;
1459     WasNotified = node._notified;
1460 
1461     // Reentry phase -- reacquire the monitor.
1462     // re-enter contended monitor after object.wait().
1463     // retain OBJECT_WAIT state until re-enter successfully completes
1464     // Thread state is thread_in_vm and oop access is again safe,
1465     // although the raw address of the object may have changed.
1466     // (Don't cache naked oops over safepoints, of course).
1467 
1468     // post monitor waited event. Note that this is past-tense, we are done waiting.
1469     if (JvmtiExport::should_post_monitor_waited()) {
1470       JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
1471 
1472       if (node._notified != 0 && _succ == Self) {
1473         // In this part of the monitor wait-notify-reenter protocol it
1474         // is possible (and normal) for another thread to do a fastpath
1475         // monitor enter-exit while this thread is still trying to get
1476         // to the reenter portion of the protocol.
1477         //
1478         // The ObjectMonitor was notified and the current thread is
1479         // the successor which also means that an unpark() has already
1480         // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
1481         // consume the unpark() that was done when the successor was
1482         // set because the same ParkEvent is shared between Java
1483         // monitors and JVM/TI RawMonitors (for now).
1484         //
1485         // We redo the unpark() to ensure forward progress, i.e., we
1486         // don't want all pending threads hanging (parked) with none
1487         // entering the unlocked monitor.
1488         node._event->unpark();
1489       }
1490     }
1491 
1492     if (event.should_commit()) {
1493       post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
1494     }
1495 
1496     OrderAccess::fence();
1497 
1498     assert(Self->_Stalled != 0, "invariant");
1499     Self->_Stalled = 0;
1500 
1501     assert(_owner != Self, "invariant");
1502     ObjectWaiter::TStates v = node.TState;
1503     if (v == ObjectWaiter::TS_RUN) {
1504       enter(Self);
1505     } else {
1506       guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
1507       ReenterI(Self, &node);
1508       node.wait_reenter_end(this);
1509     }
1510 
1511     // Self has reacquired the lock.
1512     // Lifecycle - the node representing Self must not appear on any queues.
1513     // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1514     // want residual elements associated with this thread left on any lists.
1515     guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
1516     assert(_owner == Self, "invariant");
1517     assert(_succ != Self, "invariant");
1518   } // OSThreadWaitState()
1519 
1520   jt->set_current_waiting_monitor(NULL);
1521 
1522   guarantee(_recursions == 0, "invariant");
1523   _recursions = save;     // restore the old recursion count
1524   _waiters--;             // decrement the number of waiters
1525 
1526   // Verify a few postconditions
1527   assert(_owner == Self, "invariant");
1528   assert(_succ != Self, "invariant");
1529   assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
1530 
1531   // check if the notification happened
1532   if (!WasNotified) {
1533     // no, it could be timeout or Thread.interrupt() or both
1534     // check for interrupt event, otherwise it is timeout
1535     if (interruptible && jt->is_interrupted(true) && !HAS_PENDING_EXCEPTION) {
1536       THROW(vmSymbols::java_lang_InterruptedException());
1537     }
1538   }
1539 
1540   // NOTE: Spurious wake up will be consider as timeout.
1541   // Monitor notify has precedence over thread interrupt.
1542 }
1543 
1544 
1545 // Consider:
1546 // If the lock is cool (cxq == null && succ == null) and we're on an MP system
1547 // then instead of transferring a thread from the WaitSet to the EntryList
1548 // we might just dequeue a thread from the WaitSet and directly unpark() it.
1549 
1550 void ObjectMonitor::INotify(Thread * Self) {
1551   Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notify");
1552   ObjectWaiter * iterator = DequeueWaiter();
1553   if (iterator != NULL) {
1554     guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
1555     guarantee(iterator->_notified == 0, "invariant");
1556     // Disposition - what might we do with iterator ?
1557     // a.  add it directly to the EntryList - either tail (policy == 1)
1558     //     or head (policy == 0).
1559     // b.  push it onto the front of the _cxq (policy == 2).
1560     // For now we use (b).
1561 
1562     iterator->TState = ObjectWaiter::TS_ENTER;
1563 
1564     iterator->_notified = 1;
1565     iterator->_notifier_tid = JFR_THREAD_ID(Self);
1566 
1567     ObjectWaiter * list = _EntryList;
1568     if (list != NULL) {
1569       assert(list->_prev == NULL, "invariant");
1570       assert(list->TState == ObjectWaiter::TS_ENTER, "invariant");
1571       assert(list != iterator, "invariant");
1572     }
1573 
1574     // prepend to cxq
1575     if (list == NULL) {
1576       iterator->_next = iterator->_prev = NULL;
1577       _EntryList = iterator;
1578     } else {
1579       iterator->TState = ObjectWaiter::TS_CXQ;
1580       for (;;) {
1581         ObjectWaiter * front = _cxq;
1582         iterator->_next = front;
1583         if (Atomic::cmpxchg(&_cxq, front, iterator) == front) {
1584           break;
1585         }
1586       }
1587     }
1588 
1589     // _WaitSetLock protects the wait queue, not the EntryList.  We could
1590     // move the add-to-EntryList operation, above, outside the critical section
1591     // protected by _WaitSetLock.  In practice that's not useful.  With the
1592     // exception of  wait() timeouts and interrupts the monitor owner
1593     // is the only thread that grabs _WaitSetLock.  There's almost no contention
1594     // on _WaitSetLock so it's not profitable to reduce the length of the
1595     // critical section.
1596 
1597     iterator->wait_reenter_begin(this);
1598   }
1599   Thread::SpinRelease(&_WaitSetLock);
1600 }
1601 
1602 // Consider: a not-uncommon synchronization bug is to use notify() when
1603 // notifyAll() is more appropriate, potentially resulting in stranded
1604 // threads; this is one example of a lost wakeup. A useful diagnostic
1605 // option is to force all notify() operations to behave as notifyAll().
1606 //
1607 // Note: We can also detect many such problems with a "minimum wait".
1608 // When the "minimum wait" is set to a small non-zero timeout value
1609 // and the program does not hang whereas it did absent "minimum wait",
1610 // that suggests a lost wakeup bug.
1611 
1612 void ObjectMonitor::notify(TRAPS) {
1613   CHECK_OWNER();  // Throws IMSE if not owner.
1614   if (_WaitSet == NULL) {
1615     return;
1616   }
1617   DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
1618   INotify(THREAD);
1619   OM_PERFDATA_OP(Notifications, inc(1));
1620 }
1621 
1622 
1623 // The current implementation of notifyAll() transfers the waiters one-at-a-time
1624 // from the waitset to the EntryList. This could be done more efficiently with a
1625 // single bulk transfer but in practice it's not time-critical. Beware too,
1626 // that in prepend-mode we invert the order of the waiters. Let's say that the
1627 // waitset is "ABCD" and the EntryList is "XYZ". After a notifyAll() in prepend
1628 // mode the waitset will be empty and the EntryList will be "DCBAXYZ".
1629 
1630 void ObjectMonitor::notifyAll(TRAPS) {
1631   CHECK_OWNER();  // Throws IMSE if not owner.
1632   if (_WaitSet == NULL) {
1633     return;
1634   }
1635 
1636   DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
1637   int tally = 0;
1638   while (_WaitSet != NULL) {
1639     tally++;
1640     INotify(THREAD);
1641   }
1642 
1643   OM_PERFDATA_OP(Notifications, inc(tally));
1644 }
1645 
1646 // -----------------------------------------------------------------------------
1647 // Adaptive Spinning Support
1648 //
1649 // Adaptive spin-then-block - rational spinning
1650 //
1651 // Note that we spin "globally" on _owner with a classic SMP-polite TATAS
1652 // algorithm.  On high order SMP systems it would be better to start with
1653 // a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
1654 // a contending thread could enqueue itself on the cxq and then spin locally
1655 // on a thread-specific variable such as its ParkEvent._Event flag.
1656 // That's left as an exercise for the reader.  Note that global spinning is
1657 // not problematic on Niagara, as the L2 cache serves the interconnect and
1658 // has both low latency and massive bandwidth.
1659 //
1660 // Broadly, we can fix the spin frequency -- that is, the % of contended lock
1661 // acquisition attempts where we opt to spin --  at 100% and vary the spin count
1662 // (duration) or we can fix the count at approximately the duration of
1663 // a context switch and vary the frequency.   Of course we could also
1664 // vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
1665 // For a description of 'Adaptive spin-then-block mutual exclusion in
1666 // multi-threaded processing,' see U.S. Pat. No. 8046758.
1667 //
1668 // This implementation varies the duration "D", where D varies with
1669 // the success rate of recent spin attempts. (D is capped at approximately
1670 // length of a round-trip context switch).  The success rate for recent
1671 // spin attempts is a good predictor of the success rate of future spin
1672 // attempts.  The mechanism adapts automatically to varying critical
1673 // section length (lock modality), system load and degree of parallelism.
1674 // D is maintained per-monitor in _SpinDuration and is initialized
1675 // optimistically.  Spin frequency is fixed at 100%.
1676 //
1677 // Note that _SpinDuration is volatile, but we update it without locks
1678 // or atomics.  The code is designed so that _SpinDuration stays within
1679 // a reasonable range even in the presence of races.  The arithmetic
1680 // operations on _SpinDuration are closed over the domain of legal values,
1681 // so at worst a race will install and older but still legal value.
1682 // At the very worst this introduces some apparent non-determinism.
1683 // We might spin when we shouldn't or vice-versa, but since the spin
1684 // count are relatively short, even in the worst case, the effect is harmless.
1685 //
1686 // Care must be taken that a low "D" value does not become an
1687 // an absorbing state.  Transient spinning failures -- when spinning
1688 // is overall profitable -- should not cause the system to converge
1689 // on low "D" values.  We want spinning to be stable and predictable
1690 // and fairly responsive to change and at the same time we don't want
1691 // it to oscillate, become metastable, be "too" non-deterministic,
1692 // or converge on or enter undesirable stable absorbing states.
1693 //
1694 // We implement a feedback-based control system -- using past behavior
1695 // to predict future behavior.  We face two issues: (a) if the
1696 // input signal is random then the spin predictor won't provide optimal
1697 // results, and (b) if the signal frequency is too high then the control
1698 // system, which has some natural response lag, will "chase" the signal.
1699 // (b) can arise from multimodal lock hold times.  Transient preemption
1700 // can also result in apparent bimodal lock hold times.
1701 // Although sub-optimal, neither condition is particularly harmful, as
1702 // in the worst-case we'll spin when we shouldn't or vice-versa.
1703 // The maximum spin duration is rather short so the failure modes aren't bad.
1704 // To be conservative, I've tuned the gain in system to bias toward
1705 // _not spinning.  Relatedly, the system can sometimes enter a mode where it
1706 // "rings" or oscillates between spinning and not spinning.  This happens
1707 // when spinning is just on the cusp of profitability, however, so the
1708 // situation is not dire.  The state is benign -- there's no need to add
1709 // hysteresis control to damp the transition rate between spinning and
1710 // not spinning.
1711 
1712 // Spinning: Fixed frequency (100%), vary duration
1713 int ObjectMonitor::TrySpin(Thread * Self) {
1714   // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
1715   int ctr = Knob_FixedSpin;
1716   if (ctr != 0) {
1717     while (--ctr >= 0) {
1718       if (TryLock(Self) > 0) return 1;
1719       SpinPause();
1720     }
1721     return 0;
1722   }
1723 
1724   for (ctr = Knob_PreSpin + 1; --ctr >= 0;) {
1725     if (TryLock(Self) > 0) {
1726       // Increase _SpinDuration ...
1727       // Note that we don't clamp SpinDuration precisely at SpinLimit.
1728       // Raising _SpurDuration to the poverty line is key.
1729       int x = _SpinDuration;
1730       if (x < Knob_SpinLimit) {
1731         if (x < Knob_Poverty) x = Knob_Poverty;
1732         _SpinDuration = x + Knob_BonusB;
1733       }
1734       return 1;
1735     }
1736     SpinPause();
1737   }
1738 
1739   // Admission control - verify preconditions for spinning
1740   //
1741   // We always spin a little bit, just to prevent _SpinDuration == 0 from
1742   // becoming an absorbing state.  Put another way, we spin briefly to
1743   // sample, just in case the system load, parallelism, contention, or lock
1744   // modality changed.
1745   //
1746   // Consider the following alternative:
1747   // Periodically set _SpinDuration = _SpinLimit and try a long/full
1748   // spin attempt.  "Periodically" might mean after a tally of
1749   // the # of failed spin attempts (or iterations) reaches some threshold.
1750   // This takes us into the realm of 1-out-of-N spinning, where we
1751   // hold the duration constant but vary the frequency.
1752 
1753   ctr = _SpinDuration;
1754   if (ctr <= 0) return 0;
1755 
1756   if (NotRunnable(Self, (Thread *) _owner)) {
1757     return 0;
1758   }
1759 
1760   // We're good to spin ... spin ingress.
1761   // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
1762   // when preparing to LD...CAS _owner, etc and the CAS is likely
1763   // to succeed.
1764   if (_succ == NULL) {
1765     _succ = Self;
1766   }
1767   Thread * prv = NULL;
1768 
1769   // There are three ways to exit the following loop:
1770   // 1.  A successful spin where this thread has acquired the lock.
1771   // 2.  Spin failure with prejudice
1772   // 3.  Spin failure without prejudice
1773 
1774   while (--ctr >= 0) {
1775 
1776     // Periodic polling -- Check for pending GC
1777     // Threads may spin while they're unsafe.
1778     // We don't want spinning threads to delay the JVM from reaching
1779     // a stop-the-world safepoint or to steal cycles from GC.
1780     // If we detect a pending safepoint we abort in order that
1781     // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
1782     // this thread, if safe, doesn't steal cycles from GC.
1783     // This is in keeping with the "no loitering in runtime" rule.
1784     // We periodically check to see if there's a safepoint pending.
1785     if ((ctr & 0xFF) == 0) {
1786       if (SafepointMechanism::should_block(Self)) {
1787         goto Abort;           // abrupt spin egress
1788       }
1789       SpinPause();
1790     }
1791 
1792     // Probe _owner with TATAS
1793     // If this thread observes the monitor transition or flicker
1794     // from locked to unlocked to locked, then the odds that this
1795     // thread will acquire the lock in this spin attempt go down
1796     // considerably.  The same argument applies if the CAS fails
1797     // or if we observe _owner change from one non-null value to
1798     // another non-null value.   In such cases we might abort
1799     // the spin without prejudice or apply a "penalty" to the
1800     // spin count-down variable "ctr", reducing it by 100, say.
1801 
1802     Thread * ox = (Thread *) _owner;
1803     if (ox == NULL) {
1804       ox = (Thread*)try_set_owner_from(NULL, Self);
1805       if (ox == NULL) {
1806         // The CAS succeeded -- this thread acquired ownership
1807         // Take care of some bookkeeping to exit spin state.
1808         if (_succ == Self) {
1809           _succ = NULL;
1810         }
1811 
1812         // Increase _SpinDuration :
1813         // The spin was successful (profitable) so we tend toward
1814         // longer spin attempts in the future.
1815         // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
1816         // If we acquired the lock early in the spin cycle it
1817         // makes sense to increase _SpinDuration proportionally.
1818         // Note that we don't clamp SpinDuration precisely at SpinLimit.
1819         int x = _SpinDuration;
1820         if (x < Knob_SpinLimit) {
1821           if (x < Knob_Poverty) x = Knob_Poverty;
1822           _SpinDuration = x + Knob_Bonus;
1823         }
1824         return 1;
1825       }
1826 
1827       // The CAS failed ... we can take any of the following actions:
1828       // * penalize: ctr -= CASPenalty
1829       // * exit spin with prejudice -- goto Abort;
1830       // * exit spin without prejudice.
1831       // * Since CAS is high-latency, retry again immediately.
1832       prv = ox;
1833       goto Abort;
1834     }
1835 
1836     // Did lock ownership change hands ?
1837     if (ox != prv && prv != NULL) {
1838       goto Abort;
1839     }
1840     prv = ox;
1841 
1842     // Abort the spin if the owner is not executing.
1843     // The owner must be executing in order to drop the lock.
1844     // Spinning while the owner is OFFPROC is idiocy.
1845     // Consider: ctr -= RunnablePenalty ;
1846     if (NotRunnable(Self, ox)) {
1847       goto Abort;
1848     }
1849     if (_succ == NULL) {
1850       _succ = Self;
1851     }
1852   }
1853 
1854   // Spin failed with prejudice -- reduce _SpinDuration.
1855   // TODO: Use an AIMD-like policy to adjust _SpinDuration.
1856   // AIMD is globally stable.
1857   {
1858     int x = _SpinDuration;
1859     if (x > 0) {
1860       // Consider an AIMD scheme like: x -= (x >> 3) + 100
1861       // This is globally sample and tends to damp the response.
1862       x -= Knob_Penalty;
1863       if (x < 0) x = 0;
1864       _SpinDuration = x;
1865     }
1866   }
1867 
1868  Abort:
1869   if (_succ == Self) {
1870     _succ = NULL;
1871     // Invariant: after setting succ=null a contending thread
1872     // must recheck-retry _owner before parking.  This usually happens
1873     // in the normal usage of TrySpin(), but it's safest
1874     // to make TrySpin() as foolproof as possible.
1875     OrderAccess::fence();
1876     if (TryLock(Self) > 0) return 1;
1877   }
1878   return 0;
1879 }
1880 
1881 // NotRunnable() -- informed spinning
1882 //
1883 // Don't bother spinning if the owner is not eligible to drop the lock.
1884 // Spin only if the owner thread is _thread_in_Java or _thread_in_vm.
1885 // The thread must be runnable in order to drop the lock in timely fashion.
1886 // If the _owner is not runnable then spinning will not likely be
1887 // successful (profitable).
1888 //
1889 // Beware -- the thread referenced by _owner could have died
1890 // so a simply fetch from _owner->_thread_state might trap.
1891 // Instead, we use SafeFetchXX() to safely LD _owner->_thread_state.
1892 // Because of the lifecycle issues, the _thread_state values
1893 // observed by NotRunnable() might be garbage.  NotRunnable must
1894 // tolerate this and consider the observed _thread_state value
1895 // as advisory.
1896 //
1897 // Beware too, that _owner is sometimes a BasicLock address and sometimes
1898 // a thread pointer.
1899 // Alternately, we might tag the type (thread pointer vs basiclock pointer)
1900 // with the LSB of _owner.  Another option would be to probabilistically probe
1901 // the putative _owner->TypeTag value.
1902 //
1903 // Checking _thread_state isn't perfect.  Even if the thread is
1904 // in_java it might be blocked on a page-fault or have been preempted
1905 // and sitting on a ready/dispatch queue.
1906 //
1907 // The return value from NotRunnable() is *advisory* -- the
1908 // result is based on sampling and is not necessarily coherent.
1909 // The caller must tolerate false-negative and false-positive errors.
1910 // Spinning, in general, is probabilistic anyway.
1911 
1912 
1913 int ObjectMonitor::NotRunnable(Thread * Self, Thread * ox) {
1914   // Check ox->TypeTag == 2BAD.
1915   if (ox == NULL) return 0;
1916 
1917   // Avoid transitive spinning ...
1918   // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
1919   // Immediately after T1 acquires L it's possible that T2, also
1920   // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
1921   // This occurs transiently after T1 acquired L but before
1922   // T1 managed to clear T1.Stalled.  T2 does not need to abort
1923   // its spin in this circumstance.
1924   intptr_t BlockedOn = SafeFetchN((intptr_t *) &ox->_Stalled, intptr_t(1));
1925 
1926   if (BlockedOn == 1) return 1;
1927   if (BlockedOn != 0) {
1928     return BlockedOn != intptr_t(this) && _owner == ox;
1929   }
1930 
1931   assert(sizeof(((JavaThread *)ox)->_thread_state == sizeof(int)), "invariant");
1932   int jst = SafeFetch32((int *) &((JavaThread *) ox)->_thread_state, -1);;
1933   // consider also: jst != _thread_in_Java -- but that's overspecific.
1934   return jst == _thread_blocked || jst == _thread_in_native;
1935 }
1936 
1937 
1938 // -----------------------------------------------------------------------------
1939 // WaitSet management ...
1940 
1941 ObjectWaiter::ObjectWaiter(Thread* thread) {
1942   _next     = NULL;
1943   _prev     = NULL;
1944   _notified = 0;
1945   _notifier_tid = 0;
1946   TState    = TS_RUN;
1947   _thread   = thread;
1948   _event    = thread->_ParkEvent;
1949   _active   = false;
1950   assert(_event != NULL, "invariant");
1951 }
1952 
1953 void ObjectWaiter::wait_reenter_begin(ObjectMonitor * const mon) {
1954   JavaThread *jt = (JavaThread *)this->_thread;
1955   _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
1956 }
1957 
1958 void ObjectWaiter::wait_reenter_end(ObjectMonitor * const mon) {
1959   JavaThread *jt = (JavaThread *)this->_thread;
1960   JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
1961 }
1962 
1963 inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
1964   assert(node != NULL, "should not add NULL node");
1965   assert(node->_prev == NULL, "node already in list");
1966   assert(node->_next == NULL, "node already in list");
1967   // put node at end of queue (circular doubly linked list)
1968   if (_WaitSet == NULL) {
1969     _WaitSet = node;
1970     node->_prev = node;
1971     node->_next = node;
1972   } else {
1973     ObjectWaiter* head = _WaitSet;
1974     ObjectWaiter* tail = head->_prev;
1975     assert(tail->_next == head, "invariant check");
1976     tail->_next = node;
1977     head->_prev = node;
1978     node->_next = head;
1979     node->_prev = tail;
1980   }
1981 }
1982 
1983 inline ObjectWaiter* ObjectMonitor::DequeueWaiter() {
1984   // dequeue the very first waiter
1985   ObjectWaiter* waiter = _WaitSet;
1986   if (waiter) {
1987     DequeueSpecificWaiter(waiter);
1988   }
1989   return waiter;
1990 }
1991 
1992 inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
1993   assert(node != NULL, "should not dequeue NULL node");
1994   assert(node->_prev != NULL, "node already removed from list");
1995   assert(node->_next != NULL, "node already removed from list");
1996   // when the waiter has woken up because of interrupt,
1997   // timeout or other spurious wake-up, dequeue the
1998   // waiter from waiting list
1999   ObjectWaiter* next = node->_next;
2000   if (next == node) {
2001     assert(node->_prev == node, "invariant check");
2002     _WaitSet = NULL;
2003   } else {
2004     ObjectWaiter* prev = node->_prev;
2005     assert(prev->_next == node, "invariant check");
2006     assert(next->_prev == node, "invariant check");
2007     next->_prev = prev;
2008     prev->_next = next;
2009     if (_WaitSet == node) {
2010       _WaitSet = next;
2011     }
2012   }
2013   node->_next = NULL;
2014   node->_prev = NULL;
2015 }
2016 
2017 // -----------------------------------------------------------------------------
2018 // PerfData support
2019 PerfCounter * ObjectMonitor::_sync_ContendedLockAttempts       = NULL;
2020 PerfCounter * ObjectMonitor::_sync_FutileWakeups               = NULL;
2021 PerfCounter * ObjectMonitor::_sync_Parks                       = NULL;
2022 PerfCounter * ObjectMonitor::_sync_Notifications               = NULL;
2023 PerfCounter * ObjectMonitor::_sync_Inflations                  = NULL;
2024 PerfCounter * ObjectMonitor::_sync_Deflations                  = NULL;
2025 PerfLongVariable * ObjectMonitor::_sync_MonExtant              = NULL;
2026 
2027 // One-shot global initialization for the sync subsystem.
2028 // We could also defer initialization and initialize on-demand
2029 // the first time we call ObjectSynchronizer::inflate().
2030 // Initialization would be protected - like so many things - by
2031 // the MonitorCache_lock.
2032 
2033 void ObjectMonitor::Initialize() {
2034   assert(!InitDone, "invariant");
2035 
2036   if (!os::is_MP()) {
2037     Knob_SpinLimit = 0;
2038     Knob_PreSpin   = 0;
2039     Knob_FixedSpin = -1;
2040   }
2041 
2042   if (UsePerfData) {
2043     EXCEPTION_MARK;
2044 #define NEWPERFCOUNTER(n)                                                \
2045   {                                                                      \
2046     n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,  \
2047                                         CHECK);                          \
2048   }
2049 #define NEWPERFVARIABLE(n)                                                \
2050   {                                                                       \
2051     n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,  \
2052                                          CHECK);                          \
2053   }
2054     NEWPERFCOUNTER(_sync_Inflations);
2055     NEWPERFCOUNTER(_sync_Deflations);
2056     NEWPERFCOUNTER(_sync_ContendedLockAttempts);
2057     NEWPERFCOUNTER(_sync_FutileWakeups);
2058     NEWPERFCOUNTER(_sync_Parks);
2059     NEWPERFCOUNTER(_sync_Notifications);
2060     NEWPERFVARIABLE(_sync_MonExtant);
2061 #undef NEWPERFCOUNTER
2062 #undef NEWPERFVARIABLE
2063   }
2064 
2065   DEBUG_ONLY(InitDone = true;)
2066 }
2067 
2068 void ObjectMonitor::print_on(outputStream* st) const {
2069   // The minimal things to print for markWord printing, more can be added for debugging and logging.
2070   st->print("{contentions=0x%08x,waiters=0x%08x"
2071             ",recursions=" INTX_FORMAT ",owner=" INTPTR_FORMAT "}",
2072             contentions(), waiters(), recursions(),
2073             p2i(owner()));
2074 }
2075 void ObjectMonitor::print() const { print_on(tty); }
2076 
2077 #ifdef ASSERT
2078 // Print the ObjectMonitor like a debugger would:
2079 //
2080 // (ObjectMonitor) 0x00007fdfb6012e40 = {
2081 //   _header = 0x0000000000000001
2082 //   _object = 0x000000070ff45fd0
2083 //   _allocation_state = Old
2084 //   _pad_buf0 = {
2085 //     [0] = '\0'
2086 //     ...
2087 //     [43] = '\0'
2088 //   }
2089 //   _owner = 0x0000000000000000
2090 //   _previous_owner_tid = 0
2091 //   _pad_buf1 = {
2092 //     [0] = '\0'
2093 //     ...
2094 //     [47] = '\0'
2095 //   }
2096 //   _next_om = 0x0000000000000000
2097 //   _recursions = 0
2098 //   _EntryList = 0x0000000000000000
2099 //   _cxq = 0x0000000000000000
2100 //   _succ = 0x0000000000000000
2101 //   _Responsible = 0x0000000000000000
2102 //   _Spinner = 0
2103 //   _SpinDuration = 5000
2104 //   _contentions = 0
2105 //   _WaitSet = 0x0000700009756248
2106 //   _waiters = 1
2107 //   _WaitSetLock = 0
2108 // }
2109 //
2110 void ObjectMonitor::print_debug_style_on(outputStream* st) const {
2111   st->print_cr("(ObjectMonitor*) " INTPTR_FORMAT " = {", p2i(this));
2112   st->print_cr("  _header = " INTPTR_FORMAT, header().value());
2113   st->print_cr("  _object = " INTPTR_FORMAT, p2i(_object));
2114   st->print("  _allocation_state = ");
2115   if (is_free()) {
2116     st->print("Free");
2117   } else if (is_old()) {
2118     st->print("Old");
2119   } else if (is_new()) {
2120     st->print("New");
2121   } else {
2122     st->print("unknown=%d", _allocation_state);
2123   }
2124   st->cr();
2125   st->print_cr("  _pad_buf0 = {");
2126   st->print_cr("    [0] = '\\0'");
2127   st->print_cr("    ...");
2128   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);
2129   st->print_cr("  }");
2130   st->print_cr("  _owner = " INTPTR_FORMAT, p2i(_owner));
2131   st->print_cr("  _previous_owner_tid = " JLONG_FORMAT, _previous_owner_tid);
2132   st->print_cr("  _pad_buf1 = {");
2133   st->print_cr("    [0] = '\\0'");
2134   st->print_cr("    ...");
2135   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf1) - 1);
2136   st->print_cr("  }");
2137   st->print_cr("  _next_om = " INTPTR_FORMAT, p2i(next_om()));
2138   st->print_cr("  _recursions = " INTX_FORMAT, _recursions);
2139   st->print_cr("  _EntryList = " INTPTR_FORMAT, p2i(_EntryList));
2140   st->print_cr("  _cxq = " INTPTR_FORMAT, p2i(_cxq));
2141   st->print_cr("  _succ = " INTPTR_FORMAT, p2i(_succ));
2142   st->print_cr("  _Responsible = " INTPTR_FORMAT, p2i(_Responsible));
2143   st->print_cr("  _Spinner = %d", _Spinner);
2144   st->print_cr("  _SpinDuration = %d", _SpinDuration);
2145   st->print_cr("  _contentions = %d", contentions());
2146   st->print_cr("  _WaitSet = " INTPTR_FORMAT, p2i(_WaitSet));
2147   st->print_cr("  _waiters = %d", _waiters);
2148   st->print_cr("  _WaitSetLock = %d", _WaitSetLock);
2149   st->print_cr("}");
2150 }
2151 #endif