1 /*
   2  * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/vmSymbols.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "oops/markOop.hpp"
  29 #include "oops/oop.inline.hpp"
  30 #include "runtime/biasedLocking.hpp"
  31 #include "runtime/handles.inline.hpp"
  32 #include "runtime/interfaceSupport.hpp"
  33 #include "runtime/mutexLocker.hpp"
  34 #include "runtime/objectMonitor.hpp"
  35 #include "runtime/objectMonitor.inline.hpp"
  36 #include "runtime/osThread.hpp"
  37 #include "runtime/stubRoutines.hpp"
  38 #include "runtime/synchronizer.hpp"
  39 #include "services/threadService.hpp"
  40 #include "utilities/dtrace.hpp"
  41 #include "utilities/events.hpp"
  42 #include "utilities/preserveException.hpp"
  43 #ifdef TARGET_OS_FAMILY_linux
  44 # include "os_linux.inline.hpp"
  45 # include "thread_linux.inline.hpp"
  46 #endif
  47 #ifdef TARGET_OS_FAMILY_solaris
  48 # include "os_solaris.inline.hpp"
  49 # include "thread_solaris.inline.hpp"
  50 #endif
  51 #ifdef TARGET_OS_FAMILY_windows
  52 # include "os_windows.inline.hpp"
  53 # include "thread_windows.inline.hpp"
  54 #endif
  55 
  56 #if defined(__GNUC__) && !defined(IA64)
  57   // Need to inhibit inlining for older versions of GCC to avoid build-time failures
  58   #define ATTR __attribute__((noinline))
  59 #else
  60   #define ATTR
  61 #endif
  62 
  63 // Native markword accessors for synchronization and hashCode().
  64 //
  65 // The "core" versions of monitor enter and exit reside in this file.
  66 // The interpreter and compilers contain specialized transliterated
  67 // variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
  68 // for instance.  If you make changes here, make sure to modify the
  69 // interpreter, and both C1 and C2 fast-path inline locking code emission.
  70 //
  71 // TODO: merge the objectMonitor and synchronizer classes.
  72 //
  73 // -----------------------------------------------------------------------------
  74 
  75 #ifdef DTRACE_ENABLED
  76 
  77 // Only bother with this argument setup if dtrace is available
  78 // TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.
  79 
  80 HS_DTRACE_PROBE_DECL5(hotspot, monitor__wait,
  81   jlong, uintptr_t, char*, int, long);
  82 HS_DTRACE_PROBE_DECL4(hotspot, monitor__waited,
  83   jlong, uintptr_t, char*, int);
  84 HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify,
  85   jlong, uintptr_t, char*, int);
  86 HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll,
  87   jlong, uintptr_t, char*, int);
  88 HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter,
  89   jlong, uintptr_t, char*, int);
  90 HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered,
  91   jlong, uintptr_t, char*, int);
  92 HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit,
  93   jlong, uintptr_t, char*, int);
  94 
  95 #define DTRACE_MONITOR_PROBE_COMMON(klassOop, thread)                      \
  96   char* bytes = NULL;                                                      \
  97   int len = 0;                                                             \
  98   jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  99   symbolOop klassname = ((oop)(klassOop))->klass()->klass_part()->name();  \
 100   if (klassname != NULL) {                                                 \
 101     bytes = (char*)klassname->bytes();                                     \
 102     len = klassname->utf8_length();                                        \
 103   }
 104 
 105 #define DTRACE_MONITOR_WAIT_PROBE(monitor, klassOop, thread, millis)       \
 106   {                                                                        \
 107     if (DTraceMonitorProbes) {                                            \
 108       DTRACE_MONITOR_PROBE_COMMON(klassOop, thread);                       \
 109       HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid,                       \
 110                        (monitor), bytes, len, (millis));                   \
 111     }                                                                      \
 112   }
 113 
 114 #define DTRACE_MONITOR_PROBE(probe, monitor, klassOop, thread)             \
 115   {                                                                        \
 116     if (DTraceMonitorProbes) {                                            \
 117       DTRACE_MONITOR_PROBE_COMMON(klassOop, thread);                       \
 118       HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid,                    \
 119                        (uintptr_t)(monitor), bytes, len);                  \
 120     }                                                                      \
 121   }
 122 
 123 #else //  ndef DTRACE_ENABLED
 124 
#define DTRACE_MONITOR_WAIT_PROBE(monitor, klassOop, thread, millis) {;}
#define DTRACE_MONITOR_PROBE(probe, monitor, klassOop, thread)       {;}
 127 
 128 #endif // ndef DTRACE_ENABLED
 129 
 130 // ObjectWaiter serves as a "proxy" or surrogate thread.
 131 // TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
 132 // ParkEvent instead.  Beware, however, that the JVMTI code
 133 // knows about ObjectWaiters, so we'll have to reconcile that code.
 134 // See next_waiter(), first_waiter(), etc.
 135 
 136 class ObjectWaiter : public StackObj {
 137  public:
 138   enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ } ;
 139   enum Sorted  { PREPEND, APPEND, SORTED } ;
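  // TStates track where a waiter currently resides (a sketch of the intended
  // meanings, inferred from ObjectMonitor usage): TS_RUN = running / not
  // enqueued, TS_WAIT = on the monitor's WaitSet after Object.wait(),
  // TS_CXQ = on the contention queue (cxq), TS_ENTER = on the EntryList.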
 140   ObjectWaiter * volatile _next;
 141   ObjectWaiter * volatile _prev;
 142   Thread*       _thread;
 143   ParkEvent *   _event;
 144   volatile int  _notified ;
 145   volatile TStates TState ;
 146   Sorted        _Sorted ;           // List placement disposition
 147   bool          _active ;           // Contention monitoring is enabled
 148  public:
 149   ObjectWaiter(Thread* thread) {
 150     _next     = NULL;
 151     _prev     = NULL;
 152     _notified = 0;
 153     TState    = TS_RUN ;
 154     _thread   = thread;
 155     _event    = thread->_ParkEvent ;
 156     _active   = false;
 157     assert (_event != NULL, "invariant") ;
 158   }
 159 
 160   void wait_reenter_begin(ObjectMonitor *mon) {
 161     JavaThread *jt = (JavaThread *)this->_thread;
 162     _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
 163   }
 164 
 165   void wait_reenter_end(ObjectMonitor *mon) {
 166     JavaThread *jt = (JavaThread *)this->_thread;
 167     JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
 168   }
 169 };
 170 
 171 enum ManifestConstants {
 172     ClearResponsibleAtSTW   = 0,
 173     MaximumRecheckInterval  = 1000
 174 } ;
 175 
 176 
 177 #undef TEVENT
 178 #define TEVENT(nom) {if (SyncVerbose) FEVENT(nom); }
 179 
 180 #define FEVENT(nom) { static volatile int ctr = 0 ; int v = ++ctr ; if ((v & (v-1)) == 0) { ::printf (#nom " : %d \n", v); ::fflush(stdout); }}
 181 
 182 #undef  TEVENT
 183 #define TEVENT(nom) {;}
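// Note: the redefinition above intentionally turns TEVENT into a no-op,
// disabling the SyncVerbose event tally by default.  To re-enable the
// power-of-two event printing, remove the final #undef/#define pair.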
 184 
 185 // Performance concern:
 186 // OrderAccess::storestore() calls release() which STs 0 into the global volatile
 187 // OrderAccess::Dummy variable.  This store is unnecessary for correctness.
 188 // Many threads STing into a common location causes considerable cache migration
// or "sloshing" on large SMP systems.  As such, I avoid using OrderAccess::storestore()
 190 // until it's repaired.  In some cases OrderAccess::fence() -- which incurs local
 191 // latency on the executing processor -- is a better choice as it scales on SMP
 192 // systems.  See http://blogs.sun.com/dave/entry/biased_locking_in_hotspot for a
 193 // discussion of coherency costs.  Note that all our current reference platforms
 194 // provide strong ST-ST order, so the issue is moot on IA32, x64, and SPARC.
 195 //
 196 // As a general policy we use "volatile" to control compiler-based reordering
 197 // and explicit fences (barriers) to control for architectural reordering performed
 198 // by the CPU(s) or platform.
 199 
 200 static int  MBFence (int x) { OrderAccess::fence(); return x; }
 201 
 202 struct SharedGlobals {
 203     // These are highly shared mostly-read variables.
 204     // To avoid false-sharing they need to be the sole occupants of a $ line.
 205     double padPrefix [8];
 206     volatile int stwRandom ;
 207     volatile int stwCycle ;
 208 
 209     // Hot RW variables -- Sequester to avoid false-sharing
 210     double padSuffix [16];
 211     volatile int hcSequence ;
 212     double padFinal [8] ;
 213 } ;
 214 
 215 static SharedGlobals GVars ;
 216 static int MonitorScavengeThreshold = 1000000 ;
 217 static volatile int ForceMonitorScavenge = 0 ; // Scavenge required and pending
 218 
 219 
 220 // Tunables ...
 221 // The knob* variables are effectively final.  Once set they should
// never be modified thereafter.  Consider using __read_mostly with GCC.
 223 
 224 static int Knob_LogSpins           = 0 ;       // enable jvmstat tally for spins
 225 static int Knob_HandOff            = 0 ;
 226 static int Knob_Verbose            = 0 ;
 227 static int Knob_ReportSettings     = 0 ;
 228 
 229 static int Knob_SpinLimit          = 5000 ;    // derived by an external tool -
 230 static int Knob_SpinBase           = 0 ;       // Floor AKA SpinMin
 231 static int Knob_SpinBackOff        = 0 ;       // spin-loop backoff
 232 static int Knob_CASPenalty         = -1 ;      // Penalty for failed CAS
 233 static int Knob_OXPenalty          = -1 ;      // Penalty for observed _owner change
 234 static int Knob_SpinSetSucc        = 1 ;       // spinners set the _succ field
 235 static int Knob_SpinEarly          = 1 ;
 236 static int Knob_SuccEnabled        = 1 ;       // futile wake throttling
 237 static int Knob_SuccRestrict       = 0 ;       // Limit successors + spinners to at-most-one
 238 static int Knob_MaxSpinners        = -1 ;      // Should be a function of # CPUs
 239 static int Knob_Bonus              = 100 ;     // spin success bonus
 240 static int Knob_BonusB             = 100 ;     // spin success bonus
 241 static int Knob_Penalty            = 200 ;     // spin failure penalty
 242 static int Knob_Poverty            = 1000 ;
 243 static int Knob_SpinAfterFutile    = 1 ;       // Spin after returning from park()
 244 static int Knob_FixedSpin          = 0 ;
 245 static int Knob_OState             = 3 ;       // Spinner checks thread state of _owner
 246 static int Knob_UsePause           = 1 ;
 247 static int Knob_ExitPolicy         = 0 ;
 248 static int Knob_PreSpin            = 10 ;      // 20-100 likely better
 249 static int Knob_ResetEvent         = 0 ;
 250 static int BackOffMask             = 0 ;
 251 
 252 static int Knob_FastHSSEC          = 0 ;
 253 static int Knob_MoveNotifyee       = 2 ;       // notify() - disposition of notifyee
 254 static int Knob_QMode              = 0 ;       // EntryList-cxq policy - queue discipline
 255 static volatile int InitDone       = 0 ;
 256 
 257 
 258 // hashCode() generation :
 259 //
 260 // Possibilities:
 261 // * MD5Digest of {obj,stwRandom}
 262 // * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
 263 // * A DES- or AES-style SBox[] mechanism
 264 // * One of the Phi-based schemes, such as:
//   2654435761 ~= 2^32 / Phi (golden ratio)
 266 //   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
 267 // * A variation of Marsaglia's shift-xor RNG scheme.
 268 // * (obj ^ stwRandom) is appealing, but can result
 269 //   in undesirable regularity in the hashCode values of adjacent objects
 270 //   (objects allocated back-to-back, in particular).  This could potentially
 271 //   result in hashtable collisions and reduced hashtable efficiency.
 272 //   There are simple ways to "diffuse" the middle address bits over the
 273 //   generated hashCode values:
 274 //
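//   One simple diffusion step is to xor the address bits with a shifted copy
//   of themselves and the per-epoch stwRandom value, which is what the
//   hashCode==1 variant in get_next_hash() below computes:
//     value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ;
//   The variant actually used is selected by the hashCode flag.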
 275 
 276 static inline intptr_t get_next_hash(Thread * Self, oop obj) {
 277   intptr_t value = 0 ;
 278   if (hashCode == 0) {
 279      // This form uses an unguarded global Park-Miller RNG,
     // so it's possible for two threads to race and generate the same RNG output.
 281      // On MP system we'll have lots of RW access to a global, so the
 282      // mechanism induces lots of coherency traffic.
 283      value = os::random() ;
 284   } else
 285   if (hashCode == 1) {
 286      // This variation has the property of being stable (idempotent)
 287      // between STW operations.  This can be useful in some of the 1-0
 288      // synchronization schemes.
 289      intptr_t addrBits = intptr_t(obj) >> 3 ;
 290      value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ;
 291   } else
 292   if (hashCode == 2) {
 293      value = 1 ;            // for sensitivity testing
 294   } else
 295   if (hashCode == 3) {
 296      value = ++GVars.hcSequence ;
 297   } else
 298   if (hashCode == 4) {
 299      value = intptr_t(obj) ;
 300   } else {
 301      // Marsaglia's xor-shift scheme with thread-specific state
 302      // This is probably the best overall implementation -- we'll
 303      // likely make this the default in future releases.
 304      unsigned t = Self->_hashStateX ;
 305      t ^= (t << 11) ;
 306      Self->_hashStateX = Self->_hashStateY ;
 307      Self->_hashStateY = Self->_hashStateZ ;
 308      Self->_hashStateZ = Self->_hashStateW ;
 309      unsigned v = Self->_hashStateW ;
 310      v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)) ;
 311      Self->_hashStateW = v ;
 312      value = v ;
 313   }
 314 
 315   value &= markOopDesc::hash_mask;
 316   if (value == 0) value = 0xBAD ;
 317   assert (value != markOopDesc::no_hash, "invariant") ;
 318   TEVENT (hashCode: GENERATE) ;
 319   return value;
 320 }
 321 
 322 void BasicLock::print_on(outputStream* st) const {
 323   st->print("monitor");
 324 }
 325 
 326 void BasicLock::move_to(oop obj, BasicLock* dest) {
 327   // Check to see if we need to inflate the lock. This is only needed
 328   // if an object is locked using "this" lightweight monitor. In that
 329   // case, the displaced_header() is unlocked, because the
 330   // displaced_header() contains the header for the originally unlocked
  // object. However the object could have already been inflated. But that
  // does not matter: the inflation will just be a no-op. For other cases,
 333   // the displaced header will be either 0x0 or 0x3, which are location
 334   // independent, therefore the BasicLock is free to move.
 335   //
 336   // During OSR we may need to relocate a BasicLock (which contains a
 337   // displaced word) from a location in an interpreter frame to a
 338   // new location in a compiled frame.  "this" refers to the source
 339   // basiclock in the interpreter frame.  "dest" refers to the destination
 340   // basiclock in the new compiled frame.  We *always* inflate in move_to().
 341   // The always-Inflate policy works properly, but in 1.5.0 it can sometimes
 342   // cause performance problems in code that makes heavy use of a small # of
 343   // uncontended locks.   (We'd inflate during OSR, and then sync performance
  // would subsequently plummet because the thread would be forced through the slow path).
 345   // This problem has been made largely moot on IA32 by inlining the inflated fast-path
 346   // operations in Fast_Lock and Fast_Unlock in i486.ad.
 347   //
 348   // Note that there is a way to safely swing the object's markword from
 349   // one stack location to another.  This avoids inflation.  Obviously,
 350   // we need to ensure that both locations refer to the current thread's stack.
  // There are some subtle concurrency issues, however, and since the benefit
  // is small (given the support for inflated fast-path locking in the fast_lock, etc)
 353   // we'll leave that optimization for another time.
 354 
 355   if (displaced_header()->is_neutral()) {
 356     ObjectSynchronizer::inflate_helper(obj);
    // WARNING: We cannot put a check here, because the inflation
 358     // will not update the displaced header. Once BasicLock is inflated,
 359     // no one should ever look at its content.
 360   } else {
 361     // Typically the displaced header will be 0 (recursive stack lock) or
 362     // unused_mark.  Naively we'd like to assert that the displaced mark
 363     // value is either 0, neutral, or 3.  But with the advent of the
 364     // store-before-CAS avoidance in fast_lock/compiler_lock_object
 365     // we can find any flavor mark in the displaced mark.
 366   }
 369   dest->set_displaced_header(displaced_header());
 370 }
 371 
 372 // -----------------------------------------------------------------------------
 373 
 374 // standard constructor, allows locking failures
 375 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
 376   _dolock = doLock;
 377   _thread = thread;
 378   debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
 379   _obj = obj;
 380 
 381   if (_dolock) {
 382     TEVENT (ObjectLocker) ;
 383 
 384     ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
 385   }
 386 }
 387 
 388 ObjectLocker::~ObjectLocker() {
 389   if (_dolock) {
 390     ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
 391   }
 392 }
 393 
 394 // -----------------------------------------------------------------------------
 395 
 396 
 397 PerfCounter * ObjectSynchronizer::_sync_Inflations                  = NULL ;
 398 PerfCounter * ObjectSynchronizer::_sync_Deflations                  = NULL ;
 399 PerfCounter * ObjectSynchronizer::_sync_ContendedLockAttempts       = NULL ;
 400 PerfCounter * ObjectSynchronizer::_sync_FutileWakeups               = NULL ;
 401 PerfCounter * ObjectSynchronizer::_sync_Parks                       = NULL ;
 402 PerfCounter * ObjectSynchronizer::_sync_EmptyNotifications          = NULL ;
 403 PerfCounter * ObjectSynchronizer::_sync_Notifications               = NULL ;
 404 PerfCounter * ObjectSynchronizer::_sync_PrivateA                    = NULL ;
 405 PerfCounter * ObjectSynchronizer::_sync_PrivateB                    = NULL ;
 406 PerfCounter * ObjectSynchronizer::_sync_SlowExit                    = NULL ;
 407 PerfCounter * ObjectSynchronizer::_sync_SlowEnter                   = NULL ;
 408 PerfCounter * ObjectSynchronizer::_sync_SlowNotify                  = NULL ;
 409 PerfCounter * ObjectSynchronizer::_sync_SlowNotifyAll               = NULL ;
 410 PerfCounter * ObjectSynchronizer::_sync_FailedSpins                 = NULL ;
 411 PerfCounter * ObjectSynchronizer::_sync_SuccessfulSpins             = NULL ;
 412 PerfCounter * ObjectSynchronizer::_sync_MonInCirculation            = NULL ;
 413 PerfCounter * ObjectSynchronizer::_sync_MonScavenged                = NULL ;
 414 PerfLongVariable * ObjectSynchronizer::_sync_MonExtant              = NULL ;
 415 
 416 // One-shot global initialization for the sync subsystem.
 417 // We could also defer initialization and initialize on-demand
 418 // the first time we call inflate().  Initialization would
 419 // be protected - like so many things - by the MonitorCache_lock.
 420 
 421 void ObjectSynchronizer::Initialize () {
 422   static int InitializationCompleted = 0 ;
 423   assert (InitializationCompleted == 0, "invariant") ;
 424   InitializationCompleted = 1 ;
 425   if (UsePerfData) {
 426       EXCEPTION_MARK ;
 427       #define NEWPERFCOUNTER(n)   {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); }
 428       #define NEWPERFVARIABLE(n)  {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); }
 429       NEWPERFCOUNTER(_sync_Inflations) ;
 430       NEWPERFCOUNTER(_sync_Deflations) ;
 431       NEWPERFCOUNTER(_sync_ContendedLockAttempts) ;
 432       NEWPERFCOUNTER(_sync_FutileWakeups) ;
 433       NEWPERFCOUNTER(_sync_Parks) ;
 434       NEWPERFCOUNTER(_sync_EmptyNotifications) ;
 435       NEWPERFCOUNTER(_sync_Notifications) ;
 436       NEWPERFCOUNTER(_sync_SlowEnter) ;
 437       NEWPERFCOUNTER(_sync_SlowExit) ;
 438       NEWPERFCOUNTER(_sync_SlowNotify) ;
 439       NEWPERFCOUNTER(_sync_SlowNotifyAll) ;
 440       NEWPERFCOUNTER(_sync_FailedSpins) ;
 441       NEWPERFCOUNTER(_sync_SuccessfulSpins) ;
 442       NEWPERFCOUNTER(_sync_PrivateA) ;
 443       NEWPERFCOUNTER(_sync_PrivateB) ;
 444       NEWPERFCOUNTER(_sync_MonInCirculation) ;
 445       NEWPERFCOUNTER(_sync_MonScavenged) ;
 446       NEWPERFVARIABLE(_sync_MonExtant) ;
      #undef NEWPERFCOUNTER
      #undef NEWPERFVARIABLE
 448   }
 449 }
 450 
 451 // Compile-time asserts
 452 // When possible, it's better to catch errors deterministically at
 453 // compile-time than at runtime.  The down-side to using compile-time
// asserts is that the error message -- often something about negative array
 455 // indices -- is opaque.
 456 
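// CTASSERT declares an array whose length is 1 when the predicate holds and
// negative when it does not, so a false predicate fails to compile.  (The
// printf merely references the array so it is not flagged as unused.)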
 457 #define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @" INTPTR_FORMAT "\n", (intptr_t)tag); }
 458 
 459 void ObjectMonitor::ctAsserts() {
 460   CTASSERT(offset_of (ObjectMonitor, _header) == 0);
 461 }
 462 
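// Adjust: atomically add dx to *adr via a CAS loop and return the value
// observed immediately before the successful update.  For example,
// Adjust(&SomeCounter, 1) would bump a hypothetical SomeCounter by one.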
 463 static int Adjust (volatile int * adr, int dx) {
 464   int v ;
 465   for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
 466   return v ;
 467 }
 468 
 469 // Ad-hoc mutual exclusion primitives: SpinLock and Mux
 470 //
 471 // We employ SpinLocks _only for low-contention, fixed-length
 472 // short-duration critical sections where we're concerned
 473 // about native mutex_t or HotSpot Mutex:: latency.
 474 // The mux construct provides a spin-then-block mutual exclusion
 475 // mechanism.
 476 //
 477 // Testing has shown that contention on the ListLock guarding gFreeList
 478 // is common.  If we implement ListLock as a simple SpinLock it's common
 479 // for the JVM to devolve to yielding with little progress.  This is true
 480 // despite the fact that the critical sections protected by ListLock are
 481 // extremely short.
 482 //
 483 // TODO-FIXME: ListLock should be of type SpinLock.
 484 // We should make this a 1st-class type, integrated into the lock
 485 // hierarchy as leaf-locks.  Critically, the SpinLock structure
 486 // should have sufficient padding to avoid false-sharing and excessive
 487 // cache-coherency traffic.
 488 
 489 
 490 typedef volatile int SpinLockT ;
 491 
 492 void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
 493   if (Atomic::cmpxchg (1, adr, 0) == 0) {
 494      return ;   // normal fast-path return
 495   }
 496 
 497   // Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
 498   TEVENT (SpinAcquire - ctx) ;
 499   int ctr = 0 ;
 500   int Yields = 0 ;
 501   for (;;) {
 502      while (*adr != 0) {
 503         ++ctr ;
 504         if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
 505            if (Yields > 5) {
 506              // Consider using a simple NakedSleep() instead.
 507              // Then SpinAcquire could be called by non-JVM threads
 508              Thread::current()->_ParkEvent->park(1) ;
 509            } else {
 510              os::NakedYield() ;
 511              ++Yields ;
 512            }
 513         } else {
 514            SpinPause() ;
 515         }
 516      }
 517      if (Atomic::cmpxchg (1, adr, 0) == 0) return ;
 518   }
 519 }
 520 
 521 void Thread::SpinRelease (volatile int * adr) {
 522   assert (*adr != 0, "invariant") ;
 523   OrderAccess::fence() ;      // guarantee at least release consistency.
 524   // Roach-motel semantics.
 525   // It's safe if subsequent LDs and STs float "up" into the critical section,
 526   // but prior LDs and STs within the critical section can't be allowed
 527   // to reorder or float past the ST that releases the lock.
 528   *adr = 0 ;
 529 }
 530 
 531 // muxAcquire and muxRelease:
 532 //
 533 // *  muxAcquire and muxRelease support a single-word lock-word construct.
 534 //    The LSB of the word is set IFF the lock is held.
 535 //    The remainder of the word points to the head of a singly-linked list
 536 //    of threads blocked on the lock.
 537 //
 538 // *  The current implementation of muxAcquire-muxRelease uses its own
 539 //    dedicated Thread._MuxEvent instance.  If we're interested in
 540 //    minimizing the peak number of extant ParkEvent instances then
 541 //    we could eliminate _MuxEvent and "borrow" _ParkEvent as long
 542 //    as certain invariants were satisfied.  Specifically, care would need
 543 //    to be taken with regards to consuming unpark() "permits".
 544 //    A safe rule of thumb is that a thread would never call muxAcquire()
 545 //    if it's enqueued (cxq, EntryList, WaitList, etc) and will subsequently
 546 //    park().  Otherwise the _ParkEvent park() operation in muxAcquire() could
 547 //    consume an unpark() permit intended for monitorenter, for instance.
 548 //    One way around this would be to widen the restricted-range semaphore
 549 //    implemented in park().  Another alternative would be to provide
 550 //    multiple instances of the PlatformEvent() for each thread.  One
 551 //    instance would be dedicated to muxAcquire-muxRelease, for instance.
 552 //
 553 // *  Usage:
 554 //    -- Only as leaf locks
 555 //    -- for short-term locking only as muxAcquire does not perform
 556 //       thread state transitions.
 557 //
 558 // Alternatives:
 559 // *  We could implement muxAcquire and muxRelease with MCS or CLH locks
 560 //    but with parking or spin-then-park instead of pure spinning.
// *  Use Taura-Oyama-Yonezawa locks.
 562 // *  It's possible to construct a 1-0 lock if we encode the lockword as
 563 //    (List,LockByte).  Acquire will CAS the full lockword while Release
 564 //    will STB 0 into the LockByte.  The 1-0 scheme admits stranding, so
 565 //    acquiring threads use timers (ParkTimed) to detect and recover from
 566 //    the stranding window.  Thread/Node structures must be aligned on 256-byte
 567 //    boundaries by using placement-new.
 568 // *  Augment MCS with advisory back-link fields maintained with CAS().
 569 //    Pictorially:  LockWord -> T1 <-> T2 <-> T3 <-> ... <-> Tn <-> Owner.
 570 //    The validity of the backlinks must be ratified before we trust the value.
//    If the backlinks are invalid the exiting thread must back-track through
//    the forward links, which are always trustworthy.
 573 // *  Add a successor indication.  The LockWord is currently encoded as
 574 //    (List, LOCKBIT:1).  We could also add a SUCCBIT or an explicit _succ variable
 575 //    to provide the usual futile-wakeup optimization.
 576 //    See RTStt for details.
 577 // *  Consider schedctl.sc_nopreempt to cover the critical section.
 578 //
 579 
 580 
 581 typedef volatile intptr_t MutexT ;      // Mux Lock-word
 582 enum MuxBits { LOCKBIT = 1 } ;
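// Lockword encoding used by muxAcquire/muxRelease (a sketch of the invariant
// maintained below):
//   0                        -- unlocked, no waiters
//   LOCKBIT                  -- locked, no waiters
//   (ParkEvent* | LOCKBIT)   -- locked, with a stack of waiting ParkEvents
//                               chained through ParkEvent::ListNext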
 583 
 584 void Thread::muxAcquire (volatile intptr_t * Lock, const char * LockName) {
 585   intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
 586   if (w == 0) return ;
 587   if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
 588      return ;
 589   }
 590 
 591   TEVENT (muxAcquire - Contention) ;
 592   ParkEvent * const Self = Thread::current()->_MuxEvent ;
 593   assert ((intptr_t(Self) & LOCKBIT) == 0, "invariant") ;
 594   for (;;) {
 595      int its = (os::is_MP() ? 100 : 0) + 1 ;
 596 
 597      // Optional spin phase: spin-then-park strategy
 598      while (--its >= 0) {
 599        w = *Lock ;
 600        if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
 601           return ;
 602        }
 603      }
 604 
 605      Self->reset() ;
 606      Self->OnList = intptr_t(Lock) ;
 607      // The following fence() isn't _strictly necessary as the subsequent
 608      // CAS() both serializes execution and ratifies the fetched *Lock value.
 609      OrderAccess::fence();
 610      for (;;) {
 611         w = *Lock ;
 612         if ((w & LOCKBIT) == 0) {
 613             if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
 614                 Self->OnList = 0 ;   // hygiene - allows stronger asserts
 615                 return ;
 616             }
 617             continue ;      // Interference -- *Lock changed -- Just retry
 618         }
 619         assert (w & LOCKBIT, "invariant") ;
 620         Self->ListNext = (ParkEvent *) (w & ~LOCKBIT );
 621         if (Atomic::cmpxchg_ptr (intptr_t(Self)|LOCKBIT, Lock, w) == w) break ;
 622      }
 623 
 624      while (Self->OnList != 0) {
 625         Self->park() ;
 626      }
 627   }
 628 }
 629 
 630 void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) {
 631   intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
 632   if (w == 0) return ;
 633   if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
 634     return ;
 635   }
 636 
 637   TEVENT (muxAcquire - Contention) ;
 638   ParkEvent * ReleaseAfter = NULL ;
 639   if (ev == NULL) {
 640     ev = ReleaseAfter = ParkEvent::Allocate (NULL) ;
 641   }
 642   assert ((intptr_t(ev) & LOCKBIT) == 0, "invariant") ;
 643   for (;;) {
 644     guarantee (ev->OnList == 0, "invariant") ;
 645     int its = (os::is_MP() ? 100 : 0) + 1 ;
 646 
 647     // Optional spin phase: spin-then-park strategy
 648     while (--its >= 0) {
 649       w = *Lock ;
 650       if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
 651         if (ReleaseAfter != NULL) {
 652           ParkEvent::Release (ReleaseAfter) ;
 653         }
 654         return ;
 655       }
 656     }
 657 
 658     ev->reset() ;
 659     ev->OnList = intptr_t(Lock) ;
 660     // The following fence() isn't _strictly necessary as the subsequent
 661     // CAS() both serializes execution and ratifies the fetched *Lock value.
 662     OrderAccess::fence();
 663     for (;;) {
 664       w = *Lock ;
 665       if ((w & LOCKBIT) == 0) {
 666         if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
 667           ev->OnList = 0 ;
 668           // We call ::Release while holding the outer lock, thus
 669           // artificially lengthening the critical section.
 670           // Consider deferring the ::Release() until the subsequent unlock(),
 671           // after we've dropped the outer lock.
 672           if (ReleaseAfter != NULL) {
 673             ParkEvent::Release (ReleaseAfter) ;
 674           }
 675           return ;
 676         }
 677         continue ;      // Interference -- *Lock changed -- Just retry
 678       }
 679       assert (w & LOCKBIT, "invariant") ;
 680       ev->ListNext = (ParkEvent *) (w & ~LOCKBIT );
 681       if (Atomic::cmpxchg_ptr (intptr_t(ev)|LOCKBIT, Lock, w) == w) break ;
 682     }
 683 
 684     while (ev->OnList != 0) {
 685       ev->park() ;
 686     }
 687   }
 688 }
 689 
 690 // Release() must extract a successor from the list and then wake that thread.
 691 // It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme
 692 // similar to that used by ParkEvent::Allocate() and ::Release().  DMR-based
 693 // Release() would :
 694 // (A) CAS() or swap() null to *Lock, releasing the lock and detaching the list.
 695 // (B) Extract a successor from the private list "in-hand"
 696 // (C) attempt to CAS() the residual back into *Lock over null.
//     If there were any newly arrived threads the CAS() would fail.
 698 //     In that case Release() would detach the RATs, re-merge the list in-hand
 699 //     with the RATs and repeat as needed.  Alternately, Release() might
 700 //     detach and extract a successor, but then pass the residual list to the wakee.
 701 //     The wakee would be responsible for reattaching and remerging before it
 702 //     competed for the lock.
 703 //
 704 // Both "pop" and DMR are immune from ABA corruption -- there can be
 705 // multiple concurrent pushers, but only one popper or detacher.
 706 // This implementation pops from the head of the list.  This is unfair,
 707 // but tends to provide excellent throughput as hot threads remain hot.
 708 // (We wake recently run threads first).
 709 
 710 void Thread::muxRelease (volatile intptr_t * Lock)  {
 711   for (;;) {
 712     const intptr_t w = Atomic::cmpxchg_ptr (0, Lock, LOCKBIT) ;
 713     assert (w & LOCKBIT, "invariant") ;
 714     if (w == LOCKBIT) return ;
 715     ParkEvent * List = (ParkEvent *) (w & ~LOCKBIT) ;
 716     assert (List != NULL, "invariant") ;
 717     assert (List->OnList == intptr_t(Lock), "invariant") ;
 718     ParkEvent * nxt = List->ListNext ;
 719 
 720     // The following CAS() releases the lock and pops the head element.
 721     if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
 722       continue ;
 723     }
 724     List->OnList = 0 ;
 725     OrderAccess::fence() ;
 726     List->unpark () ;
 727     return ;
 728   }
 729 }
 730 
 731 // ObjectMonitor Lifecycle
 732 // -----------------------
 733 // Inflation unlinks monitors from the global gFreeList and
 734 // associates them with objects.  Deflation -- which occurs at
 735 // STW-time -- disassociates idle monitors from objects.  Such
 736 // scavenged monitors are returned to the gFreeList.
 737 //
 738 // The global list is protected by ListLock.  All the critical sections
 739 // are short and operate in constant-time.
 740 //
 741 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
 742 //
 743 // Lifecycle:
 744 // --   unassigned and on the global free list
 745 // --   unassigned and on a thread's private omFreeList
 746 // --   assigned to an object.  The object is inflated and the mark refers
 747 //      to the objectmonitor.
 748 //
 749 // TODO-FIXME:
 750 //
 751 // *  We currently protect the gFreeList with a simple lock.
 752 //    An alternate lock-free scheme would be to pop elements from the gFreeList
//    with CAS.  This would be safe from ABA corruption as long as we only
 754 //    recycled previously appearing elements onto the list in deflate_idle_monitors()
 755 //    at STW-time.  Completely new elements could always be pushed onto the gFreeList
 756 //    with CAS.  Elements that appeared previously on the list could only
 757 //    be installed at STW-time.
 758 //
 759 // *  For efficiency and to help reduce the store-before-CAS penalty
 760 //    the objectmonitors on gFreeList or local free lists should be ready to install
 761 //    with the exception of _header and _object.  _object can be set after inflation.
 762 //    In particular, keep all objectMonitors on a thread's private list in ready-to-install
 763 //    state with m.Owner set properly.
 764 //
// *  We could also diffuse contention by using multiple global (FreeList, Lock)
 766 //    pairs -- threads could use trylock() and a cyclic-scan strategy to search for
 767 //    an unlocked free list.
 768 //
 769 // *  Add lifecycle tags and assert()s.
 770 //
 771 // *  Be more consistent about when we clear an objectmonitor's fields:
 772 //    A.  After extracting the objectmonitor from a free list.
 773 //    B.  After adding an objectmonitor to a free list.
 774 //
 775 
 776 ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
 777 ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL ;
 778 ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL ;
 779 int ObjectSynchronizer::gOmInUseCount = 0;
 780 static volatile intptr_t ListLock = 0 ;      // protects global monitor free-list cache
 781 static volatile int MonitorFreeCount  = 0 ;      // # on gFreeList
 782 static volatile int MonitorPopulation = 0 ;      // # Extant -- in circulation
 783 #define CHAINMARKER ((oop)-1)
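// CHAINMARKER tags the reserved element [0] of each monitor block (see
// omAlloc) so list walkers can distinguish block headers, which provide
// gBlockList linkage, from ordinary monitors.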
 784 
 785 // Constraining monitor pool growth via MonitorBound ...
 786 //
// The monitor pool is grow-only.  We scavenge at STW safepoint-time, but the
// rate of scavenging is driven primarily by GC.  As such, we can find
 789 // an inordinate number of monitors in circulation.
 790 // To avoid that scenario we can artificially induce a STW safepoint
 791 // if the pool appears to be growing past some reasonable bound.
 792 // Generally we favor time in space-time tradeoffs, but as there's no
 793 // natural back-pressure on the # of extant monitors we need to impose some
 794 // type of limit.  Beware that if MonitorBound is set to too low a value
 795 // we could just loop. In addition, if MonitorBound is set to a low value
 796 // we'll incur more safepoints, which are harmful to performance.
 797 // See also: GuaranteedSafepointInterval
 798 //
 799 // As noted elsewhere, the correct long-term solution is to deflate at
 800 // monitorexit-time, in which case the number of inflated objects is bounded
 801 // by the number of threads.  That policy obviates the need for scavenging at
 802 // STW safepoint time.   As an aside, scavenging can be time-consuming when the
 803 // # of extant monitors is large.   Unfortunately there's a day-1 assumption baked
 804 // into much HotSpot code that the object::monitor relationship, once established
 805 // or observed, will remain stable except over potential safepoints.
 806 //
 807 // We can use either a blocking synchronous VM operation or an async VM operation.
 808 // -- If we use a blocking VM operation :
 809 //    Calls to ScavengeCheck() should be inserted only into 'safe' locations in paths
 810 //    that lead to ::inflate() or ::omAlloc().
 811 //    Even though the safepoint will not directly induce GC, a GC might
 812 //    piggyback on the safepoint operation, so the caller should hold no naked oops.
 813 //    Furthermore, monitor::object relationships are NOT necessarily stable over this call
 814 //    unless the caller has made provisions to "pin" the object to the monitor, say
 815 //    by incrementing the monitor's _count field.
 816 // -- If we use a non-blocking asynchronous VM operation :
 817 //    the constraints above don't apply.  The safepoint will fire in the future
 818 //    at a more convenient time.  On the other hand the latency between posting and
 819 //    running the safepoint introduces or admits "slop" or laxity during which the
 820 //    monitor population can climb further above the threshold.  The monitor population,
 821 //    however, tends to converge asymptotically over time to a count that's slightly
 822 //    above the target value specified by MonitorBound.   That is, we avoid unbounded
 823 //    growth, albeit with some imprecision.
 824 //
 825 // The current implementation uses asynchronous VM operations.
 826 //
 827 // Ideally we'd check if (MonitorPopulation > MonitorBound) in omAlloc()
 828 // immediately before trying to grow the global list via allocation.
 829 // If the predicate was true then we'd induce a synchronous safepoint, wait
// for the safepoint to complete, and then try again to allocate from the global
// free list.  This approach is simpler and more precise, admitting no "slop".
 832 // Unfortunately we can't safely safepoint in the midst of omAlloc(), so
 833 // instead we use asynchronous safepoints.
 834 
 835 static void InduceScavenge (Thread * Self, const char * Whence) {
 836   // Induce STW safepoint to trim monitors
 837   // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
 838   // More precisely, trigger an asynchronous STW safepoint as the number
 839   // of active monitors passes the specified threshold.
 840   // TODO: assert thread state is reasonable
 841 
 842   if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
 843     if (Knob_Verbose) {
 844       ::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ;
 845       ::fflush(stdout) ;
 846     }
 847     // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated as the op will be enqueued and posted
    // to the VMThread and has a lifespan longer than that of this activation record.
 850     // The VMThread will delete the op when completed.
 851     VMThread::execute (new VM_ForceAsyncSafepoint()) ;
 852 
 853     if (Knob_Verbose) {
 854       ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ;
 855       ::fflush(stdout) ;
 856     }
 857   }
 858 }
 859 /* Too slow for general assert or debug
 860 void ObjectSynchronizer::verifyInUse (Thread *Self) {
 861    ObjectMonitor* mid;
 862    int inusetally = 0;
 863    for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
 864      inusetally ++;
 865    }
 866    assert(inusetally == Self->omInUseCount, "inuse count off");
 867 
 868    int freetally = 0;
 869    for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
 870      freetally ++;
 871    }
 872    assert(freetally == Self->omFreeCount, "free count off");
 873 }
 874 */
 875 
 876 ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
 877     // A large MAXPRIVATE value reduces both list lock contention
 878     // and list coherency traffic, but also tends to increase the
 879     // number of objectMonitors in circulation as well as the STW
 880     // scavenge costs.  As usual, we lean toward time in space-time
 881     // tradeoffs.
 882     const int MAXPRIVATE = 1024 ;
 883     for (;;) {
 884         ObjectMonitor * m ;
 885 
 886         // 1: try to allocate from the thread's local omFreeList.
 887         // Threads will attempt to allocate first from their local list, then
 888         // from the global list, and only after those attempts fail will the thread
 889         // attempt to instantiate new monitors.   Thread-local free lists take
 890         // heat off the ListLock and improve allocation latency, as well as reducing
 891         // coherency traffic on the shared global list.
 892         m = Self->omFreeList ;
 893         if (m != NULL) {
 894            Self->omFreeList = m->FreeNext ;
 895            Self->omFreeCount -- ;
 896            // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
 897            guarantee (m->object() == NULL, "invariant") ;
 898            if (MonitorInUseLists) {
 899              m->FreeNext = Self->omInUseList;
 900              Self->omInUseList = m;
 901              Self->omInUseCount ++;
 902              // verifyInUse(Self);
 903            } else {
 904              m->FreeNext = NULL;
 905            }
 906            return m ;
 907         }
 908 
 909         // 2: try to allocate from the global gFreeList
 910         // CONSIDER: use muxTry() instead of muxAcquire().
 911         // If the muxTry() fails then drop immediately into case 3.
 912         // If we're using thread-local free lists then try
 913         // to reprovision the caller's free list.
 914         if (gFreeList != NULL) {
 915             // Reprovision the thread's omFreeList.
 916             // Use bulk transfers to reduce the allocation rate and heat
 917             // on various locks.
 918             Thread::muxAcquire (&ListLock, "omAlloc") ;
 919             for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL; ) {
 920                 MonitorFreeCount --;
 921                 ObjectMonitor * take = gFreeList ;
 922                 gFreeList = take->FreeNext ;
 923                 guarantee (take->object() == NULL, "invariant") ;
 924                 guarantee (!take->is_busy(), "invariant") ;
 925                 take->Recycle() ;
 926                 omRelease (Self, take, false) ;
 927             }
 928             Thread::muxRelease (&ListLock) ;
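            // Grow the caller's reprovision quantum geometrically (roughly 1.5x),
            // capped at MAXPRIVATE.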
 929             Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ;
 930             if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ;
 931             TEVENT (omFirst - reprovision) ;
 932 
 933             const int mx = MonitorBound ;
 934             if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
 935               // We can't safely induce a STW safepoint from omAlloc() as our thread
 936               // state may not be appropriate for such activities and callers may hold
 937               // naked oops, so instead we defer the action.
 938               InduceScavenge (Self, "omAlloc") ;
 939             }
 940             continue;
 941         }
 942 
 943         // 3: allocate a block of new ObjectMonitors
 944         // Both the local and global free lists are empty -- resort to malloc().
 945         // In the current implementation objectMonitors are TSM - immortal.
 946         assert (_BLOCKSIZE > 1, "invariant") ;
 947         ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];
 948 
 949         // NOTE: (almost) no way to recover if allocation failed.
 950         // We might be able to induce a STW safepoint and scavenge enough
 951         // objectMonitors to permit progress.
 952         if (temp == NULL) {
 953             vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), "Allocate ObjectMonitors") ;
 954         }
 955 
 956         // Format the block.
        // initialize the linked list; each monitor points to its successor,
        // forming the singly-linked free list.  The very first monitor
        // points to the next block, which forms the block list.
 960         // The trick of using the 1st element in the block as gBlockList
 961         // linkage should be reconsidered.  A better implementation would
 962         // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
 963 
 964         for (int i = 1; i < _BLOCKSIZE ; i++) {
 965            temp[i].FreeNext = &temp[i+1];
 966         }
 967 
 968         // terminate the last monitor as the end of list
 969         temp[_BLOCKSIZE - 1].FreeNext = NULL ;
 970 
 971         // Element [0] is reserved for global list linkage
 972         temp[0].set_object(CHAINMARKER);
 973 
 974         // Consider carving out this thread's current request from the
 975         // block in hand.  This avoids some lock traffic and redundant
 976         // list activity.
 977 
 978         // Acquire the ListLock to manipulate BlockList and FreeList.
 979         // An Oyama-Taura-Yonezawa scheme might be more efficient.
 980         Thread::muxAcquire (&ListLock, "omAlloc [2]") ;
 981         MonitorPopulation += _BLOCKSIZE-1;
 982         MonitorFreeCount += _BLOCKSIZE-1;
 983 
 984         // Add the new block to the list of extant blocks (gBlockList).
 985         // The very first objectMonitor in a block is reserved and dedicated.
 986         // It serves as blocklist "next" linkage.
 987         temp[0].FreeNext = gBlockList;
 988         gBlockList = temp;
 989 
 990         // Add the new string of objectMonitors to the global free list
 991         temp[_BLOCKSIZE - 1].FreeNext = gFreeList ;
 992         gFreeList = temp + 1;
 993         Thread::muxRelease (&ListLock) ;
 994         TEVENT (Allocate block of monitors) ;
 995     }
 996 }
 997 
 998 // Place "m" on the caller's private per-thread omFreeList.
 999 // In practice there's no need to clamp or limit the number of
1000 // monitors on a thread's omFreeList as the only time we'll call
1001 // omRelease is to return a monitor to the free list after a CAS
1002 // attempt failed.  This doesn't allow unbounded #s of monitors to
1003 // accumulate on a thread's free list.
1004 //
1005 // In the future the usage of omRelease() might change and monitors
1006 // could migrate between free lists.  In that case to avoid excessive
1007 // accumulation we could  limit omCount to (omProvision*2), otherwise return
1008 // the objectMonitor to the global list.  We should drain (return) in reasonable chunks.
1009 // That is, *not* one-at-a-time.
1010 
1011 
1012 void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) {
1013     guarantee (m->object() == NULL, "invariant") ;
1014 
1015     // Remove from omInUseList
1016     if (MonitorInUseLists && fromPerThreadAlloc) {
1017       ObjectMonitor* curmidinuse = NULL;
1018       for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; ) {
1019        if (m == mid) {
1020          // extract from per-thread in-use-list
1021          if (mid == Self->omInUseList) {
1022            Self->omInUseList = mid->FreeNext;
1023          } else if (curmidinuse != NULL) {
1024            curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
1025          }
1026          Self->omInUseCount --;
1027          // verifyInUse(Self);
1028          break;
1029        } else {
1030          curmidinuse = mid;
1031          mid = mid->FreeNext;
       }
      }
    }
1035 
    // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
    m->FreeNext = Self->omFreeList ;
    Self->omFreeList = m ;
    Self->omFreeCount ++ ;
1040 }
1041 
1042 // Return the monitors of a moribund thread's local free list to
1043 // the global free list.  Typically a thread calls omFlush() when
1044 // it's dying.  We could also consider having the VM thread steal
1045 // monitors from threads that have not run java code over a few
1046 // consecutive STW safepoints.  Relatedly, we might decay
1047 // omFreeProvision at STW safepoints.
1048 //
// Also return the monitors of a moribund thread's omInUseList to
1050 // a global gOmInUseList under the global list lock so these
1051 // will continue to be scanned.
1052 //
// We currently call omFlush() from the Thread dtor, after the thread
1054 // has been excised from the thread list and is no longer a mutator.
1055 // That means that omFlush() can run concurrently with a safepoint and
1056 // the scavenge operator.  Calling omFlush() from JavaThread::exit() might
// be a better choice as we could safely reason that the JVM is
// not at a safepoint at the time of the call, and thus there could
// be no inopportune interleavings between omFlush() and the scavenge
1060 // operator.
1061 
1062 void ObjectSynchronizer::omFlush (Thread * Self) {
1063     ObjectMonitor * List = Self->omFreeList ;  // Null-terminated SLL
1064     Self->omFreeList = NULL ;
1065     ObjectMonitor * Tail = NULL ;
1066     int Tally = 0;
1067     if (List != NULL) {
1068       ObjectMonitor * s ;
1069       for (s = List ; s != NULL ; s = s->FreeNext) {
1070           Tally ++ ;
1071           Tail = s ;
1072           guarantee (s->object() == NULL, "invariant") ;
1073           guarantee (!s->is_busy(), "invariant") ;
1074           s->set_owner (NULL) ;   // redundant but good hygiene
1075           TEVENT (omFlush - Move one) ;
1076       }
1077       guarantee (Tail != NULL && List != NULL, "invariant") ;
1078     }
1079 
1080     ObjectMonitor * InUseList = Self->omInUseList;
1081     ObjectMonitor * InUseTail = NULL ;
1082     int InUseTally = 0;
1083     if (InUseList != NULL) {
1084       Self->omInUseList = NULL;
1085       ObjectMonitor *curom;
1086       for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
1087         InUseTail = curom;
1088         InUseTally++;
1089       }
1090 // TODO debug
1091       assert(Self->omInUseCount == InUseTally, "inuse count off");
1092       Self->omInUseCount = 0;
1093       guarantee (InUseTail != NULL && InUseList != NULL, "invariant");
1094     }
1095 
1096     Thread::muxAcquire (&ListLock, "omFlush") ;
1097     if (Tail != NULL) {
1098       Tail->FreeNext = gFreeList ;
1099       gFreeList = List ;
1100       MonitorFreeCount += Tally;
1101     }
1102 
1103     if (InUseTail != NULL) {
1104       InUseTail->FreeNext = gOmInUseList;
1105       gOmInUseList = InUseList;
1106       gOmInUseCount += InUseTally;
1107     }
1108 
1109     Thread::muxRelease (&ListLock) ;
1110     TEVENT (omFlush) ;
1111 }
1112 
1113 
1114 // Get the next block in the block list.
1115 static inline ObjectMonitor* next(ObjectMonitor* block) {
1116   assert(block->object() == CHAINMARKER, "must be a block header");
1117   block = block->FreeNext ;
1118   assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
1119   return block;
1120 }
1121 
1122 // Fast path code shared by multiple functions
1123 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
1124   markOop mark = obj->mark();
1125   if (mark->has_monitor()) {
1126     assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1127     assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
1128     return mark->monitor();
1129   }
1130   return ObjectSynchronizer::inflate(Thread::current(), obj);
1131 }
1132 
1133 // Note that we could encounter some performance loss through false-sharing as
1134 // multiple locks occupy the same $ line.  Padding might be appropriate.
1135 
1136 #define NINFLATIONLOCKS 256
1137 static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;
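// InflationLocks is a small, hashed array of mux lock-words.  ReadStableMark()
// below uses the object's address to pick one of these locks and parks on it,
// throttling the number of threads that spin while some other thread finishes
// inflating the object.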
1138 
1139 static markOop ReadStableMark (oop obj) {
1140   markOop mark = obj->mark() ;
1141   if (!mark->is_being_inflated()) {
1142     return mark ;       // normal fast-path return
1143   }
1144 
1145   int its = 0 ;
1146   for (;;) {
1147     markOop mark = obj->mark() ;
1148     if (!mark->is_being_inflated()) {
1149       return mark ;    // normal fast-path return
1150     }
1151 
1152     // The object is being inflated by some other thread.
1153     // The caller of ReadStableMark() must wait for inflation to complete.
1154     // Avoid live-lock
1155     // TODO: consider calling SafepointSynchronize::do_call_back() while
1156     // spinning to see if there's a safepoint pending.  If so, immediately
1157     // yielding or blocking would be appropriate.  Avoid spinning while
1158     // there is a safepoint pending.
1159     // TODO: add inflation contention performance counters.
1160     // TODO: restrict the aggregate number of spinners.
1161 
1162     ++its ;
1163     if (its > 10000 || !os::is_MP()) {
1164        if (its & 1) {
1165          os::NakedYield() ;
1166          TEVENT (Inflate: INFLATING - yield) ;
1167        } else {
1168          // Note that the following code attenuates the livelock problem but is not
1169          // a complete remedy.  A more complete solution would require that the inflating
1170          // thread hold the associated inflation lock.  The following code simply restricts
1171          // the number of spinners to at most one.  We'll have N-2 threads blocked
1172          // on the inflationlock, 1 thread holding the inflation lock and using
1173          // a yield/park strategy, and 1 thread in the midst of inflation.
1174          // A more refined approach would be to change the encoding of INFLATING
1175          // to allow encapsulation of a native thread pointer.  Threads waiting for
1176          // inflation to complete would use CAS to push themselves onto a singly linked
1177          // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
1178          // and calling park().  When inflation was complete the thread that accomplished inflation
1179          // would detach the list and set the markword to inflated with a single CAS and
1180          // then for each thread on the list, set the flag and unpark() the thread.
1181          // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
1182          // wakes at most one thread whereas we need to wake the entire list.
1183          int ix = (intptr_t(obj) >> 5) & (NINFLATIONLOCKS-1) ;
1184          int YieldThenBlock = 0 ;
1185          assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ;
1186          assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ;
1187          Thread::muxAcquire (InflationLocks + ix, "InflationLock") ;
1188          while (obj->mark() == markOopDesc::INFLATING()) {
1189            // Beware: NakedYield() is advisory and has almost no effect on some platforms
1190            // so we periodically call Self->_ParkEvent->park(1).
1191            // We use a mixed spin/yield/block mechanism.
1192            if ((YieldThenBlock++) >= 16) {
1193               Thread::current()->_ParkEvent->park(1) ;
1194            } else {
1195               os::NakedYield() ;
1196            }
1197          }
1198          Thread::muxRelease (InflationLocks + ix ) ;
1199          TEVENT (Inflate: INFLATING - yield/park) ;
1200        }
1201     } else {
1202        SpinPause() ;       // SMP-polite spinning
1203     }
1204   }
1205 }
1206 
1207 ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
1208   // Inflate mutates the heap ...
1209   // Relaxing assertion for bug 6320749.
1210   assert (Universe::verify_in_progress() ||
1211           !SafepointSynchronize::is_at_safepoint(), "invariant") ;
1212 
1213   for (;;) {
1214       const markOop mark = object->mark() ;
1215       assert (!mark->has_bias_pattern(), "invariant") ;
1216 
1217       // The mark can be in one of the following states:
1218       // *  Inflated     - just return
1219       // *  Stack-locked - coerce it to inflated
1220       // *  INFLATING    - busy wait for conversion to complete
1221       // *  Neutral      - aggressively inflate the object.
1222       // *  BIASED       - Illegal.  We should never see this
1223 
1224       // CASE: inflated
1225       if (mark->has_monitor()) {
1226           ObjectMonitor * inf = mark->monitor() ;
1227           assert (inf->header()->is_neutral(), "invariant");
1228           assert (inf->object() == object, "invariant") ;
1229           assert (ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1230           return inf ;
1231       }
1232 
1233       // CASE: inflation in progress - inflating over a stack-lock.
1234       // Some other thread is converting from stack-locked to inflated.
1235       // Only that thread can complete inflation -- other threads must wait.
1236       // The INFLATING value is transient.
1237       // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1238       // We could always eliminate polling by parking the thread on some auxiliary list.
1239       if (mark == markOopDesc::INFLATING()) {
1240          TEVENT (Inflate: spin while INFLATING) ;
1241          ReadStableMark(object) ;
1242          continue ;
1243       }
1244 
1245       // CASE: stack-locked
1246       // Could be stack-locked either by this thread or by some other thread.
1247       //
1248       // Note that we allocate the objectmonitor speculatively, _before_ attempting
1249       // to install INFLATING into the mark word.  We originally installed INFLATING,
1250       // allocated the objectmonitor, and then finally STed the address of the
1251       // objectmonitor into the mark.  This was correct, but artificially lengthened
1252       // the interval in which INFLATING appeared in the mark, thus increasing
1253       // the odds of inflation contention.
1254       //
1255       // We now use per-thread private objectmonitor free lists.
1256       // These lists are reprovisioned from the global free list outside the
1257       // critical INFLATING...ST interval.  A thread can transfer
1258       // multiple objectmonitors en masse from the global free list to its local free list.
1259       // This reduces coherency traffic and lock contention on the global free list.
1260       // Using such local free lists, it doesn't matter if the omAlloc() call appears
1261       // before or after the CAS(INFLATING) operation.
1262       // See the comments in omAlloc().
1263 
1264       if (mark->has_locker()) {
1265           ObjectMonitor * m = omAlloc (Self) ;
1266           // Optimistically prepare the objectmonitor - anticipate successful CAS
1267           // We do this before the CAS in order to minimize the length of time
1268           // in which INFLATING appears in the mark.
1269           m->Recycle();
1270           m->_Responsible  = NULL ;
1271           m->OwnerIsThread = 0 ;
1272           m->_recursions   = 0 ;
1273           m->_SpinDuration = Knob_SpinLimit ;   // Consider: maintain by type/class
1274 
1275           markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ;
1276           if (cmp != mark) {
1277              omRelease (Self, m, true) ;
1278              continue ;       // Interference -- just retry
1279           }
1280 
1281           // We've successfully installed INFLATING (0) into the mark-word.
1282           // This is the only case where 0 will appear in a mark-word.
1283           // Only the singular thread that successfully swings the mark-word
1284           // to 0 can perform (or more precisely, complete) inflation.
1285           //
1286           // Why do we CAS a 0 into the mark-word instead of just CASing the
1287           // mark-word from the stack-locked value directly to the new inflated state?
1288           // Consider what happens when a thread unlocks a stack-locked object.
1289           // It attempts to use CAS to swing the displaced header value from the
1290           // on-stack basiclock back into the object header.  Recall also that the
1291           // header value (hashcode, etc) can reside in (a) the object header, or
1292           // (b) a displaced header associated with the stack-lock, or (c) a displaced
1293           // header in an objectMonitor.  The inflate() routine must copy the header
1294           // value from the basiclock on the owner's stack to the objectMonitor, all
1295           // the while preserving the hashCode stability invariants.  If the owner
1296           // decides to release the lock while the value is 0, the unlock will fail
1297           // and control will eventually pass from slow_exit() to inflate.  The owner
1298           // will then spin, waiting for the 0 value to disappear.   Put another way,
1299           // the 0 causes the owner to stall if the owner happens to try to
1300           // drop the lock (restoring the header from the basiclock to the object)
1301           // while inflation is in-progress.  This protocol avoids races that
1302           // would otherwise permit hashCode values to change or "flicker" for an object.
1303           // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
1304           // 0 serves as a "BUSY" inflate-in-progress indicator.
1305 
1306 
1307           // Fetch the displaced mark from the owner's stack.
1308           // The owner can't die or unwind past the lock while our INFLATING
1309           // marker is in the mark word.  Furthermore the owner can't complete
1310           // an unlock on the object, either.
1311           markOop dmw = mark->displaced_mark_helper() ;
1312           assert (dmw->is_neutral(), "invariant") ;
1313 
1314           // Setup monitor fields to proper values -- prepare the monitor
1315           m->set_header(dmw) ;
1316 
1317           // Optimization: if the mark->locker stack address is associated
1318           // with this thread we could simply set m->_owner = Self and
1319           // m->OwnerIsThread = 1. Note that a thread can inflate an object
1320           // that it has stack-locked -- as might happen in wait() -- directly
1321           // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
1322           m->set_owner(mark->locker());
1323           m->set_object(object);
1324           // TODO-FIXME: assert BasicLock->dhw != 0.
1325 
1326           // Must preserve store ordering. The monitor state must
1327           // be stable at the time of publishing the monitor address.
1328           guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ;
1329           object->release_set_mark(markOopDesc::encode(m));
1330 
1331           // Hopefully the performance counters are allocated on distinct cache lines
1332           // to avoid false sharing on MP systems ...
1333           if (_sync_Inflations != NULL) _sync_Inflations->inc() ;
1334           TEVENT(Inflate: overwrite stacklock) ;
1335           if (TraceMonitorInflation) {
1336             if (object->is_instance()) {
1337               ResourceMark rm;
1338               tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1339                 (intptr_t) object, (intptr_t) object->mark(),
1340                 Klass::cast(object->klass())->external_name());
1341             }
1342           }
1343           return m ;
1344       }
1345 
1346       // CASE: neutral
1347       // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1348       // If we know we're inflating for entry it's better to inflate by swinging a
1349       // pre-locked objectMonitor pointer into the object header.   A successful
1350       // CAS inflates the object *and* confers ownership to the inflating thread.
1351       // In the current implementation we use a 2-step mechanism where we CAS()
1352       // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1353       // An inflateTry() method that we could call from fast_enter() and slow_enter()
1354       // would be useful.  (An illustrative sketch of such a method follows inflate() below.)
1355 
1356       assert (mark->is_neutral(), "invariant");
1357       ObjectMonitor * m = omAlloc (Self) ;
1358       // prepare m for installation - set monitor to initial state
1359       m->Recycle();
1360       m->set_header(mark);
1361       m->set_owner(NULL);
1362       m->set_object(object);
1363       m->OwnerIsThread = 1 ;
1364       m->_recursions   = 0 ;
1365       m->_Responsible  = NULL ;
1366       m->_SpinDuration = Knob_SpinLimit ;       // consider: keep metastats by type/class
1367 
1368       if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
1369           m->set_object (NULL) ;
1370           m->set_owner  (NULL) ;
1371           m->OwnerIsThread = 0 ;
1372           m->Recycle() ;
1373           omRelease (Self, m, true) ;
1374           m = NULL ;
1375           continue ;
1376           // interference - the markword changed - just retry.
1377           // The state-transitions are one-way, so there's no chance of
1378           // live-lock -- "Inflated" is an absorbing state.
1379       }
1380 
1381       // Hopefully the performance counters are allocated on distinct
1382       // cache lines to avoid false sharing on MP systems ...
1383       if (_sync_Inflations != NULL) _sync_Inflations->inc() ;
1384       TEVENT(Inflate: overwrite neutral) ;
1385       if (TraceMonitorInflation) {
1386         if (object->is_instance()) {
1387           ResourceMark rm;
1388           tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1389             (intptr_t) object, (intptr_t) object->mark(),
1390             Klass::cast(object->klass())->external_name());
1391         }
1392       }
1393       return m ;
1394   }
1395 }
1396 
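// Illustrative sketch only (not compiled): a possible inflateTry() of the kind
// suggested by the TODO-FIXME in the neutral case of inflate() above -- inflate
// and confer ownership with a single CAS.  inflateTry() is not a real method of
// ObjectSynchronizer; some of the field initialization performed by inflate()
// is elided, and the stack-locked case is not handled.
#if 0
ObjectMonitor * ObjectSynchronizer::inflateTry (Thread * Self, oop object) {
  const markOop mark = object->mark() ;
  if (mark->is_neutral()) {
     ObjectMonitor * m = omAlloc (Self) ;
     m->Recycle() ;
     m->set_header(mark) ;
     m->set_owner(Self) ;             // pre-locked: a successful CAS confers ownership
     m->set_object(object) ;
     m->OwnerIsThread = 1 ;
     if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) == mark) {
        return m ;                    // inflated *and* owned by Self
     }
     // Interference -- the markword changed.  Undo the speculative preparation.
     m->set_object(NULL) ;
     m->set_owner(NULL) ;
     m->OwnerIsThread = 0 ;
     m->Recycle() ;
     omRelease (Self, m, true) ;
  }
  return NULL ;                       // caller falls back to inflate()->enter()
}
#endif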
1397 
1398 // This is the fast monitor enter. The interpreter and compiler use
1399 // some assembly copies of this code. Make sure to update that code
1400 // if the following function is changed. The implementation is
1401 // extremely sensitive to race conditions. Be careful.
1402 
1403 void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
1404  if (UseBiasedLocking) {
1405     if (!SafepointSynchronize::is_at_safepoint()) {
1406       BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
1407       if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
1408         return;
1409       }
1410     } else {
1411       assert(!attempt_rebias, "can not rebias toward VM thread");
1412       BiasedLocking::revoke_at_safepoint(obj);
1413     }
1414     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1415  }
1416 
1417  slow_enter (obj, lock, THREAD) ;
1418 }
1419 
1420 void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
1421   assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
1422   // If the displaced header is null, the previous enter was a recursive enter and this exit is a no-op.
1423   markOop dhw = lock->displaced_header();
1424   markOop mark ;
1425   if (dhw == NULL) {
1426      // Recursive stack-lock.
1427      // Diagnostics -- Could be: stack-locked, inflating, inflated.
1428      mark = object->mark() ;
1429      assert (!mark->is_neutral(), "invariant") ;
1430      if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
1431         assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
1432      }
1433      if (mark->has_monitor()) {
1434         ObjectMonitor * m = mark->monitor() ;
1435         assert(((oop)(m->object()))->mark() == mark, "invariant") ;
1436         assert(m->is_entered(THREAD), "invariant") ;
1437      }
1438      return ;
1439   }
1440 
1441   mark = object->mark() ;
1442 
1443   // If the object is stack-locked by the current thread, try to
1444   // swing the displaced header from the box back to the mark.
1445   if (mark == (markOop) lock) {
1446      assert (dhw->is_neutral(), "invariant") ;
1447      if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
1448         TEVENT (fast_exit: release stacklock) ;
1449         return;
1450      }
1451   }
1452 
1453   ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ;
1454 }
1455 
1456 // This routine is used to handle the interpreter/compiler slow case.
1457 // We don't need to use the fast path here, because it must have
1458 // already failed in the interpreter/compiler code.
1459 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
1460   markOop mark = obj->mark();
1461   assert(!mark->has_bias_pattern(), "should not see bias pattern here");
1462 
1463   if (mark->is_neutral()) {
1464     // Anticipate successful CAS -- the ST of the displaced mark must
1465     // be visible <= the ST performed by the CAS.
1466     lock->set_displaced_header(mark);
1467     if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
1468       TEVENT (slow_enter: acquire stacklock) ;
1469       return ;
1470     }
1471     // Fall through to inflate() ...
1472   } else
1473   if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
1474     assert(lock != mark->locker(), "must not re-lock the same lock");
1475     assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
1476     lock->set_displaced_header(NULL);
1477     return;
1478   }
1479 
1480 #if 0
1481   // The following optimization isn't particularly useful.
1482   if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
1483     lock->set_displaced_header (NULL) ;
1484     return ;
1485   }
1486 #endif
1487 
1488   // The object header will never be displaced to this lock,
1489   // so it does not matter what the value is, except that it
1490   // must be non-zero to avoid looking like a re-entrant lock,
1491   // and must not look locked either.
1492   lock->set_displaced_header(markOopDesc::unused_mark());
1493   ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
1494 }
1495 
1496 // This routine is used to handle the interpreter/compiler slow case.
1497 // We don't need to use the fast path here, because it must have
1498 // already failed in the interpreter/compiler code. Simply using the
1499 // heavyweight monitor should be ok, unless someone finds otherwise.
1500 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
1501   fast_exit (object, lock, THREAD) ;
1502 }
1503 
1504 // NOTE: must use heavy weight monitor to handle jni monitor enter
1505 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
1506   // the current locking is from JNI instead of Java code
1507   TEVENT (jni_enter) ;
1508   if (UseBiasedLocking) {
1509     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1510     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1511   }
1512   THREAD->set_current_pending_monitor_is_from_java(false);
1513   ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
1514   THREAD->set_current_pending_monitor_is_from_java(true);
1515 }
1516 
1517 // NOTE: must use heavy weight monitor to handle jni monitor enter
1518 bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
1519   if (UseBiasedLocking) {
1520     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1521     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1522   }
1523 
1524   ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
1525   return monitor->try_enter(THREAD);
1526 }
1527 
1528 
1529 // NOTE: must use heavy weight monitor to handle jni monitor exit
1530 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
1531   TEVENT (jni_exit) ;
1532   if (UseBiasedLocking) {
1533     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1534   }
1535   assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1536 
1537   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
1538   // If this thread has locked the object, exit the monitor.  Note:  can't use
1539   // monitor->check(CHECK); must exit even if an exception is pending.
1540   if (monitor->check(THREAD)) {
1541      monitor->exit(THREAD);
1542   }
1543 }
1544 
1545 // complete_exit()/reenter() are used to wait on a nested lock
1546 // i.e. to give up an outer lock completely and then re-enter
1547 // Used when holding nested locks - lock acquisition order: lock1 then lock2
1548 //  1) complete_exit lock1 - saving recursion count
1549 //  2) wait on lock2
1550 //  3) when notified on lock2, unlock lock2
1551 //  4) reenter lock1 with original recursion count
1552 //  5) lock lock2
1553 // NOTE: must use heavy weight monitor to handle complete_exit/reenter().  An illustrative usage sketch follows reenter() below.
1554 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
1555   TEVENT (complete_exit) ;
1556   if (UseBiasedLocking) {
1557     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1558     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1559   }
1560 
1561   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
1562 
1563   return monitor->complete_exit(THREAD);
1564 }
1565 
1566 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
1567 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
1568   TEVENT (reenter) ;
1569   if (UseBiasedLocking) {
1570     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1571     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1572   }
1573 
1574   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
1575 
1576   monitor->reenter(recursion, THREAD);
1577 }
1578 
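// Illustrative sketch only (not compiled): the nested-lock pattern described in
// the comment above complete_exit().  "lock1" and "lock2" are hypothetical
// Handles held in acquisition order lock1 then lock2; error handling is omitted.
#if 0
{
  // 1) Give up lock1 entirely, saving its recursion count.
  intptr_t recursions = ObjectSynchronizer::complete_exit(lock1, THREAD);
  // 2) Wait on lock2 (the caller still owns lock2's monitor).
  ObjectSynchronizer::wait(lock2, 0, THREAD);
  // 3) Once notified, the caller releases lock2, then ...
  // 4) ... reacquires lock1 with the original recursion count.
  ObjectSynchronizer::reenter(lock1, recursions, THREAD);
  // 5) Finally the caller re-locks lock2 if it still needs it.
}
#endif
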
1579 // This exists only as a workaround of dtrace bug 6254741
1580 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
1581   DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
1582   return 0;
1583 }
1584 
1585 // NOTE: must use heavy weight monitor to handle wait()
1586 void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
1587   if (UseBiasedLocking) {
1588     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1589     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1590   }
1591   if (millis < 0) {
1592     TEVENT (wait - throw IAX) ;
1593     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
1594   }
1595   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
1596   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
1597   monitor->wait(millis, true, THREAD);
1598 
1599   /* This dummy call is in place to get around dtrace bug 6254741.  Once
1600      that's fixed we can uncomment the following line and remove the call */
1601   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
1602   dtrace_waited_probe(monitor, obj, THREAD);
1603 }
1604 
1605 void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
1606   if (UseBiasedLocking) {
1607     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1608     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1609   }
1610   if (millis < 0) {
1611     TEVENT (wait - throw IAX) ;
1612     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
1613   }
1614   ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ;
1615 }
1616 
1617 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
1618  if (UseBiasedLocking) {
1619     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1620     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1621   }
1622 
1623   markOop mark = obj->mark();
1624   if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
1625     return;
1626   }
1627   ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
1628 }
1629 
1630 // NOTE: see comment of notify()
1631 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
1632   if (UseBiasedLocking) {
1633     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1634     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1635   }
1636 
1637   markOop mark = obj->mark();
1638   if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
1639     return;
1640   }
1641   ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
1642 }
1643 
1644 intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
1645   if (UseBiasedLocking) {
1646     // NOTE: many places throughout the JVM do not expect a safepoint
1647     // to be taken here, in particular most operations on perm gen
1648     // objects. However, we only ever bias Java instances and all of
1649     // the call sites of identity_hash that might revoke biases have
1650     // been checked to make sure they can handle a safepoint. The
1651     // added check of the bias pattern is to avoid useless calls to
1652     // thread-local storage.
1653     if (obj->mark()->has_bias_pattern()) {
1654       // Box and unbox the raw reference just in case we cause a STW safepoint.
1655       Handle hobj (Self, obj) ;
1656       // Relaxing assertion for bug 6320749.
1657       assert (Universe::verify_in_progress() ||
1658               !SafepointSynchronize::is_at_safepoint(),
1659              "biases should not be seen by VM thread here");
1660       BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
1661       obj = hobj() ;
1662       assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1663     }
1664   }
1665 
1666   // hashCode() is a heap mutator ...
1667   // Relaxing assertion for bug 6320749.
1668   assert (Universe::verify_in_progress() ||
1669           !SafepointSynchronize::is_at_safepoint(), "invariant") ;
1670   assert (Universe::verify_in_progress() ||
1671           Self->is_Java_thread() , "invariant") ;
1672   assert (Universe::verify_in_progress() ||
1673          ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
1674 
1675   ObjectMonitor* monitor = NULL;
1676   markOop temp, test;
1677   intptr_t hash;
1678   markOop mark = ReadStableMark (obj);
1679 
1680   // object should remain ineligible for biased locking
1681   assert (!mark->has_bias_pattern(), "invariant") ;
1682 
1683   if (mark->is_neutral()) {
1684     hash = mark->hash();              // this is a normal header
1685     if (hash) {                       // if it has hash, just return it
1686       return hash;
1687     }
1688     hash = get_next_hash(Self, obj);  // allocate a new hash code
1689     temp = mark->copy_set_hash(hash); // merge the hash code into header
1690     // use (machine word version) atomic operation to install the hash
1691     test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
1692     if (test == mark) {
1693       return hash;
1694     }
1695     // If the atomic operation failed, we must inflate the header
1696     // into a heavyweight monitor. We could add more code here
1697     // for the fast path, but it is not worth the complexity.
1698   } else if (mark->has_monitor()) {
1699     monitor = mark->monitor();
1700     temp = monitor->header();
1701     assert (temp->is_neutral(), "invariant") ;
1702     hash = temp->hash();
1703     if (hash) {
1704       return hash;
1705     }
1706     // Fall through to the common code below to reduce code size
1707   } else if (Self->is_lock_owned((address)mark->locker())) {
1708     temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
1709     assert (temp->is_neutral(), "invariant") ;
1710     hash = temp->hash();              // by current thread, check if the displaced
1711     if (hash) {                       // header contains hash code
1712       return hash;
1713     }
1714     // WARNING:
1715     //   The displaced header is strictly immutable.
1716     // It can NOT be changed in ANY case. So we have
1717     // to inflate the header into a heavyweight monitor
1718     // even if the current thread owns the lock. The reason
1719     // is that the BasicLock (stack slot) will be asynchronously
1720     // read by other threads during the inflate() function.
1721     // Any change to the stack may not propagate to other
1722     // threads correctly.
1723   }
1724 
1725   // Inflate the monitor to set hash code
1726   monitor = ObjectSynchronizer::inflate(Self, obj);
1727   // Load displaced header and check it has hash code
1728   mark = monitor->header();
1729   assert (mark->is_neutral(), "invariant") ;
1730   hash = mark->hash();
1731   if (hash == 0) {
1732     hash = get_next_hash(Self, obj);
1733     temp = mark->copy_set_hash(hash); // merge hash code into header
1734     assert (temp->is_neutral(), "invariant") ;
1735     test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
1736     if (test != mark) {
1737       // The only update to the header in the monitor (outside GC)
1738       // is to install the hash code. If someone adds a new usage of the
1739       // displaced header, please update this code.
1740       hash = test->hash();
1741       assert (test->is_neutral(), "invariant") ;
1742       assert (hash != 0, "Trivial unexpected object/monitor header usage.");
1743     }
1744   }
1745   // We finally get the hash
1746   return hash;
1747 }
1748 
1749 // Deprecated -- use FastHashCode() instead.
1750 
1751 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
1752   return FastHashCode (Thread::current(), obj()) ;
1753 }
1754 
1755 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
1756                                                    Handle h_obj) {
1757   if (UseBiasedLocking) {
1758     BiasedLocking::revoke_and_rebias(h_obj, false, thread);
1759     assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1760   }
1761 
1762   assert(thread == JavaThread::current(), "Can only be called on current thread");
1763   oop obj = h_obj();
1764 
1765   markOop mark = ReadStableMark (obj) ;
1766 
1767   // Uncontended case, header points to stack
1768   if (mark->has_locker()) {
1769     return thread->is_lock_owned((address)mark->locker());
1770   }
1771   // Contended case, header points to ObjectMonitor (tagged pointer)
1772   if (mark->has_monitor()) {
1773     ObjectMonitor* monitor = mark->monitor();
1774     return monitor->is_entered(thread) != 0 ;
1775   }
1776   // Unlocked case, header in place
1777   assert(mark->is_neutral(), "sanity check");
1778   return false;
1779 }
1780 
1781 // Be aware that this method can revoke the bias of the lock object.
1782 // This method queries the ownership of the lock handle specified by 'h_obj'.
1783 // If the current thread owns the lock, it returns owner_self. If no
1784 // thread owns the lock, it returns owner_none. Otherwise, it returns
1785 // owner_other.
1786 ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
1787 (JavaThread *self, Handle h_obj) {
1788   // The caller must beware this method can revoke bias, and
1789   // revocation can result in a safepoint.
1790   assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
1791   assert (self->thread_state() != _thread_blocked , "invariant") ;
1792 
1793   // Possible mark states: neutral, biased, stack-locked, inflated
1794 
1795   if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
1796     // CASE: biased
1797     BiasedLocking::revoke_and_rebias(h_obj, false, self);
1798     assert(!h_obj->mark()->has_bias_pattern(),
1799            "biases should be revoked by now");
1800   }
1801 
1802   assert(self == JavaThread::current(), "Can only be called on current thread");
1803   oop obj = h_obj();
1804   markOop mark = ReadStableMark (obj) ;
1805 
1806   // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
1807   if (mark->has_locker()) {
1808     return self->is_lock_owned((address)mark->locker()) ?
1809       owner_self : owner_other;
1810   }
1811 
1812   // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
1813   // The Object:ObjectMonitor relationship is stable as long as we're
1814   // not at a safepoint.
1815   if (mark->has_monitor()) {
1816     void * owner = mark->monitor()->_owner ;
1817     if (owner == NULL) return owner_none ;
1818     return (owner == self ||
1819             self->is_lock_owned((address)owner)) ? owner_self : owner_other;
1820   }
1821 
1822   // CASE: neutral
1823   assert(mark->is_neutral(), "sanity check");
1824   return owner_none ;           // it's unlocked
1825 }
1826 
1827 // FIXME: jvmti should call this
1828 JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
1829   if (UseBiasedLocking) {
1830     if (SafepointSynchronize::is_at_safepoint()) {
1831       BiasedLocking::revoke_at_safepoint(h_obj);
1832     } else {
1833       BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
1834     }
1835     assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1836   }
1837 
1838   oop obj = h_obj();
1839   address owner = NULL;
1840 
1841   markOop mark = ReadStableMark (obj) ;
1842 
1843   // Uncontended case, header points to stack
1844   if (mark->has_locker()) {
1845     owner = (address) mark->locker();
1846   }
1847 
1848   // Contended case, header points to ObjectMonitor (tagged pointer)
1849   if (mark->has_monitor()) {
1850     ObjectMonitor* monitor = mark->monitor();
1851     assert(monitor != NULL, "monitor should be non-null");
1852     owner = (address) monitor->owner();
1853   }
1854 
1855   if (owner != NULL) {
1856     return Threads::owning_thread_from_monitor_owner(owner, doLock);
1857   }
1858 
1859   // Unlocked case, header in place
1860   // Cannot have assertion since this object may have been
1861   // locked by another thread when reaching here.
1862   // assert(mark->is_neutral(), "sanity check");
1863 
1864   return NULL;
1865 }
1866 
1867 // Iterate through monitor cache and attempt to release thread's monitors
1868 // Gives up on a particular monitor if an exception occurs, but continues
1869 // the overall iteration, swallowing the exception.
1870 class ReleaseJavaMonitorsClosure: public MonitorClosure {
1871 private:
1872   TRAPS;
1873 
1874 public:
1875   ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
1876   void do_monitor(ObjectMonitor* mid) {
1877     if (mid->owner() == THREAD) {
1878       (void)mid->complete_exit(CHECK);
1879     }
1880   }
1881 };
1882 
1883 // Release all inflated monitors owned by THREAD.  Lightweight monitors are
1884 // ignored.  This is meant to be called during JNI thread detach which assumes
1885 // all remaining monitors are heavyweight.  All exceptions are swallowed.
1886 // Scanning the extant monitor list can be time consuming.
1887 // A simple optimization is to add a per-thread flag that indicates a thread
1888 // called jni_monitorenter() during its lifetime.
1889 //
1890 // Instead of No_Safepoint_Verifier it might be cheaper to
1891 // use an idiom of the form:
1892 //   auto int tmp = SafepointSynchronize::_safepoint_counter ;
1893 //   <code that must not run at safepoint>
1894 //   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
1895 // Since the tests are extremely cheap we could leave them enabled
1896 // for normal product builds.
1897 
1898 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
1899   assert(THREAD == JavaThread::current(), "must be current Java thread");
1900   No_Safepoint_Verifier nsv ;
1901   ReleaseJavaMonitorsClosure rjmc(THREAD);
1902   Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
1903   ObjectSynchronizer::monitors_iterate(&rjmc);
1904   Thread::muxRelease(&ListLock);
1905   THREAD->clear_pending_exception();
1906 }
1907 
1908 // Visitors ...
1909 
1910 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
1911   ObjectMonitor* block = gBlockList;
1912   ObjectMonitor* mid;
1913   while (block) {
1914     assert(block->object() == CHAINMARKER, "must be a block header");
1915     for (int i = _BLOCKSIZE - 1; i > 0; i--) {
1916       mid = block + i;
1917       oop object = (oop) mid->object();
1918       if (object != NULL) {
1919         closure->do_monitor(mid);
1920       }
1921     }
1922     block = (ObjectMonitor*) block->FreeNext;
1923   }
1924 }
1925 
1926 void ObjectSynchronizer::oops_do(OopClosure* f) {
1927   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1928   for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
1929     assert(block->object() == CHAINMARKER, "must be a block header");
1930     for (int i = 1; i < _BLOCKSIZE; i++) {
1931       ObjectMonitor* mid = &block[i];
1932       if (mid->object() != NULL) {
1933         f->do_oop((oop*)mid->object_addr());
1934       }
1935     }
1936   }
1937 }
1938 
1939 // Deflate_idle_monitors() is called at all safepoints, immediately
1940 // after all mutators are stopped, but before any objects have moved.
1941 // It traverses the list of known monitors, deflating where possible.
1942 // The scavenged monitors are returned to the monitor free list.
1943 //
1944 // Beware that we scavenge at *every* stop-the-world point.
1945 // Having a large number of monitors in-circulation negatively
1946 // impacts the performance of some applications (e.g., PointBase).
1947 // Broadly, we want to minimize the # of monitors in circulation.
1948 //
1949 // We have added a flag, MonitorInUseLists, which creates a list
1950 // of active monitors for each thread. deflate_idle_monitors()
1951 // only scans the per-thread inuse lists. omAlloc() puts all
1952 // assigned monitors on the per-thread list. deflate_idle_monitors()
1953 // returns the non-busy monitors to the global free list.
1954 // When a thread dies, omFlush() adds the list of active monitors for
1955 // that thread to the global gOmInUseList while holding the
1956 // global list lock. deflate_idle_monitors() acquires the global
1957 // list lock to scan for non-busy monitors and move them to the global free list.
1958 // An alternative could have used a single global inuse list. The
1959 // downside would have been the additional cost of acquiring the global list lock
1960 // for every omAlloc().
1961 //
1962 // Perversely, the heap size -- and thus the STW safepoint rate --
1963 // typically drives the scavenge rate.  Large heaps can mean infrequent GC,
1964 // which in turn can mean large(r) numbers of objectmonitors in circulation.
1965 // This is an unfortunate aspect of this design.
1966 //
1967 // Another refinement would be to refrain from calling deflate_idle_monitors()
1968 // except at stop-the-world points associated with garbage collections.
1969 //
1970 // An even better solution would be to deflate on-the-fly, aggressively,
1971 // at monitorexit-time as is done in EVM's metalock or Relaxed Locks.
1972 
1973 
1974 // Deflate a single monitor if not in use
1975 // Return true if deflated, false if in use
1976 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1977                                          ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
1978   bool deflated;
1979   // Normal case ... The monitor is associated with obj.
1980   guarantee (obj->mark() == markOopDesc::encode(mid), "invariant") ;
1981   guarantee (mid == obj->mark()->monitor(), "invariant");
1982   guarantee (mid->header()->is_neutral(), "invariant");
1983 
1984   if (mid->is_busy()) {
1985      if (ClearResponsibleAtSTW) mid->_Responsible = NULL ;
1986      deflated = false;
1987   } else {
1988      // Deflate the monitor if it is no longer being used
1989      // It's idle - scavenge and return to the global free list
1990      // plain old deflation ...
1991      TEVENT (deflate_idle_monitors - scavenge1) ;
1992      if (TraceMonitorInflation) {
1993        if (obj->is_instance()) {
1994          ResourceMark rm;
1995            tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1996                 (intptr_t) obj, (intptr_t) obj->mark(), Klass::cast(obj->klass())->external_name());
1997        }
1998      }
1999 
2000      // Restore the header back to obj
2001      obj->release_set_mark(mid->header());
2002      mid->clear();
2003 
2004      assert (mid->object() == NULL, "invariant") ;
2005 
2006      // Move the object to the working free list defined by FreeHead,FreeTail.
2007      if (*FreeHeadp == NULL) *FreeHeadp = mid;
2008      if (*FreeTailp != NULL) {
2009        ObjectMonitor * prevtail = *FreeTailp;
2010        assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
2011        prevtail->FreeNext = mid;
2012       }
2013      *FreeTailp = mid;
2014      deflated = true;
2015   }
2016   return deflated;
2017 }
2018 
2019 // Caller acquires ListLock
2020 int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp,
2021                                           ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
2022   ObjectMonitor* mid;
2023   ObjectMonitor* next;
2024   ObjectMonitor* curmidinuse = NULL;
2025   int deflatedcount = 0;
2026 
2027   for (mid = *listheadp; mid != NULL; ) {
2028      oop obj = (oop) mid->object();
2029      bool deflated = false;
2030      if (obj != NULL) {
2031        deflated = deflate_monitor(mid, obj, FreeHeadp, FreeTailp);
2032      }
2033      if (deflated) {
2034        // extract from per-thread in-use-list
2035        if (mid == *listheadp) {
2036          *listheadp = mid->FreeNext;
2037        } else if (curmidinuse != NULL) {
2038          curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
2039        }
2040        next = mid->FreeNext;
2041        mid->FreeNext = NULL;  // This mid is current tail in the FreeHead list
2042        mid = next;
2043        deflatedcount++;
2044      } else {
2045        curmidinuse = mid;
2046        mid = mid->FreeNext;
2047     }
2048   }
2049   return deflatedcount;
2050 }
2051 
2052 void ObjectSynchronizer::deflate_idle_monitors() {
2053   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2054   int nInuse = 0 ;              // currently associated with objects
2055   int nInCirculation = 0 ;      // extant
2056   int nScavenged = 0 ;          // reclaimed
2057   bool deflated = false;
2058 
2059   ObjectMonitor * FreeHead = NULL ;  // Local SLL of scavenged monitors
2060   ObjectMonitor * FreeTail = NULL ;
2061 
2062   TEVENT (deflate_idle_monitors) ;
2063   // Prevent omFlush from changing mids in Thread dtor's during deflation
2064   // And in case the vm thread is acquiring a lock during a safepoint
2065   // See e.g. 6320749
2066   Thread::muxAcquire (&ListLock, "scavenge - return") ;
2067 
2068   if (MonitorInUseLists) {
2069     int inUse = 0;
2070     for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
2071       nInCirculation+= cur->omInUseCount;
2072       int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail);
2073       cur->omInUseCount-= deflatedcount;
2074       // verifyInUse(cur);
2075       nScavenged += deflatedcount;
2076       nInuse += cur->omInUseCount;
2077      }
2078 
2079    // For moribund threads, scan gOmInUseList
2080    if (gOmInUseList) {
2081      nInCirculation += gOmInUseCount;
2082      int deflatedcount = walk_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail);
2083      gOmInUseCount-= deflatedcount;
2084      nScavenged += deflatedcount;
2085      nInuse += gOmInUseCount;
2086     }
2087 
2088   } else for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
2089   // Iterate over all extant monitors - Scavenge all idle monitors.
2090     assert(block->object() == CHAINMARKER, "must be a block header");
2091     nInCirculation += _BLOCKSIZE ;
2092     for (int i = 1 ; i < _BLOCKSIZE; i++) {
2093       ObjectMonitor* mid = &block[i];
2094       oop obj = (oop) mid->object();
2095 
2096       if (obj == NULL) {
2097         // The monitor is not associated with an object.
2098         // The monitor should be on either a thread-specific private
2099         // free list or the global free list.
2100         // obj == NULL IMPLIES mid->is_busy() == 0
2101         guarantee (!mid->is_busy(), "invariant") ;
2102         continue ;
2103       }
2104       deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail);
2105 
2106       if (deflated) {
2107         mid->FreeNext = NULL ;
2108         nScavenged ++ ;
2109       } else {
2110         nInuse ++;
2111       }
2112     }
2113   }
2114 
2115   MonitorFreeCount += nScavenged;
2116 
2117   // Consider: audit gFreeList to ensure that MonitorFreeCount and list agree.
2118 
2119   if (Knob_Verbose) {
2120     ::printf ("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
2121         nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
2122         MonitorPopulation, MonitorFreeCount) ;
2123     ::fflush(stdout) ;
2124   }
2125 
2126   ForceMonitorScavenge = 0;    // Reset
2127 
2128   // Move the scavenged monitors back to the global free list.
2129   if (FreeHead != NULL) {
2130      guarantee (FreeTail != NULL && nScavenged > 0, "invariant") ;
2131      assert (FreeTail->FreeNext == NULL, "invariant") ;
2132      // constant-time list splice - prepend scavenged segment to gFreeList
2133      FreeTail->FreeNext = gFreeList ;
2134      gFreeList = FreeHead ;
2135   }
2136   Thread::muxRelease (&ListLock) ;
2137 
2138   if (_sync_Deflations != NULL) _sync_Deflations->inc(nScavenged) ;
2139   if (_sync_MonExtant  != NULL) _sync_MonExtant ->set_value(nInCirculation);
2140 
2141   // TODO: Add objectMonitor leak detection.
2142   // Audit/inventory the objectMonitors -- make sure they're all accounted for.
2143   GVars.stwRandom = os::random() ;
2144   GVars.stwCycle ++ ;
2145 }
2146 
2147 // A macro is used below because there may already be a pending
2148 // exception which should not abort the execution of the routines
2149 // which use this (which is why we don't put this into check_slow and
2150 // call it with a CHECK argument).
2151 
2152 #define CHECK_OWNER()                                                             \
2153   do {                                                                            \
2154     if (THREAD != _owner) {                                                       \
2155       if (THREAD->is_lock_owned((address) _owner)) {                              \
2156         _owner = THREAD ;  /* Convert from basiclock addr to Thread addr */       \
2157         _recursions = 0;                                                          \
2158         OwnerIsThread = 1 ;                                                       \
2159       } else {                                                                    \
2160         TEVENT (Throw IMSX) ;                                                     \
2161         THROW(vmSymbols::java_lang_IllegalMonitorStateException());               \
2162       }                                                                           \
2163     }                                                                             \
2164   } while (false)
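
// Illustrative sketch only (not compiled): an owner-only monitor operation would
// typically begin with CHECK_OWNER(), which either normalizes a BasicLock-address
// _owner to the owning Thread or throws IllegalMonitorStateException and returns.
// The method name below is hypothetical.
#if 0
void ObjectMonitor::SomeOwnerOnlyOperation (TRAPS) {
   CHECK_OWNER();      // returns with a pending IMSE if THREAD does not own this monitor
   // ... the operation proper, now guaranteed _owner == THREAD ...
}
#endif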
2165 
2166 // TODO-FIXME: eliminate ObjectWaiters.  Replace this visitor/enumerator
2167 // interface with a simple FirstWaitingThread(), NextWaitingThread() interface.
2168 
2169 ObjectWaiter* ObjectMonitor::first_waiter() {
2170   return _WaitSet;
2171 }
2172 
2173 ObjectWaiter* ObjectMonitor::next_waiter(ObjectWaiter* o) {
2174   return o->_next;
2175 }
2176 
2177 Thread* ObjectMonitor::thread_of_waiter(ObjectWaiter* o) {
2178   return o->_thread;
2179 }
2180 
2181 // Initialize the monitor; except for the semaphore, all other fields
2182 // are simple integers or pointers.
2183 ObjectMonitor::ObjectMonitor() {
2184   _header       = NULL;
2185   _count        = 0;
2186   _waiters      = 0;
2187   _recursions   = 0;
2188   _object       = NULL;
2189   _owner        = NULL;
2190   _WaitSet      = NULL;
2191   _WaitSetLock  = 0 ;
2192   _Responsible  = NULL ;
2193   _succ         = NULL ;
2194   _cxq          = NULL ;
2195   FreeNext      = NULL ;
2196   _EntryList    = NULL ;
2197   _SpinFreq     = 0 ;
2198   _SpinClock    = 0 ;
2199   OwnerIsThread = 0 ;
2200 }
2201 
2202 ObjectMonitor::~ObjectMonitor() {
2203    // TODO: Add asserts ...
2204    // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
2205    // _count == 0 _EntryList  == NULL etc
2206 }
2207 
2208 intptr_t ObjectMonitor::is_busy() const {
2209   // TODO-FIXME: merge _count and _waiters.
2210   // TODO-FIXME: assert _owner == null implies _recursions = 0
2211   // TODO-FIXME: assert _WaitSet != null implies _count > 0
2212   return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList ) ;
2213 }
2214 
2215 void ObjectMonitor::Recycle () {
2216   // TODO: add stronger asserts ...
2217   // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
2218   // _count == 0 EntryList  == NULL
2219   // _recursions == 0 _WaitSet == NULL
2220   // TODO: assert (is_busy()|_recursions) == 0
2221   _succ          = NULL ;
2222   _EntryList     = NULL ;
2223   _cxq           = NULL ;
2224   _WaitSet       = NULL ;
2225   _recursions    = 0 ;
2226   _SpinFreq      = 0 ;
2227   _SpinClock     = 0 ;
2228   OwnerIsThread  = 0 ;
2229 }
2230 
2231 // WaitSet management ...
2232 
2233 inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
2234   assert(node != NULL, "should not dequeue NULL node");
2235   assert(node->_prev == NULL, "node already in list");
2236   assert(node->_next == NULL, "node already in list");
2237   // put node at end of queue (circular doubly linked list)
2238   if (_WaitSet == NULL) {
2239     _WaitSet = node;
2240     node->_prev = node;
2241     node->_next = node;
2242   } else {
2243     ObjectWaiter* head = _WaitSet ;
2244     ObjectWaiter* tail = head->_prev;
2245     assert(tail->_next == head, "invariant check");
2246     tail->_next = node;
2247     head->_prev = node;
2248     node->_next = head;
2249     node->_prev = tail;
2250   }
2251 }
2252 
2253 inline ObjectWaiter* ObjectMonitor::DequeueWaiter() {
2254   // dequeue the very first waiter
2255   ObjectWaiter* waiter = _WaitSet;
2256   if (waiter) {
2257     DequeueSpecificWaiter(waiter);
2258   }
2259   return waiter;
2260 }
2261 
2262 inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
2263   assert(node != NULL, "should not dequeue NULL node");
2264   assert(node->_prev != NULL, "node already removed from list");
2265   assert(node->_next != NULL, "node already removed from list");
2266   // When the waiter has woken up because of interrupt,
2267   // timeout or other spurious wake-up, dequeue the
2268   // waiter from the wait set.
2269   ObjectWaiter* next = node->_next;
2270   if (next == node) {
2271     assert(node->_prev == node, "invariant check");
2272     _WaitSet = NULL;
2273   } else {
2274     ObjectWaiter* prev = node->_prev;
2275     assert(prev->_next == node, "invariant check");
2276     assert(next->_prev == node, "invariant check");
2277     next->_prev = prev;
2278     prev->_next = next;
2279     if (_WaitSet == node) {
2280       _WaitSet = next;
2281     }
2282   }
2283   node->_next = NULL;
2284   node->_prev = NULL;
2285 }
2286 
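// kvGet() looks up Key in kvList, which is a sequence of NUL-separated
// "Key=Value" entries terminated by an empty string; a bare "Key" entry
// (no '=') is treated as the value "1".  Returns NULL if Key is absent.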
2287 static char * kvGet (char * kvList, const char * Key) {
2288     if (kvList == NULL) return NULL ;
2289     size_t n = strlen (Key) ;
2290     char * Search ;
2291     for (Search = kvList ; *Search ; Search += strlen(Search) + 1) {
2292         if (strncmp (Search, Key, n) == 0) {
2293             if (Search[n] == '=') return Search + n + 1 ;
2294             if (Search[n] == 0)   return (char *) "1" ;
2295         }
2296     }
2297     return NULL ;
2298 }
2299 
2300 static int kvGetInt (char * kvList, const char * Key, int Default) {
2301     char * v = kvGet (kvList, Key) ;
2302     int rslt = v ? ::strtol (v, NULL, 0) : Default ;
2303     if (Knob_ReportSettings && v != NULL) {
2304         ::printf ("  SyncKnob: %s %d(%d)\n", Key, rslt, Default) ;
2305         ::fflush (stdout) ;
2306     }
2307     return rslt ;
2308 }
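
// Illustrative usage only (not compiled): a hand-built kvList demonstrating the
// format accepted by kvGet()/kvGetInt().  The keys and values are made up.
#if 0
static void kvExample () {
    char kvList [] = "SpinLimit=5000\0Verbose\0" ;       // two entries + terminating empty string
    int limit   = kvGetInt (kvList, "SpinLimit", 0) ;    // -> 5000
    int verbose = kvGetInt (kvList, "Verbose",   0) ;    // -> 1  (bare key)
    int missing = kvGetInt (kvList, "NoSuchKey", 7) ;    // -> 7  (default)
}
#endif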
2309 
2310 // By convention we unlink a contending thread from EntryList|cxq immediately
2311 // after the thread acquires the lock in ::enter().  Equally, we could defer
2312 // unlinking the thread until ::exit()-time.
2313 
2314 void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
2315 {
2316     assert (_owner == Self, "invariant") ;
2317     assert (SelfNode->_thread == Self, "invariant") ;
2318 
2319     if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
2320         // Normal case: remove Self from the DLL EntryList .
2321         // This is a constant-time operation.
2322         ObjectWaiter * nxt = SelfNode->_next ;
2323         ObjectWaiter * prv = SelfNode->_prev ;
2324         if (nxt != NULL) nxt->_prev = prv ;
2325         if (prv != NULL) prv->_next = nxt ;
2326         if (SelfNode == _EntryList ) _EntryList = nxt ;
2327         assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ;
2328         assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ;
2329         TEVENT (Unlink from EntryList) ;
2330     } else {
2331         guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ;
2332         // Inopportune interleaving -- Self is still on the cxq.
2333         // This usually means the enqueue of self raced an exiting thread.
2334         // Normally we'll find Self near the front of the cxq, so
2335         // dequeueing is typically fast.  If need be we can accelerate
2336         // this with some MCS/CLH-like bidirectional list hints and advisory
2337         // back-links so dequeueing from the interior will normally operate
2338         // in constant-time.
2339         // Dequeue Self from either the head (with CAS) or from the interior
2340         // with a linear-time scan and normal non-atomic memory operations.
2341         // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
2342         // and then unlink Self from EntryList.  We have to drain eventually,
2343         // so it might as well be now.
2344 
2345         ObjectWaiter * v = _cxq ;
2346         assert (v != NULL, "invariant") ;
2347         if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
2348             // The CAS above can fail from interference IFF a "RAT" (recently-arrived thread) arrived.
2349             // In that case Self must be in the interior and can no longer be
2350             // at the head of cxq.
2351             if (v == SelfNode) {
2352                 assert (_cxq != v, "invariant") ;
2353                 v = _cxq ;          // CAS above failed - start scan at head of list
2354             }
2355             ObjectWaiter * p ;
2356             ObjectWaiter * q = NULL ;
2357             for (p = v ; p != NULL && p != SelfNode; p = p->_next) {
2358                 q = p ;
2359                 assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ;
2360             }
2361             assert (v != SelfNode,  "invariant") ;
2362             assert (p == SelfNode,  "Node not found on cxq") ;
2363             assert (p != _cxq,      "invariant") ;
2364             assert (q != NULL,      "invariant") ;
2365             assert (q->_next == p,  "invariant") ;
2366             q->_next = p->_next ;
2367         }
2368         TEVENT (Unlink from cxq) ;
2369     }
2370 
2371     // Diagnostic hygiene ...
2372     SelfNode->_prev  = (ObjectWaiter *) 0xBAD ;
2373     SelfNode->_next  = (ObjectWaiter *) 0xBAD ;
2374     SelfNode->TState = ObjectWaiter::TS_RUN ;
2375 }
2376 
2377 // Caveat: TryLock() is not necessarily serializing if it returns failure.
2378 // Callers must compensate as needed.
2379 
2380 int ObjectMonitor::TryLock (Thread * Self) {
2381    for (;;) {
2382       void * own = _owner ;
2383       if (own != NULL) return 0 ;
2384       if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
2385          // Either guarantee _recursions == 0 or set _recursions = 0.
2386          assert (_recursions == 0, "invariant") ;
2387          assert (_owner == Self, "invariant") ;
2388          // CONSIDER: set or assert that OwnerIsThread == 1
2389          return 1 ;
2390       }
2391       // The lock had been free momentarily, but we lost the race to the lock.
2392       // Interference -- the CAS failed.
2393       // We can either return -1 or retry.
2394       // Retry doesn't make as much sense because the lock was just acquired.
2395       if (true) return -1 ;
2396    }
2397 }
2398 
2399 // NotRunnable() -- informed spinning
2400 //
2401 // Don't bother spinning if the owner is not eligible to drop the lock.
2402 // Peek at the owner's schedctl.sc_state and Thread._thread_state and
2403 // spin only if the owner thread is _thread_in_Java or _thread_in_vm.
2404 // The thread must be runnable in order to drop the lock in a timely fashion.
2405 // If the _owner is not runnable then spinning will not likely be
2406 // successful (profitable).
2407 //
2408 // Beware -- the thread referenced by _owner could have died
2409 // so a simple fetch from _owner->_thread_state might trap.
2410 // Instead, we use SafeFetchXX() to safely LD _owner->_thread_state.
2411 // Because of the lifecycle issues the schedctl and _thread_state values
2412 // observed by NotRunnable() might be garbage.  NotRunnable must
2413 // tolerate this and consider the observed _thread_state value
2414 // as advisory.
2415 //
2416 // Beware too, that _owner is sometimes a BasicLock address and sometimes
2417 // a thread pointer.  We differentiate the two cases with OwnerIsThread.
2418 // Alternatively, we might tag the type (thread pointer vs basiclock pointer)
2419 // with the LSB of _owner.  Another option would be to probabilistically probe
2420 // the putative _owner->TypeTag value.
2421 //
2422 // Checking _thread_state isn't perfect.  Even if the thread is
2423 // in_java it might be blocked on a page-fault or have been preempted
2424 // and sitting on a ready/dispatch queue.  _thread_state in conjunction
2425 // with schedctl.sc_state gives us a good picture of what the
2426 // thread is doing, however.
2427 //
2428 // TODO: check schedctl.sc_state.
2429 // We'll need to use SafeFetch32() to read from the schedctl block.
2430 // See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/
2431 //
2432 // The return value from NotRunnable() is *advisory* -- the
2433 // result is based on sampling and is not necessarily coherent.
2434 // The caller must tolerate false-negative and false-positive errors.
2435 // Spinning, in general, is probabilistic anyway.
2436 
2437 
2438 int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) {
2439     // Check either OwnerIsThread or ox->TypeTag == 2BAD.
2440     if (!OwnerIsThread) return 0 ;
2441 
2442     if (ox == NULL) return 0 ;
2443 
2444     // Avoid transitive spinning ...
2445     // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
2446     // Immediately after T1 acquires L it's possible that T2, also
2447     // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
2448     // This occurs transiently after T1 acquired L but before
2449     // T1 managed to clear T1.Stalled.  T2 does not need to abort
2450     // its spin in this circumstance.
2451     intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ;
2452 
2453     if (BlockedOn == 1) return 1 ;
2454     if (BlockedOn != 0) {
2455       return BlockedOn != intptr_t(this) && _owner == ox ;
2456     }
2457 
2458     assert (sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant") ;
2459     int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ;
2460     // consider also: jst != _thread_in_Java -- but that's overspecific.
2461     return jst == _thread_blocked || jst == _thread_in_native ;
2462 }
2463 
2464 
2465 // Adaptive spin-then-block - rational spinning
2466 //
2467 // Note that we spin "globally" on _owner with a classic SMP-polite TATAS
2468 // algorithm.  On high order SMP systems it would be better to start with
2469 // a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
2470 // a contending thread could enqueue itself on the cxq and then spin locally
2471 // on a thread-specific variable such as its ParkEvent._Event flag.
2472 // That's left as an exercise for the reader.  Note that global spinning is
2473 // not problematic on Niagara, as the L2$ serves the interconnect and has both
2474 // low latency and massive bandwidth.
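//
// As a rough, hypothetical illustration of the local-spin variant described
// above (it is not implemented here), a contending thread could CAS itself
// onto the cxq and then poll a flag in its own ObjectWaiter node -- set by
// the exiting thread -- instead of polling the shared _owner field.  The
// reuse of the node's _notified field as the spin target is an assumption
// made only for this sketch:
//
//    ObjectWaiter node (Self) ;
//    node._notified = 0 ;                        // thread-local spin target
//    for (;;) {                                  // CAS-push onto _cxq
//       node._next = _cxq ;
//       if (Atomic::cmpxchg_ptr (&node, &_cxq, node._next) == node._next) break ;
//    }
//    while (node._notified == 0) SpinPause () ;  // spin locally, off the bus
//    // ... then contend for _owner as usual, or park if the spin expires.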
2475 //
2476 // Broadly, we can fix the spin frequency -- that is, the % of contended lock
2477 // acquisition attempts where we opt to spin --  at 100% and vary the spin count
2478 // (duration) or we can fix the count at approximately the duration of
2479 // a context switch and vary the frequency.   Of course we could also
2480 // vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
2481 // See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html.
2482 //
2483 // This implementation varies the duration "D", where D varies with
2484 // the success rate of recent spin attempts. (D is capped at approximately
2485 // the length of a round-trip context switch).  The success rate for recent
2486 // spin attempts is a good predictor of the success rate of future spin
2487 // attempts.  The mechanism adapts automatically to varying critical
2488 // section length (lock modality), system load and degree of parallelism.
2489 // D is maintained per-monitor in _SpinDuration and is initialized
2490 // optimistically.  Spin frequency is fixed at 100%.
2491 //
2492 // Note that _SpinDuration is volatile, but we update it without locks
2493 // or atomics.  The code is designed so that _SpinDuration stays within
2494 // a reasonable range even in the presence of races.  The arithmetic
2495 // operations on _SpinDuration are closed over the domain of legal values,
2496 // so at worst a race will install an older but still legal value.
2497 // At the very worst this introduces some apparent non-determinism.
2498 // We might spin when we shouldn't or vice-versa, but since spin
2499 // counts are relatively short, even in the worst case, the effect is harmless.
2500 //
2501 // Care must be taken that a low "D" value does not become
2502 // an absorbing state.  Transient spinning failures -- when spinning
2503 // is overall profitable -- should not cause the system to converge
2504 // on low "D" values.  We want spinning to be stable and predictable
2505 // and fairly responsive to change and at the same time we don't want
2506 // it to oscillate, become metastable, be "too" non-deterministic,
2507 // or converge on or enter undesirable stable absorbing states.
2508 //
2509 // We implement a feedback-based control system -- using past behavior
2510 // to predict future behavior.  We face two issues: (a) if the
2511 // input signal is random then the spin predictor won't provide optimal
2512 // results, and (b) if the signal frequency is too high then the control
2513 // system, which has some natural response lag, will "chase" the signal.
2514 // (b) can arise from multimodal lock hold times.  Transient preemption
2515 // can also result in apparent bimodal lock hold times.
2516 // Although sub-optimal, neither condition is particularly harmful, as
2517 // in the worst-case we'll spin when we shouldn't or vice-versa.
2518 // The maximum spin duration is rather short so the failure modes aren't bad.
2519 // To be conservative, I've tuned the gain in the system to bias toward
2520 // _not spinning.  Relatedly, the system can sometimes enter a mode where it
2521 // "rings" or oscillates between spinning and not spinning.  This happens
2522 // when spinning is just on the cusp of profitability, however, so the
2523 // situation is not dire.  The state is benign -- there's no need to add
2524 // hysteresis control to damp the transition rate between spinning and
2525 // not spinning.
2526 //
2527 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2528 //
2529 // Spin-then-block strategies ...
2530 //
2531 // Thoughts on ways to improve spinning :
2532 //
2533 // *  Periodically call {psr_}getloadavg() while spinning, and
2534 //    permit unbounded spinning if the load average is <
2535 //    the number of processors (see the sketch after this list).
2536 //    Beware, however, that getloadavg() is exceptionally fast on
2537 //    Solaris (about 1/10 the cost of a full spin cycle) but quite
2538 //    expensive on Linux.  Beware also that multiple JVMs could "ring"
2539 //    or oscillate in a feedback loop; sufficient damping would solve that problem.
2540 //
2541 // *  We currently use spin loops with iteration counters to approximate
2542 //    spinning for some interval.  Given the availability of high-precision
2543 //    time sources such as gethrtime(), %TICK, %STICK, RDTSC, etc., we should
2544 //    someday reimplement the spin loops to be duration-based instead of iteration-based.
2545 //
2546 // *  Don't spin if there are more than N = (CPUs/2) threads
2547 //        currently spinning on the monitor (or globally).
2548 //    That is, limit the number of concurrent spinners.
2549 //    We might also limit the # of spinners in the JVM, globally.
2550 //
2551 // *  If a spinning thread observes _owner change hands it should
2552 //    abort the spin (and park immediately) or at least debit
2553 //    the spin counter by a large "penalty".
2554 //
2555 // *  Classically, the spin count is either K*(CPUs-1) or is a
2556 //        simple constant that approximates the length of a context switch.
2557 //    We currently use a value -- computed by a special utility -- that
2558 //    approximates round-trip context switch times.
2559 //
2560 // *  Normally schedctl_start()/_stop() is used to advise the kernel
2561 //    to avoid preempting threads that are running in short, bounded
2562 //    critical sections.  We could use the schedctl hooks in an inverted
2563 //    sense -- spinners would set the nopreempt flag, but poll the preempt
2564 //    pending flag.  If a spinner observed a pending preemption it'd immediately
2565 //    abort the spin and park.   As such, the schedctl service acts as
2566 //    a preemption warning mechanism.
2567 //
2568 // *  In lieu of spinning, if the system is running below saturation
2569 //    (that is, loadavg() << #cpus), we can instead suppress futile
2570 //    wakeup throttling, or even wake more than one successor at exit-time.
2571 //    The net effect is largely equivalent to spinning.  In both cases,
2572 //    contending threads go ONPROC and opportunistically attempt to acquire
2573 //    the lock, decreasing lock handover latency at the expense of wasted
2574 //    cycles and context switching.
2575 //
2576 // *  We might want to spin less after we've parked, as the thread will
2577 //    have less $ and TLB affinity with the processor.
2578 //    Likewise, we might spin less if we come ONPROC on a different
2579 //    processor or after a long period (>> rechose_interval).
2580 //
2581 // *  A table-driven state machine similar to Solaris' dispadmin scheduling
2582 //    tables might be a better design.  Instead of encoding information in
2583 //    _SpinDuration, _SpinFreq and _SpinClock we'd just use explicit,
2584 //    discrete states.   Success or failure during a spin would drive
2585 //    state transitions, and each state node would contain a spin count.
2586 //
2587 // *  If the processor is operating in a mode intended to conserve power
2588 //    (such as Intel's SpeedStep) or to reduce thermal output (thermal
2589 //    step-down mode) then the Java synchronization subsystem should
2590 //    forgo spinning.
2591 //
2592 // *  The minimum spin duration should be approximately the worst-case
2593 //    store propagation latency on the platform.  That is, the time
2594 //    it takes a store on CPU A to become visible on CPU B, where A and
2595 //    B are "distant".
2596 //
2597 // *  We might want to factor a thread's priority in the spin policy.
2598 //    Threads with a higher priority might spin for slightly longer.
2599 //    Similarly, if we use back-off in the TATAS loop, lower priority
2600 //    threads might back-off longer.  We don't currently use a
2601 //    thread's priority when placing it on the entry queue.  We may
2602 //    want to consider doing so in future releases.
2603 //
2604 // *  We might transiently drop a thread's scheduling priority while it spins.
2605 //    SCHED_BATCH on linux and FX scheduling class at priority=0 on Solaris
2606 //    would suffice.  We could even consider letting the thread spin indefinitely at
2607 //    a depressed or "idle" priority.  This brings up fairness issues, however --
2608 //    in a saturated system a thread with a reduced priority could languish
2609 //    for extended periods on the ready queue.
2610 //
2611 // *  While spinning try to use the otherwise wasted time to help the VM make
2612 //    progress:
2613 //
2614 //    -- YieldTo() the owner, if the owner is OFFPROC but ready.
2615 //       Donate our remaining quantum directly to the ready thread.
2616 //       This helps "push" the lock owner through the critical section.
2617 //       It also tends to improve affinity/locality as the lock
2618 //       "migrates" less frequently between CPUs.
2619 //    -- Walk our own stack in anticipation of blocking.  Memoize the roots.
2620 //    -- Perform strand checking for other threads.  Unpark potential strandees.
2621 //    -- Help GC: trace or mark -- this would need to be a bounded unit of work.
2622 //       Unfortunately this will pollute our $ and TLBs.  Recall that we
2623 //       spin to avoid context switching -- context switching has an
2624 //       immediate cost in latency, a disruptive cost to other strands on a CMT
2625 //       processor, and an amortized cost because of the D$ and TLB cache
2626 //       reload transient when the thread comes back ONPROC and repopulates
2627 //       $s and TLBs.
2628 //    -- call getloadavg() to see if the system is saturated.  It'd probably
2629 //       make sense to call getloadavg() halfway through the spin.
2630 //       If the system isn't at full capacity then we'd simply reset
2631 //       the spin counter and extend the spin attempt.
2632 //    -- Doug points out that we should use the same "helping" policy
2633 //       in thread.yield().
2634 //
2635 // *  Try MONITOR-MWAIT on systems that support those instructions.
2636 //
2637 // *  The spin statistics that drive spin decisions & frequency are
2638 //    maintained in the ObjectMonitor structure, so if we deflate and reinflate
2639 //    we lose spin state.  In practice this is not usually a concern
2640 //    as the default spin state after inflation is aggressive (optimistic)
2641 //    and tends toward spinning.  So in the worst case for a lock where
2642 //    spinning is not profitable we may spin unnecessarily for a brief
2643 //    period.  But then again, if a lock is contended it'll tend not to deflate
2644 //    in the first place.
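//
// As a concrete (but purely illustrative) sketch of the getloadavg() idea in
// the first bullet above, spin admission could be gated on system load.  The
// helper name, the sampling policy, and the use of os::loadavg() and
// os::active_processor_count() here are assumptions for this sketch, not
// current policy:
//
//    static bool SpinLooksProfitable () {
//       double avg [1] ;
//       if (os::loadavg (avg, 1) != 1) return true ;   // no data -- default to spinning
//       return avg[0] < (double) os::active_processor_count() ;
//    }
//
// A spinner would consult SpinLooksProfitable() periodically -- say, halfway
// through its spin budget -- and either extend or abandon the spin attempt.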
2645 
2646 
2647 intptr_t ObjectMonitor::SpinCallbackArgument = 0 ;
2648 int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ;
2649 
2650 // Spinning: Fixed frequency (100%), vary duration
2651 
2652 int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) {
2653 
2654     // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
2655     int ctr = Knob_FixedSpin ;
2656     if (ctr != 0) {
2657         while (--ctr >= 0) {
2658             if (TryLock (Self) > 0) return 1 ;
2659             SpinPause () ;
2660         }
2661         return 0 ;
2662     }
2663 
2664     for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) {
2665       if (TryLock(Self) > 0) {
2666         // Increase _SpinDuration ...
2667         // Note that we don't clamp SpinDuration precisely at SpinLimit.
2668         // Raising _SpinDuration to the poverty line is key.
2669         int x = _SpinDuration ;
2670         if (x < Knob_SpinLimit) {
2671            if (x < Knob_Poverty) x = Knob_Poverty ;
2672            _SpinDuration = x + Knob_BonusB ;
2673         }
2674         return 1 ;
2675       }
2676       SpinPause () ;
2677     }
2678 
2679     // Admission control - verify preconditions for spinning
2680     //
2681     // We always spin a little bit, just to prevent _SpinDuration == 0 from
2682     // becoming an absorbing state.  Put another way, we spin briefly to
2683     // sample, just in case the system load, parallelism, contention, or lock
2684     // modality changed.
2685     //
2686     // Consider the following alternative:
2687     // Periodically set _SpinDuration = _SpinLimit and try a long/full
2688     // spin attempt.  "Periodically" might mean after a tally of
2689     // the # of failed spin attempts (or iterations) reaches some threshold.
2690     // This takes us into the realm of 1-out-of-N spinning, where we
2691     // hold the duration constant but vary the frequency.
2692 
2693     ctr = _SpinDuration  ;
2694     if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ;
2695     if (ctr <= 0) return 0 ;
2696 
2697     if (Knob_SuccRestrict && _succ != NULL) return 0 ;
2698     if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
2699        TEVENT (Spin abort - notrunnable [TOP]);
2700        return 0 ;
2701     }
2702 
2703     int MaxSpin = Knob_MaxSpinners ;
2704     if (MaxSpin >= 0) {
2705        if (_Spinner > MaxSpin) {
2706           TEVENT (Spin abort -- too many spinners) ;
2707           return 0 ;
2708        }
2709        // Slightly racy, but benign ...
2710        Adjust (&_Spinner, 1) ;
2711     }
2712 
2713     // We're good to spin ... spin ingress.
2714     // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
2715     // when preparing to LD...CAS _owner, etc and the CAS is likely
2716     // to succeed.
2717     int hits    = 0 ;
2718     int msk     = 0 ;
2719     int caspty  = Knob_CASPenalty ;
2720     int oxpty   = Knob_OXPenalty ;
2721     int sss     = Knob_SpinSetSucc ;
2722     if (sss && _succ == NULL ) _succ = Self ;
2723     Thread * prv = NULL ;
2724 
2725     // There are three ways to exit the following loop:
2726     // 1.  A successful spin where this thread has acquired the lock.
2727     // 2.  Spin failure with prejudice
2728     // 3.  Spin failure without prejudice
2729 
2730     while (--ctr >= 0) {
2731 
2732       // Periodic polling -- Check for pending GC
2733       // Threads may spin while they're unsafe.
2734       // We don't want spinning threads to delay the JVM from reaching
2735       // a stop-the-world safepoint or to steal cycles from GC.
2736       // If we detect a pending safepoint we abort in order that
2737       // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
2738       // this thread, if safe, doesn't steal cycles from GC.
2739       // This is in keeping with the "no loitering in runtime" rule.
2740       // We periodically check to see if there's a safepoint pending.
2741       if ((ctr & 0xFF) == 0) {
2742          if (SafepointSynchronize::do_call_back()) {
2743             TEVENT (Spin: safepoint) ;
2744             goto Abort ;           // abrupt spin egress
2745          }
2746          if (Knob_UsePause & 1) SpinPause () ;
2747 
2748          int (*scb)(intptr_t,int) = SpinCallbackFunction ;
2749          if (hits > 50 && scb != NULL) {
2750             int abend = (*scb)(SpinCallbackArgument, 0) ;
2751          }
2752       }
2753 
2754       if (Knob_UsePause & 2) SpinPause() ;
2755 
2756       // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
2757       // This is useful on classic SMP systems, but is of less utility on
2758       // N1-style CMT platforms.
2759       //
2760       // Trade-off: lock acquisition latency vs coherency bandwidth.
2761       // Lock hold times are typically short.  A histogram
2762       // of successful spin attempts shows that we usually acquire
2763       // the lock early in the spin.  That suggests we want to
2764       // sample _owner frequently in the early phase of the spin,
2765       // but then back-off and sample less frequently as the spin
2766       // progresses.  The back-off makes us a good citizen on big
2767       // SMP systems.  Oversampling _owner can consume excessive
2768       // coherency bandwidth.  Relatedly, if we oversample _owner we
2769       // can inadvertently interfere with the ST m->owner=null
2770       // executed by the lock owner.
2771       if (ctr & msk) continue ;
2772       ++hits ;
2773       if ((hits & 0xF) == 0) {
2774         // The 0xF, above, corresponds to the exponent.
2775         // Consider: (msk+1)|msk
2776         msk = ((msk << 2)|3) & BackOffMask ;
2777       }
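      // For illustration: starting from msk==0 the mask evolves
      // 0x0 -> 0x3 -> 0xF -> 0x3F -> ... (capped by BackOffMask), so the
      // fraction of iterations that actually probe _owner -- those where
      // (ctr & msk) == 0 -- shrinks as the spin progresses.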
2778 
2779       // Probe _owner with TATAS
2780       // If this thread observes the monitor transition or flicker
2781       // from locked to unlocked to locked, then the odds that this
2782       // thread will acquire the lock in this spin attempt go down
2783       // considerably.  The same argument applies if the CAS fails
2784       // or if we observe _owner change from one non-null value to
2785       // another non-null value.   In such cases we might abort
2786       // the spin without prejudice or apply a "penalty" to the
2787       // spin count-down variable "ctr", reducing it by 100, say.
2788 
2789       Thread * ox = (Thread *) _owner ;
2790       if (ox == NULL) {
2791          ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
2792          if (ox == NULL) {
2793             // The CAS succeeded -- this thread acquired ownership
2794             // Take care of some bookkeeping to exit spin state.
2795             if (sss && _succ == Self) {
2796                _succ = NULL ;
2797             }
2798             if (MaxSpin > 0) Adjust (&_Spinner, -1) ;
2799 
2800             // Increase _SpinDuration :
2801             // The spin was successful (profitable) so we tend toward
2802             // longer spin attempts in the future.
2803             // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
2804             // If we acquired the lock early in the spin cycle it
2805             // makes sense to increase _SpinDuration proportionally.
2806             // Note that we don't clamp SpinDuration precisely at SpinLimit.
2807             int x = _SpinDuration ;
2808             if (x < Knob_SpinLimit) {
2809                 if (x < Knob_Poverty) x = Knob_Poverty ;
2810                 _SpinDuration = x + Knob_Bonus ;
2811             }
2812             return 1 ;
2813          }
2814 
2815          // The CAS failed ... we can take any of the following actions:
2816          // * penalize: ctr -= Knob_CASPenalty
2817          // * exit spin with prejudice -- goto Abort;
2818          // * exit spin without prejudice.
2819          // * Since CAS is high-latency, simply retry immediately.
2820          prv = ox ;
2821          TEVENT (Spin: cas failed) ;
2822          if (caspty == -2) break ;
2823          if (caspty == -1) goto Abort ;
2824          ctr -= caspty ;
2825          continue ;
2826       }
2827 
2828       // Did lock ownership change hands ?
2829       if (ox != prv && prv != NULL ) {
2830           TEVENT (spin: Owner changed)
2831           if (oxpty == -2) break ;
2832           if (oxpty == -1) goto Abort ;
2833           ctr -= oxpty ;
2834       }
2835       prv = ox ;
2836 
2837       // Abort the spin if the owner is not executing.
2838       // The owner must be executing in order to drop the lock.
2839       // Spinning while the owner is OFFPROC is idiocy.
2840       // Consider: ctr -= RunnablePenalty ;
2841       if (Knob_OState && NotRunnable (Self, ox)) {
2842          TEVENT (Spin abort - notrunnable);
2843          goto Abort ;
2844       }
2845       if (sss && _succ == NULL ) _succ = Self ;
2846    }
2847 
2848    // Spin failed with prejudice -- reduce _SpinDuration.
2849    // TODO: Use an AIMD-like policy to adjust _SpinDuration.
2850    // AIMD is globally stable.
2851    TEVENT (Spin failure) ;
2852    {
2853      int x = _SpinDuration ;
2854      if (x > 0) {
2855         // Consider an AIMD scheme like: x -= (x >> 3) + 100
2856         // This is globally stable and tends to damp the response.
2857         x -= Knob_Penalty ;
2858         if (x < 0) x = 0 ;
2859         _SpinDuration = x ;
2860      }
2861    }
2862 
2863  Abort:
2864    if (MaxSpin >= 0) Adjust (&_Spinner, -1) ;
2865    if (sss && _succ == Self) {
2866       _succ = NULL ;
2867       // Invariant: after setting succ=null a contending thread
2868       // must recheck-retry _owner before parking.  This usually happens
2869       // in the normal usage of TrySpin(), but it's safest
2870       // to make TrySpin() as foolproof as possible.
2871       OrderAccess::fence() ;
2872       if (TryLock(Self) > 0) return 1 ;
2873    }
2874    return 0 ;
2875 }
2876 
2877 #define TrySpin TrySpin_VaryDuration
2878 
2879 static void DeferredInitialize () {
2880   if (InitDone > 0) return ;
2881   if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
2882       while (InitDone != 1) ;
2883       return ;
2884   }
2885 
2886   // One-shot global initialization ...
2887   // The initialization is idempotent, so we don't need locks.
2888   // In the future consider doing this via os::init_2().
2889   // SyncKnobs consist of <Key>=<Value> pairs in the style
2890   // of environment variables.  Start by converting ':' to NUL.
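  // For example (the values here are illustrative only), a setting such as
  //    SyncKnobs="SpinLimit=4000:SpinBackOff=2:Verbose=1"
  // supplies the pairs SpinLimit=4000, SpinBackOff=2 and Verbose=1, which
  // the SETKNOB() expansions below pick up via kvGetInt().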
2891 
2892   if (SyncKnobs == NULL) SyncKnobs = "" ;
2893 
2894   size_t sz = strlen (SyncKnobs) ;
2895   char * knobs = (char *) malloc (sz + 2) ;
2896   if (knobs == NULL) {
2897      vm_exit_out_of_memory (sz + 2, "Parse SyncKnobs") ;
2898      guarantee (0, "invariant") ;
2899   }
2900   strcpy (knobs, SyncKnobs) ;
2901   knobs[sz+1] = 0 ;
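  // The extra byte written above presumably gives the buffer a double-NUL
  // terminator, so the key/value scanner (kvGetInt, used below) can detect
  // the end of the list after ':' separators have been converted to NULs.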
2902   for (char * p = knobs ; *p ; p++) {
2903      if (*p == ':') *p = 0 ;
2904   }
2905 
2906   #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
2907   SETKNOB(ReportSettings) ;
2908   SETKNOB(Verbose) ;
2909   SETKNOB(FixedSpin) ;
2910   SETKNOB(SpinLimit) ;
2911   SETKNOB(SpinBase) ;
2912   SETKNOB(SpinBackOff);
2913   SETKNOB(CASPenalty) ;
2914   SETKNOB(OXPenalty) ;
2915   SETKNOB(LogSpins) ;
2916   SETKNOB(SpinSetSucc) ;
2917   SETKNOB(SuccEnabled) ;
2918   SETKNOB(SuccRestrict) ;
2919   SETKNOB(Penalty) ;
2920   SETKNOB(Bonus) ;
2921   SETKNOB(BonusB) ;
2922   SETKNOB(Poverty) ;
2923   SETKNOB(SpinAfterFutile) ;
2924   SETKNOB(UsePause) ;
2925   SETKNOB(SpinEarly) ;
2926   SETKNOB(OState) ;
2927   SETKNOB(MaxSpinners) ;
2928   SETKNOB(PreSpin) ;
2929   SETKNOB(ExitPolicy) ;
2930   SETKNOB(QMode);
2931   SETKNOB(ResetEvent) ;
2932   SETKNOB(MoveNotifyee) ;
2933   SETKNOB(FastHSSEC) ;
2934   #undef SETKNOB
2935 
2936   if (os::is_MP()) {
2937      BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
2938      if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
2939      // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
2940   } else {
2941      Knob_SpinLimit = 0 ;
2942      Knob_SpinBase  = 0 ;
2943      Knob_PreSpin   = 0 ;
2944      Knob_FixedSpin = -1 ;
2945   }
2946 
2947   if (Knob_LogSpins == 0) {
2948      ObjectSynchronizer::_sync_FailedSpins = NULL ;
2949   }
2950 
2951   free (knobs) ;
2952   OrderAccess::fence() ;
2953   InitDone = 1 ;
2954 }
2955 
2956 // Theory of operations -- Monitors lists, thread residency, etc:
2957 //
2958 // * A thread acquires ownership of a monitor by successfully
2959 //   CAS()ing the _owner field from null to non-null.
2960 //
2961 // * Invariant: A thread appears on at most one monitor list --
2962 //   cxq, EntryList or WaitSet -- at any one time.
2963 //
2964 // * Contending threads "push" themselves onto the cxq with CAS
2965 //   and then spin/park.
2966 //
2967 // * After a contending thread eventually acquires the lock it must
2968 //   dequeue itself from either the EntryList or the cxq.
2969 //
2970 // * The exiting thread identifies and unparks an "heir presumptive"
2971 //   tentative successor thread on the EntryList.  Critically, the
2972 //   exiting thread doesn't unlink the successor thread from the EntryList.
2973 //   After having been unparked, the wakee will recontend for ownership of
2974 //   the monitor.   The successor (wakee) will either acquire the lock or
2975 //   re-park itself.
2976 //
2977 //   Succession is provided for by a policy of competitive handoff.
2978 //   The exiting thread does _not_ grant or pass ownership to the
2979 //   successor thread.  (This is also referred to as "handoff" succession.)
2980 //   Instead the exiting thread releases ownership and possibly wakes
2981 //   a successor, so the successor can (re)compete for ownership of the lock.
2982 //   If the EntryList is empty but the cxq is populated the exiting
2983 //   thread will drain the cxq into the EntryList.  It does so by
2984 //   detaching the cxq (installing null with CAS) and folding the
2985 //   threads from the cxq into the EntryList; see the sketch at the end
2986 //   of this comment block.  The EntryList is doubly linked, while the cxq
2987 //   is singly linked because of the CAS-based "push" used to enqueue recently arrived threads (RATs).
2988 //
2989 // * Concurrency invariants:
2990 //
2991 //   -- only the monitor owner may access or mutate the EntryList.
2992 //      The mutex property of the monitor itself protects the EntryList
2993 //      from concurrent interference.
2994 //   -- Only the monitor owner may detach the cxq.
2995 //
2996 // * The monitor entry list operations avoid locks, but strictly speaking
2997 //   they're not lock-free.  Enter is lock-free, exit is not.
2998 //   See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
2999 //
3000 // * The cxq can have multiple concurrent "pushers" but only one concurrent
3001 //   detaching thread.  This mechanism is immune to ABA corruption.
3002 //   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
3003 //
3004 // * Taken together, the cxq and the EntryList constitute or form a
3005 //   single logical queue of threads stalled trying to acquire the lock.
3006 //   We use two distinct lists to improve the odds of a constant-time
3007 //   dequeue operation after acquisition (in the ::enter() epilog) and
3008 //   to reduce heat on the list ends.  (c.f. Michael Scott's "2Q" algorithm).
3009 //   A key desideratum is to minimize queue & monitor metadata manipulation
3010 //   that occurs while holding the monitor lock -- that is, we want to
3011 //   minimize monitor lock hold times.  Note that even a small amount of
3012 //   fixed spinning will greatly reduce the # of enqueue-dequeue operations
3013 //   on EntryList|cxq.  That is, spinning relieves contention on the "inner"
3014 //   locks and monitor metadata.
3015 //
3016 //   Cxq points to the set of Recently Arrived Threads attempting entry.
3017 //   Because we push threads onto _cxq with CAS, the RATs must take the form of
3018 //   a singly-linked LIFO.  We drain _cxq into EntryList  at unlock-time when
3019 //   the unlocking thread notices that EntryList is null but _cxq is != null.
3020 //
3021 //   The EntryList is ordered by the prevailing queue discipline and
3022 //   can be organized in any convenient fashion, such as a doubly-linked list or
3023 //   a circular doubly-linked list.  Critically, we want insert and delete operations
3024 //   to operate in constant-time.  If we need a priority queue then something akin
3025 //   to Solaris' sleepq would work nicely.  Viz.,
3026 //   http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
3027 //   Queue discipline is enforced at ::exit() time, when the unlocking thread
3028 //   drains the cxq into the EntryList, and orders or reorders the threads on the
3029 //   EntryList accordingly.
3030 //
3031 //   Barring "lock barging", this mechanism provides fair cyclic ordering,
3032 //   somewhat similar to an elevator-scan.
3033 //
3034 // * The monitor synchronization subsystem avoids the use of native
3035 //   synchronization primitives except for the narrow platform-specific
3036 //   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
3037 //   the semantics of park-unpark.  Put another way, this monitor implementation
3038 //   depends only on atomic operations and park-unpark.  The monitor subsystem
3039 //   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
3040 //   underlying OS manages the READY<->RUN transitions.
3041 //
3042 // * Waiting threads reside on the WaitSet list -- wait() puts
3043 //   the caller onto the WaitSet.
3044 //
3045 // * notify() or notifyAll() simply transfers threads from the WaitSet to
3046 //   either the EntryList or cxq.  Subsequent exit() operations will
3047 //   unpark the notifyee.  Unparking a notifyee in notify() is inefficient --
3048 //   it's likely the notifyee would simply impale itself on the lock held
3049 //   by the notifier.
3050 //
3051 // * An interesting alternative is to encode cxq as (List,LockByte) where
3052 //   the LockByte is 0 iff the monitor is owned.  _owner is simply an auxiliary
3053 //   variable, like _recursions, in the scheme.  The threads or Events that form
3054 //   the list would have to be aligned on 256-byte boundaries.  A thread would
3055 //   try to acquire the lock or enqueue itself with CAS, but exiting threads
3056 //   could use a 1-0 protocol and simply STB to set the LockByte to 0.
3057 //   Note that this is *not* word-tearing, but it does presume that full-word
3058 //   CAS operations remain coherent when intermixed with STB operations.  That's true
3059 //   on most common processors.
3060 //
3061 // * See also http://blogs.sun.com/dave
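//
// As a simplified sketch of the cxq drain mentioned above (the real drain in
// ::exit() additionally honors the queue-discipline knobs and fixes up the
// EntryList's _prev links), the exiting thread detaches the entire cxq chain
// with a CAS and then folds it, privately, into the EntryList:
//
//    ObjectWaiter * List = _cxq ;
//    for (;;) {
//       ObjectWaiter * v = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, List) ;
//       if (v == List) break ;        // detached the whole chain
//       List = v ;                    // interference -- a new thread pushed; retry
//    }
//    // "List" is now private to this thread: relink it as the doubly-linked
//    // EntryList, set each node's TState to TS_ENTER, and select a successor.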
3062 
3063 
3064 void ATTR ObjectMonitor::EnterI (TRAPS) {
3065     Thread * Self = THREAD ;
3066     assert (Self->is_Java_thread(), "invariant") ;
3067     assert (((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant") ;
3068 
3069     // Try the lock - TATAS
3070     if (TryLock (Self) > 0) {
3071         assert (_succ != Self              , "invariant") ;
3072         assert (_owner == Self             , "invariant") ;
3073         assert (_Responsible != Self       , "invariant") ;
3074         return ;
3075     }
3076 
3077     DeferredInitialize () ;
3078 
3079     // We try one round of spinning *before* enqueueing Self.
3080     //
3081     // If the _owner is ready but OFFPROC we could use a YieldTo()
3082     // operation to donate the remainder of this thread's quantum
3083     // to the owner.  This has subtle but beneficial affinity
3084     // effects.
3085 
3086     if (TrySpin (Self) > 0) {
3087         assert (_owner == Self        , "invariant") ;
3088         assert (_succ != Self         , "invariant") ;
3089         assert (_Responsible != Self  , "invariant") ;
3090         return ;
3091     }
3092 
3093     // The Spin failed -- Enqueue and park the thread ...
3094     assert (_succ  != Self            , "invariant") ;
3095     assert (_owner != Self            , "invariant") ;
3096     assert (_Responsible != Self      , "invariant") ;
3097 
3098     // Enqueue "Self" on ObjectMonitor's _cxq.
3099     //
3100     // Node acts as a proxy for Self.
3101     // As an aside, if we were ever to rewrite the synchronization code mostly
3102     // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
3103     // Java objects.  This would avoid awkward lifecycle and liveness issues,
3104     // as well as eliminate a subset of ABA issues.
3105     // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
3106     //
3107 
3108     ObjectWaiter node(Self) ;
3109     Self->_ParkEvent->reset() ;
3110     node._prev   = (ObjectWaiter *) 0xBAD ;
3111     node.TState  = ObjectWaiter::TS_CXQ ;
3112 
3113     // Push "Self" onto the front of the _cxq.
3114     // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
3115     // Note that spinning tends to reduce the rate at which threads
3116     // enqueue and dequeue on EntryList|cxq.
3117     ObjectWaiter * nxt ;
3118     for (;;) {
3119         node._next = nxt = _cxq ;
3120         if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;
3121 
3122         // Interference - the CAS failed because _cxq changed.  Just retry.
3123         // As an optional optimization we retry the lock.
3124         if (TryLock (Self) > 0) {
3125             assert (_succ != Self         , "invariant") ;
3126             assert (_owner == Self        , "invariant") ;
3127             assert (_Responsible != Self  , "invariant") ;
3128             return ;
3129         }
3130     }
3131 
3132     // Check for cxq|EntryList edge transition to non-null.  This indicates
3133     // the onset of contention.  While contention persists exiting threads
3134     // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
3135     // operations revert to the faster 1-0 mode.  This enter operation may interleave
3136     // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
3137     // arrange for one of the contending threads to use a timed park() operation
3138     // to detect and recover from the race.  (Stranding is a form of progress failure
3139     // where the monitor is unlocked but all the contending threads remain parked).
3140     // That is, at least one of the contending threads will periodically poll _owner.
3141     // One of the contending threads will become the designated "Responsible" thread.
3142     // The Responsible thread uses a timed park instead of a normal indefinite park
3143     // operation -- it periodically wakes and checks for and recovers from potential
3144     // strandings admitted by 1-0 exit operations.   We need at most one Responsible
3145     // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
3146     // be responsible for a monitor.
3147     //
3148     // Currently, one of the contending threads takes on the added role of "Responsible".
3149     // A viable alternative would be to use a dedicated "stranding checker" thread
3150     // that periodically iterated over all the threads (or active monitors) and unparked
3151     // successors where there was risk of stranding.  This would help eliminate the
3152     // timer scalability issues we see on some platforms as we'd only have one thread
3153     // -- the checker -- parked on a timer.
3154 
3155     if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
3156         // Try to assume the role of responsible thread for the monitor.
3157         // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
3158         Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
3159     }
3160 
3161     // The lock may have been released while this thread was occupied queueing
3162     // itself onto _cxq.  To close the race and avoid "stranding" and
3163     // progress-liveness failure we must resample-retry _owner before parking.
3164     // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
3165     // In this case the ST-MEMBAR is accomplished with CAS().
3166     //
3167     // TODO: Defer all thread state transitions until park-time.
3168     // Since state transitions are heavy and inefficient we'd like
3169     // to defer the state transitions until absolutely necessary,
3170     // and in doing so avoid some transitions ...
3171 
3172     TEVENT (Inflated enter - Contention) ;
3173     int nWakeups = 0 ;
3174     int RecheckInterval = 1 ;
3175 
3176     for (;;) {
3177 
3178         if (TryLock (Self) > 0) break ;
3179         assert (_owner != Self, "invariant") ;
3180 
3181         if ((SyncFlags & 2) && _Responsible == NULL) {
3182            Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
3183         }
3184 
3185         // park self
3186         if (_Responsible == Self || (SyncFlags & 1)) {
3187             TEVENT (Inflated enter - park TIMED) ;
3188             Self->_ParkEvent->park ((jlong) RecheckInterval) ;
3189             // Increase the RecheckInterval, but clamp the value.
3190             RecheckInterval *= 8 ;
3191             if (RecheckInterval > 1000) RecheckInterval = 1000 ;
3192         } else {
3193             TEVENT (Inflated enter - park UNTIMED) ;
3194             Self->_ParkEvent->park() ;
3195         }
3196 
3197         if (TryLock(Self) > 0) break ;
3198 
3199         // The lock is still contested.
3200         // Keep a tally of the # of futile wakeups.
3201         // Note that the counter is not protected by a lock or updated by atomics.
3202         // That is by design - we trade "lossy" counters which are exposed to
3203         // races during updates for a lower probe effect.
3204         TEVENT (Inflated enter - Futile wakeup) ;
3205         if (ObjectSynchronizer::_sync_FutileWakeups != NULL) {
3206            ObjectSynchronizer::_sync_FutileWakeups->inc() ;
3207         }
3208         ++ nWakeups ;
3209 
3210         // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
3211         // We can defer clearing _succ until after the spin completes
3212         // TrySpin() must tolerate being called with _succ == Self.
3213         // Try yet another round of adaptive spinning.
3214         if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ;
3215 
3216         // We can find that we were unpark()ed and redesignated _succ while
3217         // we were spinning.  That's harmless.  If we iterate and call park(),
3218         // park() will consume the event and return immediately and we'll
3219         // just spin again.  This pattern can repeat, leaving _succ to simply
3220         // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
3221         // Alternately, we can sample fired() here, and if set, forgo spinning
3222         // in the next iteration.
3223 
3224         if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
3225            Self->_ParkEvent->reset() ;
3226            OrderAccess::fence() ;
3227         }
3228         if (_succ == Self) _succ = NULL ;
3229 
3230         // Invariant: after clearing _succ a thread *must* retry _owner before parking.
3231         OrderAccess::fence() ;
3232     }
3233 
3234     // Egress :
3235     // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
3236     // Normally we'll find Self on the EntryList.
3237     // From the perspective of the lock owner (this thread), the
3238     // EntryList is stable and cxq is prepend-only.
3239     // The head of cxq is volatile but the interior is stable.
3240     // In addition, Self.TState is stable.
3241 
3242     assert (_owner == Self      , "invariant") ;
3243     assert (object() != NULL    , "invariant") ;
3244     // I'd like to write:
3245     //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
3246     // but as we're at a safepoint that's not safe.
3247 
3248     UnlinkAfterAcquire (Self, &node) ;
3249     if (_succ == Self) _succ = NULL ;
3250 
3251     assert (_succ != Self, "invariant") ;
3252     if (_Responsible == Self) {
3253         _Responsible = NULL ;
3254         // Dekker pivot-point.
3255         // Consider OrderAccess::storeload() here
3256 
3257         // We may leave threads on cxq|EntryList without a designated
3258         // "Responsible" thread.  This is benign.  When this thread subsequently
3259         // exits the monitor it can "see" such preexisting "old" threads --
3260         // threads that arrived on the cxq|EntryList before the fence, above --
3261         // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
3262         // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
3263         // non-null and elect a new "Responsible" timer thread.
3264         //
3265         // This thread executes:
3266         //    ST Responsible=null; MEMBAR    (in enter epilog - here)
3267         //    LD cxq|EntryList               (in subsequent exit)
3268         //
3269         // Entering threads in the slow/contended path execute:
3270         //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
3271         //    The (ST cxq; MEMBAR) is accomplished with CAS().
3272         //
3273         // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
3274         // exit operation from floating above the ST Responsible=null.
3275         //
3276         // In *practice* however, EnterI() is always followed by some atomic
3277         // operation such as the decrement of _count in ::enter().  Those atomics
3278         // obviate the need for the explicit MEMBAR, above.
3279     }
3280 
3281     // We've acquired ownership with CAS().
3282     // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
3283     // But since the CAS() this thread may have also stored into _succ,
3284     // EntryList, cxq or Responsible.  These meta-data updates must be
3285     // visible *before* this thread subsequently drops the lock.
3286     // Consider what could occur if we didn't enforce this constraint --
3287     // STs to monitor meta-data and user-data could reorder with (become
3288     // visible after) the ST in exit that drops ownership of the lock.
3289     // Some other thread could then acquire the lock, but observe inconsistent
3290     // or old monitor meta-data and heap data.  That violates the JMM.
3291     // To that end, the 1-0 exit() operation must have at least STST|LDST
3292     // "release" barrier semantics.  Specifically, there must be at least a
3293     // STST|LDST barrier in exit() before the ST of null into _owner that drops
3294     // the lock.   The barrier ensures that changes to monitor meta-data and data
3295     // protected by the lock will be visible before we release the lock, and
3296     // therefore before some other thread (CPU) has a chance to acquire the lock.
3297     // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
3298     //
3299     // Critically, any prior STs to _succ or EntryList must be visible before
3300     // the ST of null into _owner in the *subsequent* (following) corresponding
3301     // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
3302     // execute a serializing instruction.
3303 
3304     if (SyncFlags & 8) {
3305        OrderAccess::fence() ;
3306     }
3307     return ;
3308 }
3309 
3310 // ExitSuspendEquivalent:
3311 // A faster alternate to handle_special_suspend_equivalent_condition()
3312 //
3313 // handle_special_suspend_equivalent_condition() unconditionally
3314 // acquires the SR_lock.  On some platforms uncontended MutexLocker()
3315 // operations have high latency.  Note that in ::enter() we call HSSEC
3316 // while holding the monitor, so we effectively lengthen the critical sections.
3317 //
3318 // There are a number of possible solutions:
3319 //
3320 // A.  To ameliorate the problem we might also defer state transitions
3321 //     to as late as possible -- just prior to parking.
3322 //     Given that, we'd call HSSEC after having returned from park(),
3323 //     but before attempting to acquire the monitor.  This is only a
3324 //     partial solution.  It avoids calling HSSEC while holding the
3325 //     monitor (good), but it still increases successor reacquisition latency --
3326 //     the interval between unparking a successor and the time the successor
3327 //     resumes and retries the lock.  See ReenterI(), which defers state transitions.
3328 //     If we use this technique we can also avoid EnterI()-exit() loop
3329 //     in ::enter() where we iteratively drop the lock and then attempt
3330 //     to reacquire it after suspending.
3331 //
3332 // B.  In the future we might fold all the suspend bits into a
3333 //     composite per-thread suspend flag and then update it with CAS().
3334 //     Alternately, a Dekker-like mechanism with multiple variables
3335 //     would suffice:
3336 //       ST Self->_suspend_equivalent = false
3337 //       MEMBAR
3338 //       LD Self->_suspend_flags
3339 //
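// As a minimal sketch of alternative (B) above -- assuming, hypothetically,
// that the suspend bits were consolidated into a single word readable
// without taking SR_lock (the suspend_flags() accessor below is an
// assumption made only for this sketch):
//
//    Self->clear_suspend_equivalent() ;     // ST
//    OrderAccess::fence() ;                 // MEMBAR
//    if (Self->suspend_flags() == 0) {      // LD
//       return false ;                      // no suspension raced us -- fast path
//    }
//    // otherwise fall into the slow path that acquires SR_lock.
//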
3340 
3341 
3342 bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
3343    int Mode = Knob_FastHSSEC ;
3344    if (Mode && !jSelf->is_external_suspend()) {
3345       assert (jSelf->is_suspend_equivalent(), "invariant") ;
3346       jSelf->clear_suspend_equivalent() ;
3347       if (2 == Mode) OrderAccess::storeload() ;
3348       if (!jSelf->is_external_suspend()) return false ;
3349       // We raced a suspension -- fall thru into the slow path
3350       TEVENT (ExitSuspendEquivalent - raced) ;
3351       jSelf->set_suspend_equivalent() ;
3352    }
3353    return jSelf->handle_special_suspend_equivalent_condition() ;
3354 }
3355 
3356 
3357 // ReenterI() is a specialized inline form of the latter half of the
3358 // contended slow-path from EnterI().  We use ReenterI() only for
3359 // monitor reentry in wait().
3360 //
3361 // In the future we should reconcile EnterI() and ReenterI(), adding
3362 // Knob_Reset and Knob_SpinAfterFutile support and restructuring the
3363 // loop accordingly.
3364 
3365 void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
3366     assert (Self != NULL                , "invariant") ;
3367     assert (SelfNode != NULL            , "invariant") ;
3368     assert (SelfNode->_thread == Self   , "invariant") ;
3369     assert (_waiters > 0                , "invariant") ;
3370     assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
3371     assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
3372     JavaThread * jt = (JavaThread *) Self ;
3373 
3374     int nWakeups = 0 ;
3375     for (;;) {
3376         ObjectWaiter::TStates v = SelfNode->TState ;
3377         guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
3378         assert    (_owner != Self, "invariant") ;
3379 
3380         if (TryLock (Self) > 0) break ;
3381         if (TrySpin (Self) > 0) break ;
3382 
3383         TEVENT (Wait Reentry - parking) ;
3384 
3385         // State transition wrappers around park() ...
3386         // ReenterI() wisely defers state transitions until
3387         // it's clear we must park the thread.
3388         {
3389            OSThreadContendState osts(Self->osthread());
3390            ThreadBlockInVM tbivm(jt);
3391 
3392            // cleared by handle_special_suspend_equivalent_condition()
3393            // or java_suspend_self()
3394            jt->set_suspend_equivalent();
3395            if (SyncFlags & 1) {
3396               Self->_ParkEvent->park ((jlong)1000) ;
3397            } else {
3398               Self->_ParkEvent->park () ;
3399            }
3400 
3401            // were we externally suspended while we were waiting?
3402            for (;;) {
3403               if (!ExitSuspendEquivalent (jt)) break ;
3404               if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
3405               jt->java_suspend_self();
3406               jt->set_suspend_equivalent();
3407            }
3408         }
3409 
3410         // Try again, but just so we distinguish between futile wakeups and
3411         // successful wakeups.  The following test isn't algorithmically
3412         // necessary, but it helps us maintain sensible statistics.
3413         if (TryLock(Self) > 0) break ;
3414 
3415         // The lock is still contested.
3416         // Keep a tally of the # of futile wakeups.
3417         // Note that the counter is not protected by a lock or updated by atomics.
3418         // That is by design - we trade "lossy" counters which are exposed to
3419         // races during updates for a lower probe effect.
3420         TEVENT (Wait Reentry - futile wakeup) ;
3421         ++ nWakeups ;
3422 
3423         // Assuming this is not a spurious wakeup we'll normally
3424         // find that _succ == Self.
3425         if (_succ == Self) _succ = NULL ;
3426 
3427         // Invariant: after clearing _succ a contending thread
3428         // *must* retry  _owner before parking.
3429         OrderAccess::fence() ;
3430 
3431         if (ObjectSynchronizer::_sync_FutileWakeups != NULL) {
3432           ObjectSynchronizer::_sync_FutileWakeups->inc() ;
3433         }
3434     }
3435 
3436     // Self has acquired the lock -- Unlink Self from the cxq or EntryList .
3437     // Normally we'll find Self on the EntryList.
3438     // Unlinking from the EntryList is constant-time and atomic-free.
3439     // From the perspective of the lock owner (this thread), the
3440     // EntryList is stable and cxq is prepend-only.
3441     // The head of cxq is volatile but the interior is stable.
3442     // In addition, Self.TState is stable.
3443 
3444     assert (_owner == Self, "invariant") ;
3445     assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
3446     UnlinkAfterAcquire (Self, SelfNode) ;
3447     if (_succ == Self) _succ = NULL ;
3448     assert (_succ != Self, "invariant") ;
3449     SelfNode->TState = ObjectWaiter::TS_RUN ;
3450     OrderAccess::fence() ;      // see comments at the end of EnterI()
3451 }
3452 
3453 bool ObjectMonitor::try_enter(Thread* THREAD) {
3454   if (THREAD != _owner) {
3455     if (THREAD->is_lock_owned ((address)_owner)) {
3456        assert(_recursions == 0, "internal state error");
3457        _owner = THREAD ;
3458        _recursions = 1 ;
3459        OwnerIsThread = 1 ;
3460        return true;
3461     }
3462     if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
3463       return false;
3464     }
3465     return true;
3466   } else {
3467     _recursions++;
3468     return true;
3469   }
3470 }
3471 
3472 void ATTR ObjectMonitor::enter(TRAPS) {
3473   // The following code is ordered to check the most common cases first
3474   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
3475   Thread * const Self = THREAD ;
3476   void * cur ;
3477 
3478   cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
3479   if (cur == NULL) {
3480      // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
3481      assert (_recursions == 0   , "invariant") ;
3482      assert (_owner      == Self, "invariant") ;
3483      // CONSIDER: set or assert OwnerIsThread == 1
3484      return ;
3485   }
3486 
3487   if (cur == Self) {
3488      // TODO-FIXME: check for integer overflow!  BUGID 6557169.
3489      _recursions ++ ;
3490      return ;
3491   }
3492 
3493   if (Self->is_lock_owned ((address)cur)) {
3494     assert (_recursions == 0, "internal state error");
3495     _recursions = 1 ;
3496     // Commute owner from a thread-specific on-stack BasicLockObject address to
3497     // a full-fledged "Thread *".
3498     _owner = Self ;
3499     OwnerIsThread = 1 ;
3500     return ;
3501   }
3502 
3503   // We've encountered genuine contention.
3504   assert (Self->_Stalled == 0, "invariant") ;
3505   Self->_Stalled = intptr_t(this) ;
3506 
3507   // Try one round of spinning *before* enqueueing Self
3508   // and before going through the awkward and expensive state
3509   // transitions.  The following spin is strictly optional ...
3510   // Note that if we acquire the monitor from an initial spin
3511   // we forgo posting JVMTI events and firing DTRACE probes.
3512   if (Knob_SpinEarly && TrySpin (Self) > 0) {
3513      assert (_owner == Self      , "invariant") ;
3514      assert (_recursions == 0    , "invariant") ;
3515      assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
3516      Self->_Stalled = 0 ;
3517      return ;
3518   }
3519 
3520   assert (_owner != Self          , "invariant") ;
3521   assert (_succ  != Self          , "invariant") ;
3522   assert (Self->is_Java_thread()  , "invariant") ;
3523   JavaThread * jt = (JavaThread *) Self ;
3524   assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
3525   assert (jt->thread_state() != _thread_blocked   , "invariant") ;
3526   assert (this->object() != NULL  , "invariant") ;
3527   assert (_count >= 0, "invariant") ;
3528 
3529   // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
3530   // Ensure the object-monitor relationship remains stable while there's contention.
3531   Atomic::inc_ptr(&_count);
3532 
3533   { // Change java thread status to indicate blocked on monitor enter.
3534     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
3535 
3536     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
3537     if (JvmtiExport::should_post_monitor_contended_enter()) {
3538       JvmtiExport::post_monitor_contended_enter(jt, this);
3539     }
3540 
3541     OSThreadContendState osts(Self->osthread());
3542     ThreadBlockInVM tbivm(jt);
3543 
3544     Self->set_current_pending_monitor(this);
3545 
3546     // TODO-FIXME: change the following for(;;) loop to straight-line code.
3547     for (;;) {
3548       jt->set_suspend_equivalent();
3549       // cleared by handle_special_suspend_equivalent_condition()
3550       // or java_suspend_self()
3551 
3552       EnterI (THREAD) ;
3553 
3554       if (!ExitSuspendEquivalent(jt)) break ;
3555 
3556       //
3557       // We have acquired the contended monitor, but while we were
3558       // waiting another thread suspended us. We don't want to enter
3559       // the monitor while suspended because that would surprise the
3560       // thread that suspended us.
3561       //
3562       _recursions = 0 ;
3563       _succ = NULL ;
3564       exit (Self) ;
3565 
3566       jt->java_suspend_self();
3567     }
3568     Self->set_current_pending_monitor(NULL);
3569   }
3570 
3571   Atomic::dec_ptr(&_count);
3572   assert (_count >= 0, "invariant") ;
3573   Self->_Stalled = 0 ;
3574 
3575   // Must either set _recursions = 0 or ASSERT _recursions == 0.
3576   assert (_recursions == 0     , "invariant") ;
3577   assert (_owner == Self       , "invariant") ;
3578   assert (_succ  != Self       , "invariant") ;
3579   assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
3580 
3581   // The thread -- now the owner -- is back in vm mode.
3582   // Report the glorious news via TI,DTrace and jvmstat.
3583   // The probe effect is non-trivial.  All the reportage occurs
3584   // while we hold the monitor, increasing the length of the critical
3585   // section.  Amdahl's parallel speedup law comes vividly into play.
3586   //
3587   // Another option might be to aggregate the events (thread local or
3588   // per-monitor aggregation) and defer reporting until a more opportune
3589   // time -- such as next time some thread encounters contention but has
3590   // yet to acquire the lock.  While spinning, that thread could
3591   // increment JVMStat counters, etc.
3592 
3593   DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
3594   if (JvmtiExport::should_post_monitor_contended_entered()) {
3595     JvmtiExport::post_monitor_contended_entered(jt, this);
3596   }
3597   if (ObjectSynchronizer::_sync_ContendedLockAttempts != NULL) {
3598      ObjectSynchronizer::_sync_ContendedLockAttempts->inc() ;
3599   }
3600 }
3601 
3602 void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
3603    assert (_owner == Self, "invariant") ;
3604 
3605    // Exit protocol:
3606    // 1. ST _succ = wakee
3607    // 2. membar #loadstore|#storestore;
3608    // 3. ST _owner = NULL
3609    // 4. unpark(wakee)
3610 
3611    _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ;
3612    ParkEvent * Trigger = Wakee->_event ;
3613 
3614    // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
3615    // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
3616    // out-of-scope (non-extant).
3617    Wakee  = NULL ;
3618 
3619    // Drop the lock
3620    OrderAccess::release_store_ptr (&_owner, NULL) ;
3621    OrderAccess::fence() ;                               // ST _owner vs LD in unpark()
3622 
3623    // TODO-FIXME:
3624    // If there's a safepoint pending the best policy would be to
3625    // get _this thread to a safepoint and only wake the successor
3626    // after the safepoint completed.  monitorexit uses a "leaf"
3627    // state transition, however, so this thread can't become
3628    // safe at this point in time.  (Its stack isn't walkable).
3629    // The next best thing is to defer waking the successor by
3630    // adding it to a list of threads to be unparked at the
3631    // end of the forthcoming STW.
3632    if (SafepointSynchronize::do_call_back()) {
3633       TEVENT (unpark before SAFEPOINT) ;
3634    }
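   // A hedged sketch of the deferred-wakeup idea described in the TODO-FIXME
   // above -- not implemented here.  The names _DeferredWakeList and
   // drain_deferred_wakeups() are hypothetical:
   //
   //   if (SafepointSynchronize::do_call_back()) {
   //      push_onto(_DeferredWakeList, Trigger) ;  // queue the wakee instead of unparking now
   //      // A post-safepoint hook, drain_deferred_wakeups(), would walk the list
   //      // and unpark() each queued event once the STW phase has completed.
   //   } else {
   //      Trigger->unpark() ;
   //   }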
3635 
3636    // Possible optimizations ...
3637    //
3638    // * Consider: set Wakee->UnparkTime = timeNow()
3639    //   When the thread wakes up it'll compute (timeNow() - Self->UnparkTime()).
3640    //   By measuring recent ONPROC latency we can approximate the
3641    //   system load.  In turn, we can feed that information back
3642    //   into the spinning & succession policies.
3643    //   (ONPROC latency correlates strongly with load).
3644    //
3645    // * Pull affinity:
3646    //   If the wakee is cold then transiently setting its affinity
3647    //   to the current CPU is a good idea.
3648    //   See http://j2se.east/~dice/PERSIST/050624-PullAffinity.txt
3649    DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
3650    Trigger->unpark() ;
3651 
3652    // Maintain stats and report events to JVMTI
3653    if (ObjectSynchronizer::_sync_Parks != NULL) {
3654       ObjectSynchronizer::_sync_Parks->inc() ;
3655    }
3656 }
3657 
3658 
3659 // exit()
3660 // ~~~~~~
3661 // Note that the collector can't reclaim the objectMonitor or deflate
3662 // the object out from underneath the thread calling ::exit() as the
3663 // thread calling ::exit() never transitions to a stable state.
3664 // This inhibits GC, which in turn inhibits asynchronous (and
3665 // inopportune) reclamation of "this".
3666 //
3667 // We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
3668 // There's one exception to the claim above, however.  EnterI() can call
3669 // exit() to drop a lock if the acquirer has been externally suspended.
3670 // In that case exit() is called with _thread_state as _thread_blocked,
3671 // but the monitor's _count field is > 0, which inhibits reclamation.
3672 //
3673 // 1-0 exit
3674 // ~~~~~~~~
3675 // ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
3676 // the fast-path operators have been optimized so the common ::exit()
3677 // operation is 1-0.  See i486.ad fast_unlock(), for instance.
3678 // The code emitted by fast_unlock() elides the usual MEMBAR.  This
3679 // greatly improves latency -- MEMBAR and CAS having considerable local
3680 // latency on modern processors -- but at the cost of "stranding".  Absent the
3681 // MEMBAR, a thread in fast_unlock() can race a thread in the slow
3682 // ::enter() path, resulting in the entering thread being stranded
3683 // and a progress-liveness failure.   Stranding is extremely rare.
3684 // We use timers (timed park operations) & periodic polling to detect
3685 // and recover from stranding.  Potentially stranded threads periodically
3686 // wake up and poll the lock.  See the usage of the _Responsible variable.
3687 //
3688 // The CAS() in enter provides for safety and exclusion, while the CAS or
3689 // MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
3690 // eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
3691 // We detect and recover from stranding with timers.
3692 //
3693 // If a thread transiently strands it'll park until (a) another
3694 // thread acquires the lock and then drops the lock, at which time the
3695 // exiting thread will notice and unpark the stranded thread, or, (b)
3696 // the timer expires.  If the lock is high traffic then the stranding latency
3697 // will be low due to (a).  If the lock is low traffic then the odds of
3698 // stranding are lower, although the worst-case stranding latency
3699 // is longer.  Critically, we don't want to put excessive load on the
3700 // platform's timer subsystem.  We want to minimize both the timer injection
3701 // rate (timers created/sec) and the number of timers active at
3702 // any one time.  (More precisely, we want to minimize timer-seconds, which is
3703 // the integral of the # of active timers at any instant over time).
3704 // Both impinge on OS scalability.  Given that, at most one thread parked on
3705 // a monitor will use a timer.
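// An illustrative sketch -- not the code below -- contrasting the two exit idioms
// discussed above.  The helpers TryLock() and MaxTimedParkInterval are hypothetical:
//
//   1-1 exit:                              1-0 exit (as emitted by fast_unlock):
//     ST _owner = NULL                       ST _owner = NULL
//     MEMBAR #storeload                      // no MEMBAR -- an enqueuing thread in the
//     LD _EntryList|_cxq                     // slow ::enter() path may strand transiently
//     if non-null && _succ == NULL :
//        wake a successor
//
//   A potentially stranded thread recovers by polling the lock with a timed park,
//   roughly:
//     while (!TryLock (Self)) {
//        Self->_ParkEvent->park (MaxTimedParkInterval) ;   // at most one such
//     }                                                    // "Responsible" thread per monitor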
3706 
3707 void ATTR ObjectMonitor::exit(TRAPS) {
3708    Thread * Self = THREAD ;
3709    if (THREAD != _owner) {
3710      if (THREAD->is_lock_owned((address) _owner)) {
3711        // Transmute _owner from a BasicLock pointer to a Thread address.
3712        // We don't need to hold _mutex for this transition.
3713        // Non-null to Non-null is safe as long as all readers can
3714        // tolerate either flavor.
3715        assert (_recursions == 0, "invariant") ;
3716        _owner = THREAD ;
3717        _recursions = 0 ;
3718        OwnerIsThread = 1 ;
3719      } else {
3720        // NOTE: we need to handle unbalanced monitor enter/exit
3721        // in native code by throwing an exception.
3722        // TODO: Throw an IllegalMonitorStateException ?
3723        TEVENT (Exit - Throw IMSX) ;
3724        assert(false, "Non-balanced monitor enter/exit!");
3725        if (false) {
3726           THROW(vmSymbols::java_lang_IllegalMonitorStateException());
3727        }
3728        return;
3729      }
3730    }
3731 
3732    if (_recursions != 0) {
3733      _recursions--;        // this is simple recursive enter
3734      TEVENT (Inflated exit - recursive) ;
3735      return ;
3736    }
3737 
3738    // Invariant: after setting Responsible=null a thread must execute
3739    // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
3740    if ((SyncFlags & 4) == 0) {
3741       _Responsible = NULL ;
3742    }
3743 
3744    for (;;) {
3745       assert (THREAD == _owner, "invariant") ;
3746 
3747       // Fast-path monitor exit:
3748       //
3749       // Observe the Dekker/Lamport duality:
3750       // A thread in ::exit() executes:
3751       //   ST Owner=null; MEMBAR; LD EntryList|cxq.
3752       // A thread in the contended ::enter() path executes the complementary:
3753       //   ST EntryList|cxq = nonnull; MEMBAR; LD Owner.
3754       //
3755       // Note that there's a benign race in the exit path.  We can drop the
3756       // lock, another thread can reacquire the lock immediately, and we can
3757       // then wake a thread unnecessarily (yet another flavor of futile wakeup).
3758       // This is benign, and we've structured the code so the windows are short
3759       // and the frequency of such futile wakeups is low.
3760       //
3761       // We could eliminate the race by encoding both the "LOCKED" state and
3762       //   the queue head in a single word.  Exit would then use CAS to
3763       // clear the LOCKED bit/byte.  This precludes the desirable 1-0 optimization,
3764       // however.
3765       //
3766       // Possible fast-path ::exit() optimization:
3767       // The current fast-path exit implementation fetches both cxq and EntryList.
3768       // See also i486.ad fast_unlock().  Testing has shown that two LDs
3769       // aren't measurably slower than a single LD on any platform.
3770       // Still, we could reduce the 2 LDs to one or zero by one of the following:
3771       //
3772       // - Use _count instead of cxq|EntryList
3773       //   We intend to eliminate _count, however, when we switch
3774       //   to on-the-fly deflation in ::exit() as is used in
3775       //   Metalocks and RelaxedLocks.
3776       //
3777       // - Establish the invariant that cxq == null implies EntryList == null.
3778       //   Set cxq == EMPTY (1) to encode the state where cxq is empty
3779       //   but EntryList != null.  EMPTY is a distinguished value.
3780       //   The fast-path exit() would fetch cxq but not EntryList.
3781       //
3782       // - Encode succ as follows:
3783       //   succ = t :  Thread t is the successor -- t is ready or is spinning.
3784       //               Exiting thread does not need to wake a successor.
3785       //   succ = 0 :  No successor required -> (EntryList|cxq) == null
3786       //               Exiting thread does not need to wake a successor
3787       //   succ = 1 :  Successor required    -> (EntryList|cxq) != null and
3788       //               logically succ == null.
3789       //               Exiting thread must wake a successor.
3790       //
3791       //   The 1-1 fast-exit path would appear as :
3792       //     _owner = null ; membar ;
3793       //     if (_succ == 1 && CAS (&_owner, null, Self) == null) goto SlowPath
3794       //     goto FastPathDone ;
3795       //
3796       //   and the 1-0 fast-exit path would appear as:
3797       //      if (_succ == 1) goto SlowPath
3798       //      Owner = null ;
3799       //      goto FastPathDone
3800       //
3801       // - Encode the LSB of _owner as 1 to indicate that exit()
3802       //   must use the slow-path and make a successor ready.
3803       //   (_owner & 1) == 0 IFF succ != null || (EntryList|cxq) == null
3804       //   (_owner & 1) == 1 IFF succ == null && (EntryList|cxq) != null (obviously)
3805       //   The 1-0 fast exit path would read:
3806       //      if (_owner != Self) goto SlowPath
3807       //      _owner = null
3808       //      goto FastPathDone
3809 
3810       if (Knob_ExitPolicy == 0) {
3811          // release semantics: prior loads and stores from within the critical section
3812          // must not float (reorder) past the following store that drops the lock.
3813          // On SPARC that requires MEMBAR #loadstore|#storestore.
3814          // But of course in TSO #loadstore|#storestore is not required.
3815          // I'd like to write one of the following:
3816          // A.  OrderAccess::release() ; _owner = NULL
3817          // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
3818          // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
3819          // store into a _dummy variable.  That store is not needed, but can result
3820          // in massive wasteful coherency traffic on classic SMP systems.
3821          // Instead, I use release_store(), which is implemented as just a simple
3822          // ST on x64, x86 and SPARC.
3823          OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
3824          OrderAccess::storeload() ;                         // See if we need to wake a successor
3825          if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
3826             TEVENT (Inflated exit - simple egress) ;
3827             return ;
3828          }
3829          TEVENT (Inflated exit - complex egress) ;
3830 
3831          // Normally the exiting thread is responsible for ensuring succession,
3832          // but if other successors are ready or other entering threads are spinning
3833          // then this thread can simply store NULL into _owner and exit without
3834          // waking a successor.  The existence of spinners or ready successors
3835          // guarantees proper succession (liveness).  Responsibility passes to the
3836          // ready or running successors.  The exiting thread delegates the duty.
3837          // More precisely, if a successor already exists this thread is absolved
3838          // of the responsibility of waking (unparking) one.
3839          //
3840          // The _succ variable is critical to reducing futile wakeup frequency.
3841          // _succ identifies the "heir presumptive" thread that has been made
3842          // ready (unparked) but that has not yet run.  We need only one such
3843          // successor thread to guarantee progress.
3844          // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
3845          // section 3.3 "Futile Wakeup Throttling" for details.
3846          //
3847          // Note that spinners in Enter() also set _succ non-null.
3848          // In the current implementation spinners opportunistically set
3849          // _succ so that exiting threads might avoid waking a successor.
3850          // Another less appealing alternative would be for the exiting thread
3851          // to drop the lock and then spin briefly to see if a spinner managed
3852          // to acquire the lock.  If so, the exiting thread could exit
3853          // immediately without waking a successor, otherwise the exiting
3854          // thread would need to dequeue and wake a successor.
3855          // (Note that we'd need to make the post-drop spin short, but no
3856          // shorter than the worst-case round-trip cache-line migration time.
3857          // The dropped lock needs to become visible to the spinner, and then
3858          // the acquisition of the lock by the spinner must become visible to
3859          // the exiting thread).
3860          //
3861 
3862          // It appears that an heir-presumptive (successor) must be made ready.
3863          // Only the current lock owner can manipulate the EntryList or
3864          // drain _cxq, so we need to reacquire the lock.  If we fail
3865          // to reacquire the lock the responsibility for ensuring succession
3866          // falls to the new owner.
3867          //
3868          if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
3869             return ;
3870          }
3871          TEVENT (Exit - Reacquired) ;
3872       } else {
3873          if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
3874             OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
3875             OrderAccess::storeload() ;
3876             // Ratify the previously observed values.
3877             if (_cxq == NULL || _succ != NULL) {
3878                 TEVENT (Inflated exit - simple egress) ;
3879                 return ;
3880             }
3881 
3882             // inopportune interleaving -- the exiting thread (this thread)
3883             // in the fast-exit path raced an entering thread in the slow-enter
3884             // path.
3885             // We have two choices:
3886             // A.  Try to reacquire the lock.
3887             //     If the CAS() fails return immediately, otherwise
3888             //     we either restart/rerun the exit operation, or simply
3889             //     fall-through into the code below which wakes a successor.
3890             // B.  If the elements forming the EntryList|cxq are TSM (type-stable memory)
3891             //     we could simply unpark() the lead thread and return
3892             //     without having set _succ.
3893             if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
3894                TEVENT (Inflated exit - reacquired succeeded) ;
3895                return ;
3896             }
3897             TEVENT (Inflated exit - reacquired failed) ;
3898          } else {
3899             TEVENT (Inflated exit - complex egress) ;
3900          }
3901       }
3902 
3903       guarantee (_owner == THREAD, "invariant") ;
3904 
3905       // Select an appropriate successor ("heir presumptive") from the EntryList
3906       // and make it ready.  Generally we just wake the head of EntryList .
3907       // There's no algorithmic constraint that we use the head - it's just
3908       // a policy decision.   Note that the thread at head of the EntryList
3909       // remains at the head until it acquires the lock.  This means we'll
3910       // repeatedly wake the same thread until it manages to grab the lock.
3911       // This is generally a good policy - if we're seeing lots of futile wakeups
3912       // at least we're waking/rewaking a thread that's likely to be hot or warm
3913       // (have residual D$ and TLB affinity).
3914       //
3915       // "Wakeup locality" optimization:
3916       // http://j2se.east/~dice/PERSIST/040825-WakeLocality.txt
3917       // In the future we'll try to bias the selection mechanism
3918       // to preferentially pick a thread that recently ran on
3919       // a processor element that shares cache with the CPU on which
3920       // the exiting thread is running.   We need access to Solaris'
3921       // schedctl.sc_cpu to make that work.
3922       //
3923       ObjectWaiter * w = NULL ;
3924       int QMode = Knob_QMode ;
3925 
3926       if (QMode == 2 && _cxq != NULL) {
3927           // QMode == 2 : cxq has precedence over EntryList.
3928           // Try to directly wake a successor from the cxq.
3929           // If successful, the successor will need to unlink itself from cxq.
3930           w = _cxq ;
3931           assert (w != NULL, "invariant") ;
3932           assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
3933           ExitEpilog (Self, w) ;
3934           return ;
3935       }
3936 
3937       if (QMode == 3 && _cxq != NULL) {
3938           // Aggressively drain cxq into EntryList at the first opportunity.
3939           // This policy ensures that recently-run threads live at the head of EntryList.
3940           // Drain _cxq into EntryList - bulk transfer.
3941           // First, detach _cxq.
3942           // The following loop is tantamount to: w = swap (&cxq, NULL)
3943           w = _cxq ;
3944           for (;;) {
3945              assert (w != NULL, "Invariant") ;
3946              ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
3947              if (u == w) break ;
3948              w = u ;
3949           }
3950           assert (w != NULL              , "invariant") ;
3951 
3952           ObjectWaiter * q = NULL ;
3953           ObjectWaiter * p ;
3954           for (p = w ; p != NULL ; p = p->_next) {
3955               guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
3956               p->TState = ObjectWaiter::TS_ENTER ;
3957               p->_prev = q ;
3958               q = p ;
3959           }
3960 
3961           // Append the RATs to the EntryList
3962           // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
3963           ObjectWaiter * Tail ;
3964           for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
3965           if (Tail == NULL) {
3966               _EntryList = w ;
3967           } else {
3968               Tail->_next = w ;
3969               w->_prev = Tail ;
3970           }
3971 
3972           // Fall thru into code that tries to wake a successor from EntryList
3973       }
3974 
3975       if (QMode == 4 && _cxq != NULL) {
3976           // Aggressively drain cxq into EntryList at the first opportunity.
3977           // This policy ensures that recently-run threads live at the head of EntryList.
3978 
3979           // Drain _cxq into EntryList - bulk transfer.
3980           // First, detach _cxq.
3981           // The following loop is tantamount to: w = swap (&cxq, NULL)
3982           w = _cxq ;
3983           for (;;) {
3984              assert (w != NULL, "Invariant") ;
3985              ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
3986              if (u == w) break ;
3987              w = u ;
3988           }
3989           assert (w != NULL              , "invariant") ;
3990 
3991           ObjectWaiter * q = NULL ;
3992           ObjectWaiter * p ;
3993           for (p = w ; p != NULL ; p = p->_next) {
3994               guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
3995               p->TState = ObjectWaiter::TS_ENTER ;
3996               p->_prev = q ;
3997               q = p ;
3998           }
3999 
4000           // Prepend the RATs to the EntryList
4001           if (_EntryList != NULL) {
4002               q->_next = _EntryList ;
4003               _EntryList->_prev = q ;
4004           }
4005           _EntryList = w ;
4006 
4007           // Fall thru into code that tries to wake a successor from EntryList
4008       }
4009 
4010       w = _EntryList  ;
4011       if (w != NULL) {
4012           // I'd like to write: guarantee (w->_thread != Self).
4013           // But in practice an exiting thread may find itself on the EntryList.
4014           // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
4015           // then calls exit().  Exit releases the lock by setting O._owner to NULL.
4016           // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
4017           // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
4018           // releases the lock "O".  T1 resumes immediately after the ST of null into
4019           // _owner, above.  T1 notices that the EntryList is populated, so it
4020           // reacquires the lock and then finds itself on the EntryList.
4021           // Given all that, we have to tolerate the circumstance where "w" is
4022           // associated with Self.
4023           assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
4024           ExitEpilog (Self, w) ;
4025           return ;
4026       }
4027 
4028       // If we find that both _cxq and EntryList are null then just
4029       // re-run the exit protocol from the top.
4030       w = _cxq ;
4031       if (w == NULL) continue ;
4032 
4033       // Drain _cxq into EntryList - bulk transfer.
4034       // First, detach _cxq.
4035       // The following loop is tantamount to: w = swap (&cxq, NULL)
4036       for (;;) {
4037           assert (w != NULL, "Invariant") ;
4038           ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
4039           if (u == w) break ;
4040           w = u ;
4041       }
4042       TEVENT (Inflated exit - drain cxq into EntryList) ;
4043 
4044       assert (w != NULL              , "invariant") ;
4045       assert (_EntryList  == NULL    , "invariant") ;
4046 
4047       // Convert the LIFO SLL anchored by _cxq into a DLL.
4048       // The list reorganization step operates in O(LENGTH(w)) time.
4049       // It's critical that this step operate quickly as
4050       // "Self" still holds the outer-lock, restricting parallelism
4051       // and effectively lengthening the critical section.
4052       // Invariant: s chases t chases u.
4053       // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
4054       // we have faster access to the tail.
4055 
4056       if (QMode == 1) {
4057          // QMode == 1 : drain cxq to EntryList, reversing order
4058          // We also reverse the order of the list.
4059          ObjectWaiter * s = NULL ;
4060          ObjectWaiter * t = w ;
4061          ObjectWaiter * u = NULL ;
4062          while (t != NULL) {
4063              guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
4064              t->TState = ObjectWaiter::TS_ENTER ;
4065              u = t->_next ;
4066              t->_prev = u ;
4067              t->_next = s ;
4068              s = t;
4069              t = u ;
4070          }
4071          _EntryList  = s ;
4072          assert (s != NULL, "invariant") ;
4073       } else {
4074          // QMode == 0 or QMode == 2
4075          _EntryList = w ;
4076          ObjectWaiter * q = NULL ;
4077          ObjectWaiter * p ;
4078          for (p = w ; p != NULL ; p = p->_next) {
4079              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
4080              p->TState = ObjectWaiter::TS_ENTER ;
4081              p->_prev = q ;
4082              q = p ;
4083          }
4084       }
4085 
4086       // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
4087       // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
4088 
4089       // See if we can abdicate to a spinner instead of waking a thread.
4090       // A primary goal of the implementation is to reduce the
4091       // context-switch rate.
4092       if (_succ != NULL) continue;
4093 
4094       w = _EntryList  ;
4095       if (w != NULL) {
4096           guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
4097           ExitEpilog (Self, w) ;
4098           return ;
4099       }
4100    }
4101 }
4102 // complete_exit exits a lock returning recursion count
4103 // complete_exit/reenter operate as a wait without waiting
4104 // complete_exit requires an inflated monitor
4105 // The _owner field is not always the Thread addr even with an
4106 // inflated monitor, e.g. the monitor can be inflated by a non-owning
4107 // thread due to contention.
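// Illustrative (hypothetical) pairing of the two operations -- fully release the
// monitor around some action and then restore the saved recursion count.  "mon"
// is an arbitrary inflated monitor:
//
//   intptr_t saved = mon->complete_exit(THREAD) ;   // drop ownership, remember recursions
//   ... perform work without owning the monitor ...
//   mon->reenter(saved, THREAD) ;                   // reacquire and restore recursions
//
// This is the same shape wait() uses internally: save _recursions, exit completely,
// park, then re-enter and restore.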
4108 intptr_t ObjectMonitor::complete_exit(TRAPS) {
4109    Thread * const Self = THREAD;
4110    assert(Self->is_Java_thread(), "Must be Java thread!");
4111    JavaThread *jt = (JavaThread *)THREAD;
4112 
4113    DeferredInitialize();
4114 
4115    if (THREAD != _owner) {
4116     if (THREAD->is_lock_owned ((address)_owner)) {
4117        assert(_recursions == 0, "internal state error");
4118        _owner = THREAD ;   /* Convert from basiclock addr to Thread addr */
4119        _recursions = 0 ;
4120        OwnerIsThread = 1 ;
4121     }
4122    }
4123 
4124    guarantee(Self == _owner, "complete_exit not owner");
4125    intptr_t save = _recursions; // record the old recursion count
4126    _recursions = 0;        // set the recursion level to be 0
4127    exit (Self) ;           // exit the monitor
4128    guarantee (_owner != Self, "invariant");
4129    return save;
4130 }
4131 
4132 // reenter() enters a lock and sets recursion count
4133 // complete_exit/reenter operate as a wait without waiting
4134 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
4135    Thread * const Self = THREAD;
4136    assert(Self->is_Java_thread(), "Must be Java thread!");
4137    JavaThread *jt = (JavaThread *)THREAD;
4138 
4139    guarantee(_owner != Self, "reenter already owner");
4140    enter (THREAD);       // enter the monitor
4141    guarantee (_recursions == 0, "reenter recursion");
4142    _recursions = recursions;
4143    return;
4144 }
4145 
4146 // Note: a subset of changes to ObjectMonitor::wait()
4147 // will need to be replicated in complete_exit above
4148 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
4149    Thread * const Self = THREAD ;
4150    assert(Self->is_Java_thread(), "Must be Java thread!");
4151    JavaThread *jt = (JavaThread *)THREAD;
4152 
4153    DeferredInitialize () ;
4154 
4155    // Throw IMSX or IEX.
4156    CHECK_OWNER();
4157 
4158    // check for a pending interrupt
4159    if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
4160      // post monitor waited event.  Note that this is past-tense, we are done waiting.
4161      if (JvmtiExport::should_post_monitor_waited()) {
4162         // Note: 'false' parameter is passed here because the
4163         // wait was not timed out due to thread interrupt.
4164         JvmtiExport::post_monitor_waited(jt, this, false);
4165      }
4166      TEVENT (Wait - Throw IEX) ;
4167      THROW(vmSymbols::java_lang_InterruptedException());
4168      return ;
4169    }
4170    TEVENT (Wait) ;
4171 
4172    assert (Self->_Stalled == 0, "invariant") ;
4173    Self->_Stalled = intptr_t(this) ;
4174    jt->set_current_waiting_monitor(this);
4175 
4176    // create a node to be put into the queue
4177    // Critically, after we reset() the event but prior to park(), we must check
4178    // for a pending interrupt.
4179    ObjectWaiter node(Self);
4180    node.TState = ObjectWaiter::TS_WAIT ;
4181    Self->_ParkEvent->reset() ;
4182    OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
4183 
4184    // Enter the waiting queue, which is a circular doubly linked list in this case
4185    // but it could be a priority queue or any data structure.
4186    // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
4187    // by the owner of the monitor *except* in the case where park()
4188    // returns because of a timeout or interrupt.  Contention is exceptionally rare
4189    // so we use a simple spin-lock instead of a heavier-weight blocking lock.
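   // For illustration only -- a minimal sketch of what enqueueing onto a circular
   // doubly linked WaitSet might look like.  The real AddWaiter() is defined
   // elsewhere in this file and is authoritative:
   //
   //   if (_WaitSet == NULL) {
   //      node->_next = node->_prev = node ;          // first waiter: self-linked ring
   //      _WaitSet = node ;
   //   } else {
   //      ObjectWaiter * head = _WaitSet ;
   //      ObjectWaiter * tail = head->_prev ;
   //      tail->_next = node ; node->_prev = tail ;   // append at the tail of the ring
   //      node->_next = head ; head->_prev = node ;
   //   }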
4190 
4191    Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
4192    AddWaiter (&node) ;
4193    Thread::SpinRelease (&_WaitSetLock) ;
4194 
4195    if ((SyncFlags & 4) == 0) {
4196       _Responsible = NULL ;
4197    }
4198    intptr_t save = _recursions; // record the old recursion count
4199    _waiters++;                  // increment the number of waiters
4200    _recursions = 0;             // set the recursion level to be 0
4201    exit (Self) ;                    // exit the monitor
4202    guarantee (_owner != Self, "invariant") ;
4203 
4204    // As soon as the ObjectMonitor's ownership is dropped in the exit()
4205    // call above, another thread can enter() the ObjectMonitor, do the
4206    // notify(), and exit() the ObjectMonitor. If the other thread's
4207    // exit() call chooses this thread as the successor and the unpark()
4208    // call happens to occur while this thread is posting a
4209    // MONITOR_CONTENDED_EXIT event, then we run the risk of the event
4210    // handler using RawMonitors and consuming the unpark().
4211    //
4212    // To avoid the problem, we re-post the event. This does no harm
4213    // even if the original unpark() was not consumed because we are the
4214    // chosen successor for this monitor.
4215    if (node._notified != 0 && _succ == Self) {
4216       node._event->unpark();
4217    }
4218 
4219    // The thread is on the WaitSet list - now park() it.
4220    // On MP systems it's conceivable that a brief spin before we park
4221    // could be profitable.
4222    //
4223    // TODO-FIXME: change the following logic to a loop of the form
4224    //   while (!timeout && !interrupted && _notified == 0) park()
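   // A hedged sketch of the loop form the TODO-FIXME above suggests -- not the
   // code that follows.  The deadline bookkeeping is hypothetical:
   //
   //   jlong deadline = os::javaTimeMillis() + millis ;
   //   while (node._notified == 0 &&
   //          !(interruptible && Thread::is_interrupted(Self, false)) &&
   //          (millis <= 0 || os::javaTimeMillis() < deadline)) {
   //      if (millis <= 0) Self->_ParkEvent->park () ;
   //      else             Self->_ParkEvent->park (deadline - os::javaTimeMillis()) ;
   //   }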
4225 
4226    int ret = OS_OK ;
4227    int WasNotified = 0 ;
4228    { // State transition wrappers
4229      OSThread* osthread = Self->osthread();
4230      OSThreadWaitState osts(osthread, true);
4231      {
4232        ThreadBlockInVM tbivm(jt);
4233        // Thread is in thread_blocked state and oop access is unsafe.
4234        jt->set_suspend_equivalent();
4235 
4236        if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
4237            // Intentionally empty
4238        } else
4239        if (node._notified == 0) {
4240          if (millis <= 0) {
4241             Self->_ParkEvent->park () ;
4242          } else {
4243             ret = Self->_ParkEvent->park (millis) ;
4244          }
4245        }
4246 
4247        // were we externally suspended while we were waiting?
4248        if (ExitSuspendEquivalent (jt)) {
4249           // TODO-FIXME: add -- if succ == Self then succ = null.
4250           jt->java_suspend_self();
4251        }
4252 
4253      } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
4254 
4255 
4256      // Node may be on the WaitSet, the EntryList (or cxq), or in transition
4257      // from the WaitSet to the EntryList.
4258      // See if we need to remove Node from the WaitSet.
4259      // We use double-checked locking to avoid grabbing _WaitSetLock
4260      // if the thread is not on the wait queue.
4261      //
4262      // Note that we don't need a fence before the fetch of TState.
4263      // In the worst case we'll fetch an old, stale value of TS_WAIT previously
4264      // written by this thread. (perhaps the fetch might even be satisfied
4265      // by a look-aside into the processor's own store buffer, although given
4266      // the length of the code path between the prior ST and this load that's
4267      // highly unlikely).  If the following LD fetches a stale TS_WAIT value
4268      // then we'll acquire the lock and then re-fetch a fresh TState value.
4269      // That is, we fail toward safety.
4270 
4271      if (node.TState == ObjectWaiter::TS_WAIT) {
4272          Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ;
4273          if (node.TState == ObjectWaiter::TS_WAIT) {
4274             DequeueSpecificWaiter (&node) ;       // unlink from WaitSet
4275             assert(node._notified == 0, "invariant");
4276             node.TState = ObjectWaiter::TS_RUN ;
4277          }
4278          Thread::SpinRelease (&_WaitSetLock) ;
4279      }
4280 
4281      // The thread is now either off-list (TS_RUN),
4282      // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
4283      // The Node's TState variable is stable from the perspective of this thread.
4284      // No other threads will asynchronously modify TState.
4285      guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ;
4286      OrderAccess::loadload() ;
4287      if (_succ == Self) _succ = NULL ;
4288      WasNotified = node._notified ;
4289 
4290      // Reentry phase -- reacquire the monitor.
4291      // re-enter contended monitor after object.wait().
4292      // retain OBJECT_WAIT state until re-enter successfully completes
4293      // Thread state is thread_in_vm and oop access is again safe,
4294      // although the raw address of the object may have changed.
4295      // (Don't cache naked oops over safepoints, of course).
4296 
4297      // post monitor waited event. Note that this is past-tense, we are done waiting.
4298      if (JvmtiExport::should_post_monitor_waited()) {
4299        JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
4300      }
4301      OrderAccess::fence() ;
4302 
4303      assert (Self->_Stalled != 0, "invariant") ;
4304      Self->_Stalled = 0 ;
4305 
4306      assert (_owner != Self, "invariant") ;
4307      ObjectWaiter::TStates v = node.TState ;
4308      if (v == ObjectWaiter::TS_RUN) {
4309          enter (Self) ;
4310      } else {
4311          guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
4312          ReenterI (Self, &node) ;
4313          node.wait_reenter_end(this);
4314      }
4315 
4316      // Self has reacquired the lock.
4317      // Lifecycle - the node representing Self must not appear on any queues.
4318      // Node is about to go out-of-scope, but even if it were immortal we wouldn't
4319      // want residual elements associated with this thread left on any lists.
4320      guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
4321      assert    (_owner == Self, "invariant") ;
4322      assert    (_succ != Self , "invariant") ;
4323    } // OSThreadWaitState()
4324 
4325    jt->set_current_waiting_monitor(NULL);
4326 
4327    guarantee (_recursions == 0, "invariant") ;
4328    _recursions = save;     // restore the old recursion count
4329    _waiters--;             // decrement the number of waiters
4330 
4331    // Verify a few postconditions
4332    assert (_owner == Self       , "invariant") ;
4333    assert (_succ  != Self       , "invariant") ;
4334    assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
4335 
4336    if (SyncFlags & 32) {
4337       OrderAccess::fence() ;
4338    }
4339 
4340    // check if the notification happened
4341    if (!WasNotified) {
4342      // no, it could be timeout or Thread.interrupt() or both
4343      // check for interrupt event, otherwise it is timeout
4344      if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
4345        TEVENT (Wait - throw IEX from epilog) ;
4346        THROW(vmSymbols::java_lang_InterruptedException());
4347      }
4348    }
4349 
4350    // NOTE: A spurious wakeup will be treated as a timeout.
4351    // Monitor notify has precedence over thread interrupt.
4352 }
4353 
4354 
4355 // Consider:
4356 // If the lock is cool (cxq == null && succ == null) and we're on an MP system
4357 // then instead of transferring a thread from the WaitSet to the EntryList
4358 // we might just dequeue a thread from the WaitSet and directly unpark() it.
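// A hedged sketch of that idea -- roughly what the Policy == 4 branch of notify()
// below already does, but applied unconditionally whenever the lock is "cool".
// Not implemented here:
//
//   if (_cxq == NULL && _succ == NULL) {
//      ObjectWaiter * w = DequeueWaiter() ;
//      if (w != NULL) {
//         w->_notified = 1 ;
//         w->TState = ObjectWaiter::TS_RUN ;
//         OrderAccess::fence() ;
//         w->_event->unpark() ;       // the wakee would then re-enter via enter(), not ReenterI()
//      }
//      return ;
//   }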
4359 
4360 void ObjectMonitor::notify(TRAPS) {
4361   CHECK_OWNER();
4362   if (_WaitSet == NULL) {
4363      TEVENT (Empty-Notify) ;
4364      return ;
4365   }
4366   DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
4367 
4368   int Policy = Knob_MoveNotifyee ;
4369 
4370   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
4371   ObjectWaiter * iterator = DequeueWaiter() ;
4372   if (iterator != NULL) {
4373      TEVENT (Notify1 - Transfer) ;
4374      guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
4375      guarantee (iterator->_notified == 0, "invariant") ;
4376      // Disposition - what might we do with iterator ?
4377      // a.  add it directly to the EntryList - either tail or head.
4378      // b.  push it onto the front of the _cxq.
4379      // For now we use (a).
4380      if (Policy != 4) {
4381         iterator->TState = ObjectWaiter::TS_ENTER ;
4382      }
4383      iterator->_notified = 1 ;
4384 
4385      ObjectWaiter * List = _EntryList ;
4386      if (List != NULL) {
4387         assert (List->_prev == NULL, "invariant") ;
4388         assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
4389         assert (List != iterator, "invariant") ;
4390      }
4391 
4392      if (Policy == 0) {       // prepend to EntryList
4393          if (List == NULL) {
4394              iterator->_next = iterator->_prev = NULL ;
4395              _EntryList = iterator ;
4396          } else {
4397              List->_prev = iterator ;
4398              iterator->_next = List ;
4399              iterator->_prev = NULL ;
4400              _EntryList = iterator ;
4401         }
4402      } else
4403      if (Policy == 1) {      // append to EntryList
4404          if (List == NULL) {
4405              iterator->_next = iterator->_prev = NULL ;
4406              _EntryList = iterator ;
4407          } else {
4408             // CONSIDER:  finding the tail currently requires a linear-time walk of
4409             // the EntryList.  We can make tail access constant-time by converting to
4410             // a CDLL instead of using our current DLL.
4411             ObjectWaiter * Tail ;
4412             for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
4413             assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
4414             Tail->_next = iterator ;
4415             iterator->_prev = Tail ;
4416             iterator->_next = NULL ;
4417         }
4418      } else
4419      if (Policy == 2) {      // prepend to cxq
4420          // prepend to cxq
4421          if (List == NULL) {
4422              iterator->_next = iterator->_prev = NULL ;
4423              _EntryList = iterator ;
4424          } else {
4425             iterator->TState = ObjectWaiter::TS_CXQ ;
4426             for (;;) {
4427                 ObjectWaiter * Front = _cxq ;
4428                 iterator->_next = Front ;
4429                 if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
4430                     break ;
4431                 }
4432             }
4433          }
4434      } else
4435      if (Policy == 3) {      // append to cxq
4436         iterator->TState = ObjectWaiter::TS_CXQ ;
4437         for (;;) {
4438             ObjectWaiter * Tail ;
4439             Tail = _cxq ;
4440             if (Tail == NULL) {
4441                 iterator->_next = NULL ;
4442                 if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
4443                    break ;
4444                 }
4445             } else {
4446                 while (Tail->_next != NULL) Tail = Tail->_next ;
4447                 Tail->_next = iterator ;
4448                 iterator->_prev = Tail ;
4449                 iterator->_next = NULL ;
4450                 break ;
4451             }
4452         }
4453      } else {
4454         ParkEvent * ev = iterator->_event ;
4455         iterator->TState = ObjectWaiter::TS_RUN ;
4456         OrderAccess::fence() ;
4457         ev->unpark() ;
4458      }
4459 
4460      if (Policy < 4) {
4461        iterator->wait_reenter_begin(this);
4462      }
4463 
4464      // _WaitSetLock protects the wait queue, not the EntryList.  We could
4465      // move the add-to-EntryList operation, above, outside the critical section
4466      // protected by _WaitSetLock.  In practice that's not useful.  With the
4467      // exception of  wait() timeouts and interrupts the monitor owner
4468      // is the only thread that grabs _WaitSetLock.  There's almost no contention
4469      // on _WaitSetLock so it's not profitable to reduce the length of the
4470      // critical section.
4471   }
4472 
4473   Thread::SpinRelease (&_WaitSetLock) ;
4474 
4475   if (iterator != NULL && ObjectSynchronizer::_sync_Notifications != NULL) {
4476      ObjectSynchronizer::_sync_Notifications->inc() ;
4477   }
4478 }
4479 
4480 
4481 void ObjectMonitor::notifyAll(TRAPS) {
4482   CHECK_OWNER();
4483   ObjectWaiter* iterator;
4484   if (_WaitSet == NULL) {
4485       TEVENT (Empty-NotifyAll) ;
4486       return ;
4487   }
4488   DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
4489 
4490   int Policy = Knob_MoveNotifyee ;
4491   int Tally = 0 ;
4492   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
4493 
4494   for (;;) {
4495      iterator = DequeueWaiter () ;
4496      if (iterator == NULL) break ;
4497      TEVENT (NotifyAll - Transfer1) ;
4498      ++Tally ;
4499 
4500      // Disposition - what might we do with iterator ?
4501      // a.  add it directly to the EntryList - either tail or head.
4502      // b.  push it onto the front of the _cxq.
4503      // For now we use (a).
4504      //
4505      // TODO-FIXME: currently notifyAll() transfers the waiters one-at-a-time from the waitset
4506      // to the EntryList.  This could be done more efficiently with a single bulk transfer,
4507      // but in practice it's not time-critical.  Beware too, that in prepend-mode we invert the
4508      // order of the waiters.  Let's say that the waitset is "ABCD" and the EntryList is "XYZ".
4509      // After a notifyAll() in prepend mode the waitset will be empty and the EntryList will
4510      // be "DCBAXYZ".
4511 
4512      guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
4513      guarantee (iterator->_notified == 0, "invariant") ;
4514      iterator->_notified = 1 ;
4515      if (Policy != 4) {
4516         iterator->TState = ObjectWaiter::TS_ENTER ;
4517      }
4518 
4519      ObjectWaiter * List = _EntryList ;
4520      if (List != NULL) {
4521         assert (List->_prev == NULL, "invariant") ;
4522         assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
4523         assert (List != iterator, "invariant") ;
4524      }
4525 
4526      if (Policy == 0) {       // prepend to EntryList
4527          if (List == NULL) {
4528              iterator->_next = iterator->_prev = NULL ;
4529              _EntryList = iterator ;
4530          } else {
4531              List->_prev = iterator ;
4532              iterator->_next = List ;
4533              iterator->_prev = NULL ;
4534              _EntryList = iterator ;
4535         }
4536      } else
4537      if (Policy == 1) {      // append to EntryList
4538          if (List == NULL) {
4539              iterator->_next = iterator->_prev = NULL ;
4540              _EntryList = iterator ;
4541          } else {
4542             // CONSIDER:  finding the tail currently requires a linear-time walk of
4543             // the EntryList.  We can make tail access constant-time by converting to
4544             // a CDLL instead of using our current DLL.
4545             ObjectWaiter * Tail ;
4546             for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
4547             assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
4548             Tail->_next = iterator ;
4549             iterator->_prev = Tail ;
4550             iterator->_next = NULL ;
4551         }
4552      } else
4553      if (Policy == 2) {      // prepend to cxq
4554          // prepend to cxq
4555          iterator->TState = ObjectWaiter::TS_CXQ ;
4556          for (;;) {
4557              ObjectWaiter * Front = _cxq ;
4558              iterator->_next = Front ;
4559              if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
4560                  break ;
4561              }
4562          }
4563      } else
4564      if (Policy == 3) {      // append to cxq
4565         iterator->TState = ObjectWaiter::TS_CXQ ;
4566         for (;;) {
4567             ObjectWaiter * Tail ;
4568             Tail = _cxq ;
4569             if (Tail == NULL) {
4570                 iterator->_next = NULL ;
4571                 if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
4572                    break ;
4573                 }
4574             } else {
4575                 while (Tail->_next != NULL) Tail = Tail->_next ;
4576                 Tail->_next = iterator ;
4577                 iterator->_prev = Tail ;
4578                 iterator->_next = NULL ;
4579                 break ;
4580             }
4581         }
4582      } else {
4583         ParkEvent * ev = iterator->_event ;
4584         iterator->TState = ObjectWaiter::TS_RUN ;
4585         OrderAccess::fence() ;
4586         ev->unpark() ;
4587      }
4588 
4589      if (Policy < 4) {
4590        iterator->wait_reenter_begin(this);
4591      }
4592 
4593      // _WaitSetLock protects the wait queue, not the EntryList.  We could
4594      // move the add-to-EntryList operation, above, outside the critical section
4595      // protected by _WaitSetLock.  In practice that's not useful.  With the
4596      // exception of  wait() timeouts and interrupts the monitor owner
4597      // is the only thread that grabs _WaitSetLock.  There's almost no contention
4598      // on _WaitSetLock so it's not profitable to reduce the length of the
4599      // critical section.
4600   }
4601 
4602   Thread::SpinRelease (&_WaitSetLock) ;
4603 
4604   if (Tally != 0 && ObjectSynchronizer::_sync_Notifications != NULL) {
4605      ObjectSynchronizer::_sync_Notifications->inc(Tally) ;
4606   }
4607 }
4608 
4609 // check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
4610 // TODO-FIXME: remove check_slow() -- it's likely dead.
4611 
4612 void ObjectMonitor::check_slow(TRAPS) {
4613   TEVENT (check_slow - throw IMSX) ;
4614   assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
4615   THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
4616 }
4617 
4618 
4619 // -------------------------------------------------------------------------
4620 // The raw monitor subsystem is entirely distinct from normal
4621 // java-synchronization or jni-synchronization.  raw monitors are not
4622 // associated with objects.  They can be implemented in any manner
4623 // that makes sense.  The original implementors decided to piggy-back
4624 // the raw-monitor implementation on the existing Java objectMonitor mechanism.
4625 // This flaw needs to be fixed.  We should reimplement raw monitors as sui generis.
4626 // Specifically, we should not implement raw monitors via java monitors.
4627 // Time permitting, we should disentangle and deconvolve the two implementations
4628 // and move the resulting raw monitor implementation over to the JVMTI directories.
4629 // Ideally, the raw monitor implementation would be built on top of
4630 // park-unpark and nothing else.
4631 //
4632 // raw monitors are used mainly by JVMTI
4633 // The raw monitor implementation borrows the ObjectMonitor structure,
4634 // but the operators are degenerate and extremely simple.
4635 //
4636 // Mixed use of a single objectMonitor instance -- as both a raw monitor
4637 // and a normal java monitor -- is not permissible.
4638 //
4639 // Note that we use the single RawMonitor_lock to protect queue operations for
4640 // _all_ raw monitors.  This is a scalability impediment, but since raw monitor usage
4641 // is deprecated and rare, this is not of concern.  The RawMonitor_lock can not
4642 // be held indefinitely.  The critical sections must be short and bounded.
4643 //
4644 // -------------------------------------------------------------------------
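// For orientation, a hedged sketch of how a JVMTI agent typically drives these
// primitives.  The JVMTI Raw Monitor functions ultimately bottom out in the
// raw_enter/raw_exit/raw_wait/raw_notify operators below; the agent-side code is
// illustrative only and "ready" is an arbitrary agent-defined condition:
//
//   jrawMonitorID mon ;
//   jvmti->CreateRawMonitor("agent-lock", &mon) ;
//   jvmti->RawMonitorEnter(mon) ;
//   while (!ready) jvmti->RawMonitorWait(mon, 0 /* wait indefinitely */) ;
//   jvmti->RawMonitorExit(mon) ;
//   ... and eventually jvmti->DestroyRawMonitor(mon) ;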
4645 
4646 int ObjectMonitor::SimpleEnter (Thread * Self) {
4647   for (;;) {
4648     if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
4649        return OS_OK ;
4650     }
4651 
4652     ObjectWaiter Node (Self) ;
4653     Self->_ParkEvent->reset() ;     // strictly optional
4654     Node.TState = ObjectWaiter::TS_ENTER ;
4655 
4656     RawMonitor_lock->lock_without_safepoint_check() ;
4657     Node._next  = _EntryList ;
4658     _EntryList  = &Node ;
4659     OrderAccess::fence() ;
4660     if (_owner == NULL && Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
4661         _EntryList = Node._next ;
4662         RawMonitor_lock->unlock() ;
4663         return OS_OK ;
4664     }
4665     RawMonitor_lock->unlock() ;
4666     while (Node.TState == ObjectWaiter::TS_ENTER) {
4667        Self->_ParkEvent->park() ;
4668     }
4669   }
4670 }
4671 
4672 int ObjectMonitor::SimpleExit (Thread * Self) {
4673   guarantee (_owner == Self, "invariant") ;
4674   OrderAccess::release_store_ptr (&_owner, NULL) ;
4675   OrderAccess::fence() ;
4676   if (_EntryList == NULL) return OS_OK ;
4677   ObjectWaiter * w ;
4678 
4679   RawMonitor_lock->lock_without_safepoint_check() ;
4680   w = _EntryList ;
4681   if (w != NULL) {
4682       _EntryList = w->_next ;
4683   }
4684   RawMonitor_lock->unlock() ;
4685   if (w != NULL) {
4686       guarantee (w ->TState == ObjectWaiter::TS_ENTER, "invariant") ;
4687       ParkEvent * ev = w->_event ;
4688       w->TState = ObjectWaiter::TS_RUN ;
4689       OrderAccess::fence() ;
4690       ev->unpark() ;
4691   }
4692   return OS_OK ;
4693 }
4694 
4695 int ObjectMonitor::SimpleWait (Thread * Self, jlong millis) {
4696   guarantee (_owner == Self  , "invariant") ;
4697   guarantee (_recursions == 0, "invariant") ;
4698 
4699   ObjectWaiter Node (Self) ;
4700   Node._notified = 0 ;
4701   Node.TState    = ObjectWaiter::TS_WAIT ;
4702 
4703   RawMonitor_lock->lock_without_safepoint_check() ;
4704   Node._next     = _WaitSet ;
4705   _WaitSet       = &Node ;
4706   RawMonitor_lock->unlock() ;
4707 
4708   SimpleExit (Self) ;
4709   guarantee (_owner != Self, "invariant") ;
4710 
4711   int ret = OS_OK ;
4712   if (millis <= 0) {
4713     Self->_ParkEvent->park();
4714   } else {
4715     ret = Self->_ParkEvent->park(millis);
4716   }
4717 
4718   // If thread still resides on the waitset then unlink it.
4719   // Double-checked locking -- the usage is safe in this context
4720   // as TState is volatile and the lock-unlock operators are
4721   // serializing (barrier-equivalent).
4722 
4723   if (Node.TState == ObjectWaiter::TS_WAIT) {
4724     RawMonitor_lock->lock_without_safepoint_check() ;
4725     if (Node.TState == ObjectWaiter::TS_WAIT) {
4726       // Simple O(n) unlink, but performance isn't critical here.
4727       ObjectWaiter * p ;
4728       ObjectWaiter * q = NULL ;
4729       for (p = _WaitSet ; p != &Node; p = p->_next) {
4730          q = p ;
4731       }
4732       guarantee (p == &Node, "invariant") ;
4733       if (q == NULL) {
4734         guarantee (p == _WaitSet, "invariant") ;
4735         _WaitSet = p->_next ;
4736       } else {
4737         guarantee (p == q->_next, "invariant") ;
4738         q->_next = p->_next ;
4739       }
4740       Node.TState = ObjectWaiter::TS_RUN ;
4741     }
4742     RawMonitor_lock->unlock() ;
4743   }
4744 
4745   guarantee (Node.TState == ObjectWaiter::TS_RUN, "invariant") ;
4746   SimpleEnter (Self) ;
4747 
4748   guarantee (_owner == Self, "invariant") ;
4749   guarantee (_recursions == 0, "invariant") ;
4750   return ret ;
4751 }
4752 
4753 int ObjectMonitor::SimpleNotify (Thread * Self, bool All) {
4754   guarantee (_owner == Self, "invariant") ;
4755   if (_WaitSet == NULL) return OS_OK ;
4756 
4757   // We have two options:
4758   // A. Transfer the threads from the WaitSet to the EntryList
4759   // B. Remove the thread from the WaitSet and unpark() it.
4760   //
4761   // We use (B), which is crude and results in lots of futile
4762   // context switching.  In particular (B) induces lots of contention.
4763 
4764   ParkEvent * ev = NULL ;       // consider using a small auto array ...
4765   RawMonitor_lock->lock_without_safepoint_check() ;
4766   for (;;) {
4767       ObjectWaiter * w = _WaitSet ;
4768       if (w == NULL) break ;
4769       _WaitSet = w->_next ;
4770       if (ev != NULL) { ev->unpark(); ev = NULL; }
4771       ev = w->_event ;
4772       OrderAccess::loadstore() ;
4773       w->TState = ObjectWaiter::TS_RUN ;
4774       OrderAccess::storeload();
4775       if (!All) break ;
4776   }
4777   RawMonitor_lock->unlock() ;
4778   if (ev != NULL) ev->unpark();
4779   return OS_OK ;
4780 }
4781 
4782 // Any JavaThread will enter here with state _thread_blocked
4783 int ObjectMonitor::raw_enter(TRAPS) {
4784   TEVENT (raw_enter) ;
4785   void * Contended ;
4786 
4787   // Don't enter the raw monitor if the thread is being externally suspended; it would
4788   // surprise the suspender if a "suspended" thread could still enter a monitor.
4789   JavaThread * jt = (JavaThread *)THREAD;
4790   if (THREAD->is_Java_thread()) {
4791     jt->SR_lock()->lock_without_safepoint_check();
4792     while (jt->is_external_suspend()) {
4793       jt->SR_lock()->unlock();
4794       jt->java_suspend_self();
4795       jt->SR_lock()->lock_without_safepoint_check();
4796     }
4797     // guarded by SR_lock to avoid racing with new external suspend requests.
4798     Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
4799     jt->SR_lock()->unlock();
4800   } else {
4801     Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
4802   }
4803 
4804   if (Contended == THREAD) {
4805      _recursions ++ ;
4806      return OM_OK ;
4807   }
4808 
4809   if (Contended == NULL) {
4810      guarantee (_owner == THREAD, "invariant") ;
4811      guarantee (_recursions == 0, "invariant") ;
4812      return OM_OK ;
4813   }
4814 
4815   THREAD->set_current_pending_monitor(this);
4816 
4817   if (!THREAD->is_Java_thread()) {
4818      // No non-Java thread other than the VM thread
4819      // would acquire a raw monitor.
4820      assert(THREAD->is_VM_thread(), "must be VM thread");
4821      SimpleEnter (THREAD) ;
4822    } else {
4823      guarantee (jt->thread_state() == _thread_blocked, "invariant") ;
4824      for (;;) {
4825        jt->set_suspend_equivalent();
4826        // cleared by handle_special_suspend_equivalent_condition() or
4827        // java_suspend_self()
4828        SimpleEnter (THREAD) ;
4829 
4830        // were we externally suspended while we were waiting?
4831        if (!jt->handle_special_suspend_equivalent_condition()) break ;
4832 
4833        // This thread was externally suspended
4834        //
4835        // This logic isn't needed for JVMTI raw monitors,
4836        // but doesn't hurt just in case the suspend rules change. This
4837        // logic is needed for the ObjectMonitor.wait() reentry phase.
4838        // We have reentered the contended monitor, but while we were
4839        // waiting another thread suspended us. We don't want to reenter
4840        // the monitor while suspended because that would surprise the
4841        // thread that suspended us.
4842        //
4843        // Drop the lock -
4844        SimpleExit (THREAD) ;
4845 
4846        jt->java_suspend_self();
4847      }
4848 
4849      assert(_owner == THREAD, "Fatal error with monitor owner!");
4850      assert(_recursions == 0, "Fatal error with monitor recursions!");
4851   }
4852 
4853   THREAD->set_current_pending_monitor(NULL);
4854   guarantee (_recursions == 0, "invariant") ;
4855   return OM_OK;
4856 }
4857 
4858 // Used mainly for JVMTI raw monitor implementation
4859 // Also used for ObjectMonitor::wait().
4860 int ObjectMonitor::raw_exit(TRAPS) {
4861   TEVENT (raw_exit) ;
4862   if (THREAD != _owner) {
4863     return OM_ILLEGAL_MONITOR_STATE;
4864   }
4865   if (_recursions > 0) {
4866     --_recursions ;
4867     return OM_OK ;
4868   }
4869 
4870   void * List = _EntryList ;
4871   SimpleExit (THREAD) ;
4872 
4873   return OM_OK;
4874 }

// Used for the JVMTI raw monitor implementation.
// All JavaThreads will enter here with state _thread_blocked.

int ObjectMonitor::raw_wait(jlong millis, bool interruptible, TRAPS) {
  TEVENT (raw_wait) ;
  if (THREAD != _owner) {
    return OM_ILLEGAL_MONITOR_STATE;
  }

  // To avoid spurious wakeups we reset the ParkEvent.  This is strictly
  // optional: the caller must be able to tolerate spurious returns from
  // raw_wait().
  THREAD->_ParkEvent->reset() ;
  OrderAccess::fence() ;

  // Check the interrupt state before parking.
  if (interruptible && Thread::is_interrupted(THREAD, true)) {
    return OM_INTERRUPTED;
  }

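  // Flatten the recursion count across the wait: save it, drop it to zero
  // while ownership is given up inside SimpleWait(), and restore it once the
  // monitor has been re-acquired.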
  intptr_t save = _recursions ;
  _recursions = 0 ;
  _waiters ++ ;
  if (THREAD->is_Java_thread()) {
    guarantee (((JavaThread *) THREAD)->thread_state() == _thread_blocked, "invariant") ;
    ((JavaThread *)THREAD)->set_suspend_equivalent();
  }
  int rv = SimpleWait (THREAD, millis) ;
  _recursions = save ;
  _waiters -- ;

  guarantee (THREAD == _owner, "invariant") ;
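  // If an external suspend request arrived while we were waiting, honor it now
  // that the monitor has been re-acquired: release the monitor, self-suspend,
  // then re-enter and re-arm the suspend-equivalent state before re-checking.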
  if (THREAD->is_Java_thread()) {
     JavaThread * jSelf = (JavaThread *) THREAD ;
     for (;;) {
        if (!jSelf->handle_special_suspend_equivalent_condition()) break ;
        SimpleExit (THREAD) ;
        jSelf->java_suspend_self();
        SimpleEnter (THREAD) ;
        jSelf->set_suspend_equivalent() ;
     }
  }
  guarantee (THREAD == _owner, "invariant") ;

  if (interruptible && Thread::is_interrupted(THREAD, true)) {
    return OM_INTERRUPTED;
  }
  return OM_OK ;
}

int ObjectMonitor::raw_notify(TRAPS) {
  TEVENT (raw_notify) ;
  if (THREAD != _owner) {
    return OM_ILLEGAL_MONITOR_STATE;
  }
  SimpleNotify (THREAD, false) ;
  return OM_OK;
}

int ObjectMonitor::raw_notifyAll(TRAPS) {
  TEVENT (raw_notifyAll) ;
  if (THREAD != _owner) {
    return OM_ILLEGAL_MONITOR_STATE;
  }
  SimpleNotify (THREAD, true) ;
  return OM_OK;
}
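
// Illustrative sketch only (not part of this file): the classic wait/notify
// pairing at the JVMTI agent level, which exercises raw_wait() and
// raw_notify() above.  'jvmti' and 'mon' are assumed to be set up as in the
// earlier sketch; error checking is omitted.  Per the JVMTI spec,
// RawMonitorWait reports an interrupt as JVMTI_ERROR_INTERRUPT, which
// corresponds to raw_wait() returning OM_INTERRUPTED here.
//
//   // Waiting side:
//   jvmti->RawMonitorEnter(mon);
//   while (!ready) {
//     jvmti->RawMonitorWait(mon, 0 /* 0 ms == wait forever */);
//   }
//   jvmti->RawMonitorExit(mon);
//
//   // Notifying side:
//   jvmti->RawMonitorEnter(mon);
//   ready = true;
//   jvmti->RawMonitorNotify(mon);
//   jvmti->RawMonitorExit(mon);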

#ifndef PRODUCT
void ObjectMonitor::verify() {
}

void ObjectMonitor::print() {
}
#endif

//------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

void ObjectSynchronizer::trace_locking(Handle locking_obj, bool is_compiled,
                                       bool is_method, bool is_locking) {
  // Nothing to trace here yet -- lock tracing is not implemented.
}

// Verify all monitors in the monitor cache; the verification is weak.
void ObjectSynchronizer::verify() {
  ObjectMonitor* block = gBlockList;
  ObjectMonitor* mid;
  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    // Slot 0 of each block is the chain header, so start at index 1.
    for (int i = 1; i < _BLOCKSIZE; i++) {
      mid = block + i;
      oop object = (oop) mid->object();
      if (object != NULL) {
        mid->verify();
      }
    }
    block = (ObjectMonitor*) block->FreeNext;
  }
}

// Check if monitor belongs to the monitor cache.
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  ObjectMonitor* block = gBlockList;

  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
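    // block[0] is the chain header itself, so a real monitor from this block
    // lies strictly between &block[0] and &block[_BLOCKSIZE], and its offset
    // from the block base must be a whole number of ObjectMonitors.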
    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
      address mon = (address) monitor;
      address blk = (address) block;
      size_t diff = mon - blk;
      assert((diff % sizeof(ObjectMonitor)) == 0, "check");
      return 1;
    }
    block = (ObjectMonitor*) block->FreeNext;
  }
  return 0;
}

#endif