1 /*
   2  * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/vmSymbols.hpp"
  27 #include "memory/padded.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "oops/markOop.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/atomic.inline.hpp"
  32 #include "runtime/biasedLocking.hpp"
  33 #include "runtime/handles.inline.hpp"
  34 #include "runtime/interfaceSupport.hpp"
  35 #include "runtime/mutexLocker.hpp"
  36 #include "runtime/objectMonitor.hpp"
  37 #include "runtime/objectMonitor.inline.hpp"
  38 #include "runtime/osThread.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "runtime/synchronizer.hpp"
  41 #include "runtime/thread.inline.hpp"
  42 #include "utilities/dtrace.hpp"
  43 #include "utilities/events.hpp"
  44 #include "utilities/preserveException.hpp"
  45 
  46 #if defined(__GNUC__) && !defined(PPC64)
  47 // Need to inhibit inlining for older versions of GCC to avoid build-time failures
  48   #define NOINLINE __attribute__((noinline))
  49 #else
  50   #define NOINLINE
  51 #endif
  52 
  53 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  54 
  55 // The "core" versions of monitor enter and exit reside in this file.
  56 // The interpreter and compilers contain specialized transliterated
  57 // variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
  58 // for instance.  If you make changes here, make sure to modify the
  59 // interpreter, and both C1 and C2 fast-path inline locking code emission.
  60 //
  61 // -----------------------------------------------------------------------------
  62 
  63 #ifdef DTRACE_ENABLED
  64 
  65 // Only bother with this argument setup if dtrace is available
  66 // TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.
  67 
  68 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  69   char* bytes = NULL;                                                      \
  70   int len = 0;                                                             \
  71   jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  72   Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  73   if (klassname != NULL) {                                                 \
  74     bytes = (char*)klassname->bytes();                                     \
  75     len = klassname->utf8_length();                                        \
  76   }
  77 
  78 #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  79   {                                                                        \
  80     if (DTraceMonitorProbes) {                                             \
  81       DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
  82       HOTSPOT_MONITOR_WAIT(jtid,                                           \
  83                            (uintptr_t)(monitor), bytes, len, (millis));    \
  84     }                                                                      \
  85   }
  86 
  87 #define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED
  88 
  89 #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  90   {                                                                        \
  91     if (DTraceMonitorProbes) {                                             \
  92       DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
  93       HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
  94                                     (uintptr_t)(monitor), bytes, len);     \
  95     }                                                                      \
  96   }
  97 
  98 #else //  ndef DTRACE_ENABLED
  99 
 100 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
 101 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}
 102 
 103 #endif // ndef DTRACE_ENABLED
 104 
 105 // This exists only as a workaround of dtrace bug 6254741
 106 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
 107   DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
 108   return 0;
 109 }
 110 
 111 #define NINFLATIONLOCKS 256
 112 static volatile intptr_t InflationLocks[NINFLATIONLOCKS];
 113 
 114 // gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
 115 // want to expose the PaddedEnd template more than necessary.
 116 ObjectMonitor * ObjectSynchronizer::gBlockList = NULL;
 117 ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
 118 ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL;
 119 int ObjectSynchronizer::gOmInUseCount = 0;
 120 static volatile intptr_t ListLock = 0;      // protects global monitor free-list cache
 121 static volatile int MonitorFreeCount  = 0;  // # on gFreeList
 122 static volatile int MonitorPopulation = 0;  // # Extant -- in circulation
 123 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
 124 
 125 
 126 // =====================> Quick functions
 127 
 128 // The quick_* forms are special fast-path variants used to improve
 129 // performance.  In the simplest case, a "quick_*" implementation could
 130 // simply return false, in which case the caller will perform the necessary
 131 // state transitions and call the slow-path form.
 132 // The fast-path is designed to handle frequently arising cases in an efficient
 133 // manner and is just a degenerate "optimistic" variant of the slow-path.
 134 // returns true  -- to indicate the call was satisfied.
 135 // returns false -- to indicate the call needs the services of the slow-path.
 136 // A no-loitering ordinance is in effect for code in the quick_* family
 137 // operators: safepoints or indefinite blocking (blocking that might span a
 138 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
 139 // entry.
 140 
 141 // The LockNode emitted directly at the synchronization site would have
 142 // been too big if it were to have included support for the cases of inflated
 143 // recursive enter and exit, so they go here instead.
 144 // Note that we can't safely call AsyncPrintJavaStack() from within
 145 // quick_enter() as our thread state remains _in_Java.
 146 
 147 bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
 148                                      BasicLock * Lock) {
 149   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 150   assert(Self->is_Java_thread(), "invariant");
 151   assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
 152   No_Safepoint_Verifier nsv;
 153   if (obj == NULL) return false;       // Need to throw NPE
 154   const markOop mark = obj->mark();
 155 
 156   if (mark->has_monitor()) {
 157     ObjectMonitor * const m = mark->monitor();
 158     assert(m->object() == obj, "invariant");
 159     Thread * const owner = (Thread *) m->_owner;
 160 
 161     // Lock contention and Transactional Lock Elision (TLE) diagnostics
 162     // and observability
 163     // Case: light contention possibly amenable to TLE
 164     // Case: TLE inimical operations such as nested/recursive synchronization
 165 
 166     if (owner == Self) {
 167       m->_recursions++;
 168       return true;
 169     }
 170 
 171     if (owner == NULL &&
 172         Atomic::cmpxchg_ptr(Self, &(m->_owner), NULL) == NULL) {
 173       assert(m->_recursions == 0, "invariant");
 174       assert(m->_owner == Self, "invariant");
 175       return true;
 176     }
 177   }
 178 
 179   // Note that we could inflate in quick_enter.
 180   // This is likely a useful optimization
 181   // Critically, in quick_enter() we must not:
 182   // -- perform bias revocation, or
 183   // -- block indefinitely, or
 184   // -- reach a safepoint
 185 
 186   return false;        // revert to slow-path
 187 }
 188 
 189 // -----------------------------------------------------------------------------
 190 //  Fast Monitor Enter/Exit
 191 // This the fast monitor enter. The interpreter and compiler use
 192 // some assembly copies of this code. Make sure update those code
 193 // if the following function is changed. The implementation is
 194 // extremely sensitive to race condition. Be careful.
 195 
 196 void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
 197                                     bool attempt_rebias, TRAPS) {
 198   if (UseBiasedLocking) {
 199     if (!SafepointSynchronize::is_at_safepoint()) {
 200       BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
 201       if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
 202         return;
 203       }
 204     } else {
 205       assert(!attempt_rebias, "can not rebias toward VM thread");
 206       BiasedLocking::revoke_at_safepoint(obj);
 207     }
 208     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 209   }
 210 
 211   slow_enter(obj, lock, THREAD);
 212 }
 213 
 214 void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
 215   assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
 216   // if displaced header is null, the previous enter is recursive enter, no-op
 217   markOop dhw = lock->displaced_header();
 218   markOop mark;
 219   if (dhw == NULL) {
 220     // Recursive stack-lock.
 221     // Diagnostics -- Could be: stack-locked, inflating, inflated.
 222     mark = object->mark();
 223     assert(!mark->is_neutral(), "invariant");
 224     if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
 225       assert(THREAD->is_lock_owned((address)mark->locker()), "invariant");
 226     }
 227     if (mark->has_monitor()) {
 228       ObjectMonitor * m = mark->monitor();
 229       assert(((oop)(m->object()))->mark() == mark, "invariant");
 230       assert(m->is_entered(THREAD), "invariant");
 231     }
 232     return;
 233   }
 234 
 235   mark = object->mark();
 236 
 237   // If the object is stack-locked by the current thread, try to
 238   // swing the displaced header from the box back to the mark.
 239   if (mark == (markOop) lock) {
 240     assert(dhw->is_neutral(), "invariant");
 241     if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
 242       TEVENT(fast_exit: release stacklock);
 243       return;
 244     }
 245   }
 246 
 247   ObjectSynchronizer::inflate(THREAD, object)->exit(true, THREAD);
 248 }
 249 
 250 // -----------------------------------------------------------------------------
 251 // Interpreter/Compiler Slow Case
 252 // This routine is used to handle interpreter/compiler slow case
 253 // We don't need to use fast path here, because it must have been
 254 // failed in the interpreter/compiler code.
 255 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
 256   markOop mark = obj->mark();
 257   assert(!mark->has_bias_pattern(), "should not see bias pattern here");
 258 
 259   if (mark->is_neutral()) {
 260     // Anticipate successful CAS -- the ST of the displaced mark must
 261     // be visible <= the ST performed by the CAS.
 262     lock->set_displaced_header(mark);
 263     if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
 264       TEVENT(slow_enter: release stacklock);
 265       return;
 266     }
 267     // Fall through to inflate() ...
 268   } else if (mark->has_locker() &&
 269              THREAD->is_lock_owned((address)mark->locker())) {
 270     assert(lock != mark->locker(), "must not re-lock the same lock");
 271     assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
 272     lock->set_displaced_header(NULL);
 273     return;
 274   }
 275 
 276   // The object header will never be displaced to this lock,
 277   // so it does not matter what the value is, except that it
 278   // must be non-zero to avoid looking like a re-entrant lock,
 279   // and must not look locked either.
 280   lock->set_displaced_header(markOopDesc::unused_mark());
 281   ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
 282 }
 283 
 284 // This routine is used to handle interpreter/compiler slow case
 285 // We don't need to use fast path here, because it must have
 286 // failed in the interpreter/compiler code. Simply use the heavy
 287 // weight monitor should be ok, unless someone find otherwise.
 288 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
 289   fast_exit(object, lock, THREAD);
 290 }
 291 
 292 // -----------------------------------------------------------------------------
 293 // Class Loader  support to workaround deadlocks on the class loader lock objects
 294 // Also used by GC
 295 // complete_exit()/reenter() are used to wait on a nested lock
 296 // i.e. to give up an outer lock completely and then re-enter
 297 // Used when holding nested locks - lock acquisition order: lock1 then lock2
 298 //  1) complete_exit lock1 - saving recursion count
 299 //  2) wait on lock2
 300 //  3) when notified on lock2, unlock lock2
 301 //  4) reenter lock1 with original recursion count
 302 //  5) lock lock2
 303 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 304 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
 305   TEVENT(complete_exit);
 306   if (UseBiasedLocking) {
 307     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 308     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 309   }
 310 
 311   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
 312 
 313   return monitor->complete_exit(THREAD);
 314 }
 315 
 316 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 317 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
 318   TEVENT(reenter);
 319   if (UseBiasedLocking) {
 320     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 321     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 322   }
 323 
 324   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
 325 
 326   monitor->reenter(recursion, THREAD);
 327 }
 328 // -----------------------------------------------------------------------------
 329 // JNI locks on java objects
 330 // NOTE: must use heavy weight monitor to handle jni monitor enter
 331 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
 332   // the current locking is from JNI instead of Java code
 333   TEVENT(jni_enter);
 334   if (UseBiasedLocking) {
 335     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 336     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 337   }
 338   THREAD->set_current_pending_monitor_is_from_java(false);
 339   ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
 340   THREAD->set_current_pending_monitor_is_from_java(true);
 341 }
 342 
 343 // NOTE: must use heavy weight monitor to handle jni monitor exit
 344 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
 345   TEVENT(jni_exit);
 346   if (UseBiasedLocking) {
 347     Handle h_obj(THREAD, obj);
 348     BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
 349     obj = h_obj();
 350   }
 351   assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 352 
 353   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
 354   // If this thread has locked the object, exit the monitor.  Note:  can't use
 355   // monitor->check(CHECK); must exit even if an exception is pending.
 356   if (monitor->check(THREAD)) {
 357     monitor->exit(true, THREAD);
 358   }
 359 }
 360 
 361 // -----------------------------------------------------------------------------
 362 // Internal VM locks on java objects
 363 // standard constructor, allows locking failures
 364 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
 365   _dolock = doLock;
 366   _thread = thread;
 367   debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
 368   _obj = obj;
 369 
 370   if (_dolock) {
 371     TEVENT(ObjectLocker);
 372 
 373     ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
 374   }
 375 }
 376 
 377 ObjectLocker::~ObjectLocker() {
 378   if (_dolock) {
 379     ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
 380   }
 381 }
 382 
 383 
 384 // -----------------------------------------------------------------------------
 385 //  Wait/Notify/NotifyAll
 386 // NOTE: must use heavy weight monitor to handle wait()
 387 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 388   if (UseBiasedLocking) {
 389     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 390     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 391   }
 392   if (millis < 0) {
 393     TEVENT(wait - throw IAX);
 394     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 395   }
 396   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
 397   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
 398   monitor->wait(millis, true, THREAD);
 399 
 400   // This dummy call is in place to get around dtrace bug 6254741.  Once
 401   // that's fixed we can uncomment the following line, remove the call
 402   // and change this function back into a "void" func.
 403   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 404   return dtrace_waited_probe(monitor, obj, THREAD);
 405 }
 406 
 407 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
 408   if (UseBiasedLocking) {
 409     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 410     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 411   }
 412   if (millis < 0) {
 413     TEVENT(wait - throw IAX);
 414     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 415   }
 416   ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD);
 417 }
 418 
 419 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 420   if (UseBiasedLocking) {
 421     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 422     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 423   }
 424 
 425   markOop mark = obj->mark();
 426   if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
 427     return;
 428   }
 429   ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
 430 }
 431 
 432 // NOTE: see comment of notify()
 433 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 434   if (UseBiasedLocking) {
 435     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 436     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 437   }
 438 
 439   markOop mark = obj->mark();
 440   if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
 441     return;
 442   }
 443   ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
 444 }
 445 
 446 // -----------------------------------------------------------------------------
 447 // Hash Code handling
 448 //
 449 // Performance concern:
 450 // OrderAccess::storestore() calls release() which at one time stored 0
 451 // into the global volatile OrderAccess::dummy variable. This store was
 452 // unnecessary for correctness. Many threads storing into a common location
 453 // causes considerable cache migration or "sloshing" on large SMP systems.
 454 // As such, I avoided using OrderAccess::storestore(). In some cases
 455 // OrderAccess::fence() -- which incurs local latency on the executing
 456 // processor -- is a better choice as it scales on SMP systems.
 457 //
 458 // See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
 459 // a discussion of coherency costs. Note that all our current reference
 460 // platforms provide strong ST-ST order, so the issue is moot on IA32,
 461 // x64, and SPARC.
 462 //
 463 // As a general policy we use "volatile" to control compiler-based reordering
 464 // and explicit fences (barriers) to control for architectural reordering
 465 // performed by the CPU(s) or platform.
 466 
 467 struct SharedGlobals {
 468   char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
 469   // These are highly shared mostly-read variables.
 470   // To avoid false-sharing they need to be the sole occupants of a cache line.
 471   volatile int stwRandom;
 472   volatile int stwCycle;
 473   DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
 474   // Hot RW variable -- Sequester to avoid false-sharing
 475   volatile int hcSequence;
 476   DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
 477 };
 478 
 479 static SharedGlobals GVars;
 480 static int MonitorScavengeThreshold = 1000000;
 481 static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending
 482 
 483 static markOop ReadStableMark(oop obj) {
 484   markOop mark = obj->mark();
 485   if (!mark->is_being_inflated()) {
 486     return mark;       // normal fast-path return
 487   }
 488 
 489   int its = 0;
 490   for (;;) {
 491     markOop mark = obj->mark();
 492     if (!mark->is_being_inflated()) {
 493       return mark;    // normal fast-path return
 494     }
 495 
 496     // The object is being inflated by some other thread.
 497     // The caller of ReadStableMark() must wait for inflation to complete.
 498     // Avoid live-lock
 499     // TODO: consider calling SafepointSynchronize::do_call_back() while
 500     // spinning to see if there's a safepoint pending.  If so, immediately
 501     // yielding or blocking would be appropriate.  Avoid spinning while
 502     // there is a safepoint pending.
 503     // TODO: add inflation contention performance counters.
 504     // TODO: restrict the aggregate number of spinners.
 505 
 506     ++its;
 507     if (its > 10000 || !os::is_MP()) {
 508       if (its & 1) {
 509         os::naked_yield();
 510         TEVENT(Inflate: INFLATING - yield);
 511       } else {
 512         // Note that the following code attenuates the livelock problem but is not
 513         // a complete remedy.  A more complete solution would require that the inflating
 514         // thread hold the associated inflation lock.  The following code simply restricts
 515         // the number of spinners to at most one.  We'll have N-2 threads blocked
 516         // on the inflationlock, 1 thread holding the inflation lock and using
 517         // a yield/park strategy, and 1 thread in the midst of inflation.
 518         // A more refined approach would be to change the encoding of INFLATING
 519         // to allow encapsulation of a native thread pointer.  Threads waiting for
 520         // inflation to complete would use CAS to push themselves onto a singly linked
 521         // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
 522         // and calling park().  When inflation was complete the thread that accomplished inflation
 523         // would detach the list and set the markword to inflated with a single CAS and
 524         // then for each thread on the list, set the flag and unpark() the thread.
 525         // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
 526         // wakes at most one thread whereas we need to wake the entire list.
 527         int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
 528         int YieldThenBlock = 0;
 529         assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
 530         assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
 531         Thread::muxAcquire(InflationLocks + ix, "InflationLock");
 532         while (obj->mark() == markOopDesc::INFLATING()) {
 533           // Beware: NakedYield() is advisory and has almost no effect on some platforms
 534           // so we periodically call Self->_ParkEvent->park(1).
 535           // We use a mixed spin/yield/block mechanism.
 536           if ((YieldThenBlock++) >= 16) {
 537             Thread::current()->_ParkEvent->park(1);
 538           } else {
 539             os::naked_yield();
 540           }
 541         }
 542         Thread::muxRelease(InflationLocks + ix);
 543         TEVENT(Inflate: INFLATING - yield/park);
 544       }
 545     } else {
 546       SpinPause();       // SMP-polite spinning
 547     }
 548   }
 549 }
 550 
 551 // hashCode() generation :
 552 //
 553 // Possibilities:
 554 // * MD5Digest of {obj,stwRandom}
 555 // * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
 556 // * A DES- or AES-style SBox[] mechanism
 557 // * One of the Phi-based schemes, such as:
 558 //   2654435761 = 2^32 * Phi (golden ratio)
 559 //   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
 560 // * A variation of Marsaglia's shift-xor RNG scheme.
 561 // * (obj ^ stwRandom) is appealing, but can result
 562 //   in undesirable regularity in the hashCode values of adjacent objects
 563 //   (objects allocated back-to-back, in particular).  This could potentially
 564 //   result in hashtable collisions and reduced hashtable efficiency.
 565 //   There are simple ways to "diffuse" the middle address bits over the
 566 //   generated hashCode values:
 567 
 568 static inline intptr_t get_next_hash(Thread * Self, oop obj) {
 569   intptr_t value = 0;
 570   if (hashCode == 0) {
 571     // This form uses an unguarded global Park-Miller RNG,
 572     // so it's possible for two threads to race and generate the same RNG.
 573     // On MP system we'll have lots of RW access to a global, so the
 574     // mechanism induces lots of coherency traffic.
 575     value = os::random();
 576   } else if (hashCode == 1) {
 577     // This variation has the property of being stable (idempotent)
 578     // between STW operations.  This can be useful in some of the 1-0
 579     // synchronization schemes.
 580     intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
 581     value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
 582   } else if (hashCode == 2) {
 583     value = 1;            // for sensitivity testing
 584   } else if (hashCode == 3) {
 585     value = ++GVars.hcSequence;
 586   } else if (hashCode == 4) {
 587     value = cast_from_oop<intptr_t>(obj);
 588   } else {
 589     // Marsaglia's xor-shift scheme with thread-specific state
 590     // This is probably the best overall implementation -- we'll
 591     // likely make this the default in future releases.
 592     unsigned t = Self->_hashStateX;
 593     t ^= (t << 11);
 594     Self->_hashStateX = Self->_hashStateY;
 595     Self->_hashStateY = Self->_hashStateZ;
 596     Self->_hashStateZ = Self->_hashStateW;
 597     unsigned v = Self->_hashStateW;
 598     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
 599     Self->_hashStateW = v;
 600     value = v;
 601   }
 602 
 603   value &= markOopDesc::hash_mask;
 604   if (value == 0) value = 0xBAD;
 605   assert(value != markOopDesc::no_hash, "invariant");
 606   TEVENT(hashCode: GENERATE);
 607   return value;
 608 }
 609 
 610 intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
 611   if (UseBiasedLocking) {
 612     // NOTE: many places throughout the JVM do not expect a safepoint
 613     // to be taken here, in particular most operations on perm gen
 614     // objects. However, we only ever bias Java instances and all of
 615     // the call sites of identity_hash that might revoke biases have
 616     // been checked to make sure they can handle a safepoint. The
 617     // added check of the bias pattern is to avoid useless calls to
 618     // thread-local storage.
 619     if (obj->mark()->has_bias_pattern()) {
 620       // Handle for oop obj in case of STW safepoint
 621       Handle hobj(Self, obj);
 622       // Relaxing assertion for bug 6320749.
 623       assert(Universe::verify_in_progress() ||
 624              !SafepointSynchronize::is_at_safepoint(),
 625              "biases should not be seen by VM thread here");
 626       BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
 627       obj = hobj();
 628       assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 629     }
 630   }
 631 
 632   // hashCode() is a heap mutator ...
 633   // Relaxing assertion for bug 6320749.
 634   assert(Universe::verify_in_progress() ||
 635          !SafepointSynchronize::is_at_safepoint(), "invariant");
 636   assert(Universe::verify_in_progress() ||
 637          Self->is_Java_thread() , "invariant");
 638   assert(Universe::verify_in_progress() ||
 639          ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 640 
 641   ObjectMonitor* monitor = NULL;
 642   markOop temp, test;
 643   intptr_t hash;
 644   markOop mark = ReadStableMark(obj);
 645 
 646   // object should remain ineligible for biased locking
 647   assert(!mark->has_bias_pattern(), "invariant");
 648 
 649   if (mark->is_neutral()) {
 650     hash = mark->hash();              // this is a normal header
 651     if (hash) {                       // if it has hash, just return it
 652       return hash;
 653     }
 654     hash = get_next_hash(Self, obj);  // allocate a new hash code
 655     temp = mark->copy_set_hash(hash); // merge the hash code into header
 656     // use (machine word version) atomic operation to install the hash
 657     test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
 658     if (test == mark) {
 659       return hash;
 660     }
 661     // If atomic operation failed, we must inflate the header
 662     // into heavy weight monitor. We could add more code here
 663     // for fast path, but it does not worth the complexity.
 664   } else if (mark->has_monitor()) {
 665     monitor = mark->monitor();
 666     temp = monitor->header();
 667     assert(temp->is_neutral(), "invariant");
 668     hash = temp->hash();
 669     if (hash) {
 670       return hash;
 671     }
 672     // Skip to the following code to reduce code size
 673   } else if (Self->is_lock_owned((address)mark->locker())) {
 674     temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
 675     assert(temp->is_neutral(), "invariant");
 676     hash = temp->hash();              // by current thread, check if the displaced
 677     if (hash) {                       // header contains hash code
 678       return hash;
 679     }
 680     // WARNING:
 681     //   The displaced header is strictly immutable.
 682     // It can NOT be changed in ANY cases. So we have
 683     // to inflate the header into heavyweight monitor
 684     // even the current thread owns the lock. The reason
 685     // is the BasicLock (stack slot) will be asynchronously
 686     // read by other threads during the inflate() function.
 687     // Any change to stack may not propagate to other threads
 688     // correctly.
 689   }
 690 
 691   // Inflate the monitor to set hash code
 692   monitor = ObjectSynchronizer::inflate(Self, obj);
 693   // Load displaced header and check it has hash code
 694   mark = monitor->header();
 695   assert(mark->is_neutral(), "invariant");
 696   hash = mark->hash();
 697   if (hash == 0) {
 698     hash = get_next_hash(Self, obj);
 699     temp = mark->copy_set_hash(hash); // merge hash code into header
 700     assert(temp->is_neutral(), "invariant");
 701     test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
 702     if (test != mark) {
 703       // The only update to the header in the monitor (outside GC)
 704       // is install the hash code. If someone add new usage of
 705       // displaced header, please update this code
 706       hash = test->hash();
 707       assert(test->is_neutral(), "invariant");
 708       assert(hash != 0, "Trivial unexpected object/monitor header usage.");
 709     }
 710   }
 711   // We finally get the hash
 712   return hash;
 713 }
 714 
 715 // Deprecated -- use FastHashCode() instead.
 716 
 717 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
 718   return FastHashCode(Thread::current(), obj());
 719 }
 720 
 721 
 722 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
 723                                                    Handle h_obj) {
 724   if (UseBiasedLocking) {
 725     BiasedLocking::revoke_and_rebias(h_obj, false, thread);
 726     assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 727   }
 728 
 729   assert(thread == JavaThread::current(), "Can only be called on current thread");
 730   oop obj = h_obj();
 731 
 732   markOop mark = ReadStableMark(obj);
 733 
 734   // Uncontended case, header points to stack
 735   if (mark->has_locker()) {
 736     return thread->is_lock_owned((address)mark->locker());
 737   }
 738   // Contended case, header points to ObjectMonitor (tagged pointer)
 739   if (mark->has_monitor()) {
 740     ObjectMonitor* monitor = mark->monitor();
 741     return monitor->is_entered(thread) != 0;
 742   }
 743   // Unlocked case, header in place
 744   assert(mark->is_neutral(), "sanity check");
 745   return false;
 746 }
 747 
 748 // Be aware of this method could revoke bias of the lock object.
 749 // This method queries the ownership of the lock handle specified by 'h_obj'.
 750 // If the current thread owns the lock, it returns owner_self. If no
 751 // thread owns the lock, it returns owner_none. Otherwise, it will return
 752 // owner_other.
 753 ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
 754 (JavaThread *self, Handle h_obj) {
 755   // The caller must beware this method can revoke bias, and
 756   // revocation can result in a safepoint.
 757   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 758   assert(self->thread_state() != _thread_blocked, "invariant");
 759 
 760   // Possible mark states: neutral, biased, stack-locked, inflated
 761 
 762   if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
 763     // CASE: biased
 764     BiasedLocking::revoke_and_rebias(h_obj, false, self);
 765     assert(!h_obj->mark()->has_bias_pattern(),
 766            "biases should be revoked by now");
 767   }
 768 
 769   assert(self == JavaThread::current(), "Can only be called on current thread");
 770   oop obj = h_obj();
 771   markOop mark = ReadStableMark(obj);
 772 
 773   // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
 774   if (mark->has_locker()) {
 775     return self->is_lock_owned((address)mark->locker()) ?
 776       owner_self : owner_other;
 777   }
 778 
 779   // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
 780   // The Object:ObjectMonitor relationship is stable as long as we're
 781   // not at a safepoint.
 782   if (mark->has_monitor()) {
 783     void * owner = mark->monitor()->_owner;
 784     if (owner == NULL) return owner_none;
 785     return (owner == self ||
 786             self->is_lock_owned((address)owner)) ? owner_self : owner_other;
 787   }
 788 
 789   // CASE: neutral
 790   assert(mark->is_neutral(), "sanity check");
 791   return owner_none;           // it's unlocked
 792 }
 793 
 794 // FIXME: jvmti should call this
 795 JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
 796   if (UseBiasedLocking) {
 797     if (SafepointSynchronize::is_at_safepoint()) {
 798       BiasedLocking::revoke_at_safepoint(h_obj);
 799     } else {
 800       BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
 801     }
 802     assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 803   }
 804 
 805   oop obj = h_obj();
 806   address owner = NULL;
 807 
 808   markOop mark = ReadStableMark(obj);
 809 
 810   // Uncontended case, header points to stack
 811   if (mark->has_locker()) {
 812     owner = (address) mark->locker();
 813   }
 814 
 815   // Contended case, header points to ObjectMonitor (tagged pointer)
 816   if (mark->has_monitor()) {
 817     ObjectMonitor* monitor = mark->monitor();
 818     assert(monitor != NULL, "monitor should be non-null");
 819     owner = (address) monitor->owner();
 820   }
 821 
 822   if (owner != NULL) {
 823     // owning_thread_from_monitor_owner() may also return NULL here
 824     return Threads::owning_thread_from_monitor_owner(owner, doLock);
 825   }
 826 
 827   // Unlocked case, header in place
 828   // Cannot have assertion since this object may have been
 829   // locked by another thread when reaching here.
 830   // assert(mark->is_neutral(), "sanity check");
 831 
 832   return NULL;
 833 }
 834 // Visitors ...
 835 
 836 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
 837   PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;
 838   ObjectMonitor* mid;
 839   while (block) {
 840     assert(block->object() == CHAINMARKER, "must be a block header");
 841     for (int i = _BLOCKSIZE - 1; i > 0; i--) {
 842       mid = (ObjectMonitor *)(block + i);
 843       oop object = (oop) mid->object();
 844       if (object != NULL) {
 845         closure->do_monitor(mid);
 846       }
 847     }
 848     block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
 849   }
 850 }
 851 
 852 // Get the next block in the block list.
 853 static inline ObjectMonitor* next(ObjectMonitor* block) {
 854   assert(block->object() == CHAINMARKER, "must be a block header");
 855   block = block->FreeNext;
 856   assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
 857   return block;
 858 }
 859 
 860 
 861 void ObjectSynchronizer::oops_do(OopClosure* f) {
 862   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
 863   for (PaddedEnd<ObjectMonitor> * block =
 864        (PaddedEnd<ObjectMonitor> *)gBlockList; block != NULL;
 865        block = (PaddedEnd<ObjectMonitor> *)next(block)) {
 866     assert(block->object() == CHAINMARKER, "must be a block header");
 867     for (int i = 1; i < _BLOCKSIZE; i++) {
 868       ObjectMonitor* mid = (ObjectMonitor *)&block[i];
 869       if (mid->object() != NULL) {
 870         f->do_oop((oop*)mid->object_addr());
 871       }
 872     }
 873   }
 874 }
 875 
 876 
 877 // -----------------------------------------------------------------------------
 878 // ObjectMonitor Lifecycle
 879 // -----------------------
 880 // Inflation unlinks monitors from the global gFreeList and
 881 // associates them with objects.  Deflation -- which occurs at
 882 // STW-time -- disassociates idle monitors from objects.  Such
 883 // scavenged monitors are returned to the gFreeList.
 884 //
 885 // The global list is protected by ListLock.  All the critical sections
 886 // are short and operate in constant-time.
 887 //
 888 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
 889 //
 890 // Lifecycle:
 891 // --   unassigned and on the global free list
 892 // --   unassigned and on a thread's private omFreeList
 893 // --   assigned to an object.  The object is inflated and the mark refers
 894 //      to the objectmonitor.
 895 
 896 
 897 // Constraining monitor pool growth via MonitorBound ...
 898 //
 899 // The monitor pool is grow-only.  We scavenge at STW safepoint-time, but the
 900 // the rate of scavenging is driven primarily by GC.  As such,  we can find
 901 // an inordinate number of monitors in circulation.
 902 // To avoid that scenario we can artificially induce a STW safepoint
 903 // if the pool appears to be growing past some reasonable bound.
 904 // Generally we favor time in space-time tradeoffs, but as there's no
 905 // natural back-pressure on the # of extant monitors we need to impose some
 906 // type of limit.  Beware that if MonitorBound is set to too low a value
 907 // we could just loop. In addition, if MonitorBound is set to a low value
 908 // we'll incur more safepoints, which are harmful to performance.
 909 // See also: GuaranteedSafepointInterval
 910 //
 911 // The current implementation uses asynchronous VM operations.
 912 
 913 static void InduceScavenge(Thread * Self, const char * Whence) {
 914   // Induce STW safepoint to trim monitors
 915   // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
 916   // More precisely, trigger an asynchronous STW safepoint as the number
 917   // of active monitors passes the specified threshold.
 918   // TODO: assert thread state is reasonable
 919 
 920   if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
 921     if (ObjectMonitor::Knob_Verbose) {
 922       ::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ;
 923       ::fflush(stdout);
 924     }
 925     // Induce a 'null' safepoint to scavenge monitors
 926     // Must VM_Operation instance be heap allocated as the op will be enqueue and posted
 927     // to the VMthread and have a lifespan longer than that of this activation record.
 928     // The VMThread will delete the op when completed.
 929     VMThread::execute(new VM_ForceAsyncSafepoint());
 930 
 931     if (ObjectMonitor::Knob_Verbose) {
 932       ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ;
 933       ::fflush(stdout);
 934     }
 935   }
 936 }
 937 
 938 void ObjectSynchronizer::verifyInUse(Thread *Self) {
 939   ObjectMonitor* mid;
 940   int inusetally = 0;
 941   for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
 942     inusetally++;
 943   }
 944   assert(inusetally == Self->omInUseCount, "inuse count off");
 945 
 946   int freetally = 0;
 947   for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
 948     freetally++;
 949   }
 950   assert(freetally == Self->omFreeCount, "free count off");
 951 }
 952 
 953 ObjectMonitor * NOINLINE ObjectSynchronizer::omAlloc(Thread * Self) {
 954   // A large MAXPRIVATE value reduces both list lock contention
 955   // and list coherency traffic, but also tends to increase the
 956   // number of objectMonitors in circulation as well as the STW
 957   // scavenge costs.  As usual, we lean toward time in space-time
 958   // tradeoffs.
 959   const int MAXPRIVATE = 1024;
 960   for (;;) {
 961     ObjectMonitor * m;
 962 
 963     // 1: try to allocate from the thread's local omFreeList.
 964     // Threads will attempt to allocate first from their local list, then
 965     // from the global list, and only after those attempts fail will the thread
 966     // attempt to instantiate new monitors.   Thread-local free lists take
 967     // heat off the ListLock and improve allocation latency, as well as reducing
 968     // coherency traffic on the shared global list.
 969     m = Self->omFreeList;
 970     if (m != NULL) {
 971       Self->omFreeList = m->FreeNext;
 972       Self->omFreeCount--;
 973       // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
 974       guarantee(m->object() == NULL, "invariant");
 975       if (MonitorInUseLists) {
 976         m->FreeNext = Self->omInUseList;
 977         Self->omInUseList = m;
 978         Self->omInUseCount++;
 979         if (ObjectMonitor::Knob_VerifyInUse) {
 980           verifyInUse(Self);
 981         }
 982       } else {
 983         m->FreeNext = NULL;
 984       }
 985       return m;
 986     }
 987 
 988     // 2: try to allocate from the global gFreeList
 989     // CONSIDER: use muxTry() instead of muxAcquire().
 990     // If the muxTry() fails then drop immediately into case 3.
 991     // If we're using thread-local free lists then try
 992     // to reprovision the caller's free list.
 993     if (gFreeList != NULL) {
 994       // Reprovision the thread's omFreeList.
 995       // Use bulk transfers to reduce the allocation rate and heat
 996       // on various locks.
 997       Thread::muxAcquire(&ListLock, "omAlloc");
 998       for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
 999         MonitorFreeCount--;
1000         ObjectMonitor * take = gFreeList;
1001         gFreeList = take->FreeNext;
1002         guarantee(take->object() == NULL, "invariant");
1003         guarantee(!take->is_busy(), "invariant");
1004         take->Recycle();
1005         omRelease(Self, take, false);
1006       }
1007       Thread::muxRelease(&ListLock);
1008       Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
1009       if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
1010       TEVENT(omFirst - reprovision);
1011 
1012       const int mx = MonitorBound;
1013       if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
1014         // We can't safely induce a STW safepoint from omAlloc() as our thread
1015         // state may not be appropriate for such activities and callers may hold
1016         // naked oops, so instead we defer the action.
1017         InduceScavenge(Self, "omAlloc");
1018       }
1019       continue;
1020     }
1021 
1022     // 3: allocate a block of new ObjectMonitors
1023     // Both the local and global free lists are empty -- resort to malloc().
1024     // In the current implementation objectMonitors are TSM - immortal.
1025     // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE], but we want
1026     // each ObjectMonitor to start at the beginning of a cache line,
1027     // so we use align_size_up().
1028     // A better solution would be to use C++ placement-new.
1029     // BEWARE: As it stands currently, we don't run the ctors!
1030     assert(_BLOCKSIZE > 1, "invariant");
1031     size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
1032     PaddedEnd<ObjectMonitor> * temp;
1033     size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
1034     void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
1035                                                       mtInternal);
1036     temp = (PaddedEnd<ObjectMonitor> *)
1037              align_size_up((intptr_t)real_malloc_addr,
1038                            DEFAULT_CACHE_LINE_SIZE);
1039 
1040     // NOTE: (almost) no way to recover if allocation failed.
1041     // We might be able to induce a STW safepoint and scavenge enough
1042     // objectMonitors to permit progress.
1043     if (temp == NULL) {
1044       vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
1045                             "Allocate ObjectMonitors");
1046     }
1047     (void)memset((void *) temp, 0, neededsize);
1048 
1049     // Format the block.
1050     // initialize the linked list, each monitor points to its next
1051     // forming the single linked free list, the very first monitor
1052     // will points to next block, which forms the block list.
1053     // The trick of using the 1st element in the block as gBlockList
1054     // linkage should be reconsidered.  A better implementation would
1055     // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
1056 
1057     for (int i = 1; i < _BLOCKSIZE; i++) {
1058       temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
1059     }
1060 
1061     // terminate the last monitor as the end of list
1062     temp[_BLOCKSIZE - 1].FreeNext = NULL;
1063 
1064     // Element [0] is reserved for global list linkage
1065     temp[0].set_object(CHAINMARKER);
1066 
1067     // Consider carving out this thread's current request from the
1068     // block in hand.  This avoids some lock traffic and redundant
1069     // list activity.
1070 
1071     // Acquire the ListLock to manipulate BlockList and FreeList.
1072     // An Oyama-Taura-Yonezawa scheme might be more efficient.
1073     Thread::muxAcquire(&ListLock, "omAlloc [2]");
1074     MonitorPopulation += _BLOCKSIZE-1;
1075     MonitorFreeCount += _BLOCKSIZE-1;
1076 
1077     // Add the new block to the list of extant blocks (gBlockList).
1078     // The very first objectMonitor in a block is reserved and dedicated.
1079     // It serves as blocklist "next" linkage.
1080     temp[0].FreeNext = gBlockList;
1081     gBlockList = temp;
1082 
1083     // Add the new string of objectMonitors to the global free list
1084     temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
1085     gFreeList = temp + 1;
1086     Thread::muxRelease(&ListLock);
1087     TEVENT(Allocate block of monitors);
1088   }
1089 }
1090 
1091 // Place "m" on the caller's private per-thread omFreeList.
1092 // In practice there's no need to clamp or limit the number of
1093 // monitors on a thread's omFreeList as the only time we'll call
1094 // omRelease is to return a monitor to the free list after a CAS
1095 // attempt failed.  This doesn't allow unbounded #s of monitors to
1096 // accumulate on a thread's free list.
1097 
1098 void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
1099                                    bool fromPerThreadAlloc) {
1100   guarantee(m->object() == NULL, "invariant");
1101 
1102   // Remove from omInUseList
1103   if (MonitorInUseLists && fromPerThreadAlloc) {
1104     ObjectMonitor* curmidinuse = NULL;
1105     for (ObjectMonitor* mid = Self->omInUseList; mid != NULL;) {
1106       if (m == mid) {
1107         // extract from per-thread in-use-list
1108         if (mid == Self->omInUseList) {
1109           Self->omInUseList = mid->FreeNext;
1110         } else if (curmidinuse != NULL) {
1111           curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
1112         }
1113         Self->omInUseCount--;
1114         if (ObjectMonitor::Knob_VerifyInUse) {
1115           verifyInUse(Self);
1116         }
1117         break;
1118       } else {
1119         curmidinuse = mid;
1120         mid = mid->FreeNext;
1121       }
1122     }
1123   }
1124 
1125   // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
1126   m->FreeNext = Self->omFreeList;
1127   Self->omFreeList = m;
1128   Self->omFreeCount++;
1129 }
1130 
1131 // Return the monitors of a moribund thread's local free list to
1132 // the global free list.  Typically a thread calls omFlush() when
1133 // it's dying.  We could also consider having the VM thread steal
1134 // monitors from threads that have not run java code over a few
1135 // consecutive STW safepoints.  Relatedly, we might decay
1136 // omFreeProvision at STW safepoints.
1137 //
1138 // Also return the monitors of a moribund thread's omInUseList to
1139 // a global gOmInUseList under the global list lock so these
1140 // will continue to be scanned.
1141 //
1142 // We currently call omFlush() from the Thread:: dtor _after the thread
1143 // has been excised from the thread list and is no longer a mutator.
1144 // That means that omFlush() can run concurrently with a safepoint and
1145 // the scavenge operator.  Calling omFlush() from JavaThread::exit() might
1146 // be a better choice as we could safely reason that that the JVM is
1147 // not at a safepoint at the time of the call, and thus there could
1148 // be not inopportune interleavings between omFlush() and the scavenge
1149 // operator.
1150 
1151 void ObjectSynchronizer::omFlush(Thread * Self) {
1152   ObjectMonitor * List = Self->omFreeList;  // Null-terminated SLL
1153   Self->omFreeList = NULL;
1154   ObjectMonitor * Tail = NULL;
1155   int Tally = 0;
1156   if (List != NULL) {
1157     ObjectMonitor * s;
1158     for (s = List; s != NULL; s = s->FreeNext) {
1159       Tally++;
1160       Tail = s;
1161       guarantee(s->object() == NULL, "invariant");
1162       guarantee(!s->is_busy(), "invariant");
1163       s->set_owner(NULL);   // redundant but good hygiene
1164       TEVENT(omFlush - Move one);
1165     }
1166     guarantee(Tail != NULL && List != NULL, "invariant");
1167   }
1168 
1169   ObjectMonitor * InUseList = Self->omInUseList;
1170   ObjectMonitor * InUseTail = NULL;
1171   int InUseTally = 0;
1172   if (InUseList != NULL) {
1173     Self->omInUseList = NULL;
1174     ObjectMonitor *curom;
1175     for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
1176       InUseTail = curom;
1177       InUseTally++;
1178     }
1179     assert(Self->omInUseCount == InUseTally, "inuse count off");
1180     Self->omInUseCount = 0;
1181     guarantee(InUseTail != NULL && InUseList != NULL, "invariant");
1182   }
1183 
1184   Thread::muxAcquire(&ListLock, "omFlush");
1185   if (Tail != NULL) {
1186     Tail->FreeNext = gFreeList;
1187     gFreeList = List;
1188     MonitorFreeCount += Tally;
1189   }
1190 
1191   if (InUseTail != NULL) {
1192     InUseTail->FreeNext = gOmInUseList;
1193     gOmInUseList = InUseList;
1194     gOmInUseCount += InUseTally;
1195   }
1196 
1197   Thread::muxRelease(&ListLock);
1198   TEVENT(omFlush);
1199 }
1200 
1201 // Fast path code shared by multiple functions
1202 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
1203   markOop mark = obj->mark();
1204   if (mark->has_monitor()) {
1205     assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1206     assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
1207     return mark->monitor();
1208   }
1209   return ObjectSynchronizer::inflate(Thread::current(), obj);
1210 }
1211 
1212 
1213 ObjectMonitor * NOINLINE ObjectSynchronizer::inflate(Thread * Self,
1214                                                      oop object) {
1215   // Inflate mutates the heap ...
1216   // Relaxing assertion for bug 6320749.
1217   assert(Universe::verify_in_progress() ||
1218          !SafepointSynchronize::is_at_safepoint(), "invariant");
1219 
1220   for (;;) {
1221     const markOop mark = object->mark();
1222     assert(!mark->has_bias_pattern(), "invariant");
1223 
1224     // The mark can be in one of the following states:
1225     // *  Inflated     - just return
1226     // *  Stack-locked - coerce it to inflated
1227     // *  INFLATING    - busy wait for conversion to complete
1228     // *  Neutral      - aggressively inflate the object.
1229     // *  BIASED       - Illegal.  We should never see this
1230 
1231     // CASE: inflated
1232     if (mark->has_monitor()) {
1233       ObjectMonitor * inf = mark->monitor();
1234       assert(inf->header()->is_neutral(), "invariant");
1235       assert(inf->object() == object, "invariant");
1236       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1237       return inf;
1238     }
1239 
1240     // CASE: inflation in progress - inflating over a stack-lock.
1241     // Some other thread is converting from stack-locked to inflated.
1242     // Only that thread can complete inflation -- other threads must wait.
1243     // The INFLATING value is transient.
1244     // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1245     // We could always eliminate polling by parking the thread on some auxiliary list.
1246     if (mark == markOopDesc::INFLATING()) {
1247       TEVENT(Inflate: spin while INFLATING);
1248       ReadStableMark(object);
1249       continue;
1250     }
1251 
1252     // CASE: stack-locked
1253     // Could be stack-locked either by this thread or by some other thread.
1254     //
1255     // Note that we allocate the objectmonitor speculatively, _before_ attempting
1256     // to install INFLATING into the mark word.  We originally installed INFLATING,
1257     // allocated the objectmonitor, and then finally STed the address of the
1258     // objectmonitor into the mark.  This was correct, but artificially lengthened
1259     // the interval in which INFLATED appeared in the mark, thus increasing
1260     // the odds of inflation contention.
1261     //
1262     // We now use per-thread private objectmonitor free lists.
1263     // These list are reprovisioned from the global free list outside the
1264     // critical INFLATING...ST interval.  A thread can transfer
1265     // multiple objectmonitors en-mass from the global free list to its local free list.
1266     // This reduces coherency traffic and lock contention on the global free list.
1267     // Using such local free lists, it doesn't matter if the omAlloc() call appears
1268     // before or after the CAS(INFLATING) operation.
1269     // See the comments in omAlloc().
1270 
1271     if (mark->has_locker()) {
1272       ObjectMonitor * m = omAlloc(Self);
1273       // Optimistically prepare the objectmonitor - anticipate successful CAS
1274       // We do this before the CAS in order to minimize the length of time
1275       // in which INFLATING appears in the mark.
1276       m->Recycle();
1277       m->_Responsible  = NULL;
1278       m->_recursions   = 0;
1279       m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class
1280 
1281       markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark);
1282       if (cmp != mark) {
1283         omRelease(Self, m, true);
1284         continue;       // Interference -- just retry
1285       }
1286 
1287       // We've successfully installed INFLATING (0) into the mark-word.
1288       // This is the only case where 0 will appear in a mark-work.
1289       // Only the singular thread that successfully swings the mark-word
1290       // to 0 can perform (or more precisely, complete) inflation.
1291       //
1292       // Why do we CAS a 0 into the mark-word instead of just CASing the
1293       // mark-word from the stack-locked value directly to the new inflated state?
1294       // Consider what happens when a thread unlocks a stack-locked object.
1295       // It attempts to use CAS to swing the displaced header value from the
1296       // on-stack basiclock back into the object header.  Recall also that the
1297       // header value (hashcode, etc) can reside in (a) the object header, or
1298       // (b) a displaced header associated with the stack-lock, or (c) a displaced
1299       // header in an objectMonitor.  The inflate() routine must copy the header
1300       // value from the basiclock on the owner's stack to the objectMonitor, all
1301       // the while preserving the hashCode stability invariants.  If the owner
1302       // decides to release the lock while the value is 0, the unlock will fail
1303       // and control will eventually pass from slow_exit() to inflate.  The owner
1304       // will then spin, waiting for the 0 value to disappear.   Put another way,
1305       // the 0 causes the owner to stall if the owner happens to try to
1306       // drop the lock (restoring the header from the basiclock to the object)
1307       // while inflation is in-progress.  This protocol avoids races that might
1308       // would otherwise permit hashCode values to change or "flicker" for an object.
1309       // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
1310       // 0 serves as a "BUSY" inflate-in-progress indicator.
1311 
1312 
1313       // fetch the displaced mark from the owner's stack.
1314       // The owner can't die or unwind past the lock while our INFLATING
1315       // object is in the mark.  Furthermore the owner can't complete
1316       // an unlock on the object, either.
1317       markOop dmw = mark->displaced_mark_helper();
1318       assert(dmw->is_neutral(), "invariant");
1319 
1320       // Setup monitor fields to proper values -- prepare the monitor
1321       m->set_header(dmw);
1322 
1323       // Optimization: if the mark->locker stack address is associated
1324       // with this thread we could simply set m->_owner = Self.
1325       // Note that a thread can inflate an object
1326       // that it has stack-locked -- as might happen in wait() -- directly
1327       // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
1328       m->set_owner(mark->locker());
1329       m->set_object(object);
1330       // TODO-FIXME: assert BasicLock->dhw != 0.
1331 
1332       // Must preserve store ordering. The monitor state must
1333       // be stable at the time of publishing the monitor address.
1334       guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
1335       object->release_set_mark(markOopDesc::encode(m));
1336 
1337       // Hopefully the performance counters are allocated on distinct cache lines
1338       // to avoid false sharing on MP systems ...
1339       if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
1340       TEVENT(Inflate: overwrite stacklock);
1341       if (TraceMonitorInflation) {
1342         if (object->is_instance()) {
1343           ResourceMark rm;
1344           tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1345                         (void *) object, (intptr_t) object->mark(),
1346                         object->klass()->external_name());
1347         }
1348       }
1349       return m;
1350     }
1351 
1352     // CASE: neutral
1353     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1354     // If we know we're inflating for entry it's better to inflate by swinging a
1355     // pre-locked objectMonitor pointer into the object header.   A successful
1356     // CAS inflates the object *and* confers ownership to the inflating thread.
1357     // In the current implementation we use a 2-step mechanism where we CAS()
1358     // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1359     // An inflateTry() method that we could call from fast_enter() and slow_enter()
1360     // would be useful.
1361 
1362     assert(mark->is_neutral(), "invariant");
1363     ObjectMonitor * m = omAlloc(Self);
1364     // prepare m for installation - set monitor to initial state
1365     m->Recycle();
1366     m->set_header(mark);
1367     m->set_owner(NULL);
1368     m->set_object(object);
1369     m->_recursions   = 0;
1370     m->_Responsible  = NULL;
1371     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
1372 
1373     if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
1374       m->set_object(NULL);
1375       m->set_owner(NULL);
1376       m->Recycle();
1377       omRelease(Self, m, true);
1378       m = NULL;
1379       continue;
1380       // interference - the markword changed - just retry.
1381       // The state-transitions are one-way, so there's no chance of
1382       // live-lock -- "Inflated" is an absorbing state.
1383     }
1384 
1385     // Hopefully the performance counters are allocated on distinct
1386     // cache lines to avoid false sharing on MP systems ...
1387     if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
1388     TEVENT(Inflate: overwrite neutral);
1389     if (TraceMonitorInflation) {
1390       if (object->is_instance()) {
1391         ResourceMark rm;
1392         tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1393                       (void *) object, (intptr_t) object->mark(),
1394                       object->klass()->external_name());
1395       }
1396     }
1397     return m;
1398   }
1399 }
1400 
1401 
1402 // Deflate_idle_monitors() is called at all safepoints, immediately
1403 // after all mutators are stopped, but before any objects have moved.
1404 // It traverses the list of known monitors, deflating where possible.
1405 // The scavenged monitor are returned to the monitor free list.
1406 //
1407 // Beware that we scavenge at *every* stop-the-world point.
1408 // Having a large number of monitors in-circulation negatively
1409 // impacts the performance of some applications (e.g., PointBase).
1410 // Broadly, we want to minimize the # of monitors in circulation.
1411 //
1412 // We have added a flag, MonitorInUseLists, which creates a list
1413 // of active monitors for each thread. deflate_idle_monitors()
1414 // only scans the per-thread inuse lists. omAlloc() puts all
1415 // assigned monitors on the per-thread list. deflate_idle_monitors()
1416 // returns the non-busy monitors to the global free list.
1417 // When a thread dies, omFlush() adds the list of active monitors for
1418 // that thread to a global gOmInUseList acquiring the
1419 // global list lock. deflate_idle_monitors() acquires the global
1420 // list lock to scan for non-busy monitors to the global free list.
1421 // An alternative could have used a single global inuse list. The
1422 // downside would have been the additional cost of acquiring the global list lock
1423 // for every omAlloc().
1424 //
1425 // Perversely, the heap size -- and thus the STW safepoint rate --
1426 // typically drives the scavenge rate.  Large heaps can mean infrequent GC,
1427 // which in turn can mean large(r) numbers of objectmonitors in circulation.
1428 // This is an unfortunate aspect of this design.
1429 
1430 enum ManifestConstants {
1431   ClearResponsibleAtSTW   = 0,
1432   MaximumRecheckInterval  = 1000
1433 };
1434 
1435 // Deflate a single monitor if not in use
1436 // Return true if deflated, false if in use
1437 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1438                                          ObjectMonitor** freeHeadp,
1439                                          ObjectMonitor** freeTailp) {
1440   bool deflated;
1441   // Normal case ... The monitor is associated with obj.
1442   guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
1443   guarantee(mid == obj->mark()->monitor(), "invariant");
1444   guarantee(mid->header()->is_neutral(), "invariant");
1445 
1446   if (mid->is_busy()) {
1447     if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
1448     deflated = false;
1449   } else {
1450     // Deflate the monitor if it is no longer being used
1451     // It's idle - scavenge and return to the global free list
1452     // plain old deflation ...
1453     TEVENT(deflate_idle_monitors - scavenge1);
1454     if (TraceMonitorInflation) {
1455       if (obj->is_instance()) {
1456         ResourceMark rm;
1457         tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1458                       (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
1459       }
1460     }
1461 
1462     // Restore the header back to obj
1463     obj->release_set_mark(mid->header());
1464     mid->clear();
1465 
1466     assert(mid->object() == NULL, "invariant");
1467 
1468     // Move the object to the working free list defined by FreeHead,FreeTail.
1469     if (*freeHeadp == NULL) *freeHeadp = mid;
1470     if (*freeTailp != NULL) {
1471       ObjectMonitor * prevtail = *freeTailp;
1472       assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
1473       prevtail->FreeNext = mid;
1474     }
1475     *freeTailp = mid;
1476     deflated = true;
1477   }
1478   return deflated;
1479 }
1480 
1481 // Caller acquires ListLock
1482 int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp,
1483                                           ObjectMonitor** freeHeadp,
1484                                           ObjectMonitor** freeTailp) {
1485   ObjectMonitor* mid;
1486   ObjectMonitor* next;
1487   ObjectMonitor* curmidinuse = NULL;
1488   int deflatedcount = 0;
1489 
1490   for (mid = *listheadp; mid != NULL;) {
1491     oop obj = (oop) mid->object();
1492     bool deflated = false;
1493     if (obj != NULL) {
1494       deflated = deflate_monitor(mid, obj, freeHeadp, freeTailp);
1495     }
1496     if (deflated) {
1497       // extract from per-thread in-use-list
1498       if (mid == *listheadp) {
1499         *listheadp = mid->FreeNext;
1500       } else if (curmidinuse != NULL) {
1501         curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
1502       }
1503       next = mid->FreeNext;
1504       mid->FreeNext = NULL;  // This mid is current tail in the FreeHead list
1505       mid = next;
1506       deflatedcount++;
1507     } else {
1508       curmidinuse = mid;
1509       mid = mid->FreeNext;
1510     }
1511   }
1512   return deflatedcount;
1513 }
1514 
1515 void ObjectSynchronizer::deflate_idle_monitors() {
1516   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1517   int nInuse = 0;              // currently associated with objects
1518   int nInCirculation = 0;      // extant
1519   int nScavenged = 0;          // reclaimed
1520   bool deflated = false;
1521 
1522   ObjectMonitor * FreeHead = NULL;  // Local SLL of scavenged monitors
1523   ObjectMonitor * FreeTail = NULL;
1524 
1525   TEVENT(deflate_idle_monitors);
1526   // Prevent omFlush from changing mids in Thread dtor's during deflation
1527   // And in case the vm thread is acquiring a lock during a safepoint
1528   // See e.g. 6320749
1529   Thread::muxAcquire(&ListLock, "scavenge - return");
1530 
1531   if (MonitorInUseLists) {
1532     int inUse = 0;
1533     for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
1534       nInCirculation+= cur->omInUseCount;
1535       int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail);
1536       cur->omInUseCount-= deflatedcount;
1537       if (ObjectMonitor::Knob_VerifyInUse) {
1538         verifyInUse(cur);
1539       }
1540       nScavenged += deflatedcount;
1541       nInuse += cur->omInUseCount;
1542     }
1543 
1544     // For moribund threads, scan gOmInUseList
1545     if (gOmInUseList) {
1546       nInCirculation += gOmInUseCount;
1547       int deflatedcount = walk_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail);
1548       gOmInUseCount-= deflatedcount;
1549       nScavenged += deflatedcount;
1550       nInuse += gOmInUseCount;
1551     }
1552 
1553   } else for (PaddedEnd<ObjectMonitor> * block =
1554               (PaddedEnd<ObjectMonitor> *)gBlockList; block != NULL;
1555               block = (PaddedEnd<ObjectMonitor> *)next(block)) {
1556     // Iterate over all extant monitors - Scavenge all idle monitors.
1557     assert(block->object() == CHAINMARKER, "must be a block header");
1558     nInCirculation += _BLOCKSIZE;
1559     for (int i = 1; i < _BLOCKSIZE; i++) {
1560       ObjectMonitor* mid = (ObjectMonitor*)&block[i];
1561       oop obj = (oop) mid->object();
1562 
1563       if (obj == NULL) {
1564         // The monitor is not associated with an object.
1565         // The monitor should either be a thread-specific private
1566         // free list or the global free list.
1567         // obj == NULL IMPLIES mid->is_busy() == 0
1568         guarantee(!mid->is_busy(), "invariant");
1569         continue;
1570       }
1571       deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail);
1572 
1573       if (deflated) {
1574         mid->FreeNext = NULL;
1575         nScavenged++;
1576       } else {
1577         nInuse++;
1578       }
1579     }
1580   }
1581 
1582   MonitorFreeCount += nScavenged;
1583 
1584   // Consider: audit gFreeList to ensure that MonitorFreeCount and list agree.
1585 
1586   if (ObjectMonitor::Knob_Verbose) {
1587     ::printf("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
1588              nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
1589              MonitorPopulation, MonitorFreeCount);
1590     ::fflush(stdout);
1591   }
1592 
1593   ForceMonitorScavenge = 0;    // Reset
1594 
1595   // Move the scavenged monitors back to the global free list.
1596   if (FreeHead != NULL) {
1597     guarantee(FreeTail != NULL && nScavenged > 0, "invariant");
1598     assert(FreeTail->FreeNext == NULL, "invariant");
1599     // constant-time list splice - prepend scavenged segment to gFreeList
1600     FreeTail->FreeNext = gFreeList;
1601     gFreeList = FreeHead;
1602   }
1603   Thread::muxRelease(&ListLock);
1604 
1605   if (ObjectMonitor::_sync_Deflations != NULL) ObjectMonitor::_sync_Deflations->inc(nScavenged);
1606   if (ObjectMonitor::_sync_MonExtant  != NULL) ObjectMonitor::_sync_MonExtant ->set_value(nInCirculation);
1607 
1608   // TODO: Add objectMonitor leak detection.
1609   // Audit/inventory the objectMonitors -- make sure they're all accounted for.
1610   GVars.stwRandom = os::random();
1611   GVars.stwCycle++;
1612 }
1613 
1614 // Monitor cleanup on JavaThread::exit
1615 
1616 // Iterate through monitor cache and attempt to release thread's monitors
1617 // Gives up on a particular monitor if an exception occurs, but continues
1618 // the overall iteration, swallowing the exception.
1619 class ReleaseJavaMonitorsClosure: public MonitorClosure {
1620  private:
1621   TRAPS;
1622 
1623  public:
1624   ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
1625   void do_monitor(ObjectMonitor* mid) {
1626     if (mid->owner() == THREAD) {
1627       (void)mid->complete_exit(CHECK);
1628     }
1629   }
1630 };
1631 
1632 // Release all inflated monitors owned by THREAD.  Lightweight monitors are
1633 // ignored.  This is meant to be called during JNI thread detach which assumes
1634 // all remaining monitors are heavyweight.  All exceptions are swallowed.
1635 // Scanning the extant monitor list can be time consuming.
1636 // A simple optimization is to add a per-thread flag that indicates a thread
1637 // called jni_monitorenter() during its lifetime.
1638 //
1639 // Instead of No_Savepoint_Verifier it might be cheaper to
1640 // use an idiom of the form:
1641 //   auto int tmp = SafepointSynchronize::_safepoint_counter ;
1642 //   <code that must not run at safepoint>
1643 //   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
1644 // Since the tests are extremely cheap we could leave them enabled
1645 // for normal product builds.
1646 
1647 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
1648   assert(THREAD == JavaThread::current(), "must be current Java thread");
1649   No_Safepoint_Verifier nsv;
1650   ReleaseJavaMonitorsClosure rjmc(THREAD);
1651   Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
1652   ObjectSynchronizer::monitors_iterate(&rjmc);
1653   Thread::muxRelease(&ListLock);
1654   THREAD->clear_pending_exception();
1655 }
1656 
1657 //------------------------------------------------------------------------------
1658 // Debugging code
1659 
1660 void ObjectSynchronizer::sanity_checks(const bool verbose,
1661                                        const uint cache_line_size,
1662                                        int *error_cnt_ptr,
1663                                        int *warning_cnt_ptr) {
1664   u_char *addr_begin      = (u_char*)&GVars;
1665   u_char *addr_stwRandom  = (u_char*)&GVars.stwRandom;
1666   u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;
1667 
1668   if (verbose) {
1669     tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
1670                   sizeof(SharedGlobals));
1671   }
1672 
1673   uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
1674   if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);
1675 
1676   uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);
1677   if (verbose) {
1678     tty->print_cr("INFO: offset(_hcSequence)=%u", offset_hcSequence);
1679   }
1680 
1681   if (cache_line_size != 0) {
1682     // We were able to determine the L1 data cache line size so
1683     // do some cache line specific sanity checks
1684 
1685     if (offset_stwRandom < cache_line_size) {
1686       tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer "
1687                     "to the struct beginning than a cache line which permits "
1688                     "false sharing.");
1689       (*warning_cnt_ptr)++;
1690     }
1691 
1692     if ((offset_hcSequence - offset_stwRandom) < cache_line_size) {
1693       tty->print_cr("WARNING: the SharedGlobals.stwRandom and "
1694                     "SharedGlobals.hcSequence fields are closer than a cache "
1695                     "line which permits false sharing.");
1696       (*warning_cnt_ptr)++;
1697     }
1698 
1699     if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
1700       tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
1701                     "to the struct end than a cache line which permits false "
1702                     "sharing.");
1703       (*warning_cnt_ptr)++;
1704     }
1705   }
1706 }
1707 
1708 #ifndef PRODUCT
1709 
1710 // Verify all monitors in the monitor cache, the verification is weak.
1711 void ObjectSynchronizer::verify() {
1712   PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;
1713   ObjectMonitor* mid;
1714   while (block) {
1715     assert(block->object() == CHAINMARKER, "must be a block header");
1716     for (int i = 1; i < _BLOCKSIZE; i++) {
1717       mid = (ObjectMonitor *)(block + i);
1718       oop object = (oop) mid->object();
1719       if (object != NULL) {
1720         mid->verify();
1721       }
1722     }
1723     block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
1724   }
1725 }
1726 
1727 // Check if monitor belongs to the monitor cache
1728 // The list is grow-only so it's *relatively* safe to traverse
1729 // the list of extant blocks without taking a lock.
1730 
1731 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
1732   PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;
1733 
1734   while (block) {
1735     assert(block->object() == CHAINMARKER, "must be a block header");
1736     if (monitor > (ObjectMonitor *)&block[0] &&
1737         monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) {
1738       address mon = (address) monitor;
1739       address blk = (address) block;
1740       size_t diff = mon - blk;
1741       assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "check");
1742       return 1;
1743     }
1744     block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
1745   }
1746   return 0;
1747 }
1748 
1749 #endif