/*
 * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "trace/traceMacros.hpp"
#include "trace/tracing.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
// for instance.  If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
// want to expose the PaddedEnd template more than necessary.
ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

static void post_monitor_inflate_event(EventJavaMonitorInflate&,
                                       const oop,
                                       const ObjectSynchronizer::InflateCause);

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance.  In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation.  In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.
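//
// Caller-side pattern, shown for context (a hypothetical call site, not
// code emitted by the VM): invoke the quick_* form first and fall back to
// the slow path when it returns false. quick_notify() below follows
// exactly this true/false contract.
//
//   if (!ObjectSynchronizer::quick_notify(obj, self, all)) {
//     // transition thread state as required, then take the slow path,
//     // e.g. the notify()/notifyall() entry points further down.
//   }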

bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markOop mark = obj->mark();

  if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark->has_monitor()) {
    ObjectMonitor * const mon = mark->monitor();
    assert(mon->object() == obj, "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int tally = 0;
      do {
        mon->INotify(self);
        ++tally;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(tally));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
  const markOop mark = obj->mark();

  if (mark->has_monitor()) {
    ObjectMonitor * const m = mark->monitor();
    assert(m->object() == obj, "invariant");
    Thread * const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == Self) {
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
    // being locked. We do this unconditionally so that this thread's
    // BasicLock cannot be mis-interpreted by any stack walkers. For
    // performance reasons, stack walkers generally first check for
    // Biased Locking in the object's header, the second check is for
    // stack-locking in the object's header, the third check is for
    // recursive stack-locking in the displaced header in the BasicLock,
    // and last are the inflated Java Monitor (ObjectMonitor) checks.
    lock->set_displaced_header(markOopDesc::unused_mark());

    if (owner == NULL &&
        Atomic::cmpxchg_ptr(Self, &(m->_owner), NULL) == NULL) {
      assert(m->_recursions == 0, "invariant");
      assert(m->_owner == Self, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
//  Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compiler use
// assembly copies of this code. Make sure to update that code if the
// following function is changed. The implementation is extremely
// sensitive to race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  markOop mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markOopDesc::INFLATING() ||
         !mark->has_bias_pattern(), "should not see bias pattern here");

  markOop dhw = lock->displaced_header();
  if (dhw == NULL) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markOopDesc::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark->is_neutral(), "invariant");
      assert(!mark->has_locker() ||
             THREAD->is_lock_owned((address)mark->locker()), "invariant");
      if (mark->has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor * m = mark->monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == (markOop) lock) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw->is_neutral(), "invariant");
    if ((markOop) Atomic::cmpxchg_ptr(dhw, object->mark_addr(), mark) == mark) {
      TEVENT(fast_exit: release stack-lock);
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  ObjectSynchronizer::inflate(THREAD,
                              object,
                              inflate_cause_vm_internal)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
      TEVENT(slow_enter: release stacklock);
      return;
    }
    // Fall through to inflate() ...
  } else if (mark->has_locker() &&
             THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_monitor_enter)->enter(THREAD);
}
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code. Simply using the heavyweight
// monitor should be OK, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader  support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
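//
// Illustrative sequence for the steps above (a hypothetical VM-internal
// caller; handle setup, error checking and TRAPS plumbing elided):
//
//   intptr_t rec = ObjectSynchronizer::complete_exit(lock1, THREAD);  // (1)
//   ObjectSynchronizer::wait(lock2, 0, THREAD);                       // (2)
//   // on return the thread holds lock2 again; release it ...           (3)
//   ObjectSynchronizer::reenter(lock1, rec, THREAD);                  // (4)
//   // ... and re-acquire lock2, preserving the lock1 -> lock2 order     (5)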
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT(complete_exit);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT(reenter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  monitor->reenter(recursion, THREAD);
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  TEVENT(jni_enter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT(jni_exit);
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj,
                                                       inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor.  Note:  can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}
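
// For context, an ordinary JNI client reaches jni_enter()/jni_exit() above
// via the standard JNI monitor functions (illustrative user code, error
// handling elided):
//
//   void with_lock(JNIEnv* env, jobject obj) {
//     if (env->MonitorEnter(obj) != JNI_OK) return;  // inflates to a heavyweight monitor
//     /* ... critical section ... */
//     env->MonitorExit(obj);                         // must balance the enter
//   }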

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT(ObjectLocker);

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741.  Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_wait)->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stwRandom;
  volatile int stwCycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hcSequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};
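// (Note: DEFINE_PAD_MINUS_SIZE(id, alignment, size), from memory/padded.hpp,
// expands to a filler array of alignment - size bytes, so each variable
// group above pads out to the end of its cache line.)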

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markOop ReadStableMark(oop obj) {
  markOop mark = obj->mark();
  if (!mark->is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
        TEVENT(Inflate: INFLATING - yield);
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy.  A more complete solution would require that the inflating
        // thread hold the associated inflation lock.  The following code simply restricts
        // the number of spinners to at most one.  We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer.  Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
        // and calling park().  When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markOopDesc::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call Self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
        TEVENT(Inflate: INFLATING - yield/park);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses an unguarded global Park-Miller RNG,
    // so it's possible for two threads to race and generate the same
    // random value. On MP systems we'll have lots of RW access to a
    // global, so the mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations.  This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  TEVENT(hashCode: GENERATE);
  return value;
}

intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         Self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark(obj);

  // object should remain ineligible for biased locking
  assert(!mark->has_bias_pattern(), "invariant");

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into header
    // use (machine word version) atomic operation to install the hash
    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavyweight monitor. We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();              // by current thread, check if the displaced
    if (hash) {                       // header contains hash code
      return hash;
    }
    // WARNING:
    //   The displaced header is strictly immutable.
    // It can NOT be changed in ANY case. So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock. The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other threads
    // correctly.
  }

  // Inflate the monitor to set hash code
  monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
  // Load displaced header and check it has hash code
  mark = monitor->header();
  assert(mark->is_neutral(), "invariant");
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge hash code into header
    assert(temp->is_neutral(), "invariant");
    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code. If someone adds a new usage of
      // the displaced header, please update this code.
      hash = test->hash();
      assert(test->is_neutral(), "invariant");
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it returns
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark(obj);

  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(owner, doLock);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
}
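
// Illustrative use of monitors_iterate() (a hypothetical diagnostic
// closure, not part of this file):
//
//   class PrintMonitors : public MonitorClosure {
//    public:
//     void do_monitor(ObjectMonitor* mid) {
//       tty->print_cr("monitor=" INTPTR_FORMAT " owner=" INTPTR_FORMAT,
//                     p2i(mid), p2i(mid->owner()));
//     }
//   };
//   // PrintMonitors pm; ObjectSynchronizer::monitors_iterate(&pm);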

// Get the next block in the block list.
static inline ObjectMonitor* next(ObjectMonitor* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}


void ObjectSynchronizer::oops_do(OopClosure* f) {
  if (MonitorInUseLists) {
    // When using thread local monitor lists, we only scan the
    // global used list here (for moribund threads), and
    // the thread-local monitors in Thread::oops_do().
    global_used_oops_do(f);
  } else {
    global_oops_do(f);
  }
}

void ObjectSynchronizer::global_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)&block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(gOmInUseList, f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->omInUseList, f);
}

void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  ObjectMonitor* mid;
  for (mid = list; mid != NULL; mid = mid->FreeNext) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects.  Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects.  Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by gListLock.  All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object.  The object is inflated and the mark refers
//      to the objectmonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only.  We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC.  As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit.  Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.
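//
// Trigger sketch (this mirrors the bound check in omAlloc() below): with
// -XX:MonitorBound=N set, InduceScavenge() fires once the number of
// checked-out monitors exceeds the bound:
//
//   (gMonitorPopulation - gMonitorFreeCount) > MonitorBound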

static void InduceScavenge(Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
    if (ObjectMonitor::Knob_Verbose) {
      tty->print_cr("INFO: Monitor scavenge - Induced STW @%s (%d)",
                    Whence, ForceMonitorScavenge) ;
      tty->flush();
    }
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated as the op will be
    // enqueued and posted to the VMThread and has a lifespan longer than
    // that of this activation record. The VMThread will delete the op when completed.
    VMThread::execute(new VM_ScavengeMonitors());

    if (ObjectMonitor::Knob_Verbose) {
      tty->print_cr("INFO: Monitor scavenge - STW posted @%s (%d)",
                    Whence, ForceMonitorScavenge) ;
      tty->flush();
    }
  }
}

void ObjectSynchronizer::verifyInUse(Thread *Self) {
  ObjectMonitor* mid;
  int in_use_tally = 0;
  for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
    in_use_tally++;
  }
  assert(in_use_tally == Self->omInUseCount, "in-use count off");

  int free_tally = 0;
  for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
    free_tally++;
  }
  assert(free_tally == Self->omFreeCount, "free count off");
}

ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of objectMonitors in circulation as well as the STW
  // scavenge costs.  As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors.   Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
      guarantee(m->object() == NULL, "invariant");
      if (MonitorInUseLists) {
        m->FreeNext = Self->omInUseList;
        Self->omInUseList = m;
        Self->omInUseCount++;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
      } else {
        m->FreeNext = NULL;
      }
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "omAlloc");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        gMonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        guarantee(take->object() == NULL, "invariant");
        guarantee(!take->is_busy(), "invariant");
        take->Recycle();
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&gListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
      TEVENT(omFirst - reprovision);

      const int mx = MonitorBound;
      if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation objectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_size_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
    PaddedEnd<ObjectMonitor> * temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
    temp = (PaddedEnd<ObjectMonitor> *)
             align_size_up((intptr_t)real_malloc_addr,
                           DEFAULT_CACHE_LINE_SIZE);

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // objectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // Initialize the linked list: each monitor points to its next,
    // forming the singly linked free list, while the very first monitor
    // points to the next block, which forms the block list.
    // The trick of using the 1st element in the block as gBlockList
    // linkage should be reconsidered.  A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
1194 
1195     for (int i = 1; i < _BLOCKSIZE; i++) {
1196       temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
1197     }
1198 
1199     // terminate the last monitor as the end of list
1200     temp[_BLOCKSIZE - 1].FreeNext = NULL;
1201 
1202     // Element [0] is reserved for global list linkage
1203     temp[0].set_object(CHAINMARKER);
1204 
1205     // Consider carving out this thread's current request from the
1206     // block in hand.  This avoids some lock traffic and redundant
1207     // list activity.
1208 
1209     // Acquire the gListLock to manipulate gBlockList and gFreeList.
1210     // An Oyama-Taura-Yonezawa scheme might be more efficient.
1211     Thread::muxAcquire(&gListLock, "omAlloc [2]");
1212     gMonitorPopulation += _BLOCKSIZE-1;
1213     gMonitorFreeCount += _BLOCKSIZE-1;
1214 
1215     // Add the new block to the list of extant blocks (gBlockList).
1216     // The very first objectMonitor in a block is reserved and dedicated.
1217     // It serves as blocklist "next" linkage.
1218     temp[0].FreeNext = gBlockList;
1219     // There are lock-free uses of gBlockList so make sure that
1220     // the previous stores happen before we update gBlockList.
1221     OrderAccess::release_store_ptr(&gBlockList, temp);
1222 
1223     // Add the new string of objectMonitors to the global free list
1224     temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
1225     gFreeList = temp + 1;
1226     Thread::muxRelease(&gListLock);
1227     TEVENT(Allocate block of monitors);
1228   }
1229 }
1230 
1231 // Place "m" on the caller's private per-thread omFreeList.
1232 // In practice there's no need to clamp or limit the number of
1233 // monitors on a thread's omFreeList as the only time we'll call
1234 // omRelease is to return a monitor to the free list after a CAS
// attempt failed.  So unbounded numbers of monitors cannot accumulate
// on a thread's free list.
1237 //
1238 // Key constraint: all ObjectMonitors on a thread's free list and the global
1239 // free list must have their object field set to null. This prevents the
1240 // scavenger -- deflate_idle_monitors -- from reclaiming them.
1241 
1242 void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
1243                                    bool fromPerThreadAlloc) {
1244   guarantee(m->object() == NULL, "invariant");
  guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor");
1246   // Remove from omInUseList
1247   if (MonitorInUseLists && fromPerThreadAlloc) {
1248     ObjectMonitor* cur_mid_in_use = NULL;
1249     bool extracted = false;
1250     for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
1251       if (m == mid) {
1252         // extract from per-thread in-use list
1253         if (mid == Self->omInUseList) {
1254           Self->omInUseList = mid->FreeNext;
1255         } else if (cur_mid_in_use != NULL) {
1256           cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
1257         }
1258         extracted = true;
1259         Self->omInUseCount--;
1260         if (ObjectMonitor::Knob_VerifyInUse) {
1261           verifyInUse(Self);
1262         }
1263         break;
1264       }
1265     }
1266     assert(extracted, "Should have extracted from in-use list");
1267   }
1268 
1269   // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
1270   m->FreeNext = Self->omFreeList;
1271   Self->omFreeList = m;
1272   Self->omFreeCount++;
1273 }
1274 
1275 // Return the monitors of a moribund thread's local free list to
1276 // the global free list.  Typically a thread calls omFlush() when
1277 // it's dying.  We could also consider having the VM thread steal
1278 // monitors from threads that have not run java code over a few
1279 // consecutive STW safepoints.  Relatedly, we might decay
1280 // omFreeProvision at STW safepoints.
1281 //
1282 // Also return the monitors of a moribund thread's omInUseList to
1283 // a global gOmInUseList under the global list lock so these
1284 // will continue to be scanned.
1285 //
// We currently call omFlush() from Threads::remove() _before_ the thread
1287 // has been excised from the thread list and is no longer a mutator.
// This means that omFlush() cannot run concurrently with a safepoint and
// interleave with the scavenge operation. In particular, this ensures that
1290 // the thread's monitors are scanned by a GC safepoint, either via
1291 // Thread::oops_do() (if safepoint happens before omFlush()) or via
1292 // ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's
1293 // monitors have been transferred to the global in-use list).
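//
// The hand-off below amounts to two constant-time list splices performed
// under gListLock, sketched here for illustration (see the code that follows):
//
//   tail->FreeNext      = gFreeList;      gFreeList    = list;       // free monitors
//   inUseTail->FreeNext = gOmInUseList;   gOmInUseList = inUseList;  // in-use monitors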
1294 
1295 void ObjectSynchronizer::omFlush(Thread * Self) {
1296   ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
1297   Self->omFreeList = NULL;
1298   ObjectMonitor * tail = NULL;
1299   int tally = 0;
1300   if (list != NULL) {
1301     ObjectMonitor * s;
    // The thread is going away; its per-thread free monitors are
    // scrubbed below via set_owner(NULL) and chained through tail,
    // which will be spliced onto the global free list gFreeList,
    // under the gListLock.
1306     for (s = list; s != NULL; s = s->FreeNext) {
1307       tally++;
1308       tail = s;
1309       guarantee(s->object() == NULL, "invariant");
1310       guarantee(!s->is_busy(), "invariant");
1311       s->set_owner(NULL);   // redundant but good hygiene
1312       TEVENT(omFlush - Move one);
1313     }
1314     guarantee(tail != NULL && list != NULL, "invariant");
1315   }
1316 
1317   ObjectMonitor * inUseList = Self->omInUseList;
1318   ObjectMonitor * inUseTail = NULL;
1319   int inUseTally = 0;
1320   if (inUseList != NULL) {
1321     Self->omInUseList = NULL;
1322     ObjectMonitor *cur_om;
1323     // The thread is going away, however the omInUseList inflated
1324     // monitors may still be in-use by other threads.
1325     // Link them to inUseTail, which will be linked into the global in-use list
1326     // gOmInUseList below, under the gListLock
1327     for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
1328       inUseTail = cur_om;
1329       inUseTally++;
1330     }
1331     assert(Self->omInUseCount == inUseTally, "in-use count off");
1332     Self->omInUseCount = 0;
1333     guarantee(inUseTail != NULL && inUseList != NULL, "invariant");
1334   }
1335 
1336   Thread::muxAcquire(&gListLock, "omFlush");
1337   if (tail != NULL) {
1338     tail->FreeNext = gFreeList;
1339     gFreeList = list;
1340     gMonitorFreeCount += tally;
1341     assert(Self->omFreeCount == tally, "free-count off");
1342     Self->omFreeCount = 0;
1343   }
1344 
1345   if (inUseTail != NULL) {
1346     inUseTail->FreeNext = gOmInUseList;
1347     gOmInUseList = inUseList;
1348     gOmInUseCount += inUseTally;
1349   }
1350 
1351   Thread::muxRelease(&gListLock);
1352   TEVENT(omFlush);
1353 }
1354 
1355 // Fast path code shared by multiple functions
1356 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
1357   markOop mark = obj->mark();
1358   if (mark->has_monitor()) {
1359     assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1360     assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
1361     return mark->monitor();
1362   }
1363   return ObjectSynchronizer::inflate(Thread::current(),
1364                                      obj,
1365                                      inflate_cause_vm_internal);
1366 }
1367 
ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
                                           oop object,
                                           const InflateCause cause) {
1371 
1372   // Inflate mutates the heap ...
1373   // Relaxing assertion for bug 6320749.
1374   assert(Universe::verify_in_progress() ||
1375          !SafepointSynchronize::is_at_safepoint(), "invariant");
1376 
1377   EventJavaMonitorInflate event;
1378 
1379   for (;;) {
1380     const markOop mark = object->mark();
1381     assert(!mark->has_bias_pattern(), "invariant");
1382 
1383     // The mark can be in one of the following states:
1384     // *  Inflated     - just return
1385     // *  Stack-locked - coerce it to inflated
1386     // *  INFLATING    - busy wait for conversion to complete
1387     // *  Neutral      - aggressively inflate the object.
1388     // *  BIASED       - Illegal.  We should never see this
1389 
1390     // CASE: inflated
1391     if (mark->has_monitor()) {
1392       ObjectMonitor * inf = mark->monitor();
1393       assert(inf->header()->is_neutral(), "invariant");
1394       assert(inf->object() == object, "invariant");
1395       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1396       event.cancel(); // let's not post an inflation event, unless we did the deed ourselves
1397       return inf;
1398     }
1399 
1400     // CASE: inflation in progress - inflating over a stack-lock.
1401     // Some other thread is converting from stack-locked to inflated.
1402     // Only that thread can complete inflation -- other threads must wait.
1403     // The INFLATING value is transient.
1404     // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1405     // We could always eliminate polling by parking the thread on some auxiliary list.
1406     if (mark == markOopDesc::INFLATING()) {
1407       TEVENT(Inflate: spin while INFLATING);
1408       ReadStableMark(object);
1409       continue;
1410     }
1411 
1412     // CASE: stack-locked
1413     // Could be stack-locked either by this thread or by some other thread.
1414     //
1415     // Note that we allocate the objectmonitor speculatively, _before_ attempting
1416     // to install INFLATING into the mark word.  We originally installed INFLATING,
1417     // allocated the objectmonitor, and then finally STed the address of the
1418     // objectmonitor into the mark.  This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
1420     // the odds of inflation contention.
1421     //
1422     // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval.  A thread can transfer
    // multiple objectmonitors en masse from the global free list to its local free list.
1426     // This reduces coherency traffic and lock contention on the global free list.
1427     // Using such local free lists, it doesn't matter if the omAlloc() call appears
1428     // before or after the CAS(INFLATING) operation.
1429     // See the comments in omAlloc().
1430 
1431     if (mark->has_locker()) {
1432       ObjectMonitor * m = omAlloc(Self);
1433       // Optimistically prepare the objectmonitor - anticipate successful CAS
1434       // We do this before the CAS in order to minimize the length of time
1435       // in which INFLATING appears in the mark.
1436       m->Recycle();
1437       m->_Responsible  = NULL;
1438       m->_recursions   = 0;
1439       m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class
1440 
1441       markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark);
1442       if (cmp != mark) {
1443         omRelease(Self, m, true);
1444         continue;       // Interference -- just retry
1445       }
1446 
1447       // We've successfully installed INFLATING (0) into the mark-word.
1448       // This is the only case where 0 will appear in a mark-word.
1449       // Only the singular thread that successfully swings the mark-word
1450       // to 0 can perform (or more precisely, complete) inflation.
1451       //
1452       // Why do we CAS a 0 into the mark-word instead of just CASing the
1453       // mark-word from the stack-locked value directly to the new inflated state?
1454       // Consider what happens when a thread unlocks a stack-locked object.
1455       // It attempts to use CAS to swing the displaced header value from the
1456       // on-stack basiclock back into the object header.  Recall also that the
1457       // header value (hashcode, etc) can reside in (a) the object header, or
1458       // (b) a displaced header associated with the stack-lock, or (c) a displaced
1459       // header in an objectMonitor.  The inflate() routine must copy the header
1460       // value from the basiclock on the owner's stack to the objectMonitor, all
1461       // the while preserving the hashCode stability invariants.  If the owner
1462       // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate().  The owner
1464       // will then spin, waiting for the 0 value to disappear.   Put another way,
1465       // the 0 causes the owner to stall if the owner happens to try to
1466       // drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in-progress.  This protocol avoids races that
      // would otherwise permit hashCode values to change or "flicker" for an object.
1469       // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
1470       // 0 serves as a "BUSY" inflate-in-progress indicator.
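      //
      // In sketch form, the stack-lock inflation protocol above is:
      //
      //   stack-locked mark  --CAS(mark, INFLATING)-->  0 (INFLATING)
      //   copy the displaced header from the owner's stack into the monitor
      //   0 (INFLATING)      --release-store-->         markOopDesc::encode(m)
      //
      // Readers that observe the transient 0 must wait (see ReadStableMark)
      // rather than interpret it as a header value.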
1471 
1472 
      // Fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // marker is installed in the mark word.  Furthermore the owner can't
      // complete an unlock on the object, either.
1477       markOop dmw = mark->displaced_mark_helper();
1478       assert(dmw->is_neutral(), "invariant");
1479 
1480       // Setup monitor fields to proper values -- prepare the monitor
1481       m->set_header(dmw);
1482 
1483       // Optimization: if the mark->locker stack address is associated
1484       // with this thread we could simply set m->_owner = Self.
1485       // Note that a thread can inflate an object
1486       // that it has stack-locked -- as might happen in wait() -- directly
1487       // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
1488       m->set_owner(mark->locker());
1489       m->set_object(object);
1490       // TODO-FIXME: assert BasicLock->dhw != 0.
1491 
1492       // Must preserve store ordering. The monitor state must
1493       // be stable at the time of publishing the monitor address.
1494       guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
1495       object->release_set_mark(markOopDesc::encode(m));
1496 
1497       // Hopefully the performance counters are allocated on distinct cache lines
1498       // to avoid false sharing on MP systems ...
1499       OM_PERFDATA_OP(Inflations, inc());
1500       TEVENT(Inflate: overwrite stacklock);
1501       if (log_is_enabled(Debug, monitorinflation)) {
1502         if (object->is_instance()) {
1503           ResourceMark rm;
1504           log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1505                                       p2i(object), p2i(object->mark()),
1506                                       object->klass()->external_name());
1507         }
1508       }
1509       if (event.should_commit()) {
1510         post_monitor_inflate_event(event, object, cause);
1511       }
1512       return m;
1513     }
1514 
1515     // CASE: neutral
1516     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1517     // If we know we're inflating for entry it's better to inflate by swinging a
1518     // pre-locked objectMonitor pointer into the object header.   A successful
1519     // CAS inflates the object *and* confers ownership to the inflating thread.
1520     // In the current implementation we use a 2-step mechanism where we CAS()
1521     // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1522     // An inflateTry() method that we could call from fast_enter() and slow_enter()
1523     // would be useful.
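    //
    // A hypothetical inflateTry(), sketched here for illustration only (no
    // such routine exists in this file), might look like:
    //
    //   ObjectMonitor * m = omAlloc(Self);
    //   m->Recycle();
    //   m->set_header(mark);
    //   m->set_owner(Self);       // pre-locked on behalf of the caller
    //   m->set_object(object);
    //   if (Atomic::cmpxchg_ptr(markOopDesc::encode(m), object->mark_addr(), mark) == mark) {
    //     return m;               // inflated *and* owned by a single CAS
    //   }
    //   // interference: scrub m, omRelease() it, and fall back to the 2-step path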
1524 
1525     assert(mark->is_neutral(), "invariant");
1526     ObjectMonitor * m = omAlloc(Self);
1527     // prepare m for installation - set monitor to initial state
1528     m->Recycle();
1529     m->set_header(mark);
1530     m->set_owner(NULL);
1531     m->set_object(object);
1532     m->_recursions   = 0;
1533     m->_Responsible  = NULL;
1534     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
1535 
    if (Atomic::cmpxchg_ptr(markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
1537       m->set_object(NULL);
1538       m->set_owner(NULL);
1539       m->Recycle();
1540       omRelease(Self, m, true);
1541       m = NULL;
1542       continue;
1543       // interference - the markword changed - just retry.
1544       // The state-transitions are one-way, so there's no chance of
1545       // live-lock -- "Inflated" is an absorbing state.
1546     }
1547 
1548     // Hopefully the performance counters are allocated on distinct
1549     // cache lines to avoid false sharing on MP systems ...
1550     OM_PERFDATA_OP(Inflations, inc());
1551     TEVENT(Inflate: overwrite neutral);
1552     if (log_is_enabled(Debug, monitorinflation)) {
1553       if (object->is_instance()) {
1554         ResourceMark rm;
1555         log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1556                                     p2i(object), p2i(object->mark()),
1557                                     object->klass()->external_name());
1558       }
1559     }
1560     if (event.should_commit()) {
1561       post_monitor_inflate_event(event, object, cause);
1562     }
1563     return m;
1564   }
1565 }
1566 
1567 
// deflate_idle_monitors() is called at all safepoints, immediately
1569 // after all mutators are stopped, but before any objects have moved.
1570 // It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
1572 //
1573 // Beware that we scavenge at *every* stop-the-world point.
1574 // Having a large number of monitors in-circulation negatively
1575 // impacts the performance of some applications (e.g., PointBase).
1576 // Broadly, we want to minimize the # of monitors in circulation.
1577 //
1578 // We have added a flag, MonitorInUseLists, which creates a list
1579 // of active monitors for each thread. deflate_idle_monitors()
1580 // only scans the per-thread in-use lists. omAlloc() puts all
1581 // assigned monitors on the per-thread list. deflate_idle_monitors()
1582 // returns the non-busy monitors to the global free list.
// When a thread dies, omFlush() adds the list of active monitors for
// that thread to a global gOmInUseList, acquiring the
// global list lock. deflate_idle_monitors() acquires the global
// list lock to scan gOmInUseList and move non-busy monitors to the global free list.
1587 // An alternative could have used a single global in-use list. The
1588 // downside would have been the additional cost of acquiring the global list lock
1589 // for every omAlloc().
1590 //
1591 // Perversely, the heap size -- and thus the STW safepoint rate --
1592 // typically drives the scavenge rate.  Large heaps can mean infrequent GC,
1593 // which in turn can mean large(r) numbers of objectmonitors in circulation.
1594 // This is an unfortunate aspect of this design.
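//
// In outline, the MonitorInUseLists scavenge performed below is
// (sketched for illustration):
//
//   for each JavaThread t:
//     deflate_monitor_list(t->omInUseList_addr(), &freeHeadp, &freeTailp)
//   deflate_monitor_list(&gOmInUseList, ...)      // moribund threads' monitors
//   splice freeHeadp..freeTailp onto gFreeList    // constant time, under gListLock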
1595 
1596 enum ManifestConstants {
1597   ClearResponsibleAtSTW = 0
1598 };
1599 
1600 // Deflate a single monitor if not in-use
1601 // Return true if deflated, false if in-use
1602 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1603                                          ObjectMonitor** freeHeadp,
1604                                          ObjectMonitor** freeTailp) {
1605   bool deflated;
1606   // Normal case ... The monitor is associated with obj.
1607   guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
1608   guarantee(mid == obj->mark()->monitor(), "invariant");
1609   guarantee(mid->header()->is_neutral(), "invariant");
1610 
1611   if (mid->is_busy()) {
1612     if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
1613     deflated = false;
1614   } else {
1615     // Deflate the monitor if it is no longer being used
1616     // It's idle - scavenge and return to the global free list
1617     // plain old deflation ...
1618     TEVENT(deflate_idle_monitors - scavenge1);
1619     if (log_is_enabled(Debug, monitorinflation)) {
1620       if (obj->is_instance()) {
1621         ResourceMark rm;
1622         log_debug(monitorinflation)("Deflating object " INTPTR_FORMAT " , "
1623                                     "mark " INTPTR_FORMAT " , type %s",
1624                                     p2i(obj), p2i(obj->mark()),
1625                                     obj->klass()->external_name());
1626       }
1627     }
1628 
1629     // Restore the header back to obj
1630     obj->release_set_mark(mid->header());
1631     mid->clear();
1632 
1633     assert(mid->object() == NULL, "invariant");
1634 
    // Move the deflated monitor to the working free list defined by freeHeadp, freeTailp
1636     if (*freeHeadp == NULL) *freeHeadp = mid;
1637     if (*freeTailp != NULL) {
1638       ObjectMonitor * prevtail = *freeTailp;
1639       assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
1640       prevtail->FreeNext = mid;
1641     }
1642     *freeTailp = mid;
1643     deflated = true;
1644   }
1645   return deflated;
1646 }
1647 
// Walk a given monitor list and deflate idle monitors.
// The given list could be a per-thread list or a global list.
// The caller must hold gListLock.
1651 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
1652                                              ObjectMonitor** freeHeadp,
1653                                              ObjectMonitor** freeTailp) {
1654   ObjectMonitor* mid;
1655   ObjectMonitor* next;
1656   ObjectMonitor* cur_mid_in_use = NULL;
1657   int deflated_count = 0;
1658 
1659   for (mid = *listHeadp; mid != NULL;) {
1660     oop obj = (oop) mid->object();
1661     if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
1662       // if deflate_monitor succeeded,
1663       // extract from per-thread in-use list
1664       if (mid == *listHeadp) {
1665         *listHeadp = mid->FreeNext;
1666       } else if (cur_mid_in_use != NULL) {
1667         cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
1668       }
1669       next = mid->FreeNext;
1670       mid->FreeNext = NULL;  // This mid is current tail in the freeHeadp list
1671       mid = next;
1672       deflated_count++;
1673     } else {
1674       cur_mid_in_use = mid;
1675       mid = mid->FreeNext;
1676     }
1677   }
1678   return deflated_count;
1679 }
1680 
1681 void ObjectSynchronizer::deflate_idle_monitors() {
1682   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1683   int nInuse = 0;              // currently associated with objects
1684   int nInCirculation = 0;      // extant
1685   int nScavenged = 0;          // reclaimed
1686   bool deflated = false;
1687 
1688   ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
1689   ObjectMonitor * freeTailp = NULL;
1690 
1691   TEVENT(deflate_idle_monitors);
  // Prevent omFlush from changing mids in Thread dtors during deflation,
  // and guard against the VM thread acquiring a lock during a safepoint.
  // See e.g. 6320749.
1695   Thread::muxAcquire(&gListLock, "scavenge - return");
1696 
1697   if (MonitorInUseLists) {
1698     int inUse = 0;
1699     for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
      nInCirculation += cur->omInUseCount;
      int deflated_count = deflate_monitor_list(cur->omInUseList_addr(), &freeHeadp, &freeTailp);
      cur->omInUseCount -= deflated_count;
1703       if (ObjectMonitor::Knob_VerifyInUse) {
1704         verifyInUse(cur);
1705       }
1706       nScavenged += deflated_count;
1707       nInuse += cur->omInUseCount;
1708     }
1709 
1710     // For moribund threads, scan gOmInUseList
    if (gOmInUseList != NULL) {
1712       nInCirculation += gOmInUseCount;
1713       int deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
      gOmInUseCount -= deflated_count;
1715       nScavenged += deflated_count;
1716       nInuse += gOmInUseCount;
1717     }
1718 
1719   } else {
1720     PaddedEnd<ObjectMonitor> * block =
1721       (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
1722     for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
1723       // Iterate over all extant monitors - Scavenge all idle monitors.
1724       assert(block->object() == CHAINMARKER, "must be a block header");
1725       nInCirculation += _BLOCKSIZE;
1726       for (int i = 1; i < _BLOCKSIZE; i++) {
1727         ObjectMonitor* mid = (ObjectMonitor*)&block[i];
1728         oop obj = (oop)mid->object();
1729 
1730         if (obj == NULL) {
1731           // The monitor is not associated with an object.
          // The monitor should either be on a thread-specific private
          // free list or on the global free list.
1734           // obj == NULL IMPLIES mid->is_busy() == 0
1735           guarantee(!mid->is_busy(), "invariant");
1736           continue;
1737         }
1738         deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp);
1739 
1740         if (deflated) {
1741           mid->FreeNext = NULL;
1742           nScavenged++;
1743         } else {
1744           nInuse++;
1745         }
1746       }
1747     }
1748   }
1749 
1750   gMonitorFreeCount += nScavenged;
1751 
1752   // Consider: audit gFreeList to ensure that gMonitorFreeCount and list agree.
1753 
1754   if (ObjectMonitor::Knob_Verbose) {
1755     tty->print_cr("INFO: Deflate: InCirc=%d InUse=%d Scavenged=%d "
1756                   "ForceMonitorScavenge=%d : pop=%d free=%d",
1757                   nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
1758                   gMonitorPopulation, gMonitorFreeCount);
1759     tty->flush();
1760   }
1761 
1762   ForceMonitorScavenge = 0;    // Reset
1763 
1764   // Move the scavenged monitors back to the global free list.
1765   if (freeHeadp != NULL) {
1766     guarantee(freeTailp != NULL && nScavenged > 0, "invariant");
1767     assert(freeTailp->FreeNext == NULL, "invariant");
1768     // constant-time list splice - prepend scavenged segment to gFreeList
1769     freeTailp->FreeNext = gFreeList;
1770     gFreeList = freeHeadp;
1771   }
1772   Thread::muxRelease(&gListLock);
1773 
1774   OM_PERFDATA_OP(Deflations, inc(nScavenged));
1775   OM_PERFDATA_OP(MonExtant, set_value(nInCirculation));
1776 
1777   // TODO: Add objectMonitor leak detection.
1778   // Audit/inventory the objectMonitors -- make sure they're all accounted for.
1779   GVars.stwRandom = os::random();
1780   GVars.stwCycle++;
1781 }
1782 
1783 // Monitor cleanup on JavaThread::exit
1784 
1785 // Iterate through monitor cache and attempt to release thread's monitors
1786 // Gives up on a particular monitor if an exception occurs, but continues
1787 // the overall iteration, swallowing the exception.
1788 class ReleaseJavaMonitorsClosure: public MonitorClosure {
1789  private:
1790   TRAPS;
1791 
1792  public:
1793   ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
1794   void do_monitor(ObjectMonitor* mid) {
1795     if (mid->owner() == THREAD) {
1796       if (ObjectMonitor::Knob_VerifyMatch != 0) {
1797         ResourceMark rm;
1798         Handle obj(THREAD, (oop) mid->object());
1799         tty->print("INFO: unexpected locked object:");
1800         javaVFrame::print_locked_object_class_name(tty, obj, "locked");
1801         fatal("exiting JavaThread=" INTPTR_FORMAT
1802               " unexpectedly owns ObjectMonitor=" INTPTR_FORMAT,
1803               p2i(THREAD), p2i(mid));
1804       }
1805       (void)mid->complete_exit(CHECK);
1806     }
1807   }
1808 };
1809 
1810 // Release all inflated monitors owned by THREAD.  Lightweight monitors are
1811 // ignored.  This is meant to be called during JNI thread detach which assumes
1812 // all remaining monitors are heavyweight.  All exceptions are swallowed.
1813 // Scanning the extant monitor list can be time consuming.
1814 // A simple optimization is to add a per-thread flag that indicates a thread
1815 // called jni_monitorenter() during its lifetime.
1816 //
// Instead of NoSafepointVerifier it might be cheaper to
1818 // use an idiom of the form:
1819 //   auto int tmp = SafepointSynchronize::_safepoint_counter ;
1820 //   <code that must not run at safepoint>
1821 //   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
1822 // Since the tests are extremely cheap we could leave them enabled
1823 // for normal product builds.
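//
// In the idiom above, the guarantee passes only when (a) the counter is
// unchanged, so (tmp ^ _safepoint_counter) == 0, and (b) the saved value was
// even, so (tmp & 1) == 0 -- assuming the counter is odd exactly while a
// safepoint is in progress.  Together these imply that no safepoint was
// running or began during the protected interval.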
1824 
1825 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
1826   assert(THREAD == JavaThread::current(), "must be current Java thread");
1827   NoSafepointVerifier nsv;
1828   ReleaseJavaMonitorsClosure rjmc(THREAD);
1829   Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
1830   ObjectSynchronizer::monitors_iterate(&rjmc);
1831   Thread::muxRelease(&gListLock);
1832   THREAD->clear_pending_exception();
1833 }
1834 
1835 const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
1836   switch (cause) {
1837     case inflate_cause_vm_internal:    return "VM Internal";
1838     case inflate_cause_monitor_enter:  return "Monitor Enter";
1839     case inflate_cause_wait:           return "Monitor Wait";
1840     case inflate_cause_notify:         return "Monitor Notify";
1841     case inflate_cause_hash_code:      return "Monitor Hash Code";
1842     case inflate_cause_jni_enter:      return "JNI Monitor Enter";
1843     case inflate_cause_jni_exit:       return "JNI Monitor Exit";
1844     default:
1845       ShouldNotReachHere();
1846   }
1847   return "Unknown";
1848 }
1849 
1850 static void post_monitor_inflate_event(EventJavaMonitorInflate& event,
1851                                        const oop obj,
1852                                        const ObjectSynchronizer::InflateCause cause) {
1853 #if INCLUDE_TRACE
1854   assert(event.should_commit(), "check outside");
1855   event.set_monitorClass(obj->klass());
1856   event.set_address((TYPE_ADDRESS)(uintptr_t)(void*)obj);
1857   event.set_cause((u1)cause);
1858   event.commit();
1859 #endif
1860 }
1861 
1862 //------------------------------------------------------------------------------
1863 // Debugging code
1864 
1865 void ObjectSynchronizer::sanity_checks(const bool verbose,
1866                                        const uint cache_line_size,
1867                                        int *error_cnt_ptr,
1868                                        int *warning_cnt_ptr) {
1869   u_char *addr_begin      = (u_char*)&GVars;
1870   u_char *addr_stwRandom  = (u_char*)&GVars.stwRandom;
1871   u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;
1872 
1873   if (verbose) {
1874     tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
1875                   sizeof(SharedGlobals));
1876   }
1877 
1878   uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
1879   if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);
1880 
1881   uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);
1882   if (verbose) {
1883     tty->print_cr("INFO: offset(_hcSequence)=%u", offset_hcSequence);
1884   }
1885 
1886   if (cache_line_size != 0) {
1887     // We were able to determine the L1 data cache line size so
1888     // do some cache line specific sanity checks
1889 
1890     if (offset_stwRandom < cache_line_size) {
1891       tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer "
1892                     "to the struct beginning than a cache line which permits "
1893                     "false sharing.");
1894       (*warning_cnt_ptr)++;
1895     }
1896 
1897     if ((offset_hcSequence - offset_stwRandom) < cache_line_size) {
1898       tty->print_cr("WARNING: the SharedGlobals.stwRandom and "
1899                     "SharedGlobals.hcSequence fields are closer than a cache "
1900                     "line which permits false sharing.");
1901       (*warning_cnt_ptr)++;
1902     }
1903 
1904     if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
1905       tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
1906                     "to the struct end than a cache line which permits false "
1907                     "sharing.");
1908       (*warning_cnt_ptr)++;
1909     }
1910   }
1911 }
1912 
1913 #ifndef PRODUCT
1914 
1915 // Check if monitor belongs to the monitor cache
1916 // The list is grow-only so it's *relatively* safe to traverse
1917 // the list of extant blocks without taking a lock.
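//
// A monitor is "in the pool" iff it lies strictly inside some block, i.e.
// &block[1] <= monitor <= &block[_BLOCKSIZE - 1] (element [0] is excluded
// as the reserved block-linkage element), and it falls on a
// PaddedEnd<ObjectMonitor> boundary within that block.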
1918 
1919 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
1920   PaddedEnd<ObjectMonitor> * block =
1921     (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
1922   while (block != NULL) {
1923     assert(block->object() == CHAINMARKER, "must be a block header");
1924     if (monitor > (ObjectMonitor *)&block[0] &&
1925         monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) {
1926       address mon = (address)monitor;
1927       address blk = (address)block;
1928       size_t diff = mon - blk;
1929       assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
1930       return 1;
1931     }
1932     block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
1933   }
1934   return 0;
1935 }
1936 
1937 #endif