/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
// for instance.  If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)    {;}
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedEnd<ObjectMonitor> * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance.  In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation.  In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.
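//
// A speculative sketch of such a fused primitive, expressed against the
// quick_notify() entry point defined below; the name quick_notify_and_exit()
// and its exact shape are illustrative only -- no such runtime entry point
// exists in this file:
//
//   bool ObjectSynchronizer::quick_notify_and_exit(oopDesc * obj, Thread * self) {
//     if (!quick_notify(obj, self, false /* notify one waiter */)) {
//       return false;    // defer to the separate slow paths
//     }
//     // ... then perform the existing fast-path monitorexit sequence ...
//     return true;
//   }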

bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markOop mark = obj->mark();

  if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark->has_monitor()) {
    ObjectMonitor * const mon = mark->monitor();
    assert(oopDesc::equals((oop) mon->object(), obj), "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int tally = 0;
      do {
        mon->INotify(self);
        ++tally;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(tally));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
  const markOop mark = obj->mark();

  if (mark->has_monitor()) {
    ObjectMonitor * const m = mark->monitor();
    assert(oopDesc::equals((oop) m->object(), obj), "invariant");
    Thread * const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == Self) {
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
    // being locked. We do this unconditionally so that this thread's
    // BasicLock cannot be mis-interpreted by any stack walkers. For
    // performance reasons, stack walkers generally first check for
    // Biased Locking in the object's header, the second check is for
    // stack-locking in the object's header, the third check is for
    // recursive stack-locking in the displaced header in the BasicLock,
    // and last are the inflated Java Monitor (ObjectMonitor) checks.
    lock->set_displaced_header(markOopDesc::unused_mark());

    if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) {
      assert(m->_recursions == 0, "invariant");
      assert(m->_owner == Self, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
//  Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compiler use
// some assembly copies of this code. Make sure to update that code
// if the following function is changed. The implementation is
// extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  markOop mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markOopDesc::INFLATING() ||
         !mark->has_bias_pattern(), "should not see bias pattern here");

  markOop dhw = lock->displaced_header();
  if (dhw == NULL) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markOopDesc::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark->is_neutral(), "invariant");
      assert(!mark->has_locker() ||
             THREAD->is_lock_owned((address)mark->locker()), "invariant");
      if (mark->has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor * m = mark->monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == (markOop) lock) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw->is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      TEVENT(fast_exit: release stack-lock);
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  ObjectSynchronizer::inflate(THREAD,
                              object,
                              inflate_cause_vm_internal)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
      TEVENT(slow_enter: release stacklock);
      return;
    }
    // Fall through to inflate() ...
  } else if (mark->has_locker() &&
             THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_monitor_enter)->enter(THREAD);
}

// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code. Simply using the
// heavyweight monitor should be OK, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to work around deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
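//
// A hedged sketch of steps 1-5 above in terms of the entry points in this
// file; h_lock1 and h_lock2 are illustrative Handles and all exception
// checking is elided:
//
//   intptr_t rec = ObjectSynchronizer::complete_exit(h_lock1, THREAD);  // (1)
//   ObjectSynchronizer::wait(h_lock2, 0, THREAD);                       // (2)
//   // notified on lock2, then unlock lock2 ...                         // (3)
//   ObjectSynchronizer::reenter(h_lock1, rec, THREAD);                  // (4)
//   // ... and finally lock lock2 again                                 // (5)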
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT(complete_exit);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT(reenter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  monitor->reenter(recursion, THREAD);
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  TEVENT(jni_enter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT(jni_exit);
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj,
                                                       inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor.  Note:  can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT(ObjectLocker);

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741.  Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_wait)->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notify(THREAD);
}

// NOTE: see the comment in notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.
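//
// As a concrete instance of this policy, the lock-free publication of
// gBlockList later in this file pairs a releasing store in the writer
// with an acquiring load in the readers; condensed from omAlloc() and
// monitors_iterate() below:
//
//   // writer: link the new block, then publish it with release semantics
//   temp[0].FreeNext = gBlockList;
//   OrderAccess::release_store(&gBlockList, temp);
//
//   // reader: acquire before walking the just-published block chain
//   PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);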

struct SharedGlobals {
  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stwRandom;
  volatile int stwCycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hcSequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markOop ReadStableMark(oop obj) {
  markOop mark = obj->mark();
  if (!mark->is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
        TEVENT(Inflate: INFLATING - yield);
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy.  A more complete solution would require that the inflating
        // thread hold the associated inflation lock.  The following code simply restricts
        // the number of spinners to at most one.  We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer.  Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
        // and calling park().  When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
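        //
        // A speculative sketch of that refined scheme; WaitNode and its
        // fields are hypothetical and do not exist in this code base:
        //
        //   struct WaitNode { WaitNode * next; volatile int notified; };
        //   // waiter: CAS-push a stack-allocated WaitNode onto the list
        //   // rooted at the markword, then loop: if (notified) break; park();
        //   // inflater: one CAS detaches the list and installs the inflated
        //   // markword, then for each node: set notified and unpark() it.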
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markOopDesc::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call Self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
        TEVENT(Inflate: INFLATING - yield/park);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations.  This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  TEVENT(hashCode: GENERATE);
  return value;
}

intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         Self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark(obj);

  // object should remain ineligible for biased locking
  assert(!mark->has_bias_pattern(), "invariant");

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into header
    // use (machine word version) atomic operation to install the hash
    test = obj->cas_set_mark(temp, mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavyweight monitor. We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();              // by current thread, check if the displaced
    if (hash) {                       // header contains hash code
      return hash;
    }
    // WARNING:
    //   The displaced header is strictly immutable.
    // It can NOT be changed in ANY case. So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock. The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other threads
    // correctly.
  }

  // Inflate the monitor to set hash code
  monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
  // Load displaced header and check it has hash code
  mark = monitor->header();
  assert(mark->is_neutral(), "invariant");
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge hash code into header
    assert(temp->is_neutral(), "invariant");
    test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code. If someone adds a new usage of
      // the displaced header, please update this code.
      hash = test->hash();
      assert(test->is_neutral(), "invariant");
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method could revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it returns
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark(obj);

  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(t_list, owner);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
}

// Get the next block in the block list.
static inline PaddedEnd<ObjectMonitor>* next(PaddedEnd<ObjectMonitor>* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = (PaddedEnd<ObjectMonitor>*) block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}

static bool monitors_used_above_threshold() {
  if (gMonitorPopulation == 0) {
    return false;
  }
  int monitors_used = gMonitorPopulation - gMonitorFreeCount;
  int monitor_usage = (monitors_used * 100LL) / gMonitorPopulation;
  return monitor_usage > MonitorUsedDeflationThreshold;
}

bool ObjectSynchronizer::is_cleanup_needed() {
  if (MonitorUsedDeflationThreshold > 0) {
    return monitors_used_above_threshold();
  }
  return false;
}

void ObjectSynchronizer::oops_do(OopClosure* f) {
  if (MonitorInUseLists) {
    // When using thread local monitor lists, we only scan the
    // global used list here (for moribund threads), and
    // the thread-local monitors in Thread::oops_do().
    global_used_oops_do(f);
  } else {
    global_oops_do(f);
  }
}

void ObjectSynchronizer::global_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  for (; block != NULL; block = next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)&block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(gOmInUseList, f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->omInUseList, f);
}

void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  ObjectMonitor* mid;
  for (mid = list; mid != NULL; mid = mid->FreeNext) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects.  Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects.  Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by gListLock.  All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object.  The object is inflated and the mark refers
//      to the objectmonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only.  We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC.  As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit.  Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.

static void InduceScavenge(Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
    if (ObjectMonitor::Knob_Verbose) {
      tty->print_cr("INFO: Monitor scavenge - Induced STW @%s (%d)",
                    Whence, ForceMonitorScavenge) ;
      tty->flush();
    }
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated, as the op will be enqueued and posted
    // to the VMThread and have a lifespan longer than that of this activation record.
    // The VMThread will delete the op when completed.
    VMThread::execute(new VM_ScavengeMonitors());

    if (ObjectMonitor::Knob_Verbose) {
      tty->print_cr("INFO: Monitor scavenge - STW posted @%s (%d)",
                    Whence, ForceMonitorScavenge) ;
      tty->flush();
    }
  }
}

void ObjectSynchronizer::verifyInUse(Thread *Self) {
  ObjectMonitor* mid;
  int in_use_tally = 0;
  for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
    in_use_tally++;
  }
  assert(in_use_tally == Self->omInUseCount, "in-use count off");

  int free_tally = 0;
  for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
    free_tally++;
  }
  assert(free_tally == Self->omFreeCount, "free count off");
}

ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of objectMonitors in circulation as well as the STW
  // scavenge costs.  As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors.   Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
      guarantee(m->object() == NULL, "invariant");
      if (MonitorInUseLists) {
        m->FreeNext = Self->omInUseList;
        Self->omInUseList = m;
        Self->omInUseCount++;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
      } else {
        m->FreeNext = NULL;
      }
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "omAlloc");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        gMonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        guarantee(take->object() == NULL, "invariant");
        guarantee(!take->is_busy(), "invariant");
        take->Recycle();
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&gListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
      TEVENT(omFirst - reprovision);

      const int mx = MonitorBound;
      if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation objectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
    PaddedEnd<ObjectMonitor> * temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
    temp = (PaddedEnd<ObjectMonitor> *)
             align_up(real_malloc_addr, DEFAULT_CACHE_LINE_SIZE);

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // objectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // Initialize the linked list; each monitor points to its next,
    // forming the singly linked free list. The very first monitor
    // will point to the next block, which forms the block list.
1199     // The trick of using the 1st element in the block as gBlockList
1200     // linkage should be reconsidered.  A better implementation would
1201     // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
1202 
1203     for (int i = 1; i < _BLOCKSIZE; i++) {
1204       temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
1205     }
1206 
1207     // terminate the last monitor as the end of list
1208     temp[_BLOCKSIZE - 1].FreeNext = NULL;
1209 
1210     // Element [0] is reserved for global list linkage
1211     temp[0].set_object(CHAINMARKER);
1212 
1213     // Consider carving out this thread's current request from the
1214     // block in hand.  This avoids some lock traffic and redundant
1215     // list activity.
1216 
1217     // Acquire the gListLock to manipulate gBlockList and gFreeList.
1218     // An Oyama-Taura-Yonezawa scheme might be more efficient.
1219     Thread::muxAcquire(&gListLock, "omAlloc [2]");
1220     gMonitorPopulation += _BLOCKSIZE-1;
1221     gMonitorFreeCount += _BLOCKSIZE-1;
1222 
1223     // Add the new block to the list of extant blocks (gBlockList).
1224     // The very first objectMonitor in a block is reserved and dedicated.
1225     // It serves as blocklist "next" linkage.
1226     temp[0].FreeNext = gBlockList;
1227     // There are lock-free uses of gBlockList so make sure that
1228     // the previous stores happen before we update gBlockList.
1229     OrderAccess::release_store(&gBlockList, temp);
1230 
1231     // Add the new string of objectMonitors to the global free list
1232     temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
1233     gFreeList = temp + 1;
1234     Thread::muxRelease(&gListLock);
1235     TEVENT(Allocate block of monitors);
1236   }
1237 }
1238 
1239 // Place "m" on the caller's private per-thread omFreeList.
1240 // In practice there's no need to clamp or limit the number of
1241 // monitors on a thread's omFreeList as the only time we'll call
1242 // omRelease is to return a monitor to the free list after a CAS
1243 // attempt failed.  This doesn't allow unbounded #s of monitors to
1244 // accumulate on a thread's free list.
1245 //
1246 // Key constraint: all ObjectMonitors on a thread's free list and the global
1247 // free list must have their object field set to null. This prevents the
1248 // scavenger -- deflate_idle_monitors -- from reclaiming them.
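//
// Informally:
//   for all m on Self->omFreeList or gFreeList: m->object() == NULL
// The deflation code below (deflate_monitor_list() and the block scan in
// deflate_idle_monitors()) only considers monitors whose object field is
// non-NULL, so a monitor parked on a free list is never a deflation
// candidate.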
1249 
1250 void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
1251                                    bool fromPerThreadAlloc) {
1252   guarantee(m->object() == NULL, "invariant");
1253   guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
1254   // Remove from omInUseList
1255   if (MonitorInUseLists && fromPerThreadAlloc) {
1256     ObjectMonitor* cur_mid_in_use = NULL;
1257     bool extracted = false;
1258     for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
1259       if (m == mid) {
1260         // extract from per-thread in-use list
1261         if (mid == Self->omInUseList) {
1262           Self->omInUseList = mid->FreeNext;
1263         } else if (cur_mid_in_use != NULL) {
1264           cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
1265         }
1266         extracted = true;
1267         Self->omInUseCount--;
1268         if (ObjectMonitor::Knob_VerifyInUse) {
1269           verifyInUse(Self);
1270         }
1271         break;
1272       }
1273     }
1274     assert(extracted, "Should have extracted from in-use list");
1275   }
1276 
1277   // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
1278   m->FreeNext = Self->omFreeList;
1279   Self->omFreeList = m;
1280   Self->omFreeCount++;
1281 }
1282 
1283 // Return the monitors of a moribund thread's local free list to
1284 // the global free list.  Typically a thread calls omFlush() when
1285 // it's dying.  We could also consider having the VM thread steal
1286 // monitors from threads that have not run java code over a few
1287 // consecutive STW safepoints.  Relatedly, we might decay
1288 // omFreeProvision at STW safepoints.
1289 //
1290 // Also return the monitors of a moribund thread's omInUseList to
1291 // a global gOmInUseList under the global list lock so these
1292 // will continue to be scanned.
1293 //
1294 // We currently call omFlush() from Threads::remove() _before the thread
1295 // has been excised from the thread list and is no longer a mutator.
// This means that omFlush() cannot run concurrently with a safepoint and
// interleave with the deflate_idle_monitors() scavenge operation. In particular, this ensures that
1298 // the thread's monitors are scanned by a GC safepoint, either via
1299 // Thread::oops_do() (if safepoint happens before omFlush()) or via
1300 // ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's
1301 // monitors have been transferred to the global in-use list).
1302 
1303 void ObjectSynchronizer::omFlush(Thread * Self) {
1304   ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
1305   Self->omFreeList = NULL;
1306   ObjectMonitor * tail = NULL;
1307   int tally = 0;
1308   if (list != NULL) {
1309     ObjectMonitor * s;
    // The thread is going away.  Clear the owner field on each
    // per-thread free monitor via set_owner(NULL) and link them to
    // tail, which will be spliced into the global free list
    // gFreeList below, under the gListLock.
1314     for (s = list; s != NULL; s = s->FreeNext) {
1315       tally++;
1316       tail = s;
1317       guarantee(s->object() == NULL, "invariant");
1318       guarantee(!s->is_busy(), "invariant");
1319       s->set_owner(NULL);   // redundant but good hygiene
1320       TEVENT(omFlush - Move one);
1321     }
1322     guarantee(tail != NULL && list != NULL, "invariant");
1323   }
1324 
1325   ObjectMonitor * inUseList = Self->omInUseList;
1326   ObjectMonitor * inUseTail = NULL;
1327   int inUseTally = 0;
1328   if (inUseList != NULL) {
1329     Self->omInUseList = NULL;
1330     ObjectMonitor *cur_om;
1331     // The thread is going away, however the omInUseList inflated
1332     // monitors may still be in-use by other threads.
1333     // Link them to inUseTail, which will be linked into the global in-use list
1334     // gOmInUseList below, under the gListLock
1335     for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
1336       inUseTail = cur_om;
1337       inUseTally++;
1338     }
1339     assert(Self->omInUseCount == inUseTally, "in-use count off");
1340     Self->omInUseCount = 0;
1341     guarantee(inUseTail != NULL && inUseList != NULL, "invariant");
1342   }
1343 
1344   Thread::muxAcquire(&gListLock, "omFlush");
1345   if (tail != NULL) {
1346     tail->FreeNext = gFreeList;
1347     gFreeList = list;
1348     gMonitorFreeCount += tally;
1349     assert(Self->omFreeCount == tally, "free-count off");
1350     Self->omFreeCount = 0;
1351   }
1352 
1353   if (inUseTail != NULL) {
1354     inUseTail->FreeNext = gOmInUseList;
1355     gOmInUseList = inUseList;
1356     gOmInUseCount += inUseTally;
1357   }
1358 
1359   Thread::muxRelease(&gListLock);
1360   TEVENT(omFlush);
1361 }
1362 
1363 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
1364                                        const oop obj,
1365                                        ObjectSynchronizer::InflateCause cause) {
1366   assert(event != NULL, "invariant");
1367   assert(event->should_commit(), "invariant");
1368   event->set_monitorClass(obj->klass());
1369   event->set_address((uintptr_t)(void*)obj);
1370   event->set_cause((u1)cause);
1371   event->commit();
1372 }
1373 
1374 // Fast path code shared by multiple functions
1375 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
1376   markOop mark = obj->mark();
1377   if (mark->has_monitor()) {
1378     assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1379     assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
1380     return mark->monitor();
1381   }
1382   return ObjectSynchronizer::inflate(Thread::current(),
1383                                      obj,
1384                                      inflate_cause_vm_internal);
1385 }
1386 
ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
                                           oop object,
                                           const InflateCause cause) {
1390 
1391   // Inflate mutates the heap ...
1392   // Relaxing assertion for bug 6320749.
1393   assert(Universe::verify_in_progress() ||
1394          !SafepointSynchronize::is_at_safepoint(), "invariant");
1395 
1396   EventJavaMonitorInflate event;
1397 
1398   for (;;) {
1399     const markOop mark = object->mark();
1400     assert(!mark->has_bias_pattern(), "invariant");
1401 
1402     // The mark can be in one of the following states:
1403     // *  Inflated     - just return
1404     // *  Stack-locked - coerce it to inflated
1405     // *  INFLATING    - busy wait for conversion to complete
1406     // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal.  We should never see this.
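    //
    // For orientation, the low-order mark word bits distinguish these
    // states roughly as follows (see markOop.hpp for the authoritative
    // encoding):
    //   [ptr              | 10]  inflated -- points at the ObjectMonitor
    //   [header           | 01]  neutral (unlocked)
    //   [ptr              | 00]  stack-locked -- points at the on-stack BasicLock
    //   0 (all bits zero)        INFLATING
    //   [tid|epoch|age|1  | 01]  biased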
1408 
1409     // CASE: inflated
1410     if (mark->has_monitor()) {
1411       ObjectMonitor * inf = mark->monitor();
1412       assert(inf->header()->is_neutral(), "invariant");
1413       assert(oopDesc::equals((oop) inf->object(), object), "invariant");
1414       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1415       return inf;
1416     }
1417 
1418     // CASE: inflation in progress - inflating over a stack-lock.
1419     // Some other thread is converting from stack-locked to inflated.
1420     // Only that thread can complete inflation -- other threads must wait.
1421     // The INFLATING value is transient.
1422     // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1423     // We could always eliminate polling by parking the thread on some auxiliary list.
1424     if (mark == markOopDesc::INFLATING()) {
1425       TEVENT(Inflate: spin while INFLATING);
1426       ReadStableMark(object);
1427       continue;
1428     }
1429 
1430     // CASE: stack-locked
1431     // Could be stack-locked either by this thread or by some other thread.
1432     //
1433     // Note that we allocate the objectmonitor speculatively, _before_ attempting
1434     // to install INFLATING into the mark word.  We originally installed INFLATING,
1435     // allocated the objectmonitor, and then finally STed the address of the
1436     // objectmonitor into the mark.  This was correct, but artificially lengthened
1437     // the interval in which INFLATED appeared in the mark, thus increasing
1438     // the odds of inflation contention.
1439     //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval.  A thread can transfer
    // multiple objectmonitors en masse from the global free list to its
    // local free list.  This reduces coherency traffic and lock contention
    // on the global free list.
    // Using such local free lists, it doesn't matter if the omAlloc() call
    // appears before or after the CAS(INFLATING) operation.
1447     // See the comments in omAlloc().
1448 
1449     if (mark->has_locker()) {
1450       ObjectMonitor * m = omAlloc(Self);
1451       // Optimistically prepare the objectmonitor - anticipate successful CAS
1452       // We do this before the CAS in order to minimize the length of time
1453       // in which INFLATING appears in the mark.
1454       m->Recycle();
1455       m->_Responsible  = NULL;
1456       m->_recursions   = 0;
1457       m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class
1458 
1459       markOop cmp = object->cas_set_mark(markOopDesc::INFLATING(), mark);
1460       if (cmp != mark) {
1461         omRelease(Self, m, true);
1462         continue;       // Interference -- just retry
1463       }
1464 
1465       // We've successfully installed INFLATING (0) into the mark-word.
1466       // This is the only case where 0 will appear in a mark-word.
1467       // Only the singular thread that successfully swings the mark-word
1468       // to 0 can perform (or more precisely, complete) inflation.
1469       //
1470       // Why do we CAS a 0 into the mark-word instead of just CASing the
1471       // mark-word from the stack-locked value directly to the new inflated state?
1472       // Consider what happens when a thread unlocks a stack-locked object.
1473       // It attempts to use CAS to swing the displaced header value from the
1474       // on-stack basiclock back into the object header.  Recall also that the
1475       // header value (hashcode, etc) can reside in (a) the object header, or
1476       // (b) a displaced header associated with the stack-lock, or (c) a displaced
1477       // header in an objectMonitor.  The inflate() routine must copy the header
1478       // value from the basiclock on the owner's stack to the objectMonitor, all
1479       // the while preserving the hashCode stability invariants.  If the owner
1480       // decides to release the lock while the value is 0, the unlock will fail
1481       // and control will eventually pass from slow_exit() to inflate.  The owner
1482       // will then spin, waiting for the 0 value to disappear.   Put another way,
1483       // the 0 causes the owner to stall if the owner happens to try to
1484       // drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in-progress.  This protocol avoids races that
      // would otherwise permit hashCode values to change or "flicker" for an object.
1487       // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
1488       // 0 serves as a "BUSY" inflate-in-progress indicator.
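      //
      // Illustrative interleaving (a sketch of the race the 0 prevents):
      //
      //   inflating thread                owning thread
      //   ----------------                -------------
      //   CAS(mark: L -> 0) succeeds
      //                                   fast exit: CAS(mark: L -> dmw)
      //                                   fails (mark is 0); falls into
      //                                   slow_exit() -> inflate() and
      //                                   spins in ReadStableMark()
      //   read dmw via displaced_mark_helper()   (stable while mark == 0)
      //   release_set_mark(encode(m))
      //                                   observes mark != 0 and proceeds
      //                                   against the installed monitor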
1489 
1490 
      // Fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // marker is in the mark word.  Furthermore the owner can't complete
      // an unlock on the object, either.
1495       markOop dmw = mark->displaced_mark_helper();
1496       assert(dmw->is_neutral(), "invariant");
1497 
1498       // Setup monitor fields to proper values -- prepare the monitor
1499       m->set_header(dmw);
1500 
1501       // Optimization: if the mark->locker stack address is associated
1502       // with this thread we could simply set m->_owner = Self.
1503       // Note that a thread can inflate an object
1504       // that it has stack-locked -- as might happen in wait() -- directly
1505       // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
1506       m->set_owner(mark->locker());
1507       m->set_object(object);
1508       // TODO-FIXME: assert BasicLock->dhw != 0.
1509 
1510       // Must preserve store ordering. The monitor state must
1511       // be stable at the time of publishing the monitor address.
1512       guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
1513       object->release_set_mark(markOopDesc::encode(m));
1514 
1515       // Hopefully the performance counters are allocated on distinct cache lines
1516       // to avoid false sharing on MP systems ...
1517       OM_PERFDATA_OP(Inflations, inc());
1518       TEVENT(Inflate: overwrite stacklock);
1519       if (log_is_enabled(Debug, monitorinflation)) {
1520         if (object->is_instance()) {
1521           ResourceMark rm;
1522           log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1523                                       p2i(object), p2i(object->mark()),
1524                                       object->klass()->external_name());
1525         }
1526       }
1527       if (event.should_commit()) {
1528         post_monitor_inflate_event(&event, object, cause);
1529       }
1530       return m;
1531     }
1532 
1533     // CASE: neutral
1534     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1535     // If we know we're inflating for entry it's better to inflate by swinging a
1536     // pre-locked objectMonitor pointer into the object header.   A successful
1537     // CAS inflates the object *and* confers ownership to the inflating thread.
1538     // In the current implementation we use a 2-step mechanism where we CAS()
1539     // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1540     // An inflateTry() method that we could call from fast_enter() and slow_enter()
1541     // would be useful.
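    //
    // A hypothetical inflateTry() (a sketch only -- no such method exists
    // in this file) might look like:
    //
    //   ObjectMonitor* inflateTry(Thread* Self, oop obj) {
    //     markOop mark = obj->mark();
    //     if (!mark->is_neutral()) return NULL;   // caller falls back to inflate()
    //     ObjectMonitor* m = omAlloc(Self);
    //     m->Recycle();
    //     m->set_header(mark);
    //     m->set_owner(Self);                     // pre-locked for Self
    //     m->set_object(obj);
    //     if (obj->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
    //       m->set_object(NULL); m->set_owner(NULL);
    //       omRelease(Self, m, true);
    //       return NULL;                          // interference -- retry
    //     }
    //     return m;                               // inflated *and* owned
    //   }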
1542 
1543     assert(mark->is_neutral(), "invariant");
1544     ObjectMonitor * m = omAlloc(Self);
1545     // prepare m for installation - set monitor to initial state
1546     m->Recycle();
1547     m->set_header(mark);
1548     m->set_owner(NULL);
1549     m->set_object(object);
1550     m->_recursions   = 0;
1551     m->_Responsible  = NULL;
1552     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
1553 
1554     if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
1555       m->set_object(NULL);
1556       m->set_owner(NULL);
1557       m->Recycle();
1558       omRelease(Self, m, true);
1559       m = NULL;
      // Interference -- the markword changed -- just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
      continue;
    }
1565 
1566     // Hopefully the performance counters are allocated on distinct
1567     // cache lines to avoid false sharing on MP systems ...
1568     OM_PERFDATA_OP(Inflations, inc());
1569     TEVENT(Inflate: overwrite neutral);
1570     if (log_is_enabled(Debug, monitorinflation)) {
1571       if (object->is_instance()) {
1572         ResourceMark rm;
1573         log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1574                                     p2i(object), p2i(object->mark()),
1575                                     object->klass()->external_name());
1576       }
1577     }
1578     if (event.should_commit()) {
1579       post_monitor_inflate_event(&event, object, cause);
1580     }
1581     return m;
1582   }
1583 }
1584 
1585 
1586 // Deflate_idle_monitors() is called at all safepoints, immediately
1587 // after all mutators are stopped, but before any objects have moved.
1588 // It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
1590 //
1591 // Beware that we scavenge at *every* stop-the-world point.
1592 // Having a large number of monitors in-circulation negatively
1593 // impacts the performance of some applications (e.g., PointBase).
1594 // Broadly, we want to minimize the # of monitors in circulation.
1595 //
1596 // We have added a flag, MonitorInUseLists, which creates a list
1597 // of active monitors for each thread. deflate_idle_monitors()
1598 // only scans the per-thread in-use lists. omAlloc() puts all
1599 // assigned monitors on the per-thread list. deflate_idle_monitors()
1600 // returns the non-busy monitors to the global free list.
// When a thread dies, omFlush() adds the list of active monitors for
// that thread to a global gOmInUseList, acquiring the global
// list lock. deflate_idle_monitors() acquires the global list lock
// to scan that list and move non-busy monitors to the global free list.
1605 // An alternative could have used a single global in-use list. The
1606 // downside would have been the additional cost of acquiring the global list lock
1607 // for every omAlloc().
1608 //
1609 // Perversely, the heap size -- and thus the STW safepoint rate --
1610 // typically drives the scavenge rate.  Large heaps can mean infrequent GC,
1611 // which in turn can mean large(r) numbers of objectmonitors in circulation.
1612 // This is an unfortunate aspect of this design.
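//
// For orientation, the per-safepoint call sequence is roughly (see
// SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp):
//
//   prepare_deflate_idle_monitors(&counters);         // zero the counters
//   // per JavaThread; a no-op unless MonitorInUseLists is set:
//   deflate_thread_local_monitors(thread, &counters);
//   deflate_idle_monitors(&counters);                 // gOmInUseList or gBlockList
//   finish_deflate_idle_monitors(&counters);          // publish stats/perf data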
1613 
1614 enum ManifestConstants {
1615   ClearResponsibleAtSTW = 0
1616 };
1617 
1618 // Deflate a single monitor if not in-use
1619 // Return true if deflated, false if in-use
1620 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1621                                          ObjectMonitor** freeHeadp,
1622                                          ObjectMonitor** freeTailp) {
1623   bool deflated;
1624   // Normal case ... The monitor is associated with obj.
1625   guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
1626   guarantee(mid == obj->mark()->monitor(), "invariant");
1627   guarantee(mid->header()->is_neutral(), "invariant");
1628 
1629   if (mid->is_busy()) {
1630     if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
1631     deflated = false;
1632   } else {
1633     // Deflate the monitor if it is no longer being used
1634     // It's idle - scavenge and return to the global free list
1635     // plain old deflation ...
1636     TEVENT(deflate_idle_monitors - scavenge1);
1637     if (log_is_enabled(Debug, monitorinflation)) {
1638       if (obj->is_instance()) {
1639         ResourceMark rm;
1640         log_debug(monitorinflation)("Deflating object " INTPTR_FORMAT " , "
1641                                     "mark " INTPTR_FORMAT " , type %s",
1642                                     p2i(obj), p2i(obj->mark()),
1643                                     obj->klass()->external_name());
1644       }
1645     }
1646 
1647     // Restore the header back to obj
1648     obj->release_set_mark(mid->header());
1649     mid->clear();
1650 
1651     assert(mid->object() == NULL, "invariant");
1652 
1653     // Move the object to the working free list defined by freeHeadp, freeTailp
1654     if (*freeHeadp == NULL) *freeHeadp = mid;
1655     if (*freeTailp != NULL) {
1656       ObjectMonitor * prevtail = *freeTailp;
1657       assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
1658       prevtail->FreeNext = mid;
1659     }
1660     *freeTailp = mid;
1661     deflated = true;
1662   }
1663   return deflated;
1664 }
1665 
1666 // Walk a given monitor list, and deflate idle monitors
1667 // The given list could be a per-thread list or a global list
1668 // Caller acquires gListLock.
1669 //
// In the case of parallel processing of thread-local monitor lists,
// the work is done by Threads::parallel_threads_do(), which ensures that
// each Java thread is processed by exactly one worker thread, and
// thus avoids conflicts that would arise if worker threads processed
// the same monitor lists concurrently.
1675 //
1676 // See also ParallelSPCleanupTask and
1677 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
1678 // Threads::parallel_java_threads_do() in thread.cpp.
1679 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
1680                                              ObjectMonitor** freeHeadp,
1681                                              ObjectMonitor** freeTailp) {
1682   ObjectMonitor* mid;
1683   ObjectMonitor* next;
1684   ObjectMonitor* cur_mid_in_use = NULL;
1685   int deflated_count = 0;
1686 
1687   for (mid = *listHeadp; mid != NULL;) {
1688     oop obj = (oop) mid->object();
1689     if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
1690       // if deflate_monitor succeeded,
1691       // extract from per-thread in-use list
1692       if (mid == *listHeadp) {
1693         *listHeadp = mid->FreeNext;
1694       } else if (cur_mid_in_use != NULL) {
1695         cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
1696       }
1697       next = mid->FreeNext;
1698       mid->FreeNext = NULL;  // This mid is current tail in the freeHeadp list
1699       mid = next;
1700       deflated_count++;
1701     } else {
1702       cur_mid_in_use = mid;
1703       mid = mid->FreeNext;
1704     }
1705   }
1706   return deflated_count;
1707 }
1708 
1709 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
1710   counters->nInuse = 0;          // currently associated with objects
1711   counters->nInCirculation = 0;  // extant
1712   counters->nScavenged = 0;      // reclaimed
1713 }
1714 
1715 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
1716   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1717   bool deflated = false;
1718 
1719   ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
1720   ObjectMonitor * freeTailp = NULL;
1721 
1722   TEVENT(deflate_idle_monitors);
  // Prevent omFlush from changing mids in Thread dtors during deflation,
  // and in case the VM thread is acquiring a lock during a safepoint.
  // See e.g. 6320749.
1726   Thread::muxAcquire(&gListLock, "scavenge - return");
1727 
1728   if (MonitorInUseLists) {
1729     // Note: the thread-local monitors lists get deflated in
1730     // a separate pass. See deflate_thread_local_monitors().
1731 
1732     // For moribund threads, scan gOmInUseList
1733     if (gOmInUseList) {
1734       counters->nInCirculation += gOmInUseCount;
1735       int deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
1736       gOmInUseCount -= deflated_count;
1737       counters->nScavenged += deflated_count;
1738       counters->nInuse += gOmInUseCount;
1739     }
1740 
1741   } else {
1742     PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
1743     for (; block != NULL; block = next(block)) {
1744       // Iterate over all extant monitors - Scavenge all idle monitors.
1745       assert(block->object() == CHAINMARKER, "must be a block header");
1746       counters->nInCirculation += _BLOCKSIZE;
1747       for (int i = 1; i < _BLOCKSIZE; i++) {
1748         ObjectMonitor* mid = (ObjectMonitor*)&block[i];
1749         oop obj = (oop)mid->object();
1750 
1751         if (obj == NULL) {
1752           // The monitor is not associated with an object.
          // The monitor should be on either a thread-specific private
          // free list or the global free list.
1755           // obj == NULL IMPLIES mid->is_busy() == 0
1756           guarantee(!mid->is_busy(), "invariant");
1757           continue;
1758         }
1759         deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp);
1760 
1761         if (deflated) {
1762           mid->FreeNext = NULL;
1763           counters->nScavenged++;
1764         } else {
1765           counters->nInuse++;
1766         }
1767       }
1768     }
1769   }
1770 
1771   // Move the scavenged monitors back to the global free list.
1772   if (freeHeadp != NULL) {
1773     guarantee(freeTailp != NULL && counters->nScavenged > 0, "invariant");
1774     assert(freeTailp->FreeNext == NULL, "invariant");
1775     // constant-time list splice - prepend scavenged segment to gFreeList
1776     freeTailp->FreeNext = gFreeList;
1777     gFreeList = freeHeadp;
1778   }
1779   Thread::muxRelease(&gListLock);
1780 
1781 }
1782 
1783 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
1784   gMonitorFreeCount += counters->nScavenged;
1785 
1786   // Consider: audit gFreeList to ensure that gMonitorFreeCount and list agree.
1787 
1788   if (ObjectMonitor::Knob_Verbose) {
1789     tty->print_cr("INFO: Deflate: InCirc=%d InUse=%d Scavenged=%d "
1790                   "ForceMonitorScavenge=%d : pop=%d free=%d",
1791                   counters->nInCirculation, counters->nInuse, counters->nScavenged, ForceMonitorScavenge,
1792                   gMonitorPopulation, gMonitorFreeCount);
1793     tty->flush();
1794   }
1795 
1796   ForceMonitorScavenge = 0;    // Reset
1797 
1798   OM_PERFDATA_OP(Deflations, inc(counters->nScavenged));
1799   OM_PERFDATA_OP(MonExtant, set_value(counters->nInCirculation));
1800 
1801   // TODO: Add objectMonitor leak detection.
1802   // Audit/inventory the objectMonitors -- make sure they're all accounted for.
1803   GVars.stwRandom = os::random();
1804   GVars.stwCycle++;
1805 }
1806 
1807 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
1808   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1809   if (!MonitorInUseLists) return;
1810 
1811   ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
1812   ObjectMonitor * freeTailp = NULL;
1813 
1814   int deflated_count = deflate_monitor_list(thread->omInUseList_addr(), &freeHeadp, &freeTailp);
1815 
1816   Thread::muxAcquire(&gListLock, "scavenge - return");
1817 
1818   // Adjust counters
1819   counters->nInCirculation += thread->omInUseCount;
1820   thread->omInUseCount -= deflated_count;
1821   if (ObjectMonitor::Knob_VerifyInUse) {
1822     verifyInUse(thread);
1823   }
1824   counters->nScavenged += deflated_count;
1825   counters->nInuse += thread->omInUseCount;
1826 
1827   // Move the scavenged monitors back to the global free list.
1828   if (freeHeadp != NULL) {
1829     guarantee(freeTailp != NULL && deflated_count > 0, "invariant");
1830     assert(freeTailp->FreeNext == NULL, "invariant");
1831 
1832     // constant-time list splice - prepend scavenged segment to gFreeList
1833     freeTailp->FreeNext = gFreeList;
1834     gFreeList = freeHeadp;
1835   }
1836   Thread::muxRelease(&gListLock);
1837 }
1838 
1839 // Monitor cleanup on JavaThread::exit
1840 
1841 // Iterate through monitor cache and attempt to release thread's monitors
1842 // Gives up on a particular monitor if an exception occurs, but continues
1843 // the overall iteration, swallowing the exception.
1844 class ReleaseJavaMonitorsClosure: public MonitorClosure {
1845  private:
1846   TRAPS;
1847 
1848  public:
1849   ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
1850   void do_monitor(ObjectMonitor* mid) {
1851     if (mid->owner() == THREAD) {
1852       if (ObjectMonitor::Knob_VerifyMatch != 0) {
1853         ResourceMark rm;
1854         Handle obj(THREAD, (oop) mid->object());
1855         tty->print("INFO: unexpected locked object:");
1856         javaVFrame::print_locked_object_class_name(tty, obj, "locked");
1857         fatal("exiting JavaThread=" INTPTR_FORMAT
1858               " unexpectedly owns ObjectMonitor=" INTPTR_FORMAT,
1859               p2i(THREAD), p2i(mid));
1860       }
1861       (void)mid->complete_exit(CHECK);
1862     }
1863   }
1864 };
1865 
1866 // Release all inflated monitors owned by THREAD.  Lightweight monitors are
1867 // ignored.  This is meant to be called during JNI thread detach which assumes
1868 // all remaining monitors are heavyweight.  All exceptions are swallowed.
1869 // Scanning the extant monitor list can be time consuming.
1870 // A simple optimization is to add a per-thread flag that indicates a thread
1871 // called jni_monitorenter() during its lifetime.
1872 //
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   int tmp = SafepointSynchronize::_safepoint_counter;
//   <code that must not run at safepoint>
//   guarantee(((tmp ^ _safepoint_counter) | (tmp & 1)) == 0);
1878 // Since the tests are extremely cheap we could leave them enabled
1879 // for normal product builds.
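//
// (Reading that guarantee: (tmp & 1) == 0 demands that the sampled
// counter be even -- the counter is odd while a safepoint is in
// progress -- and (tmp ^ _safepoint_counter) == 0 demands that it did
// not change across the region; together, no safepoint ran in between.)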
1880 
1881 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
1882   assert(THREAD == JavaThread::current(), "must be current Java thread");
1883   NoSafepointVerifier nsv;
1884   ReleaseJavaMonitorsClosure rjmc(THREAD);
1885   Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
1886   ObjectSynchronizer::monitors_iterate(&rjmc);
1887   Thread::muxRelease(&gListLock);
1888   THREAD->clear_pending_exception();
1889 }
1890 
1891 const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
1892   switch (cause) {
1893     case inflate_cause_vm_internal:    return "VM Internal";
1894     case inflate_cause_monitor_enter:  return "Monitor Enter";
1895     case inflate_cause_wait:           return "Monitor Wait";
1896     case inflate_cause_notify:         return "Monitor Notify";
1897     case inflate_cause_hash_code:      return "Monitor Hash Code";
1898     case inflate_cause_jni_enter:      return "JNI Monitor Enter";
1899     case inflate_cause_jni_exit:       return "JNI Monitor Exit";
1900     default:
1901       ShouldNotReachHere();
1902   }
1903   return "Unknown";
1904 }
1905 
1906 //------------------------------------------------------------------------------
1907 // Debugging code
1908 
1909 void ObjectSynchronizer::sanity_checks(const bool verbose,
1910                                        const uint cache_line_size,
1911                                        int *error_cnt_ptr,
1912                                        int *warning_cnt_ptr) {
1913   u_char *addr_begin      = (u_char*)&GVars;
1914   u_char *addr_stwRandom  = (u_char*)&GVars.stwRandom;
1915   u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;
1916 
1917   if (verbose) {
1918     tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
1919                   sizeof(SharedGlobals));
1920   }
1921 
1922   uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
1923   if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);
1924 
1925   uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);
1926   if (verbose) {
    tty->print_cr("INFO: offset(hcSequence)=%u", offset_hcSequence);
1928   }
1929 
1930   if (cache_line_size != 0) {
1931     // We were able to determine the L1 data cache line size so
1932     // do some cache line specific sanity checks
1933 
1934     if (offset_stwRandom < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer "
                    "to the struct beginning than a cache line, which "
                    "permits false sharing.");
1938       (*warning_cnt_ptr)++;
1939     }
1940 
1941     if ((offset_hcSequence - offset_stwRandom) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom and "
                    "SharedGlobals.hcSequence fields are closer than a cache "
                    "line, which permits false sharing.");
1945       (*warning_cnt_ptr)++;
1946     }
1947 
1948     if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
                    "to the struct end than a cache line, which permits "
                    "false sharing.");
1952       (*warning_cnt_ptr)++;
1953     }
1954   }
1955 }
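
// A SharedGlobals layout that satisfies the checks above with 64-byte
// cache lines might look like the following (a sketch only -- the real
// definition appears earlier in this file):
//
//   struct SharedGlobals {
//     char         _pad_prefix[64];             // offset(stwRandom) >= 64
//     volatile int stwRandom;
//     volatile int stwCycle;
//     char         _pad1[64 - 2 * sizeof(int)]; // hcSequence a full line away
//     volatile int hcSequence;
//     char         _pad2[64 - sizeof(int)];     // >= one line of tail padding
//   };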
1956 
1957 #ifndef PRODUCT
1958 
1959 // Check if monitor belongs to the monitor cache
1960 // The list is grow-only so it's *relatively* safe to traverse
1961 // the list of extant blocks without taking a lock.
1962 
1963 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
1964   PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
1965   while (block != NULL) {
1966     assert(block->object() == CHAINMARKER, "must be a block header");
1967     if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
1968       address mon = (address)monitor;
1969       address blk = (address)block;
1970       size_t diff = mon - blk;
1971       assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
1972       return 1;
1973     }
1974     block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
1975   }
1976   return 0;
1977 }
1978 
1979 #endif