
src/hotspot/share/runtime/synchronizer.cpp

rev 58110 : v2.09a with 8235795, 8235931 and 8236035 extracted; rebased to jdk-14+28; merge with 8236035.patch.cr1; merge with 8235795.patch.cr1; merge with 8236035.patch.cr2; merge with 8235795.patch.cr2; merge with 8235795.patch.cr3.
rev 58111 : See CR9-to-CR10-changes; merge with jdk-15+11.


  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/vmSymbols.hpp"
  27 #include "logging/log.hpp"
  28 #include "logging/logStream.hpp"
  29 #include "jfr/jfrEvents.hpp"
  30 #include "memory/allocation.inline.hpp"
  31 #include "memory/metaspaceShared.hpp"
  32 #include "memory/padded.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "memory/universe.hpp"
  35 #include "oops/markWord.hpp"
  36 #include "oops/oop.inline.hpp"
  37 #include "runtime/atomic.hpp"
  38 #include "runtime/biasedLocking.hpp"
  39 #include "runtime/handles.inline.hpp"

  40 #include "runtime/interfaceSupport.inline.hpp"
  41 #include "runtime/mutexLocker.hpp"
  42 #include "runtime/objectMonitor.hpp"
  43 #include "runtime/objectMonitor.inline.hpp"
  44 #include "runtime/osThread.hpp"

  45 #include "runtime/safepointVerifiers.hpp"
  46 #include "runtime/sharedRuntime.hpp"
  47 #include "runtime/stubRoutines.hpp"
  48 #include "runtime/synchronizer.hpp"
  49 #include "runtime/thread.inline.hpp"
  50 #include "runtime/timer.hpp"
  51 #include "runtime/vframe.hpp"
  52 #include "runtime/vmThread.hpp"
  53 #include "utilities/align.hpp"
  54 #include "utilities/dtrace.hpp"
  55 #include "utilities/events.hpp"
  56 #include "utilities/preserveException.hpp"
  57 
  58 // The "core" versions of monitor enter and exit reside in this file.
  59 // The interpreter and compilers contain specialized transliterated
  60 // variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
  61 // for instance.  If you make changes here, make sure to modify the
  62 // interpreter, and both C1 and C2 fast-path inline locking code emission.
  63 //
  64 // -----------------------------------------------------------------------------


 101   }
 102 
 103 #else //  ndef DTRACE_ENABLED
 104 
 105 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
 106 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}
 107 
 108 #endif // ndef DTRACE_ENABLED
 109 
 110 // This exists only as a workaround for dtrace bug 6254741
 111 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
 112   DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
 113   return 0;
 114 }
 115 
 116 #define NINFLATIONLOCKS 256
 117 static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];
 118 
 119 // global list of blocks of monitors
 120 PaddedObjectMonitor* ObjectSynchronizer::g_block_list = NULL;



 121 
 122 struct ObjectMonitorListGlobals {
 123   char         _pad_prefix[OM_CACHE_LINE_SIZE];
 124   // These are highly shared list-related variables.
 125   // To avoid false-sharing they need to be the sole occupants of a cache line.
 126 
 127   // Global ObjectMonitor free list. Newly allocated and deflated
 128   // ObjectMonitors are prepended here.
 129   ObjectMonitor* _free_list;
 130   DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
 131 
 132   // Global ObjectMonitor in-use list. When a JavaThread is exiting,
 133   // ObjectMonitors on its per-thread in-use list are prepended here.
 134   ObjectMonitor* _in_use_list;
 135   DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
 136 







 137   int _free_count;    // # on free_list
 138   DEFINE_PAD_MINUS_SIZE(3, OM_CACHE_LINE_SIZE, sizeof(int));
 139 
 140   int _in_use_count;  // # on in_use_list
 141   DEFINE_PAD_MINUS_SIZE(4, OM_CACHE_LINE_SIZE, sizeof(int));
 142 
 143   int _population;    // # Extant -- in circulation
 144   DEFINE_PAD_MINUS_SIZE(5, OM_CACHE_LINE_SIZE, sizeof(int));



 145 };
 146 static ObjectMonitorListGlobals om_list_globals;
 147 
 148 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
 149 
 150 
 151 // =====================> Spin-lock functions
 152 
 153 // ObjectMonitors are not lockable outside of this file. We use spin-locks
 154 // implemented using a bit in the _next_om field instead of the heavier
 155 // weight locking mechanisms for faster list management.
 156 
 157 #define OM_LOCK_BIT 0x1
 158 
 159 // Return true if the ObjectMonitor is locked.
 160 // Otherwise returns false.
 161 static bool is_locked(ObjectMonitor* om) {
 162   return ((intptr_t)om->next_om() & OM_LOCK_BIT) == OM_LOCK_BIT;
 163 }
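
// ---------------------------------------------------------------------------
// Editor's note: the following is a minimal standalone sketch of the low-bit
// spin-lock technique described above; it is NOT HotSpot code. The names
// (Node, kLockBit, node_lock, ...) are invented for illustration. HotSpot's
// om_lock()/om_unlock() apply the same idea to the _next_om field using
// OM_LOCK_BIT.

#include <atomic>
#include <cstdint>

struct Node {
  std::atomic<Node*> next{nullptr};
};

static const uintptr_t kLockBit = 0x1;

static bool node_is_locked(Node* n) {
  return ((uintptr_t)n->next.load(std::memory_order_acquire) & kLockBit) != 0;
}

static void node_lock(Node* n) {
  for (;;) {
    Node* cur = n->next.load(std::memory_order_relaxed);
    if (((uintptr_t)cur & kLockBit) != 0) {
      continue;  // Already locked by another thread: spin.
    }
    Node* tagged = (Node*)((uintptr_t)cur | kLockBit);
    if (n->next.compare_exchange_weak(cur, tagged,
                                      std::memory_order_acquire)) {
      return;  // Lock bit set: n's next pointer is now stable.
    }
  }
}

static void node_unlock(Node* n) {
  Node* cur = n->next.load(std::memory_order_relaxed);
  // Clearing the bit releases the lock; the release store publishes any
  // list edits made while the node was held.
  n->next.store((Node*)((uintptr_t)cur & ~kLockBit), std::memory_order_release);
}
// ---------------------------------------------------------------------------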
 164 


 282       Atomic::add(&om_list_globals._population, _BLOCKSIZE - 1);
 283       break;
 284     }
 285     // Implied else: try it all again
 286   }
 287 
 288   // Second we handle om_list_globals._free_list:
 289   prepend_list_to_common(new_blk + 1, &new_blk[_BLOCKSIZE - 1], _BLOCKSIZE - 1,
 290                          &om_list_globals._free_list, &om_list_globals._free_count);
 291 }
 292 
 293 // Prepend a list of ObjectMonitors to om_list_globals._free_list.
 294 // 'tail' is the last ObjectMonitor in the list and there are 'count'
 295 // on the list. Also updates om_list_globals._free_count.
 296 static void prepend_list_to_global_free_list(ObjectMonitor* list,
 297                                              ObjectMonitor* tail, int count) {
 298   prepend_list_to_common(list, tail, count, &om_list_globals._free_list,
 299                          &om_list_globals._free_count);
 300 }
 301 









 302 // Prepend a list of ObjectMonitors to om_list_globals._in_use_list.
 303 // 'tail' is the last ObjectMonitor in the list and there are 'count'
 304 // on the list. Also updates om_list_globals._in_use_count.
 305 static void prepend_list_to_global_in_use_list(ObjectMonitor* list,
 306                                                ObjectMonitor* tail, int count) {
 307   prepend_list_to_common(list, tail, count, &om_list_globals._in_use_list,
 308                          &om_list_globals._in_use_count);
 309 }
 310 
 311 // Prepend an ObjectMonitor to the specified list. Also updates
 312 // the specified counter.
 313 static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
 314                               int* count_p) {
 315   while (true) {
 316     om_lock(m);  // Lock m so we can safely update its next field.
 317     ObjectMonitor* cur = NULL;
 318     // Lock the list head to guard against races with a list walker
 319     // thread:
 320     if ((cur = get_list_head_locked(list_p)) != NULL) {
 321       // List head is now locked so we can safely switch it.
 322       m->set_next_om(cur);  // m now points to cur (and unlocks m)
 323       Atomic::store(list_p, m);  // Switch list head to unlocked m.
 324       om_unlock(cur);
 325       break;
 326     }
 327     // The list is empty so try to set the list head.
 328     assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
 329     m->set_next_om(cur);  // m now points to NULL (and unlocks m)
 330     if (Atomic::cmpxchg(list_p, cur, m) == cur) {
 331       // List head is now unlocked m.
 332       break;
 333     }
 334     // Implied else: try it all again
 335   }
 336   Atomic::inc(count_p);
 337 }
 338 
 339 // Prepend an ObjectMonitor to a per-thread om_free_list.
 340 // Also updates the per-thread om_free_count.
 341 static void prepend_to_om_free_list(Thread* self, ObjectMonitor* m) {
 342   prepend_to_common(m, &self->om_free_list, &self->om_free_count);
 343 }
 344 
 345 // Prepend an ObjectMonitor to a per-thread om_in_use_list.
 346 // Also updates the per-thread om_in_use_count.
 347 static void prepend_to_om_in_use_list(Thread* self, ObjectMonitor* m) {
 348   prepend_to_common(m, &self->om_in_use_list, &self->om_in_use_count);
 349 }
 350 
 351 // Take an ObjectMonitor from the start of the specified list. Also
 352 // decrements the specified counter. Returns NULL if none are available.
 353 static ObjectMonitor* take_from_start_of_common(ObjectMonitor** list_p,
 354                                                 int* count_p) {
 355   ObjectMonitor* take = NULL;
 356   // Lock the list head to guard against races with a list walker
 357   // thread:
 358   if ((take = get_list_head_locked(list_p)) == NULL) {
 359     return NULL;  // None are available.
 360   }
 361   ObjectMonitor* next = unmarked_next(take);
 362   // Switch locked list head to next (which unlocks the list head, but
 363   // leaves take locked):
 364   Atomic::store(list_p, next);
 365   Atomic::dec(count_p);
 366   // Unlock take, but leave the next value for any lagging list
 367   // walkers. It will get cleaned up when take is prepended to
 368   // the in-use list:
 369   om_unlock(take);
 370   return take;
 371 }
 372 
 373 // Take an ObjectMonitor from the start of the om_list_globals._free_list.
 374 // Also updates om_list_globals._free_count. Returns NULL if none are
 375 // available.
 376 static ObjectMonitor* take_from_start_of_global_free_list() {
 377   return take_from_start_of_common(&om_list_globals._free_list,


 446   }
 447 
 448   // biased locking and any other IMS exception states take the slow-path
 449   return false;
 450 }
 451 
 452 
 453 // The LockNode emitted directly at the synchronization site would have
 454 // been too big if it were to have included support for the cases of inflated
 455 // recursive enter and exit, so they go here instead.
 456 // Note that we can't safely call AsyncPrintJavaStack() from within
 457 // quick_enter() as our thread state remains _in_Java.
 458 
 459 bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
 460                                      BasicLock * lock) {
 461   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 462   assert(self->is_Java_thread(), "invariant");
 463   assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
 464   NoSafepointVerifier nsv;
 465   if (obj == NULL) return false;       // Need to throw NPE


 466   const markWord mark = obj->mark();
 467 
 468   if (mark.has_monitor()) {
 469     ObjectMonitor* const m = mark.monitor();






 470     assert(m->object() == obj, "invariant");
 471     Thread* const owner = (Thread *) m->_owner;
 472 
 473     // Lock contention and Transactional Lock Elision (TLE) diagnostics
 474     // and observability
 475     // Case: light contention possibly amenable to TLE
 476     // Case: TLE inimical operations such as nested/recursive synchronization
 477 
 478     if (owner == self) {
 479       m->_recursions++;
 480       return true;
 481     }
 482 
 483     // This Java Monitor is inflated so obj's header will never be
 484     // displaced to this thread's BasicLock. Make the displaced header
 485     // non-NULL so this BasicLock is not seen as recursive nor as
 486     // being locked. We do this unconditionally so that this thread's
 487     // BasicLock cannot be mis-interpreted by any stack walkers. For
 488     // performance reasons, stack walkers generally first check for
 489     // Biased Locking in the object's header, the second check is for
 490     // stack-locking in the object's header, the third check is for
 491     // recursive stack-locking in the displaced header in the BasicLock,
 492     // and last are the inflated Java Monitor (ObjectMonitor) checks.
 493     lock->set_displaced_header(markWord::unused_mark());
 494 
 495     if (owner == NULL && m->try_set_owner_from(NULL, self) == NULL) {
 496       assert(m->_recursions == 0, "invariant");
 497       return true;
 498     }
 499   }
 500 
 501   // Note that we could inflate in quick_enter.
 502   // This is likely a useful optimization.
 503   // Critically, in quick_enter() we must not:
 504   // -- perform bias revocation, or
 505   // -- block indefinitely, or
 506   // -- reach a safepoint
 507 
 508   return false;        // revert to slow-path
 509 }
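
// ---------------------------------------------------------------------------
// Editor's note: a standalone sketch of the uncontended try-acquire performed
// by quick_enter() above; it is NOT HotSpot code. Observe a NULL owner, then
// claim ownership with a single CAS; any interference sends the caller to the
// slow path. MiniMonitor and try_quick_enter are invented names.

#include <atomic>

struct MiniMonitor {
  std::atomic<void*> owner{nullptr};
};

static bool try_quick_enter(MiniMonitor& m, void* self) {
  if (m.owner.load(std::memory_order_relaxed) != nullptr) {
    return false;  // Contended or owned: take the slow path.
  }
  void* expected = nullptr;
  // Acquire ordering keeps the critical section from floating above the CAS.
  return m.owner.compare_exchange_strong(expected, self,
                                         std::memory_order_acquire);
}
// ---------------------------------------------------------------------------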
 510 
 511 // -----------------------------------------------------------------------------
 512 // Monitor Enter/Exit
 513 // The interpreter and compiler assembly code tries to lock using the fast path
 514 // of this algorithm. Make sure to update that code if the following function is
 515 // changed. The implementation is extremely sensitive to race conditions. Be careful.
 516 
 517 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, TRAPS) {
 518   if (UseBiasedLocking) {


 530     // Anticipate successful CAS -- the ST of the displaced mark must
 531     // be visible <= the ST performed by the CAS.
 532     lock->set_displaced_header(mark);
 533     if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
 534       return;
 535     }
 536     // Fall through to inflate() ...
 537   } else if (mark.has_locker() &&
 538              THREAD->is_lock_owned((address)mark.locker())) {
 539     assert(lock != mark.locker(), "must not re-lock the same lock");
 540     assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
 541     lock->set_displaced_header(markWord::from_pointer(NULL));
 542     return;
 543   }
 544 
 545   // The object header will never be displaced to this lock,
 546   // so it does not matter what the value is, except that it
 547   // must be non-zero to avoid looking like a re-entrant lock,
 548   // and must not look locked either.
 549   lock->set_displaced_header(markWord::unused_mark());
 550   inflate(THREAD, obj(), inflate_cause_monitor_enter)->enter(THREAD);


 551 }
 552 
 553 void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
 554   markWord mark = object->mark();
 555   // We cannot check for Biased Locking if we are racing an inflation.
 556   assert(mark == markWord::INFLATING() ||
 557          !mark.has_bias_pattern(), "should not see bias pattern here");
 558 
 559   markWord dhw = lock->displaced_header();
 560   if (dhw.value() == 0) {
 561     // If the displaced header is NULL, then this exit matches up with
 562     // a recursive enter. No real work to do here except for diagnostics.
 563 #ifndef PRODUCT
 564     if (mark != markWord::INFLATING()) {
 565       // Only do diagnostics if we are not racing an inflation. Simply
 566       // exiting a recursive enter of a Java Monitor that is being
 567       // inflated is safe; see the has_monitor() comment below.
 568       assert(!mark.is_neutral(), "invariant");
 569       assert(!mark.has_locker() ||
 570              THREAD->is_lock_owned((address)mark.locker()), "invariant");


 579         // does not own the Java Monitor.
 580         ObjectMonitor* m = mark.monitor();
 581         assert(((oop)(m->object()))->mark() == mark, "invariant");
 582         assert(m->is_entered(THREAD), "invariant");
 583       }
 584     }
 585 #endif
 586     return;
 587   }
 588 
 589   if (mark == markWord::from_pointer(lock)) {
 590     // If the object is stack-locked by the current thread, try to
 591     // swing the displaced header from the BasicLock back to the mark.
 592     assert(dhw.is_neutral(), "invariant");
 593     if (object->cas_set_mark(dhw, mark) == mark) {
 594       return;
 595     }
 596   }
 597 
 598   // We have to take the slow-path of possible inflation and then exit.
 599   inflate(THREAD, object, inflate_cause_vm_internal)->exit(true, THREAD);


 600 }
 601 
 602 // -----------------------------------------------------------------------------
 603 // Class Loader support to work around deadlocks on the class loader lock objects
 604 // Also used by GC
 605 // complete_exit()/reenter() are used to wait on a nested lock,
 606 // i.e., to give up an outer lock completely and then re-enter it later.
 607 // Used when holding nested locks - lock acquisition order: lock1 then lock2
 608 //  1) complete_exit lock1 - saving recursion count
 609 //  2) wait on lock2
 610 //  3) when notified on lock2, unlock lock2
 611 //  4) reenter lock1 with original recursion count
 612 //  5) lock lock2
 613 // NOTE: must use heavy weight monitor to handle complete_exit/reenter();
 613 // a usage sketch follows reenter() below.
 614 intx ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
 615   if (UseBiasedLocking) {
 616     BiasedLocking::revoke(obj, THREAD);
 617     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
 618   }
 619 
 620   ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
 621 
 622   return monitor->complete_exit(THREAD);

 623 }
 624 
 625 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 626 void ObjectSynchronizer::reenter(Handle obj, intx recursions, TRAPS) {
 627   if (UseBiasedLocking) {
 628     BiasedLocking::revoke(obj, THREAD);
 629     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
 630   }
 631 
 632   ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
 633 
 634   monitor->reenter(recursions, THREAD);
 635 }
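
// Editor's note: a hedged sketch (not present in the source) mapping the
// numbered complete_exit/reenter protocol above onto the entry points in
// this file. Handles lock1/lock2 and the surrounding TRAPS context are
// assumed; exception checking is elided.
//
//   intx rec = ObjectSynchronizer::complete_exit(lock1, THREAD);  // step 1
//   ObjectSynchronizer::wait(lock2, 0, THREAD);                   // step 2
//   // ... notified on lock2; caller unlocks lock2 ...            // step 3
//   ObjectSynchronizer::reenter(lock1, rec, THREAD);              // step 4
//   // ... caller locks lock2 again ...                           // step 5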
 636 // -----------------------------------------------------------------------------
 637 // JNI locks on java objects
 638 // NOTE: must use heavy weight monitor to handle jni monitor enter
 639 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
 640   // the current locking is from JNI instead of Java code
 641   if (UseBiasedLocking) {
 642     BiasedLocking::revoke(obj, THREAD);
 643     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
 644   }
 645   THREAD->set_current_pending_monitor_is_from_java(false);
 646   inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);


 647   THREAD->set_current_pending_monitor_is_from_java(true);
 648 }
 649 
 650 // NOTE: must use heavy weight monitor to handle jni monitor exit
 651 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
 652   if (UseBiasedLocking) {
 653     Handle h_obj(THREAD, obj);
 654     BiasedLocking::revoke(h_obj, THREAD);
 655     obj = h_obj();
 656   }
 657   assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
 658 
 659   ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit);


 660   // If this thread has locked the object, exit the monitor. We
 661   // intentionally do not use CHECK here because we must exit the
 662   // monitor even if an exception is pending.
 663   if (monitor->check_owner(THREAD)) {
 664     monitor->exit(true, THREAD);
 665   }
 666 }
 667 
 668 // -----------------------------------------------------------------------------
 669 // Internal VM locks on java objects
 670 // standard constructor, allows locking failures
 671 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool do_lock) {
 672   _dolock = do_lock;
 673   _thread = thread;
 674   _thread->check_for_valid_safepoint_state();
 675   _obj = obj;
 676 
 677   if (_dolock) {
 678     ObjectSynchronizer::enter(_obj, &_lock, _thread);
 679   }
 680 }
 681 
 682 ObjectLocker::~ObjectLocker() {
 683   if (_dolock) {
 684     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 685   }
 686 }
 687 
 688 
 689 // -----------------------------------------------------------------------------
 690 //  Wait/Notify/NotifyAll
 691 // NOTE: must use heavy weight monitor to handle wait()
 692 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 693   if (UseBiasedLocking) {
 694     BiasedLocking::revoke(obj, THREAD);
 695     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
 696   }
 697   if (millis < 0) {
 698     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 699   }
 700   ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);


 701 
 702   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
 703   monitor->wait(millis, true, THREAD);
 704 
 705   // This dummy call is in place to get around dtrace bug 6254741.  Once
 706   // that's fixed we can uncomment the following line, remove the call
 707   // and change this function back into a "void" func.
 708   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 709   return dtrace_waited_probe(monitor, obj, THREAD);

 710 }
 711 
 712 void ObjectSynchronizer::wait_uninterruptibly(Handle obj, jlong millis, TRAPS) {
 713   if (UseBiasedLocking) {
 714     BiasedLocking::revoke(obj, THREAD);
 715     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
 716   }
 717   if (millis < 0) {
 718     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 719   }
 720   inflate(THREAD, obj(), inflate_cause_wait)->wait(millis, false, THREAD);


 721 }
 722 
 723 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 724   if (UseBiasedLocking) {
 725     BiasedLocking::revoke(obj, THREAD);
 726     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
 727   }
 728 
 729   markWord mark = obj->mark();
 730   if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
 731     return;
 732   }
 733   inflate(THREAD, obj(), inflate_cause_notify)->notify(THREAD);


 734 }
 735 
 736 // NOTE: see comment of notify()
 737 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 738   if (UseBiasedLocking) {
 739     BiasedLocking::revoke(obj, THREAD);
 740     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
 741   }
 742 
 743   markWord mark = obj->mark();
 744   if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
 745     return;
 746   }
 747   inflate(THREAD, obj(), inflate_cause_notify)->notifyAll(THREAD);


 748 }
 749 
 750 // -----------------------------------------------------------------------------
 751 // Hash Code handling
 752 //
 753 // Performance concern:
 754 // OrderAccess::storestore() calls release() which at one time stored 0
 755 // into the global volatile OrderAccess::dummy variable. This store was
 756 // unnecessary for correctness. Many threads storing into a common location
 757 // causes considerable cache migration or "sloshing" on large SMP systems.
 758 // As such, I avoided using OrderAccess::storestore(). In some cases
 759 // OrderAccess::fence() -- which incurs local latency on the executing
 760 // processor -- is a better choice as it scales on SMP systems.
 761 //
 762 // See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
 763 // a discussion of coherency costs. Note that all our current reference
 764 // platforms provide strong ST-ST order, so the issue is moot on IA32,
 765 // x64, and SPARC.
 766 //
 767 // As a general policy we use "volatile" to control compiler-based reordering


 920       Handle hobj(self, obj);
 921       // Relaxing assertion for bug 6320749.
 922       assert(Universe::verify_in_progress() ||
 923              !SafepointSynchronize::is_at_safepoint(),
 924              "biases should not be seen by VM thread here");
 925       BiasedLocking::revoke(hobj, JavaThread::current());
 926       obj = hobj();
 927       assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
 928     }
 929   }
 930 
 931   // hashCode() is a heap mutator ...
 932   // Relaxing assertion for bug 6320749.
 933   assert(Universe::verify_in_progress() || DumpSharedSpaces ||
 934          !SafepointSynchronize::is_at_safepoint(), "invariant");
 935   assert(Universe::verify_in_progress() || DumpSharedSpaces ||
 936          self->is_Java_thread() , "invariant");
 937   assert(Universe::verify_in_progress() || DumpSharedSpaces ||
 938          ((JavaThread *)self)->thread_state() != _thread_blocked, "invariant");
 939 

 940   ObjectMonitor* monitor = NULL;
 941   markWord temp, test;
 942   intptr_t hash;
 943   markWord mark = read_stable_mark(obj);
 944 
 945   // object should remain ineligible for biased locking
 946   assert(!mark.has_bias_pattern(), "invariant");
 947 
 948   if (mark.is_neutral()) {            // if this is a normal header
 949     hash = mark.hash();
 950     if (hash != 0) {                  // if it has a hash, just return it
 951       return hash;
 952     }
 953     hash = get_next_hash(self, obj);  // get a new hash
 954     temp = mark.copy_set_hash(hash);  // merge the hash into header
 955                                       // try to install the hash
 956     test = obj->cas_set_mark(temp, mark);
 957     if (test == mark) {               // if the hash was installed, return it
 958       return hash;
 959     }
 960     // Failed to install the hash. It could be that another thread
 961     // installed the hash just before our attempt or inflation has
 962     // occurred or... so we fall thru to inflate the monitor for
 963     // stability and then install the hash.
 964   } else if (mark.has_monitor()) {
 965     monitor = mark.monitor();






 966     temp = monitor->header();
 967     assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
 968     hash = temp.hash();
 969     if (hash != 0) {                  // if it has a hash, just return it
 970       return hash;
 971     }
 972     // Fall thru so we only have one place that installs the hash in
 973     // the ObjectMonitor.
 974   } else if (self->is_lock_owned((address)mark.locker())) {
 975     // This is a stack lock owned by the calling thread so fetch the
 976     // displaced markWord from the BasicLock on the stack.
 977     temp = mark.displaced_mark_helper();
 978     assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
 979     hash = temp.hash();
 980     if (hash != 0) {                  // if it has a hash, just return it
 981       return hash;
 982     }
 983     // WARNING:
 984     // The displaced header in the BasicLock on a thread's stack
 985     // is strictly immutable. It CANNOT be changed in ANY case.
 986     // So we have to inflate the stack lock into an ObjectMonitor
 987     // even if the current thread owns the lock. The BasicLock on
 988     // a thread's stack can be asynchronously read by other threads
 989     // during an inflate() call so any change to that stack memory
 990     // may not propagate to other threads correctly.
 991   }
 992 
 993   // Inflate the monitor to set the hash.
 994   monitor = inflate(self, obj, inflate_cause_hash_code);


 995   // Load ObjectMonitor's header/dmw field and see if it has a hash.
 996   mark = monitor->header();
 997   assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
 998   hash = mark.hash();
 999   if (hash == 0) {                    // if it does not have a hash
1000     hash = get_next_hash(self, obj);  // get a new hash
1001     temp = mark.copy_set_hash(hash);  // merge the hash into header
1002     assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1003     uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
1004     test = markWord(v);
1005     if (test != mark) {
1006       // The attempt to update the ObjectMonitor's header/dmw field
1007       // did not work. This can happen if another thread managed to
1008       // merge in the hash just before our cmpxchg().
1009       // If we add any new usages of the header/dmw field, this code
1010       // will need to be updated.
1011       hash = test.hash();
1012       assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1013       assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1014     }
1015   }
1016   // We finally get the hash.
1017   return hash;

1018 }
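
// ---------------------------------------------------------------------------
// Editor's note: a standalone sketch of the install-once hash protocol used
// by FastHashCode() above; it is NOT HotSpot code. Compute a candidate,
// publish it with a single CAS, and on failure adopt the winner's non-zero
// value. lazy_hash and slot are invented names; candidate must be non-zero.

#include <atomic>
#include <cstdint>

static uint32_t lazy_hash(std::atomic<uint32_t>& slot, uint32_t candidate) {
  uint32_t cur = slot.load(std::memory_order_acquire);
  if (cur != 0) {
    return cur;                        // A hash is already installed.
  }
  uint32_t expected = 0;
  if (slot.compare_exchange_strong(expected, candidate)) {
    return candidate;                  // We won the race to install it.
  }
  return expected;                     // Lost the race; return the winner's.
}
// ---------------------------------------------------------------------------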
1019 
1020 // Deprecated -- use FastHashCode() instead.
1021 
1022 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
1023   return FastHashCode(Thread::current(), obj());
1024 }
1025 
1026 
1027 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
1028                                                    Handle h_obj) {
1029   if (UseBiasedLocking) {
1030     BiasedLocking::revoke(h_obj, thread);
1031     assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
1032   }
1033 
1034   assert(thread == JavaThread::current(), "Can only be called on current thread");
1035   oop obj = h_obj();
1036 

1037   markWord mark = read_stable_mark(obj);
1038 
1039   // Uncontended case, header points to stack
1040   if (mark.has_locker()) {
1041     return thread->is_lock_owned((address)mark.locker());
1042   }
1043   // Contended case, header points to ObjectMonitor (tagged pointer)
1044   if (mark.has_monitor()) {
1045     ObjectMonitor* monitor = mark.monitor();
1046     return monitor->is_entered(thread) != 0;






1047   }
1048   // Unlocked case, header in place
1049   assert(mark.is_neutral(), "sanity check");
1050   return false;

1051 }
1052 
1053 // Be aware that this method can revoke the bias of the lock object.
1054 // This method queries the ownership of the lock handle specified by 'h_obj'.
1055 // If the current thread owns the lock, it returns owner_self. If no
1056 // thread owns the lock, it returns owner_none. Otherwise, it will return
1057 // owner_other.
1058 ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
1059 (JavaThread *self, Handle h_obj) {
1060   // The caller must beware this method can revoke bias, and
1061   // revocation can result in a safepoint.
1062   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
1063   assert(self->thread_state() != _thread_blocked, "invariant");
1064 
1065   // Possible mark states: neutral, biased, stack-locked, inflated
1066 
1067   if (UseBiasedLocking && h_obj()->mark().has_bias_pattern()) {
1068     // CASE: biased
1069     BiasedLocking::revoke(h_obj, self);
1070     assert(!h_obj->mark().has_bias_pattern(),
1071            "biases should be revoked by now");
1072   }
1073 
1074   assert(self == JavaThread::current(), "Can only be called on current thread");
1075   oop obj = h_obj();


1076   markWord mark = read_stable_mark(obj);
1077 
1078   // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
1079   if (mark.has_locker()) {
1080     return self->is_lock_owned((address)mark.locker()) ?
1081       owner_self : owner_other;
1082   }
1083 
1084   // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
1085   // The Object:ObjectMonitor relationship is stable as long as we're
1086   // not at a safepoint.
1087   if (mark.has_monitor()) {
1088     void* owner = mark.monitor()->_owner;







1089     if (owner == NULL) return owner_none;
1090     return (owner == self ||
1091             self->is_lock_owned((address)owner)) ? owner_self : owner_other;
1092   }
1093 
1094   // CASE: neutral
1095   assert(mark.is_neutral(), "sanity check");
1096   return owner_none;           // it's unlocked

1097 }
1098 
1099 // FIXME: jvmti should call this
1100 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
1101   if (UseBiasedLocking) {
1102     if (SafepointSynchronize::is_at_safepoint()) {
1103       BiasedLocking::revoke_at_safepoint(h_obj);
1104     } else {
1105       BiasedLocking::revoke(h_obj, JavaThread::current());
1106     }
1107     assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
1108   }
1109 
1110   oop obj = h_obj();
1111   address owner = NULL;
1112 


1113   markWord mark = read_stable_mark(obj);
1114 
1115   // Uncontended case, header points to stack
1116   if (mark.has_locker()) {
1117     owner = (address) mark.locker();
1118   }
1119 
1120   // Contended case, header points to ObjectMonitor (tagged pointer)
1121   else if (mark.has_monitor()) {
1122     ObjectMonitor* monitor = mark.monitor();






1123     assert(monitor != NULL, "monitor should be non-null");
1124     owner = (address) monitor->owner();
1125   }
1126 
1127   if (owner != NULL) {
1128     // owning_thread_from_monitor_owner() may also return NULL here
1129     return Threads::owning_thread_from_monitor_owner(t_list, owner);
1130   }
1131 
1132   // Unlocked case, header in place
1133   // Cannot have assertion since this object may have been
1134   // locked by another thread when reaching here.
1135   // assert(mark.is_neutral(), "sanity check");
1136 
1137   return NULL;

1138 }
1139 
1140 // Visitors ...
1141 
1142 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
1143   PaddedObjectMonitor* block = Atomic::load(&g_block_list);
1144   while (block != NULL) {
1145     assert(block->object() == CHAINMARKER, "must be a block header");
1146     for (int i = _BLOCKSIZE - 1; i > 0; i--) {
1147       ObjectMonitor* mid = (ObjectMonitor *)(block + i);
1148       oop object = (oop)mid->object();
1149       if (object != NULL) {


1150         // Only process with closure if the object is set.


1151         closure->do_monitor(mid);
1152       }
1153     }
1154     // unmarked_next() is not needed with g_block_list (no locking
1155     // used with block linkage _next_om fields).
1156     block = (PaddedObjectMonitor*)block->next_om();
1157   }
1158 }
1159 
1160 static bool monitors_used_above_threshold() {
1161   int population = Atomic::load(&om_list_globals._population);
1162   if (population == 0) {
1163     return false;
1164   }
1165   if (MonitorUsedDeflationThreshold > 0) {
1166     int monitors_used = population - Atomic::load(&om_list_globals._free_count);

1167     int monitor_usage = (monitors_used * 100LL) / population;
1168     return monitor_usage > MonitorUsedDeflationThreshold;
1169   }
1170   return false;
1171 }
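
// Editor's note: a worked example, not in the original. Assume
// _population == 1000, _free_count == 100 and
// MonitorUsedDeflationThreshold == 90:
//   monitors_used = 1000 - 100 = 900
//   monitor_usage = (900 * 100) / 1000 = 90
// 90 > 90 is false, so no cleanup is requested; usage must reach 91
// (at least 910 in-use monitors here) before this returns true.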
1172 
1173 // Returns true if MonitorBound is set (> 0) and if the specified
1174 // cnt is > MonitorBound. Otherwise returns false.
1175 static bool is_MonitorBound_exceeded(const int cnt) {
1176   const int mx = MonitorBound;
1177   return mx > 0 && cnt > mx;
1178 }
1179 
1180 bool ObjectSynchronizer::is_cleanup_needed() {
1181   if (monitors_used_above_threshold()) {
1182     // Too many monitors in use.



1183     return true;
1184   }
1185   return needs_monitor_scavenge();
1186 }
1187 
1188 bool ObjectSynchronizer::needs_monitor_scavenge() {
1189   if (Atomic::load(&_forceMonitorScavenge) == 1) {
1190     log_info(monitorinflation)("Monitor scavenge needed, triggering safepoint cleanup.");
1191     return true;
1192   }
1193   return false;
1194 }
1195 
1196 void ObjectSynchronizer::oops_do(OopClosure* f) {
1197   // We only scan the global used list here (for moribund threads), and
1198   // the thread-local monitors in Thread::oops_do().
1199   global_used_oops_do(f);
1200 }
1201 
1202 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
1203   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1204   list_oops_do(Atomic::load(&om_list_globals._in_use_list), f);
1205 }
1206 
1207 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
1208   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1209   list_oops_do(thread->om_in_use_list, f);
1210 }
1211 
1212 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
1213   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1214   // The oops_do() phase does not overlap with monitor deflation
1215   // so no need to lock ObjectMonitors for the list traversal.


1216   for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
1217     if (mid->object() != NULL) {
1218       f->do_oop((oop*)mid->object_addr());
1219     }
1220   }
1221 }
1222 
1223 
1224 // -----------------------------------------------------------------------------
1225 // ObjectMonitor Lifecycle
1226 // -----------------------
1227 // Inflation unlinks monitors from om_list_globals._free_list or a per-thread
1228 // free list and associates them with objects. Deflation -- which occurs at
1229 // STW-time -- disassociates idle monitors from objects.
1230 // Such scavenged monitors are returned to the om_list_globals._free_list.
1231 //
1232 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
1233 //
1234 // Lifecycle:
1235 // --   unassigned and on the om_list_globals._free_list
1236 // --   unassigned and on a per-thread free list
1237 // --   assigned to an object.  The object is inflated and the mark refers
1238 //      to the ObjectMonitor.
1239 
1240 
1241 // Constraining monitor pool growth via MonitorBound ...
1242 //
1243 // If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
1244 //

1245 // The monitor pool is grow-only.  We scavenge at STW safepoint-time, but
1246 // the rate of scavenging is driven primarily by GC.  As such, we can find
1247 // an inordinate number of monitors in circulation.
1248 // To avoid that scenario we can artificially induce a STW safepoint
1249 // if the pool appears to be growing past some reasonable bound.
1250 // Generally we favor time in space-time tradeoffs, but as there's no
1251 // natural back-pressure on the # of extant monitors we need to impose some
1252 // type of limit.  Beware that if MonitorBound is set to too low a value
1253 // we could just loop. In addition, if MonitorBound is set to a low value
1254 // we'll incur more safepoints, which are harmful to performance.
1255 // See also: GuaranteedSafepointInterval
1256 //
1257 // If MonitorBound is set, the boundary applies to

1258 //     (om_list_globals._population - om_list_globals._free_count)
1259 // i.e., if there are not enough ObjectMonitors on the global free list,
1260 // then a safepoint deflation is induced. Picking a good MonitorBound value
1261 // is non-trivial.
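
// Editor's note: a worked example, not in the original. Assume
// MonitorBound == 4096 with _population == 5000 and _free_count == 800:
//   5000 - 800 = 4200 > 4096
// so is_MonitorBound_exceeded() returns true and om_alloc() will call
// InduceScavenge() to request a cleanup safepoint.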
1262 
1263 static void InduceScavenge(Thread* self, const char * Whence) {


1264   // Induce STW safepoint to trim monitors
1265   // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
1266   // More precisely, trigger a cleanup safepoint as the number
1267   // of active monitors passes the specified threshold.
1268   // TODO: assert thread state is reasonable
1269 
1270   if (Atomic::xchg(&_forceMonitorScavenge, 1) == 0) {
1271     VMThread::check_for_forced_cleanup();
1272   }
1273 }
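
// ---------------------------------------------------------------------------
// Editor's note: a standalone sketch of the fire-once trigger above; it is
// NOT HotSpot code. An atomic exchange returns the previous value, so exactly
// one of any number of racing requesters observes 0 and performs the
// expensive notification. The names here are invented.

#include <atomic>

static std::atomic<int> g_scavenge_requested{0};

static void request_scavenge(void (*notify_vm)()) {
  if (g_scavenge_requested.exchange(1) == 0) {
    notify_vm();  // Only the first requester gets here.
  }
}
// ---------------------------------------------------------------------------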
1274 
1275 ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
1276   // A large MAXPRIVATE value reduces both list lock contention
1277   // and list coherency traffic, but also tends to increase the
1278   // number of ObjectMonitors in circulation as well as the STW
1279   // scavenge costs.  As usual, we lean toward time in space-time
1280   // tradeoffs.
1281   const int MAXPRIVATE = 1024;
1282   NoSafepointVerifier nsv;
1283 
1284   stringStream ss;
1285   for (;;) {
1286     ObjectMonitor* m;
1287 
1288     // 1: try to allocate from the thread's local om_free_list.
1289     // Threads will attempt to allocate first from their local list, then
1290     // from the global list, and only after those attempts fail will the
1291     // thread attempt to instantiate new monitors. Thread-local free lists
1292     // improve allocation latency, as well as reducing coherency traffic
1293     // on the shared global list.
1294     m = take_from_start_of_om_free_list(self);
1295     if (m != NULL) {
1296       guarantee(m->object() == NULL, "invariant");

1297       prepend_to_om_in_use_list(self, m);
1298       return m;
1299     }
1300 
1301     // 2: try to allocate from the global om_list_globals._free_list
1302     // If we're using thread-local free lists then try
1303     // to reprovision the caller's free list.
1304     if (Atomic::load(&om_list_globals._free_list) != NULL) {
1305       // Reprovision the thread's om_free_list.
1306       // Use bulk transfers to reduce the allocation rate and heat
1307       // on various locks.
1308       for (int i = self->om_free_provision; --i >= 0;) {
1309         ObjectMonitor* take = take_from_start_of_global_free_list();
1310         if (take == NULL) {
1311           break;  // No more are available.
1312         }
1313         guarantee(take->object() == NULL, "invariant");
1314         take->Recycle();
1315         om_release(self, take, false);
1316       }
1317       self->om_free_provision += 1 + (self->om_free_provision / 2);
1318       if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
1319 
1320       if (is_MonitorBound_exceeded(Atomic::load(&om_list_globals._population) -

1321                                    Atomic::load(&om_list_globals._free_count))) {
1322         // Not enough ObjectMonitors on the global free list.
1323         // We can't safely induce a STW safepoint from om_alloc() as our thread
1324         // state may not be appropriate for such activities and callers may hold
1325         // naked oops, so instead we defer the action.
1326         InduceScavenge(self, "om_alloc");
1327       }
1328       continue;
1329     }
1330 
1331     // 3: allocate a block of new ObjectMonitors
1332     // Both the local and global free lists are empty -- resort to malloc().
1333     // In the current implementation ObjectMonitors are TSM - immortal.
1334     // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
1335     // each ObjectMonitor to start at the beginning of a cache line,
1336     // so we use align_up().
1337     // A better solution would be to use C++ placement-new.
1338     // BEWARE: As it stands currently, we don't run the ctors!
1339     assert(_BLOCKSIZE > 1, "invariant");
1340     size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
1341     PaddedObjectMonitor* temp;
1342     size_t aligned_size = neededsize + (OM_CACHE_LINE_SIZE - 1);
1343     void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size, mtInternal);
1344     temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, OM_CACHE_LINE_SIZE);
1345     (void)memset((void *) temp, 0, neededsize);
1346 
1347     // Format the block.
1348     // Initialize the linked list; each monitor points to its next,
1349     // forming the singly linked free list. The very first monitor
1350     // will point to the next block, which forms the block list.
1351     // The trick of using the 1st element in the block as g_block_list
1352     // linkage should be reconsidered.  A better implementation would
1353     // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
1354 
1355     for (int i = 1; i < _BLOCKSIZE; i++) {
1356       temp[i].set_next_om((ObjectMonitor*)&temp[i + 1]);

1357     }
1358 
1359     // terminate the last monitor as the end of the list
1360     temp[_BLOCKSIZE - 1].set_next_om((ObjectMonitor*)NULL);
1361 
1362     // Element [0] is reserved for global list linkage
1363     temp[0].set_object(CHAINMARKER);
1364 
1365     // Consider carving out this thread's current request from the
1366     // block in hand.  This avoids some lock traffic and redundant
1367     // list activity.
1368 
1369     prepend_block_to_lists(temp);
1370   }
1371 }
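
// ---------------------------------------------------------------------------
// Editor's note: a standalone sketch of the over-allocate-then-align trick
// used in step 3 of om_alloc() above; it is NOT HotSpot code. Request
// (payload + line - 1) bytes, then round the pointer up so the block starts
// on a cache-line boundary. 'line' must be a power of two. HotSpot never
// frees these blocks (type-stable memory); a standalone version must free
// the raw pointer, so it is returned via raw_out.

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>

static void* alloc_cache_aligned(size_t payload, size_t line, void** raw_out) {
  void* raw = std::malloc(payload + line - 1);
  if (raw == nullptr) return nullptr;
  uintptr_t aligned = ((uintptr_t)raw + line - 1) & ~(uintptr_t)(line - 1);
  std::memset((void*)aligned, 0, payload);  // Zero only the usable payload.
  *raw_out = raw;   // Free this pointer, not the aligned one.
  return (void*)aligned;
}
// ---------------------------------------------------------------------------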
1372 
1373 // Place "m" on the caller's private per-thread om_free_list.
1374 // In practice there's no need to clamp or limit the number of
1375 // monitors on a thread's om_free_list as the only non-allocation time
1376 // we'll call om_release() is to return a monitor to the free list after
1377 // a CAS attempt failed. This doesn't allow unbounded #s of monitors to
1378 // accumulate on a thread's free list.
1379 //
1380 // Key constraint: all ObjectMonitors on a thread's free list and the global
1381 // free list must have their object field set to null. This prevents the
1382 // scavenger -- deflate_monitor_list() -- from reclaiming them while we
1383 // are trying to release them.
1384 
1385 void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
1386                                     bool from_per_thread_alloc) {
1387   guarantee(m->header().value() == 0, "invariant");
1388   guarantee(m->object() == NULL, "invariant");
1389   NoSafepointVerifier nsv;
1390 
1391   stringStream ss;
1392   guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
1393             "%s, recursions=" INTX_FORMAT, m->is_busy_to_string(&ss),
1394             m->_recursions);

1395   // _next_om is used for both per-thread in-use and free lists so
1396   // we have to remove 'm' from the in-use list first (as needed).
1397   if (from_per_thread_alloc) {
1398     // Need to remove 'm' from om_in_use_list.
1399     ObjectMonitor* mid = NULL;
1400     ObjectMonitor* next = NULL;
1401 
1402     // This list walk can only race with another list walker since
1403     // deflation can only happen at a safepoint so we don't have to
1404     // worry about an ObjectMonitor being removed from this list
1405     // while we are walking it.
1406 
1407     // Lock the list head to avoid racing with another list walker.

1408     if ((mid = get_list_head_locked(&self->om_in_use_list)) == NULL) {
1409       fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self));
1410     }
1411     next = unmarked_next(mid);
1412     if (m == mid) {
1413       // First special case:
1414       // 'm' matches mid, is the list head and is locked. Switch the list
1415       // head to next which unlocks the list head, but leaves the extracted
1416       // mid locked:
1417       Atomic::store(&self->om_in_use_list, next);
1418     } else if (m == next) {
1419       // Second special case:
1420       // 'm' matches next after the list head and we already have the list
1421       // head locked so set mid to what we are extracting:
1422       mid = next;
1423       // Lock mid to prevent races with a list walker:


1424       om_lock(mid);
1425       // Update next to what follows mid (if anything):
1426       next = unmarked_next(mid);
1427       // Switch next after the list head to new next which unlocks the
1428       // list head, but leaves the extracted mid locked:
1429       self->om_in_use_list->set_next_om(next);
1430     } else {
1431       // We have to search the list to find 'm'.
1432       om_unlock(mid);  // unlock the list head
1433       guarantee(next != NULL, "thread=" INTPTR_FORMAT ": om_in_use_list=" INTPTR_FORMAT
1434                 " is too short.", p2i(self), p2i(self->om_in_use_list));
1435       // Our starting anchor is next after the list head which is the
1436       // last ObjectMonitor we checked:
1437       ObjectMonitor* anchor = next;





1438       while ((mid = unmarked_next(anchor)) != NULL) {
1439         if (m == mid) {
1440           // We found 'm' on the per-thread in-use list so extract it.
1441           om_lock(anchor);  // Lock the anchor so we can safely modify it.
1442           // Update next to what follows mid (if anything):
1443           next = unmarked_next(mid);
1444           // Switch next after the anchor to new next which unlocks the
1445           // anchor, but leaves the extracted mid locked:
1446           anchor->set_next_om(next);
1447           break;
1448         } else {
1449           anchor = mid;






1450         }
1451       }
1452     }
1453 
1454     if (mid == NULL) {
1455       // Reached end of the list and didn't find 'm' so:
1456       fatal("thread=" INTPTR_FORMAT " must find m=" INTPTR_FORMAT " on om_in_use_list="
1457             INTPTR_FORMAT, p2i(self), p2i(m), p2i(self->om_in_use_list));
1458     }
1459 
1460     // At this point mid is disconnected from the in-use list so
1461     // its lock no longer has any effects on the in-use list.
1462     Atomic::dec(&self->om_in_use_count);
1463     // Unlock mid, but leave the next value for any lagging list
1464     // walkers. It will get cleaned up when mid is prepended to
1465     // the thread's free list:
1466     om_unlock(mid);
1467   }
1468 
1469   prepend_to_om_free_list(self, m);

1470 }
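
// ---------------------------------------------------------------------------
// Editor's note: a standalone, single-threaded sketch of the three-case
// unlink performed by om_release() above; it is NOT HotSpot code. The real
// code additionally spin-locks the list head and each anchor (see the
// lock-bit sketch earlier); locking is elided here so the case structure
// stays visible. All names are invented.

#include <cstddef>

struct Mon {
  Mon* next = nullptr;
};

static bool unlink_mon(Mon** head, Mon* m) {
  if (*head == nullptr) return false;
  if (*head == m) {                // Case 1: m is the list head.
    *head = m->next;
    return true;
  }
  if ((*head)->next == m) {        // Case 2: m directly follows the head.
    (*head)->next = m->next;
    return true;
  }
  Mon* anchor = (*head)->next;     // Case 3: search from the second node.
  while (anchor != nullptr && anchor->next != m) {
    anchor = anchor->next;
  }
  if (anchor == nullptr) return false;  // Not found (real code calls fatal()).
  anchor->next = m->next;
  return true;
}
// ---------------------------------------------------------------------------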
1471 
1472 // Return ObjectMonitors on a moribund thread's free and in-use
1473 // lists to the appropriate global lists. The ObjectMonitors on the
1474 // per-thread in-use list may still be in use by other threads.
1475 //
1476 // We currently call om_flush() from Threads::remove() before the
1477 // thread has been excised from the thread list and is no longer a
1478 // mutator. This means that om_flush() cannot run concurrently with
1479 // a safepoint and interleave with deflate_idle_monitors(). In
1480 // particular, this ensures that the thread's in-use monitors are
1481 // scanned by a GC safepoint, either via Thread::oops_do() (before
1482 // om_flush() is called) or via ObjectSynchronizer::oops_do() (after
1483 // om_flush() is called).





1484 
1485 void ObjectSynchronizer::om_flush(Thread* self) {
1486   // Process the per-thread in-use list first to be consistent.
1487   int in_use_count = 0;
1488   ObjectMonitor* in_use_list = NULL;
1489   ObjectMonitor* in_use_tail = NULL;
1490   NoSafepointVerifier nsv;
1491 
1492   // This function can race with a list walker thread so we lock the
1493   // list head to prevent confusion.



1494   if ((in_use_list = get_list_head_locked(&self->om_in_use_list)) != NULL) {
1495     // At this point, we have locked the in-use list head so a racing
1496     // thread cannot come in after us. However, a racing thread could
1497     // be ahead of us; we'll detect that and delay to let it finish.
1498     //
1499     // The thread is going away, however the ObjectMonitors on the
1500     // om_in_use_list may still be in-use by other threads. Link
1501     // them to in_use_tail, which will be linked into the global
1502     // in-use list (om_list_globals._in_use_list) below.
1503     //
1504     // Account for the in-use list head before the loop since it is
1505     // already locked (by this thread):
1506     in_use_tail = in_use_list;
1507     in_use_count++;
1508     for (ObjectMonitor* cur_om = unmarked_next(in_use_list); cur_om != NULL; cur_om = unmarked_next(cur_om)) {
1509       if (is_locked(cur_om)) {
1510         // cur_om is locked so there must be a racing walker thread ahead
1511         // of us so we'll give it a chance to finish.
1512         while (is_locked(cur_om)) {
1513           os::naked_short_sleep(1);
1514         }
1515       }
1516       in_use_tail = cur_om;
1517       in_use_count++;

1518     }
1519     guarantee(in_use_tail != NULL, "invariant");
1520     int l_om_in_use_count = Atomic::load(&self->om_in_use_count);
1521     assert(l_om_in_use_count == in_use_count, "in-use counts don't match: "
1522           "l_om_in_use_count=%d, in_use_count=%d", l_om_in_use_count, in_use_count);
1523     Atomic::store(&self->om_in_use_count, 0);
1524     // Clear the in-use list head (which also unlocks it):
1525     Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL);
1526     om_unlock(in_use_list);
1527   }
1528 
1529   int free_count = 0;
1530   ObjectMonitor* free_list = NULL;
1531   ObjectMonitor* free_tail = NULL;
1532   // This function can race with a list walker thread so we lock the
1533   // list head to prevent confusion.
1534   if ((free_list = get_list_head_locked(&self->om_free_list)) != NULL) {
1535     // At this point, we have locked the free list head so a racing
1536     // thread cannot come in after us. However, a racing thread could
1537     // be ahead of us; we'll detect that and delay to let it finish.
1538     //
1539     // The thread is going away. Set 'free_tail' to the last per-thread free
1540     // monitor which will be linked to om_list_globals._free_list below.
1541     //
1542     // Account for the free list head before the loop since it is
1543     // already locked (by this thread):
1544     free_tail = free_list;
1545     free_count++;
1546     for (ObjectMonitor* s = unmarked_next(free_list); s != NULL; s = unmarked_next(s)) {
1547       if (is_locked(s)) {
1548         // s is locked so there must be a racing walker thread ahead
1549         // of us so we'll give it a chance to finish.
1550         while (is_locked(s)) {
1551           os::naked_short_sleep(1);
1552         }
1553       }
1554       free_tail = s;
1555       free_count++;
1556       guarantee(s->object() == NULL, "invariant");
1557       stringStream ss;
1558       guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
1559     }
1560     guarantee(free_tail != NULL, "invariant");
1561     int l_om_free_count = Atomic::load(&self->om_free_count);
1562     assert(l_om_free_count == free_count, "free counts don't match: "
1563            "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count);
1564     Atomic::store(&self->om_free_count, 0);
1565     Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL);
1566     om_unlock(free_list);
1567   }
1568 
1569   if (free_tail != NULL) {
1570     prepend_list_to_global_free_list(free_list, free_tail, free_count);
1571   }
1572 
1573   if (in_use_tail != NULL) {
1574     prepend_list_to_global_in_use_list(in_use_list, in_use_tail, in_use_count);
1575   }
1576 
1577   LogStreamHandle(Debug, monitorinflation) lsh_debug;
1578   LogStreamHandle(Info, monitorinflation) lsh_info;
1579   LogStream* ls = NULL;
1580   if (log_is_enabled(Debug, monitorinflation)) {
1581     ls = &lsh_debug;
1582   } else if ((free_count != 0 || in_use_count != 0) &&


1585   }
1586   if (ls != NULL) {
1587     ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d"
1588                  ", in_use_count=%d" ", om_free_provision=%d",
1589                  p2i(self), free_count, in_use_count, self->om_free_provision);
1590   }
1591 }
1592 
1593 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
1594                                        const oop obj,
1595                                        ObjectSynchronizer::InflateCause cause) {
1596   assert(event != NULL, "invariant");
1597   assert(event->should_commit(), "invariant");
1598   event->set_monitorClass(obj->klass());
1599   event->set_address((uintptr_t)(void*)obj);
1600   event->set_cause((u1)cause);
1601   event->commit();
1602 }
1603 
1604 // Fast path code shared by multiple functions
1605 void ObjectSynchronizer::inflate_helper(oop obj) {

1606   markWord mark = obj->mark();
1607   if (mark.has_monitor()) {
1608     assert(ObjectSynchronizer::verify_objmon_isinpool(mark.monitor()), "monitor is invalid");
1609     assert(mark.monitor()->header().is_neutral(), "monitor must record a good object header");
1610     return;
1611   }
1612   inflate(Thread::current(), obj, inflate_cause_vm_internal);
1613 }
1614 
1615 ObjectMonitor* ObjectSynchronizer::inflate(Thread* self,
1616                                            oop object, const InflateCause cause) {
1617   // Inflate mutates the heap ...
1618   // Relaxing assertion for bug 6320749.
1619   assert(Universe::verify_in_progress() ||
1620          !SafepointSynchronize::is_at_safepoint(), "invariant");
1621 
1622   EventJavaMonitorInflate event;
1623 
1624   for (;;) {
1625     const markWord mark = object->mark();
1626     assert(!mark.has_bias_pattern(), "invariant");
1627 
1628     // The mark can be in one of the following states:
1629     // *  Inflated     - just return
1630     // *  Stack-locked - coerce it to inflated
1631     // *  INFLATING    - busy wait for conversion to complete
1632     // *  Neutral      - aggressively inflate the object.
1633     // *  BIASED       - Illegal.  We should never see this
1634 
1635     // CASE: inflated
1636     if (mark.has_monitor()) {
1637       ObjectMonitor* inf = mark.monitor();





1638       markWord dmw = inf->header();
1639       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1640       assert(inf->object() == object, "invariant");
1641       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1642       return inf;
1643     }
1644 
1645     // CASE: inflation in progress - inflating over a stack-lock.
1646     // Some other thread is converting from stack-locked to inflated.
1647     // Only that thread can complete inflation -- other threads must wait.
1648     // The INFLATING value is transient.
1649     // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1650     // We could always eliminate polling by parking the thread on some auxiliary list.
1651     if (mark == markWord::INFLATING()) {
1652       read_stable_mark(object);
1653       continue;
1654     }
1655 
1656     // CASE: stack-locked
1657     // Could be stack-locked either by this thread or by some other thread.
1658     //
1659     // Note that we allocate the objectmonitor speculatively, _before_ attempting
1660     // to install INFLATING into the mark word.  We originally installed INFLATING,
1661     // allocated the objectmonitor, and then finally STed the address of the
1662     // objectmonitor into the mark.  This was correct, but artificially lengthened


1668     // critical INFLATING...ST interval.  A thread can transfer
1669     // multiple objectmonitors en masse from the global free list to its local free list.
1670     // This reduces coherency traffic and lock contention on the global free list.
1671     // Using such local free lists, it doesn't matter if the om_alloc() call appears
1672     // before or after the CAS(INFLATING) operation.
1673     // See the comments in om_alloc().
1674 
1675     LogStreamHandle(Trace, monitorinflation) lsh;
1676 
1677     if (mark.has_locker()) {
1678       ObjectMonitor* m = om_alloc(self);
1679       // Optimistically prepare the objectmonitor - anticipate successful CAS
1680       // We do this before the CAS in order to minimize the length of time
1681       // in which INFLATING appears in the mark.
1682       m->Recycle();
1683       m->_Responsible  = NULL;
1684       m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class
1685 
1686       markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1687       if (cmp != mark) {

1688         om_release(self, m, true);
1689         continue;       // Interference -- just retry
1690       }
1691 
1692       // We've successfully installed INFLATING (0) into the mark-word.
1693       // This is the only case where 0 will appear in a mark-word.
1694       // Only the singular thread that successfully swings the mark-word
1695       // to 0 can perform (or more precisely, complete) inflation.
1696       //
1697       // Why do we CAS a 0 into the mark-word instead of just CASing the
1698       // mark-word from the stack-locked value directly to the new inflated state?
1699       // Consider what happens when a thread unlocks a stack-locked object.
1700       // It attempts to use CAS to swing the displaced header value from the
1701       // on-stack BasicLock back into the object header.  Recall also that the
1702       // header value (hash code, etc.) can reside in (a) the object header, or
1703       // (b) a displaced header associated with the stack-lock, or (c) a displaced
1704       // header in an ObjectMonitor.  The inflate() routine must copy the header
1705       // value from the BasicLock on the owner's stack to the ObjectMonitor, all
1706       // the while preserving the hashCode stability invariants.  If the owner
1707       // decides to release the lock while the value is 0, the unlock will fail
1708       // and control will eventually pass from slow_exit() to inflate.  The owner
1709       // will then spin, waiting for the 0 value to disappear.   Put another way,
1710       // the 0 causes the owner to stall if the owner happens to try to
1711       // drop the lock (restoring the header from the BasicLock to the object)
1712       // while inflation is in-progress.  This protocol avoids races that
1713       // would otherwise permit hashCode values to change or "flicker" for an object.
1714       // Critically, while object->mark is 0 mark.displaced_mark_helper() is stable.
1715       // 0 serves as a "BUSY" inflate-in-progress indicator.
1716 
1717 
1718       // fetch the displaced mark from the owner's stack.
1719       // The owner can't die or unwind past the lock while our INFLATING
1720       // object is in the mark.  Furthermore the owner can't complete
1721       // an unlock on the object, either.
1722       markWord dmw = mark.displaced_mark_helper();
1723       // Catch if the object's header is not neutral (not locked and
1724       // not marked is what we care about here).
1725       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1726 
1727       // Setup monitor fields to proper values -- prepare the monitor
1728       m->set_header(dmw);
1729 
1730       // Optimization: if the mark.locker stack address is associated
1731       // with this thread we could simply set m->_owner = self.
1732       // Note that a thread can inflate an object
1733       // that it has stack-locked -- as might happen in wait() -- directly
1734       // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.



1735       m->set_owner_from(NULL, mark.locker());

1736       m->set_object(object);
1737       // TODO-FIXME: assert BasicLock->dhw != 0.
1738 


1739       // Must preserve store ordering. The monitor state must
1740       // be stable at the time of publishing the monitor address.
1741       guarantee(object->mark() == markWord::INFLATING(), "invariant");
1742       object->release_set_mark(markWord::encode(m));
1743 





1744       // Hopefully the performance counters are allocated on distinct cache lines
1745       // to avoid false sharing on MP systems ...
1746       OM_PERFDATA_OP(Inflations, inc());
1747       if (log_is_enabled(Trace, monitorinflation)) {
1748         ResourceMark rm(self);
1749         lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1750                      INTPTR_FORMAT ", type='%s'", p2i(object),
1751                      object->mark().value(), object->klass()->external_name());
1752       }
1753       if (event.should_commit()) {
1754         post_monitor_inflate_event(&event, object, cause);
1755       }
1756       return m;

1757     }
1758 
1759     // CASE: neutral
1760     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1761     // If we know we're inflating for entry it's better to inflate by swinging a
1762     // pre-locked ObjectMonitor pointer into the object header.   A successful
1763     // CAS inflates the object *and* confers ownership to the inflating thread.
1764     // In the current implementation we use a 2-step mechanism where we CAS()
1765     // to inflate and then CAS() again to try to swing _owner from NULL to self.
1766     // An inflateTry() method that we could call from enter() would be useful.
1767 
1768     // Catch if the object's header is not neutral (not locked and
1769     // not marked is what we care about here).
1770     assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
1771     ObjectMonitor* m = om_alloc(self);
1772     // prepare m for installation - set monitor to initial state
1773     m->Recycle();
1774     m->set_header(mark);




1775     m->set_object(object);
1776     m->_Responsible  = NULL;
1777     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
1778 


1779     if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
1780       m->set_header(markWord::zero());
1781       m->set_object(NULL);
1782       m->Recycle();


1783       om_release(self, m, true);
1784       m = NULL;
1785       continue;
1786       // interference - the markword changed - just retry.
1787       // The state-transitions are one-way, so there's no chance of
1788       // live-lock -- "Inflated" is an absorbing state.
1789     }
1790 





1791     // Hopefully the performance counters are allocated on distinct
1792     // cache lines to avoid false sharing on MP systems ...
1793     OM_PERFDATA_OP(Inflations, inc());
1794     if (log_is_enabled(Trace, monitorinflation)) {
1795       ResourceMark rm(self);
1796       lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
1797                    INTPTR_FORMAT ", type='%s'", p2i(object),
1798                    object->mark().value(), object->klass()->external_name());
1799     }
1800     if (event.should_commit()) {
1801       post_monitor_inflate_event(&event, object, cause);
1802     }
1803     return m;

1804   }
1805 }
1806 
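For orientation, the state machine above condenses to the following sketch. This is illustrative only: it reuses the helpers that appear in this file (om_alloc(), om_release(), read_stable_mark(), the markWord accessors) but drops all assertions, logging, JFR event and perf-counter plumbing.

    // Condensed sketch of inflate(); not the real implementation.
    ObjectMonitor* inflate_sketch(Thread* self, oop obj) {
      for (;;) {
        const markWord mark = obj->mark();
        if (mark.has_monitor()) {             // CASE: already inflated
          return mark.monitor();
        }
        if (mark == markWord::INFLATING()) {  // CASE: another thread is inflating
          read_stable_mark(obj);              // spin/yield until it finishes
          continue;
        }
        if (mark.has_locker()) {              // CASE: stack-locked
          ObjectMonitor* m = om_alloc(self);
          m->Recycle();
          if (obj->cas_set_mark(markWord::INFLATING(), mark) != mark) {
            om_release(self, m, true);        // lost the race -- retry
            continue;
          }
          m->set_header(mark.displaced_mark_helper());
          m->set_owner_from(NULL, mark.locker());
          m->set_object(obj);
          obj->release_set_mark(markWord::encode(m));  // publish the monitor
          return m;
        }
        // CASE: neutral -- install a fresh monitor with a single CAS.
        ObjectMonitor* m = om_alloc(self);
        m->Recycle();
        m->set_header(mark);
        m->set_object(obj);
        if (obj->cas_set_mark(markWord::encode(m), mark) != mark) {
          m->set_header(markWord::zero());
          m->set_object(NULL);
          om_release(self, m, true);          // interference -- retry
          continue;
        }
        return m;
      }
    }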
1807 
1808 // We maintain a list of in-use monitors for each thread.
1809 //

1810 // deflate_thread_local_monitors() scans a single thread's in-use list, while
1811 // deflate_idle_monitors() scans only a global list of in-use monitors which
1812 // is populated only as a thread dies (see om_flush()).
1813 //
1814 // These operations are called at all safepoints, immediately after mutators
1815 // are stopped, but before any objects have moved. Collectively they traverse
1816 // the population of in-use monitors, deflating where possible. The scavenged
1817 // monitors are returned to the global monitor free list.
1818 //
1819 // Beware that we scavenge at *every* stop-the-world point. Having a large
1820 // number of monitors in-use could negatively impact performance. We also want
1821 // to minimize the total # of monitors in circulation, as they incur a small
1822 // footprint penalty.
1823 //
1824 // Perversely, the heap size -- and thus the STW safepoint rate --
1825 // typically drives the scavenge rate.  Large heaps can mean infrequent GC,
1826 // which in turn can mean large(r) numbers of ObjectMonitors in circulation.
1827 // This is an unfortunate aspect of this design.


































1828 
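As a sketch, one deflation pass over a single in-use list has this shape (illustrative only; in_use_list/in_use_count stand in for either the global fields or a thread's om_in_use_list/om_in_use_count):

    // Sketch of one safepoint deflation pass. free_head/free_tail
    // accumulate scavenged monitors so they can be prepended to the
    // global free list in one batch afterwards.
    ObjectMonitor* free_head = NULL;
    ObjectMonitor* free_tail = NULL;
    int deflated_count = deflate_monitor_list(&in_use_list, &in_use_count,
                                              &free_head, &free_tail);
    if (free_head != NULL) {
      prepend_list_to_global_free_list(free_head, free_tail, deflated_count);
    }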
1829 // Deflate a single monitor if not in-use
1830 // Return true if deflated, false if in-use
1831 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1832                                          ObjectMonitor** free_head_p,
1833                                          ObjectMonitor** free_tail_p) {
1834   bool deflated;
1835   // Normal case ... The monitor is associated with obj.
1836   const markWord mark = obj->mark();
1837   guarantee(mark == markWord::encode(mid), "should match: mark="
1838             INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
1839             markWord::encode(mid).value());
1840   // Make sure that mark.monitor() and markWord::encode() agree:
1841   guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
1842             ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
1843   const markWord dmw = mid->header();
1844   guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1845 
1846   if (mid->is_busy()) {
1847     // Easy checks are first - the ObjectMonitor is busy so no deflation.

1848     deflated = false;
1849   } else {
1850     // Deflate the monitor if it is no longer being used
1851     // It's idle - scavenge and return to the global free list
1852     // plain old deflation ...
1853     if (log_is_enabled(Trace, monitorinflation)) {
1854       ResourceMark rm;
1855       log_trace(monitorinflation)("deflate_monitor: "
1856                                   "object=" INTPTR_FORMAT ", mark="
1857                                   INTPTR_FORMAT ", type='%s'", p2i(obj),
1858                                   mark.value(), obj->klass()->external_name());
1859     }
1860 
1861     // Restore the header back to obj
1862     obj->release_set_mark(dmw);





1863     mid->clear();
1864 
1865     assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
1866            p2i(mid->object()));

1867 
1868     // Move the deflated ObjectMonitor to the working free list
1869     // defined by free_head_p and free_tail_p.
1870     if (*free_head_p == NULL) *free_head_p = mid;
1871     if (*free_tail_p != NULL) {
1872       // We append to the list so the caller can use mid->_next_om
1873       // to fix the linkages in its context.
1874       ObjectMonitor* prevtail = *free_tail_p;
1875       // Should have been cleaned up by the caller:
1876       // Note: Should not have to lock prevtail here since we're at a
1877       // safepoint and ObjectMonitors on the local free list should
1878       // not be accessed in parallel.
1879 #ifdef ASSERT
1880       ObjectMonitor* l_next_om = prevtail->next_om();
1881 #endif
1882       assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
1883       prevtail->set_next_om(mid);
1884     }
1885     *free_tail_p = mid;
1886     // At this point, mid->_next_om still refers to its current
1887     // value and another ObjectMonitor's _next_om field still
1888     // refers to this ObjectMonitor. Those linkages have to be
1889     // cleaned up by the caller who has the complete context.
1890     deflated = true;
1891   }
1892   return deflated;
1893 }
1894 
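A usage sketch of deflate_monitor() (hypothetical locals; the real caller is deflate_monitor_list() below):

    // Per-node step, mirroring what deflate_monitor_list() does:
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, &free_head, &free_tail)) {
      // mid is now appended to the free_head/free_tail working list;
      // the caller must still unlink it from its in-use list.
    }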


















































































































































1895 // Walk a given monitor list, and deflate idle monitors.
1896 // The given list could be a per-thread list or a global list.
1897 //
1898 // In the case of parallel processing of thread local monitor lists,
1899 // work is done by Threads::parallel_threads_do() which ensures that
1900 // each Java thread is processed by exactly one worker thread, and
1901 // thus avoids conflicts that would arise if worker threads were to
1902 // process the same monitor lists concurrently.
1903 //
1904 // See also ParallelSPCleanupTask and
1905 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
1906 // Threads::parallel_java_threads_do() in thread.cpp.
1907 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
1908                                              int* count_p,
1909                                              ObjectMonitor** free_head_p,
1910                                              ObjectMonitor** free_tail_p) {
1911   ObjectMonitor* cur_mid_in_use = NULL;
1912   ObjectMonitor* mid = NULL;
1913   ObjectMonitor* next = NULL;
1914   int deflated_count = 0;


1925       // by unlinking mid from the global or per-thread in-use list.
1926       if (cur_mid_in_use == NULL) {
1927         // mid is the list head so switch the list head to next:
1928         Atomic::store(list_p, next);
1929       } else {
1930         // Switch cur_mid_in_use's next field to next:
1931         cur_mid_in_use->set_next_om(next);
1932       }
1933       // At this point mid is disconnected from the in-use list.
1934       deflated_count++;
1935       Atomic::dec(count_p);
1936       // mid is current tail in the free_head_p list so NULL terminate it:
1937       mid->set_next_om(NULL);
1938     } else {
1939       cur_mid_in_use = mid;
1940     }
1941   }
1942   return deflated_count;
1943 }
1944 
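The head-versus-interior unlink above is the standard singly-linked-list pattern. Stripped of the HotSpot atomics, it reduces to:

    // Generic unlink of 'mid' from a singly-linked list (sketch).
    // 'cur_mid_in_use' is NULL when 'mid' is the current list head.
    if (cur_mid_in_use == NULL) {
      head = next;                    // mid was the head
    } else {
      cur_mid_in_use->next = next;    // mid was an interior node
    }
    mid->next = NULL;                 // detach before handing mid elsewhere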













































































































































1945 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
1946   counters->n_in_use = 0;              // currently associated with objects
1947   counters->n_in_circulation = 0;      // extant
1948   counters->n_scavenged = 0;           // reclaimed (global and per-thread)
1949   counters->per_thread_scavenged = 0;  // per-thread scavenge total
1950   counters->per_thread_times = 0.0;    // per-thread scavenge times
1951 }
1952 
1953 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
1954   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");









1955   bool deflated = false;
1956 
1957   ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
1958   ObjectMonitor* free_tail_p = NULL;
1959   elapsedTimer timer;
1960 
1961   if (log_is_enabled(Info, monitorinflation)) {
1962     timer.start();
1963   }
1964 
1965   // Note: the thread-local monitors lists get deflated in
1966   // a separate pass. See deflate_thread_local_monitors().
1967 
1968   // For moribund threads, scan om_list_globals._in_use_list
1969   int deflated_count = 0;
1970   if (Atomic::load(&om_list_globals._in_use_list) != NULL) {
1971     // Update n_in_circulation before om_list_globals._in_use_count is
1972     // updated by deflation.
1973     Atomic::add(&counters->n_in_circulation,
1974                 Atomic::load(&om_list_globals._in_use_count));


1987 #endif
1988     assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
1989     prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
1990     Atomic::add(&counters->n_scavenged, deflated_count);
1991   }
1992   timer.stop();
1993 
1994   LogStreamHandle(Debug, monitorinflation) lsh_debug;
1995   LogStreamHandle(Info, monitorinflation) lsh_info;
1996   LogStream* ls = NULL;
1997   if (log_is_enabled(Debug, monitorinflation)) {
1998     ls = &lsh_debug;
1999   } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2000     ls = &lsh_info;
2001   }
2002   if (ls != NULL) {
2003     ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
2004   }
2005 }
2006 




























































































































































































2007 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2008   // Report the cumulative time for deflating each thread's idle
2009   // monitors. Note: if the work is split among more than one
2010   // worker thread, then the reported time will likely be more
2011   // than a beginning-to-end measurement of the phase.
2012   log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged);
2013 







2014   if (log_is_enabled(Debug, monitorinflation)) {
2015     // exit_globals()'s call to audit_and_print_stats() is done
2016     // at the Info level and not at a safepoint.



2017     ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
2018   } else if (log_is_enabled(Info, monitorinflation)) {
2019     log_info(monitorinflation)("global_population=%d, global_in_use_count=%d, "
2020                                "global_free_count=%d",
2021                                Atomic::load(&om_list_globals._population),
2022                                Atomic::load(&om_list_globals._in_use_count),
2023                                Atomic::load(&om_list_globals._free_count));

2024   }
2025 
2026   Atomic::store(&_forceMonitorScavenge, 0);    // Reset
2027 
2028   OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
2029   OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
2030 
2031   GVars.stw_random = os::random();
2032   GVars.stw_cycle++;




2033 }
2034 
2035 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
2036   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2037 





2038   ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
2039   ObjectMonitor* free_tail_p = NULL;
2040   elapsedTimer timer;
2041 
2042   if (log_is_enabled(Info, safepoint, cleanup) ||
2043       log_is_enabled(Info, monitorinflation)) {
2044     timer.start();
2045   }
2046 
2047   // Update n_in_circulation before om_in_use_count is updated by deflation.
2048   Atomic::add(&counters->n_in_circulation, Atomic::load(&thread->om_in_use_count));
2049 
2050   int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
2051   Atomic::add(&counters->n_in_use, Atomic::load(&thread->om_in_use_count));
2052 
2053   if (free_head_p != NULL) {
2054     // Move the deflated ObjectMonitors back to the global free list.
2055     guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
2056 #ifdef ASSERT
2057     ObjectMonitor* l_next_om = free_tail_p->next_om();


2191   if (Atomic::load(&om_list_globals._population) == chk_om_population) {
2192     ls->print_cr("global_population=%d equals chk_om_population=%d",
2193                  Atomic::load(&om_list_globals._population), chk_om_population);
2194   } else {
2195     // With fine-grained locks on the monitor lists, it is possible for
2196     // log_monitor_list_counts() to return a value that doesn't match
2197     // om_list_globals._population. So far a higher value has been
2198     // seen in testing, so something is being double-counted by
2199     // log_monitor_list_counts().
2200     ls->print_cr("WARNING: global_population=%d is not equal to "
2201                  "chk_om_population=%d",
2202                  Atomic::load(&om_list_globals._population), chk_om_population);
2203   }
2204 
2205   // Check om_list_globals._in_use_list and om_list_globals._in_use_count:
2206   chk_global_in_use_list_and_count(ls, &error_cnt);
2207 
2208   // Check om_list_globals._free_list and om_list_globals._free_count:
2209   chk_global_free_list_and_count(ls, &error_cnt);
2210 



2211   ls->print_cr("Checking per-thread lists:");
2212 
2213   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2214     // Check om_in_use_list and om_in_use_count:
2215     chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);
2216 
2217     // Check om_free_list and om_free_count:
2218     chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
2219   }
2220 
2221   if (error_cnt == 0) {
2222     ls->print_cr("No errors found in monitor list checks.");
2223   } else {
2224     log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
2225   }
2226 
2227   if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
2228       (!on_exit && log_is_enabled(Trace, monitorinflation))) {
2229     // When exiting this log output is at the Info level. When called
2230     // at a safepoint, this log output is at the Trace level since


2241 void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n,
2242                                         outputStream * out, int *error_cnt_p) {
2243   stringStream ss;
2244   if (n->is_busy()) {
2245     if (jt != NULL) {
2246       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2247                     ": free per-thread monitor must not be busy: %s", p2i(jt),
2248                     p2i(n), n->is_busy_to_string(&ss));
2249     } else {
2250       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
2251                     "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
2252     }
2253     *error_cnt_p = *error_cnt_p + 1;
2254   }
2255   if (n->header().value() != 0) {
2256     if (jt != NULL) {
2257       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2258                     ": free per-thread monitor must have NULL _header "
2259                     "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
2260                     n->header().value());
2261     } else {

2262       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
2263                     "must have NULL _header field: _header=" INTPTR_FORMAT,
2264                     p2i(n), n->header().value());
2265     }
2266     *error_cnt_p = *error_cnt_p + 1;
2267   }

2268   if (n->object() != NULL) {
2269     if (jt != NULL) {
2270       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2271                     ": free per-thread monitor must have NULL _object "
2272                     "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
2273                     p2i(n->object()));
2274     } else {
2275       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
2276                     "must have NULL _object field: _object=" INTPTR_FORMAT,
2277                     p2i(n), p2i(n->object()));
2278     }
2279     *error_cnt_p = *error_cnt_p + 1;
2280   }
2281 }
2282 
2283 // Lock the next ObjectMonitor for traversal and unlock the current
2284 // ObjectMonitor. Returns the next ObjectMonitor if there is one.
2285 // Otherwise returns NULL (after unlocking the current ObjectMonitor).
2286 // This function is used by the various list walker functions to
2287 // safely walk a list without allowing an ObjectMonitor to be moved


2313       if (cur == NULL) {
2314         break;
2315       }
2316     }
2317   }
2318   int l_free_count = Atomic::load(&om_list_globals._free_count);
2319   if (l_free_count == chk_om_free_count) {
2320     out->print_cr("global_free_count=%d equals chk_om_free_count=%d",
2321                   l_free_count, chk_om_free_count);
2322   } else {
2323     // With fine-grained locks on om_list_globals._free_list, it
2324     // is possible for an ObjectMonitor to be prepended to
2325     // om_list_globals._free_list after we started calculating
2326     // chk_om_free_count so om_list_globals._free_count may not
2327     // match anymore.
2328     out->print_cr("WARNING: global_free_count=%d is not equal to "
2329                   "chk_om_free_count=%d", l_free_count, chk_om_free_count);
2330   }
2331 }
2332 






























2333 // Check the global in-use list and count; log the results of the checks.
2334 void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
2335                                                           int *error_cnt_p) {
2336   int chk_om_in_use_count = 0;
2337   ObjectMonitor* cur = NULL;
2338   if ((cur = get_list_head_locked(&om_list_globals._in_use_list)) != NULL) {
2339     // Marked the global in-use list head so process the list.
2340     while (true) {
2341       chk_in_use_entry(NULL /* jt */, cur, out, error_cnt_p);
2342       chk_om_in_use_count++;
2343 
2344       cur = lock_next_for_traversal(cur);
2345       if (cur == NULL) {
2346         break;
2347       }
2348     }
2349   }
2350   int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
2351   if (l_in_use_count == chk_om_in_use_count) {
2352     out->print_cr("global_in_use_count=%d equals chk_om_in_use_count=%d",


2471   if (l_om_in_use_count == chk_om_in_use_count) {
2472     out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
2473                   "chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count,
2474                   chk_om_in_use_count);
2475   } else {
2476     out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
2477                   "equal to chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count,
2478                   chk_om_in_use_count);
2479     *error_cnt_p = *error_cnt_p + 1;
2480   }
2481 }
2482 
2483 // Log details about ObjectMonitors on the in-use lists. The 'BHL'
2484 // flags indicate why the entry is in-use, 'object' and 'object type'
2485 // indicate the associated object and its type.
2486 void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) {
2487   stringStream ss;
2488   if (Atomic::load(&om_list_globals._in_use_count) > 0) {
2489     out->print_cr("In-use global monitor info:");
2490     out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
2491     out->print_cr("%18s  %s  %18s  %18s",
2492                   "monitor", "BHL", "object", "object type");
2493     out->print_cr("==================  ===  ==================  ==================");
2494     ObjectMonitor* cur = NULL;
2495     if ((cur = get_list_head_locked(&om_list_globals._in_use_list)) != NULL) {
2496       // Marked the global in-use list head so process the list.
2497       while (true) {
2498         const oop obj = (oop) cur->object();
2499         const markWord mark = cur->header();
2500         ResourceMark rm;
2501         out->print(INTPTR_FORMAT "  %d%d%d  " INTPTR_FORMAT "  %s", p2i(cur),
2502                    cur->is_busy() != 0, mark.hash() != 0, cur->owner() != NULL,
2503                    p2i(obj), obj->klass()->external_name());
2504         if (cur->is_busy() != 0) {
2505           out->print(" (%s)", cur->is_busy_to_string(&ss));
2506           ss.reset();
2507         }
2508         out->cr();
2509 
2510         cur = lock_next_for_traversal(cur);
2511         if (cur == NULL) {
2512           break;
2513         }
2514       }
2515     }
2516   }
2517 
2518   out->print_cr("In-use per-thread monitor info:");
2519   out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
2520   out->print_cr("%18s  %18s  %s  %18s  %18s",
2521                 "jt", "monitor", "BHL", "object", "object type");
2522   out->print_cr("==================  ==================  ===  ==================  ==================");
2523   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2524     ObjectMonitor* cur = NULL;
2525     if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
2526       // Marked the per-thread in-use list head so process the list.
2527       while (true) {
2528         const oop obj = (oop) cur->object();
2529         const markWord mark = cur->header();
2530         ResourceMark rm;
2531         out->print(INTPTR_FORMAT "  " INTPTR_FORMAT "  %d%d%d  " INTPTR_FORMAT
2532                    "  %s", p2i(jt), p2i(cur), cur->is_busy() != 0,
2533                    mark.hash() != 0, cur->owner() != NULL, p2i(obj),
2534                    obj->klass()->external_name());
2535         if (cur->is_busy() != 0) {
2536           out->print(" (%s)", cur->is_busy_to_string(&ss));
2537           ss.reset();
2538         }
2539         out->cr();
2540 
2541         cur = lock_next_for_traversal(cur);
2542         if (cur == NULL) {
2543           break;
2544         }
2545       }
2546     }
2547   }
2548 
2549   out->flush();
2550 }
2551 
2552 // Log counts for the global and per-thread monitor lists and return
2553 // the population count.
2554 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
2555   int pop_count = 0;
2556   out->print_cr("%18s  %10s  %10s  %10s",
2557                 "Global Lists:", "InUse", "Free", "Total");
2558   out->print_cr("==================  ==========  ==========  ==========");
2559   int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
2560   int l_free_count = Atomic::load(&om_list_globals._free_count);
2561   out->print_cr("%18s  %10d  %10d  %10d", "", l_in_use_count,
2562                 l_free_count, Atomic::load(&om_list_globals._population));
2563   pop_count += l_in_use_count + l_free_count;


2564 
2565   out->print_cr("%18s  %10s  %10s  %10s",
2566                 "Per-Thread Lists:", "InUse", "Free", "Provision");
2567   out->print_cr("==================  ==========  ==========  ==========");
2568 
2569   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2570     int l_om_in_use_count = Atomic::load(&jt->om_in_use_count);
2571     int l_om_free_count = Atomic::load(&jt->om_free_count);
2572     out->print_cr(INTPTR_FORMAT "  %10d  %10d  %10d", p2i(jt),
2573                   l_om_in_use_count, l_om_free_count, jt->om_free_provision);
2574     pop_count += l_om_in_use_count + l_om_free_count;
2575   }
2576   return pop_count;
2577 }
2578 
2579 #ifndef PRODUCT
2580 
2581 // Check if monitor belongs to the monitor cache
2582 // The list is grow-only so it's *relatively* safe to traverse
2583 // the list of extant blocks without taking a lock.


  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/vmSymbols.hpp"
  27 #include "logging/log.hpp"
  28 #include "logging/logStream.hpp"
  29 #include "jfr/jfrEvents.hpp"
  30 #include "memory/allocation.inline.hpp"
  31 #include "memory/metaspaceShared.hpp"
  32 #include "memory/padded.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "memory/universe.hpp"
  35 #include "oops/markWord.hpp"
  36 #include "oops/oop.inline.hpp"
  37 #include "runtime/atomic.hpp"
  38 #include "runtime/biasedLocking.hpp"
  39 #include "runtime/handles.inline.hpp"
  40 #include "runtime/handshake.hpp"
  41 #include "runtime/interfaceSupport.inline.hpp"
  42 #include "runtime/mutexLocker.hpp"
  43 #include "runtime/objectMonitor.hpp"
  44 #include "runtime/objectMonitor.inline.hpp"
  45 #include "runtime/osThread.hpp"
  46 #include "runtime/safepointMechanism.inline.hpp"
  47 #include "runtime/safepointVerifiers.hpp"
  48 #include "runtime/sharedRuntime.hpp"
  49 #include "runtime/stubRoutines.hpp"
  50 #include "runtime/synchronizer.hpp"
  51 #include "runtime/thread.inline.hpp"
  52 #include "runtime/timer.hpp"
  53 #include "runtime/vframe.hpp"
  54 #include "runtime/vmThread.hpp"
  55 #include "utilities/align.hpp"
  56 #include "utilities/dtrace.hpp"
  57 #include "utilities/events.hpp"
  58 #include "utilities/preserveException.hpp"
  59 
  60 // The "core" versions of monitor enter and exit reside in this file.
  61 // The interpreter and compilers contain specialized transliterated
  62 // variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
  63 // for instance.  If you make changes here, make sure to modify the
  64 // interpreter, and both C1 and C2 fast-path inline locking code emission.
  65 //
  66 // -----------------------------------------------------------------------------


 103   }
 104 
 105 #else //  ndef DTRACE_ENABLED
 106 
 107 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
 108 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}
 109 
 110 #endif // ndef DTRACE_ENABLED
 111 
 112 // This exists only as a workaround of dtrace bug 6254741
 113 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
 114   DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
 115   return 0;
 116 }
 117 
 118 #define NINFLATIONLOCKS 256
 119 static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];
 120 
 121 // global list of blocks of monitors
 122 PaddedObjectMonitor* ObjectSynchronizer::g_block_list = NULL;
 123 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
 124 bool volatile ObjectSynchronizer::_is_special_deflation_requested = false;
 125 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
 126 
 127 struct ObjectMonitorListGlobals {
 128   char         _pad_prefix[OM_CACHE_LINE_SIZE];
 129   // These are highly shared list related variables.
 130   // To avoid false-sharing they need to be the sole occupants of a cache line.
 131 
 132   // Global ObjectMonitor free list. Newly allocated and deflated
 133   // ObjectMonitors are prepended here.
 134   ObjectMonitor* _free_list;
 135   DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
 136 
 137   // Global ObjectMonitor in-use list. When a JavaThread is exiting,
 138   // ObjectMonitors on its per-thread in-use list are prepended here.
 139   ObjectMonitor* _in_use_list;
 140   DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
 141 
 142   // Global ObjectMonitor wait list. Deflated ObjectMonitors wait on
 143   // this list until after a handshake or a safepoint for platforms
 144   // that don't support handshakes. After the handshake or safepoint,
 145   // the deflated ObjectMonitors are prepended to free_list.
 146   ObjectMonitor* _wait_list;
 147   DEFINE_PAD_MINUS_SIZE(3, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
 148 
 149   int _free_count;    // # on free_list
 150   DEFINE_PAD_MINUS_SIZE(4, OM_CACHE_LINE_SIZE, sizeof(int));
 151 
 152   int _in_use_count;  // # on in_use_list
 153   DEFINE_PAD_MINUS_SIZE(5, OM_CACHE_LINE_SIZE, sizeof(int));
 154 
 155   int _population;    // # Extant -- in circulation
 156   DEFINE_PAD_MINUS_SIZE(6, OM_CACHE_LINE_SIZE, sizeof(int));
 157 
 158   int _wait_count;    // # on wait_list
 159   DEFINE_PAD_MINUS_SIZE(7, OM_CACHE_LINE_SIZE, sizeof(int));
 160 };
 161 static ObjectMonitorListGlobals om_list_globals;
 162 
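The DEFINE_PAD_MINUS_SIZE() entries above exist to keep each hot field on its own cache line. Conceptually, each one expands to something like this (illustrative; the real macro is in memory/padded.hpp):

    // What one padded field expands to, conceptually:
    struct Padded {
      ObjectMonitor* _free_list;                               // hot field
      char _pad[OM_CACHE_LINE_SIZE - sizeof(ObjectMonitor*)];  // fill the line
      // ... so the next hot field starts on a fresh cache line.
    };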
 163 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
 164 
 165 
 166 // =====================> Spin-lock functions
 167 
 168 // ObjectMonitors are not lockable outside of this file. We use spin-locks
 169 // implemented using a bit in the _next_om field instead of the heavier
 170 // weight locking mechanisms for faster list management.
 171 
 172 #define OM_LOCK_BIT 0x1
 173 
 174 // Return true if the ObjectMonitor is locked.
 175 // Otherwise returns false.
 176 static bool is_locked(ObjectMonitor* om) {
 177   return ((intptr_t)om->next_om() & OM_LOCK_BIT) == OM_LOCK_BIT;
 178 }
 179 
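The matching lock/unlock helpers are elided from this view. A minimal sketch of the idea, assuming a try_set_next_om() CAS helper on ObjectMonitor (the real code may differ):

    // Sketch: spin until we set the low bit of the _next_om field.
    static void om_lock_sketch(ObjectMonitor* om) {
      while (true) {
        ObjectMonitor* next = om->next_om();
        if (((intptr_t)next & OM_LOCK_BIT) == 0) {   // currently unlocked
          ObjectMonitor* marked =
            (ObjectMonitor*)((intptr_t)next | OM_LOCK_BIT);
          if (om->try_set_next_om(next, marked) == next) {
            return;                                  // we own the lock bit
          }
        }
        // else: another thread holds the bit -- keep spinning.
      }
    }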


 297       Atomic::add(&om_list_globals._population, _BLOCKSIZE - 1);
 298       break;
 299     }
 300     // Implied else: try it all again
 301   }
 302 
 303   // Second we handle om_list_globals._free_list:
 304   prepend_list_to_common(new_blk + 1, &new_blk[_BLOCKSIZE - 1], _BLOCKSIZE - 1,
 305                          &om_list_globals._free_list, &om_list_globals._free_count);
 306 }
 307 
 308 // Prepend a list of ObjectMonitors to om_list_globals._free_list.
 309 // 'tail' is the last ObjectMonitor in the list and there are 'count'
 310 // on the list. Also updates om_list_globals._free_count.
 311 static void prepend_list_to_global_free_list(ObjectMonitor* list,
 312                                              ObjectMonitor* tail, int count) {
 313   prepend_list_to_common(list, tail, count, &om_list_globals._free_list,
 314                          &om_list_globals._free_count);
 315 }
 316 
 317 // Prepend a list of ObjectMonitors to om_list_globals._wait_list.
 318 // 'tail' is the last ObjectMonitor in the list and there are 'count'
 319 // on the list. Also updates om_list_globals._wait_count.
 320 static void prepend_list_to_global_wait_list(ObjectMonitor* list,
 321                                              ObjectMonitor* tail, int count) {
 322   prepend_list_to_common(list, tail, count, &om_list_globals._wait_list,
 323                          &om_list_globals._wait_count);
 324 }
 325 
 326 // Prepend a list of ObjectMonitors to om_list_globals._in_use_list.
 327 // 'tail' is the last ObjectMonitor in the list and there are 'count'
 328 // on the list. Also updates om_list_globals._in_use_count.
 329 static void prepend_list_to_global_in_use_list(ObjectMonitor* list,
 330                                                ObjectMonitor* tail, int count) {
 331   prepend_list_to_common(list, tail, count, &om_list_globals._in_use_list,
 332                          &om_list_globals._in_use_count);
 333 }
 334 
 335 // Prepend an ObjectMonitor to the specified list. Also updates
 336 // the specified counter.
 337 static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
 338                               int* count_p) {
 339   while (true) {
 340     om_lock(m);  // Lock m so we can safely update its next field.
 341     ObjectMonitor* cur = NULL;
 342     // Lock the list head to guard against races with a list walker
 343     // or async deflater thread (which only races in om_in_use_list):
 344     if ((cur = get_list_head_locked(list_p)) != NULL) {
 345       // List head is now locked so we can safely switch it.
 346       m->set_next_om(cur);  // m now points to cur (and unlocks m)
 347       Atomic::store(list_p, m);  // Switch list head to unlocked m.
 348       om_unlock(cur);
 349       break;
 350     }
 351     // The list is empty so try to set the list head.
 352     assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
 353     m->set_next_om(cur);  // m now points to NULL (and unlocks m)
 354     if (Atomic::cmpxchg(list_p, cur, m) == cur) {
 355       // List head is now unlocked m.
 356       break;
 357     }
 358     // Implied else: try it all again
 359   }
 360   Atomic::inc(count_p);
 361 }
 362 
 363 // Prepend an ObjectMonitor to a per-thread om_free_list.
 364 // Also updates the per-thread om_free_count.
 365 static void prepend_to_om_free_list(Thread* self, ObjectMonitor* m) {
 366   prepend_to_common(m, &self->om_free_list, &self->om_free_count);
 367 }
 368 
 369 // Prepend an ObjectMonitor to a per-thread om_in_use_list.
 370 // Also updates the per-thread om_in_use_count.
 371 static void prepend_to_om_in_use_list(Thread* self, ObjectMonitor* m) {
 372   prepend_to_common(m, &self->om_in_use_list, &self->om_in_use_count);
 373 }
 374 
 375 // Take an ObjectMonitor from the start of the specified list. Also
 376 // decrements the specified counter. Returns NULL if none are available.
 377 static ObjectMonitor* take_from_start_of_common(ObjectMonitor** list_p,
 378                                                 int* count_p) {
 379   ObjectMonitor* take = NULL;
 380   // Lock the list head to guard against races with a list walker
 381   // or async deflater thread (which only races in om_list_globals._free_list):
 382   if ((take = get_list_head_locked(list_p)) == NULL) {
 383     return NULL;  // None are available.
 384   }
 385   ObjectMonitor* next = unmarked_next(take);
 386   // Switch locked list head to next (which unlocks the list head, but
 387   // leaves take locked):
 388   Atomic::store(list_p, next);
 389   Atomic::dec(count_p);
 390   // Unlock take, but leave the next value for any lagging list
 391   // walkers. It will get cleaned up when take is prepended to
 392   // the in-use list:
 393   om_unlock(take);
 394   return take;
 395 }
 396 
 397 // Take an ObjectMonitor from the start of the om_list_globals._free_list.
 398 // Also updates om_list_globals._free_count. Returns NULL if none are
 399 // available.
 400 static ObjectMonitor* take_from_start_of_global_free_list() {
 401   return take_from_start_of_common(&om_list_globals._free_list,


 470   }
 471 
 472   // biased locking and any other IMS exception states take the slow-path
 473   return false;
 474 }
 475 
 476 
 477 // The LockNode emitted directly at the synchronization site would have
 478 // been too big if it were to have included support for the cases of inflated
 479 // recursive enter and exit, so they go here instead.
 480 // Note that we can't safely call AsyncPrintJavaStack() from within
 481 // quick_enter() as our thread state remains _in_Java.
 482 
 483 bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
 484                                      BasicLock * lock) {
 485   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 486   assert(self->is_Java_thread(), "invariant");
 487   assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
 488   NoSafepointVerifier nsv;
 489   if (obj == NULL) return false;       // Need to throw NPE
 490 
 491   while (true) {
 492     const markWord mark = obj->mark();
 493 
 494     if (mark.has_monitor()) {
 495       ObjectMonitorHandle omh;
 496       if (!omh.save_om_ptr(obj, mark)) {
 497         // Lost a race with async deflation so try again.
 498         assert(AsyncDeflateIdleMonitors, "sanity check");
 499         continue;
 500       }
 501       ObjectMonitor* const m = omh.om_ptr();
 502       assert(m->object() == obj, "invariant");
 503       Thread* const owner = (Thread *) m->_owner;
 504 
 505       // Lock contention and Transactional Lock Elision (TLE) diagnostics
 506       // and observability
 507       // Case: light contention possibly amenable to TLE
 508       // Case: TLE inimical operations such as nested/recursive synchronization
 509 
 510       if (owner == self) {
 511         m->_recursions++;
 512         return true;
 513       }
 514 
 515       // This Java Monitor is inflated so obj's header will never be
 516       // displaced to this thread's BasicLock. Make the displaced header
 517       // non-NULL so this BasicLock is not seen as recursive nor as
 518       // being locked. We do this unconditionally so that this thread's
 519       // BasicLock cannot be mis-interpreted by any stack walkers. For
 520       // performance reasons, stack walkers generally first check for
 521       // Biased Locking in the object's header, the second check is for
 522       // stack-locking in the object's header, the third check is for
 523       // recursive stack-locking in the displaced header in the BasicLock,
 524       // and last are the inflated Java Monitor (ObjectMonitor) checks.
 525       lock->set_displaced_header(markWord::unused_mark());
 526 
 527       if (owner == NULL && m->try_set_owner_from(NULL, self) == NULL) {
 528         assert(m->_recursions == 0, "invariant");
 529         return true;
 530       }
 531 
 532       if (AsyncDeflateIdleMonitors &&
 533           m->try_set_owner_from(DEFLATER_MARKER, self) == DEFLATER_MARKER) {
 534         // The deflation protocol finished the first part (setting owner),
 535         // but it failed the second part (making ref_count negative) and
 536         // bailed. Acquired the monitor.
 537         assert(m->_recursions == 0, "invariant");
 538         return true;
 539       }
 540     }
 541     break;
 542   }
 543 
 544   // Note that we could inflate in quick_enter.
 545   // This is likely a useful optimization
 546   // Critically, in quick_enter() we must not:
 547   // -- perform bias revocation, or
 548   // -- block indefinitely, or
 549   // -- reach a safepoint
 550 
 551   return false;        // revert to slow-path
 552 }
 553 
 554 // -----------------------------------------------------------------------------
 555 // Monitor Enter/Exit
 556 // The interpreter and compiler assembly code tries to lock using the fast path
 557 // of this algorithm. Make sure to update that code if the following function is
 558 // changed. The implementation is extremely sensitive to race conditions. Be careful.
 559 
 560 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, TRAPS) {
 561   if (UseBiasedLocking) {


 573     // Anticipate successful CAS -- the ST of the displaced mark must
 574     // be visible <= the ST performed by the CAS.
 575     lock->set_displaced_header(mark);
 576     if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
 577       return;
 578     }
 579     // Fall through to inflate() ...
 580   } else if (mark.has_locker() &&
 581              THREAD->is_lock_owned((address)mark.locker())) {
 582     assert(lock != mark.locker(), "must not re-lock the same lock");
 583     assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
 584     lock->set_displaced_header(markWord::from_pointer(NULL));
 585     return;
 586   }
 587 
 588   // The object header will never be displaced to this lock,
 589   // so it does not matter what the value is, except that it
 590   // must be non-zero to avoid looking like a re-entrant lock,
 591   // and must not look locked either.
 592   lock->set_displaced_header(markWord::unused_mark());
 593   ObjectMonitorHandle omh;
 594   inflate(&omh, THREAD, obj(), inflate_cause_monitor_enter);
 595   omh.om_ptr()->enter(THREAD);
 596 }
 597 
 598 void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
 599   markWord mark = object->mark();
 600   // We cannot check for Biased Locking if we are racing an inflation.
 601   assert(mark == markWord::INFLATING() ||
 602          !mark.has_bias_pattern(), "should not see bias pattern here");
 603 
 604   markWord dhw = lock->displaced_header();
 605   if (dhw.value() == 0) {
 606     // If the displaced header is NULL, then this exit matches up with
 607     // a recursive enter. No real work to do here except for diagnostics.
 608 #ifndef PRODUCT
 609     if (mark != markWord::INFLATING()) {
 610       // Only do diagnostics if we are not racing an inflation. Simply
 611       // exiting a recursive enter of a Java Monitor that is being
 612       // inflated is safe; see the has_monitor() comment below.
 613       assert(!mark.is_neutral(), "invariant");
 614       assert(!mark.has_locker() ||
 615              THREAD->is_lock_owned((address)mark.locker()), "invariant");


 624         // does not own the Java Monitor.
 625         ObjectMonitor* m = mark.monitor();
 626         assert(((oop)(m->object()))->mark() == mark, "invariant");
 627         assert(m->is_entered(THREAD), "invariant");
 628       }
 629     }
 630 #endif
 631     return;
 632   }
 633 
 634   if (mark == markWord::from_pointer(lock)) {
 635     // If the object is stack-locked by the current thread, try to
 636     // swing the displaced header from the BasicLock back to the mark.
 637     assert(dhw.is_neutral(), "invariant");
 638     if (object->cas_set_mark(dhw, mark) == mark) {
 639       return;
 640     }
 641   }
 642 
 643   // We have to take the slow-path of possible inflation and then exit.
 644   ObjectMonitorHandle omh;
 645   inflate(&omh, THREAD, object, inflate_cause_vm_internal);
 646   omh.om_ptr()->exit(true, THREAD);
 647 }
 648 
 649 // -----------------------------------------------------------------------------
 650 // Class Loader  support to workaround deadlocks on the class loader lock objects
 651 // Also used by GC
 652 // complete_exit()/reenter() are used to wait on a nested lock
 653 // i.e. to give up an outer lock completely and then re-enter
 654 // Used when holding nested locks - lock acquisition order: lock1 then lock2
 655 //  1) complete_exit lock1 - saving recursion count
 656 //  2) wait on lock2
 657 //  3) when notified on lock2, unlock lock2
 658 //  4) reenter lock1 with original recursion count
 659 //  5) lock lock2
 660 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 661 intx ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
 662   if (UseBiasedLocking) {
 663     BiasedLocking::revoke(obj, THREAD);
 664     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
 665   }
 666 
 667   ObjectMonitorHandle omh;
 668   inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
 669   intptr_t ret_code = omh.om_ptr()->complete_exit(THREAD);
 670   return ret_code;
 671 }
 672 
 673 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 674 void ObjectSynchronizer::reenter(Handle obj, intx recursions, TRAPS) {
 675   if (UseBiasedLocking) {
 676     BiasedLocking::revoke(obj, THREAD);
 677     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
 678   }
 679 
 680   ObjectMonitorHandle omh;
 681   inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
 682   omh.om_ptr()->reenter(recursions, THREAD);
 683 }
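A hypothetical illustration of the numbered protocol above (lock1/lock2 are placeholder Handles, basic_lock2 a placeholder BasicLock; error handling elided, and step 5 -- re-locking lock2 -- omitted):

    intx rec = ObjectSynchronizer::complete_exit(lock1, THREAD);  // 1) give up lock1
    ObjectSynchronizer::enter(lock2, &basic_lock2, THREAD);       //    hold lock2 ...
    ObjectSynchronizer::wait(lock2, 0, THREAD);                   // 2) ... and wait on it
    ObjectSynchronizer::exit(lock2(), &basic_lock2, THREAD);      // 3) unlock lock2
    ObjectSynchronizer::reenter(lock1, rec, THREAD);              // 4) reenter lock1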
 684 // -----------------------------------------------------------------------------
 685 // JNI locks on java objects
 686 // NOTE: must use heavy weight monitor to handle jni monitor enter
 687 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
 688   // the current locking is from JNI instead of Java code
 689   if (UseBiasedLocking) {
 690     BiasedLocking::revoke(obj, THREAD);
 691     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
 692   }
 693   THREAD->set_current_pending_monitor_is_from_java(false);
 694   ObjectMonitorHandle omh;
 695   inflate(&omh, THREAD, obj(), inflate_cause_jni_enter);
 696   omh.om_ptr()->enter(THREAD);
 697   THREAD->set_current_pending_monitor_is_from_java(true);
 698 }
 699 
 700 // NOTE: must use heavy weight monitor to handle jni monitor exit
 701 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
 702   if (UseBiasedLocking) {
 703     Handle h_obj(THREAD, obj);
 704     BiasedLocking::revoke(h_obj, THREAD);
 705     obj = h_obj();
 706   }
 707   assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
 708 
 709   ObjectMonitorHandle omh;
 710   inflate(&omh, THREAD, obj, inflate_cause_jni_exit);
 711   ObjectMonitor* monitor = omh.om_ptr();
 712   // If this thread has locked the object, exit the monitor. We
 713   // intentionally do not use CHECK here because we must exit the
 714   // monitor even if an exception is pending.
 715   if (monitor->check_owner(THREAD)) {
 716     monitor->exit(true, THREAD);
 717   }
 718 }
 719 
 720 // -----------------------------------------------------------------------------
 721 // Internal VM locks on java objects
 722 // standard constructor, allows locking failures
 723 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool do_lock) {
 724   _dolock = do_lock;
 725   _thread = thread;
 726   _thread->check_for_valid_safepoint_state();
 727   _obj = obj;
 728 
 729   if (_dolock) {
 730     ObjectSynchronizer::enter(_obj, &_lock, _thread);
 731   }
 732 }
 733 
 734 ObjectLocker::~ObjectLocker() {
 735   if (_dolock) {
 736     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 737   }
 738 }
 739 
 740 
 741 // -----------------------------------------------------------------------------
 742 //  Wait/Notify/NotifyAll
 743 // NOTE: must use heavy weight monitor to handle wait()
 744 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 745   if (UseBiasedLocking) {
 746     BiasedLocking::revoke(obj, THREAD);
 747     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
 748   }
 749   if (millis < 0) {
 750     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 751   }
 752   ObjectMonitorHandle omh;
 753   inflate(&omh, THREAD, obj(), inflate_cause_wait);
 754   ObjectMonitor* monitor = omh.om_ptr();
 755 
 756   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
 757   monitor->wait(millis, true, THREAD);
 758 
 759   // This dummy call is in place to get around dtrace bug 6254741.  Once
 760   // that's fixed we can uncomment the following line, remove the call
 761   // and change this function back into a "void" func.
 762   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 763   int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
 764   return ret_code;
 765 }
 766 
 767 void ObjectSynchronizer::wait_uninterruptibly(Handle obj, jlong millis, TRAPS) {
 768   if (UseBiasedLocking) {
 769     BiasedLocking::revoke(obj, THREAD);
 770     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
 771   }
 772   if (millis < 0) {
 773     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 774   }
 775   ObjectMonitorHandle omh;
 776   inflate(&omh, THREAD, obj(), inflate_cause_wait);
 777   omh.om_ptr()->wait(millis, false, THREAD);
 778 }
 779 
 780 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 781   if (UseBiasedLocking) {
 782     BiasedLocking::revoke(obj, THREAD);
 783     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
 784   }
 785 
 786   markWord mark = obj->mark();
 787   if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
 788     return;
 789   }
 790   ObjectMonitorHandle omh;
 791   inflate(&omh, THREAD, obj(), inflate_cause_notify);
 792   omh.om_ptr()->notify(THREAD);
 793 }
 794 
 795 // NOTE: see comment of notify()
 796 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 797   if (UseBiasedLocking) {
 798     BiasedLocking::revoke(obj, THREAD);
 799     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
 800   }
 801 
 802   markWord mark = obj->mark();
 803   if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
 804     return;
 805   }
 806   ObjectMonitorHandle omh;
 807   inflate(&omh, THREAD, obj(), inflate_cause_notify);
 808   omh.om_ptr()->notifyAll(THREAD);
 809 }
 810 
 811 // -----------------------------------------------------------------------------
 812 // Hash Code handling
 813 //
 814 // Performance concern:
 815 // OrderAccess::storestore() calls release() which at one time stored 0
 816 // into the global volatile OrderAccess::dummy variable. This store was
 817 // unnecessary for correctness. Many threads storing into a common location
 818 // causes considerable cache migration or "sloshing" on large SMP systems.
 819 // As such, I avoided using OrderAccess::storestore(). In some cases
 820 // OrderAccess::fence() -- which incurs local latency on the executing
 821 // processor -- is a better choice as it scales on SMP systems.
 822 //
 823 // See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
 824 // a discussion of coherency costs. Note that all our current reference
 825 // platforms provide strong ST-ST order, so the issue is moot on IA32,
 826 // x64, and SPARC.
 827 //
 828 // As a general policy we use "volatile" to control compiler-based reordering


 981       Handle hobj(self, obj);
 982       // Relaxing assertion for bug 6320749.
 983       assert(Universe::verify_in_progress() ||
 984              !SafepointSynchronize::is_at_safepoint(),
 985              "biases should not be seen by VM thread here");
 986       BiasedLocking::revoke(hobj, JavaThread::current());
 987       obj = hobj();
 988       assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
 989     }
 990   }
 991 
 992   // hashCode() is a heap mutator ...
 993   // Relaxing assertion for bug 6320749.
 994   assert(Universe::verify_in_progress() || DumpSharedSpaces ||
 995          !SafepointSynchronize::is_at_safepoint(), "invariant");
 996   assert(Universe::verify_in_progress() || DumpSharedSpaces ||
 997          self->is_Java_thread() , "invariant");
 998   assert(Universe::verify_in_progress() || DumpSharedSpaces ||
 999          ((JavaThread *)self)->thread_state() != _thread_blocked, "invariant");
1000 
1001   while (true) {
1002     ObjectMonitor* monitor = NULL;
1003     markWord temp, test;
1004     intptr_t hash;
1005     markWord mark = read_stable_mark(obj);
1006 
1007     // object should remain ineligible for biased locking
1008     assert(!mark.has_bias_pattern(), "invariant");
1009 
1010     if (mark.is_neutral()) {            // if this is a normal header
1011       hash = mark.hash();
1012       if (hash != 0) {                  // if it has a hash, just return it
1013         return hash;
1014       }
1015       hash = get_next_hash(self, obj);  // get a new hash
1016       temp = mark.copy_set_hash(hash);  // merge the hash into header
1017                                         // try to install the hash
1018       test = obj->cas_set_mark(temp, mark);
1019       if (test == mark) {               // if the hash was installed, return it
1020         return hash;
1021       }
1022       // Failed to install the hash. It could be that another thread
1023       // installed the hash just before our attempt or inflation has
1024       // occurred or... so we fall thru to inflate the monitor for
1025       // stability and then install the hash.
1026     } else if (mark.has_monitor()) {
1027       ObjectMonitorHandle omh;
1028       if (!omh.save_om_ptr(obj, mark)) {
1029         // Lost a race with async deflation so try again.
1030         assert(AsyncDeflateIdleMonitors, "sanity check");
1031         continue;
1032       }
1033       monitor = omh.om_ptr();
1034       temp = monitor->header();
1035       assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1036       hash = temp.hash();
1037       if (hash != 0) {                  // if it has a hash, just return it
1038         return hash;
1039       }
1040       // Fall thru so we only have one place that installs the hash in
1041       // the ObjectMonitor.
1042     } else if (self->is_lock_owned((address)mark.locker())) {
1043       // This is a stack lock owned by the calling thread so fetch the
1044       // displaced markWord from the BasicLock on the stack.
1045       temp = mark.displaced_mark_helper();
1046       assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1047       hash = temp.hash();
1048       if (hash != 0) {                  // if it has a hash, just return it
1049         return hash;
1050       }
1051       // WARNING:
1052       // The displaced header in the BasicLock on a thread's stack
1053       // is strictly immutable. It CANNOT be changed in ANY case.
1054       // So we have to inflate the stack lock into an ObjectMonitor
1055       // even if the current thread owns the lock. The BasicLock on
1056       // a thread's stack can be asynchronously read by other threads
1057       // during an inflate() call so any change to that stack memory
1058       // may not propagate to other threads correctly.
1059     }
1060 
1061     // Inflate the monitor to set the hash.
1062     ObjectMonitorHandle omh;
1063     inflate(&omh, self, obj, inflate_cause_hash_code);
1064     monitor = omh.om_ptr();
1065     // Load ObjectMonitor's header/dmw field and see if it has a hash.
1066     mark = monitor->header();
1067     assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
1068     hash = mark.hash();
1069     if (hash == 0) {                    // if it does not have a hash
1070       hash = get_next_hash(self, obj);  // get a new hash
1071       temp = mark.copy_set_hash(hash);  // merge the hash into header
1072       assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1073       uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
1074       test = markWord(v);
1075       if (test != mark) {
1076         // The attempt to update the ObjectMonitor's header/dmw field
1077         // did not work. This can happen if another thread managed to
1078         // merge in the hash just before our cmpxchg().
1079         // If we add any new usages of the header/dmw field, this code
1080         // will need to be updated.
1081         hash = test.hash();
1082         assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1083         assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1084       }
1085     }
1086     // We finally get the hash.
1087     return hash;
1088   }
1089 }
1090 
1091 // Deprecated -- use FastHashCode() instead.
1092 
1093 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
1094   return FastHashCode(Thread::current(), obj());
1095 }
1096 
1097 
1098 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
1099                                                    Handle h_obj) {
1100   if (UseBiasedLocking) {
1101     BiasedLocking::revoke(h_obj, thread);
1102     assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
1103   }
1104 
1105   assert(thread == JavaThread::current(), "Can only be called on current thread");
1106   oop obj = h_obj();
1107 
1108   while (true) {
1109     markWord mark = read_stable_mark(obj);
1110 
1111     // Uncontended case, header points to stack
1112     if (mark.has_locker()) {
1113       return thread->is_lock_owned((address)mark.locker());
1114     }
1115     // Contended case, header points to ObjectMonitor (tagged pointer)
1116     if (mark.has_monitor()) {
1117       ObjectMonitorHandle omh;
1118       if (!omh.save_om_ptr(obj, mark)) {
1119         // Lost a race with async deflation so try again.
1120         assert(AsyncDeflateIdleMonitors, "sanity check");
1121         continue;
1122       }
1123       bool ret_code = omh.om_ptr()->is_entered(thread) != 0;
1124       return ret_code;
1125     }
1126     // Unlocked case, header in place
1127     assert(mark.is_neutral(), "sanity check");
1128     return false;
1129   }
1130 }
1131 
1132 // Be aware that this method can revoke the bias of the lock object.
1133 // This method queries the ownership of the lock on the object specified by 'h_obj'.
1134 // If the current thread owns the lock, it returns owner_self. If no
1135 // thread owns the lock, it returns owner_none. Otherwise, it will return
1136 // owner_other.
1137 ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
1138 (JavaThread *self, Handle h_obj) {
1139   // The caller must beware this method can revoke bias, and
1140   // revocation can result in a safepoint.
1141   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
1142   assert(self->thread_state() != _thread_blocked, "invariant");
1143 
1144   // Possible mark states: neutral, biased, stack-locked, inflated
1145 
1146   if (UseBiasedLocking && h_obj()->mark().has_bias_pattern()) {
1147     // CASE: biased
1148     BiasedLocking::revoke(h_obj, self);
1149     assert(!h_obj->mark().has_bias_pattern(),
1150            "biases should be revoked by now");
1151   }
1152 
1153   assert(self == JavaThread::current(), "Can only be called on current thread");
1154   oop obj = h_obj();
1155 
1156   while (true) {
1157     markWord mark = read_stable_mark(obj);
1158 
1159     // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
1160     if (mark.has_locker()) {
1161       return self->is_lock_owned((address)mark.locker()) ?
1162         owner_self : owner_other;
1163     }
1164 
1165     // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
1166     // The Object:ObjectMonitor relationship is stable as long as we're
1167     // not at a safepoint and AsyncDeflateIdleMonitors is false.
1168     if (mark.has_monitor()) {
1169       ObjectMonitorHandle omh;
1170       if (!omh.save_om_ptr(obj, mark)) {
1171         // Lost a race with async deflation so try again.
1172         assert(AsyncDeflateIdleMonitors, "sanity check");
1173         continue;
1174       }
1175       ObjectMonitor* monitor = omh.om_ptr();
1176       void* owner = monitor->_owner;
1177       if (owner == NULL) return owner_none;
1178       return (owner == self ||
1179               self->is_lock_owned((address)owner)) ? owner_self : owner_other;
1180     }
1181 
1182     // CASE: neutral
1183     assert(mark.is_neutral(), "sanity check");
1184     return owner_none;           // it's unlocked
1185   }
1186 }
1187 
1188 // FIXME: jvmti should call this
1189 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
1190   if (UseBiasedLocking) {
1191     if (SafepointSynchronize::is_at_safepoint()) {
1192       BiasedLocking::revoke_at_safepoint(h_obj);
1193     } else {
1194       BiasedLocking::revoke(h_obj, JavaThread::current());
1195     }
1196     assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
1197   }
1198 
1199   oop obj = h_obj();

1200 
1201   while (true) {
1202     address owner = NULL;
1203     markWord mark = read_stable_mark(obj);
1204 
1205     // Uncontended case, header points to stack
1206     if (mark.has_locker()) {
1207       owner = (address) mark.locker();
1208     }
1209 
1210     // Contended case, header points to ObjectMonitor (tagged pointer)
1211     else if (mark.has_monitor()) {
1212       ObjectMonitorHandle omh;
1213       if (!omh.save_om_ptr(obj, mark)) {
1214         // Lost a race with async deflation so try again.
1215         assert(AsyncDeflateIdleMonitors, "sanity check");
1216         continue;
1217       }
1218       ObjectMonitor* monitor = omh.om_ptr();
1219       assert(monitor != NULL, "monitor should be non-null");
1220       owner = (address) monitor->owner();
1221     }
1222 
1223     if (owner != NULL) {
1224       // owning_thread_from_monitor_owner() may also return NULL here
1225       return Threads::owning_thread_from_monitor_owner(t_list, owner);
1226     }
1227 
1228     // Unlocked case, header in place
1229     // We cannot assert mark.is_neutral() here since this object may
1230     // have been locked by another thread by the time we reach here.
1231     // assert(mark.is_neutral(), "sanity check");
1232 
1233     return NULL;
1234   }
1235 }
1236 
1237 // Visitors ...
1238 
1239 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
1240   PaddedObjectMonitor* block = Atomic::load(&g_block_list);
1241   while (block != NULL) {
1242     assert(block->object() == CHAINMARKER, "must be a block header");
1243     for (int i = _BLOCKSIZE - 1; i > 0; i--) {
1244       ObjectMonitor* mid = (ObjectMonitor *)(block + i);
1245       ObjectMonitorHandle omh;
1246       if (!mid->is_free() && omh.save_om_ptr_if_safe(mid)) {
1247         // The ObjectMonitor* is not free and it has been made safe.
1248         if (mid->object() == NULL) {
1249           // Only process with closure if the object is set.
1250           continue;
1251         }
1252         closure->do_monitor(mid);
1253       }
1254     }
1255     // unmarked_next() is not needed with g_block_list (no locking
1256     // used with block linkage _next_om fields).
1257     block = (PaddedObjectMonitor*)block->next_om();
1258   }
1259 }
1260 
1261 static bool monitors_used_above_threshold() {
1262   int population = Atomic::load(&om_list_globals._population);
1263   if (population == 0) {
1264     return false;
1265   }
1266   if (MonitorUsedDeflationThreshold > 0) {
1267     int monitors_used = population - Atomic::load(&om_list_globals._free_count) -
1268                         Atomic::load(&om_list_globals._wait_count);
1269     int monitor_usage = (monitors_used * 100LL) / population;
1270     return monitor_usage > MonitorUsedDeflationThreshold;
1271   }
1272   return false;
1273 }
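
// Worked example (illustrative numbers only, using the fields read above):
// with _population == 1000, _free_count == 200 and _wait_count == 100,
// monitors_used == 700 and monitor_usage == (700 * 100LL) / 1000 == 70,
// so deflation is signalled only when MonitorUsedDeflationThreshold is
// below 70. The 100LL widens the multiplication to 64 bits so a large
// population cannot overflow the intermediate product.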
1274 
1275 // Returns true if MonitorBound is set (> 0) and if the specified
1276 // cnt is > MonitorBound. Otherwise returns false.
1277 static bool is_MonitorBound_exceeded(const int cnt) {
1278   const int mx = MonitorBound;
1279   return mx > 0 && cnt > mx;
1280 }
1281 
1282 bool ObjectSynchronizer::is_async_deflation_needed() {
1283   if (!AsyncDeflateIdleMonitors) {
1284     return false;
1285   }
1286   if (is_async_deflation_requested()) {
1287     // Async deflation request.
1288     return true;
1289   }
1290   if (AsyncDeflationInterval > 0 &&
1291       time_since_last_async_deflation_ms() > AsyncDeflationInterval &&
1292       monitors_used_above_threshold()) {
1293     // It's been longer than our specified deflate interval and there
1294     // are too many monitors in use. We don't deflate more frequently
1295     // than AsyncDeflationInterval (unless is_async_deflation_requested)
1296     // in order to not swamp the ServiceThread.
1297     _last_async_deflation_time_ns = os::javaTimeNanos();
1298     return true;
1299   }
1300   int monitors_used = Atomic::load(&om_list_globals._population) -
1301                       Atomic::load(&om_list_globals._free_count) -
1302                       Atomic::load(&om_list_globals._wait_count);
1303   if (is_MonitorBound_exceeded(monitors_used)) {
1304     // Not enough ObjectMonitors on the global free list.
1305     return true;
1306   }
1307   return false;
1308 }
1309 
1310 bool ObjectSynchronizer::needs_monitor_scavenge() {
1311   if (Atomic::load(&_forceMonitorScavenge) == 1) {
1312     log_info(monitorinflation)("Monitor scavenge needed, triggering safepoint cleanup.");
1313     return true;
1314   }
1315   return false;
1316 }
1317 
1318 bool ObjectSynchronizer::is_safepoint_deflation_needed() {
1319   if (!AsyncDeflateIdleMonitors) {
1320     if (monitors_used_above_threshold()) {
1321       // Too many monitors in use.
1322       return true;
1323     }
1324     return needs_monitor_scavenge();
1325   }
1326   if (is_special_deflation_requested()) {
1327     // For AsyncDeflateIdleMonitors only do a safepoint deflation
1328     // if there is a special deflation request.
1329     return true;
1330   }
1331   return false;
1332 }
1333 
1334 jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
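  // Note: NANOUNITS / MILLIUNITS == 1,000,000, i.e., nanoseconds per
  // millisecond, so, e.g., an elapsed time of 3,000,000,000 ns is
  // reported as 3000 ms.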
1335   return (os::javaTimeNanos() - _last_async_deflation_time_ns) / (NANOUNITS / MILLIUNITS);
1336 }
1337 
1338 void ObjectSynchronizer::oops_do(OopClosure* f) {
1339   // We only scan the global used list here (for moribund threads), and
1340   // the thread-local monitors in Thread::oops_do().
1341   global_used_oops_do(f);
1342 }
1343 
1344 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
1345   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1346   list_oops_do(Atomic::load(&om_list_globals._in_use_list), f);
1347 }
1348 
1349 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
1350   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1351   list_oops_do(thread->om_in_use_list, f);
1352 }
1353 
1354 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
1355   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1356   // The oops_do() phase does not overlap with monitor deflation
1357   // so no need to lock ObjectMonitors for the list traversal and
1358   // no need to update the ObjectMonitor's ref_count for this
1359   // ObjectMonitor* use.
1360   for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
1361     if (mid->object() != NULL) {
1362       f->do_oop((oop*)mid->object_addr());
1363     }
1364   }
1365 }
1366 
1367 
1368 // -----------------------------------------------------------------------------
1369 // ObjectMonitor Lifecycle
1370 // -----------------------
1371 // Inflation unlinks monitors from om_list_globals._free_list or a per-thread
1372 // free list and associates them with objects. Deflation -- which occurs at
1373 // STW-time or asynchronously -- disassociates idle monitors from objects.
1374 // Such scavenged monitors are returned to the om_list_globals._free_list.
1375 //
1376 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
1377 //
1378 // Lifecycle:
1379 // --   unassigned and on the om_list_globals._free_list
1380 // --   unassigned and on a per-thread free list
1381 // --   assigned to an object.  The object is inflated and the mark refers
1382 //      to the ObjectMonitor.
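//
// Illustrative sketch of the transitions between these states (the names
// are the allocation states used in this file; this adds no new protocol):
//
//   free list ------om_alloc()-----> per-thread in-use list (New)
//   in-use (New) ----inflate()-----> associated with an object (Old)
//   in-use (Old) ----deflation-----> back on a free list (Free)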
1383 
1384 
1385 // Constraining monitor pool growth via MonitorBound ...
1386 //
1387 // If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
1388 //
1389 // When safepoint deflation is being used (!AsyncDeflateIdleMonitors):
1390 // The monitor pool is grow-only.  We scavenge at STW safepoint-time, but
1391 // the rate of scavenging is driven primarily by GC.  As such, we can find
1392 // an inordinate number of monitors in circulation.
1393 // To avoid that scenario we can artificially induce a STW safepoint
1394 // if the pool appears to be growing past some reasonable bound.
1395 // Generally we favor time in space-time tradeoffs, but as there's no
1396 // natural back-pressure on the # of extant monitors we need to impose some
1397 // type of limit.  Beware that if MonitorBound is set to too low a value
1398 // we could just loop. In addition, if MonitorBound is set to a low value
1399 // we'll incur more safepoints, which are harmful to performance.
1400 // See also: GuaranteedSafepointInterval
1401 //
1402 // When safepoint deflation is being used and MonitorBound is set, the
1403 // boundary applies to
1404 //     (om_list_globals._population - om_list_globals._free_count)
1405 // i.e., if there are not enough ObjectMonitors on the global free list,
1406 // then a safepoint deflation is induced. Picking a good MonitorBound value
1407 // is non-trivial.
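//
// Worked example (illustrative numbers only): with MonitorBound == 10000,
// _population == 12000 and _free_count == 1500, the in-use count is
// 12000 - 1500 == 10500 > 10000, so is_MonitorBound_exceeded() reports
// true and a cleanup safepoint is induced (see InduceScavenge() below).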
1408 //
1409 // When async deflation is being used:
1410 // The monitor pool is still grow-only. Async deflation is requested
1411 // by a safepoint's cleanup phase or by the ServiceThread at periodic
1412 // intervals when is_async_deflation_needed() returns true. In
1413 // addition to other policies that are checked, if there are not
1414 // enough ObjectMonitors on the global free list, then
1415 // is_async_deflation_needed() will return true. The ServiceThread
1416 // calls deflate_global_idle_monitors_using_JT() and also calls
1417 // deflate_per_thread_idle_monitors_using_JT() as needed.
1418 
1419 static void InduceScavenge(Thread* self, const char * Whence) {
1420   assert(!AsyncDeflateIdleMonitors, "is not used by async deflation");
1421 
1422   // Induce STW safepoint to trim monitors
1423   // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
1424   // More precisely, trigger a cleanup safepoint as the number
1425   // of active monitors passes the specified threshold.
1426   // TODO: assert thread state is reasonable
1427 
1428   if (Atomic::xchg(&_forceMonitorScavenge, 1) == 0) {
1429     VMThread::check_for_forced_cleanup();
1430   }
1431 }
1432 
1433 ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
1434   // A large MAXPRIVATE value reduces both list lock contention
1435   // and list coherency traffic, but also tends to increase the
1436   // number of ObjectMonitors in circulation as well as the STW
1437   // scavenge costs.  As usual, we lean toward time in space-time
1438   // tradeoffs.
1439   const int MAXPRIVATE = 1024;
1440   NoSafepointVerifier nsv;
1441 
1442   stringStream ss;
1443   for (;;) {
1444     ObjectMonitor* m;
1445 
1446     // 1: try to allocate from the thread's local om_free_list.
1447     // Threads will attempt to allocate first from their local list, then
1448     // from the global list, and only after those attempts fail will the
1449     // thread attempt to instantiate new monitors. Thread-local free lists
1450     // improve allocation latency, as well as reducing coherency traffic
1451     // on the shared global list.
1452     m = take_from_start_of_om_free_list(self);
1453     if (m != NULL) {
1454       guarantee(m->object() == NULL, "invariant");
1455       m->set_allocation_state(ObjectMonitor::New);
1456       prepend_to_om_in_use_list(self, m);
1457       return m;
1458     }
1459 
1460     // 2: try to allocate from the global om_list_globals._free_list
1461     // If we're using thread-local free lists then try
1462     // to reprovision the caller's free list.
1463     if (Atomic::load(&om_list_globals._free_list) != NULL) {
1464       // Reprovision the thread's om_free_list.
1465       // Use bulk transfers to reduce the allocation rate and heat
1466       // on various locks.
1467       for (int i = self->om_free_provision; --i >= 0;) {
1468         ObjectMonitor* take = take_from_start_of_global_free_list();
1469         if (take == NULL) {
1470           break;  // No more are available.
1471         }
1472         guarantee(take->object() == NULL, "invariant");
1473         if (AsyncDeflateIdleMonitors) {
1474           // We allowed 3 field values to linger during async deflation.
1475           // Clear or restore them as appropriate.
1476           take->set_header(markWord::zero());
1477           // DEFLATER_MARKER is the only non-NULL value we should see here.
1478           take->try_set_owner_from(DEFLATER_MARKER, NULL);
1479           if (take->ref_count() < 0) {
1480             // Add back max_jint to restore the ref_count field to its
1481             // proper value.
1482             Atomic::add(&take->_ref_count, max_jint);
1483 
1484 #ifdef ASSERT
1485             jint l_ref_count = take->ref_count();
1486 #endif
1487             assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d",
1488                    l_ref_count, take->ref_count());
1489           }
1490         }
1491         take->Recycle();
1492         // Since we're taking from the global free-list, take must be Free.
1493         // om_release() also sets the allocation state to Free because it
1494         // is called from other code paths.
1495         assert(take->is_free(), "invariant");
1496         om_release(self, take, false);
1497       }
1498       self->om_free_provision += 1 + (self->om_free_provision / 2);
1499       if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
1500 
1501       if (!AsyncDeflateIdleMonitors &&
1502           is_MonitorBound_exceeded(Atomic::load(&om_list_globals._population) -
1503                                    Atomic::load(&om_list_globals._free_count))) {
1504         // Not enough ObjectMonitors on the global free list.
1505         // We can't safely induce a STW safepoint from om_alloc() as our thread
1506         // state may not be appropriate for such activities and callers may hold
1507         // naked oops, so instead we defer the action.
1508         InduceScavenge(self, "om_alloc");
1509       }
1510       continue;
1511     }
1512 
1513     // 3: allocate a block of new ObjectMonitors
1514     // Both the local and global free lists are empty -- resort to malloc().
1515     // In the current implementation ObjectMonitors are TSM - immortal.
1516     // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
1517     // each ObjectMonitor to start at the beginning of a cache line,
1518     // so we use align_up().
1519     // A better solution would be to use C++ placement-new.
1520     // BEWARE: As it stands currently, we don't run the ctors!
1521     assert(_BLOCKSIZE > 1, "invariant");
1522     size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
1523     PaddedObjectMonitor* temp;
1524     size_t aligned_size = neededsize + (OM_CACHE_LINE_SIZE - 1);
1525     void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size, mtInternal);
1526     temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, OM_CACHE_LINE_SIZE);
1527     (void)memset((void *) temp, 0, neededsize);
1528 
1529     // Format the block.
1530     // Initialize the linked list: each monitor points to its next,
1531     // forming the singly-linked free list; the very first monitor
1532     // will point to the next block, which forms the block list.
1533     // The trick of using the 1st element in the block as g_block_list
1534     // linkage should be reconsidered.  A better implementation would
1535     // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
1536 
1537     for (int i = 1; i < _BLOCKSIZE; i++) {
1538       temp[i].set_next_om((ObjectMonitor*)&temp[i + 1]);
1539       assert(temp[i].is_free(), "invariant");
1540     }
1541 
1542     // terminate the last monitor as the end of list
1543     temp[_BLOCKSIZE - 1].set_next_om((ObjectMonitor*)NULL);
1544 
1545     // Element [0] is reserved for global list linkage
1546     temp[0].set_object(CHAINMARKER);
1547 
1548     // Consider carving out this thread's current request from the
1549     // block in hand.  This avoids some lock traffic and redundant
1550     // list activity.
1551 
1552     prepend_block_to_lists(temp);
1553   }
1554 }
1555 
1556 // Place "m" on the caller's private per-thread om_free_list.
1557 // In practice there's no need to clamp or limit the number of
1558 // monitors on a thread's om_free_list as the only non-allocation time
1559 // we'll call om_release() is to return a monitor to the free list after
1560 // a CAS attempt failed. This doesn't allow unbounded #s of monitors to
1561 // accumulate on a thread's free list.
1562 //
1563 // Key constraint: all ObjectMonitors on a thread's free list and the global
1564 // free list must have their object field set to null. This prevents the
1565 // scavenger -- deflate_monitor_list() or deflate_monitor_list_using_JT()
1566 // -- from reclaiming them while we are trying to release them.
1567 
1568 void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
1569                                     bool from_per_thread_alloc) {
1570   guarantee(m->header().value() == 0, "invariant");
1571   guarantee(m->object() == NULL, "invariant");
1572   NoSafepointVerifier nsv;
1573 
1574   stringStream ss;
1575   guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
1576             "%s, recursions=" INTX_FORMAT, m->is_busy_to_string(&ss),
1577             m->_recursions);
1578   m->set_allocation_state(ObjectMonitor::Free);
1579   // _next_om is used for both per-thread in-use and free lists so
1580   // we have to remove 'm' from the in-use list first (as needed).
1581   if (from_per_thread_alloc) {
1582     // Need to remove 'm' from om_in_use_list.
1583     ObjectMonitor* mid = NULL;
1584     ObjectMonitor* next = NULL;
1585 
1586     // This list walk can race with another list walker or with async
1587     // deflation so we have to worry about an ObjectMonitor being
1588     // removed from this list while we are walking it.

1589 
1590     // Lock the list head to avoid racing with another list walker
1591     // or with async deflation.
1592     if ((mid = get_list_head_locked(&self->om_in_use_list)) == NULL) {
1593       fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self));
1594     }
1595     next = unmarked_next(mid);
1596     if (m == mid) {
1597       // First special case:
1598       // 'm' matches mid, is the list head and is locked. Switch the list
1599       // head to next which unlocks the list head, but leaves the extracted
1600       // mid locked:
1601       Atomic::store(&self->om_in_use_list, next);
1602     } else if (m == next) {
1603       // Second special case:
1604       // 'm' matches next after the list head and we already have the list
1605       // head locked so set mid to what we are extracting:
1606       mid = next;
1607       // Lock mid to prevent races with a list walker or an async
1608       // deflater thread that's ahead of us. The locked list head
1609       // prevents races from behind us.
1610       om_lock(mid);
1611       // Update next to what follows mid (if anything):
1612       next = unmarked_next(mid);
1613       // Switch next after the list head to new next which unlocks the
1614       // list head, but leaves the extracted mid locked:
1615       self->om_in_use_list->set_next_om(next);
1616     } else {
1617       // We have to search the list to find 'm'.

1618       guarantee(next != NULL, "thread=" INTPTR_FORMAT ": om_in_use_list=" INTPTR_FORMAT
1619                 " is too short.", p2i(self), p2i(self->om_in_use_list));
1620       // Our starting anchor is next after the list head which is the
1621       // last ObjectMonitor we checked:
1622       ObjectMonitor* anchor = next;
1623       // Lock anchor to prevent races with a list walker or an async
1624       // deflater thread that's ahead of us. The locked list head
1625       // prevents races from behind us.
1626       om_lock(anchor);
1627       om_unlock(mid);  // Unlock the list head now that anchor is locked.
1628       while ((mid = unmarked_next(anchor)) != NULL) {
1629         if (m == mid) {
1630           // We found 'm' on the per-thread in-use list so extract it.

1631           // Update next to what follows mid (if anything):
1632           next = unmarked_next(mid);
1633           // Switch next after the anchor to new next which unlocks the
1634           // anchor, but leaves the extracted mid locked:
1635           anchor->set_next_om(next);
1636           break;
1637         } else {
1638           // Lock the next anchor to prevent races with a list walker
1639           // or an async deflater thread that's ahead of us. The locked
1640           // current anchor prevents races from behind us.
1641           om_lock(mid);
1642           // Unlock current anchor now that next anchor is locked:
1643           om_unlock(anchor);
1644           anchor = mid;  // Advance to new anchor and try again.
1645         }
1646       }
1647     }
1648 
1649     if (mid == NULL) {
1650       // Reached end of the list and didn't find 'm' so:
1651       fatal("thread=" INTPTR_FORMAT " must find m=" INTPTR_FORMAT " on om_in_use_list="
1652             INTPTR_FORMAT, p2i(self), p2i(m), p2i(self->om_in_use_list));
1653     }
1654 
1655     // At this point mid is disconnected from the in-use list so
1656     // its lock no longer has any effects on the in-use list.
1657     Atomic::dec(&self->om_in_use_count);
1658     // Unlock mid, but leave the next value for any lagging list
1659     // walkers. It will get cleaned up when mid is prepended to
1660     // the thread's free list:
1661     om_unlock(mid);
1662   }
1663 
1664   prepend_to_om_free_list(self, m);
1665   guarantee(m->is_free(), "invariant");
1666 }
1667 
1668 // Return ObjectMonitors on a moribund thread's free and in-use
1669 // lists to the appropriate global lists. The ObjectMonitors on the
1670 // per-thread in-use list may still be in use by other threads.
1671 //
1672 // We currently call om_flush() from Threads::remove() before the
1673 // thread has been excised from the thread list and is no longer a
1674 // mutator. This means that om_flush() cannot run concurrently with
1675 // a safepoint and interleave with deflate_idle_monitors(). In
1676 // particular, this ensures that the thread's in-use monitors are
1677 // scanned by a GC safepoint, either via Thread::oops_do() (before
1678 // om_flush() is called) or via ObjectSynchronizer::oops_do() (after
1679 // om_flush() is called).
1680 //
1681 // With AsyncDeflateIdleMonitors, deflate_global_idle_monitors_using_JT()
1682 // and deflate_per_thread_idle_monitors_using_JT() (in another thread) can
1683 // run at the same time as om_flush() so we have to follow a careful
1684 // protocol to prevent list corruption.
1685 
1686 void ObjectSynchronizer::om_flush(Thread* self) {
1687   // Process the per-thread in-use list first to be consistent.
1688   int in_use_count = 0;
1689   ObjectMonitor* in_use_list = NULL;
1690   ObjectMonitor* in_use_tail = NULL;
1691   NoSafepointVerifier nsv;
1692 
1693   // This function can race with a list walker or with an async
1694   // deflater thread so we lock the list head to prevent confusion.
1695   // An async deflater thread checks to see if the target thread
1696   // is exiting, but if it has made it past that check before we
1697   // started exiting, then it is racing to get to the in-use list.
1698   if ((in_use_list = get_list_head_locked(&self->om_in_use_list)) != NULL) {
1699     // At this point, we have locked the in-use list head so a racing
1700     // thread cannot come in after us. However, a racing thread could
1701     // be ahead of us; we'll detect that and delay to let it finish.
1702     //
1703     // The thread is going away, however the ObjectMonitors on the
1704     // om_in_use_list may still be in-use by other threads. Link
1705     // them to in_use_tail, which will be linked into the global
1706     // in-use list (om_list_globals._in_use_list) below.
1707     //
1708     // Account for the in-use list head before the loop since it is
1709     // already locked (by this thread):
1710     in_use_tail = in_use_list;
1711     in_use_count++;
1712     for (ObjectMonitor* cur_om = unmarked_next(in_use_list); cur_om != NULL;) {
1713       if (is_locked(cur_om)) {
1714         // cur_om is locked so there must be a racing walker or async
1715         // deflater thread ahead of us; we'll give it a chance to finish.
1716         while (is_locked(cur_om)) {
1717           os::naked_short_sleep(1);
1718         }
1719         // Refetch the possibly changed next field and try again.
1720         cur_om = unmarked_next(in_use_tail);
1721         continue;
1722       }
1723       if (cur_om->is_free()) {
1724         // cur_om was deflated and the allocation state was changed
1725         // to Free while it was locked. We happened to see it just
1726         // after it was unlocked (and added to the free list).
1727         // Refetch the possibly changed next field and try again.
1728         cur_om = unmarked_next(in_use_tail);
1729         continue;
1730       }
1731       in_use_tail = cur_om;
1732       in_use_count++;
1733       cur_om = unmarked_next(cur_om);
1734     }
1735     guarantee(in_use_tail != NULL, "invariant");
1736     int l_om_in_use_count = Atomic::load(&self->om_in_use_count);
1737     ADIM_guarantee(l_om_in_use_count == in_use_count, "in-use counts don't match: "
1738                    "l_om_in_use_count=%d, in_use_count=%d", l_om_in_use_count, in_use_count);
1739     Atomic::store(&self->om_in_use_count, 0);
1740     // Clear the in-use list head (which also unlocks it):
1741     Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL);
1742     om_unlock(in_use_list);
1743   }
1744 
1745   int free_count = 0;
1746   ObjectMonitor* free_list = NULL;
1747   ObjectMonitor* free_tail = NULL;
1748   // This function can race with a list walker thread so we lock the
1749   // list head to prevent confusion.
1750   if ((free_list = get_list_head_locked(&self->om_free_list)) != NULL) {
1751     // At this point, we have locked the free list head so a racing
1752     // thread cannot come in after us. However, a racing thread could
1753     // be ahead of us; we'll detect that and delay to let it finish.
1754     //
1755     // The thread is going away. Set 'free_tail' to the last per-thread free
1756     // monitor which will be linked to om_list_globals._free_list below.
1757     //
1758     // Account for the free list head before the loop since it is
1759     // already locked (by this thread):
1760     free_tail = free_list;
1761     free_count++;
1762     for (ObjectMonitor* s = unmarked_next(free_list); s != NULL; s = unmarked_next(s)) {
1763       if (is_locked(s)) {
1764         // s is locked so there must be a racing walker thread ahead
1765         // of us; we'll give it a chance to finish.
1766         while (is_locked(s)) {
1767           os::naked_short_sleep(1);
1768         }
1769       }
1770       free_tail = s;
1771       free_count++;
1772       guarantee(s->object() == NULL, "invariant");
1773       stringStream ss;
1774       guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
1775     }
1776     guarantee(free_tail != NULL, "invariant");
1777     int l_om_free_count = Atomic::load(&self->om_free_count);
1778     ADIM_guarantee(l_om_free_count == free_count, "free counts don't match: "
1779                    "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count);
1780     Atomic::store(&self->om_free_count, 0);
1781     Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL);
1782     om_unlock(free_list);
1783   }
1784 
1785   if (free_tail != NULL) {
1786     prepend_list_to_global_free_list(free_list, free_tail, free_count);
1787   }
1788 
1789   if (in_use_tail != NULL) {
1790     prepend_list_to_global_in_use_list(in_use_list, in_use_tail, in_use_count);
1791   }
1792 
1793   LogStreamHandle(Debug, monitorinflation) lsh_debug;
1794   LogStreamHandle(Info, monitorinflation) lsh_info;
1795   LogStream* ls = NULL;
1796   if (log_is_enabled(Debug, monitorinflation)) {
1797     ls = &lsh_debug;
1798   } else if ((free_count != 0 || in_use_count != 0) &&


1801   }
1802   if (ls != NULL) {
1803     ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d"
1804                  ", in_use_count=%d" ", om_free_provision=%d",
1805                  p2i(self), free_count, in_use_count, self->om_free_provision);
1806   }
1807 }
1808 
1809 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
1810                                        const oop obj,
1811                                        ObjectSynchronizer::InflateCause cause) {
1812   assert(event != NULL, "invariant");
1813   assert(event->should_commit(), "invariant");
1814   event->set_monitorClass(obj->klass());
1815   event->set_address((uintptr_t)(void*)obj);
1816   event->set_cause((u1)cause);
1817   event->commit();
1818 }
1819 
1820 // Fast path code shared by multiple functions
1821 void ObjectSynchronizer::inflate_helper(ObjectMonitorHandle* omh_p, oop obj) {
1822   while (true) {
1823     markWord mark = obj->mark();
1824     if (mark.has_monitor()) {
1825       if (!omh_p->save_om_ptr(obj, mark)) {
1826         // Lost a race with async deflation so try again.
1827         assert(AsyncDeflateIdleMonitors, "sanity check");
1828         continue;
1829       }
1830       ObjectMonitor* monitor = omh_p->om_ptr();
1831       assert(ObjectSynchronizer::verify_objmon_isinpool(monitor), "monitor is invalid");
1832       markWord dmw = monitor->header();
1833       assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1834       return;
1835     }
1836     inflate(omh_p, Thread::current(), obj, inflate_cause_vm_internal);
1837     return;
1838   }

1839 }
1840 
1841 void ObjectSynchronizer::inflate(ObjectMonitorHandle* omh_p, Thread* self,
1842                                  oop object, const InflateCause cause) {
1843   // Inflate mutates the heap ...
1844   // Relaxing assertion for bug 6320749.
1845   assert(Universe::verify_in_progress() ||
1846          !SafepointSynchronize::is_at_safepoint(), "invariant");
1847 
1848   EventJavaMonitorInflate event;
1849 
1850   for (;;) {
1851     const markWord mark = object->mark();
1852     assert(!mark.has_bias_pattern(), "invariant");
1853 
1854     // The mark can be in one of the following states:
1855     // *  Inflated     - just return
1856     // *  Stack-locked - coerce it to inflated
1857     // *  INFLATING    - busy wait for conversion to complete
1858     // *  Neutral      - aggressively inflate the object.
1859     // *  BIASED       - Illegal.  We should never see this
1860 
1861     // CASE: inflated
1862     if (mark.has_monitor()) {
1863       if (!omh_p->save_om_ptr(object, mark)) {
1864         // Lost a race with async deflation so try again.
1865         assert(AsyncDeflateIdleMonitors, "sanity check");
1866         continue;
1867       }
1868       ObjectMonitor* inf = omh_p->om_ptr();
1869       markWord dmw = inf->header();
1870       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1871       assert(inf->object() == object, "invariant");
1872       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1873       return;
1874     }
1875 
1876     // CASE: inflation in progress - inflating over a stack-lock.
1877     // Some other thread is converting from stack-locked to inflated.
1878     // Only that thread can complete inflation -- other threads must wait.
1879     // The INFLATING value is transient.
1880     // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1881     // We could always eliminate polling by parking the thread on some auxiliary list.
1882     if (mark == markWord::INFLATING()) {
1883       read_stable_mark(object);
1884       continue;
1885     }
1886 
1887     // CASE: stack-locked
1888     // Could be stack-locked either by this thread or by some other thread.
1889     //
1890     // Note that we allocate the objectmonitor speculatively, _before_ attempting
1891     // to install INFLATING into the mark word.  We originally installed INFLATING,
1892     // allocated the objectmonitor, and then finally STed the address of the
1893     // objectmonitor into the mark.  This was correct, but artificially lengthened


1899     // critical INFLATING...ST interval.  A thread can transfer
1900     // multiple objectmonitors en masse from the global free list to its local free list.
1901     // This reduces coherency traffic and lock contention on the global free list.
1902     // Using such local free lists, it doesn't matter if the om_alloc() call appears
1903     // before or after the CAS(INFLATING) operation.
1904     // See the comments in om_alloc().
1905 
1906     LogStreamHandle(Trace, monitorinflation) lsh;
1907 
1908     if (mark.has_locker()) {
1909       ObjectMonitor* m = om_alloc(self);
1910       // Optimistically prepare the objectmonitor - anticipate successful CAS
1911       // We do this before the CAS in order to minimize the length of time
1912       // in which INFLATING appears in the mark.
1913       m->Recycle();
1914       m->_Responsible  = NULL;
1915       m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class
1916 
1917       markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1918       if (cmp != mark) {
1919         // om_release() will reset the allocation state from New to Free.
1920         om_release(self, m, true);
1921         continue;       // Interference -- just retry
1922       }
1923 
1924       // We've successfully installed INFLATING (0) into the mark-word.
1925       // This is the only case where 0 will appear in a mark-word.
1926       // Only the singular thread that successfully swings the mark-word
1927       // to 0 can perform (or more precisely, complete) inflation.
1928       //
1929       // Why do we CAS a 0 into the mark-word instead of just CASing the
1930       // mark-word from the stack-locked value directly to the new inflated state?
1931       // Consider what happens when a thread unlocks a stack-locked object.
1932       // It attempts to use CAS to swing the displaced header value from the
1933       // on-stack BasicLock back into the object header.  Recall also that the
1934       // header value (hash code, etc) can reside in (a) the object header, or
1935       // (b) a displaced header associated with the stack-lock, or (c) a displaced
1936       // header in an ObjectMonitor.  The inflate() routine must copy the header
1937       // value from the BasicLock on the owner's stack to the ObjectMonitor, all
1938       // the while preserving the hashCode stability invariants.  If the owner
1939       // decides to release the lock while the value is 0, the unlock will fail
1940       // and control will eventually pass from slow_exit() to inflate.  The owner
1941       // will then spin, waiting for the 0 value to disappear.   Put another way,
1942       // the 0 causes the owner to stall if the owner happens to try to
1943       // drop the lock (restoring the header from the BasicLock to the object)
1944       // while inflation is in-progress.  This protocol avoids races that
1945       // would otherwise permit hashCode values to change or "flicker" for an object.
1946       // Critically, while object->mark is 0 mark.displaced_mark_helper() is stable.
1947       // 0 serves as a "BUSY" inflate-in-progress indicator.
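      //
      // Illustrative sketch only (the real stall lives in the unlock slow
      // path and in read_stable_mark(), not here): an unlocking owner that
      // observes the transient 0 value effectively does
      //
      //   while (object->mark() == markWord::INFLATING()) {
      //     ;  // spin/yield until the inflating thread installs the monitor
      //   }
      //
      // before retrying, which is what keeps mark.displaced_mark_helper()
      // stable while the mark is 0.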
1948 
1949 
1950       // fetch the displaced mark from the owner's stack.
1951       // The owner can't die or unwind past the lock while our INFLATING
1952       // object is in the mark.  Furthermore the owner can't complete
1953       // an unlock on the object, either.
1954       markWord dmw = mark.displaced_mark_helper();
1955       // Catch if the object's header is not neutral (not locked and
1956       // not marked is what we care about here).
1957       ADIM_guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1958 
1959       // Setup monitor fields to proper values -- prepare the monitor
1960       m->set_header(dmw);
1961 
1962       // Optimization: if the mark.locker stack address is associated
1963       // with this thread we could simply set m->_owner = self.
1964       // Note that a thread can inflate an object
1965       // that it has stack-locked -- as might happen in wait() -- directly
1966       // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
1967       if (AsyncDeflateIdleMonitors) {
1968         m->set_owner_from(NULL, DEFLATER_MARKER, mark.locker());
1969       } else {
1970         m->set_owner_from(NULL, mark.locker());
1971       }
1972       m->set_object(object);
1973       // TODO-FIXME: assert BasicLock->dhw != 0.
1974 
1975       omh_p->set_om_ptr(m);
1976 
1977       // Must preserve store ordering. The monitor state must
1978       // be stable at the time of publishing the monitor address.
1979       guarantee(object->mark() == markWord::INFLATING(), "invariant");
1980       object->release_set_mark(markWord::encode(m));
1981 
1982       // Once ObjectMonitor is configured and the object is associated
1983       // with the ObjectMonitor, it is safe to allow async deflation:
1984       assert(m->is_new(), "freshly allocated monitor must be new");
1985       m->set_allocation_state(ObjectMonitor::Old);
1986 
1987       // Hopefully the performance counters are allocated on distinct cache lines
1988       // to avoid false sharing on MP systems ...
1989       OM_PERFDATA_OP(Inflations, inc());
1990       if (log_is_enabled(Trace, monitorinflation)) {
1991         ResourceMark rm(self);
1992         lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1993                      INTPTR_FORMAT ", type='%s'", p2i(object),
1994                      object->mark().value(), object->klass()->external_name());
1995       }
1996       if (event.should_commit()) {
1997         post_monitor_inflate_event(&event, object, cause);
1998       }
1999       ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
2000       return;
2001     }
2002 
2003     // CASE: neutral
2004     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
2005     // If we know we're inflating for entry it's better to inflate by swinging a
2006     // pre-locked ObjectMonitor pointer into the object header.   A successful
2007     // CAS inflates the object *and* confers ownership to the inflating thread.
2008     // In the current implementation we use a 2-step mechanism where we CAS()
2009     // to inflate and then CAS() again to try to swing _owner from NULL to self.
2010     // An inflateTry() method that we could call from enter() would be useful.
2011 
2012     // Catch if the object's header is not neutral (not locked and
2013     // not marked is what we care about here).
2014     ADIM_guarantee(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
2015     ObjectMonitor* m = om_alloc(self);
2016     // prepare m for installation - set monitor to initial state
2017     m->Recycle();
2018     m->set_header(mark);
2019     if (AsyncDeflateIdleMonitors) {
2020       // DEFLATER_MARKER is the only non-NULL value we should see here.
2021       m->try_set_owner_from(DEFLATER_MARKER, NULL);
2022     }
2023     m->set_object(object);
2024     m->_Responsible  = NULL;
2025     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
2026 
2027     omh_p->set_om_ptr(m);
2028 
2029     if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
2030       m->set_header(markWord::zero());
2031       m->set_object(NULL);
2032       m->Recycle();
2033       omh_p->set_om_ptr(NULL);
2034       // om_release() will reset the allocation state from New to Free.
2035       om_release(self, m, true);
2036       m = NULL;
2037       continue;
2038       // interference - the markword changed - just retry.
2039       // The state-transitions are one-way, so there's no chance of
2040       // live-lock -- "Inflated" is an absorbing state.
2041     }
2042 
2043     // Once the ObjectMonitor is configured and object is associated
2044     // with the ObjectMonitor, it is safe to allow async deflation:
2045     assert(m->is_new(), "freshly allocated monitor must be new");
2046     m->set_allocation_state(ObjectMonitor::Old);
2047 
2048     // Hopefully the performance counters are allocated on distinct
2049     // cache lines to avoid false sharing on MP systems ...
2050     OM_PERFDATA_OP(Inflations, inc());
2051     if (log_is_enabled(Trace, monitorinflation)) {
2052       ResourceMark rm(self);
2053       lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
2054                    INTPTR_FORMAT ", type='%s'", p2i(object),
2055                    object->mark().value(), object->klass()->external_name());
2056     }
2057     if (event.should_commit()) {
2058       post_monitor_inflate_event(&event, object, cause);
2059     }
2060     ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
2061     return;
2062   }
2063 }
2064 
2065 
2066 // We maintain a list of in-use monitors for each thread.
2067 //
2068 // For safepoint based deflation:
2069 // deflate_thread_local_monitors() scans a single thread's in-use list, while
2070 // deflate_idle_monitors() scans only a global list of in-use monitors which
2071 // is populated only as a thread dies (see om_flush()).
2072 //
2073 // These operations are called at all safepoints, immediately after mutators
2074 // are stopped, but before any objects have moved. Collectively they traverse
2075 // the population of in-use monitors, deflating where possible. The scavenged
2076 // monitors are returned to the global monitor free list.
2077 //
2078 // Beware that we scavenge at *every* stop-the-world point. Having a large
2079 // number of monitors in-use could negatively impact performance. We also want
2080 // to minimize the total # of monitors in circulation, as they incur a small
2081 // footprint penalty.
2082 //
2083 // Perversely, the heap size -- and thus the STW safepoint rate --
2084 // typically drives the scavenge rate.  Large heaps can mean infrequent GC,
2085 // which in turn can mean large(r) numbers of ObjectMonitors in circulation.
2086 // This is an unfortunate aspect of this design.
2087 //
2088 // For async deflation:
2089 // If a special deflation request is made, then the safepoint based
2090 // deflation mechanism is used. Otherwise, an async deflation request
2091 // is registered with the ServiceThread and it is notified.
2092 
2093 void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* counters) {
2094   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2095 
2096   // The per-thread in-use lists are handled in
2097   // ParallelSPCleanupThreadClosure::do_thread().
2098 
2099   if (!AsyncDeflateIdleMonitors || is_special_deflation_requested()) {
2100     // Use the older mechanism for the global in-use list or if a
2101     // special deflation has been requested before the safepoint.
2102     ObjectSynchronizer::deflate_idle_monitors(counters);
2103     return;
2104   }
2105 
2106   log_debug(monitorinflation)("requesting async deflation of idle monitors.");
2107   // Request deflation of idle monitors by the ServiceThread:
2108   set_is_async_deflation_requested(true);
2109   MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
2110   ml.notify_all();
2111 
2112   if (log_is_enabled(Debug, monitorinflation)) {
2113     // exit_globals()'s call to audit_and_print_stats() is done
2114     // at the Info level and not at a safepoint.
2115     // For safepoint based deflation, audit_and_print_stats() is called
2116     // in ObjectSynchronizer::finish_deflate_idle_monitors() at the
2117     // Debug level at a safepoint.
2118     ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
2119   }
2120 }
2121 
2122 // Deflate a single monitor if not in-use
2123 // Return true if deflated, false if in-use
2124 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
2125                                          ObjectMonitor** free_head_p,
2126                                          ObjectMonitor** free_tail_p) {
2127   bool deflated;
2128   // Normal case ... The monitor is associated with obj.
2129   const markWord mark = obj->mark();
2130   guarantee(mark == markWord::encode(mid), "should match: mark="
2131             INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
2132             markWord::encode(mid).value());
2133   // Make sure that mark.monitor() and markWord::encode() agree:
2134   guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
2135             ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
2136   const markWord dmw = mid->header();
2137   guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
2138 
2139   if (mid->is_busy() || mid->ref_count() != 0) {
2140     // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor*
2141     // is in use so no deflation.
2142     deflated = false;
2143   } else {
2144     // Deflate the monitor if it is no longer being used
2145     // It's idle - scavenge and return to the global free list
2146     // plain old deflation ...
2147     if (log_is_enabled(Trace, monitorinflation)) {
2148       ResourceMark rm;
2149       log_trace(monitorinflation)("deflate_monitor: "
2150                                   "object=" INTPTR_FORMAT ", mark="
2151                                   INTPTR_FORMAT ", type='%s'", p2i(obj),
2152                                   mark.value(), obj->klass()->external_name());
2153     }
2154 
2155     // Restore the header back to obj
2156     obj->release_set_mark(dmw);
2157     if (AsyncDeflateIdleMonitors) {
2158       // clear() expects the owner field to be NULL.
2159       // DEFLATER_MARKER is the only non-NULL value we should see here.
2160       mid->try_set_owner_from(DEFLATER_MARKER, NULL);
2161     }
2162     mid->clear();
2163 
2164     assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
2165            p2i(mid->object()));
2166     assert(mid->is_free(), "invariant");
2167 
2168     // Move the deflated ObjectMonitor to the working free list
2169     // defined by free_head_p and free_tail_p.
2170     if (*free_head_p == NULL) *free_head_p = mid;
2171     if (*free_tail_p != NULL) {
2172       // We append to the list so the caller can use mid->_next_om
2173       // to fix the linkages in its context.
2174       ObjectMonitor* prevtail = *free_tail_p;
2175       // Should have been cleaned up by the caller:
2176       // Note: Should not have to lock prevtail here since we're at a
2177       // safepoint and ObjectMonitors on the local free list should
2178       // not be accessed in parallel.
2179 #ifdef ASSERT
2180       ObjectMonitor* l_next_om = prevtail->next_om();
2181 #endif
2182       assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
2183       prevtail->set_next_om(mid);
2184     }
2185     *free_tail_p = mid;
2186     // At this point, mid->_next_om still refers to its current
2187     // value and another ObjectMonitor's _next_om field still
2188     // refers to this ObjectMonitor. Those linkages have to be
2189     // cleaned up by the caller who has the complete context.
2190     deflated = true;
2191   }
2192   return deflated;
2193 }
2194 
2195 // Deflate the specified ObjectMonitor if not in-use using a JavaThread.
2196 // Returns true if it was deflated and false otherwise.
2197 //
2198 // The async deflation protocol sets owner to DEFLATER_MARKER and
2199 // makes ref_count negative as signals to contending threads that
2200 // an async deflation is in progress. There are a number of checks
2201 // as part of the protocol to make sure that the calling thread has
2202 // not lost the race to a contending thread or to a thread that just
2203 // wants to use the ObjectMonitor*.
2204 //
2205 // The ObjectMonitor has been successfully async deflated when:
2206 // (owner == DEFLATER_MARKER && ref_count < 0)
2207 // Contending threads or ObjectMonitor*-using threads that see those
2208 // values know to retry their operation.
2209 //
2210 bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid,
2211                                                   ObjectMonitor** free_head_p,
2212                                                   ObjectMonitor** free_tail_p) {
2213   assert(AsyncDeflateIdleMonitors, "sanity check");
2214   assert(Thread::current()->is_Java_thread(), "precondition");
2215   // A newly allocated ObjectMonitor should not be seen here so we
2216   // avoid an endless inflate/deflate cycle.
2217   assert(mid->is_old(), "must be old: allocation_state=%d",
2218          (int) mid->allocation_state());
2219 
2220   if (mid->is_busy() || mid->ref_count() != 0) {
2221     // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor*
2222     // is in use so no deflation.
2223     return false;
2224   }
2225 
2226   if (mid->try_set_owner_from(NULL, DEFLATER_MARKER) == NULL) {
2227     // ObjectMonitor is not owned by another thread. Our setting
2228     // owner to DEFLATER_MARKER forces any contending thread through
2229     // the slow path. This is just the first part of the async
2230     // deflation dance.
2231 
2232     if (mid->_contentions != 0 || mid->_waiters != 0) {
2233       // Another thread has raced to enter the ObjectMonitor after
2234       // mid->is_busy() above, or has already entered and waited on
2235       // it, which makes it busy, so no deflation. Restore owner to
2236       // NULL if it is still DEFLATER_MARKER.
2237       mid->try_set_owner_from(DEFLATER_MARKER, NULL);
2238       return false;
2239     }
2240 
2241     if (Atomic::cmpxchg(&mid->_ref_count, (jint)0, -max_jint) == 0) {
2242       // Make ref_count negative to force any contending threads or
2243       // ObjectMonitor*-using threads to retry. This is the second
2244       // part of the async deflation dance.
2245 
2246       if (mid->owner_is_DEFLATER_MARKER()) {
2247         // If owner is still DEFLATER_MARKER, then we have successfully
2248         // signaled any contending threads to retry. If it is not, then we
2249         // have lost the race to an entering thread and the ObjectMonitor
2250         // is now busy. This is the third and final part of the async
2251         // deflation dance.
2252         // Note: This owner check solves the ABA problem with ref_count
2253         // where another thread acquired the ObjectMonitor, finished
2254         // using it and restored the ref_count to zero.
2255 
2256         // Sanity checks for the races:
2257         guarantee(mid->_contentions == 0, "must be 0: contentions=%d",
2258                   mid->_contentions);
2259         guarantee(mid->_waiters == 0, "must be 0: waiters=%d", mid->_waiters);
2260         guarantee(mid->_cxq == NULL, "must be no contending threads: cxq="
2261                   INTPTR_FORMAT, p2i(mid->_cxq));
2262         guarantee(mid->_EntryList == NULL,
2263                   "must be no entering threads: EntryList=" INTPTR_FORMAT,
2264                   p2i(mid->_EntryList));
2265 
2266         const oop obj = (oop) mid->object();
2267         if (log_is_enabled(Trace, monitorinflation)) {
2268           ResourceMark rm;
2269           log_trace(monitorinflation)("deflate_monitor_using_JT: "
2270                                       "object=" INTPTR_FORMAT ", mark="
2271                                       INTPTR_FORMAT ", type='%s'",
2272                                       p2i(obj), obj->mark().value(),
2273                                       obj->klass()->external_name());
2274         }
2275 
2276         // Install the old mark word if nobody else has already done it.
2277         mid->install_displaced_markword_in_object(obj);
2278         mid->clear_using_JT();
2279 
2280         assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT,
2281                p2i(mid->object()));
2282         assert(mid->is_free(), "must be free: allocation_state=%d",
2283                (int) mid->allocation_state());
2284 
2285         // Move the deflated ObjectMonitor to the working free list
2286         // defined by free_head_p and free_tail_p.
2287         if (*free_head_p == NULL) {
2288           // First one on the list.
2289           *free_head_p = mid;
2290         }
2291         if (*free_tail_p != NULL) {
2292           // We append to the list so the caller can use mid->_next_om
2293           // to fix the linkages in its context.
2294           ObjectMonitor* prevtail = *free_tail_p;
2295           // Should have been cleaned up by the caller:
2296           om_lock(prevtail);
2297 #ifdef ASSERT
2298           ObjectMonitor* l_next_om = unmarked_next(prevtail);
2299 #endif
2300           assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
2301           prevtail->set_next_om(mid);  // prevtail now points to mid (and is unlocked)
2302         }
2303         *free_tail_p = mid;
2304 
2305         // At this point, mid->_next_om still refers to its current
2306         // value and another ObjectMonitor's _next_om field still
2307         // refers to this ObjectMonitor. Those linkages have to be
2308         // cleaned up by the caller who has the complete context.
2309 
2310         // We leave owner == DEFLATER_MARKER and ref_count < 0
2311         // to force any racing threads to retry.
2312         return true;  // Success, ObjectMonitor has been deflated.
2313       }
2314 
2315       // The owner was changed from DEFLATER_MARKER so we lost the
2316       // race since the ObjectMonitor is now busy.
2317 
2318       // Add back max_jint to restore the ref_count field to its
2319       // proper value (which may not be what we saw above):
2320       Atomic::add(&mid->_ref_count, max_jint);
2321 
2322 #ifdef ASSERT
2323       jint l_ref_count = mid->ref_count();
2324 #endif
2325       assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d",
2326              l_ref_count, mid->ref_count());
2327       return false;
2328     }
2329 
2330     // The ref_count was no longer 0 so we lost the race since the
2331     // ObjectMonitor is now busy or the ObjectMonitor* is now in use.
2332     // Restore owner to NULL if it is still DEFLATER_MARKER:
2333     mid->try_set_owner_from(DEFLATER_MARKER, NULL);
2334   }
2335 
2336   // The owner field is no longer NULL so we lost the race since the
2337   // ObjectMonitor is now busy.
2338   return false;
2339 }
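
// A condensed sketch (illustrative only, not part of this change) of
// the three-part async deflation dance implemented above:
//
//   // Part 1: setting owner to DEFLATER_MARKER forces contending
//   // threads through the slow path.
//   if (mid->try_set_owner_from(NULL, DEFLATER_MARKER) != NULL) {
//     return false;  // owner was non-NULL so the monitor is busy
//   }
//   // Part 2: a negative ref_count forces ObjectMonitor*-using
//   // threads to retry.
//   if (Atomic::cmpxchg(&mid->_ref_count, (jint)0, -max_jint) != 0) {
//     mid->try_set_owner_from(DEFLATER_MARKER, NULL);  // undo part 1
//     return false;
//   }
//   // Part 3: re-checking owner closes the ref_count ABA window opened
//   // by a thread that used the monitor and restored ref_count to zero.
//   if (!mid->owner_is_DEFLATER_MARKER()) {
//     Atomic::add(&mid->_ref_count, max_jint);  // undo part 2
//     return false;
//   }
//   // Success: leave owner == DEFLATER_MARKER and ref_count < 0 so
//   // racing threads that see those values know to retry.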
2340 
2341 // Walk a given monitor list, and deflate idle monitors.
2342 // The given list could be a per-thread list or a global list.
2343 //
2344 // In the case of parallel processing of thread-local monitor lists,
2345 // work is done by Threads::parallel_threads_do() which ensures that
2346 // each Java thread is processed by exactly one worker thread, and
2347 // thus avoids conflicts that would arise if worker threads were to
2348 // process the same monitor lists concurrently.
2349 //
2350 // See also ParallelSPCleanupTask and
2351 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
2352 // Threads::parallel_java_threads_do() in thread.cpp.
2353 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
2354                                              int* count_p,
2355                                              ObjectMonitor** free_head_p,
2356                                              ObjectMonitor** free_tail_p) {
2357   ObjectMonitor* cur_mid_in_use = NULL;
2358   ObjectMonitor* mid = NULL;
2359   ObjectMonitor* next = NULL;
2360   int deflated_count = 0;


2371       // by unlinking mid from the global or per-thread in-use list.
2372       if (cur_mid_in_use == NULL) {
2373         // mid is the list head so switch the list head to next:
2374         Atomic::store(list_p, next);
2375       } else {
2376         // Switch cur_mid_in_use's next field to next:
2377         cur_mid_in_use->set_next_om(next);
2378       }
2379       // At this point mid is disconnected from the in-use list.
2380       deflated_count++;
2381       Atomic::dec(count_p);
2382     // mid is the current tail in the free_head_p list so NULL-terminate it:
2383       mid->set_next_om(NULL);
2384     } else {
2385       cur_mid_in_use = mid;
2386     }
2387   }
2388   return deflated_count;
2389 }
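
// A caller-side sketch (illustrative only, mirroring
// deflate_thread_local_monitors() below) of how the free_head_p and
// free_tail_p out-parameters are consumed after the list walk:
//
//   ObjectMonitor* free_head_p = NULL;  // local SLL of scavenged monitors
//   ObjectMonitor* free_tail_p = NULL;
//   int deflated_count =
//       deflate_monitor_list(&thread->om_in_use_list,
//                            &thread->om_in_use_count,
//                            &free_head_p, &free_tail_p);
//   if (free_head_p != NULL) {
//     // free_tail_p was NULL-terminated by deflate_monitor_list():
//     prepend_list_to_global_free_list(free_head_p, free_tail_p,
//                                      deflated_count);
//   }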
2390 
2391 // Walk a given ObjectMonitor list and deflate idle ObjectMonitors using
2392 // a JavaThread. Returns the number of deflated ObjectMonitors. The given
2393 // list could be a per-thread in-use list or the global in-use list.
2394 // If a safepoint has started, then we save state via saved_mid_in_use_p
2395 // and return to the caller to honor the safepoint.
2396 //
2397 int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p,
2398                                                       int* count_p,
2399                                                       ObjectMonitor** free_head_p,
2400                                                       ObjectMonitor** free_tail_p,
2401                                                       ObjectMonitor** saved_mid_in_use_p) {
2402   assert(AsyncDeflateIdleMonitors, "sanity check");
2403   JavaThread* self = JavaThread::current();
2404 
2405   ObjectMonitor* cur_mid_in_use = NULL;
2406   ObjectMonitor* mid = NULL;
2407   ObjectMonitor* next = NULL;
2408   ObjectMonitor* next_next = NULL;
2409   int deflated_count = 0;
2410   NoSafepointVerifier nsv;
2411 
2412   // We use the more complicated lock-cur_mid_in_use-and-mid-as-we-go
2413   // protocol because om_release() can do list deletions in parallel;
2414   // this also prevents races with a list walker thread. We also
2415   // lock-next-next-as-we-go to prevent an om_flush() that is behind
2416   // this thread from passing us.
2417   if (*saved_mid_in_use_p == NULL) {
2418     // No saved state so start at the beginning.
2419     // Lock the list head so we can possibly deflate it:
2420     if ((mid = get_list_head_locked(list_p)) == NULL) {
2421       return 0;  // The list is empty so nothing to deflate.
2422     }
2423     next = unmarked_next(mid);
2424   } else {
2425     // We're restarting after a safepoint so restore the necessary state
2426     // before we resume.
2427     cur_mid_in_use = *saved_mid_in_use_p;
2428     // Lock cur_mid_in_use so we can possibly update its
2429     // next field to extract a deflated ObjectMonitor.
2430     om_lock(cur_mid_in_use);
2431     mid = unmarked_next(cur_mid_in_use);
2432     if (mid == NULL) {
2433       om_unlock(cur_mid_in_use);
2434       *saved_mid_in_use_p = NULL;
2435       return 0;  // The remainder is empty so nothing more to deflate.
2436     }
2437     // Lock mid so we can possibly deflate it:
2438     om_lock(mid);
2439     next = unmarked_next(mid);
2440   }
2441 
2442   while (true) {
2443     // The current mid is locked at this point. If we have a
2444     // cur_mid_in_use, then it is also locked at this point.
2445 
2446     if (next != NULL) {
2447       // We lock next so that an om_flush() thread that is behind us
2448       // cannot pass us when we unlock the current mid.
2449       om_lock(next);
2450       next_next = unmarked_next(next);
2451     }
2452 
2453     // Only try to deflate if there is an associated Java object and if
2454     // mid is old (is not newly allocated and is not newly freed).
2455     if (mid->object() != NULL && mid->is_old() &&
2456         deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) {
2457       // Deflation succeeded and already updated free_head_p and
2458       // free_tail_p as needed. Finish the move to the local free list
2459       // by unlinking mid from the global or per-thread in-use list.
2460       if (cur_mid_in_use == NULL) {
2461         // mid is the list head and it is locked. Switch the list head
2462         // to next which is also locked (if not NULL) and also leave
2463         // mid locked:
2464         Atomic::store(list_p, next);
2465       } else {
2466         ObjectMonitor* locked_next = mark_om_ptr(next);
2467         // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's
2468         // next field to locked_next and also leave mid locked:
2469         cur_mid_in_use->set_next_om(locked_next);
2470       }
2471       // At this point mid is disconnected from the in-use list so
2472       // its lock no longer has any effect on the in-use list.
2473       deflated_count++;
2474       Atomic::dec(count_p);
2475       // mid is the current tail in the free_head_p list so NULL-terminate it
2476       // (which also unlocks it):
2477       mid->set_next_om(NULL);
2478 
2479       // All the list management is done so move on to the next one:
2480       mid = next;  // mid keeps non-NULL next's locked state
2481       next = next_next;
2482     } else {
2483       // mid is considered in-use if it does not have an associated
2484       // Java object or mid is not old or deflation did not succeed.
2485       // A mid->is_new() node can be seen here when it is freshly
2486       // returned by om_alloc() (and skips the deflation code path).
2487       // A mid->is_old() node can be seen here when deflation failed.
2488       // A mid->is_free() node can be seen here when a fresh node from
2489       // om_alloc() is released by om_release() due to losing the race
2490       // in inflate().
2491 
2492       // All the list management is done so move on to the next one:
2493       if (cur_mid_in_use != NULL) {
2494         om_unlock(cur_mid_in_use);
2495       }
2496       // The next cur_mid_in_use keeps mid's lock state so
2497       // that it is stable for a possible next field change. It
2498       // cannot be modified by om_release() while it is locked.
2499       cur_mid_in_use = mid;
2500       mid = next;  // mid keeps non-NULL next's locked state
2501       next = next_next;
2502 
2503       if (SafepointMechanism::should_block(self) &&
2504           cur_mid_in_use != Atomic::load(list_p) && cur_mid_in_use->is_old()) {
2505         // If a safepoint has started and cur_mid_in_use is not the list
2506         // head and is old, then it is safe to use as saved state. Return
2507         // to the caller before blocking.
2508         *saved_mid_in_use_p = cur_mid_in_use;
2509         om_unlock(cur_mid_in_use);
2510         if (mid != NULL) {
2511           om_unlock(mid);
2512         }
2513         return deflated_count;
2514       }
2515     }
2516     if (mid == NULL) {
2517       if (cur_mid_in_use != NULL) {
2518         om_unlock(cur_mid_in_use);
2519       }
2520       break;  // Reached end of the list so nothing more to deflate.
2521     }
2522 
2523     // The current mid's next field is locked at this point. If we have
2524     // a cur_mid_in_use, then it is also locked at this point.
2525   }
2526   // We finished the list without a safepoint starting so there's
2527   // no need to save state.
2528   *saved_mid_in_use_p = NULL;
2529   return deflated_count;
2530 }
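
// A simplified picture (illustrative only) of the unlink step above.
// With cur_mid_in_use, mid and next all locked, an om_flush() thread
// behind us cannot pass and om_release() cannot mutate the links while
// mid is spliced out:
//
//   cur_mid_in_use -> mid -> next -> ...             // all three locked
//   cur_mid_in_use->set_next_om(mark_om_ptr(next));  // bypass mid; next stays locked
//   mid->set_next_om(NULL);  // NULL-terminate mid (which also unlocks it)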
2531 
2532 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2533   counters->n_in_use = 0;              // currently associated with objects
2534   counters->n_in_circulation = 0;      // extant
2535   counters->n_scavenged = 0;           // reclaimed (global and per-thread)
2536   counters->per_thread_scavenged = 0;  // per-thread scavenge total
2537   counters->per_thread_times = 0.0;    // per-thread scavenge times
2538 }
2539 
2540 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
2541   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2542 
2543   if (AsyncDeflateIdleMonitors) {
2544     // Nothing to do when global idle ObjectMonitors are deflated using
2545     // a JavaThread unless a special deflation has been requested.
2546     if (!is_special_deflation_requested()) {
2547       return;
2548     }
2549   }
2550 
2551   bool deflated = false;
2552 
2553   ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
2554   ObjectMonitor* free_tail_p = NULL;
2555   elapsedTimer timer;
2556 
2557   if (log_is_enabled(Info, monitorinflation)) {
2558     timer.start();
2559   }
2560 
2561   // Note: the thread-local monitors lists get deflated in
2562   // a separate pass. See deflate_thread_local_monitors().
2563 
2564   // For moribund threads, scan om_list_globals._in_use_list
2565   int deflated_count = 0;
2566   if (Atomic::load(&om_list_globals._in_use_list) != NULL) {
2567     // Update n_in_circulation before om_list_globals._in_use_count is
2568     // updated by deflation.
2569     Atomic::add(&counters->n_in_circulation,
2570                 Atomic::load(&om_list_globals._in_use_count));


2583 #endif
2584     assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
2585     prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
2586     Atomic::add(&counters->n_scavenged, deflated_count);
2587   }
2588   timer.stop();
2589 
2590   LogStreamHandle(Debug, monitorinflation) lsh_debug;
2591   LogStreamHandle(Info, monitorinflation) lsh_info;
2592   LogStream* ls = NULL;
2593   if (log_is_enabled(Debug, monitorinflation)) {
2594     ls = &lsh_debug;
2595   } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2596     ls = &lsh_info;
2597   }
2598   if (ls != NULL) {
2599     ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
2600   }
2601 }
2602 
2603 class HandshakeForDeflation : public HandshakeClosure {
2604  public:
2605   HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}
2606 
2607   void do_thread(Thread* thread) {
2608     log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
2609                                 INTPTR_FORMAT, p2i(thread));
2610   }
2611 };
2612 
2613 void ObjectSynchronizer::deflate_idle_monitors_using_JT() {
2614   assert(AsyncDeflateIdleMonitors, "sanity check");
2615 
2616   // Deflate any global idle monitors.
2617   deflate_global_idle_monitors_using_JT();
2618 
2619   int count = 0;
2620   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2621     if (Atomic::load(&jt->om_in_use_count) > 0 && !jt->is_exiting()) {
2622       // This JavaThread is using ObjectMonitors so deflate any that
2623       // are idle unless this JavaThread is exiting; do not race with
2624       // ObjectSynchronizer::om_flush().
2625       deflate_per_thread_idle_monitors_using_JT(jt);
2626       count++;
2627     }
2628   }
2629   if (count > 0) {
2630     log_debug(monitorinflation)("did async deflation of idle monitors for %d thread(s).", count);
2631   }
2632 
2633   log_info(monitorinflation)("async global_population=%d, global_in_use_count=%d, "
2634                              "global_free_count=%d, global_wait_count=%d",
2635                              Atomic::load(&om_list_globals._population),
2636                              Atomic::load(&om_list_globals._in_use_count),
2637                              Atomic::load(&om_list_globals._free_count),
2638                              Atomic::load(&om_list_globals._wait_count));
2639 
2640   // The ServiceThread's async deflation request has been processed.
2641   set_is_async_deflation_requested(false);
2642 
2643   if (Atomic::load(&om_list_globals._wait_count) > 0) {
2644     // There are deflated ObjectMonitors waiting for a handshake
2645     // (or a safepoint) for safety.
2646 
2647     ObjectMonitor* list = Atomic::load(&om_list_globals._wait_list);
2648     ADIM_guarantee(list != NULL, "om_list_globals._wait_list must not be NULL");
2649     int count = Atomic::load(&om_list_globals._wait_count);
2650     Atomic::store(&om_list_globals._wait_count, 0);
2651     Atomic::store(&om_list_globals._wait_list, (ObjectMonitor*)NULL);
2652 
2653     // Find the tail for prepend_list_to_common(). No need to mark
2654     // ObjectMonitors for this list walk since only the deflater
2655     // thread manages the wait list.
2656     int l_count = 0;
2657     ObjectMonitor* tail = NULL;
2658     for (ObjectMonitor* n = list; n != NULL; n = unmarked_next(n)) {
2659       tail = n;
2660       l_count++;
2661     }
2662     ADIM_guarantee(count == l_count, "count=%d != l_count=%d", count, l_count);
2663 
2664     // Will execute a safepoint if !ThreadLocalHandshakes:
2665     HandshakeForDeflation hfd_hc;
2666     Handshake::execute(&hfd_hc);
2667 
2668     prepend_list_to_common(list, tail, count, &om_list_globals._free_list,
2669                            &om_list_globals._free_count);
2670 
2671     log_info(monitorinflation)("moved %d idle monitors from global waiting list to global free list", count);
2672   }
2673 }
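
// The global wait list lifecycle above, as a sketch (illustrative only):
//
//   in-use list --deflate--> local free list --prepend--> _wait_list
//   Handshake::execute(&hfd_hc);  // or a safepoint if !ThreadLocalHandshakes
//   _wait_list --prepend_list_to_common()--> _free_list
//
// The handshake (or safepoint) guarantees that no JavaThread still
// holds a stale ObjectMonitor* to a deflated monitor before that
// monitor can be re-allocated from om_list_globals._free_list.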
2674 
2675 // Deflate global idle ObjectMonitors using a JavaThread.
2676 //
2677 void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() {
2678   assert(AsyncDeflateIdleMonitors, "sanity check");
2679   assert(Thread::current()->is_Java_thread(), "precondition");
2680   JavaThread* self = JavaThread::current();
2681 
2682   deflate_common_idle_monitors_using_JT(true /* is_global */, self);
2683 }
2684 
2685 // Deflate the specified JavaThread's idle ObjectMonitors using a JavaThread.
2686 //
2687 void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT(JavaThread* target) {
2688   assert(AsyncDeflateIdleMonitors, "sanity check");
2689   assert(Thread::current()->is_Java_thread(), "precondition");
2690 
2691   deflate_common_idle_monitors_using_JT(false /* !is_global */, target);
2692 }
2693 
2694 // Deflate global or per-thread idle ObjectMonitors using a JavaThread.
2695 //
2696 void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* target) {
2697   JavaThread* self = JavaThread::current();
2698 
2699   int deflated_count = 0;
2700   ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged ObjectMonitors
2701   ObjectMonitor* free_tail_p = NULL;
2702   ObjectMonitor* saved_mid_in_use_p = NULL;
2703   elapsedTimer timer;
2704 
2705   if (log_is_enabled(Info, monitorinflation)) {
2706     timer.start();
2707   }
2708 
2709   if (is_global) {
2710     OM_PERFDATA_OP(MonExtant, set_value(Atomic::load(&om_list_globals._in_use_count)));
2711   } else {
2712     OM_PERFDATA_OP(MonExtant, inc(Atomic::load(&target->om_in_use_count)));
2713   }
2714 
2715   do {
2716     int local_deflated_count;
2717     if (is_global) {
2718       local_deflated_count =
2719           deflate_monitor_list_using_JT(&om_list_globals._in_use_list,
2720                                         &om_list_globals._in_use_count,
2721                                         &free_head_p, &free_tail_p,
2722                                         &saved_mid_in_use_p);
2723     } else {
2724       local_deflated_count =
2725           deflate_monitor_list_using_JT(&target->om_in_use_list,
2726                                         &target->om_in_use_count, &free_head_p,
2727                                         &free_tail_p, &saved_mid_in_use_p);
2728     }
2729     deflated_count += local_deflated_count;
2730 
2731     if (free_head_p != NULL) {
2732       // Move the deflated ObjectMonitors to the global free list.
2733       guarantee(free_tail_p != NULL && local_deflated_count > 0, "free_tail_p=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(free_tail_p), local_deflated_count);
2734       // Note: The target thread can be doing an om_alloc() that
2735       // is trying to prepend an ObjectMonitor on its in-use list
2736       // at the same time that we have deflated the current in-use
2737       // list head and put it on the local free list. prepend_to_common()
2738       // will detect the race and retry, which avoids list corruption,
2739       // but the next field in free_tail_p can flicker to marked
2740       // and then unmarked while prepend_to_common() is sorting it
2741       // all out.
2742 #ifdef ASSERT
2743       ObjectMonitor* l_next_om = unmarked_next(free_tail_p);
2744 #endif
2745       assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
2746 
2747       prepend_list_to_global_wait_list(free_head_p, free_tail_p, local_deflated_count);
2748 
2749       OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
2750     }
2751 
2752     if (saved_mid_in_use_p != NULL) {
2753       // deflate_monitor_list_using_JT() detected a safepoint starting.
2754       timer.stop();
2755       {
2756         if (is_global) {
2757           log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
2758         } else {
2759           log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(target));
2760         }
2761         assert(SafepointMechanism::should_block(self), "sanity check");
2762         ThreadBlockInVM blocker(self);
2763       }
2764       // Prepare for another loop after the safepoint.
2765       free_head_p = NULL;
2766       free_tail_p = NULL;
2767       if (log_is_enabled(Info, monitorinflation)) {
2768         timer.start();
2769       }
2770     }
2771   } while (saved_mid_in_use_p != NULL);
2772   timer.stop();
2773 
2774   LogStreamHandle(Debug, monitorinflation) lsh_debug;
2775   LogStreamHandle(Info, monitorinflation) lsh_info;
2776   LogStream* ls = NULL;
2777   if (log_is_enabled(Debug, monitorinflation)) {
2778     ls = &lsh_debug;
2779   } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2780     ls = &lsh_info;
2781   }
2782   if (ls != NULL) {
2783     if (is_global) {
2784       ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
2785     } else {
2786       ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(target), timer.seconds(), deflated_count);
2787     }
2788   }
2789 }
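
// A skeleton (illustrative only) of the pause/resume protocol above:
// the list walk stops when a safepoint starts, publishes what it has
// deflated so far, blocks, and then resumes from saved_mid_in_use_p:
//
//   do {
//     local_deflated_count =
//         deflate_monitor_list_using_JT(list_p, count_p, &free_head_p,
//                                       &free_tail_p, &saved_mid_in_use_p);
//     // prepend any locally deflated monitors to the global wait list
//     if (saved_mid_in_use_p != NULL) {
//       ThreadBlockInVM blocker(self);     // honor the pending safepoint
//       free_head_p = free_tail_p = NULL;  // prepare for another loop
//     }
//   } while (saved_mid_in_use_p != NULL);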
2790 
2791 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2792   // Report the cumulative time for deflating each thread's idle
2793   // monitors. Note: if the work is split among more than one
2794   // worker thread, then the reported time will likely be more
2795   // than a beginning-to-end measurement of the phase.
2796   log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged);
2797 
2798   bool needs_special_deflation = is_special_deflation_requested();
2799   if (AsyncDeflateIdleMonitors && !needs_special_deflation) {
2800     // Nothing to do when idle ObjectMonitors are deflated using
2801     // a JavaThread unless a special deflation has been requested.
2802     return;
2803   }
2804 
2805   if (log_is_enabled(Debug, monitorinflation)) {
2806     // exit_globals()'s call to audit_and_print_stats() is done
2807     // at the Info level and not at a safepoint.
2808     // For async deflation, audit_and_print_stats() is called in
2809     // ObjectSynchronizer::do_safepoint_work() at the Debug level
2810     // at a safepoint.
2811     ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
2812   } else if (log_is_enabled(Info, monitorinflation)) {
2813     log_info(monitorinflation)("global_population=%d, global_in_use_count=%d, "
2814                                "global_free_count=%d, global_wait_count=%d",
2815                                Atomic::load(&om_list_globals._population),
2816                                Atomic::load(&om_list_globals._in_use_count),
2817                                Atomic::load(&om_list_globals._free_count),
2818                                Atomic::load(&om_list_globals._wait_count));
2819   }
2820 
2821   Atomic::store(&_forceMonitorScavenge, 0);    // Reset
2822 
2823   OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
2824   OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
2825 
2826   GVars.stw_random = os::random();
2827   GVars.stw_cycle++;
2828 
2829   if (needs_special_deflation) {
2830     set_is_special_deflation_requested(false);  // special deflation is done
2831   }
2832 }
2833 
2834 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
2835   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2836 
2837   if (AsyncDeflateIdleMonitors && !is_special_deflation_requested()) {
2838     // Nothing to do if a special deflation has NOT been requested.
2839     return;
2840   }
2841 
2842   ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
2843   ObjectMonitor* free_tail_p = NULL;
2844   elapsedTimer timer;
2845 
2846   if (log_is_enabled(Info, safepoint, cleanup) ||
2847       log_is_enabled(Info, monitorinflation)) {
2848     timer.start();
2849   }
2850 
2851   // Update n_in_circulation before om_in_use_count is updated by deflation.
2852   Atomic::add(&counters->n_in_circulation, Atomic::load(&thread->om_in_use_count));
2853 
2854   int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
2855   Atomic::add(&counters->n_in_use, Atomic::load(&thread->om_in_use_count));
2856 
2857   if (free_head_p != NULL) {
2858     // Move the deflated ObjectMonitors back to the global free list.
2859     guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
2860 #ifdef ASSERT
2861     ObjectMonitor* l_next_om = free_tail_p->next_om();


2995   if (Atomic::load(&om_list_globals._population) == chk_om_population) {
2996     ls->print_cr("global_population=%d equals chk_om_population=%d",
2997                  Atomic::load(&om_list_globals._population), chk_om_population);
2998   } else {
2999     // With fine-grained locks on the monitor lists, it is possible for
3000     // log_monitor_list_counts() to return a value that doesn't match
3001     // om_list_globals._population. So far a higher value has been
3002     // seen in testing, so something is being double-counted by
3003     // log_monitor_list_counts().
3004     ls->print_cr("WARNING: global_population=%d is not equal to "
3005                  "chk_om_population=%d",
3006                  Atomic::load(&om_list_globals._population), chk_om_population);
3007   }
3008 
3009   // Check om_list_globals._in_use_list and om_list_globals._in_use_count:
3010   chk_global_in_use_list_and_count(ls, &error_cnt);
3011 
3012   // Check om_list_globals._free_list and om_list_globals._free_count:
3013   chk_global_free_list_and_count(ls, &error_cnt);
3014 
3015   // Check om_list_globals._wait_list and om_list_globals._wait_count:
3016   chk_global_wait_list_and_count(ls, &error_cnt);
3017 
3018   ls->print_cr("Checking per-thread lists:");
3019 
3020   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
3021     // Check om_in_use_list and om_in_use_count:
3022     chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);
3023 
3024     // Check om_free_list and om_free_count:
3025     chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
3026   }
3027 
3028   if (error_cnt == 0) {
3029     ls->print_cr("No errors found in monitor list checks.");
3030   } else {
3031     log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
3032   }
3033 
3034   if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
3035       (!on_exit && log_is_enabled(Trace, monitorinflation))) {
3036     // When exiting this log output is at the Info level. When called
3037     // at a safepoint, this log output is at the Trace level since


3048 void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n,
3049                                         outputStream * out, int *error_cnt_p) {
3050   stringStream ss;
3051   if (n->is_busy()) {
3052     if (jt != NULL) {
3053       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3054                     ": free per-thread monitor must not be busy: %s", p2i(jt),
3055                     p2i(n), n->is_busy_to_string(&ss));
3056     } else {
3057       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
3058                     "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
3059     }
3060     *error_cnt_p = *error_cnt_p + 1;
3061   }
3062   if (n->header().value() != 0) {
3063     if (jt != NULL) {
3064       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3065                     ": free per-thread monitor must have NULL _header "
3066                     "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
3067                     n->header().value());
3068       *error_cnt_p = *error_cnt_p + 1;
3069     } else if (!AsyncDeflateIdleMonitors) {
3070       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
3071                     "must have NULL _header field: _header=" INTPTR_FORMAT,
3072                     p2i(n), n->header().value());

3073       *error_cnt_p = *error_cnt_p + 1;
3074     }
3075   }
3076   if (n->object() != NULL) {
3077     if (jt != NULL) {
3078       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3079                     ": free per-thread monitor must have NULL _object "
3080                     "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
3081                     p2i(n->object()));
3082     } else {
3083       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
3084                     "must have NULL _object field: _object=" INTPTR_FORMAT,
3085                     p2i(n), p2i(n->object()));
3086     }
3087     *error_cnt_p = *error_cnt_p + 1;
3088   }
3089 }
3090 
3091 // Lock the next ObjectMonitor for traversal and unlock the current
3092 // ObjectMonitor. Returns the next ObjectMonitor if there is one.
3093 // Otherwise returns NULL (after unlocking the current ObjectMonitor).
3094 // This function is used by the various list walker functions to
3095 // safely walk a list without allowing an ObjectMonitor to be moved


3121       if (cur == NULL) {
3122         break;
3123       }
3124     }
3125   }
3126   int l_free_count = Atomic::load(&om_list_globals._free_count);
3127   if (l_free_count == chk_om_free_count) {
3128     out->print_cr("global_free_count=%d equals chk_om_free_count=%d",
3129                   l_free_count, chk_om_free_count);
3130   } else {
3131     // With fine-grained locks on om_list_globals._free_list, it
3132     // is possible for an ObjectMonitor to be prepended to
3133     // om_list_globals._free_list after we started calculating
3134     // chk_om_free_count so om_list_globals._free_count may not
3135     // match anymore.
3136     out->print_cr("WARNING: global_free_count=%d is not equal to "
3137                   "chk_om_free_count=%d", l_free_count, chk_om_free_count);
3138   }
3139 }
3140 
3141 // Check the global wait list and count; log the results of the checks.
3142 void ObjectSynchronizer::chk_global_wait_list_and_count(outputStream * out,
3143                                                         int *error_cnt_p) {
3144   int chk_om_wait_count = 0;
3145   ObjectMonitor* cur = NULL;
3146   if ((cur = get_list_head_locked(&om_list_globals._wait_list)) != NULL) {
3147     // Marked the global wait list head so process the list.
3148     while (true) {
3149       // Rules for om_list_globals._wait_list are the same as for
3150       // om_list_globals._free_list:
3151       chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
3152       chk_om_wait_count++;
3153 
3154       cur = lock_next_for_traversal(cur);
3155       if (cur == NULL) {
3156         break;
3157       }
3158     }
3159   }
3160   if (Atomic::load(&om_list_globals._wait_count) == chk_om_wait_count) {
3161     out->print_cr("global_wait_count=%d equals chk_om_wait_count=%d",
3162                   Atomic::load(&om_list_globals._wait_count), chk_om_wait_count);
3163   } else {
3164     out->print_cr("ERROR: global_wait_count=%d is not equal to "
3165                   "chk_om_wait_count=%d",
3166                   Atomic::load(&om_list_globals._wait_count), chk_om_wait_count);
3167     *error_cnt_p = *error_cnt_p + 1;
3168   }
3169 }
3170 
3171 // Check the global in-use list and count; log the results of the checks.
3172 void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
3173                                                           int *error_cnt_p) {
3174   int chk_om_in_use_count = 0;
3175   ObjectMonitor* cur = NULL;
3176   if ((cur = get_list_head_locked(&om_list_globals._in_use_list)) != NULL) {
3177     // Marked the global in-use list head so process the list.
3178     while (true) {
3179       chk_in_use_entry(NULL /* jt */, cur, out, error_cnt_p);
3180       chk_om_in_use_count++;
3181 
3182       cur = lock_next_for_traversal(cur);
3183       if (cur == NULL) {
3184         break;
3185       }
3186     }
3187   }
3188   int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
3189   if (l_in_use_count == chk_om_in_use_count) {
3190     out->print_cr("global_in_use_count=%d equals chk_om_in_use_count=%d",


3309   if (l_om_in_use_count == chk_om_in_use_count) {
3310     out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
3311                   "chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count,
3312                   chk_om_in_use_count);
3313   } else {
3314     out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
3315                   "equal to chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count,
3316                   chk_om_in_use_count);
3317     *error_cnt_p = *error_cnt_p + 1;
3318   }
3319 }
3320 
3321 // Log details about ObjectMonitors on the in-use lists. The 'BHL'
3322 // flags indicate why the entry is in-use, 'object' and 'object type'
3323 // indicate the associated object and its type.
3324 void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) {
3325   stringStream ss;
3326   if (Atomic::load(&om_list_globals._in_use_count) > 0) {
3327     out->print_cr("In-use global monitor info:");
3328     out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
3329     out->print_cr("%18s  %s  %7s  %18s  %18s",
3330                   "monitor", "BHL", "ref_cnt", "object", "object type");
3331     out->print_cr("==================  ===  =======  ==================  ==================");
3332     ObjectMonitor* cur = NULL;
3333     if ((cur = get_list_head_locked(&om_list_globals._in_use_list)) != NULL) {
3334       // Marked the global in-use list head so process the list.
3335       while (true) {
3336         const oop obj = (oop) cur->object();
3337         const markWord mark = cur->header();
3338         ResourceMark rm;
3339         out->print(INTPTR_FORMAT "  %d%d%d  %7d  " INTPTR_FORMAT "  %s", p2i(cur),
3340                    cur->is_busy() != 0, mark.hash() != 0, cur->owner() != NULL,
3341                    (int)cur->ref_count(), p2i(obj), obj->klass()->external_name());
3342         if (cur->is_busy() != 0) {
3343           out->print(" (%s)", cur->is_busy_to_string(&ss));
3344           ss.reset();
3345         }
3346         out->cr();
3347 
3348         cur = lock_next_for_traversal(cur);
3349         if (cur == NULL) {
3350           break;
3351         }
3352       }
3353     }
3354   }
3355 
3356   out->print_cr("In-use per-thread monitor info:");
3357   out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
3358   out->print_cr("%18s  %18s  %s  %7s  %18s  %18s",
3359                 "jt", "monitor", "BHL", "ref_cnt", "object", "object type");
3360   out->print_cr("==================  ==================  ===  =======  ==================  ==================");
3361   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
3362     ObjectMonitor* cur = NULL;
3363     if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
3364       // Marked the per-thread in-use list head so process the list.
3365       while (true) {
3366         const oop obj = (oop) cur->object();
3367         const markWord mark = cur->header();
3368         ResourceMark rm;
3369         out->print(INTPTR_FORMAT "  " INTPTR_FORMAT "  %d%d%d  %7d  " INTPTR_FORMAT
3370                    "  %s", p2i(jt), p2i(cur), cur->is_busy() != 0,
3371                    mark.hash() != 0, cur->owner() != NULL, (int)cur->ref_count(),
3372                    p2i(obj), obj->klass()->external_name());
3373         if (cur->is_busy() != 0) {
3374           out->print(" (%s)", cur->is_busy_to_string(&ss));
3375           ss.reset();
3376         }
3377         out->cr();
3378 
3379         cur = lock_next_for_traversal(cur);
3380         if (cur == NULL) {
3381           break;
3382         }
3383       }
3384     }
3385   }
3386 
3387   out->flush();
3388 }
3389 
3390 // Log counts for the global and per-thread monitor lists and return
3391 // the population count.
3392 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
3393   int pop_count = 0;
3394   out->print_cr("%18s  %10s  %10s  %10s  %10s",
3395                 "Global Lists:", "InUse", "Free", "Wait", "Total");
3396   out->print_cr("==================  ==========  ==========  ==========  ==========");
3397   int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
3398   int l_free_count = Atomic::load(&om_list_globals._free_count);
3399   int l_wait_count = Atomic::load(&om_list_globals._wait_count);
3400   out->print_cr("%18s  %10d  %10d  %10d  %10d", "", l_in_use_count,
3401                 l_free_count, l_wait_count,
3402                 Atomic::load(&om_list_globals._population));
3403   pop_count += l_in_use_count + l_free_count + l_wait_count;
3404 
3405   out->print_cr("%18s  %10s  %10s  %10s",
3406                 "Per-Thread Lists:", "InUse", "Free", "Provision");
3407   out->print_cr("==================  ==========  ==========  ==========");
3408 
3409   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
3410     int l_om_in_use_count = Atomic::load(&jt->om_in_use_count);
3411     int l_om_free_count = Atomic::load(&jt->om_free_count);
3412     out->print_cr(INTPTR_FORMAT "  %10d  %10d  %10d", p2i(jt),
3413                   l_om_in_use_count, l_om_free_count, jt->om_free_provision);
3414     pop_count += l_om_in_use_count + l_om_free_count;
3415   }
3416   return pop_count;
3417 }
3418 
3419 #ifndef PRODUCT
3420 
3421 // Check if monitor belongs to the monitor cache
3422 // The list is grow-only so it's *relatively* safe to traverse
3423 // the list of extant blocks without taking a lock.