
src/hotspot/share/runtime/objectMonitor.cpp

rev 56634 : imported patch 8230876.patch
rev 56635 : v2.00 -> v2.05 (CR5/v2.05/8-for-jdk13) patches combined into one;
            merge with 8229212.patch; merge with jdk-14+11;
            merge with 8230184.patch; merge with 8230876.patch;
            merge with jdk-14+15; merge with jdk-14+18.
rev 56637 : Add OM_CACHE_LINE_SIZE so that ObjectMonitor cache line sizes can
            be experimented with independently of DEFAULT_CACHE_LINE_SIZE;
            for SPARC and X64 configs that use 128 for DEFAULT_CACHE_LINE_SIZE,
            we are experimenting with 64;
            move _previous_owner_tid and _allocation_state fields to share the
            cache line with ObjectMonitor::_header;
            put ObjectMonitor::_ref_count on its own cache line after _owner;
            add 'int* count_p' parameter to deflate_monitor_list() and
            deflate_monitor_list_using_JT() and push counter updates down to
            where the ObjectMonitors are actually removed from the in-use lists;
            monitors_iterate() async deflation check should use negative
            ref_count;
            add 'JavaThread* target' param to
            deflate_per_thread_idle_monitors_using_JT();
            add deflate_common_idle_monitors_using_JT() to make it clear which
            JavaThread* is the target of the work and which is the calling
            JavaThread* (self);
            g_free_list, g_om_in_use_list and g_om_in_use_count are now static
            to synchronizer.cpp (reduce scope);
            add more diagnostic info to some assert()'s;
            minor code cleanups and code motion;
            save_om_ptr() should detect a race with a deflating thread that is
            bailing out and cause a retry when the ref_count field is not
            positive;
            merge with jdk-14+11;
            add special GC support for TestHumongousClassLoader.java;
            merge with 8230184.patch; merge with jdk-14+14; merge with jdk-14+18.
rev 56639 : loosen a couple more counter checks due to races observed in testing;
            simplify om_release() extraction of mid since list head or
            cur_mid_in_use is marked;
            simplify deflate_monitor_list() extraction of mid since there are
            no parallel deleters due to the safepoint;
            simplify deflate_monitor_list_using_JT() extraction of mid since
            list head or cur_mid_in_use is marked;
            prepend_block_to_lists() - simplify based on David H's comments;
            does not need load_acquire() or release_store() because of the
            cmpxchg();
            prepend_to_common() - simplify to use mark_next_loop() for m and
            use mark_list_head() and release_store() for the non-empty list case;
            add more debugging for "Non-balanced monitor enter/exit" failure mode;
            fix race in inflate() in the "CASE: neutral" code path;
            install_displaced_markword_in_object() does not need to clear the
            header field since that is handled when the ObjectMonitor is moved
            from the global free list;
            LSuccess should clear boxReg to set ICC.ZF=1 to avoid depending on
            existing boxReg contents;
            update fast_unlock() to detect when object no longer refers to the
            same ObjectMonitor and take fast path exit instead;
            clarify fast_lock() code where we detect when object no longer
            refers to the same ObjectMonitor;
            add/update comments for movptr() calls where we move a literal into
            an Address;
            remove set_owner();
            refactor setting of owner field into set_owner_from() (2 versions),
            set_owner_from_BasicLock(), and try_set_owner_from(); the new
            functions include monitorinflation+owner logging;
            extract debug code from v2.06 and v2.07 and move to v2.07.debug;
            change 'jccb' -> 'jcc' and 'jmpb' -> 'jmp' as needed;
            checkpoint initial version of MacroAssembler::inc_om_ref_count();
            update LP64 MacroAssembler::fast_lock() and fast_unlock() to use
            inc_om_ref_count();
            fast_lock() return flag setting logic can use
            'testptr(tmpReg, tmpReg)' instead of 'cmpptr(tmpReg, 0)' since
            that's more efficient;
            fast_unlock() LSuccess return flag setting logic can use
            'testl(boxReg, 0)' instead of 'xorptr(boxReg, boxReg)' since that's
            more efficient;
            cleanup "fast-path" vs "fast path" and "slow-path" vs "slow path";
            update MacroAssembler::rtm_inflated_locking() to use
            inc_om_ref_count();
            update MacroAssembler::fast_lock() to preserve the flags before
            decrementing ref_count and restore the flags afterwards; this is
            cleaner than depending on the contents of rax/tmpReg;
            coleenp CR - refactor async monitor deflation work from
            ServiceThread::service_thread_entry() to
            ObjectSynchronizer::deflate_idle_monitors_using_JT();
            rehn,eosterlund CR - add support for HandshakeAfterDeflateIdleMonitors
            for platforms that don't have ObjectMonitor ref_count support
            implemented in C2 fast_lock() and fast_unlock().


 222 // * See also http://blogs.sun.com/dave
 223 
 224 
 225 void* ObjectMonitor::operator new (size_t size) throw() {
 226   return AllocateHeap(size, mtInternal);
 227 }
 228 void* ObjectMonitor::operator new[] (size_t size) throw() {
 229   return operator new (size);
 230 }
 231 void ObjectMonitor::operator delete(void* p) {
 232   FreeHeap(p);
 233 }
 234 void ObjectMonitor::operator delete[] (void *p) {
 235   operator delete(p);
 236 }
 237 
 238 // -----------------------------------------------------------------------------
 239 // Enter support
 240 
 241 void ObjectMonitor::enter(TRAPS) {


 242   // The following code is ordered to check the most common cases first
 243   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 244   Thread * const Self = THREAD;
 245 
 246   void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
 247   if (cur == NULL) {
 248     assert(_recursions == 0, "invariant");
 249     return;
 250   }
 251 
 252   if (cur == Self) {
 253     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 254     _recursions++;
 255     return;
 256   }
 257 
 258   if (Self->is_lock_owned ((address)cur)) {
 259     assert(_recursions == 0, "internal state error");
 260     _recursions = 1;
 261     // Commute owner from a thread-specific on-stack BasicLockObject address to
 262     // a full-fledged "Thread *".
 263     _owner = Self;








 264     return;
 265   }
 266 
 267   // We've encountered genuine contention.
 268   assert(Self->_Stalled == 0, "invariant");
 269   Self->_Stalled = intptr_t(this);
 270 
 271   // Try one round of spinning *before* enqueueing Self
 272   // and before going through the awkward and expensive state
 273   // transitions.  The following spin is strictly optional ...
 274   // Note that if we acquire the monitor from an initial spin
 275   // we forgo posting JVMTI events and firing DTRACE probes.
 276   if (TrySpin(Self) > 0) {
 277     assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
 278     assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT,
 279            _recursions);
 280     assert(((oop)object())->mark() == markWord::encode(this),
 281            "object mark must match encoded this: mark=" INTPTR_FORMAT
 282            ", encoded this=" INTPTR_FORMAT, ((oop)object())->mark().value(),
 283            markWord::encode(this).value());
 284     Self->_Stalled = 0;
 285     return;
 286   }
 287 
 288   assert(_owner != Self, "invariant");
 289   assert(_succ != Self, "invariant");
 290   assert(Self->is_Java_thread(), "invariant");
 291   JavaThread * jt = (JavaThread *) Self;
 292   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 293   assert(jt->thread_state() != _thread_blocked, "invariant");
 294   assert(this->object() != NULL, "invariant");
 295   assert(_contentions >= 0, "invariant");
 296 
 297   // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
 298   // Ensure the object-monitor relationship remains stable while there's contention.
 299   Atomic::inc(&_contentions);


 300 
 301   JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
 302   EventJavaMonitorEnter event;
 303   if (event.should_commit()) {
 304     event.set_monitorClass(((oop)this->object())->klass());
 305     event.set_address((uintptr_t)(this->object_addr()));
 306   }
 307 
 308   { // Change java thread status to indicate blocked on monitor enter.
 309     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 310 
 311     Self->set_current_pending_monitor(this);
 312 
 313     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
 314     if (JvmtiExport::should_post_monitor_contended_enter()) {
 315       JvmtiExport::post_monitor_contended_enter(jt, this);
 316 
 317       // The current thread does not yet own the monitor and does not
 318       // yet appear on any queues that would get it made the successor.
 319       // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event


 341       //
 342       _recursions = 0;
 343       _succ = NULL;
 344       exit(false, Self);
 345 
 346       jt->java_suspend_self();
 347     }
 348     Self->set_current_pending_monitor(NULL);
 349 
 350     // We cleared the pending monitor info since we've just gotten past
 351     // the enter-check-for-suspend dance and we now own the monitor free
 352     // and clear, i.e., it is no longer pending. The ThreadBlockInVM
 353     // destructor can go to a safepoint at the end of this block. If we
 354     // do a thread dump during that safepoint, then this thread will show
 355     // as having "-locked" the monitor, but the OS and java.lang.Thread
 356     // states will still report that the thread is blocked trying to
 357     // acquire it.
 358   }
 359 
 360   Atomic::dec(&_contentions);
 361   assert(_contentions >= 0, "invariant");
 362   Self->_Stalled = 0;
 363 
 364   // Must either set _recursions = 0 or ASSERT _recursions == 0.
 365   assert(_recursions == 0, "invariant");
 366   assert(_owner == Self, "invariant");
 367   assert(_succ != Self, "invariant");
 368   assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
 369 
 370   // The thread -- now the owner -- is back in vm mode.
 371   // Report the glorious news via TI,DTrace and jvmstat.
 372   // The probe effect is non-trivial.  All the reportage occurs
 373   // while we hold the monitor, increasing the length of the critical
 374   // section.  Amdahl's parallel speedup law comes vividly into play.
 375   //
 376   // Another option might be to aggregate the events (thread local or
 377   // per-monitor aggregation) and defer reporting until a more opportune
 378   // time -- such as next time some thread encounters contention but has
 379   // yet to acquire the lock.  While that thread is spinning we
 380   // could increment JVMStat counters, etc.
 381 


 385 
 386     // The current thread already owns the monitor and is not going to
 387     // call park() for the remainder of the monitor enter protocol. So
 388     // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
 389     // event handler consumed an unpark() issued by the thread that
 390     // just exited the monitor.
 391   }
 392   if (event.should_commit()) {
 393     event.set_previousOwner((uintptr_t)_previous_owner_tid);
 394     event.commit();
 395   }
 396   OM_PERFDATA_OP(ContendedLockAttempts, inc());
 397 }
 398 
 399 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 400 // Callers must compensate as needed.
 401 
 402 int ObjectMonitor::TryLock(Thread * Self) {
 403   void * own = _owner;
 404   if (own != NULL) return 0;
 405   if (Atomic::replace_if_null(Self, &_owner)) {
 406     assert(_recursions == 0, "invariant");
 407     return 1;
 408   }
 409   // The lock had been free momentarily, but we lost the race to the lock.
 410   // Interference -- the CAS failed.
 411   // We can either return -1 or retry.
 412   // Retry doesn't make as much sense because the lock was just acquired.
 413   return -1;
 414 }
 415 
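(Reviewer's aside, not part of this webrev: the TATAS shape of TryLock() -- a
plain load of _owner before attempting the CAS -- is what keeps the failure
path cheap. A minimal standalone model of the same shape, using std::atomic
in place of HotSpot's Atomic wrapper; all names here are illustrative only.)

    #include <atomic>

    // 0 = owner observed non-NULL (no CAS issued), 1 = acquired,
    // -1 = the lock looked free but we lost the race on the CAS.
    static std::atomic<void*> g_owner{nullptr};

    int try_lock_model(void* self) {
      void* own = g_owner.load(std::memory_order_relaxed);
      if (own != nullptr) return 0;   // cheap test: avoid a futile CAS
      void* expected = nullptr;
      if (g_owner.compare_exchange_strong(expected, self,
                                          std::memory_order_acquire)) {
        return 1;                     // CAS succeeded -- lock acquired
      }
      return -1;                      // interference -- the CAS failed
    }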









































































 416 // Convert the fields used by is_busy() to a string that can be
 417 // used for diagnostic output.
 418 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
 419   ss->print("is_busy: contentions=%d, waiters=%d, owner=" INTPTR_FORMAT
 420             ", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, _contentions,
 421             _waiters, p2i(_owner), p2i(_cxq), p2i(_EntryList));







 422   return ss->base();
 423 }
 424 
 425 #define MAX_RECHECK_INTERVAL 1000
 426 
 427 void ObjectMonitor::EnterI(TRAPS) {


 428   Thread * const Self = THREAD;
 429   assert(Self->is_Java_thread(), "invariant");
 430   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 431 
 432   // Try the lock - TATAS
 433   if (TryLock (Self) > 0) {
 434     assert(_succ != Self, "invariant");
 435     assert(_owner == Self, "invariant");
 436     assert(_Responsible != Self, "invariant");
 437     return;
 438   }
 439 











 440   assert(InitDone, "Unexpectedly not initialized");
 441 
 442   // We try one round of spinning *before* enqueueing Self.
 443   //
 444   // If the _owner is ready but OFFPROC we could use a YieldTo()
 445   // operation to donate the remainder of this thread's quantum
 446   // to the owner.  This has subtle but beneficial affinity
 447   // effects.
 448 
 449   if (TrySpin(Self) > 0) {
 450     assert(_owner == Self, "invariant");
 451     assert(_succ != Self, "invariant");
 452     assert(_Responsible != Self, "invariant");
 453     return;
 454   }
 455 
 456   // The Spin failed -- Enqueue and park the thread ...
 457   assert(_succ != Self, "invariant");
 458   assert(_owner != Self, "invariant");
 459   assert(_Responsible != Self, "invariant");


 536 
 537   for (;;) {
 538 
 539     if (TryLock(Self) > 0) break;
 540     assert(_owner != Self, "invariant");
 541 
 542     // park self
 543     if (_Responsible == Self) {
 544       Self->_ParkEvent->park((jlong) recheckInterval);
 545       // Increase the recheckInterval, but clamp the value.
 546       recheckInterval *= 8;
 547       if (recheckInterval > MAX_RECHECK_INTERVAL) {
 548         recheckInterval = MAX_RECHECK_INTERVAL;
 549       }
 550     } else {
 551       Self->_ParkEvent->park();
 552     }
 553 
 554     if (TryLock(Self) > 0) break;
 555 









 556     // The lock is still contested.
 557     // Keep a tally of the # of futile wakeups.
 558     // Note that the counter is not protected by a lock or updated by atomics.
 559     // That is by design - we trade "lossy" counters which are exposed to
 560     // races during updates for a lower probe effect.
 561 
 562     // This PerfData object can be used in parallel with a safepoint.
 563     // See the work around in PerfDataManager::destroy().
 564     OM_PERFDATA_OP(FutileWakeups, inc());
 565     ++nWakeups;
 566 
 567     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 568     // We can defer clearing _succ until after the spin completes
 569     // TrySpin() must tolerate being called with _succ == Self.
 570     // Try yet another round of adaptive spinning.
 571     if (TrySpin(Self) > 0) break;
 572 
 573     // We can find that we were unpark()ed and redesignated _succ while
 574     // we were spinning.  That's harmless.  If we iterate and call park(),
 575     // park() will consume the event and return immediately and we'll


 640   // the lock.   The barrier ensures that changes to monitor meta-data and data
 641   // protected by the lock will be visible before we release the lock, and
 642   // therefore before some other thread (CPU) has a chance to acquire the lock.
 643   // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
 644   //
 645   // Critically, any prior STs to _succ or EntryList must be visible before
 646   // the ST of null into _owner in the *subsequent* (following) corresponding
 647   // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
 648   // execute a serializing instruction.
 649 
 650   return;
 651 }
 652 
 653 // ReenterI() is a specialized inline form of the latter half of the
 654 // contended slow-path from EnterI().  We use ReenterI() only for
 655 // monitor reentry in wait().
 656 //
 657 // In the future we should reconcile EnterI() and ReenterI().
 658 
 659 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {


 660   assert(Self != NULL, "invariant");
 661   assert(SelfNode != NULL, "invariant");
 662   assert(SelfNode->_thread == Self, "invariant");
 663   assert(_waiters > 0, "invariant");
 664   assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
 665   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 666   JavaThread * jt = (JavaThread *) Self;
 667 
 668   int nWakeups = 0;
 669   for (;;) {
 670     ObjectWaiter::TStates v = SelfNode->TState;
 671     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 672     assert(_owner != Self, "invariant");
 673 
 674     if (TryLock(Self) > 0) break;
 675     if (TrySpin(Self) > 0) break;
 676 









 677     // State transition wrappers around park() ...
 678     // ReenterI() wisely defers state transitions until
 679     // it's clear we must park the thread.
 680     {
 681       OSThreadContendState osts(Self->osthread());
 682       ThreadBlockInVM tbivm(jt);
 683 
 684       // cleared by handle_special_suspend_equivalent_condition()
 685       // or java_suspend_self()
 686       jt->set_suspend_equivalent();
 687       Self->_ParkEvent->park();
 688 
 689       // were we externally suspended while we were waiting?
 690       for (;;) {
 691         if (!ExitSuspendEquivalent(jt)) break;
 692         if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
 693         jt->java_suspend_self();
 694         jt->set_suspend_equivalent();
 695       }
 696     }


 846 // the timer expires.  If the lock is high traffic then the stranding latency
 847 // will be low due to (a).  If the lock is low traffic then the odds of
 848 // stranding are lower, although the worst-case stranding latency
 849 // is longer.  Critically, we don't want to put excessive load in the
 850 // platform's timer subsystem.  We want to minimize both the timer injection
 851 // rate (timers created/sec) as well as the number of timers active at
 852 // any one time.  (more precisely, we want to minimize timer-seconds, which is
 853 // the integral of the # of active timers at any instant over time).
 854 // Both impinge on OS scalability.  Given that, at most one thread parked on
 855 // a monitor will use a timer.
 856 //
 857 // There is also the risk of a futile wake-up. If we drop the lock
 858 // another thread can reacquire the lock immediately, and we can
 859 // then wake a thread unnecessarily. This is benign, and we've
 860 // structured the code so the windows are short and the frequency
 861 // of such futile wakeups is low.
 862 
 863 void ObjectMonitor::exit(bool not_suspended, TRAPS) {
 864   Thread * const Self = THREAD;
 865   if (THREAD != _owner) {
 866     if (THREAD->is_lock_owned((address) _owner)) {
 867       // Transmute _owner from a BasicLock pointer to a Thread address.
 868       // We don't need to hold _mutex for this transition.
 869       // Non-null to Non-null is safe as long as all readers can
 870       // tolerate either flavor.
 871       assert(_recursions == 0, "invariant");
 872       _owner = THREAD;
 873       _recursions = 0;
 874     } else {
 875       // Apparent unbalanced locking ...
 876       // Naively we'd like to throw IllegalMonitorStateException.
 877       // As a practical matter we can neither allocate nor throw an
 878       // exception as ::exit() can be called from leaf routines.
 879       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
 880       // Upon deeper reflection, however, in a properly run JVM the only
 881       // way we should encounter this situation is in the presence of
 882       // unbalanced JNI locking. TODO: CheckJNICalls.
 883       // See also: CR4414101
 884       assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");







 885       return;
 886     }
 887   }
 888 
 889   if (_recursions != 0) {
 890     _recursions--;        // this is simple recursive enter
 891     return;
 892   }
 893 
 894   // Invariant: after setting Responsible=null a thread must execute
 895   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
 896   _Responsible = NULL;
 897 
 898 #if INCLUDE_JFR
 899   // get the owner's thread id for the MonitorEnter event
 900   // if it is enabled and the thread isn't suspended
 901   if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
 902     _previous_owner_tid = JFR_THREAD_ID(Self);
 903   }
 904 #endif
 905 
 906   for (;;) {
 907     assert(THREAD == _owner, "invariant");
 908 
 909     // release semantics: prior loads and stores from within the critical section
 910     // must not float (reorder) past the following store that drops the lock.
 911     // On SPARC that requires MEMBAR #loadstore|#storestore.
 912     // But of course in TSO #loadstore|#storestore is not required.



 913     OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
 914     OrderAccess::storeload();                        // See if we need to wake a successor

 915     if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
 916       return;
 917     }
 918     // Other threads are blocked trying to acquire the lock.
 919 
 920     // Normally the exiting thread is responsible for ensuring succession,
 921     // but if other successors are ready or other entering threads are spinning
 922     // then this thread can simply store NULL into _owner and exit without
 923     // waking a successor.  The existence of spinners or ready successors
 924     // guarantees proper succession (liveness).  Responsibility passes to the
 925     // ready or running successors.  The exiting thread delegates the duty.
 926     // More precisely, if a successor already exists this thread is absolved
 927     // of the responsibility of waking (unparking) one.
 928     //
 929     // The _succ variable is critical to reducing futile wakeup frequency.
 930     // _succ identifies the "heir presumptive" thread that has been made
 931     // ready (unparked) but that has not yet run.  We need only one such
 932     // successor thread to guarantee progress.
 933     // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
 934     // section 3.3 "Futile Wakeup Throttling" for details.
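(Reviewer's aside, not part of this webrev: a standalone model of the 1-0
exit protocol discussed above -- drop the lock with release semantics, fence,
and only reacquire to wake a successor when there are queued threads and no
heir presumptive. std::atomic stands in for OrderAccess; the names are
stand-ins, not HotSpot's.)

    #include <atomic>

    struct MonitorModel {
      std::atomic<void*> owner{nullptr};
      std::atomic<void*> entry_list{nullptr};  // stand-in for _EntryList
      std::atomic<void*> cxq{nullptr};         // stand-in for _cxq
      std::atomic<void*> succ{nullptr};        // heir presumptive, if any

      // Returns true if the exiting thread reacquired the lock and must
      // now dequeue and wake a successor itself.
      bool exit_needs_wakeup(void* self) {
        owner.store(nullptr, std::memory_order_release);     // drop the lock
        std::atomic_thread_fence(std::memory_order_seq_cst); // ST vs following LDs
        if ((entry_list.load() == nullptr && cxq.load() == nullptr) ||
            succ.load() != nullptr) {
          return false;  // nobody queued, or a successor is already ready
        }
        // Only the owner may drain the queues; if this CAS fails, the new
        // owner inherits the responsibility for succession.
        void* expected = nullptr;
        return owner.compare_exchange_strong(expected, self);
      }
    };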


 936     // Note that spinners in Enter() also set _succ non-null.
 937     // In the current implementation spinners opportunistically set
 938     // _succ so that exiting threads might avoid waking a successor.
 939     // Another less appealing alternative would be for the exiting thread
 940     // to drop the lock and then spin briefly to see if a spinner managed
 941     // to acquire the lock.  If so, the exiting thread could exit
 942     // immediately without waking a successor, otherwise the exiting
 943     // thread would need to dequeue and wake a successor.
 944     // (Note that we'd need to make the post-drop spin short, but no
 945     // shorter than the worst-case round-trip cache-line migration time.
 946     // The dropped lock needs to become visible to the spinner, and then
 947     // the acquisition of the lock by the spinner must become visible to
 948     // the exiting thread).
 949 
 950     // It appears that an heir-presumptive (successor) must be made ready.
 951     // Only the current lock owner can manipulate the EntryList or
 952     // drain _cxq, so we need to reacquire the lock.  If we fail
 953     // to reacquire the lock the responsibility for ensuring succession
 954     // falls to the new owner.
 955     //
 956     if (!Atomic::replace_if_null(THREAD, &_owner)) {
 957       return;
 958     }
 959 
 960     guarantee(_owner == THREAD, "invariant");
 961 
 962     ObjectWaiter * w = NULL;
 963 
 964     w = _EntryList;
 965     if (w != NULL) {
 966       // I'd like to write: guarantee (w->_thread != Self).
 967       // But in practice an exiting thread may find itself on the EntryList.
 968       // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
 969       // then calls exit().  Exit releases the lock by setting O._owner to NULL.
 970       // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
 971       // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
 972       // releases the lock "O".  T2 resumes immediately after the ST of null into
 973       // _owner, above.  T2 notices that the EntryList is populated, so it
 974       // reacquires the lock and then finds itself on the EntryList.
 975       // Given all that, we have to tolerate the circumstance where "w" is
 976       // associated with Self.


1069 
1070 
1071 void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
1072   assert(_owner == Self, "invariant");
1073 
1074   // Exit protocol:
1075   // 1. ST _succ = wakee
 1076   // 2. membar #loadstore|#storestore;
 1077   // 3. ST _owner = NULL
 1078   // 4. unpark(wakee)
1079 
1080   _succ = Wakee->_thread;
1081   ParkEvent * Trigger = Wakee->_event;
1082 
1083   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
1084   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
1085   // out-of-scope (non-extant).
1086   Wakee  = NULL;
1087 
1088   // Drop the lock



1089   OrderAccess::release_store(&_owner, (void*)NULL);
1090   OrderAccess::fence();                               // ST _owner vs LD in unpark()

1091 
1092   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
1093   Trigger->unpark();
1094 
1095   // Maintain stats and report events to JVMTI
1096   OM_PERFDATA_OP(Parks, inc());
1097 }
1098 
1099 
1100 // -----------------------------------------------------------------------------
1101 // Class Loader deadlock handling.
1102 //
1103 // complete_exit exits a lock returning recursion count
1104 // complete_exit/reenter operate as a wait without waiting
1105 // complete_exit requires an inflated monitor
1106 // The _owner field is not always the Thread addr even with an
1107 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1108 // thread due to contention.
1109 intptr_t ObjectMonitor::complete_exit(TRAPS) {
1110   Thread * const Self = THREAD;
1111   assert(Self->is_Java_thread(), "Must be Java thread!");
1112   JavaThread *jt = (JavaThread *)THREAD;
1113 
1114   assert(InitDone, "Unexpectedly not initialized");
1115 
1116   if (THREAD != _owner) {
1117     if (THREAD->is_lock_owned ((address)_owner)) {

1118       assert(_recursions == 0, "internal state error");
1119       _owner = THREAD;   // Convert from basiclock addr to Thread addr
1120       _recursions = 0;
1121     }
1122   }
1123 
1124   guarantee(Self == _owner, "complete_exit not owner");
1125   intptr_t save = _recursions; // record the old recursion count
1126   _recursions = 0;        // set the recursion level to be 0
1127   exit(true, Self);           // exit the monitor
1128   guarantee(_owner != Self, "invariant");
1129   return save;
1130 }
1131 
1132 // reenter() enters a lock and sets recursion count
1133 // complete_exit/reenter operate as a wait without waiting
1134 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1135   Thread * const Self = THREAD;
1136   assert(Self->is_Java_thread(), "Must be Java thread!");
1137   JavaThread *jt = (JavaThread *)THREAD;
1138 
1139   guarantee(_owner != Self, "reenter already owner");
1140   enter(THREAD);       // enter the monitor

1141   guarantee(_recursions == 0, "reenter recursion");
1142   _recursions = recursions;
1143   return;
1144 }
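(Reviewer's aside, not part of this webrev: the intended call pattern for the
complete_exit()/reenter() pair described above, as a sketch that assumes the
usual HotSpot headers are in scope; run_outside_monitor is a made-up name.)

    // Fully release a (possibly recursively held) inflated monitor, run
    // code that must not hold it -- e.g. to break a class loader deadlock --
    // then reacquire it with the saved recursion count restored.
    static void run_outside_monitor(ObjectMonitor* mon, TRAPS) {
      intptr_t saved_recursions = mon->complete_exit(THREAD); // drop all levels
      // ... work that must not hold this monitor ...
      mon->reenter(saved_recursions, THREAD);  // reacquire + restore count
    }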
1145 
1146 // Checks that the current THREAD owns this monitor and causes an
1147 // immediate return if it doesn't. We don't use the CHECK macro
1148 // because we want the IMSE to be the only exception that is thrown
1149 // from the call site when false is returned. Any other pending
1150 // exception is ignored.
1151 #define CHECK_OWNER()                                                  \
1152   do {                                                                 \
1153     if (!check_owner(THREAD)) {                                        \
1154        assert(HAS_PENDING_EXCEPTION, "expected a pending IMSE here."); \
1155        return;                                                         \
1156      }                                                                 \
1157   } while (false)
1158 
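(Reviewer's aside, not part of this webrev: the do { ... } while (false)
wrapper makes CHECK_OWNER() behave as a single statement, so the early return
is safe even under an unbraced if. A hedged sketch of a call site; the method
name is made up -- the real callers are methods such as wait(), notify() and
notifyAll() later in this file.)

    void ObjectMonitor::some_owner_only_op(TRAPS) {
      CHECK_OWNER();  // returns, with a pending IMSE, if THREAD is not owner
      // ... from here on THREAD is known to own the monitor ...
    }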
1159 // Returns true if the specified thread owns the ObjectMonitor.
1160 // Otherwise returns false and throws IllegalMonitorStateException
1161 // (IMSE). If there is a pending exception and the specified thread
1162 // is not the owner, that exception will be replaced by the IMSE.
1163 bool ObjectMonitor::check_owner(Thread* THREAD) {
1164   if (_owner == THREAD) {
1165     return true;
1166   }
1167   if (THREAD->is_lock_owned((address)_owner)) {
1168     _owner = THREAD;  // convert from BasicLock addr to Thread addr

1169     _recursions = 0;
1170     return true;
1171   }
1172   THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
1173              "current thread is not owner", false);
1174 }
1175 
1176 static void post_monitor_wait_event(EventJavaMonitorWait* event,
1177                                     ObjectMonitor* monitor,
1178                                     jlong notifier_tid,
1179                                     jlong timeout,
1180                                     bool timedout) {
1181   assert(event != NULL, "invariant");
1182   assert(monitor != NULL, "invariant");
1183   event->set_monitorClass(((oop)monitor->object())->klass());
1184   event->set_timeout(timeout);
1185   event->set_address((uintptr_t)monitor->object_addr());
1186   event->set_notifier(notifier_tid);
1187   event->set_timedOut(timedout);
1188   event->commit();


1653     // We periodically check to see if there's a safepoint pending.
1654     if ((ctr & 0xFF) == 0) {
1655       if (SafepointMechanism::should_block(Self)) {
1656         goto Abort;           // abrupt spin egress
1657       }
1658       SpinPause();
1659     }
1660 
1661     // Probe _owner with TATAS
1662     // If this thread observes the monitor transition or flicker
1663     // from locked to unlocked to locked, then the odds that this
1664     // thread will acquire the lock in this spin attempt go down
1665     // considerably.  The same argument applies if the CAS fails
1666     // or if we observe _owner change from one non-null value to
1667     // another non-null value.   In such cases we might abort
1668     // the spin without prejudice or apply a "penalty" to the
1669     // spin count-down variable "ctr", reducing it by 100, say.
1670 
1671     Thread * ox = (Thread *) _owner;
1672     if (ox == NULL) {
1673       ox = (Thread*)Atomic::cmpxchg(Self, &_owner, (void*)NULL);
1674       if (ox == NULL) {
1675         // The CAS succeeded -- this thread acquired ownership
1676         // Take care of some bookkeeping to exit spin state.
1677         if (_succ == Self) {
1678           _succ = NULL;
1679         }
1680 
1681         // Increase _SpinDuration :
1682         // The spin was successful (profitable) so we tend toward
1683         // longer spin attempts in the future.
1684         // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
1685         // If we acquired the lock early in the spin cycle it
1686         // makes sense to increase _SpinDuration proportionally.
1687         // Note that we don't clamp SpinDuration precisely at SpinLimit.
1688         int x = _SpinDuration;
1689         if (x < Knob_SpinLimit) {
1690           if (x < Knob_Poverty) x = Knob_Poverty;
1691           _SpinDuration = x + Knob_Bonus;
1692         }
1693         return 1;
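(Reviewer's aside, not part of this webrev: the _SpinDuration update above is
a reward-on-success rule -- successful spins ratchet the budget upward, with
a floor so a cold monitor is lifted to a useful starting budget. A standalone
model; the knob values are placeholders, not HotSpot's actual defaults.)

    enum { kSpinLimit = 5000, kPoverty = 1000, kBonus = 100 };

    static int reward_successful_spin(int spin_duration) {
      int x = spin_duration;
      if (x < kSpinLimit) {
        if (x < kPoverty) x = kPoverty;  // lift starved monitors to the floor
        x += kBonus;                     // favor longer future spin attempts
      }
      return x;  // deliberately not clamped exactly at kSpinLimit
    }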


1917   }
1918 #define NEWPERFVARIABLE(n)                                                \
1919   {                                                                       \
1920     n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,  \
1921                                          CHECK);                          \
1922   }
1923     NEWPERFCOUNTER(_sync_Inflations);
1924     NEWPERFCOUNTER(_sync_Deflations);
1925     NEWPERFCOUNTER(_sync_ContendedLockAttempts);
1926     NEWPERFCOUNTER(_sync_FutileWakeups);
1927     NEWPERFCOUNTER(_sync_Parks);
1928     NEWPERFCOUNTER(_sync_Notifications);
1929     NEWPERFVARIABLE(_sync_MonExtant);
1930 #undef NEWPERFCOUNTER
1931 #undef NEWPERFVARIABLE
1932   }
1933 
1934   DEBUG_ONLY(InitDone = true;)
1935 }
1936 

















































































1937 void ObjectMonitor::print_on(outputStream* st) const {
1938   // The minimal things to print for markWord printing, more can be added for debugging and logging.
1939   st->print("{contentions=0x%08x,waiters=0x%08x"
1940             ",recursions=" INTPTR_FORMAT ",owner=" INTPTR_FORMAT "}",
1941             contentions(), waiters(), recursions(),
1942             p2i(owner()));
1943 }
1944 void ObjectMonitor::print() const { print_on(tty); }
























































































 222 // * See also http://blogs.sun.com/dave
 223 
 224 
 225 void* ObjectMonitor::operator new (size_t size) throw() {
 226   return AllocateHeap(size, mtInternal);
 227 }
 228 void* ObjectMonitor::operator new[] (size_t size) throw() {
 229   return operator new (size);
 230 }
 231 void ObjectMonitor::operator delete(void* p) {
 232   FreeHeap(p);
 233 }
 234 void ObjectMonitor::operator delete[] (void *p) {
 235   operator delete(p);
 236 }
 237 
 238 // -----------------------------------------------------------------------------
 239 // Enter support
 240 
 241 void ObjectMonitor::enter(TRAPS) {
 242   ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count());
 243 
 244   // The following code is ordered to check the most common cases first
 245   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 246   Thread * const Self = THREAD;
 247 
 248   void* cur = try_set_owner_from(Self, NULL);
 249   if (cur == NULL) {
 250     assert(_recursions == 0, "invariant");
 251     return;
 252   }
 253 
 254   if (cur == Self) {
 255     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 256     _recursions++;
 257     return;
 258   }
 259 
 260   if (Self->is_lock_owned ((address)cur)) {
 261     assert(_recursions == 0, "internal state error");
 262     _recursions = 1;
 263     set_owner_from_BasicLock(Self, cur);  // Convert from BasicLock* to Thread*.
 264     return;
 265   }
 266 
 267   if (AsyncDeflateIdleMonitors &&
 268       try_set_owner_from(Self, DEFLATER_MARKER) == DEFLATER_MARKER) {
 269     // The deflation protocol finished the first part (setting owner),
 270     // but it failed the second part (making ref_count negative) and
 271     // bailed. Or the ObjectMonitor was async deflated and reused.
 272     // Acquired the monitor.
 273     assert(_recursions == 0, "invariant");
 274     return;
 275   }
 276 
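(Reviewer's aside, not part of this webrev: the AsyncDeflateIdleMonitors
handshake that the block above participates in, as a standalone model. The
deflater claims _owner with DEFLATER_MARKER and then tries to drive ref_count
negative; a contender cancels a half-finished deflation by CASing _owner from
DEFLATER_MARKER to itself. All names and values here are illustrative, not
the patch's code.)

    #include <atomic>
    #include <climits>
    #include <cstdint>

    static void* const kDeflaterMarker = reinterpret_cast<void*>(intptr_t(-1));

    struct DeflationModel {
      std::atomic<void*> owner{nullptr};
      std::atomic<int>   ref_count{0};

      // Deflater side: part 1 claims the owner field; part 2 makes
      // ref_count negative so no new contender can pin the monitor.
      bool deflater_try_deflate() {
        void* exp = nullptr;
        if (!owner.compare_exchange_strong(exp, kDeflaterMarker)) {
          return false;                       // monitor is busy
        }
        int zero = 0;
        if (!ref_count.compare_exchange_strong(zero, INT_MIN)) {
          // Part 2 failed: someone holds a reference. Undo part 1 unless
          // a contender has already taken ownership over from us.
          exp = kDeflaterMarker;
          owner.compare_exchange_strong(exp, nullptr);
          return false;
        }
        return true;  // safe to detach the monitor from its object
      }

      // Contender side: the "Acquired the monitor" case in the code above.
      bool contender_try_cancel(void* self) {
        void* exp = kDeflaterMarker;
        return owner.compare_exchange_strong(exp, self);
      }
    };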
 277   // We've encountered genuine contention.
 278   assert(Self->_Stalled == 0, "invariant");
 279   Self->_Stalled = intptr_t(this);
 280 
 281   // Try one round of spinning *before* enqueueing Self
 282   // and before going through the awkward and expensive state
 283   // transitions.  The following spin is strictly optional ...
 284   // Note that if we acquire the monitor from an initial spin
 285   // we forgo posting JVMTI events and firing DTRACE probes.
 286   if (TrySpin(Self) > 0) {
 287     assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
 288     assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);

 289     assert(((oop)object())->mark() == markWord::encode(this),
 290            "object mark must match encoded this: mark=" INTPTR_FORMAT
 291            ", encoded this=" INTPTR_FORMAT, ((oop)object())->mark().value(),
 292            markWord::encode(this).value());
 293     Self->_Stalled = 0;
 294     return;
 295   }
 296 
 297   assert(_owner != Self, "invariant");
 298   assert(_succ != Self, "invariant");
 299   assert(Self->is_Java_thread(), "invariant");
 300   JavaThread * jt = (JavaThread *) Self;
 301   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 302   assert(jt->thread_state() != _thread_blocked, "invariant");
 303   assert(AsyncDeflateIdleMonitors || this->object() != NULL, "invariant");
 304   assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions);
 305 
 306   // Prevent deflation. See ObjectSynchronizer::deflate_monitor(),
 307   // ObjectSynchronizer::deflate_monitor_using_JT() and is_busy().
 308   // Ensure the object <-> monitor relationship remains stable while
 309   // there's contention.
 310   Atomic::add(1, &_contentions);
 311 
 312   JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
 313   EventJavaMonitorEnter event;
 314   if (event.should_commit()) {
 315     event.set_monitorClass(((oop)this->object())->klass());
 316     event.set_address((uintptr_t)(this->object_addr()));
 317   }
 318 
 319   { // Change java thread status to indicate blocked on monitor enter.
 320     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 321 
 322     Self->set_current_pending_monitor(this);
 323 
 324     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
 325     if (JvmtiExport::should_post_monitor_contended_enter()) {
 326       JvmtiExport::post_monitor_contended_enter(jt, this);
 327 
 328       // The current thread does not yet own the monitor and does not
 329       // yet appear on any queues that would get it made the successor.
 330       // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event


 352       //
 353       _recursions = 0;
 354       _succ = NULL;
 355       exit(false, Self);
 356 
 357       jt->java_suspend_self();
 358     }
 359     Self->set_current_pending_monitor(NULL);
 360 
 361     // We cleared the pending monitor info since we've just gotten past
 362     // the enter-check-for-suspend dance and we now own the monitor free
 363     // and clear, i.e., it is no longer pending. The ThreadBlockInVM
 364     // destructor can go to a safepoint at the end of this block. If we
 365     // do a thread dump during that safepoint, then this thread will show
 366     // as having "-locked" the monitor, but the OS and java.lang.Thread
 367     // states will still report that the thread is blocked trying to
 368     // acquire it.
 369   }
 370 
 371   Atomic::dec(&_contentions);
 372   assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions);
 373   Self->_Stalled = 0;
 374 
 375   // Must either set _recursions = 0 or ASSERT _recursions == 0.
 376   assert(_recursions == 0, "invariant");
 377   assert(_owner == Self, "invariant");
 378   assert(_succ != Self, "invariant");
 379   assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
 380 
 381   // The thread -- now the owner -- is back in vm mode.
 382   // Report the glorious news via TI,DTrace and jvmstat.
 383   // The probe effect is non-trivial.  All the reportage occurs
 384   // while we hold the monitor, increasing the length of the critical
 385   // section.  Amdahl's parallel speedup law comes vividly into play.
 386   //
 387   // Another option might be to aggregate the events (thread local or
 388   // per-monitor aggregation) and defer reporting until a more opportune
 389   // time -- such as next time some thread encounters contention but has
 390   // yet to acquire the lock.  While that thread is spinning we
 391   // could increment JVMStat counters, etc.
 392 


 396 
 397     // The current thread already owns the monitor and is not going to
 398     // call park() for the remainder of the monitor enter protocol. So
 399     // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
 400     // event handler consumed an unpark() issued by the thread that
 401     // just exited the monitor.
 402   }
 403   if (event.should_commit()) {
 404     event.set_previousOwner((uintptr_t)_previous_owner_tid);
 405     event.commit();
 406   }
 407   OM_PERFDATA_OP(ContendedLockAttempts, inc());
 408 }
 409 
 410 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 411 // Callers must compensate as needed.
 412 
 413 int ObjectMonitor::TryLock(Thread * Self) {
 414   void * own = _owner;
 415   if (own != NULL) return 0;
 416   if (try_set_owner_from(Self, NULL) == NULL) {
 417     assert(_recursions == 0, "invariant");
 418     return 1;
 419   }
 420   // The lock had been free momentarily, but we lost the race to the lock.
 421   // Interference -- the CAS failed.
 422   // We can either return -1 or retry.
 423   // Retry doesn't make as much sense because the lock was just acquired.
 424   return -1;
 425 }
 426 
 427 // Install the displaced mark word (dmw) of a deflating ObjectMonitor
 428 // into the header of the object associated with the monitor. This
 429 // idempotent method is called by a thread that is deflating a
 430 // monitor and by other threads that have detected a race with the
 431 // deflation process.
 432 void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
 433   // This function must only be called when (owner == DEFLATER_MARKER
 434   // && ref_count <= 0), but we can't guarantee that here because
 435   // those values could change when the ObjectMonitor gets moved from
 436   // the global free list to a per-thread free list.
 437 
 438   guarantee(obj != NULL, "must be non-NULL");
 439   if (object() != obj) {
 440     // ObjectMonitor's object ref no longer refers to the target object
 441     // so the object's header has already been restored.
 442     return;
 443   }
 444 
 445   markWord dmw = header();
 446   if (dmw.value() == 0) {
 447     // ObjectMonitor's header/dmw has been cleared so the ObjectMonitor
 448     // has been deflated and taken off the global free list.
 449     return;
 450   }
 451 
 452   // A non-NULL dmw has to be either neutral (not locked and not marked)
 453   // or is already participating in this restoration protocol.
 454   assert(dmw.is_neutral() || (dmw.is_marked() && dmw.hash() == 0),
 455          "failed precondition: dmw=" INTPTR_FORMAT, dmw.value());
 456 
 457   markWord marked_dmw = markWord::zero();
 458   if (!dmw.is_marked() && dmw.hash() == 0) {
 459     // This dmw has not yet started the restoration protocol so we
 460     // mark a copy of the dmw to begin the protocol.
 461     // Note: A dmw with a hashcode does not take this code path.
 462     marked_dmw = dmw.set_marked();
 463 
 464     // All of the callers to this function can be racing with each
 465     // other trying to update the _header field.
 466     dmw = (markWord) Atomic::cmpxchg(marked_dmw, &_header, dmw);
 467     if (dmw.value() == 0) {
 468       // ObjectMonitor's header/dmw has been cleared so the object's
 469       // header has already been restored.
 470       return;
 471     }
 472     // The _header field is now marked. The winner's 'dmw' variable
 473     // contains the original, unmarked header/dmw value and any
 474     // losers have a marked header/dmw value that will be cleaned
 475     // up below.
 476   }
 477 
 478   if (dmw.is_marked()) {
 479     // Clear the mark from the header/dmw copy in preparation for
 480     // possible restoration from this thread.
 481     assert(dmw.hash() == 0, "hashcode must be 0: dmw=" INTPTR_FORMAT,
 482            dmw.value());
 483     dmw = dmw.set_unmarked();
 484   }
 485   assert(dmw.is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, dmw.value());
 486 
 487   // Install displaced mark word if the object's header still points
 488   // to this ObjectMonitor. All racing callers to this function will
 489   // reach this point, but only one can win.
 490   obj->cas_set_mark(dmw, markWord::encode(this));
 491 
 492   // Note: It does not matter which thread restored the header/dmw
 493   // into the object's header. The thread deflating the monitor just
 494   // wanted the object's header restored and it is. The threads that
 495   // detected a race with the deflation process also wanted the
 496   // object's header restored before they retry their operation and
 497   // because it is restored they will only retry once.
 498 }
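(Reviewer's aside, not part of this webrev: a standalone model of the
idempotence argument in install_displaced_markword_in_object(). Racing
restorers first try to mark the monitor's private header copy -- so exactly
one of them observes the unmarked value -- and the object-header CAS is then
single-winner. std::atomic<uintptr_t> stands in for markWord, and the
hash-code subtleties of the real protocol are omitted.)

    #include <atomic>
    #include <cstdint>

    struct RestoreModel {
      std::atomic<uintptr_t> header;    // the monitor's saved dmw copy
      std::atomic<uintptr_t> obj_mark;  // the object's mark word

      // encoded_this: the mark value that currently points at this monitor.
      // kMarkBit: the bit used to tag an in-progress restoration.
      void install_displaced_mark(uintptr_t encoded_this, uintptr_t kMarkBit) {
        uintptr_t dmw = header.load();
        if (dmw == 0) return;  // header already cleared: restore is done
        if (!(dmw & kMarkBit)) {
          // The first restorer marks the copy; losers reload a marked value.
          uintptr_t marked = dmw | kMarkBit;
          if (!header.compare_exchange_strong(dmw, marked) && dmw == 0) {
            return;  // cleared while we raced: nothing left to do
          }
        }
        dmw &= ~kMarkBit;  // strip the protocol mark before installing
        // Only one racer wins this CAS; for the rest it is a harmless no-op.
        uintptr_t expected = encoded_this;
        obj_mark.compare_exchange_strong(expected, dmw);
      }
    };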
 499 
 500 // Convert the fields used by is_busy() to a string that can be
 501 // used for diagnostic output.
 502 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
 503   ss->print("is_busy: contentions=%d, waiters=%d, ", _contentions, _waiters);
 504   if (!AsyncDeflateIdleMonitors) {
 505     ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
 506   } else if (_owner != DEFLATER_MARKER) {
 507     ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
 508   } else {
 509     ss->print("owner=" INTPTR_FORMAT, p2i(NULL));
 510   }
 511   ss->print(", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, p2i(_cxq),
 512             p2i(_EntryList));
 513   return ss->base();
 514 }
 515 
 516 #define MAX_RECHECK_INTERVAL 1000
 517 
 518 void ObjectMonitor::EnterI(TRAPS) {
 519   ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count());
 520 
 521   Thread * const Self = THREAD;
 522   assert(Self->is_Java_thread(), "invariant");
 523   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 524 
 525   // Try the lock - TATAS
 526   if (TryLock (Self) > 0) {
 527     assert(_succ != Self, "invariant");
 528     assert(_owner == Self, "invariant");
 529     assert(_Responsible != Self, "invariant");
 530     return;
 531   }
 532 
 533   if (AsyncDeflateIdleMonitors &&
 534       try_set_owner_from(Self, DEFLATER_MARKER) == DEFLATER_MARKER) {
 535     // The deflation protocol finished the first part (setting owner),
 536     // but it failed the second part (making ref_count negative) and
 537     // bailed. Or the ObjectMonitor was async deflated and reused.
 538     // Acquired the monitor.
 539     assert(_succ != Self, "invariant");
 540     assert(_Responsible != Self, "invariant");
 541     return;
 542   }
 543 
 544   assert(InitDone, "Unexpectedly not initialized");
 545 
 546   // We try one round of spinning *before* enqueueing Self.
 547   //
 548   // If the _owner is ready but OFFPROC we could use a YieldTo()
 549   // operation to donate the remainder of this thread's quantum
 550   // to the owner.  This has subtle but beneficial affinity
 551   // effects.
 552 
 553   if (TrySpin(Self) > 0) {
 554     assert(_owner == Self, "invariant");
 555     assert(_succ != Self, "invariant");
 556     assert(_Responsible != Self, "invariant");
 557     return;
 558   }
 559 
 560   // The Spin failed -- Enqueue and park the thread ...
 561   assert(_succ != Self, "invariant");
 562   assert(_owner != Self, "invariant");
 563   assert(_Responsible != Self, "invariant");


 640 
 641   for (;;) {
 642 
 643     if (TryLock(Self) > 0) break;
 644     assert(_owner != Self, "invariant");
 645 
 646     // park self
 647     if (_Responsible == Self) {
 648       Self->_ParkEvent->park((jlong) recheckInterval);
 649       // Increase the recheckInterval, but clamp the value.
 650       recheckInterval *= 8;
 651       if (recheckInterval > MAX_RECHECK_INTERVAL) {
 652         recheckInterval = MAX_RECHECK_INTERVAL;
 653       }
 654     } else {
 655       Self->_ParkEvent->park();
 656     }
 657 
 658     if (TryLock(Self) > 0) break;
 659 
 660     if (AsyncDeflateIdleMonitors &&
 661         try_set_owner_from(Self, DEFLATER_MARKER) == DEFLATER_MARKER) {
 662       // The deflation protocol finished the first part (setting owner),
 663       // but it failed the second part (making ref_count negative) and
 664       // bailed. Or the ObjectMonitor was async deflated and reused.
 665       // Acquired the monitor.
 666       break;
 667     }
 668 
 669     // The lock is still contested.
 670     // Keep a tally of the # of futile wakeups.
 671     // Note that the counter is not protected by a lock or updated by atomics.
 672     // That is by design - we trade "lossy" counters which are exposed to
 673     // races during updates for a lower probe effect.
 674 
 675     // This PerfData object can be used in parallel with a safepoint.
 676     // See the work around in PerfDataManager::destroy().
 677     OM_PERFDATA_OP(FutileWakeups, inc());
 678     ++nWakeups;
 679 
 680     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 681     // We can defer clearing _succ until after the spin completes
 682     // TrySpin() must tolerate being called with _succ == Self.
 683     // Try yet another round of adaptive spinning.
 684     if (TrySpin(Self) > 0) break;
 685 
 686     // We can find that we were unpark()ed and redesignated _succ while
 687     // we were spinning.  That's harmless.  If we iterate and call park(),
 688     // park() will consume the event and return immediately and we'll


 753   // the lock.   The barrier ensures that changes to monitor meta-data and data
 754   // protected by the lock will be visible before we release the lock, and
 755   // therefore before some other thread (CPU) has a chance to acquire the lock.
 756   // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
 757   //
 758   // Critically, any prior STs to _succ or EntryList must be visible before
 759   // the ST of null into _owner in the *subsequent* (following) corresponding
 760   // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
 761   // execute a serializing instruction.
 762 
 763   return;
 764 }
 765 
 766 // ReenterI() is a specialized inline form of the latter half of the
 767 // contended slow-path from EnterI().  We use ReenterI() only for
 768 // monitor reentry in wait().
 769 //
 770 // In the future we should reconcile EnterI() and ReenterI().
 771 
 772 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
 773   ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count());
 774 
 775   assert(Self != NULL, "invariant");
 776   assert(SelfNode != NULL, "invariant");
 777   assert(SelfNode->_thread == Self, "invariant");
 778   assert(_waiters > 0, "invariant");
 779   assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
 780   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 781   JavaThread * jt = (JavaThread *) Self;
 782 
 783   int nWakeups = 0;
 784   for (;;) {
 785     ObjectWaiter::TStates v = SelfNode->TState;
 786     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 787     assert(_owner != Self, "invariant");
 788 
 789     if (TryLock(Self) > 0) break;
 790     if (TrySpin(Self) > 0) break;
 791 
 792     if (AsyncDeflateIdleMonitors &&
 793         try_set_owner_from(Self, DEFLATER_MARKER) == DEFLATER_MARKER) {
 794       // The deflation protocol finished the first part (setting owner),
 795       // but it failed the second part (making ref_count negative) and
 796       // bailed. Or the ObjectMonitor was async deflated and reused.
 797       // Acquired the monitor.
 798       break;
 799     }
 800 
 801     // State transition wrappers around park() ...
 802     // ReenterI() wisely defers state transitions until
 803     // it's clear we must park the thread.
 804     {
 805       OSThreadContendState osts(Self->osthread());
 806       ThreadBlockInVM tbivm(jt);
 807 
 808       // cleared by handle_special_suspend_equivalent_condition()
 809       // or java_suspend_self()
 810       jt->set_suspend_equivalent();
 811       Self->_ParkEvent->park();
 812 
 813       // were we externally suspended while we were waiting?
 814       for (;;) {
 815         if (!ExitSuspendEquivalent(jt)) break;
 816         if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
 817         jt->java_suspend_self();
 818         jt->set_suspend_equivalent();
 819       }
 820     }


 970 // the timer expires.  If the lock is high traffic then the stranding latency
 971 // will be low due to (a).  If the lock is low traffic then the odds of
 972 // stranding are lower, although the worst-case stranding latency
 973 // is longer.  Critically, we don't want to put excessive load in the
 974 // platform's timer subsystem.  We want to minimize both the timer injection
 975 // rate (timers created/sec) as well as the number of timers active at
 976 // any one time.  (more precisely, we want to minimize timer-seconds, which is
 977 // the integral of the # of active timers at any instant over time).
 978 // Both impinge on OS scalability.  Given that, at most one thread parked on
 979 // a monitor will use a timer.
 980 //
 981 // There is also the risk of a futile wake-up. If we drop the lock
 982 // another thread can reacquire the lock immediately, and we can
 983 // then wake a thread unnecessarily. This is benign, and we've
 984 // structured the code so the windows are short and the frequency
 985 // of such futile wakeups is low.
 986 
 987 void ObjectMonitor::exit(bool not_suspended, TRAPS) {
 988   Thread * const Self = THREAD;
 989   if (THREAD != _owner) {
 990     void* cur = _owner;
 991     if (THREAD->is_lock_owned((address)cur)) {



 992       assert(_recursions == 0, "invariant");
 993       set_owner_from_BasicLock(Self, cur);  // Convert from BasicLock* to Thread*.
 994       _recursions = 0;
 995     } else {
 996       // Apparent unbalanced locking ...
 997       // Naively we'd like to throw IllegalMonitorStateException.
 998       // As a practical matter we can neither allocate nor throw an
 999       // exception as ::exit() can be called from leaf routines.
1000       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
1001       // Upon deeper reflection, however, in a properly run JVM the only
1002       // way we should encounter this situation is in the presence of
1003       // unbalanced JNI locking. TODO: CheckJNICalls.
1004       // See also: CR4414101
1005       tty->print_cr("ERROR: ObjectMonitor::exit(): thread=" INTPTR_FORMAT
1006                     " is exiting an ObjectMonitor it does not own.",
1007                     p2i(THREAD));
1008       tty->print_cr("The imbalance is possibly caused by JNI locking.");
1009       print_debug_style_on(tty);
1010       // Changing this from an assert() to ADIM_guarantee() may run
1011       // afoul of any test that is inducing non-balanced JNI locking.
1012       ADIM_guarantee(false, "Non-balanced monitor enter/exit!");
1013       return;
1014     }
1015   }
1016 
1017   if (_recursions != 0) {
1018     _recursions--;        // this is simple recursive enter
1019     return;
1020   }
1021 
 1022   // Invariant: after setting Responsible=null a thread must execute
1023   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
1024   _Responsible = NULL;
1025 
1026 #if INCLUDE_JFR
1027   // get the owner's thread id for the MonitorEnter event
1028   // if it is enabled and the thread isn't suspended
1029   if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
1030     _previous_owner_tid = JFR_THREAD_ID(Self);
1031   }
1032 #endif
1033 
1034   for (;;) {
1035     assert(THREAD == _owner, "invariant");
1036 
1037     // release semantics: prior loads and stores from within the critical section
1038     // must not float (reorder) past the following store that drops the lock.
1039     // On SPARC that requires MEMBAR #loadstore|#storestore.
1040     // But of course in TSO #loadstore|#storestore is not required.
1041     if (AsyncDeflateIdleMonitors) {
1042       set_owner_from(NULL, Self);
1043     } else {
1044       OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
1045       OrderAccess::storeload();                        // See if we need to wake a successor
1046     }
1047     if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
1048       return;
1049     }
1050     // Other threads are blocked trying to acquire the lock.
1051 
1052     // Normally the exiting thread is responsible for ensuring succession,
1053     // but if other successors are ready or other entering threads are spinning
1054     // then this thread can simply store NULL into _owner and exit without
1055     // waking a successor.  The existence of spinners or ready successors
1056     // guarantees proper succession (liveness).  Responsibility passes to the
1057     // ready or running successors.  The exiting thread delegates the duty.
1058     // More precisely, if a successor already exists this thread is absolved
1059     // of the responsibility of waking (unparking) one.
1060     //
1061     // The _succ variable is critical to reducing futile wakeup frequency.
1062     // _succ identifies the "heir presumptive" thread that has been made
1063     // ready (unparked) but that has not yet run.  We need only one such
1064     // successor thread to guarantee progress.
1065     // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
1066     // section 3.3 "Futile Wakeup Throttling" for details.


1068     // Note that spinners in Enter() also set _succ non-null.
1069     // In the current implementation spinners opportunistically set
1070     // _succ so that exiting threads might avoid waking a successor.
1071     // Another less appealing alternative would be for the exiting thread
1072     // to drop the lock and then spin briefly to see if a spinner managed
1073     // to acquire the lock.  If so, the exiting thread could exit
1074     // immediately without waking a successor, otherwise the exiting
1075     // thread would need to dequeue and wake a successor.
1076     // (Note that we'd need to make the post-drop spin short, but no
1077     // shorter than the worst-case round-trip cache-line migration time.
1078     // The dropped lock needs to become visible to the spinner, and then
1079     // the acquisition of the lock by the spinner must become visible to
1080     // the exiting thread).
1081 
1082     // It appears that an heir-presumptive (successor) must be made ready.
1083     // Only the current lock owner can manipulate the EntryList or
1084     // drain _cxq, so we need to reacquire the lock.  If we fail
1085     // to reacquire the lock the responsibility for ensuring succession
1086     // falls to the new owner.
1087     //
1088     if (try_set_owner_from(Self, NULL) != NULL) {
1089       return;
1090     }
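    // A disabled sketch of the try_set_owner_from() CAS semantics assumed
    // by the call above, with the same (new_value, old_value) argument
    // order used at the call sites in this file. The lambda is purely
    // illustrative and does not exist in HotSpot.
#if 0
    auto try_set_owner_from_sketch =
        [](std::atomic<void*>& owner, void* new_value, void* old_value) {
      void* expected = old_value;
      owner.compare_exchange_strong(expected, new_value);
      return expected;  // == old_value (NULL here) iff this thread now owns it
    };
#endif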
1091 
1092     guarantee(_owner == THREAD, "invariant");
1093 
1094     ObjectWaiter * w = NULL;
1095 
1096     w = _EntryList;
1097     if (w != NULL) {
1098       // I'd like to write: guarantee (w->_thread != Self).
1099       // But in practice an exiting thread may find itself on the EntryList.
1100       // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
1101       // then calls exit().  Exit releases the lock by setting O._owner to NULL.
1102       // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
1103       // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
1104       // releases the lock "O".  T2 resumes immediately after the ST of null into
1105       // _owner, above.  T2 notices that the EntryList is populated, so it
1106       // reacquires the lock and then finds itself on the EntryList.
1107       // Given all that, we have to tolerate the circumstance where "w" is
1108       // associated with Self.


1201 
1202 
1203 void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
1204   assert(_owner == Self, "invariant");
1205 
1206   // Exit protocol:
1207   // 1. ST _succ = wakee
1208   // 2. membar #loadstore|#storestore;
1209   // 3. ST _owner = NULL
1210   // 4. unpark(wakee)
1211 
1212   _succ = Wakee->_thread;
1213   ParkEvent * Trigger = Wakee->_event;
1214 
1215   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
1216   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
1217   // out-of-scope (non-extant).
1218   Wakee  = NULL;
1219 
1220   // Drop the lock
1221   if (AsyncDeflateIdleMonitors) {
1222     set_owner_from(NULL, Self);
1223   } else {
1224     OrderAccess::release_store(&_owner, (void*)NULL);
1225     OrderAccess::fence();                               // ST _owner vs LD in unpark()
1226   }
1227 
1228   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
1229   Trigger->unpark();
1230 
1231   // Maintain stats and report events to JVMTI
1232   OM_PERFDATA_OP(Parks, inc());
1233 }
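// A disabled sketch of the numbered exit protocol above, modeled with
// C++11 atomics. 'Event' is a hypothetical stand-in for ParkEvent, and
// none of these names exist in HotSpot; illustration only.
#if 0
#include <atomic>
struct Event { void unpark() { /* stand-in for ParkEvent::unpark() */ } };

std::atomic<void*> owner;   // models _owner
std::atomic<void*> succ;    // models _succ

void exit_epilog_sketch(void* wakee_thread, Event* trigger) {
  succ.store(wakee_thread, std::memory_order_relaxed);  // 1. ST _succ = wakee
  std::atomic_thread_fence(std::memory_order_release);  // 2. membar
  owner.store(nullptr, std::memory_order_seq_cst);      // 3. ST _owner = NULL
  trigger->unpark();                                    // 4. unpark(wakee)
}
#endif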
1234 
1235 
1236 // -----------------------------------------------------------------------------
1237 // Class Loader deadlock handling.
1238 //
1239 // complete_exit() exits a lock and returns the recursion count.
1240 // complete_exit()/reenter() operate as a wait() without waiting; see the sketch after reenter() below.
1241 // complete_exit() requires an inflated monitor.
1242 // The _owner field is not always the Thread addr even with an
1243 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1244 // thread due to contention.
1245 intptr_t ObjectMonitor::complete_exit(TRAPS) {
1246   Thread * const Self = THREAD;
1247   assert(Self->is_Java_thread(), "Must be Java thread!");
1248   JavaThread *jt = (JavaThread *)THREAD;
1249 
1250   assert(InitDone, "Unexpectedly not initialized");
1251 
1252   if (THREAD != _owner) {
1253     void* cur = _owner;
1254     if (THREAD->is_lock_owned((address)cur)) {
1255       assert(_recursions == 0, "internal state error");
1256       set_owner_from_BasicLock(Self, cur);  // Convert from BasicLock* to Thread*.
1257       _recursions = 0;
1258     }
1259   }
1260 
1261   guarantee(Self == _owner, "complete_exit not owner");
1262   intptr_t save = _recursions; // record the old recursion count
1263   _recursions = 0;        // set the recursion level to 0
1264   exit(true, Self);           // exit the monitor
1265   guarantee(_owner != Self, "invariant");
1266   return save;
1267 }
1268 
1269 // reenter() enters a lock and sets recursion count
1270 // complete_exit/reenter operate as a wait without waiting
1271 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1272   Thread * const Self = THREAD;
1273   assert(Self->is_Java_thread(), "Must be Java thread!");
1274   JavaThread *jt = (JavaThread *)THREAD;
1275 
1276   guarantee(_owner != Self, "reenter already owner");
1277   enter(THREAD);
1278   // Entered the monitor.
1279   guarantee(_recursions == 0, "reenter recursion");
1280   _recursions = recursions;

1281 }
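// A disabled sketch of the complete_exit()/reenter() pairing described
// above: fully release a possibly-recursively-held monitor around an
// operation that must not hold it, then reacquire and restore the count.
// The caller shape and 'do_blocking_op' are hypothetical.
#if 0
void release_around_blocking_op(ObjectMonitor* mon, TRAPS) {
  intptr_t saved = mon->complete_exit(THREAD);  // drop every recursion level
  do_blocking_op();              // hypothetical; must not hold the monitor here
  mon->reenter(saved, THREAD);   // reacquire and restore the saved count
}
#endif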
1282 
1283 // Checks that the current THREAD owns this monitor and causes an
1284 // immediate return if it doesn't. We don't use the CHECK macro
1285 // because we want the IMSE to be the only exception that is thrown
1286 // from the call site when false is returned. Any other pending
1287 // exception is ignored.
1288 #define CHECK_OWNER()                                                  \
1289   do {                                                                 \
1290     if (!check_owner(THREAD)) {                                        \
1291        assert(HAS_PENDING_EXCEPTION, "expected a pending IMSE here."); \
1292        return;                                                         \
1293      }                                                                 \
1294   } while (false)
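// A disabled sketch of the intended use: CHECK_OWNER() must be the first
// action of a 'void' method that takes TRAPS, since on failure it simply
// returns with the IMSE pending. The method below is hypothetical (it is
// not the real wait()), shown only to illustrate the shape.
#if 0
void ObjectMonitor::wait_sketch(jlong millis, TRAPS) {
  CHECK_OWNER();  // not the owner: IMSE is now pending and we return at once
  // ... from here on, THREAD is known to own this monitor ...
}
#endif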
1295 
1296 // Returns true if the specified thread owns the ObjectMonitor.
1297 // Otherwise returns false and throws IllegalMonitorStateException
1298 // (IMSE). If there is a pending exception and the specified thread
1299 // is not the owner, that exception will be replaced by the IMSE.
1300 bool ObjectMonitor::check_owner(Thread* THREAD) {
1301   if (_owner == THREAD) {
1302     return true;
1303   }
1304   void* cur = _owner;
1305   if (THREAD->is_lock_owned((address)cur)) {
1306     set_owner_from_BasicLock(THREAD, cur);  // Convert from BasicLock* to Thread*.
1307     _recursions = 0;
1308     return true;
1309   }
1310   THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
1311              "current thread is not owner", false);
1312 }
1313 
1314 static void post_monitor_wait_event(EventJavaMonitorWait* event,
1315                                     ObjectMonitor* monitor,
1316                                     jlong notifier_tid,
1317                                     jlong timeout,
1318                                     bool timedout) {
1319   assert(event != NULL, "invariant");
1320   assert(monitor != NULL, "invariant");
1321   event->set_monitorClass(((oop)monitor->object())->klass());
1322   event->set_timeout(timeout);
1323   event->set_address((uintptr_t)monitor->object_addr());
1324   event->set_notifier(notifier_tid);
1325   event->set_timedOut(timedout);
1326   event->commit();


1791     // We periodically check to see if there's a safepoint pending.
1792     if ((ctr & 0xFF) == 0) {
1793       if (SafepointMechanism::should_block(Self)) {
1794         goto Abort;           // abrupt spin egress
1795       }
1796       SpinPause();
1797     }
1798 
1799     // Probe _owner with TATAS (test-and-test-and-set); see the sketch below.
1800     // If this thread observes the monitor transition or flicker
1801     // from locked to unlocked to locked, then the odds that this
1802     // thread will acquire the lock in this spin attempt go down
1803     // considerably.  The same argument applies if the CAS fails
1804     // or if we observe _owner change from one non-null value to
1805     // another non-null value.   In such cases we might abort
1806     // the spin without prejudice or apply a "penalty" to the
1807     // spin count-down variable "ctr", reducing it by 100, say.
1808 
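    // A disabled sketch of a classic TATAS probe, read as a standalone
    // snippet with <atomic> available: attempt the CAS only after the
    // lock is observed free, so spinning waits on a read-shared cache
    // line instead of hammering it with failing atomic writes. The
    // lambda is illustrative only and does not exist in HotSpot.
#if 0
    auto tatas_try_lock = [](std::atomic<void*>& owner, void* self) {
      if (owner.load(std::memory_order_relaxed) != nullptr) {
        return false;                 // test: still held; spin on the load
      }
      void* expected = nullptr;       // test-and-set: now attempt the CAS
      return owner.compare_exchange_strong(expected, self,
                                           std::memory_order_acquire);
    };
#endif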
1809     Thread * ox = (Thread *) _owner;
1810     if (ox == NULL) {
1811       ox = (Thread*)try_set_owner_from(Self, NULL);
1812       if (ox == NULL) {
1813         // The CAS succeeded -- this thread acquired ownership
1814         // Take care of some bookkeeping to exit spin state.
1815         if (_succ == Self) {
1816           _succ = NULL;
1817         }
1818 
1819         // Increase _SpinDuration :
1820         // The spin was successful (profitable) so we tend toward
1821         // longer spin attempts in the future.
1822         // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
1823         // If we acquired the lock early in the spin cycle it
1824         // makes sense to increase _SpinDuration proportionally.
1825         // Note that we don't clamp SpinDuration precisely at SpinLimit.
1826         int x = _SpinDuration;
1827         if (x < Knob_SpinLimit) {
1828           if (x < Knob_Poverty) x = Knob_Poverty;
1829           _SpinDuration = x + Knob_Bonus;
1830         }
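        // Worked example of the adjustment above (assuming the default
        // knob values defined earlier in this file, not shown in this
        // fragment: Knob_Poverty = 1000, Knob_Bonus = 100,
        // Knob_SpinLimit = 5000): with _SpinDuration == 800, x is first
        // lifted to the poverty floor (1000) and then 1100 is stored;
        // with _SpinDuration == 4950 the store is 5050, slightly above
        // SpinLimit -- the imprecise clamping noted above.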
1831         return 1;


2055   }
2056 #define NEWPERFVARIABLE(n)                                                \
2057   {                                                                       \
2058     n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,  \
2059                                          CHECK);                          \
2060   }
2061     NEWPERFCOUNTER(_sync_Inflations);
2062     NEWPERFCOUNTER(_sync_Deflations);
2063     NEWPERFCOUNTER(_sync_ContendedLockAttempts);
2064     NEWPERFCOUNTER(_sync_FutileWakeups);
2065     NEWPERFCOUNTER(_sync_Parks);
2066     NEWPERFCOUNTER(_sync_Notifications);
2067     NEWPERFVARIABLE(_sync_MonExtant);
2068 #undef NEWPERFCOUNTER
2069 #undef NEWPERFVARIABLE
2070   }
2071 
2072   DEBUG_ONLY(InitDone = true;)
2073 }
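// For reference, a sketch of what the NEWPERFVARIABLE(_sync_MonExtant)
// use above expands to after '#n' stringification; illustration only.
#if 0
  {
    _sync_MonExtant = PerfDataManager::create_variable(SUN_RT,
                          "_sync_MonExtant", PerfData::U_Events, CHECK);
  }
#endif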
2074 
2075 // For internal use by ObjectSynchronizer::monitors_iterate().
2076 ObjectMonitorHandle::ObjectMonitorHandle(ObjectMonitor * om_ptr) {
2077   om_ptr->inc_ref_count();
2078   _om_ptr = om_ptr;
2079 }
2080 
2081 ObjectMonitorHandle::~ObjectMonitorHandle() {
2082   if (_om_ptr != NULL) {
2083     _om_ptr->dec_ref_count();
2084     _om_ptr = NULL;
2085   }
2086 }
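// A disabled sketch of the RAII pattern the two bodies above implement:
// while an ObjectMonitorHandle is in scope the ref_count stays positive,
// which (with AsyncDeflateIdleMonitors) keeps async deflation from
// reclaiming the ObjectMonitor out from under the caller. The iteration
// shape below is hypothetical.
#if 0
void visit_monitor(ObjectMonitor* mid) {
  ObjectMonitorHandle omh(mid);  // ctor: inc_ref_count()
  // ... examine *mid; it cannot be deflated while omh is live ...
}                                // dtor: dec_ref_count()
#endif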
2087 
2088 // Save the ObjectMonitor* associated with the specified markWord and
2089 // increment the ref_count. This function should only be called if
2090 // the caller has verified mark.has_monitor() == true. The object
2091 // parameter is needed to verify that the ObjectMonitor* has not been
2092 // deflated and reused for another object.
2093 //
2094 // This function returns true if the ObjectMonitor* has been safely
2095 // saved. This function returns false if we have lost a race with
2096 // async deflation; the caller should retry as appropriate.
2097 //
2098 bool ObjectMonitorHandle::save_om_ptr(oop object, markWord mark) {
2099   guarantee(mark.has_monitor(), "sanity check: mark=" INTPTR_FORMAT,
2100             mark.value());
2101 
2102   ObjectMonitor * om_ptr = mark.monitor();
2103   om_ptr->inc_ref_count();
2104 
2105   if (AsyncDeflateIdleMonitors) {
2106     // There is a race here if the monitor is not owned! The ref_count
2107     // bump above causes any subsequent async deflation to skip this
2108     // monitor, but a deflation that started earlier can still race with us.
2109     if (om_ptr->owner_is_DEFLATER_MARKER() && om_ptr->ref_count() <= 0) {
2110       // Async deflation is in progress and our ref_count increment
2111       // above lost the race to async deflation. Attempt to restore
2112       // the header/dmw to the object's header so that we only retry
2113       // once if the deflater thread happens to be slow.
2114       om_ptr->install_displaced_markword_in_object(object);
2115       om_ptr->dec_ref_count();
2116       return false;
2117     }
2118     if (om_ptr->ref_count() <= 0) {
2119       // Async deflation is in the process of bailing out, but has not
2120       // yet restored the ref_count field so we return false to force
2121       // a retry. We want a positive ref_count value for a true return.
2122       om_ptr->dec_ref_count();
2123       return false;
2124     }
2125     // The ObjectMonitor could have been deflated and reused for
2126     // another object before we bumped the ref_count so make sure
2127     // our object still refers to this ObjectMonitor.
2128     const markWord tmp = object->mark();
2129     if (!tmp.has_monitor() || tmp.monitor() != om_ptr) {
2130       // Async deflation and reuse won the race so we have to retry.
2131       // Skip object header restoration since that's already done.
2132       om_ptr->dec_ref_count();
2133       return false;
2134     }
2135   }
2136 
2137   ADIM_guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
2138                  p2i(_om_ptr));
2139   _om_ptr = om_ptr;
2140   return true;
2141 }
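// A disabled sketch of the retry idiom save_om_ptr() is designed for: on
// a false return the mark must be refetched, because the object may have
// been deflated (and even re-inflated) in the meantime. The caller shape
// is hypothetical, and it assumes the handle's default constructor leaves
// _om_ptr NULL, as the ADIM_guarantee() above requires.
#if 0
bool with_monitor(oop object) {
  for (;;) {
    markWord mark = object->mark();   // refetch the mark on every attempt
    if (!mark.has_monitor()) {
      return false;                   // no longer inflated; caller handles it
    }
    ObjectMonitorHandle omh;
    if (omh.save_om_ptr(object, mark)) {
      // ref_count is positive here; the monitor is safe to use until
      // omh goes out of scope.
      return true;
    }
    // false: lost a race with async deflation -- loop and retry
  }
}
#endif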
2142 
2143 // For internal use by ObjectSynchronizer::inflate().
2144 void ObjectMonitorHandle::set_om_ptr(ObjectMonitor * om_ptr) {
2145   if (_om_ptr == NULL) {
2146     ADIM_guarantee(om_ptr != NULL, "cannot clear an unset om_ptr");
2147     om_ptr->inc_ref_count();
2148     _om_ptr = om_ptr;
2149   } else {
2150     ADIM_guarantee(om_ptr == NULL, "can only clear a set om_ptr");
2151     _om_ptr->dec_ref_count();
2152     _om_ptr = NULL;
2153   }
2154 }
2155 
2156 void ObjectMonitor::print_on(outputStream* st) const {
2157   // The minimal things to print for markWord printing, more can be added for debugging and logging.
2158   st->print("{contentions=0x%08x,waiters=0x%08x"
2159             ",recursions=" INTX_FORMAT ",owner=" INTPTR_FORMAT "}",
2160             contentions(), waiters(), recursions(),
2161             p2i(owner()));
2162 }
2163 void ObjectMonitor::print() const { print_on(tty); }
2164 
2165 // Print the ObjectMonitor like a debugger would:
2166 //
2167 // (ObjectMonitor *) 0x00007fdfb6012e40 = {
2168 //   _header = (_value = 1)
2169 //   _object = 0x000000070ff45fd0
2170 //   _allocation_state = Old
2171 //   _pad_buf0 = {
2172 //     [0] = '\0'
2173 //     ...
2174 //     [43] = '\0'
2175 //   }
2176 //   _owner = 0x0000000000000000
2177 //   _previous_owner_tid = 0
2178 //   _pad_buf1 = {
2179 //     [0] = '\0'
2180 //     ...
2181 //     [47] = '\0'
2182 //   }
2183 //   _ref_count = 1
2184 //   _pad_buf2 = {
2185 //     [0] = '\0'
2186 //     ...
2187 //     [59] = '\0'
2188 //   }
2189 //   _next_om = 0x0000000000000000
2190 //   _recursions = 0
2191 //   _EntryList = 0x0000000000000000
2192 //   _cxq = 0x0000000000000000
2193 //   _succ = 0x0000000000000000
2194 //   _Responsible = 0x0000000000000000
2195 //   _Spinner = 0
2196 //   _SpinDuration = 5000
2197 //   _contentions = 0
2198 //   _WaitSet = 0x0000700009756248
2199 //   _waiters = 1
2200 //   _WaitSetLock = 0
2201 // }
2202 //
2203 void ObjectMonitor::print_debug_style_on(outputStream* st) const {
2204   st->print_cr("(ObjectMonitor *) " INTPTR_FORMAT " = {", p2i(this));
2205   st->print_cr("  _header = " INTPTR_FORMAT, header().value());
2206   st->print_cr("  _object = " INTPTR_FORMAT, p2i(_object));
2207   st->print("  _allocation_state = ");
2208   if (is_free()) {
2209     st->print("Free");
2210   } else if (is_old()) {
2211     st->print("Old");
2212   } else if (is_new()) {
2213     st->print("New");
2214   } else {
2215     st->print("unknown=%d", _allocation_state);
2216   }
2217   st->cr();
2218   st->print_cr("  _pad_buf0 = {");
2219   st->print_cr("    [0] = '\\0'");
2220   st->print_cr("    ...");
2221   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);
2222   st->print_cr("  }");
2223   st->print_cr("  _owner = " INTPTR_FORMAT, p2i(_owner));
2224   st->print_cr("  _previous_owner_tid = " JLONG_FORMAT, _previous_owner_tid);
2225   st->print_cr("  _pad_buf1 = {");
2226   st->print_cr("    [0] = '\\0'");
2227   st->print_cr("    ...");
2228   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf1) - 1);
2229   st->print_cr("  }");
2230   st->print_cr("  _ref_count = %d", ref_count());
2231   st->print_cr("  _pad_buf2 = {");
2232   st->print_cr("    [0] = '\\0'");
2233   st->print_cr("    ...");
2234   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf2) - 1);
2235   st->print_cr("  }");
2236   st->print_cr("  _next_om = " INTPTR_FORMAT, p2i(_next_om));
2237   st->print_cr("  _recursions = " INTX_FORMAT, _recursions);
2238   st->print_cr("  _EntryList = " INTPTR_FORMAT, p2i(_EntryList));
2239   st->print_cr("  _cxq = " INTPTR_FORMAT, p2i(_cxq));
2240   st->print_cr("  _succ = " INTPTR_FORMAT, p2i(_succ));
2241   st->print_cr("  _Responsible = " INTPTR_FORMAT, p2i(_Responsible));
2242   st->print_cr("  _Spinner = %d", _Spinner);
2243   st->print_cr("  _SpinDuration = %d", _SpinDuration);
2244   st->print_cr("  _contentions = %d", _contentions);
2245   st->print_cr("  _WaitSet = " INTPTR_FORMAT, p2i(_WaitSet));
2246   st->print_cr("  _waiters = %d", _waiters);
2247   st->print_cr("  _WaitSetLock = %d", _WaitSetLock);
2248   st->print_cr("}");
2249 }