
src/hotspot/share/runtime/objectMonitor.cpp

rev 57587 : imported patch 8236035.patch.cr0
rev 57588 : dholmes CR - rename simply_set_owner_from() -> set_owner_from() and simply_set_owner_from_BasicLock() -> set_owner_from_BasicLock(); rename release_clear_owner_with_barrier() -> release_clear_owner() and refactor barrier code back into the call sites.
rev 57589 : kbarrett CR - rearrange some loads of _owner field to be more efficient; clarify header comment for try_set_owner_from() declaration; make some loads of _owner field DEBUG_ONLY since they only exist for assert()'s; update related logging calls to use the existing function parameter instead.
rev 57591 : imported patch 8235795.patch.cr0.merged
rev 57593 : coleenp CR part1: add ObjectMonitor::next_om(), set_next_om(), and try_set_next_om(); ObjectMonitor::_next_om field is now private; rename ListGlobals -> ObjectMonitorListGlobals, rename LVars -> om_list_globals, and prefix each ObjectMonitorListGlobals field with '_'; delete static set_next() function; clarify comments; coleenp CR part2: delete stale comments about mux*().
rev 57595 : v2.09a with 8235795, 8235931 and 8236035 extracted; rebased to jdk-14+28; merge with 8236035.patch.cr1; merge with 8235795.patch.cr1; merge with 8236035.patch.cr2; merge with 8235795.patch.cr2; merge with 8235795.patch.cr3.


 224 // * See also http://blogs.sun.com/dave
 225 
 226 
 227 void* ObjectMonitor::operator new (size_t size) throw() {
 228   return AllocateHeap(size, mtInternal);
 229 }
 230 void* ObjectMonitor::operator new[] (size_t size) throw() {
 231   return operator new (size);
 232 }
 233 void ObjectMonitor::operator delete(void* p) {
 234   FreeHeap(p);
 235 }
 236 void ObjectMonitor::operator delete[] (void *p) {
 237   operator delete(p);
 238 }
 239 
 240 // -----------------------------------------------------------------------------
 241 // Enter support
 242 
 243 void ObjectMonitor::enter(TRAPS) {



 244   // The following code is ordered to check the most common cases first
 245   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 246   Thread * const Self = THREAD;
 247 
 248   void * cur = Atomic::cmpxchg(&_owner, (void*)NULL, Self);
 249   if (cur == NULL) {
 250     assert(_recursions == 0, "invariant");
 251     return;
 252   }
 253 
 254   if (cur == Self) {
 255     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 256     _recursions++;
 257     return;
 258   }
 259 
 260   if (Self->is_lock_owned((address)cur)) {
 261     assert(_recursions == 0, "internal state error");
 262     _recursions = 1;
 263     // Commute owner from a thread-specific on-stack BasicLock object address to
 264     // a full-fledged "Thread *".
 265     _owner = Self;








 266     return;
 267   }
 268 
 269   // We've encountered genuine contention.
 270   assert(Self->_Stalled == 0, "invariant");
 271   Self->_Stalled = intptr_t(this);
 272 
 273   // Try one round of spinning *before* enqueueing Self
 274   // and before going through the awkward and expensive state
 275   // transitions.  The following spin is strictly optional ...
 276   // Note that if we acquire the monitor from an initial spin
 277   // we forgo posting JVMTI events and firing DTRACE probes.
 278   if (TrySpin(Self) > 0) {
 279     assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
 280     assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);
 281     assert(((oop)object())->mark() == markWord::encode(this),
 282            "object mark must match encoded this: mark=" INTPTR_FORMAT
 283            ", encoded this=" INTPTR_FORMAT, ((oop)object())->mark().value(),
 284            markWord::encode(this).value());
 285     Self->_Stalled = 0;
 286     return;
 287   }
 288 
 289   assert(_owner != Self, "invariant");
 290   assert(_succ != Self, "invariant");
 291   assert(Self->is_Java_thread(), "invariant");
 292   JavaThread * jt = (JavaThread *) Self;
 293   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 294   assert(jt->thread_state() != _thread_blocked, "invariant");
 295   assert(this->object() != NULL, "invariant");
 296   assert(_contentions >= 0, "invariant");
 297 
 298   // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
 299   // Ensure the object-monitor relationship remains stable while there's contention.
 300   Atomic::inc(&_contentions);
 301 
 302   JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
 303   EventJavaMonitorEnter event;
 304   if (event.should_commit()) {
 305     event.set_monitorClass(((oop)this->object())->klass());
 306     event.set_address((uintptr_t)(this->object_addr()));
 307   }
 308 
 309   { // Change java thread status to indicate blocked on monitor enter.
 310     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 311 
 312     Self->set_current_pending_monitor(this);
 313 
 314     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
 315     if (JvmtiExport::should_post_monitor_contended_enter()) {
 316       JvmtiExport::post_monitor_contended_enter(jt, this);
 317 
 318       // The current thread does not yet own the monitor and does not
 319       // yet appear on any queues that could make it the successor.


 342       //
 343       _recursions = 0;
 344       _succ = NULL;
 345       exit(false, Self);
 346 
 347       jt->java_suspend_self();
 348     }
 349     Self->set_current_pending_monitor(NULL);
 350 
 351     // We cleared the pending monitor info since we've just gotten past
 352     // the enter-check-for-suspend dance and we now own the monitor free
 353     // and clear, i.e., it is no longer pending. The ThreadBlockInVM
 354     // destructor can go to a safepoint at the end of this block. If we
 355     // do a thread dump during that safepoint, then this thread will show
 356     // as having "-locked" the monitor, but the OS and java.lang.Thread
 357     // states will still report that the thread is blocked trying to
 358     // acquire it.
 359   }
 360 
 361   Atomic::dec(&_contentions);
 362   assert(_contentions >= 0, "invariant");
 363   Self->_Stalled = 0;
 364 
 365   // Must either set _recursions = 0 or ASSERT _recursions == 0.
 366   assert(_recursions == 0, "invariant");
 367   assert(_owner == Self, "invariant");
 368   assert(_succ != Self, "invariant");
 369   assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
 370 
 371   // The thread -- now the owner -- is back in vm mode.
 372   // Report the glorious news via TI,DTrace and jvmstat.
 373   // The probe effect is non-trivial.  All the reportage occurs
 374   // while we hold the monitor, increasing the length of the critical
 375   // section.  Amdahl's parallel speedup law comes vividly into play.
 376   //
 377   // Another option might be to aggregate the events (thread local or
 378   // per-monitor aggregation) and defer reporting until a more opportune
 379   // time -- such as next time some thread encounters contention but has
 380   // yet to acquire the lock.  While that thread is spinning we could
 381   // increment JVMStat counters, etc.
 382 


 386 
 387     // The current thread already owns the monitor and is not going to
 388     // call park() for the remainder of the monitor enter protocol. So
 389     // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
 390     // event handler consumed an unpark() issued by the thread that
 391     // just exited the monitor.
 392   }
 393   if (event.should_commit()) {
 394     event.set_previousOwner((uintptr_t)_previous_owner_tid);
 395     event.commit();
 396   }
 397   OM_PERFDATA_OP(ContendedLockAttempts, inc());
 398 }
 399 
 400 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 401 // Callers must compensate as needed.
 402 
 403 int ObjectMonitor::TryLock(Thread * Self) {
 404   void * own = _owner;
 405   if (own != NULL) return 0;
 406   if (Atomic::replace_if_null(&_owner, Self)) {
 407     assert(_recursions == 0, "invariant");
 408     return 1;
 409   }
 410   // The lock had been free momentarily, but we lost the race to the lock.
 411   // Interference -- the CAS failed.
 412   // We can either return -1 or retry.
 413   // Retry doesn't make as much sense because the lock was just acquired.
 414   return -1;
 415 }
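
A caller-side sketch of the caveat above (illustrative only, not part of this file): a failed TryLock() may have done nothing more than a plain load of _owner, so no serializing instruction has executed, and a caller that goes on to re-read monitor state must fence for itself. The helper below is hypothetical.

    // Hypothetical helper sketch: compensate for the non-serializing failure path.
    static int try_lock_fenced(ObjectMonitor* mon, Thread* self) {
      int status = mon->TryLock(self);   // 1: acquired; 0 or -1: failed
      if (status <= 0) {
        OrderAccess::fence();            // a failed TryLock() did not serialize
      }
      return status;
    }
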
 416 
 417 // Convert the fields used by is_busy() to a string that can be
 418 // used for diagnostic output.
 419 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
 420   ss->print("is_busy: contentions=%d, waiters=%d, owner=" INTPTR_FORMAT
 421             ", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, _contentions,
 422             _waiters, p2i(_owner), p2i(_cxq), p2i(_EntryList));









 423   return ss->base();
 424 }
 425 
 426 #define MAX_RECHECK_INTERVAL 1000
 427 
 428 void ObjectMonitor::EnterI(TRAPS) {



 429   Thread * const Self = THREAD;
 430   assert(Self->is_Java_thread(), "invariant");
 431   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 432 
 433   // Try the lock - TATAS
 434   if (TryLock (Self) > 0) {
 435     assert(_succ != Self, "invariant");
 436     assert(_owner == Self, "invariant");
 437     assert(_Responsible != Self, "invariant");
 438     return;
 439   }
 440 











 441   assert(InitDone, "Unexpectedly not initialized");
 442 
 443   // We try one round of spinning *before* enqueueing Self.
 444   //
 445   // If the _owner is ready but OFFPROC we could use a YieldTo()
 446   // operation to donate the remainder of this thread's quantum
 447   // to the owner.  This has subtle but beneficial affinity
 448   // effects.
 449 
 450   if (TrySpin(Self) > 0) {
 451     assert(_owner == Self, "invariant");
 452     assert(_succ != Self, "invariant");
 453     assert(_Responsible != Self, "invariant");
 454     return;
 455   }
 456 
 457   // The Spin failed -- Enqueue and park the thread ...
 458   assert(_succ != Self, "invariant");
 459   assert(_owner != Self, "invariant");
 460   assert(_Responsible != Self, "invariant");


 537 
 538   for (;;) {
 539 
 540     if (TryLock(Self) > 0) break;
 541     assert(_owner != Self, "invariant");
 542 
 543     // park self
 544     if (_Responsible == Self) {
 545       Self->_ParkEvent->park((jlong) recheckInterval);
 546       // Increase the recheckInterval, but clamp the value.
 547       recheckInterval *= 8;
 548       if (recheckInterval > MAX_RECHECK_INTERVAL) {
 549         recheckInterval = MAX_RECHECK_INTERVAL;
 550       }
 551     } else {
 552       Self->_ParkEvent->park();
 553     }
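    // A worked instance of this backoff, assuming recheckInterval starts at 1
    // (set when this thread becomes _Responsible, elided from this diff):
    // successive timed parks wait 1, 8, 64, 512, then 1000 ms on every later
    // iteration, since 512 * 8 = 4096 is clamped to MAX_RECHECK_INTERVAL.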
 554 
 555     if (TryLock(Self) > 0) break;
 556 









 557     // The lock is still contested.
 558     // Keep a tally of the # of futile wakeups.
 559     // Note that the counter is not protected by a lock or updated by atomics.
 560     // That is by design - we trade "lossy" counters which are exposed to
 561     // races during updates for a lower probe effect.
 562 
 563     // This PerfData object can be used in parallel with a safepoint.
 564     // See the work around in PerfDataManager::destroy().
 565     OM_PERFDATA_OP(FutileWakeups, inc());
 566     ++nWakeups;
 567 
 568     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 569     // We can defer clearing _succ until after the spin completes
 570     // TrySpin() must tolerate being called with _succ == Self.
 571     // Try yet another round of adaptive spinning.
 572     if (TrySpin(Self) > 0) break;
 573 
 574     // We can find that we were unpark()ed and redesignated _succ while
 575     // we were spinning.  That's harmless.  If we iterate and call park(),
 576     // park() will consume the event and return immediately and we'll


 641   // the lock.   The barrier ensures that changes to monitor meta-data and data
 642   // protected by the lock will be visible before we release the lock, and
 643   // therefore before some other thread (CPU) has a chance to acquire the lock.
 644   // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
 645   //
 646   // Critically, any prior STs to _succ or EntryList must be visible before
 647   // the ST of null into _owner in the *subsequent* (following) corresponding
 648   // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
 649   // execute a serializing instruction.
 650 
 651   return;
 652 }
 653 
 654 // ReenterI() is a specialized inline form of the latter half of the
 655 // contended slow-path from EnterI().  We use ReenterI() only for
 656 // monitor reentry in wait().
 657 //
 658 // In the future we should reconcile EnterI() and ReenterI().
 659 
 660 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {



 661   assert(Self != NULL, "invariant");
 662   assert(SelfNode != NULL, "invariant");
 663   assert(SelfNode->_thread == Self, "invariant");
 664   assert(_waiters > 0, "invariant");
 665   assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
 666   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 667   JavaThread * jt = (JavaThread *) Self;
 668 
 669   int nWakeups = 0;
 670   for (;;) {
 671     ObjectWaiter::TStates v = SelfNode->TState;
 672     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 673     assert(_owner != Self, "invariant");
 674 
 675     if (TryLock(Self) > 0) break;
 676     if (TrySpin(Self) > 0) break;
 677 









 678     // State transition wrappers around park() ...
 679     // ReenterI() wisely defers state transitions until
 680     // it's clear we must park the thread.
 681     {
 682       OSThreadContendState osts(Self->osthread());
 683       ThreadBlockInVM tbivm(jt);
 684 
 685       // cleared by handle_special_suspend_equivalent_condition()
 686       // or java_suspend_self()
 687       jt->set_suspend_equivalent();
 688       Self->_ParkEvent->park();
 689 
 690       // were we externally suspended while we were waiting?
 691       for (;;) {
 692         if (!ExitSuspendEquivalent(jt)) break;
 693         if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
 694         jt->java_suspend_self();
 695         jt->set_suspend_equivalent();
 696       }
 697     }


 801   SelfNode->_prev  = (ObjectWaiter *) 0xBAD;
 802   SelfNode->_next  = (ObjectWaiter *) 0xBAD;
 803   SelfNode->TState = ObjectWaiter::TS_RUN;
 804 #endif
 805 }
 806 
 807 // -----------------------------------------------------------------------------
 808 // Exit support
 809 //
 810 // exit()
 811 // ~~~~~~
 812 // Note that the collector can't reclaim the objectMonitor or deflate
 813 // the object out from underneath the thread calling ::exit() as the
 814 // thread calling ::exit() never transitions to a stable state.
 815 // This inhibits GC, which in turn inhibits asynchronous (and
 816 // inopportune) reclamation of "this".
 817 //
 818 // We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
 819 // There's one exception to the claim above, however.  EnterI() can call
 820 // exit() to drop a lock if the acquirer has been externally suspended.
 821 // In that case exit() is called with _thread_state as _thread_blocked,
 822 // but the monitor's _contentions field is > 0, which inhibits reclamation.
 823 //
 824 // 1-0 exit
 825 // ~~~~~~~~
 826 // ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
 827 // the fast-path operators have been optimized so the common ::exit()
 828 // operation is 1-0, e.g., see macroAssembler_x86.cpp: fast_unlock().
 829 // The code emitted by fast_unlock() elides the usual MEMBAR.  This
 830 // greatly improves latency -- MEMBAR and CAS having considerable local
 831 // latency on modern processors -- but at the cost of "stranding".  Absent the
 832 // MEMBAR, a thread in fast_unlock() can race a thread in the slow
 833 // ::enter() path, resulting in the entering thread being stranded
 834 // and a progress-liveness failure.   Stranding is extremely rare.
 835 // We use timers (timed park operations) & periodic polling to detect
 836 // and recover from stranding.  Potentially stranded threads periodically
 837 // wake up and poll the lock.  See the usage of the _Responsible variable.
 838 //
 839 // The CAS() in enter provides for safety and exclusion, while the CAS or
 840 // MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
 841 // eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
 842 // We detect and recover from stranding with timers.


 845 // thread acquires the lock and then drops the lock, at which time the
 846 // exiting thread will notice and unpark the stranded thread, or, (b)
 847 // the timer expires.  If the lock is high traffic then the stranding latency
 848 // will be low due to (a).  If the lock is low traffic then the odds of
 849 // stranding are lower, although the worst-case stranding latency
 850 // is longer.  Critically, we don't want to put excessive load in the
 851 // platform's timer subsystem.  We want to minimize both the timer injection
 852 // rate (timers created/sec) as well as the number of timers active at
 853 // any one time.  (more precisely, we want to minimize timer-seconds, which is
 854 // the integral of the # of active timers at any instant over time).
 855 // Both impinge on OS scalability.  Given that, at most one thread parked on
 856 // a monitor will use a timer.
 857 //
 858 // There is also the risk of a futile wake-up. If we drop the lock
 859 // another thread can reacquire the lock immediately, and we can
 860 // then wake a thread unnecessarily. This is benign, and we've
 861 // structured the code so the windows are short and the frequency
 862 // of such futile wakeups is low.
 863 
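To make the 1-1 vs. 1-0 trade-off above concrete, here is a minimal standalone model (plain C++11 std::atomic, not HotSpot code; all names are illustrative):

    #include <atomic>

    static std::atomic<void*> owner{nullptr};

    void exit_1_1() {
      // 1-1 exit: a full barrier (here a seq_cst exchange) on every unlock
      // serializes the exiting thread against slow-path enterers -- no
      // stranding, but MEMBAR/CAS latency on every exit.
      owner.exchange(nullptr, std::memory_order_seq_cst);
    }

    void exit_1_0() {
      // 1-0 exit: a plain release store elides the barrier. An enterer that
      // parked just after observing a non-null owner may never be unparked;
      // the timed parks driven by _Responsible exist to close that window.
      owner.store(nullptr, std::memory_order_release);
    }
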
 864 void ObjectMonitor::exit(bool not_suspended, TRAPS) {
 865   Thread * const Self = THREAD;
 866   if (THREAD != _owner) {
 867     if (THREAD->is_lock_owned((address) _owner)) {
 868       // Transmute _owner from a BasicLock pointer to a Thread address.
 869       // We don't need to hold _mutex for this transition.
 870       // Non-null to Non-null is safe as long as all readers can
 871       // tolerate either flavor.
 872       assert(_recursions == 0, "invariant");
 873       _owner = THREAD;
 874       _recursions = 0;
 875     } else {
 876       // Apparent unbalanced locking ...
 877       // Naively we'd like to throw IllegalMonitorStateException.
 878       // As a practical matter we can neither allocate nor throw an
 879       // exception as ::exit() can be called from leaf routines.
 880       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
 881       // Upon deeper reflection, however, in a properly run JVM the only
 882       // way we should encounter this situation is in the presence of
 883       // unbalanced JNI locking. TODO: CheckJNICalls.
 884       // See also: CR4414101
 885 #ifdef ASSERT
 886       LogStreamHandle(Error, monitorinflation) lsh;
 887       lsh.print_cr("ERROR: ObjectMonitor::exit(): thread=" INTPTR_FORMAT
 888                     " is exiting an ObjectMonitor it does not own.", p2i(THREAD));
 889       lsh.print_cr("The imbalance is possibly caused by JNI locking.");
 890       print_debug_style_on(&lsh);
 891 #endif
 892       assert(false, "Non-balanced monitor enter/exit!");
 893       return;


 897   if (_recursions != 0) {
 898     _recursions--;        // this is simple recursive enter
 899     return;
 900   }
 901 
 902   // Invariant: after setting Responsible=null a thread must execute
 903   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
 904   _Responsible = NULL;
 905 
 906 #if INCLUDE_JFR
 907   // get the owner's thread id for the MonitorEnter event
 908   // if it is enabled and the thread isn't suspended
 909   if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
 910     _previous_owner_tid = JFR_THREAD_ID(Self);
 911   }
 912 #endif
 913 
 914   for (;;) {
 915     assert(THREAD == _owner, "invariant");
 916 

 917     // release semantics: prior loads and stores from within the critical section
 918     // must not float (reorder) past the following store that drops the lock.
 919     Atomic::release_store(&_owner, (void*)NULL);   // drop the lock
 920     OrderAccess::storeload();                      // See if we need to wake a successor




 921     if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
 922       return;
 923     }
 924     // Other threads are blocked trying to acquire the lock.
 925 
 926     // Normally the exiting thread is responsible for ensuring succession,
 927     // but if other successors are ready or other entering threads are spinning
 928     // then this thread can simply store NULL into _owner and exit without
 929     // waking a successor.  The existence of spinners or ready successors
 930     // guarantees proper succession (liveness).  Responsibility passes to the
 931     // ready or running successors.  The exiting thread delegates the duty.
 932     // More precisely, if a successor already exists this thread is absolved
 933     // of the responsibility of waking (unparking) one.
 934     //
 935     // The _succ variable is critical to reducing futile wakeup frequency.
 936     // _succ identifies the "heir presumptive" thread that has been made
 937     // ready (unparked) but that has not yet run.  We need only one such
 938     // successor thread to guarantee progress.
 939     // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
 940     // section 3.3 "Futile Wakeup Throttling" for details.


 942     // Note that spinners in Enter() also set _succ non-null.
 943     // In the current implementation spinners opportunistically set
 944     // _succ so that exiting threads might avoid waking a successor.
 945     // Another less appealing alternative would be for the exiting thread
 946     // to drop the lock and then spin briefly to see if a spinner managed
 947     // to acquire the lock.  If so, the exiting thread could exit
 948     // immediately without waking a successor, otherwise the exiting
 949     // thread would need to dequeue and wake a successor.
 950     // (Note that we'd need to make the post-drop spin short, but no
 951     // shorter than the worst-case round-trip cache-line migration time.
 952     // The dropped lock needs to become visible to the spinner, and then
 953     // the acquisition of the lock by the spinner must become visible to
 954     // the exiting thread).
 955 
 956     // It appears that an heir-presumptive (successor) must be made ready.
 957     // Only the current lock owner can manipulate the EntryList or
 958     // drain _cxq, so we need to reacquire the lock.  If we fail
 959     // to reacquire the lock the responsibility for ensuring succession
 960     // falls to the new owner.
 961     //
 962     if (!Atomic::replace_if_null(&_owner, THREAD)) {
 963       return;
 964     }
 965 
 966     guarantee(_owner == THREAD, "invariant");
 967 
 968     ObjectWaiter * w = NULL;
 969 
 970     w = _EntryList;
 971     if (w != NULL) {
 972       // I'd like to write: guarantee (w->_thread != Self).
 973       // But in practice an exiting thread may find itself on the EntryList.
 974       // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
 975       // then calls exit().  exit() releases the lock by setting O._owner to NULL.
 976       // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
 977       // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
 978       // releases the lock "O".  T2 resumes immediately after the ST of null into
 979       // _owner, above.  T2 notices that the EntryList is populated, so it
 980       // reacquires the lock and then finds itself on the EntryList.
 981       // Given all that, we have to tolerate the circumstance where "w" is
 982       // associated with Self.


1074 }
1075 
1076 
1077 void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
1078   assert(_owner == Self, "invariant");
1079 
1080   // Exit protocol:
1081   // 1. ST _succ = wakee
1082   // 2. membar #loadstore|#storestore;
 1083   // 3. ST _owner = NULL
 1084   // 4. unpark(wakee)
1085 
1086   _succ = Wakee->_thread;
1087   ParkEvent * Trigger = Wakee->_event;
1088 
1089   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
1090   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
1091   // out-of-scope (non-extant).
1092   Wakee  = NULL;
1093 
1094   // Drop the lock
1095   Atomic::release_store(&_owner, (void*)NULL);
1096   OrderAccess::fence();                               // ST _owner vs LD in unpark()

1097 
1098   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
1099   Trigger->unpark();
1100 
1101   // Maintain stats and report events to JVMTI
1102   OM_PERFDATA_OP(Parks, inc());
1103 }
1104 
1105 
1106 // -----------------------------------------------------------------------------
1107 // Class Loader deadlock handling.
1108 //
1109 // complete_exit exits a lock returning recursion count
1110 // complete_exit/reenter operate as a wait without waiting
1111 // complete_exit requires an inflated monitor
1112 // The _owner field is not always the Thread addr even with an
1113 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1114 // thread due to contention.
1115 intx ObjectMonitor::complete_exit(TRAPS) {
1116   Thread * const Self = THREAD;
1117   assert(Self->is_Java_thread(), "Must be Java thread!");
1118   JavaThread *jt = (JavaThread *)THREAD;
1119 
1120   assert(InitDone, "Unexpectedly not initialized");
1121 
1122   if (THREAD != _owner) {
1123     if (THREAD->is_lock_owned ((address)_owner)) {

1124       assert(_recursions == 0, "internal state error");
 1125     _owner = THREAD;   // Convert from BasicLock addr to Thread addr
1126       _recursions = 0;
1127     }
1128   }
1129 
1130   guarantee(Self == _owner, "complete_exit not owner");
1131   intx save = _recursions; // record the old recursion count
1132   _recursions = 0;        // set the recursion level to be 0
1133   exit(true, Self);           // exit the monitor
1134   guarantee(_owner != Self, "invariant");
1135   return save;
1136 }
1137 
1138 // reenter() enters a lock and sets recursion count
1139 // complete_exit/reenter operate as a wait without waiting
1140 void ObjectMonitor::reenter(intx recursions, TRAPS) {
1141   Thread * const Self = THREAD;
1142   assert(Self->is_Java_thread(), "Must be Java thread!");
1143   JavaThread *jt = (JavaThread *)THREAD;
1144 
1145   guarantee(_owner != Self, "reenter already owner");
1146   enter(THREAD);       // enter the monitor

1147   guarantee(_recursions == 0, "reenter recursion");
1148   _recursions = recursions;
1149   return;
1150 }
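
A paired-usage sketch of the two halves above (hypothetical caller; the real call sites live outside this file):

    // Drop the monitor completely across a blocking operation, then restore it:
    intx saved = monitor->complete_exit(THREAD);  // returns the old recursion count
    // ... block without holding the monitor (a "wait without waiting") ...
    monitor->reenter(saved, THREAD);              // reacquire and restore the count
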
1151 
1152 // Checks that the current THREAD owns this monitor and causes an
1153 // immediate return if it doesn't. We don't use the CHECK macro
1154 // because we want the IMSE to be the only exception that is thrown
1155 // from the call site when false is returned. Any other pending
1156 // exception is ignored.
1157 #define CHECK_OWNER()                                                  \
1158   do {                                                                 \
1159     if (!check_owner(THREAD)) {                                        \
1160        assert(HAS_PENDING_EXCEPTION, "expected a pending IMSE here."); \
1161        return;                                                         \
1162      }                                                                 \
1163   } while (false)
1164 
1165 // Returns true if the specified thread owns the ObjectMonitor.
1166 // Otherwise returns false and throws IllegalMonitorStateException
1167 // (IMSE). If there is a pending exception and the specified thread
1168 // is not the owner, that exception will be replaced by the IMSE.
1169 bool ObjectMonitor::check_owner(Thread* THREAD) {
1170   if (_owner == THREAD) {

1171     return true;
1172   }
1173   if (THREAD->is_lock_owned((address)_owner)) {
1174     _owner = THREAD;  // convert from BasicLock addr to Thread addr
1175     _recursions = 0;
1176     return true;
1177   }
1178   THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
1179              "current thread is not owner", false);
1180 }
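
Typical call-site shape for CHECK_OWNER() (a sketch modeled on this file's wait()-style operations; bodies elided):

    // void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
    //   CHECK_OWNER();  // returns immediately with a pending IMSE if not owner
    //   ...
    // }
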
1181 
1182 static void post_monitor_wait_event(EventJavaMonitorWait* event,
1183                                     ObjectMonitor* monitor,
1184                                     jlong notifier_tid,
1185                                     jlong timeout,
1186                                     bool timedout) {
1187   assert(event != NULL, "invariant");
1188   assert(monitor != NULL, "invariant");
1189   event->set_monitorClass(((oop)monitor->object())->klass());
1190   event->set_timeout(timeout);
1191   event->set_address((uintptr_t)monitor->object_addr());
1192   event->set_notifier(notifier_tid);
1193   event->set_timedOut(timedout);
1194   event->commit();


1663     // We periodically check to see if there's a safepoint pending.
1664     if ((ctr & 0xFF) == 0) {
1665       if (SafepointMechanism::should_block(Self)) {
1666         goto Abort;           // abrupt spin egress
1667       }
1668       SpinPause();
1669     }
1670 
1671     // Probe _owner with TATAS
1672     // If this thread observes the monitor transition or flicker
1673     // from locked to unlocked to locked, then the odds that this
1674     // thread will acquire the lock in this spin attempt go down
1675     // considerably.  The same argument applies if the CAS fails
1676     // or if we observe _owner change from one non-null value to
1677     // another non-null value.   In such cases we might abort
1678     // the spin without prejudice or apply a "penalty" to the
1679     // spin count-down variable "ctr", reducing it by 100, say.
1680 
1681     Thread * ox = (Thread *) _owner;
1682     if (ox == NULL) {
1683       ox = (Thread*)Atomic::cmpxchg(&_owner, (void*)NULL, Self);
1684       if (ox == NULL) {
1685         // The CAS succeeded -- this thread acquired ownership
1686         // Take care of some bookkeeping to exit spin state.
1687         if (_succ == Self) {
1688           _succ = NULL;
1689         }
1690 
1691         // Increase _SpinDuration :
1692         // The spin was successful (profitable) so we tend toward
1693         // longer spin attempts in the future.
1694         // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
1695         // If we acquired the lock early in the spin cycle it
1696         // makes sense to increase _SpinDuration proportionally.
1697         // Note that we don't clamp SpinDuration precisely at SpinLimit.
1698         int x = _SpinDuration;
1699         if (x < Knob_SpinLimit) {
1700           if (x < Knob_Poverty) x = Knob_Poverty;
1701           _SpinDuration = x + Knob_Bonus;
1702         }
1703         return 1;
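        // A worked instance of this adjustment, assuming this file's default
        // knob values (Knob_Poverty = 1000, Knob_Bonus = 100, Knob_SpinLimit
        // = 5000): a successful spin with _SpinDuration = 900 first raises x
        // to the poverty floor (1000), then stores 1000 + 100 = 1100. A bonus
        // granted at x = 4999 leaves 5099, which is why the clamp at
        // SpinLimit is described above as imprecise.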


1927   }
1928 #define NEWPERFVARIABLE(n)                                                \
1929   {                                                                       \
1930     n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,  \
1931                                          CHECK);                          \
1932   }
1933     NEWPERFCOUNTER(_sync_Inflations);
1934     NEWPERFCOUNTER(_sync_Deflations);
1935     NEWPERFCOUNTER(_sync_ContendedLockAttempts);
1936     NEWPERFCOUNTER(_sync_FutileWakeups);
1937     NEWPERFCOUNTER(_sync_Parks);
1938     NEWPERFCOUNTER(_sync_Notifications);
1939     NEWPERFVARIABLE(_sync_MonExtant);
1940 #undef NEWPERFCOUNTER
1941 #undef NEWPERFVARIABLE
1942   }
1943 
1944   DEBUG_ONLY(InitDone = true;)
1945 }
 1946 
1947 void ObjectMonitor::print_on(outputStream* st) const {
 1948   // The minimal things to print for markWord printing; more can be added for debugging and logging.
1949   st->print("{contentions=0x%08x,waiters=0x%08x"
1950             ",recursions=" INTX_FORMAT ",owner=" INTPTR_FORMAT "}",
1951             contentions(), waiters(), recursions(),
1952             p2i(owner()));
1953 }
1954 void ObjectMonitor::print() const { print_on(tty); }
1955 
1956 #ifdef ASSERT
1957 // Print the ObjectMonitor like a debugger would:
1958 //
1959 // (ObjectMonitor) 0x00007fdfb6012e40 = {
1960 //   _header = 0x0000000000000001
1961 //   _object = 0x000000070ff45fd0
1962 //   _next_om = 0x0000000000000000
1963 //   _pad_buf0 = {
1964 //     [0] = '\0'
1965 //     ...
1966 //     [103] = '\0'
1967 //   }
1968 //   _owner = 0x0000000000000000
1969 //   _previous_owner_tid = 0












1970 //   _recursions = 0
1971 //   _EntryList = 0x0000000000000000
1972 //   _cxq = 0x0000000000000000
1973 //   _succ = 0x0000000000000000
1974 //   _Responsible = 0x0000000000000000
1975 //   _Spinner = 0
1976 //   _SpinDuration = 5000
1977 //   _contentions = 0
1978 //   _WaitSet = 0x0000700009756248
1979 //   _waiters = 1
1980 //   _WaitSetLock = 0
1981 // }
1982 //
1983 void ObjectMonitor::print_debug_style_on(outputStream* st) const {
1984   st->print_cr("(ObjectMonitor*) " INTPTR_FORMAT " = {", p2i(this));
1985   st->print_cr("  _header = " INTPTR_FORMAT, header().value());
1986   st->print_cr("  _object = " INTPTR_FORMAT, p2i(_object));
1987   st->print_cr("  _next_om = " INTPTR_FORMAT, p2i(_next_om));










1988   st->print_cr("  _pad_buf0 = {");
1989   st->print_cr("    [0] = '\\0'");
1990   st->print_cr("    ...");
1991   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);
1992   st->print_cr("  }");
1993   st->print_cr("  _owner = " INTPTR_FORMAT, p2i(_owner));
1994   st->print_cr("  _previous_owner_tid = " JLONG_FORMAT, _previous_owner_tid);












1995   st->print_cr("  _recursions = " INTX_FORMAT, _recursions);
1996   st->print_cr("  _EntryList = " INTPTR_FORMAT, p2i(_EntryList));
1997   st->print_cr("  _cxq = " INTPTR_FORMAT, p2i(_cxq));
1998   st->print_cr("  _succ = " INTPTR_FORMAT, p2i(_succ));
1999   st->print_cr("  _Responsible = " INTPTR_FORMAT, p2i(_Responsible));
2000   st->print_cr("  _Spinner = %d", _Spinner);
2001   st->print_cr("  _SpinDuration = %d", _SpinDuration);
2002   st->print_cr("  _contentions = %d", _contentions);
2003   st->print_cr("  _WaitSet = " INTPTR_FORMAT, p2i(_WaitSet));
2004   st->print_cr("  _waiters = %d", _waiters);
2005   st->print_cr("  _WaitSetLock = %d", _WaitSetLock);
2006   st->print_cr("}");
2007 }
2008 #endif


 224 // * See also http://blogs.sun.com/dave
 225 
 226 
 227 void* ObjectMonitor::operator new (size_t size) throw() {
 228   return AllocateHeap(size, mtInternal);
 229 }
 230 void* ObjectMonitor::operator new[] (size_t size) throw() {
 231   return operator new (size);
 232 }
 233 void ObjectMonitor::operator delete(void* p) {
 234   FreeHeap(p);
 235 }
 236 void ObjectMonitor::operator delete[] (void *p) {
 237   operator delete(p);
 238 }
 239 
 240 // -----------------------------------------------------------------------------
 241 // Enter support
 242 
 243 void ObjectMonitor::enter(TRAPS) {
 244   jint l_ref_count = ref_count();
 245   ADIM_guarantee(l_ref_count > 0, "must be positive: l_ref_count=%d, ref_count=%d", l_ref_count, ref_count());
 246 
 247   // The following code is ordered to check the most common cases first
 248   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 249   Thread * const Self = THREAD;
 250 
 251   void* cur = try_set_owner_from(NULL, Self);
 252   if (cur == NULL) {
 253     assert(_recursions == 0, "invariant");
 254     return;
 255   }
 256 
 257   if (cur == Self) {
 258     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 259     _recursions++;
 260     return;
 261   }
 262 
 263   if (Self->is_lock_owned((address)cur)) {
 264     assert(_recursions == 0, "internal state error");
 265     _recursions = 1;
 266     set_owner_from_BasicLock(cur, Self);  // Convert from BasicLock* to Thread*.
 267     return;
 268   }
 269 
 270   if (AsyncDeflateIdleMonitors &&
 271       try_set_owner_from(DEFLATER_MARKER, Self) == DEFLATER_MARKER) {
 272     // The deflation protocol finished the first part (setting owner),
 273     // but it failed the second part (making ref_count negative) and
 274     // bailed. Or the ObjectMonitor was async deflated and reused.
 275     // Acquired the monitor.
 276     assert(_recursions == 0, "invariant");
 277     return;
 278   }
 279 
 280   // We've encountered genuine contention.
 281   assert(Self->_Stalled == 0, "invariant");
 282   Self->_Stalled = intptr_t(this);
 283 
 284   // Try one round of spinning *before* enqueueing Self
 285   // and before going through the awkward and expensive state
 286   // transitions.  The following spin is strictly optional ...
 287   // Note that if we acquire the monitor from an initial spin
 288   // we forgo posting JVMTI events and firing DTRACE probes.
 289   if (TrySpin(Self) > 0) {
 290     assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
 291     assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);
 292     assert(((oop)object())->mark() == markWord::encode(this),
 293            "object mark must match encoded this: mark=" INTPTR_FORMAT
 294            ", encoded this=" INTPTR_FORMAT, ((oop)object())->mark().value(),
 295            markWord::encode(this).value());
 296     Self->_Stalled = 0;
 297     return;
 298   }
 299 
 300   assert(_owner != Self, "invariant");
 301   assert(_succ != Self, "invariant");
 302   assert(Self->is_Java_thread(), "invariant");
 303   JavaThread * jt = (JavaThread *) Self;
 304   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 305   assert(jt->thread_state() != _thread_blocked, "invariant");
 306   assert(this->object() != NULL, "invariant");
 307   assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions);
 308 
 309   // Keep track of contention for JVM/TI and M&M queries.

 310   Atomic::inc(&_contentions);
 311 
 312   JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
 313   EventJavaMonitorEnter event;
 314   if (event.should_commit()) {
 315     event.set_monitorClass(((oop)this->object())->klass());
 316     event.set_address((uintptr_t)(this->object_addr()));
 317   }
 318 
 319   { // Change java thread status to indicate blocked on monitor enter.
 320     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 321 
 322     Self->set_current_pending_monitor(this);
 323 
 324     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
 325     if (JvmtiExport::should_post_monitor_contended_enter()) {
 326       JvmtiExport::post_monitor_contended_enter(jt, this);
 327 
 328       // The current thread does not yet own the monitor and does not
 329       // yet appear on any queues that could make it the successor.


 352       //
 353       _recursions = 0;
 354       _succ = NULL;
 355       exit(false, Self);
 356 
 357       jt->java_suspend_self();
 358     }
 359     Self->set_current_pending_monitor(NULL);
 360 
 361     // We cleared the pending monitor info since we've just gotten past
 362     // the enter-check-for-suspend dance and we now own the monitor free
 363     // and clear, i.e., it is no longer pending. The ThreadBlockInVM
 364     // destructor can go to a safepoint at the end of this block. If we
 365     // do a thread dump during that safepoint, then this thread will show
 366     // as having "-locked" the monitor, but the OS and java.lang.Thread
 367     // states will still report that the thread is blocked trying to
 368     // acquire it.
 369   }
 370 
 371   Atomic::dec(&_contentions);
 372   assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions);
 373   Self->_Stalled = 0;
 374 
 375   // Must either set _recursions = 0 or ASSERT _recursions == 0.
 376   assert(_recursions == 0, "invariant");
 377   assert(_owner == Self, "invariant");
 378   assert(_succ != Self, "invariant");
 379   assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
 380 
 381   // The thread -- now the owner -- is back in vm mode.
 382   // Report the glorious news via TI,DTrace and jvmstat.
 383   // The probe effect is non-trivial.  All the reportage occurs
 384   // while we hold the monitor, increasing the length of the critical
 385   // section.  Amdahl's parallel speedup law comes vividly into play.
 386   //
 387   // Another option might be to aggregate the events (thread local or
 388   // per-monitor aggregation) and defer reporting until a more opportune
 389   // time -- such as next time some thread encounters contention but has
 390   // yet to acquire the lock.  While that thread is spinning we could
 391   // increment JVMStat counters, etc.
 392 


 396 
 397     // The current thread already owns the monitor and is not going to
 398     // call park() for the remainder of the monitor enter protocol. So
 399     // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
 400     // event handler consumed an unpark() issued by the thread that
 401     // just exited the monitor.
 402   }
 403   if (event.should_commit()) {
 404     event.set_previousOwner((uintptr_t)_previous_owner_tid);
 405     event.commit();
 406   }
 407   OM_PERFDATA_OP(ContendedLockAttempts, inc());
 408 }
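
The DEFLATER_MARKER checks above are the enterer's half of the async deflation handshake; the deflater's half lives elsewhere in this patch series. Its shape, sketched here only from the comments above:

    // Deflater-side sketch (not the actual deflation code):
    //   1. try_set_owner_from(NULL, DEFLATER_MARKER)  // part one: claim _owner
    //   2. try to drive ref_count negative            // part two: lock out enterers
    //   3. if part two fails, bail out -- a contending enterer may then take
    //      ownership via try_set_owner_from(DEFLATER_MARKER, Self), as above.
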
 409 
 410 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 411 // Callers must compensate as needed.
 412 
 413 int ObjectMonitor::TryLock(Thread * Self) {
 414   void * own = _owner;
 415   if (own != NULL) return 0;
 416   if (try_set_owner_from(NULL, Self) == NULL) {
 417     assert(_recursions == 0, "invariant");
 418     return 1;
 419   }
 420   // The lock had been free momentarily, but we lost the race to the lock.
 421   // Interference -- the CAS failed.
 422   // We can either return -1 or retry.
 423   // Retry doesn't make as much sense because the lock was just acquired.
 424   return -1;
 425 }
 426 
 427 // Install the displaced mark word (dmw) of a deflating ObjectMonitor
 428 // into the header of the object associated with the monitor. This
 429 // idempotent method is called by a thread that is deflating a
 430 // monitor and by other threads that have detected a race with the
 431 // deflation process.
 432 void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
 433   // This function must only be called when (owner == DEFLATER_MARKER
 434   // && ref_count <= 0), but we can't guarantee that here because
 435   // those values could change when the ObjectMonitor gets moved from
 436   // the global free list to a per-thread free list.
 437 
 438   guarantee(obj != NULL, "must be non-NULL");
 439   if (object() != obj) {
 440     // ObjectMonitor's object ref no longer refers to the target object
 441     // so the object's header has already been restored.
 442     return;
 443   }
 444 
 445   markWord dmw = header();
 446   if (dmw.value() == 0) {
 447     // ObjectMonitor's header/dmw has been cleared so the ObjectMonitor
 448     // has been deflated and taken off the global free list.
 449     return;
 450   }
 451 
 452   // A non-NULL dmw has to be either neutral (not locked and not marked)
 453   // or already participating in this restoration protocol.
 454   assert(dmw.is_neutral() || (dmw.is_marked() && dmw.hash() == 0),
 455          "failed precondition: dmw=" INTPTR_FORMAT, dmw.value());
 456 
 457   markWord marked_dmw = markWord::zero();
 458   if (!dmw.is_marked() && dmw.hash() == 0) {
 459     // This dmw has not yet started the restoration protocol so we
 460     // mark a copy of the dmw to begin the protocol.
 461     // Note: A dmw with a hashcode does not take this code path.
 462     marked_dmw = dmw.set_marked();
 463 
 464     // All of the callers to this function can be racing with each
 465     // other trying to update the _header field.
 466     dmw = (markWord) Atomic::cmpxchg(&_header, dmw, marked_dmw);
 467     if (dmw.value() == 0) {
 468       // ObjectMonitor's header/dmw has been cleared so the object's
 469       // header has already been restored.
 470       return;
 471     }
 472     // The _header field is now marked. The winner's 'dmw' variable
 473     // contains the original, unmarked header/dmw value and any
 474     // losers have a marked header/dmw value that will be cleaned
 475     // up below.
 476   }
 477 
 478   if (dmw.is_marked()) {
 479     // Clear the mark from the header/dmw copy in preparation for
 480     // possible restoration from this thread.
 481     assert(dmw.hash() == 0, "hashcode must be 0: dmw=" INTPTR_FORMAT,
 482            dmw.value());
 483     dmw = dmw.set_unmarked();
 484   }
 485   assert(dmw.is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, dmw.value());
 486 
 487   // Install displaced mark word if the object's header still points
 488   // to this ObjectMonitor. All racing callers to this function will
 489   // reach this point, but only one can win.
 490   obj->cas_set_mark(dmw, markWord::encode(this));
 491 
 492   // Note: It does not matter which thread restored the header/dmw
 493   // into the object's header. The thread deflating the monitor just
 494   // wanted the object's header restored and it is. The threads that
 495   // detected a race with the deflation process also wanted the
 496   // object's header restored before they retry their operation and
 497   // because it is restored they will only retry once.
 498 }
 499 
 500 // Convert the fields used by is_busy() to a string that can be
 501 // used for diagnostic output.
 502 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
 503   ss->print("is_busy: contentions=%d, waiters=%d, ", _contentions, _waiters);
 504   if (!AsyncDeflateIdleMonitors) {
 505     ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
 506   } else if (_owner != DEFLATER_MARKER) {
 507     ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
 508   } else {
 509     // We report NULL instead of DEFLATER_MARKER here because is_busy()
 510     // ignores DEFLATER_MARKER values.
 511     ss->print("owner=" INTPTR_FORMAT, p2i(NULL));
 512   }
 513   ss->print(", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, p2i(_cxq),
 514             p2i(_EntryList));
 515   return ss->base();
 516 }
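
For example (illustrative values only), a contended monitor might render as:

    is_busy: contentions=1, waiters=0, owner=0x00007f30100220a0, cxq=0x0000000000000000, EntryList=0x0000000000000000
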
 517 
 518 #define MAX_RECHECK_INTERVAL 1000
 519 
 520 void ObjectMonitor::EnterI(TRAPS) {
 521   jint l_ref_count = ref_count();
 522   ADIM_guarantee(l_ref_count > 0, "must be positive: l_ref_count=%d, ref_count=%d", l_ref_count, ref_count());
 523 
 524   Thread * const Self = THREAD;
 525   assert(Self->is_Java_thread(), "invariant");
 526   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 527 
 528   // Try the lock - TATAS
 529   if (TryLock (Self) > 0) {
 530     assert(_succ != Self, "invariant");
 531     assert(_owner == Self, "invariant");
 532     assert(_Responsible != Self, "invariant");
 533     return;
 534   }
 535 
 536   if (AsyncDeflateIdleMonitors &&
 537       try_set_owner_from(DEFLATER_MARKER, Self) == DEFLATER_MARKER) {
 538     // The deflation protocol finished the first part (setting owner),
 539     // but it failed the second part (making ref_count negative) and
 540     // bailed. Or the ObjectMonitor was async deflated and reused.
 541     // Acquired the monitor.
 542     assert(_succ != Self, "invariant");
 543     assert(_Responsible != Self, "invariant");
 544     return;
 545   }
 546 
 547   assert(InitDone, "Unexpectedly not initialized");
 548 
 549   // We try one round of spinning *before* enqueueing Self.
 550   //
 551   // If the _owner is ready but OFFPROC we could use a YieldTo()
 552   // operation to donate the remainder of this thread's quantum
 553   // to the owner.  This has subtle but beneficial affinity
 554   // effects.
 555 
 556   if (TrySpin(Self) > 0) {
 557     assert(_owner == Self, "invariant");
 558     assert(_succ != Self, "invariant");
 559     assert(_Responsible != Self, "invariant");
 560     return;
 561   }
 562 
 563   // The Spin failed -- Enqueue and park the thread ...
 564   assert(_succ != Self, "invariant");
 565   assert(_owner != Self, "invariant");
 566   assert(_Responsible != Self, "invariant");


 643 
 644   for (;;) {
 645 
 646     if (TryLock(Self) > 0) break;
 647     assert(_owner != Self, "invariant");
 648 
 649     // park self
 650     if (_Responsible == Self) {
 651       Self->_ParkEvent->park((jlong) recheckInterval);
 652       // Increase the recheckInterval, but clamp the value.
 653       recheckInterval *= 8;
 654       if (recheckInterval > MAX_RECHECK_INTERVAL) {
 655         recheckInterval = MAX_RECHECK_INTERVAL;
 656       }
 657     } else {
 658       Self->_ParkEvent->park();
 659     }
 660 
 661     if (TryLock(Self) > 0) break;
 662 
 663     if (AsyncDeflateIdleMonitors &&
 664         try_set_owner_from(DEFLATER_MARKER, Self) == DEFLATER_MARKER) {
 665       // The deflation protocol finished the first part (setting owner),
 666       // but it failed the second part (making ref_count negative) and
 667       // bailed. Or the ObjectMonitor was async deflated and reused.
 668       // Acquired the monitor.
 669       break;
 670     }
 671 
 672     // The lock is still contested.
 673     // Keep a tally of the # of futile wakeups.
 674     // Note that the counter is not protected by a lock or updated by atomics.
 675     // That is by design - we trade "lossy" counters which are exposed to
 676     // races during updates for a lower probe effect.
 677 
 678     // This PerfData object can be used in parallel with a safepoint.
 679     // See the work around in PerfDataManager::destroy().
 680     OM_PERFDATA_OP(FutileWakeups, inc());
 681     ++nWakeups;
 682 
 683     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 684     // We can defer clearing _succ until after the spin completes
 685     // TrySpin() must tolerate being called with _succ == Self.
 686     // Try yet another round of adaptive spinning.
 687     if (TrySpin(Self) > 0) break;
 688 
 689     // We can find that we were unpark()ed and redesignated _succ while
 690     // we were spinning.  That's harmless.  If we iterate and call park(),
 691     // park() will consume the event and return immediately and we'll


 756   // the lock.   The barrier ensures that changes to monitor meta-data and data
 757   // protected by the lock will be visible before we release the lock, and
 758   // therefore before some other thread (CPU) has a chance to acquire the lock.
 759   // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
 760   //
 761   // Critically, any prior STs to _succ or EntryList must be visible before
 762   // the ST of null into _owner in the *subsequent* (following) corresponding
 763   // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
 764   // execute a serializing instruction.
 765 
 766   return;
 767 }
 768 
 769 // ReenterI() is a specialized inline form of the latter half of the
 770 // contended slow-path from EnterI().  We use ReenterI() only for
 771 // monitor reentry in wait().
 772 //
 773 // In the future we should reconcile EnterI() and ReenterI().
 774 
 775 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
 776   jint l_ref_count = ref_count();
 777   ADIM_guarantee(l_ref_count > 0, "must be positive: l_ref_count=%d, ref_count=%d", l_ref_count, ref_count());
 778 
 779   assert(Self != NULL, "invariant");
 780   assert(SelfNode != NULL, "invariant");
 781   assert(SelfNode->_thread == Self, "invariant");
 782   assert(_waiters > 0, "invariant");
 783   assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
 784   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 785   JavaThread * jt = (JavaThread *) Self;
 786 
 787   int nWakeups = 0;
 788   for (;;) {
 789     ObjectWaiter::TStates v = SelfNode->TState;
 790     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 791     assert(_owner != Self, "invariant");
 792 
 793     if (TryLock(Self) > 0) break;
 794     if (TrySpin(Self) > 0) break;
 795 
 796     if (AsyncDeflateIdleMonitors &&
 797         try_set_owner_from(DEFLATER_MARKER, Self) == DEFLATER_MARKER) {
 798       // The deflation protocol finished the first part (setting owner),
 799       // but it failed the second part (making ref_count negative) and
 800       // bailed. Or the ObjectMonitor was async deflated and reused.
 801       // Acquired the monitor.
 802       break;
 803     }
 804 
 805     // State transition wrappers around park() ...
 806     // ReenterI() wisely defers state transitions until
 807     // it's clear we must park the thread.
 808     {
 809       OSThreadContendState osts(Self->osthread());
 810       ThreadBlockInVM tbivm(jt);
 811 
 812       // cleared by handle_special_suspend_equivalent_condition()
 813       // or java_suspend_self()
 814       jt->set_suspend_equivalent();
 815       Self->_ParkEvent->park();
 816 
 817       // were we externally suspended while we were waiting?
 818       for (;;) {
 819         if (!ExitSuspendEquivalent(jt)) break;
 820         if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
 821         jt->java_suspend_self();
 822         jt->set_suspend_equivalent();
 823       }
 824     }


 928   SelfNode->_prev  = (ObjectWaiter *) 0xBAD;
 929   SelfNode->_next  = (ObjectWaiter *) 0xBAD;
 930   SelfNode->TState = ObjectWaiter::TS_RUN;
 931 #endif
 932 }
 933 
 934 // -----------------------------------------------------------------------------
 935 // Exit support
 936 //
 937 // exit()
 938 // ~~~~~~
 939 // Note that the collector can't reclaim the objectMonitor or deflate
 940 // the object out from underneath the thread calling ::exit() as the
 941 // thread calling ::exit() never transitions to a stable state.
 942 // This inhibits GC, which in turn inhibits asynchronous (and
 943 // inopportune) reclamation of "this".
 944 //
 945 // We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
 946 // There's one exception to the claim above, however.  EnterI() can call
 947 // exit() to drop a lock if the acquirer has been externally suspended.
 948 // In that case exit() is called with _thread_state == _thread_blocked,
 949 // but the monitor's ref_count is > 0, which inhibits reclamation.
 950 //
 951 // 1-0 exit
 952 // ~~~~~~~~
 953 // ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
 954 // the fast-path operators have been optimized so the common ::exit()
 955 // operation is 1-0, e.g., see macroAssembler_x86.cpp: fast_unlock().
 956 // The code emitted by fast_unlock() elides the usual MEMBAR.  This
 957 // greatly improves latency -- MEMBAR and CAS having considerable local
 958 // latency on modern processors -- but at the cost of "stranding".  Absent the
 959 // MEMBAR, a thread in fast_unlock() can race a thread in the slow
 960 // ::enter() path, resulting in the entering thread being stranded
 961 // and a progress-liveness failure.   Stranding is extremely rare.
 962 // We use timers (timed park operations) & periodic polling to detect
 963 // and recover from stranding.  Potentially stranded threads periodically
 964 // wake up and poll the lock.  See the usage of the _Responsible variable.
 965 //
 966 // The CAS() in enter provides for safety and exclusion, while the CAS or
 967 // MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
 968 // eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
 969 // We detect and recover from stranding with timers.


 972 // thread acquires the lock and then drops the lock, at which time the
 973 // exiting thread will notice and unpark the stranded thread, or, (b)
 974 // the timer expires.  If the lock is high traffic then the stranding latency
 975 // will be low due to (a).  If the lock is low traffic then the odds of
 976 // stranding are lower, although the worst-case stranding latency
 977 // is longer.  Critically, we don't want to put excessive load in the
 978 // platform's timer subsystem.  We want to minimize both the timer injection
 979 // rate (timers created/sec) as well as the number of timers active at
 980 // any one time.  (More precisely, we want to minimize timer-seconds:
 981 // the integral over time of the number of active timers.)
 982 // Both impinge on OS scalability.  Given that, at most one thread parked on
 983 // a monitor will use a timer.
 984 //
 985 // There is also the risk of a futile wake-up. If we drop the lock
 986 // another thread can reacquire the lock immediately, and we can
 987 // then wake a thread unnecessarily. This is benign, and we've
 988 // structured the code so the windows are short and the frequency
 989 // of such futile wakeups is low.
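//
// Illustrative sketch of the timed-park recovery loop run by the one
// _Responsible thread (simplified; recheck_interval is a stand-in
// name, and the real logic lives in EnterI()):
//
//   jlong recheck_interval = 1;                  // milliseconds
//   for (;;) {
//     if (try_set_owner_from(NULL, Self) == NULL) {
//       break;                                   // lock acquired
//     }
//     Self->_ParkEvent->park(recheck_interval);  // timed, not unbounded
//     // On wakeup -- timeout or unpark -- we re-probe the lock, which
//     // recovers from a stranded 1-0 exit.  Exponential backoff bounds
//     // the injection rate into the platform's timer subsystem.
//     recheck_interval *= 8;
//     if (recheck_interval > 1000) recheck_interval = 1000;
//   }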
 990 
 991 void ObjectMonitor::exit(bool not_suspended, TRAPS) {
 992   Thread* const Self = THREAD;
 993   void* cur = Atomic::load(&_owner);
 994   if (THREAD != cur) {
 995     if (THREAD->is_lock_owned((address)cur)) {



 996       assert(_recursions == 0, "invariant");
 997       set_owner_from_BasicLock(cur, Self);  // Convert from BasicLock* to Thread*.
 998       _recursions = 0;
 999     } else {
1000       // Apparent unbalanced locking ...
1001       // Naively we'd like to throw IllegalMonitorStateException.
1002       // As a practical matter we can neither allocate nor throw an
1003       // exception as ::exit() can be called from leaf routines.
1004       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
1005       // Upon deeper reflection, however, in a properly run JVM the only
1006       // way we should encounter this situation is in the presence of
1007       // unbalanced JNI locking. TODO: CheckJNICalls.
1008       // See also: CR4414101
1009 #ifdef ASSERT
1010       LogStreamHandle(Error, monitorinflation) lsh;
1011       lsh.print_cr("ERROR: ObjectMonitor::exit(): thread=" INTPTR_FORMAT
1012                     " is exiting an ObjectMonitor it does not own.", p2i(THREAD));
1013       lsh.print_cr("The imbalance is possibly caused by JNI locking.");
1014       print_debug_style_on(&lsh);
1015 #endif
1016       assert(false, "Non-balanced monitor enter/exit!");
1017       return;


1021   if (_recursions != 0) {
 1022     _recursions--;        // this is a simple recursive exit
1023     return;
1024   }
1025 
 1026   // Invariant: after setting Responsible=null a thread must execute
1027   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
1028   _Responsible = NULL;
1029 
1030 #if INCLUDE_JFR
1031   // get the owner's thread id for the MonitorEnter event
1032   // if it is enabled and the thread isn't suspended
1033   if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
1034     _previous_owner_tid = JFR_THREAD_ID(Self);
1035   }
1036 #endif
1037 
1038   for (;;) {
1039     assert(THREAD == _owner, "invariant");
1040 
1041     // Drop the lock.
1042     // release semantics: prior loads and stores from within the critical section
1043     // must not float (reorder) past the following store that drops the lock.
1044     // Uses a storeload to separate release_store(owner) from the
 1045     // successor check. The try_set_owner_from() below uses cmpxchg() so
1046     // we get the fence down there.
1047     release_clear_owner(Self);
1048     OrderAccess::storeload();
1049 
1050     if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
1051       return;
1052     }
1053     // Other threads are blocked trying to acquire the lock.
1054 
1055     // Normally the exiting thread is responsible for ensuring succession,
1056     // but if other successors are ready or other entering threads are spinning
1057     // then this thread can simply store NULL into _owner and exit without
1058     // waking a successor.  The existence of spinners or ready successors
1059     // guarantees proper succession (liveness).  Responsibility passes to the
1060     // ready or running successors.  The exiting thread delegates the duty.
1061     // More precisely, if a successor already exists this thread is absolved
1062     // of the responsibility of waking (unparking) one.
1063     //
1064     // The _succ variable is critical to reducing futile wakeup frequency.
1065     // _succ identifies the "heir presumptive" thread that has been made
1066     // ready (unparked) but that has not yet run.  We need only one such
1067     // successor thread to guarantee progress.
1068     // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
1069     // section 3.3 "Futile Wakeup Throttling" for details.


1071     // Note that spinners in Enter() also set _succ non-null.
1072     // In the current implementation spinners opportunistically set
1073     // _succ so that exiting threads might avoid waking a successor.
1074     // Another less appealing alternative would be for the exiting thread
1075     // to drop the lock and then spin briefly to see if a spinner managed
1076     // to acquire the lock.  If so, the exiting thread could exit
1077     // immediately without waking a successor, otherwise the exiting
1078     // thread would need to dequeue and wake a successor.
1079     // (Note that we'd need to make the post-drop spin short, but no
1080     // shorter than the worst-case round-trip cache-line migration time.
1081     // The dropped lock needs to become visible to the spinner, and then
1082     // the acquisition of the lock by the spinner must become visible to
1083     // the exiting thread).
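    //
    // Spinner-side sketch (illustrative; the corresponding logic is in
    // the spin code): a spinner advertises itself in _succ so exiting
    // threads can skip the wakeup, and clears the hint when its spin
    // attempt ends:
    //
    //   if (_succ == NULL) {
    //     _succ = Self;        // "a successor exists" hint for exit()
    //   }
    //   ... spin on _owner ...
    //   if (_succ == Self) {
    //     _succ = NULL;        // don't leave a stale hint behind
    //   }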
1084 
1085     // It appears that an heir-presumptive (successor) must be made ready.
1086     // Only the current lock owner can manipulate the EntryList or
1087     // drain _cxq, so we need to reacquire the lock.  If we fail
1088     // to reacquire the lock the responsibility for ensuring succession
1089     // falls to the new owner.
1090     //
1091     if (try_set_owner_from(NULL, Self) != NULL) {
1092       return;
1093     }
1094 
1095     guarantee(_owner == THREAD, "invariant");
1096 
1097     ObjectWaiter * w = NULL;
1098 
1099     w = _EntryList;
1100     if (w != NULL) {
1101       // I'd like to write: guarantee (w->_thread != Self).
1102       // But in practice an exiting thread may find itself on the EntryList.
1103       // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
 1104 // then calls exit().  Exit releases the lock by setting O._owner to NULL.
1105       // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
1106       // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
 1107 // releases the lock "O".  T2 resumes immediately after the ST of null into
1108       // _owner, above.  T2 notices that the EntryList is populated, so it
1109       // reacquires the lock and then finds itself on the EntryList.
1110       // Given all that, we have to tolerate the circumstance where "w" is
1111       // associated with Self.


1203 }
1204 
1205 
1206 void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
1207   assert(_owner == Self, "invariant");
1208 
1209   // Exit protocol:
1210   // 1. ST _succ = wakee
1211   // 2. membar #loadstore|#storestore;
 1212   // 3. ST _owner = NULL
 1213   // 4. unpark(wakee)
1214 
1215   _succ = Wakee->_thread;
1216   ParkEvent * Trigger = Wakee->_event;
1217 
1218   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
1219   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
1220   // out-of-scope (non-extant).
1221   Wakee  = NULL;
1222 
1223   // Drop the lock.
1224   // Uses a fence to separate release_store(owner) from the LD in unpark().
1225   release_clear_owner(Self);
1226   OrderAccess::fence();
1227 
1228   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
1229   Trigger->unpark();
1230 
1231   // Maintain stats and report events to JVMTI
1232   OM_PERFDATA_OP(Parks, inc());
1233 }
1234 
1235 
1236 // -----------------------------------------------------------------------------
1237 // Class Loader deadlock handling.
1238 //
1239 // complete_exit exits a lock returning recursion count
1240 // complete_exit/reenter operate as a wait without waiting
1241 // complete_exit requires an inflated monitor
1242 // The _owner field is not always the Thread addr even with an
1243 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1244 // thread due to contention.
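//
// Usage sketch (illustrative; real callers go through
// ObjectSynchronizer): the recursion count saved by complete_exit()
// is handed back to reenter() to restore the lock state:
//
//   intx saved = monitor->complete_exit(THREAD);  // fully drop the lock
//   ... block without holding the monitor ...
//   monitor->reenter(saved, THREAD);              // reacquire + restore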
1245 intx ObjectMonitor::complete_exit(TRAPS) {
1246   Thread * const Self = THREAD;
1247   assert(Self->is_Java_thread(), "Must be Java thread!");
1248   JavaThread *jt = (JavaThread *)THREAD;
1249 
1250   assert(InitDone, "Unexpectedly not initialized");
1251 
1252   void* cur = Atomic::load(&_owner);
1253   if (THREAD != cur) {
1254     if (THREAD->is_lock_owned((address)cur)) {
1255       assert(_recursions == 0, "internal state error");
1256       set_owner_from_BasicLock(cur, Self);  // Convert from BasicLock* to Thread*.
1257       _recursions = 0;
1258     }
1259   }
1260 
1261   guarantee(Self == _owner, "complete_exit not owner");
1262   intx save = _recursions; // record the old recursion count
1263   _recursions = 0;        // set the recursion level to be 0
1264   exit(true, Self);           // exit the monitor
1265   guarantee(_owner != Self, "invariant");
1266   return save;
1267 }
1268 
1269 // reenter() enters a lock and sets recursion count
1270 // complete_exit/reenter operate as a wait without waiting
1271 void ObjectMonitor::reenter(intx recursions, TRAPS) {
1272   Thread * const Self = THREAD;
1273   assert(Self->is_Java_thread(), "Must be Java thread!");
1274   JavaThread *jt = (JavaThread *)THREAD;
1275 
1276   guarantee(_owner != Self, "reenter already owner");
1277   enter(THREAD);
1278   // Entered the monitor.
1279   guarantee(_recursions == 0, "reenter recursion");
1280   _recursions = recursions;

1281 }
1282 
1283 // Checks that the current THREAD owns this monitor and causes an
1284 // immediate return if it doesn't. We don't use the CHECK macro
1285 // because we want the IMSE to be the only exception that is thrown
1286 // from the call site when false is returned. Any other pending
1287 // exception is ignored.
1288 #define CHECK_OWNER()                                                  \
1289   do {                                                                 \
1290     if (!check_owner(THREAD)) {                                        \
1291        assert(HAS_PENDING_EXCEPTION, "expected a pending IMSE here."); \
1292        return;                                                         \
1293      }                                                                 \
1294   } while (false)
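
// Usage sketch: a Java-visible monitor operation starts with the macro
// so an IMSE is raised before any monitor state is touched, e.g.:
//
//   void ObjectMonitor::some_monitor_op(TRAPS) {  // hypothetical op
//     CHECK_OWNER();  // returns immediately with a pending IMSE
//     ...
//   }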
1295 
1296 // Returns true if the specified thread owns the ObjectMonitor.
1297 // Otherwise returns false and throws IllegalMonitorStateException
1298 // (IMSE). If there is a pending exception and the specified thread
1299 // is not the owner, that exception will be replaced by the IMSE.
1300 bool ObjectMonitor::check_owner(Thread* THREAD) {
1301   void* cur = Atomic::load(&_owner);
1302   if (cur == THREAD) {
1303     return true;
1304   }
1305   if (THREAD->is_lock_owned((address)cur)) {
1306     set_owner_from_BasicLock(cur, THREAD);  // Convert from BasicLock* to Thread*.
1307     _recursions = 0;
1308     return true;
1309   }
1310   THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
1311              "current thread is not owner", false);
1312 }
1313 
1314 static void post_monitor_wait_event(EventJavaMonitorWait* event,
1315                                     ObjectMonitor* monitor,
1316                                     jlong notifier_tid,
1317                                     jlong timeout,
1318                                     bool timedout) {
1319   assert(event != NULL, "invariant");
1320   assert(monitor != NULL, "invariant");
1321   event->set_monitorClass(((oop)monitor->object())->klass());
1322   event->set_timeout(timeout);
1323   event->set_address((uintptr_t)monitor->object_addr());
1324   event->set_notifier(notifier_tid);
1325   event->set_timedOut(timedout);
1326   event->commit();


1795     // We periodically check to see if there's a safepoint pending.
1796     if ((ctr & 0xFF) == 0) {
1797       if (SafepointMechanism::should_block(Self)) {
1798         goto Abort;           // abrupt spin egress
1799       }
1800       SpinPause();
1801     }
1802 
1803     // Probe _owner with TATAS
1804     // If this thread observes the monitor transition or flicker
1805     // from locked to unlocked to locked, then the odds that this
1806     // thread will acquire the lock in this spin attempt go down
1807     // considerably.  The same argument applies if the CAS fails
1808     // or if we observe _owner change from one non-null value to
1809     // another non-null value.   In such cases we might abort
1810     // the spin without prejudice or apply a "penalty" to the
1811     // spin count-down variable "ctr", reducing it by 100, say.
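    //
    // For reference, the classic TATAS shape the probe below follows
    // (sketch using a plain int lock word rather than _owner):
    //
    //   for (;;) {
    //     if (Atomic::load(&lock_word) == 0 &&           // test: cheap read
    //         Atomic::cmpxchg(&lock_word, 0, 1) == 0) {  // then test-and-set
    //       break;                                       // acquired
    //     }
    //     SpinPause();             // stay in-cache; no coherence traffic
    //   }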
1812 
1813     Thread * ox = (Thread *) _owner;
1814     if (ox == NULL) {
1815       ox = (Thread*)try_set_owner_from(NULL, Self);
1816       if (ox == NULL) {
1817         // The CAS succeeded -- this thread acquired ownership
1818         // Take care of some bookkeeping to exit spin state.
1819         if (_succ == Self) {
1820           _succ = NULL;
1821         }
1822 
1823         // Increase _SpinDuration :
1824         // The spin was successful (profitable) so we tend toward
1825         // longer spin attempts in the future.
1826         // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
1827         // If we acquired the lock early in the spin cycle it
1828         // makes sense to increase _SpinDuration proportionally.
1829         // Note that we don't clamp SpinDuration precisely at SpinLimit.
1830         int x = _SpinDuration;
1831         if (x < Knob_SpinLimit) {
1832           if (x < Knob_Poverty) x = Knob_Poverty;
1833           _SpinDuration = x + Knob_Bonus;
1834         }
1835         return 1;


2059   }
2060 #define NEWPERFVARIABLE(n)                                                \
2061   {                                                                       \
2062     n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,  \
2063                                          CHECK);                          \
2064   }
2065     NEWPERFCOUNTER(_sync_Inflations);
2066     NEWPERFCOUNTER(_sync_Deflations);
2067     NEWPERFCOUNTER(_sync_ContendedLockAttempts);
2068     NEWPERFCOUNTER(_sync_FutileWakeups);
2069     NEWPERFCOUNTER(_sync_Parks);
2070     NEWPERFCOUNTER(_sync_Notifications);
2071     NEWPERFVARIABLE(_sync_MonExtant);
2072 #undef NEWPERFCOUNTER
2073 #undef NEWPERFVARIABLE
2074   }
2075 
2076   DEBUG_ONLY(InitDone = true;)
2077 }
2078 
2079 ObjectMonitorHandle::~ObjectMonitorHandle() {
2080   if (_om_ptr != NULL) {
2081     _om_ptr->dec_ref_count();
2082     _om_ptr = NULL;
2083   }
2084 }
2085 
2086 // Save the ObjectMonitor* associated with the specified markWord and
2087 // increment the ref_count. This function should only be called if
2088 // the caller has verified mark.has_monitor() == true. The object
2089 // parameter is needed to verify that ObjectMonitor* has not been
2090 // deflated and reused for another object.
2091 //
2092 // This function returns true if the ObjectMonitor* has been safely
2093 // saved. This function returns false if we have lost a race with
2094 // async deflation; the caller should retry as appropriate.
2095 //
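// Caller-side sketch (illustrative, simplified from the real callers):
//
//   ObjectMonitorHandle omh;
//   for (;;) {
//     markWord mark = object->mark();
//     if (mark.has_monitor() && omh.save_om_ptr(object, mark)) {
//       break;  // ref_count is bumped; async deflation is held off
//     }
//     // Lost the race with async deflation (or the mark changed);
//     // re-read the mark and retry.
//   }
//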
2096 bool ObjectMonitorHandle::save_om_ptr(oop object, markWord mark) {
2097   // is_marked() is a superset of has_monitor() so make sure we
2098   // are called with the proper markWord value.
2099   guarantee(mark.has_monitor() && !mark.is_marked(), "sanity check: mark="
2100             INTPTR_FORMAT, mark.value());
2101 
2102   ObjectMonitor* om_ptr = mark.monitor();
2103   om_ptr->inc_ref_count();
2104 
2105   if (AsyncDeflateIdleMonitors) {
2106     // Race here if monitor is not owned! The above ref_count bump
2107     // will cause subsequent async deflation to skip it. However,
2108     // previous or concurrent async deflation is a race.
2109     if (om_ptr->owner_is_DEFLATER_MARKER() && om_ptr->ref_count() <= 0) {
2110       // Async deflation is in progress and our ref_count increment
2111       // above lost the race to async deflation. Attempt to restore
2112       // the header/dmw to the object's header so that we only retry
2113       // once if the deflater thread happens to be slow.
2114       om_ptr->install_displaced_markword_in_object(object);
2115       om_ptr->dec_ref_count();
2116       return false;
2117     }
2118     if (om_ptr->ref_count() <= 0) {
2119       // Async deflation is in the process of bailing out, but has not
2120       // yet restored the ref_count field so we return false to force
2121       // a retry. We want a positive ref_count value for a true return.
2122       om_ptr->dec_ref_count();
2123       return false;
2124     }
2125     // The ObjectMonitor could have been deflated and reused for
2126     // another object before we bumped the ref_count so make sure
2127     // our object still refers to this ObjectMonitor.
2128     const markWord tmp = object->mark();
2129     if (!tmp.has_monitor() || tmp.monitor() != om_ptr) {
2130       // Async deflation and reuse won the race so we have to retry.
2131       // Skip object header restoration since that's already done.
2132       om_ptr->dec_ref_count();
2133       return false;
2134     }
2135   }
2136 
2137   ADIM_guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
2138                  p2i(_om_ptr));
2139   _om_ptr = om_ptr;
2140   return true;
2141 }
2142 
2143 // For internal use by ObjectSynchronizer::inflate().
2144 // This function is only used when we don't have to worry about async
2145 // deflation of the specified ObjectMonitor*.
2146 void ObjectMonitorHandle::set_om_ptr(ObjectMonitor* om_ptr) {
2147   if (_om_ptr == NULL) {
2148     ADIM_guarantee(om_ptr != NULL, "cannot clear an unset om_ptr");
2149     om_ptr->inc_ref_count();
2150     _om_ptr = om_ptr;
2151   } else {
2152     ADIM_guarantee(om_ptr == NULL, "can only clear a set om_ptr");
2153     _om_ptr->dec_ref_count();
2154     _om_ptr = NULL;
2155   }
2156 }
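
// Usage sketch: the same call toggles between set and clear:
//
//   omh.set_om_ptr(m);     // _om_ptr was NULL: save m, inc ref_count
//   ...
//   omh.set_om_ptr(NULL);  // _om_ptr was set: dec ref_count, clear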
2157 
2158 // Save the specified ObjectMonitor* if it is safe, i.e., not being
2159 // async deflated.
2160 //
2161 // This function returns true if the ObjectMonitor* has been safely
2162 // saved. This function returns false if the specified ObjectMonitor*
2163 // is NULL or if we have lost a race with async deflation; the caller
2164 // can retry as appropriate.
2165 bool ObjectMonitorHandle::set_om_ptr_if_safe(ObjectMonitor* om_ptr) {
2166   if (om_ptr == NULL) {
2167     return false;  // Nothing to save if input is NULL
2168   }
2169 
2170   om_ptr->inc_ref_count();
2171 
2172   if (AsyncDeflateIdleMonitors) {
2173     if (om_ptr->owner_is_DEFLATER_MARKER() && om_ptr->ref_count() <= 0) {
2174       // Async deflation is in progress and our ref_count increment
2175       // above lost the race to async deflation.
2176       om_ptr->dec_ref_count();
2177       return false;
2178     }
2179     if (om_ptr->ref_count() <= 0) {
2180       // Async deflation is in the process of bailing out, but has not
2181       // yet restored the ref_count field so we return false to force
2182       // a retry. We want a positive ref_count value for a true return.
2183       om_ptr->dec_ref_count();
2184       return false;
2185     }
2186     // Unlike save_om_ptr(), we don't have context to determine if
2187     // the ObjectMonitor has been deflated and reused for another
2188     // object.
2189   }
2190 
2191   ADIM_guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
2192                  p2i(_om_ptr));
2193   _om_ptr = om_ptr;
2194   return true;
2195 }
2196 
2197 // Unset the _om_ptr field and decrement the ref_count field.
2198 void ObjectMonitorHandle::unset_om_ptr() {
2199   ADIM_guarantee(_om_ptr != NULL, "_om_ptr must not be NULL");
2200   _om_ptr->dec_ref_count();
2201   _om_ptr = NULL;
2202 }
2203 
2204 void ObjectMonitor::print_on(outputStream* st) const {
 2205   // The minimal things to print for markWord printing; more can be added for debugging and logging.
2206   st->print("{contentions=0x%08x,waiters=0x%08x"
2207             ",recursions=" INTX_FORMAT ",owner=" INTPTR_FORMAT "}",
2208             contentions(), waiters(), recursions(),
2209             p2i(owner()));
2210 }
2211 void ObjectMonitor::print() const { print_on(tty); }
2212 
2213 #ifdef ASSERT
2214 // Print the ObjectMonitor like a debugger would:
2215 //
2216 // (ObjectMonitor) 0x00007fdfb6012e40 = {
2217 //   _header = 0x0000000000000001
2218 //   _object = 0x000000070ff45fd0
2219 //   _allocation_state = Old
2220 //   _pad_buf0 = {
2221 //     [0] = '\0'
2222 //     ...
2223 //     [43] = '\0'
2224 //   }
2225 //   _owner = 0x0000000000000000
2226 //   _previous_owner_tid = 0
2227 //   _pad_buf1 = {
2228 //     [0] = '\0'
2229 //     ...
2230 //     [47] = '\0'
2231 //   }
2232 //   _ref_count = 1
2233 //   _pad_buf2 = {
2234 //     [0] = '\0'
2235 //     ...
2236 //     [47] = '\0'
2237 //   }
2238 //   _next_om = 0x0000000000000000
2239 //   _recursions = 0
2240 //   _EntryList = 0x0000000000000000
2241 //   _cxq = 0x0000000000000000
2242 //   _succ = 0x0000000000000000
2243 //   _Responsible = 0x0000000000000000
2244 //   _Spinner = 0
2245 //   _SpinDuration = 5000
2246 //   _contentions = 0
2247 //   _WaitSet = 0x0000700009756248
2248 //   _waiters = 1
2249 //   _WaitSetLock = 0
2250 // }
2251 //
2252 void ObjectMonitor::print_debug_style_on(outputStream* st) const {
2253   st->print_cr("(ObjectMonitor*) " INTPTR_FORMAT " = {", p2i(this));
2254   st->print_cr("  _header = " INTPTR_FORMAT, header().value());
2255   st->print_cr("  _object = " INTPTR_FORMAT, p2i(_object));
2256   st->print("  _allocation_state = ");
2257   if (is_free()) {
2258     st->print("Free");
2259   } else if (is_old()) {
2260     st->print("Old");
2261   } else if (is_new()) {
2262     st->print("New");
2263   } else {
2264     st->print("unknown=%d", _allocation_state);
2265   }
2266   st->cr();
2267   st->print_cr("  _pad_buf0 = {");
2268   st->print_cr("    [0] = '\\0'");
2269   st->print_cr("    ...");
2270   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);
2271   st->print_cr("  }");
2272   st->print_cr("  _owner = " INTPTR_FORMAT, p2i(_owner));
2273   st->print_cr("  _previous_owner_tid = " JLONG_FORMAT, _previous_owner_tid);
2274   st->print_cr("  _pad_buf1 = {");
2275   st->print_cr("    [0] = '\\0'");
2276   st->print_cr("    ...");
2277   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf1) - 1);
2278   st->print_cr("  }");
2279   st->print_cr("  _ref_count = %d", ref_count());
2280   st->print_cr("  _pad_buf2 = {");
2281   st->print_cr("    [0] = '\\0'");
2282   st->print_cr("    ...");
 2283   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf2) - 1);
2284   st->print_cr("  }");
2285   st->print_cr("  _next_om = " INTPTR_FORMAT, p2i(next_om()));
2286   st->print_cr("  _recursions = " INTX_FORMAT, _recursions);
2287   st->print_cr("  _EntryList = " INTPTR_FORMAT, p2i(_EntryList));
2288   st->print_cr("  _cxq = " INTPTR_FORMAT, p2i(_cxq));
2289   st->print_cr("  _succ = " INTPTR_FORMAT, p2i(_succ));
2290   st->print_cr("  _Responsible = " INTPTR_FORMAT, p2i(_Responsible));
2291   st->print_cr("  _Spinner = %d", _Spinner);
2292   st->print_cr("  _SpinDuration = %d", _SpinDuration);
2293   st->print_cr("  _contentions = %d", _contentions);
2294   st->print_cr("  _WaitSet = " INTPTR_FORMAT, p2i(_WaitSet));
2295   st->print_cr("  _waiters = %d", _waiters);
2296   st->print_cr("  _WaitSetLock = %d", _WaitSetLock);
2297   st->print_cr("}");
2298 }
2299 #endif