
src/hotspot/share/runtime/objectMonitor.cpp

rev 56775 : imported patch 8230876.patch
rev 56776 : v2.00 -> v2.07 (CR7/v2.07/10-for-jdk14) patches combined into one; merge with 8230876.patch (2019.10.17) and jdk-14+21.
rev 56777 : See CR7-to-CR8-changes.


 222 // * See also http://blogs.sun.com/dave
 223 
 224 
 225 void* ObjectMonitor::operator new (size_t size) throw() {
 226   return AllocateHeap(size, mtInternal);
 227 }
 228 void* ObjectMonitor::operator new[] (size_t size) throw() {
 229   return operator new (size);
 230 }
 231 void ObjectMonitor::operator delete(void* p) {
 232   FreeHeap(p);
 233 }
 234 void ObjectMonitor::operator delete[] (void *p) {
 235   operator delete(p);
 236 }
 237 
 238 // -----------------------------------------------------------------------------
 239 // Enter support
 240 
 241 void ObjectMonitor::enter(TRAPS) {



 242   // The following code is ordered to check the most common cases first
 243   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 244   Thread * const Self = THREAD;
 245 
 246   void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
 247   if (cur == NULL) {
 248     assert(_recursions == 0, "invariant");
 249     return;
 250   }
 251 
 252   if (cur == Self) {
 253     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 254     _recursions++;
 255     return;
 256   }
 257 
 258   if (Self->is_lock_owned ((address)cur)) {
 259     assert(_recursions == 0, "internal state error");
 260     _recursions = 1;
 261     // Commute owner from a thread-specific on-stack BasicLockObject address to
 262     // a full-fledged "Thread *".
 263     _owner = Self;
 264     return;
 265   }
 266 
 267   // We've encountered genuine contention.
 268   assert(Self->_Stalled == 0, "invariant");
 269   Self->_Stalled = intptr_t(this);
 270 
 271   // Try one round of spinning *before* enqueueing Self
 272   // and before going through the awkward and expensive state
 273   // transitions.  The following spin is strictly optional ...
 274   // Note that if we acquire the monitor from an initial spin
 275   // we forgo posting JVMTI events and firing DTRACE probes.
 276   if (TrySpin(Self) > 0) {
 277     assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
 278     assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT,
 279            _recursions);
 280     assert(((oop)object())->mark() == markWord::encode(this),
 281            "object mark must match encoded this: mark=" INTPTR_FORMAT
 282            ", encoded this=" INTPTR_FORMAT, ((oop)object())->mark().value(),
 283            markWord::encode(this).value());
 284     Self->_Stalled = 0;
 285     return;
 286   }
 287 
 288   assert(_owner != Self, "invariant");
 289   assert(_succ != Self, "invariant");
 290   assert(Self->is_Java_thread(), "invariant");
 291   JavaThread * jt = (JavaThread *) Self;
 292   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 293   assert(jt->thread_state() != _thread_blocked, "invariant");
 294   assert(this->object() != NULL, "invariant");
 295   assert(_contentions >= 0, "invariant");
 296 
 297   // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
 298   // Ensure the object-monitor relationship remains stable while there's contention.
 299   Atomic::inc(&_contentions);


 300 
 301   JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
 302   EventJavaMonitorEnter event;
 303   if (event.should_commit()) {
 304     event.set_monitorClass(((oop)this->object())->klass());
 305     event.set_address((uintptr_t)(this->object_addr()));
 306   }
 307 
 308   { // Change java thread status to indicate blocked on monitor enter.
 309     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 310 
 311     Self->set_current_pending_monitor(this);
 312 
 313     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
 314     if (JvmtiExport::should_post_monitor_contended_enter()) {
 315       JvmtiExport::post_monitor_contended_enter(jt, this);
 316 
 317       // The current thread does not yet own the monitor and does not
 318       // yet appear on any queues that would get it made the successor.
 319       // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event


 341       //
 342       _recursions = 0;
 343       _succ = NULL;
 344       exit(false, Self);
 345 
 346       jt->java_suspend_self();
 347     }
 348     Self->set_current_pending_monitor(NULL);
 349 
 350     // We cleared the pending monitor info since we've just gotten past
 351     // the enter-check-for-suspend dance and we now own the monitor free
 352     // and clear, i.e., it is no longer pending. The ThreadBlockInVM
 353     // destructor can go to a safepoint at the end of this block. If we
 354     // do a thread dump during that safepoint, then this thread will show
 355     // as having "-locked" the monitor, but the OS and java.lang.Thread
 356     // states will still report that the thread is blocked trying to
 357     // acquire it.
 358   }
 359 
 360   Atomic::dec(&_contentions);
 361   assert(_contentions >= 0, "invariant");
 362   Self->_Stalled = 0;
 363 
 364   // Must either set _recursions = 0 or ASSERT _recursions == 0.
 365   assert(_recursions == 0, "invariant");
 366   assert(_owner == Self, "invariant");
 367   assert(_succ != Self, "invariant");
 368   assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
 369 
 370   // The thread -- now the owner -- is back in vm mode.
 371   // Report the glorious news via TI,DTrace and jvmstat.
 372   // The probe effect is non-trivial.  All the reportage occurs
 373   // while we hold the monitor, increasing the length of the critical
 374   // section.  Amdahl's parallel speedup law comes vividly into play.
 375   //
 376   // Another option might be to aggregate the events (thread local or
 377   // per-monitor aggregation) and defer reporting until a more opportune
 378   // time -- such as next time some thread encounters contention but has
 379   // yet to acquire the lock.  While spinning, that thread could
 380   // increment JVMStat counters, etc.
 381 


 385 
 386     // The current thread already owns the monitor and is not going to
 387     // call park() for the remainder of the monitor enter protocol. So
 388     // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
 389     // event handler consumed an unpark() issued by the thread that
 390     // just exited the monitor.
 391   }
 392   if (event.should_commit()) {
 393     event.set_previousOwner((uintptr_t)_previous_owner_tid);
 394     event.commit();
 395   }
 396   OM_PERFDATA_OP(ContendedLockAttempts, inc());
 397 }
 398 
 399 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 400 // Callers must compensate as needed.
 401 
 402 int ObjectMonitor::TryLock(Thread * Self) {
 403   void * own = _owner;
 404   if (own != NULL) return 0;
 405   if (Atomic::replace_if_null(Self, &_owner)) {
 406     assert(_recursions == 0, "invariant");
 407     return 1;
 408   }
 409   // The lock had been free momentarily, but we lost the race to the lock.
 410   // Interference -- the CAS failed.
 411   // We can either return -1 or retry.
 412   // Retry doesn't make as much sense because the lock was just acquired by another thread.
 413   return -1;
 414 }
 415 
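For context on the test-and-test-and-set (TATAS) shape of TryLock() above, here is a minimal stand-alone sketch using std::atomic in place of HotSpot's Atomic:: wrappers. The TatasLock type, its field names and its three-way return convention are illustrative only and are not part of this file.

// Illustrative sketch: a stand-alone TATAS try-lock in the spirit of
// ObjectMonitor::TryLock().  Returns 1 on acquire, 0 if the lock was
// observed held, -1 if the CAS lost a race.  As the caveat above notes,
// the failure paths are not serializing; a caller must compensate.
#include <atomic>

struct TatasLock {
  std::atomic<void*> owner{nullptr};

  int try_lock(void* self) {
    // Cheap test first: avoids the cache-line upgrade when the lock is
    // visibly held, mirroring the "if (own != NULL) return 0" fast path.
    if (owner.load(std::memory_order_relaxed) != nullptr) return 0;
    void* expected = nullptr;
    // Test-and-set only when the lock looked free.
    if (owner.compare_exchange_strong(expected, self,
                                      std::memory_order_acquire)) {
      return 1;   // acquired
    }
    return -1;    // interference: the lock was just taken by another thread
  }
};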
 416 // Convert the fields used by is_busy() to a string that can be
 417 // used for diagnostic output.
 418 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
 419   ss->print("is_busy: contentions=%d, waiters=%d, owner=" INTPTR_FORMAT
 420             ", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, _contentions,
 421             _waiters, p2i(_owner), p2i(_cxq), p2i(_EntryList));
 422   return ss->base();
 423 }
 424 
 425 #define MAX_RECHECK_INTERVAL 1000
 426 
 427 void ObjectMonitor::EnterI(TRAPS) {



 428   Thread * const Self = THREAD;
 429   assert(Self->is_Java_thread(), "invariant");
 430   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 431 
 432   // Try the lock - TATAS
 433   if (TryLock (Self) > 0) {
 434     assert(_succ != Self, "invariant");
 435     assert(_owner == Self, "invariant");
 436     assert(_Responsible != Self, "invariant");
 437     return;
 438   }
 439 
 440   assert(InitDone, "Unexpectedly not initialized");
 441 
 442   // We try one round of spinning *before* enqueueing Self.
 443   //
 444   // If the _owner is ready but OFFPROC we could use a YieldTo()
 445   // operation to donate the remainder of this thread's quantum
 446   // to the owner.  This has subtle but beneficial affinity
 447   // effects.
 448 
 449   if (TrySpin(Self) > 0) {
 450     assert(_owner == Self, "invariant");
 451     assert(_succ != Self, "invariant");
 452     assert(_Responsible != Self, "invariant");
 453     return;
 454   }
 455 
 456   // The Spin failed -- Enqueue and park the thread ...
 457   assert(_succ != Self, "invariant");
 458   assert(_owner != Self, "invariant");
 459   assert(_Responsible != Self, "invariant");


 536 
 537   for (;;) {
 538 
 539     if (TryLock(Self) > 0) break;
 540     assert(_owner != Self, "invariant");
 541 
 542     // park self
 543     if (_Responsible == Self) {
 544       Self->_ParkEvent->park((jlong) recheckInterval);
 545       // Increase the recheckInterval, but clamp the value.
 546       recheckInterval *= 8;
 547       if (recheckInterval > MAX_RECHECK_INTERVAL) {
 548         recheckInterval = MAX_RECHECK_INTERVAL;
 549       }
 550     } else {
 551       Self->_ParkEvent->park();
 552     }
 553 
 554     if (TryLock(Self) > 0) break;
 555 
 556     // The lock is still contested.
 557     // Keep a tally of the # of futile wakeups.
 558     // Note that the counter is not protected by a lock or updated by atomics.
 559     // That is by design - we trade "lossy" counters which are exposed to
 560     // races during updates for a lower probe effect.
 561 
 562     // This PerfData object can be used in parallel with a safepoint.
 563     // See the work around in PerfDataManager::destroy().
 564     OM_PERFDATA_OP(FutileWakeups, inc());
 565     ++nWakeups;
 566 
 567     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 568     // We can defer clearing _succ until after the spin completes.
 569     // TrySpin() must tolerate being called with _succ == Self.
 570     // Try yet another round of adaptive spinning.
 571     if (TrySpin(Self) > 0) break;
 572 
 573     // We can find that we were unpark()ed and redesignated _succ while
 574     // we were spinning.  That's harmless.  If we iterate and call park(),
 575     // park() will consume the event and return immediately and we'll


 640   // the lock.   The barrier ensures that changes to monitor meta-data and data
 641   // protected by the lock will be visible before we release the lock, and
 642   // therefore before some other thread (CPU) has a chance to acquire the lock.
 643   // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
 644   //
 645   // Critically, any prior STs to _succ or EntryList must be visible before
 646   // the ST of null into _owner in the *subsequent* (following) corresponding
 647   // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
 648   // execute a serializing instruction.
 649 
 650   return;
 651 }
 652 
 653 // ReenterI() is a specialized inline form of the latter half of the
 654 // contended slow-path from EnterI().  We use ReenterI() only for
 655 // monitor reentry in wait().
 656 //
 657 // In the future we should reconcile EnterI() and ReenterI().
 658 
 659 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {



 660   assert(Self != NULL, "invariant");
 661   assert(SelfNode != NULL, "invariant");
 662   assert(SelfNode->_thread == Self, "invariant");
 663   assert(_waiters > 0, "invariant");
 664   assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
 665   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 666   JavaThread * jt = (JavaThread *) Self;
 667 
 668   int nWakeups = 0;
 669   for (;;) {
 670     ObjectWaiter::TStates v = SelfNode->TState;
 671     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 672     assert(_owner != Self, "invariant");
 673 
 674     if (TryLock(Self) > 0) break;
 675     if (TrySpin(Self) > 0) break;
 676 
 677     // State transition wrappers around park() ...
 678     // ReenterI() wisely defers state transitions until
 679     // it's clear we must park the thread.
 680     {
 681       OSThreadContendState osts(Self->osthread());
 682       ThreadBlockInVM tbivm(jt);
 683 
 684       // cleared by handle_special_suspend_equivalent_condition()
 685       // or java_suspend_self()
 686       jt->set_suspend_equivalent();
 687       Self->_ParkEvent->park();
 688 
 689       // were we externally suspended while we were waiting?
 690       for (;;) {
 691         if (!ExitSuspendEquivalent(jt)) break;
 692         if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
 693         jt->java_suspend_self();
 694         jt->set_suspend_equivalent();
 695       }
 696     }


 846 // the timer expires.  If the lock is high traffic then the stranding latency
 847 // will be low due to (a).  If the lock is low traffic then the odds of
 848 // stranding are lower, although the worst-case stranding latency
 849 // is longer.  Critically, we don't want to put excessive load in the
 850 // platform's timer subsystem.  We want to minimize both the timer injection
 851 // rate (timers created/sec) as well as the number of timers active at
 852 // any one time.  (more precisely, we want to minimize timer-seconds, which is
 853 // the integral of the # of active timers at any instant over time).
 854 // Both impinge on OS scalability.  Given that, at most one thread parked on
 855 // a monitor will use a timer.
 856 //
 857 // There is also the risk of a futile wake-up. If we drop the lock
 858 // another thread can reacquire the lock immediately, and we can
 859 // then wake a thread unnecessarily. This is benign, and we've
 860 // structured the code so the windows are short and the frequency
 861 // of such futile wakeups is low.
 862 
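The comment above describes the timed-park strategy used by the single "Responsible" thread: park with a recheck interval that grows by a factor of 8 and is clamped, so a lost wakeup is bounded in time without flooding the timer subsystem. Below is a minimal stand-alone sketch of that backoff shape using std::condition_variable in place of a ParkEvent; the function, the lock_is_free flag and the constant name are illustrative assumptions, not HotSpot code.

// Illustrative sketch: timed waits with an exponentially growing,
// clamped recheck interval, in the spirit of the "Responsible" thread's
// park((jlong) recheckInterval) loop shown later in this file.
#include <chrono>
#include <condition_variable>
#include <mutex>

static const long kMaxRecheckIntervalMs = 1000;   // mirrors MAX_RECHECK_INTERVAL

void responsible_wait(std::mutex& m, std::condition_variable& cv,
                      const bool& lock_is_free) {
  long recheck_ms = 1;
  std::unique_lock<std::mutex> guard(m);
  while (!lock_is_free) {
    // A timed wait bounds stranding: even if the wakeup is lost, we re-probe.
    cv.wait_for(guard, std::chrono::milliseconds(recheck_ms));
    recheck_ms *= 8;                               // back off ...
    if (recheck_ms > kMaxRecheckIntervalMs) {
      recheck_ms = kMaxRecheckIntervalMs;          // ... but clamp
    }
  }
}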
 863 void ObjectMonitor::exit(bool not_suspended, TRAPS) {
 864   Thread * const Self = THREAD;
 865   if (THREAD != _owner) {
 866     if (THREAD->is_lock_owned((address) _owner)) {
 867       // Transmute _owner from a BasicLock pointer to a Thread address.
 868       // We don't need to hold _mutex for this transition.
 869       // Non-null to Non-null is safe as long as all readers can
 870       // tolerate either flavor.
 871       assert(_recursions == 0, "invariant");
 872       _owner = THREAD;
 873       _recursions = 0;
 874     } else {
 875       // Apparent unbalanced locking ...
 876       // Naively we'd like to throw IllegalMonitorStateException.
 877       // As a practical matter we can neither allocate nor throw an
 878       // exception as ::exit() can be called from leaf routines.
 879       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
 880       // Upon deeper reflection, however, in a properly run JVM the only
 881       // way we should encounter this situation is in the presence of
 882       // unbalanced JNI locking. TODO: CheckJNICalls.
 883       // See also: CR4414101
 884       assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");
 885       return;
 886     }
 887   }
 888 
 889   if (_recursions != 0) {
 890     _recursions--;        // this is simple recursive enter
 891     return;
 892   }
 893 
 894   // Invariant: after setting Responsible=null a thread must execute
 895   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
 896   _Responsible = NULL;
 897 
 898 #if INCLUDE_JFR
 899   // get the owner's thread id for the MonitorEnter event
 900   // if it is enabled and the thread isn't suspended
 901   if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
 902     _previous_owner_tid = JFR_THREAD_ID(Self);
 903   }
 904 #endif
 905 
 906   for (;;) {
 907     assert(THREAD == _owner, "invariant");
 908 
 909     // release semantics: prior loads and stores from within the critical section
 910     // must not float (reorder) past the following store that drops the lock.
 911     // On SPARC that requires MEMBAR #loadstore|#storestore.
 912     // But of course in TSO #loadstore|#storestore is not required.

 913     OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
 914     OrderAccess::storeload();                        // See if we need to wake a successor

 915     if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
 916       return;
 917     }
 918     // Other threads are blocked trying to acquire the lock.
 919 
 920     // Normally the exiting thread is responsible for ensuring succession,
 921     // but if other successors are ready or other entering threads are spinning
 922     // then this thread can simply store NULL into _owner and exit without
 923     // waking a successor.  The existence of spinners or ready successors
 924     // guarantees proper succession (liveness).  Responsibility passes to the
 925     // ready or running successors.  The exiting thread delegates the duty.
 926     // More precisely, if a successor already exists this thread is absolved
 927     // of the responsibility of waking (unparking) one.
 928     //
 929     // The _succ variable is critical to reducing futile wakeup frequency.
 930     // _succ identifies the "heir presumptive" thread that has been made
 931     // ready (unparked) but that has not yet run.  We need only one such
 932     // successor thread to guarantee progress.
 933     // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
 934     // section 3.3 "Futile Wakeup Throttling" for details.


 936     // Note that spinners in Enter() also set _succ non-null.
 937     // In the current implementation spinners opportunistically set
 938     // _succ so that exiting threads might avoid waking a successor.
 939     // Another less appealing alternative would be for the exiting thread
 940     // to drop the lock and then spin briefly to see if a spinner managed
 941     // to acquire the lock.  If so, the exiting thread could exit
 942     // immediately without waking a successor, otherwise the exiting
 943     // thread would need to dequeue and wake a successor.
 944     // (Note that we'd need to make the post-drop spin short, but no
 945     // shorter than the worst-case round-trip cache-line migration time.
 946     // The dropped lock needs to become visible to the spinner, and then
 947     // the acquisition of the lock by the spinner must become visible to
 948     // the exiting thread).
 949 
 950     // It appears that an heir-presumptive (successor) must be made ready.
 951     // Only the current lock owner can manipulate the EntryList or
 952     // drain _cxq, so we need to reacquire the lock.  If we fail
 953     // to reacquire the lock the responsibility for ensuring succession
 954     // falls to the new owner.
 955     //
 956     if (!Atomic::replace_if_null(THREAD, &_owner)) {
 957       return;
 958     }
 959 
 960     guarantee(_owner == THREAD, "invariant");
 961 
 962     ObjectWaiter * w = NULL;
 963 
 964     w = _EntryList;
 965     if (w != NULL) {
 966       // I'd like to write: guarantee (w->_thread != Self).
 967       // But in practice an exiting thread may find itself on the EntryList.
 968       // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
 969       // then calls exit().  Exit releases the lock by setting O._owner to NULL.
 970       // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
 971       // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
 972       // releases the lock "O".  T1 resumes immediately after the ST of null into
 973       // _owner, above.  T1 notices that the EntryList is populated, so it
 974       // reacquires the lock and then finds itself on the EntryList.
 975       // Given all that, we have to tolerate the circumstance where "w" is
 976       // associated with Self.


1069 
1070 
1071 void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
1072   assert(_owner == Self, "invariant");
1073 
1074   // Exit protocol:
1075   // 1. ST _succ = wakee
1076   // 2. membar #loadstore|#storestore;
 1077   // 3. ST _owner = NULL
 1078   // 4. unpark(wakee)
1079 
1080   _succ = Wakee->_thread;
1081   ParkEvent * Trigger = Wakee->_event;
1082 
1083   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
1084   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
1085   // out-of-scope (non-extant).
1086   Wakee  = NULL;
1087 
1088   // Drop the lock



1089   OrderAccess::release_store(&_owner, (void*)NULL);
1090   OrderAccess::fence();                               // ST _owner vs LD in unpark()

1091 
1092   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
1093   Trigger->unpark();
1094 
1095   // Maintain stats and report events to JVMTI
1096   OM_PERFDATA_OP(Parks, inc());
1097 }
1098 
1099 
1100 // -----------------------------------------------------------------------------
1101 // Class Loader deadlock handling.
1102 //
1103 // complete_exit exits a lock returning recursion count
1104 // complete_exit/reenter operate as a wait without waiting
1105 // complete_exit requires an inflated monitor
1106 // The _owner field is not always the Thread addr even with an
1107 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1108 // thread due to contention.
1109 intptr_t ObjectMonitor::complete_exit(TRAPS) {
1110   Thread * const Self = THREAD;
1111   assert(Self->is_Java_thread(), "Must be Java thread!");
1112   JavaThread *jt = (JavaThread *)THREAD;
1113 
1114   assert(InitDone, "Unexpectedly not initialized");
1115 
1116   if (THREAD != _owner) {
1117     if (THREAD->is_lock_owned ((address)_owner)) {

1118       assert(_recursions == 0, "internal state error");
1119       _owner = THREAD;   // Convert from basiclock addr to Thread addr
1120       _recursions = 0;
1121     }
1122   }
1123 
1124   guarantee(Self == _owner, "complete_exit not owner");
1125   intptr_t save = _recursions; // record the old recursion count
1126   _recursions = 0;        // set the recursion level to be 0
1127   exit(true, Self);           // exit the monitor
1128   guarantee(_owner != Self, "invariant");
1129   return save;
1130 }
1131 
1132 // reenter() enters a lock and sets recursion count
1133 // complete_exit/reenter operate as a wait without waiting
1134 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1135   Thread * const Self = THREAD;
1136   assert(Self->is_Java_thread(), "Must be Java thread!");
1137   JavaThread *jt = (JavaThread *)THREAD;
1138 
1139   guarantee(_owner != Self, "reenter already owner");
1140   enter(THREAD);       // enter the monitor

1141   guarantee(_recursions == 0, "reenter recursion");
1142   _recursions = recursions;
1143   return;
1144 }
1145 
1146 // Checks that the current THREAD owns this monitor and causes an
1147 // immediate return if it doesn't. We don't use the CHECK macro
1148 // because we want the IMSE to be the only exception that is thrown
1149 // from the call site when false is returned. Any other pending
1150 // exception is ignored.
1151 #define CHECK_OWNER()                                                  \
1152   do {                                                                 \
1153     if (!check_owner(THREAD)) {                                        \
1154        assert(HAS_PENDING_EXCEPTION, "expected a pending IMSE here."); \
1155        return;                                                         \
1156      }                                                                 \
1157   } while (false)
1158 
1159 // Returns true if the specified thread owns the ObjectMonitor.
1160 // Otherwise returns false and throws IllegalMonitorStateException
1161 // (IMSE). If there is a pending exception and the specified thread
1162 // is not the owner, that exception will be replaced by the IMSE.
1163 bool ObjectMonitor::check_owner(Thread* THREAD) {
1164   if (_owner == THREAD) {
1165     return true;
1166   }
1167   if (THREAD->is_lock_owned((address)_owner)) {
1168     _owner = THREAD;  // convert from BasicLock addr to Thread addr

1169     _recursions = 0;
1170     return true;
1171   }
1172   THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
1173              "current thread is not owner", false);
1174 }
1175 
1176 static void post_monitor_wait_event(EventJavaMonitorWait* event,
1177                                     ObjectMonitor* monitor,
1178                                     jlong notifier_tid,
1179                                     jlong timeout,
1180                                     bool timedout) {
1181   assert(event != NULL, "invariant");
1182   assert(monitor != NULL, "invariant");
1183   event->set_monitorClass(((oop)monitor->object())->klass());
1184   event->set_timeout(timeout);
1185   event->set_address((uintptr_t)monitor->object_addr());
1186   event->set_notifier(notifier_tid);
1187   event->set_timedOut(timedout);
1188   event->commit();


1653     // We periodically check to see if there's a safepoint pending.
1654     if ((ctr & 0xFF) == 0) {
1655       if (SafepointMechanism::should_block(Self)) {
1656         goto Abort;           // abrupt spin egress
1657       }
1658       SpinPause();
1659     }
1660 
1661     // Probe _owner with TATAS
1662     // If this thread observes the monitor transition or flicker
1663     // from locked to unlocked to locked, then the odds that this
1664     // thread will acquire the lock in this spin attempt go down
1665     // considerably.  The same argument applies if the CAS fails
1666     // or if we observe _owner change from one non-null value to
1667     // another non-null value.   In such cases we might abort
1668     // the spin without prejudice or apply a "penalty" to the
1669     // spin count-down variable "ctr", reducing it by 100, say.
1670 
1671     Thread * ox = (Thread *) _owner;
1672     if (ox == NULL) {
1673       ox = (Thread*)Atomic::cmpxchg(Self, &_owner, (void*)NULL);
1674       if (ox == NULL) {
1675         // The CAS succeeded -- this thread acquired ownership
1676         // Take care of some bookkeeping to exit spin state.
1677         if (_succ == Self) {
1678           _succ = NULL;
1679         }
1680 
1681         // Increase _SpinDuration :
1682         // The spin was successful (profitable) so we tend toward
1683         // longer spin attempts in the future.
1684         // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
1685         // If we acquired the lock early in the spin cycle it
1686         // makes sense to increase _SpinDuration proportionally.
1687         // Note that we don't clamp SpinDuration precisely at SpinLimit.
1688         int x = _SpinDuration;
1689         if (x < Knob_SpinLimit) {
1690           if (x < Knob_Poverty) x = Knob_Poverty;
1691           _SpinDuration = x + Knob_Bonus;
1692         }
1693         return 1;


1917   }
1918 #define NEWPERFVARIABLE(n)                                                \
1919   {                                                                       \
1920     n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,  \
1921                                          CHECK);                          \
1922   }
1923     NEWPERFCOUNTER(_sync_Inflations);
1924     NEWPERFCOUNTER(_sync_Deflations);
1925     NEWPERFCOUNTER(_sync_ContendedLockAttempts);
1926     NEWPERFCOUNTER(_sync_FutileWakeups);
1927     NEWPERFCOUNTER(_sync_Parks);
1928     NEWPERFCOUNTER(_sync_Notifications);
1929     NEWPERFVARIABLE(_sync_MonExtant);
1930 #undef NEWPERFCOUNTER
1931 #undef NEWPERFVARIABLE
1932   }
1933 
1934   DEBUG_ONLY(InitDone = true;)
1935 }
1936 
1937 void ObjectMonitor::print_on(outputStream* st) const {
1938   // The minimal things to print for markWord printing, more can be added for debugging and logging.
1939   st->print("{contentions=0x%08x,waiters=0x%08x"
1940             ",recursions=" INTPTR_FORMAT ",owner=" INTPTR_FORMAT "}",
1941             contentions(), waiters(), recursions(),
1942             p2i(owner()));
1943 }
1944 void ObjectMonitor::print() const { print_on(tty); }

 222 // * See also http://blogs.sun.com/dave
 223 
 224 
 225 void* ObjectMonitor::operator new (size_t size) throw() {
 226   return AllocateHeap(size, mtInternal);
 227 }
 228 void* ObjectMonitor::operator new[] (size_t size) throw() {
 229   return operator new (size);
 230 }
 231 void ObjectMonitor::operator delete(void* p) {
 232   FreeHeap(p);
 233 }
 234 void ObjectMonitor::operator delete[] (void *p) {
 235   operator delete(p);
 236 }
 237 
 238 // -----------------------------------------------------------------------------
 239 // Enter support
 240 
 241 void ObjectMonitor::enter(TRAPS) {
 242   jint l_ref_count = ref_count();
 243   ADIM_guarantee(l_ref_count > 0, "must be positive: l_ref_count=%d, ref_count=%d", l_ref_count, ref_count());
 244 
 245   // The following code is ordered to check the most common cases first
 246   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 247   Thread * const Self = THREAD;
 248 
 249   void* cur = try_set_owner_from(Self, NULL);
 250   if (cur == NULL) {
 251     assert(_recursions == 0, "invariant");
 252     return;
 253   }
 254 
 255   if (cur == Self) {
 256     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 257     _recursions++;
 258     return;
 259   }
 260 
 261   if (Self->is_lock_owned((address)cur)) {
 262     assert(_recursions == 0, "internal state error");
 263     _recursions = 1;
 264     simply_set_owner_from_BasicLock(Self, cur);  // Convert from BasicLock* to Thread*.
 265     return;
 266   }
 267 
 268   if (AsyncDeflateIdleMonitors &&
 269       try_set_owner_from(Self, DEFLATER_MARKER) == DEFLATER_MARKER) {
 270     // The deflation protocol finished the first part (setting owner),
 271     // but it failed the second part (making ref_count negative) and
 272     // bailed. Or the ObjectMonitor was async deflated and reused.
 273     // Acquired the monitor.
 274     assert(_recursions == 0, "invariant");
 275     return;
 276   }
 277 
 278   // We've encountered genuine contention.
 279   assert(Self->_Stalled == 0, "invariant");
 280   Self->_Stalled = intptr_t(this);
 281 
 282   // Try one round of spinning *before* enqueueing Self
 283   // and before going through the awkward and expensive state
 284   // transitions.  The following spin is strictly optional ...
 285   // Note that if we acquire the monitor from an initial spin
 286   // we forgo posting JVMTI events and firing DTRACE probes.
 287   if (TrySpin(Self) > 0) {
 288     assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
 289     assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);

 290     assert(((oop)object())->mark() == markWord::encode(this),
 291            "object mark must match encoded this: mark=" INTPTR_FORMAT
 292            ", encoded this=" INTPTR_FORMAT, ((oop)object())->mark().value(),
 293            markWord::encode(this).value());
 294     Self->_Stalled = 0;
 295     return;
 296   }
 297 
 298   assert(_owner != Self, "invariant");
 299   assert(_succ != Self, "invariant");
 300   assert(Self->is_Java_thread(), "invariant");
 301   JavaThread * jt = (JavaThread *) Self;
 302   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 303   assert(jt->thread_state() != _thread_blocked, "invariant");
 304   assert(this->object() != NULL, "invariant");
 305   assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions);
 306 
 307   // Prevent deflation. See ObjectSynchronizer::deflate_monitor(),
 308   // ObjectSynchronizer::deflate_monitor_using_JT() and is_busy().
 309   // Ensure the object <-> monitor relationship remains stable while
 310   // there's contention.
 311   Atomic::add(1, &_contentions);
 312 
 313   JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
 314   EventJavaMonitorEnter event;
 315   if (event.should_commit()) {
 316     event.set_monitorClass(((oop)this->object())->klass());
 317     event.set_address((uintptr_t)(this->object_addr()));
 318   }
 319 
 320   { // Change java thread status to indicate blocked on monitor enter.
 321     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 322 
 323     Self->set_current_pending_monitor(this);
 324 
 325     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
 326     if (JvmtiExport::should_post_monitor_contended_enter()) {
 327       JvmtiExport::post_monitor_contended_enter(jt, this);
 328 
 329       // The current thread does not yet own the monitor and does not
 330       // yet appear on any queues that would get it made the successor.
 331       // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event


 353       //
 354       _recursions = 0;
 355       _succ = NULL;
 356       exit(false, Self);
 357 
 358       jt->java_suspend_self();
 359     }
 360     Self->set_current_pending_monitor(NULL);
 361 
 362     // We cleared the pending monitor info since we've just gotten past
 363     // the enter-check-for-suspend dance and we now own the monitor free
 364     // and clear, i.e., it is no longer pending. The ThreadBlockInVM
 365     // destructor can go to a safepoint at the end of this block. If we
 366     // do a thread dump during that safepoint, then this thread will show
 367     // as having "-locked" the monitor, but the OS and java.lang.Thread
 368     // states will still report that the thread is blocked trying to
 369     // acquire it.
 370   }
 371 
 372   Atomic::dec(&_contentions);
 373   assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions);
 374   Self->_Stalled = 0;
 375 
 376   // Must either set _recursions = 0 or ASSERT _recursions == 0.
 377   assert(_recursions == 0, "invariant");
 378   assert(_owner == Self, "invariant");
 379   assert(_succ != Self, "invariant");
 380   assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
 381 
 382   // The thread -- now the owner -- is back in vm mode.
 383   // Report the glorious news via TI,DTrace and jvmstat.
 384   // The probe effect is non-trivial.  All the reportage occurs
 385   // while we hold the monitor, increasing the length of the critical
 386   // section.  Amdahl's parallel speedup law comes vividly into play.
 387   //
 388   // Another option might be to aggregate the events (thread local or
 389   // per-monitor aggregation) and defer reporting until a more opportune
 390   // time -- such as next time some thread encounters contention but has
 391   // yet to acquire the lock.  While spinning, that thread could
 392   // increment JVMStat counters, etc.
 393 


 397 
 398     // The current thread already owns the monitor and is not going to
 399     // call park() for the remainder of the monitor enter protocol. So
 400     // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
 401     // event handler consumed an unpark() issued by the thread that
 402     // just exited the monitor.
 403   }
 404   if (event.should_commit()) {
 405     event.set_previousOwner((uintptr_t)_previous_owner_tid);
 406     event.commit();
 407   }
 408   OM_PERFDATA_OP(ContendedLockAttempts, inc());
 409 }
 410 
 411 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 412 // Callers must compensate as needed.
 413 
 414 int ObjectMonitor::TryLock(Thread * Self) {
 415   void * own = _owner;
 416   if (own != NULL) return 0;
 417   if (try_set_owner_from(Self, NULL) == NULL) {
 418     assert(_recursions == 0, "invariant");
 419     return 1;
 420   }
 421   // The lock had been free momentarily, but we lost the race to the lock.
 422   // Interference -- the CAS failed.
 423   // We can either return -1 or retry.
 424   // Retry doesn't make as much sense because the lock was just acquired by another thread.
 425   return -1;
 426 }
 427 
 428 // Install the displaced mark word (dmw) of a deflating ObjectMonitor
 429 // into the header of the object associated with the monitor. This
 430 // idempotent method is called by a thread that is deflating a
 431 // monitor and by other threads that have detected a race with the
 432 // deflation process.
 433 void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
 434   // This function must only be called when (owner == DEFLATER_MARKER
 435   // && ref_count <= 0), but we can't guarantee that here because
 436   // those values could change when the ObjectMonitor gets moved from
 437   // the global free list to a per-thread free list.
 438 
 439   guarantee(obj != NULL, "must be non-NULL");
 440   if (object() != obj) {
 441     // ObjectMonitor's object ref no longer refers to the target object
 442     // so the object's header has already been restored.
 443     return;
 444   }
 445 
 446   markWord dmw = header();
 447   if (dmw.value() == 0) {
 448     // ObjectMonitor's header/dmw has been cleared so the ObjectMonitor
 449     // has been deflated and taken off the global free list.
 450     return;
 451   }
 452 
 453   // A non-NULL dmw has to be either neutral (not locked and not marked)
 454   // or already participating in this restoration protocol.
 455   assert(dmw.is_neutral() || (dmw.is_marked() && dmw.hash() == 0),
 456          "failed precondition: dmw=" INTPTR_FORMAT, dmw.value());
 457 
 458   markWord marked_dmw = markWord::zero();
 459   if (!dmw.is_marked() && dmw.hash() == 0) {
 460     // This dmw has not yet started the restoration protocol so we
 461     // mark a copy of the dmw to begin the protocol.
 462     // Note: A dmw with a hashcode does not take this code path.
 463     marked_dmw = dmw.set_marked();
 464 
 465     // All of the callers to this function can be racing with each
 466     // other trying to update the _header field.
 467     dmw = (markWord) Atomic::cmpxchg(marked_dmw, &_header, dmw);
 468     if (dmw.value() == 0) {
 469       // ObjectMonitor's header/dmw has been cleared so the object's
 470       // header has already been restored.
 471       return;
 472     }
 473     // The _header field is now marked. The winner's 'dmw' variable
 474     // contains the original, unmarked header/dmw value and any
 475     // losers have a marked header/dmw value that will be cleaned
 476     // up below.
 477   }
 478 
 479   if (dmw.is_marked()) {
 480     // Clear the mark from the header/dmw copy in preparation for
 481     // possible restoration from this thread.
 482     assert(dmw.hash() == 0, "hashcode must be 0: dmw=" INTPTR_FORMAT,
 483            dmw.value());
 484     dmw = dmw.set_unmarked();
 485   }
 486   assert(dmw.is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, dmw.value());
 487 
 488   // Install displaced mark word if the object's header still points
 489   // to this ObjectMonitor. All racing callers to this function will
 490   // reach this point, but only one can win.
 491   obj->cas_set_mark(dmw, markWord::encode(this));
 492 
 493   // Note: It does not matter which thread restored the header/dmw
 494   // into the object's header. The thread deflating the monitor just
 495   // wanted the object's header restored and it is. The threads that
 496   // detected a race with the deflation process also wanted the
 497   // object's header restored before they retry their operation and
 498   // because it is restored they will only retry once.
 499 }
 500 
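install_displaced_markword_in_object() above relies on an idempotent "racing restore" idiom: any number of threads may attempt the restoration, exactly one CAS takes effect, and every caller can then proceed as if it had done the work itself. A minimal stand-alone sketch of that idiom on a plain atomic word is shown here; the names and types are illustrative, not the markWord/cas_set_mark machinery used above.

// Illustrative sketch: many threads race to swing a word from a
// transient value back to a saved value.  Exactly one CAS wins; the
// losers simply observe that the word no longer holds the transient
// value, which is all any caller needs before retrying its operation.
#include <atomic>
#include <cstdint>

void racy_restore(std::atomic<uint64_t>& word,
                  uint64_t transient_value,   // e.g. a pointer to a monitor
                  uint64_t saved_value) {     // e.g. a displaced mark word
  uint64_t expected = transient_value;
  // It does not matter which caller's CAS succeeds, only that the word
  // is restored before anyone retries.
  word.compare_exchange_strong(expected, saved_value);
}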
 501 // Convert the fields used by is_busy() to a string that can be
 502 // used for diagnostic output.
 503 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
 504   ss->print("is_busy: contentions=%d, waiters=%d, ", _contentions, _waiters);
 505   if (!AsyncDeflateIdleMonitors) {
 506     ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
 507   } else if (_owner != DEFLATER_MARKER) {
 508     ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
 509   } else {
 510     // We report NULL instead of DEFLATER_MARKER here because is_busy()
 511     // ignores DEFLATER_MARKER values.
 512     ss->print("owner=" INTPTR_FORMAT, p2i(NULL));
 513   }
 514   ss->print(", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, p2i(_cxq),
 515             p2i(_EntryList));
 516   return ss->base();
 517 }
 518 
 519 #define MAX_RECHECK_INTERVAL 1000
 520 
 521 void ObjectMonitor::EnterI(TRAPS) {
 522   jint l_ref_count = ref_count();
 523   ADIM_guarantee(l_ref_count > 0, "must be positive: l_ref_count=%d, ref_count=%d", l_ref_count, ref_count());
 524 
 525   Thread * const Self = THREAD;
 526   assert(Self->is_Java_thread(), "invariant");
 527   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 528 
 529   // Try the lock - TATAS
 530   if (TryLock (Self) > 0) {
 531     assert(_succ != Self, "invariant");
 532     assert(_owner == Self, "invariant");
 533     assert(_Responsible != Self, "invariant");
 534     return;
 535   }
 536 
 537   if (AsyncDeflateIdleMonitors &&
 538       try_set_owner_from(Self, DEFLATER_MARKER) == DEFLATER_MARKER) {
 539     // The deflation protocol finished the first part (setting owner),
 540     // but it failed the second part (making ref_count negative) and
 541     // bailed. Or the ObjectMonitor was async deflated and reused.
 542     // Acquired the monitor.
 543     assert(_succ != Self, "invariant");
 544     assert(_Responsible != Self, "invariant");
 545     return;
 546   }
 547 
 548   assert(InitDone, "Unexpectedly not initialized");
 549 
 550   // We try one round of spinning *before* enqueueing Self.
 551   //
 552   // If the _owner is ready but OFFPROC we could use a YieldTo()
 553   // operation to donate the remainder of this thread's quantum
 554   // to the owner.  This has subtle but beneficial affinity
 555   // effects.
 556 
 557   if (TrySpin(Self) > 0) {
 558     assert(_owner == Self, "invariant");
 559     assert(_succ != Self, "invariant");
 560     assert(_Responsible != Self, "invariant");
 561     return;
 562   }
 563 
 564   // The Spin failed -- Enqueue and park the thread ...
 565   assert(_succ != Self, "invariant");
 566   assert(_owner != Self, "invariant");
 567   assert(_Responsible != Self, "invariant");


 644 
 645   for (;;) {
 646 
 647     if (TryLock(Self) > 0) break;
 648     assert(_owner != Self, "invariant");
 649 
 650     // park self
 651     if (_Responsible == Self) {
 652       Self->_ParkEvent->park((jlong) recheckInterval);
 653       // Increase the recheckInterval, but clamp the value.
 654       recheckInterval *= 8;
 655       if (recheckInterval > MAX_RECHECK_INTERVAL) {
 656         recheckInterval = MAX_RECHECK_INTERVAL;
 657       }
 658     } else {
 659       Self->_ParkEvent->park();
 660     }
 661 
 662     if (TryLock(Self) > 0) break;
 663 
 664     if (AsyncDeflateIdleMonitors &&
 665         try_set_owner_from(Self, DEFLATER_MARKER) == DEFLATER_MARKER) {
 666       // The deflation protocol finished the first part (setting owner),
 667       // but it failed the second part (making ref_count negative) and
 668       // bailed. Or the ObjectMonitor was async deflated and reused.
 669       // Acquired the monitor.
 670       break;
 671     }
 672 
 673     // The lock is still contested.
 674     // Keep a tally of the # of futile wakeups.
 675     // Note that the counter is not protected by a lock or updated by atomics.
 676     // That is by design - we trade "lossy" counters which are exposed to
 677     // races during updates for a lower probe effect.
 678 
 679     // This PerfData object can be used in parallel with a safepoint.
 680     // See the work around in PerfDataManager::destroy().
 681     OM_PERFDATA_OP(FutileWakeups, inc());
 682     ++nWakeups;
 683 
 684     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 685     // We can defer clearing _succ until after the spin completes.
 686     // TrySpin() must tolerate being called with _succ == Self.
 687     // Try yet another round of adaptive spinning.
 688     if (TrySpin(Self) > 0) break;
 689 
 690     // We can find that we were unpark()ed and redesignated _succ while
 691     // we were spinning.  That's harmless.  If we iterate and call park(),
 692     // park() will consume the event and return immediately and we'll


 757   // the lock.   The barrier ensures that changes to monitor meta-data and data
 758   // protected by the lock will be visible before we release the lock, and
 759   // therefore before some other thread (CPU) has a chance to acquire the lock.
 760   // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
 761   //
 762   // Critically, any prior STs to _succ or EntryList must be visible before
 763   // the ST of null into _owner in the *subsequent* (following) corresponding
 764   // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
 765   // execute a serializing instruction.
 766 
 767   return;
 768 }
 769 
 770 // ReenterI() is a specialized inline form of the latter half of the
 771 // contended slow-path from EnterI().  We use ReenterI() only for
 772 // monitor reentry in wait().
 773 //
 774 // In the future we should reconcile EnterI() and ReenterI().
 775 
 776 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
 777   jint l_ref_count = ref_count();
 778   ADIM_guarantee(l_ref_count > 0, "must be positive: l_ref_count=%d, ref_count=%d", l_ref_count, ref_count());
 779 
 780   assert(Self != NULL, "invariant");
 781   assert(SelfNode != NULL, "invariant");
 782   assert(SelfNode->_thread == Self, "invariant");
 783   assert(_waiters > 0, "invariant");
 784   assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
 785   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 786   JavaThread * jt = (JavaThread *) Self;
 787 
 788   int nWakeups = 0;
 789   for (;;) {
 790     ObjectWaiter::TStates v = SelfNode->TState;
 791     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 792     assert(_owner != Self, "invariant");
 793 
 794     if (TryLock(Self) > 0) break;
 795     if (TrySpin(Self) > 0) break;
 796 
 797     if (AsyncDeflateIdleMonitors &&
 798         try_set_owner_from(Self, DEFLATER_MARKER) == DEFLATER_MARKER) {
 799       // The deflation protocol finished the first part (setting owner),
 800       // but it failed the second part (making ref_count negative) and
 801       // bailed. Or the ObjectMonitor was async deflated and reused.
 802       // Acquired the monitor.
 803       break;
 804     }
 805 
 806     // State transition wrappers around park() ...
 807     // ReenterI() wisely defers state transitions until
 808     // it's clear we must park the thread.
 809     {
 810       OSThreadContendState osts(Self->osthread());
 811       ThreadBlockInVM tbivm(jt);
 812 
 813       // cleared by handle_special_suspend_equivalent_condition()
 814       // or java_suspend_self()
 815       jt->set_suspend_equivalent();
 816       Self->_ParkEvent->park();
 817 
 818       // were we externally suspended while we were waiting?
 819       for (;;) {
 820         if (!ExitSuspendEquivalent(jt)) break;
 821         if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
 822         jt->java_suspend_self();
 823         jt->set_suspend_equivalent();
 824       }
 825     }


 975 // the timer expires.  If the lock is high traffic then the stranding latency
 976 // will be low due to (a).  If the lock is low traffic then the odds of
 977 // stranding are lower, although the worst-case stranding latency
 978 // is longer.  Critically, we don't want to put excessive load in the
 979 // platform's timer subsystem.  We want to minimize both the timer injection
 980 // rate (timers created/sec) as well as the number of timers active at
 981 // any one time.  (more precisely, we want to minimize timer-seconds, which is
 982 // the integral of the # of active timers at any instant over time).
 983 // Both impinge on OS scalability.  Given that, at most one thread parked on
 984 // a monitor will use a timer.
 985 //
 986 // There is also the risk of a futile wake-up. If we drop the lock
 987 // another thread can reacquire the lock immediately, and we can
 988 // then wake a thread unnecessarily. This is benign, and we've
 989 // structured the code so the windows are short and the frequency
 990 // of such futile wakeups is low.
 991 
 992 void ObjectMonitor::exit(bool not_suspended, TRAPS) {
 993   Thread * const Self = THREAD;
 994   if (THREAD != _owner) {
 995     void* cur = _owner;
 996     if (THREAD->is_lock_owned((address)cur)) {



 997       assert(_recursions == 0, "invariant");
 998       simply_set_owner_from_BasicLock(Self, cur);  // Convert from BasicLock* to Thread*.
 999       _recursions = 0;
1000     } else {
1001       // Apparent unbalanced locking ...
1002       // Naively we'd like to throw IllegalMonitorStateException.
1003       // As a practical matter we can neither allocate nor throw an
1004       // exception as ::exit() can be called from leaf routines.
1005       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
1006       // Upon deeper reflection, however, in a properly run JVM the only
1007       // way we should encounter this situation is in the presence of
1008       // unbalanced JNI locking. TODO: CheckJNICalls.
1009       // See also: CR4414101
1010       tty->print_cr("ERROR: ObjectMonitor::exit(): thread=" INTPTR_FORMAT
1011                     " is exiting an ObjectMonitor it does not own.",
1012                     p2i(THREAD));
1013       tty->print_cr("The imbalance is possibly caused by JNI locking.");
1014       print_debug_style_on(tty);
1015       // Changing this from an assert() to ADIM_guarantee() may run
1016       // afoul of any test that is inducing non-balanced JNI locking.
1017       ADIM_guarantee(false, "Non-balanced monitor enter/exit!");
1018       return;
1019     }
1020   }
1021 
1022   if (_recursions != 0) {
1023     _recursions--;        // this is simple recursive enter
1024     return;
1025   }
1026 
 1027   // Invariant: after setting Responsible=null a thread must execute
1028   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
1029   _Responsible = NULL;
1030 
1031 #if INCLUDE_JFR
1032   // get the owner's thread id for the MonitorEnter event
1033   // if it is enabled and the thread isn't suspended
1034   if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
1035     _previous_owner_tid = JFR_THREAD_ID(Self);
1036   }
1037 #endif
1038 
1039   for (;;) {
1040     assert(THREAD == _owner, "invariant");
1041 
1042     // release semantics: prior loads and stores from within the critical section
1043     // must not float (reorder) past the following store that drops the lock.
1044     if (AsyncDeflateIdleMonitors) {
1045       set_owner_from(NULL, Self);
1046     } else {
1047       OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
1048       OrderAccess::storeload();                        // See if we need to wake a successor
1049     }
1050     if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
1051       return;
1052     }
1053     // Other threads are blocked trying to acquire the lock.
1054 
1055     // Normally the exiting thread is responsible for ensuring succession,
1056     // but if other successors are ready or other entering threads are spinning
1057     // then this thread can simply store NULL into _owner and exit without
1058     // waking a successor.  The existence of spinners or ready successors
1059     // guarantees proper succession (liveness).  Responsibility passes to the
1060     // ready or running successors.  The exiting thread delegates the duty.
1061     // More precisely, if a successor already exists this thread is absolved
1062     // of the responsibility of waking (unparking) one.
1063     //
1064     // The _succ variable is critical to reducing futile wakeup frequency.
1065     // _succ identifies the "heir presumptive" thread that has been made
1066     // ready (unparked) but that has not yet run.  We need only one such
1067     // successor thread to guarantee progress.
1068     // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
1069     // section 3.3 "Futile Wakeup Throttling" for details.


1071     // Note that spinners in Enter() also set _succ non-null.
1072     // In the current implementation spinners opportunistically set
1073     // _succ so that exiting threads might avoid waking a successor.
1074     // Another less appealing alternative would be for the exiting thread
1075     // to drop the lock and then spin briefly to see if a spinner managed
1076     // to acquire the lock.  If so, the exiting thread could exit
1077     // immediately without waking a successor, otherwise the exiting
1078     // thread would need to dequeue and wake a successor.
1079     // (Note that we'd need to make the post-drop spin short, but no
1080     // shorter than the worst-case round-trip cache-line migration time.
1081     // The dropped lock needs to become visible to the spinner, and then
1082     // the acquisition of the lock by the spinner must become visible to
1083     // the exiting thread).
1084 
1085     // It appears that an heir-presumptive (successor) must be made ready.
1086     // Only the current lock owner can manipulate the EntryList or
1087     // drain _cxq, so we need to reacquire the lock.  If we fail
1088     // to reacquire the lock the responsibility for ensuring succession
1089     // falls to the new owner.
1090     //
1091     if (try_set_owner_from(Self, NULL) != NULL) {
1092       return;
1093     }
1094 
1095     guarantee(_owner == THREAD, "invariant");
1096 
1097     ObjectWaiter * w = NULL;
1098 
1099     w = _EntryList;
1100     if (w != NULL) {
1101       // I'd like to write: guarantee (w->_thread != Self).
1102       // But in practice an exiting thread may find itself on the EntryList.
1103       // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
 1104       // then calls exit().  Exit releases the lock by setting O._owner to NULL.
 1105       // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
 1106       // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
 1107       // releases the lock "O".  T1 resumes immediately after the ST of null into
 1108       // _owner, above.  T1 notices that the EntryList is populated, so it
1109       // reacquires the lock and then finds itself on the EntryList.
1110       // Given all that, we have to tolerate the circumstance where "w" is
1111       // associated with Self.


1204 
1205 
1206 void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
1207   assert(_owner == Self, "invariant");
1208 
1209   // Exit protocol:
1210   // 1. ST _succ = wakee
1211   // 2. membar #loadstore|#storestore;
 1212   // 3. ST _owner = NULL
 1213   // 4. unpark(wakee)
1214 
1215   _succ = Wakee->_thread;
1216   ParkEvent * Trigger = Wakee->_event;
1217 
1218   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
1219   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
1220   // out-of-scope (non-extant).
1221   Wakee  = NULL;
1222 
1223   // Drop the lock
1224   if (AsyncDeflateIdleMonitors) {
1225     set_owner_from(NULL, Self);
1226   } else {
1227     OrderAccess::release_store(&_owner, (void*)NULL);
1228     OrderAccess::fence();                               // ST _owner vs LD in unpark()
1229   }
1230 
1231   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
1232   Trigger->unpark();
1233 
1234   // Maintain stats and report events to JVMTI
1235   OM_PERFDATA_OP(Parks, inc());
1236 }
1237 
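The ordering in ExitEpilog() above -- publish the successor, release-store NULL into _owner, fence, then unpark -- can be sketched with portable primitives. The following is an illustrative C++20 analogue using std::atomic and std::binary_semaphore in place of OrderAccess and ParkEvent; it models only the non-AsyncDeflateIdleMonitors branch, and every name in it is assumed for the sketch.

// Illustrative sketch of the exit protocol:
//   1. ST succ = wakee
//   2. release-store owner = nullptr (drop the lock)
//   3. full fence (ST owner vs LD in unpark)
//   4. unpark(wakee)
#include <atomic>
#include <semaphore>

struct Waiter {
  void* thread;
  std::binary_semaphore event{0};   // stands in for the ParkEvent
};

std::atomic<void*> g_owner{nullptr};
std::atomic<void*> g_succ{nullptr};

void exit_epilog(Waiter* wakee) {
  g_succ.store(wakee->thread, std::memory_order_relaxed);  // heir presumptive
  std::binary_semaphore* trigger = &wakee->event;
  wakee = nullptr;   // hygiene: the waiter may vanish once the lock is dropped
  g_owner.store(nullptr, std::memory_order_release);       // drop the lock
  std::atomic_thread_fence(std::memory_order_seq_cst);     // order ST vs the wakee's loads
  trigger->release();                                      // unpark the successor
}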
1238 
1239 // -----------------------------------------------------------------------------
1240 // Class Loader deadlock handling.
1241 //
1242 // complete_exit exits a lock, returning the recursion count.
1243 // complete_exit/reenter operate as a wait without waiting.
1244 // complete_exit requires an inflated monitor.
1245 // The _owner field is not always the Thread address even with an
1246 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1247 // thread due to contention.
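//
// A minimal usage sketch of the complete_exit()/reenter() pairing (illustrative
// only; "monitor" is a hypothetical ObjectMonitor* held by the caller):
//
//   intptr_t saved_recursions = monitor->complete_exit(THREAD);  // fully release
//   // ... perform work that must not be done while holding the monitor ...
//   monitor->reenter(saved_recursions, THREAD);  // reacquire and restore recursions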
1248 intptr_t ObjectMonitor::complete_exit(TRAPS) {
1249   Thread * const Self = THREAD;
1250   assert(Self->is_Java_thread(), "Must be Java thread!");
1251   JavaThread *jt = (JavaThread *)THREAD;
1252 
1253   assert(InitDone, "Unexpectedly not initialized");
1254 
1255   if (THREAD != _owner) {
1256     void* cur = _owner;
1257     if (THREAD->is_lock_owned((address)cur)) {
1258       assert(_recursions == 0, "internal state error");
1259       simply_set_owner_from_BasicLock(Self, cur);  // Convert from BasicLock* to Thread*.
1260       _recursions = 0;
1261     }
1262   }
1263 
1264   guarantee(Self == _owner, "complete_exit not owner");
1265   intptr_t save = _recursions; // record the old recursion count
1266   _recursions = 0;        // set the recursion level to be 0
1267   exit(true, Self);           // exit the monitor
1268   guarantee(_owner != Self, "invariant");
1269   return save;
1270 }
1271 
1272 // reenter() enters a lock and sets recursion count
1273 // complete_exit/reenter operate as a wait without waiting
1274 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1275   Thread * const Self = THREAD;
1276   assert(Self->is_Java_thread(), "Must be Java thread!");
1277   JavaThread *jt = (JavaThread *)THREAD;
1278 
1279   guarantee(_owner != Self, "reenter already owner");
1280   enter(THREAD);
1281   // Entered the monitor.
1282   guarantee(_recursions == 0, "reenter recursion");
1283   _recursions = recursions;

1284 }
1285 
1286 // Checks that the current THREAD owns this monitor and causes an
1287 // immediate return if it doesn't. We don't use the CHECK macro
1288 // because we want the IMSE to be the only exception that is thrown
1289 // from the call site when false is returned. Any other pending
1290 // exception is ignored.
1291 #define CHECK_OWNER()                                                  \
1292   do {                                                                 \
1293     if (!check_owner(THREAD)) {                                        \
1294        assert(HAS_PENDING_EXCEPTION, "expected a pending IMSE here."); \
1295        return;                                                         \
1296      }                                                                 \
1297   } while (false)
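//
// A typical use of CHECK_OWNER(), shown as an illustrative sketch (the real
// call sites are the wait/notify paths later in this file):
//
//   void ObjectMonitor::notify(TRAPS) {
//     CHECK_OWNER();  // throws IMSE and returns if THREAD is not the owner
//     ...
//   }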
1298 
1299 // Returns true if the specified thread owns the ObjectMonitor.
1300 // Otherwise returns false and throws IllegalMonitorStateException
1301 // (IMSE). If there is a pending exception and the specified thread
1302 // is not the owner, that exception will be replaced by the IMSE.
1303 bool ObjectMonitor::check_owner(Thread* THREAD) {
1304   if (_owner == THREAD) {
1305     return true;
1306   }
1307   void* cur = _owner;
1308   if (THREAD->is_lock_owned((address)cur)) {
1309     simply_set_owner_from_BasicLock(THREAD, cur);  // Convert from BasicLock* to Thread*.
1310     _recursions = 0;
1311     return true;
1312   }
1313   THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
1314              "current thread is not owner", false);
1315 }
1316 
1317 static void post_monitor_wait_event(EventJavaMonitorWait* event,
1318                                     ObjectMonitor* monitor,
1319                                     jlong notifier_tid,
1320                                     jlong timeout,
1321                                     bool timedout) {
1322   assert(event != NULL, "invariant");
1323   assert(monitor != NULL, "invariant");
1324   event->set_monitorClass(((oop)monitor->object())->klass());
1325   event->set_timeout(timeout);
1326   event->set_address((uintptr_t)monitor->object_addr());
1327   event->set_notifier(notifier_tid);
1328   event->set_timedOut(timedout);
1329   event->commit();


1794     // We periodically check to see if there's a safepoint pending.
1795     if ((ctr & 0xFF) == 0) {
1796       if (SafepointMechanism::should_block(Self)) {
1797         goto Abort;           // abrupt spin egress
1798       }
1799       SpinPause();
1800     }
1801 
1802     // Probe _owner with TATAS (test-and-test-and-set).
1803     // If this thread observes the monitor transition or flicker
1804     // from locked to unlocked to locked, then the odds that this
1805     // thread will acquire the lock in this spin attempt go down
1806     // considerably.  The same argument applies if the CAS fails
1807     // or if we observe _owner change from one non-null value to
1808     // another non-null value.   In such cases we might abort
1809     // the spin without prejudice or apply a "penalty" to the
1810     // spin count-down variable "ctr", reducing it by 100, say.
1811 
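    // A sketch of that "penalty" idea, for illustration only (not what this
    // loop does; "prev_owner" is a hypothetical local carried across iterations):
    //
    //   void* o = _owner;
    //   if (o != NULL && prev_owner != NULL && o != prev_owner) {
    //     ctr -= 100;          // ownership changed hands: charge a penalty
    //   }                      // rather than aborting the spin outright
    //   prev_owner = o;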
1812     Thread * ox = (Thread *) _owner;
1813     if (ox == NULL) {
1814       ox = (Thread*)try_set_owner_from(Self, NULL);
1815       if (ox == NULL) {
1816         // The CAS succeeded -- this thread acquired ownership
1817         // Take care of some bookkeeping to exit spin state.
1818         if (_succ == Self) {
1819           _succ = NULL;
1820         }
1821 
1822         // Increase _SpinDuration :
1823         // The spin was successful (profitable) so we tend toward
1824         // longer spin attempts in the future.
1825         // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
1826         // If we acquired the lock early in the spin cycle it
1827         // makes sense to increase _SpinDuration proportionally.
1828         // Note that we don't clamp SpinDuration precisely at SpinLimit.
1829         int x = _SpinDuration;
1830         if (x < Knob_SpinLimit) {
1831           if (x < Knob_Poverty) x = Knob_Poverty;
1832           _SpinDuration = x + Knob_Bonus;
1833         }
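        // One way the CONSIDER item above could look -- scale the bonus by how
        // early in the spin we won (illustrative sketch only, not implemented;
        // "ctr" is the remaining spin count-down at the time of acquisition):
        //
        //   if (x < Knob_SpinLimit) {
        //     if (x < Knob_Poverty) x = Knob_Poverty;
        //     _SpinDuration = x + Knob_Bonus + (ctr * Knob_Bonus) / Knob_SpinLimit;
        //   }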
1834         return 1;


2058   }
2059 #define NEWPERFVARIABLE(n)                                                \
2060   {                                                                       \
2061     n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,  \
2062                                          CHECK);                          \
2063   }
2064     NEWPERFCOUNTER(_sync_Inflations);
2065     NEWPERFCOUNTER(_sync_Deflations);
2066     NEWPERFCOUNTER(_sync_ContendedLockAttempts);
2067     NEWPERFCOUNTER(_sync_FutileWakeups);
2068     NEWPERFCOUNTER(_sync_Parks);
2069     NEWPERFCOUNTER(_sync_Notifications);
2070     NEWPERFVARIABLE(_sync_MonExtant);
2071 #undef NEWPERFCOUNTER
2072 #undef NEWPERFVARIABLE
2073   }
2074 
2075   DEBUG_ONLY(InitDone = true;)
2076 }
2077 
2078 ObjectMonitorHandle::~ObjectMonitorHandle() {
2079   if (_om_ptr != NULL) {
2080     _om_ptr->dec_ref_count();
2081     _om_ptr = NULL;
2082   }
2083 }
2084 
2085 // Save the ObjectMonitor* associated with the specified markWord and
2086 // increment the ref_count. This function should only be called if
2087 // the caller has verified mark.has_monitor() == true. The object
2088 // parameter is needed to verify that ObjectMonitor* has not been
2089 // deflated and reused for another object.
2090 //
2091 // This function returns true if the ObjectMonitor* has been safely
2092 // saved. This function returns false if we have lost a race with
2093 // async deflation; the caller should retry as appropriate.
2094 //
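//
// A minimal sketch of the caller-side retry loop (illustrative only; "obj" is
// a hypothetical oop local and the re-inflation path is elided):
//
//   ObjectMonitorHandle omh;
//   for (;;) {
//     markWord mark = obj->mark();
//     if (!mark.has_monitor()) {
//       break;  // deflated: fall back to the inflation path (not shown)
//     }
//     if (omh.save_om_ptr(obj, mark)) {
//       break;  // ObjectMonitor* saved; ref_count is held until the handle dies
//     }
//     // Lost the race with async deflation -- loop and retry.
//   }
//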
2095 bool ObjectMonitorHandle::save_om_ptr(oop object, markWord mark) {
2096   guarantee(mark.has_monitor(), "sanity check: mark=" INTPTR_FORMAT,
2097             mark.value());
2098 
2099   ObjectMonitor* om_ptr = mark.monitor();
2100   om_ptr->inc_ref_count();
2101 
2102   if (AsyncDeflateIdleMonitors) {
2103     // Race here if monitor is not owned! The above ref_count bump
2104     // will cause subsequent async deflation to skip it. However,
2105     // previous or concurrent async deflation is a race.
2106     if (om_ptr->owner_is_DEFLATER_MARKER() && om_ptr->ref_count() <= 0) {
2107       // Async deflation is in progress and our ref_count increment
2108       // above lost the race to async deflation. Attempt to restore
2109       // the header/dmw to the object's header so that we only retry
2110       // once if the deflater thread happens to be slow.
2111       om_ptr->install_displaced_markword_in_object(object);
2112       om_ptr->dec_ref_count();
2113       return false;
2114     }
2115     if (om_ptr->ref_count() <= 0) {
2116       // Async deflation is in the process of bailing out, but has not
2117       // yet restored the ref_count field so we return false to force
2118       // a retry. We want a positive ref_count value for a true return.
2119       om_ptr->dec_ref_count();
2120       return false;
2121     }
2122     // The ObjectMonitor could have been deflated and reused for
2123     // another object before we bumped the ref_count so make sure
2124     // our object still refers to this ObjectMonitor.
2125     const markWord tmp = object->mark();
2126     if (!tmp.has_monitor() || tmp.monitor() != om_ptr) {
2127       // Async deflation and reuse won the race so we have to retry.
2128       // Skip object header restoration since that's already done.
2129       om_ptr->dec_ref_count();
2130       return false;
2131     }
2132   }
2133 
2134   ADIM_guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
2135                  p2i(_om_ptr));
2136   _om_ptr = om_ptr;
2137   return true;
2138 }
2139 
2140 // For internal use by ObjectSynchronizer::inflate().
2141 // This function is only used when we don't have to worry about async
2142 // deflation of the specified ObjectMonitor*.
2143 void ObjectMonitorHandle::set_om_ptr(ObjectMonitor* om_ptr) {
2144   if (_om_ptr == NULL) {
2145     ADIM_guarantee(om_ptr != NULL, "cannot clear an unset om_ptr");
2146     om_ptr->inc_ref_count();
2147     _om_ptr = om_ptr;
2148   } else {
2149     ADIM_guarantee(om_ptr == NULL, "can only clear a set om_ptr");
2150     _om_ptr->dec_ref_count();
2151     _om_ptr = NULL;
2152   }
2153 }
2154 
2155 // Save the specified ObjectMonitor* if it is safe, i.e., not being
2156 // async deflated.
2157 //
2158 // This function returns true if the ObjectMonitor* has been safely
2159 // saved. This function returns false if the specified ObjectMonitor*
2160 // is NULL or if we have lost a race with async deflation; the caller
2161 // can retry as appropriate.
2162 bool ObjectMonitorHandle::set_om_ptr_if_safe(ObjectMonitor* om_ptr) {
2163   if (om_ptr == NULL) {
2164     return false;  // Nothing to save if input is NULL
2165   }
2166 
2167   om_ptr->inc_ref_count();
2168 
2169   if (AsyncDeflateIdleMonitors) {
2170     if (om_ptr->owner_is_DEFLATER_MARKER() && om_ptr->ref_count() <= 0) {
2171       // Async deflation is in progress and our ref_count increment
2172       // above lost the race to async deflation.
2173       om_ptr->dec_ref_count();
2174       return false;
2175     }
2176     if (om_ptr->ref_count() <= 0) {
2177       // Async deflation is in the process of bailing out, but has not
2178       // yet restored the ref_count field so we return false to force
2179       // a retry. We want a positive ref_count value for a true return.
2180       om_ptr->dec_ref_count();
2181       return false;
2182     }
2183     // Unlike save_om_ptr(), we don't have context to determine if
2184     // the ObjectMonitor has been deflated and reused for another
2185     // object.
2186   }
2187 
2188   ADIM_guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
2189                  p2i(_om_ptr));
2190   _om_ptr = om_ptr;
2191   return true;
2192 }
2193 
2194 // Unset the _om_ptr field and decrement the ref_count field.
2195 void ObjectMonitorHandle::unset_om_ptr() {
2196   ADIM_guarantee(_om_ptr != NULL, "_om_ptr must not be NULL");
2197   _om_ptr->dec_ref_count();
2198   _om_ptr = NULL;
2199 }
2200 
2201 void ObjectMonitor::print_on(outputStream* st) const {
2202   // The minimal things to print for markWord printing, more can be added for debugging and logging.
2203   st->print("{contentions=0x%08x,waiters=0x%08x"
2204             ",recursions=" INTX_FORMAT ",owner=" INTPTR_FORMAT "}",
2205             contentions(), waiters(), recursions(),
2206             p2i(owner()));
2207 }
2208 void ObjectMonitor::print() const { print_on(tty); }
2209 
2210 // Print the ObjectMonitor like a debugger would:
2211 //
2212 // (ObjectMonitor) 0x00007fdfb6012e40 = {
2213 //   _header = (_value = 1)
2214 //   _object = 0x000000070ff45fd0
2215 //   _allocation_state = Old
2216 //   _pad_buf0 = {
2217 //     [0] = '\0'
2218 //     ...
2219 //     [43] = '\0'
2220 //   }
2221 //   _owner = 0x0000000000000000
2222 //   _previous_owner_tid = 0
2223 //   _pad_buf1 = {
2224 //     [0] = '\0'
2225 //     ...
2226 //     [47] = '\0'
2227 //   }
2228 //   _ref_count = 1
2229 //   _pad_buf2 = {
2230 //     [0] = '\0'
2231 //     ...
2232 //     [59] = '\0'
2233 //   }
2234 //   _next_om = 0x0000000000000000
2235 //   _recursions = 0
2236 //   _EntryList = 0x0000000000000000
2237 //   _cxq = 0x0000000000000000
2238 //   _succ = 0x0000000000000000
2239 //   _Responsible = 0x0000000000000000
2240 //   _Spinner = 0
2241 //   _SpinDuration = 5000
2242 //   _contentions = 0
2243 //   _WaitSet = 0x0000700009756248
2244 //   _waiters = 1
2245 //   _WaitSetLock = 0
2246 // }
2247 //
2248 void ObjectMonitor::print_debug_style_on(outputStream* st) const {
2249   st->print_cr("(ObjectMonitor*) " INTPTR_FORMAT " = {", p2i(this));
2250   st->print_cr("  _header = " INTPTR_FORMAT, header().value());
2251   st->print_cr("  _object = " INTPTR_FORMAT, p2i(_object));
2252   st->print("  _allocation_state = ");
2253   if (is_free()) {
2254     st->print("Free");
2255   } else if (is_old()) {
2256     st->print("Old");
2257   } else if (is_new()) {
2258     st->print("New");
2259   } else {
2260     st->print("unknown=%d", _allocation_state);
2261   }
2262   st->cr();
2263   st->print_cr("  _pad_buf0 = {");
2264   st->print_cr("    [0] = '\\0'");
2265   st->print_cr("    ...");
2266   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);
2267   st->print_cr("  }");
2268   st->print_cr("  _owner = " INTPTR_FORMAT, p2i(_owner));
2269   st->print_cr("  _previous_owner_tid = " JLONG_FORMAT, _previous_owner_tid);
2270   st->print_cr("  _pad_buf1 = {");
2271   st->print_cr("    [0] = '\\0'");
2272   st->print_cr("    ...");
2273   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf1) - 1);
2274   st->print_cr("  }");
2275   st->print_cr("  _ref_count = %d", ref_count());
2276   st->print_cr("  _pad_buf2 = {");
2277   st->print_cr("    [0] = '\\0'");
2278   st->print_cr("    ...");
2279   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf2) - 1);
2280   st->print_cr("  }");
2281   st->print_cr("  _next_om = " INTPTR_FORMAT, p2i(_next_om));
2282   st->print_cr("  _recursions = " INTX_FORMAT, _recursions);
2283   st->print_cr("  _EntryList = " INTPTR_FORMAT, p2i(_EntryList));
2284   st->print_cr("  _cxq = " INTPTR_FORMAT, p2i(_cxq));
2285   st->print_cr("  _succ = " INTPTR_FORMAT, p2i(_succ));
2286   st->print_cr("  _Responsible = " INTPTR_FORMAT, p2i(_Responsible));
2287   st->print_cr("  _Spinner = %d", _Spinner);
2288   st->print_cr("  _SpinDuration = %d", _SpinDuration);
2289   st->print_cr("  _contentions = %d", _contentions);
2290   st->print_cr("  _WaitSet = " INTPTR_FORMAT, p2i(_WaitSet));
2291   st->print_cr("  _waiters = %d", _waiters);
2292   st->print_cr("  _WaitSetLock = %d", _WaitSetLock);
2293   st->print_cr("}");
2294 }