
src/hotspot/share/runtime/objectMonitor.cpp

rev 57560 : imported patch 8236035.patch.cr0
rev 57561 : dholmes CR - rename simply_set_owner_from() -> set_owner_from() and simply_set_owner_from_BasicLock() -> set_owner_from_BasicLock(); rename release_clear_owner_with_barrier() -> release_clear_owner() and refactor barrier code back into the call sites.
rev 57562 : kbarrett CR - rearrange some loads of _owner field to be more efficient; clarify header comment for try_set_owner_from() declaration; make some loads of _owner field DEBUG_ONLY since they only exist for assert()'s; update related logging calls to use the existing function parameter instead.
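
The helper functions named in these revision notes (try_set_owner_from(), set_owner_from_BasicLock(), release_clear_owner()) are called throughout the updated code below, but their definitions are not part of this page. The following is only a sketch of their presumed semantics, inferred from the call sites visible here; the exact signatures and bodies are assumptions, not the patch's actual definitions.

// Sketch only -- inferred from the call sites on this page, not the real definitions.
inline void* ObjectMonitor::try_set_owner_from(void* old_value, void* new_value) {
  // CAS on _owner; returns the prior value, which equals old_value exactly
  // when the transition succeeded (see enter(), TryLock() and exit() below).
  return Atomic::cmpxchg(&_owner, old_value, new_value);
}

inline void ObjectMonitor::set_owner_from_BasicLock(void* basic_lock_p, Thread* self) {
  // Non-null BasicLock* to non-null Thread* transition; a plain store is
  // presumed sufficient because readers tolerate either flavor of non-null owner.
  assert(_owner == basic_lock_p, "unexpected owner");
  _owner = self;
}

inline void ObjectMonitor::release_clear_owner(Thread* old_owner) {
  // Drop the lock with release semantics; the callers below add the trailing
  // barrier themselves (storeload in exit(), full fence in ExitEpilog()).
  assert(_owner == old_owner, "unexpected owner");
  Atomic::release_store(&_owner, (void*)NULL);
}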


 228   return AllocateHeap(size, mtInternal);
 229 }
 230 void* ObjectMonitor::operator new[] (size_t size) throw() {
 231   return operator new (size);
 232 }
 233 void ObjectMonitor::operator delete(void* p) {
 234   FreeHeap(p);
 235 }
 236 void ObjectMonitor::operator delete[] (void *p) {
 237   operator delete(p);
 238 }
 239 
 240 // -----------------------------------------------------------------------------
 241 // Enter support
 242 
 243 void ObjectMonitor::enter(TRAPS) {
 244   // The following code is ordered to check the most common cases first
 245   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 246   Thread * const Self = THREAD;
 247 
 248   void * cur = Atomic::cmpxchg(&_owner, (void*)NULL, Self);
 249   if (cur == NULL) {
 250     assert(_recursions == 0, "invariant");
 251     return;
 252   }
 253 
 254   if (cur == Self) {
 255     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 256     _recursions++;
 257     return;
 258   }
 259 
 260   if (Self->is_lock_owned((address)cur)) {
 261     assert(_recursions == 0, "internal state error");
 262     _recursions = 1;
 263     // Commute owner from a thread-specific on-stack BasicLockObject address to
 264     // a full-fledged "Thread *".
 265     _owner = Self;
 266     return;
 267   }
 268 
 269   // We've encountered genuine contention.
 270   assert(Self->_Stalled == 0, "invariant");
 271   Self->_Stalled = intptr_t(this);
 272 
 273   // Try one round of spinning *before* enqueueing Self
 274   // and before going through the awkward and expensive state
 275   // transitions.  The following spin is strictly optional ...
 276   // Note that if we acquire the monitor from an initial spin
 277   // we forgo posting JVMTI events and firing DTRACE probes.
 278   if (TrySpin(Self) > 0) {
 279     assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
 280     assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);
 281     assert(((oop)object())->mark() == markWord::encode(this),
 282            "object mark must match encoded this: mark=" INTPTR_FORMAT
 283            ", encoded this=" INTPTR_FORMAT, ((oop)object())->mark().value(),
 284            markWord::encode(this).value());
 285     Self->_Stalled = 0;


 386 
 387     // The current thread already owns the monitor and is not going to
 388     // call park() for the remainder of the monitor enter protocol. So
 389     // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
 390     // event handler consumed an unpark() issued by the thread that
 391     // just exited the monitor.
 392   }
 393   if (event.should_commit()) {
 394     event.set_previousOwner((uintptr_t)_previous_owner_tid);
 395     event.commit();
 396   }
 397   OM_PERFDATA_OP(ContendedLockAttempts, inc());
 398 }
 399 
 400 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 401 // Callers must compensate as needed.
 402 
 403 int ObjectMonitor::TryLock(Thread * Self) {
 404   void * own = _owner;
 405   if (own != NULL) return 0;
 406   if (Atomic::replace_if_null(&_owner, Self)) {
 407     assert(_recursions == 0, "invariant");
 408     return 1;
 409   }
 410   // The lock had been free momentarily, but we lost the race to the lock.
 411   // Interference -- the CAS failed.
 412   // We can either return -1 or retry.
 413   // Retry doesn't make as much sense because the lock was just acquired.
 414   return -1;
 415 }
 416 
 417 // Convert the fields used by is_busy() to a string that can be
 418 // used for diagnostic output.
 419 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
 420   ss->print("is_busy: contentions=%d, waiters=%d, owner=" INTPTR_FORMAT
 421             ", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, _contentions,
 422             _waiters, p2i(_owner), p2i(_cxq), p2i(_EntryList));
 423   return ss->base();
 424 }
 425 
 426 #define MAX_RECHECK_INTERVAL 1000


 845 // thread acquires the lock and then drops the lock, at which time the
 846 // exiting thread will notice and unpark the stranded thread, or, (b)
 847 // the timer expires.  If the lock is high traffic then the stranding latency
 848 // will be low due to (a).  If the lock is low traffic then the odds of
 849 // stranding are lower, although the worst-case stranding latency
 850 // is longer.  Critically, we don't want to put excessive load in the
 851 // platform's timer subsystem.  We want to minimize both the timer injection
 852 // rate (timers created/sec) as well as the number of timers active at
 853 // any one time.  (more precisely, we want to minimize timer-seconds, which is
 854 // the integral of the # of active timers at any instant over time).
 855 // Both impinge on OS scalability.  Given that, at most one thread parked on
 856 // a monitor will use a timer.
 857 //
 858 // There is also the risk of a futile wake-up. If we drop the lock
 859 // another thread can reacquire the lock immediately, and we can
 860 // then wake a thread unnecessarily. This is benign, and we've
 861 // structured the code so the windows are short and the frequency
 862 // of such futile wakeups is low.
 863 
 864 void ObjectMonitor::exit(bool not_suspended, TRAPS) {
 865   Thread * const Self = THREAD;
 866   if (THREAD != _owner) {
 867     if (THREAD->is_lock_owned((address) _owner)) {
 868       // Transmute _owner from a BasicLock pointer to a Thread address.
 869       // We don't need to hold _mutex for this transition.
 870       // Non-null to Non-null is safe as long as all readers can
 871       // tolerate either flavor.
 872       assert(_recursions == 0, "invariant");
 873       _owner = THREAD;
 874       _recursions = 0;
 875     } else {
 876       // Apparent unbalanced locking ...
 877       // Naively we'd like to throw IllegalMonitorStateException.
 878       // As a practical matter we can neither allocate nor throw an
 879       // exception as ::exit() can be called from leaf routines.
 880       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
 881       // Upon deeper reflection, however, in a properly run JVM the only
 882       // way we should encounter this situation is in the presence of
 883       // unbalanced JNI locking. TODO: CheckJNICalls.
 884       // See also: CR4414101
 885 #ifdef ASSERT
 886       LogStreamHandle(Error, monitorinflation) lsh;
 887       lsh.print_cr("ERROR: ObjectMonitor::exit(): thread=" INTPTR_FORMAT
 888                     " is exiting an ObjectMonitor it does not own.", p2i(THREAD));
 889       lsh.print_cr("The imbalance is possibly caused by JNI locking.");
 890       print_debug_style_on(&lsh);
 891 #endif
 892       assert(false, "Non-balanced monitor enter/exit!");
 893       return;


 897   if (_recursions != 0) {
 898     _recursions--;        // this is simple recursive enter
 899     return;
 900   }
 901 
 902   // Invariant: after setting Responsible=null a thread must execute
 903   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
 904   _Responsible = NULL;
 905 
 906 #if INCLUDE_JFR
 907   // get the owner's thread id for the MonitorEnter event
 908   // if it is enabled and the thread isn't suspended
 909   if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
 910     _previous_owner_tid = JFR_THREAD_ID(Self);
 911   }
 912 #endif
 913 
 914   for (;;) {
 915     assert(THREAD == _owner, "invariant");
 916 

 917     // release semantics: prior loads and stores from within the critical section
 918     // must not float (reorder) past the following store that drops the lock.
 919     Atomic::release_store(&_owner, (void*)NULL);   // drop the lock
 920     OrderAccess::storeload();                      // See if we need to wake a successor




 921     if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
 922       return;
 923     }
 924     // Other threads are blocked trying to acquire the lock.
 925 
 926     // Normally the exiting thread is responsible for ensuring succession,
 927     // but if other successors are ready or other entering threads are spinning
 928     // then this thread can simply store NULL into _owner and exit without
 929     // waking a successor.  The existence of spinners or ready successors
 930     // guarantees proper succession (liveness).  Responsibility passes to the
 931     // ready or running successors.  The exiting thread delegates the duty.
 932     // More precisely, if a successor already exists this thread is absolved
 933     // of the responsibility of waking (unparking) one.
 934     //
 935     // The _succ variable is critical to reducing futile wakeup frequency.
 936     // _succ identifies the "heir presumptive" thread that has been made
 937     // ready (unparked) but that has not yet run.  We need only one such
 938     // successor thread to guarantee progress.
 939     // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
 940     // section 3.3 "Futile Wakeup Throttling" for details.


 942     // Note that spinners in Enter() also set _succ non-null.
 943     // In the current implementation spinners opportunistically set
 944     // _succ so that exiting threads might avoid waking a successor.
 945     // Another less appealing alternative would be for the exiting thread
 946     // to drop the lock and then spin briefly to see if a spinner managed
 947     // to acquire the lock.  If so, the exiting thread could exit
 948     // immediately without waking a successor, otherwise the exiting
 949     // thread would need to dequeue and wake a successor.
 950     // (Note that we'd need to make the post-drop spin short, but no
 951     // shorter than the worst-case round-trip cache-line migration time.
 952     // The dropped lock needs to become visible to the spinner, and then
 953     // the acquisition of the lock by the spinner must become visible to
 954     // the exiting thread).
 955 
 956     // It appears that an heir-presumptive (successor) must be made ready.
 957     // Only the current lock owner can manipulate the EntryList or
 958     // drain _cxq, so we need to reacquire the lock.  If we fail
 959     // to reacquire the lock the responsibility for ensuring succession
 960     // falls to the new owner.
 961     //
 962     if (!Atomic::replace_if_null(&_owner, THREAD)) {
 963       return;
 964     }
 965 
 966     guarantee(_owner == THREAD, "invariant");
 967 
 968     ObjectWaiter * w = NULL;
 969 
 970     w = _EntryList;
 971     if (w != NULL) {
 972       // I'd like to write: guarantee (w->_thread != Self).
 973       // But in practice an exiting thread may find itself on the EntryList.
 974       // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
 975       // then calls exit().  Exit releases the lock by setting O._owner to NULL.
 976       // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
 977       // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
 978       // releases the lock "O".  T2 resumes immediately after the ST of null into
 979       // _owner, above.  T2 notices that the EntryList is populated, so it
 980       // reacquires the lock and then finds itself on the EntryList.
 981       // Given all that, we have to tolerate the circumstance where "w" is
 982       // associated with Self.


1075 
1076 
1077 void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
1078   assert(_owner == Self, "invariant");
1079 
1080   // Exit protocol:
1081   // 1. ST _succ = wakee
1082   // 2. membar #loadstore|#storestore;
1083   // 3. ST _owner = NULL
1084   // 4. unpark(wakee)
1085 
1086   _succ = Wakee->_thread;
1087   ParkEvent * Trigger = Wakee->_event;
1088 
1089   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
1090   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
1091   // out-of-scope (non-extant).
1092   Wakee  = NULL;
1093 
1094   // Drop the lock
1095   Atomic::release_store(&_owner, (void*)NULL);
1096   OrderAccess::fence();                               // ST _owner vs LD in unpark()

1097 
1098   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
1099   Trigger->unpark();
1100 
1101   // Maintain stats and report events to JVMTI
1102   OM_PERFDATA_OP(Parks, inc());
1103 }
1104 
1105 
1106 // -----------------------------------------------------------------------------
1107 // Class Loader deadlock handling.
1108 //
1109 // complete_exit exits a lock returning recursion count
1110 // complete_exit/reenter operate as a wait without waiting
1111 // complete_exit requires an inflated monitor
1112 // The _owner field is not always the Thread addr even with an
1113 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1114 // thread due to contention.
1115 intx ObjectMonitor::complete_exit(TRAPS) {
1116   Thread * const Self = THREAD;
1117   assert(Self->is_Java_thread(), "Must be Java thread!");
1118   JavaThread *jt = (JavaThread *)THREAD;
1119 
1120   assert(InitDone, "Unexpectedly not initialized");
1121 
1122   if (THREAD != _owner) {
1123     if (THREAD->is_lock_owned ((address)_owner)) {

1124       assert(_recursions == 0, "internal state error");
1125       _owner = THREAD;   // Convert from basiclock addr to Thread addr
1126       _recursions = 0;
1127     }
1128   }
1129 
1130   guarantee(Self == _owner, "complete_exit not owner");
1131   intx save = _recursions; // record the old recursion count
1132   _recursions = 0;        // set the recursion level to be 0
1133   exit(true, Self);           // exit the monitor
1134   guarantee(_owner != Self, "invariant");
1135   return save;
1136 }
1137 
1138 // reenter() enters a lock and sets recursion count
1139 // complete_exit/reenter operate as a wait without waiting
1140 void ObjectMonitor::reenter(intx recursions, TRAPS) {
1141   Thread * const Self = THREAD;
1142   assert(Self->is_Java_thread(), "Must be Java thread!");
1143   JavaThread *jt = (JavaThread *)THREAD;
1144 
1145   guarantee(_owner != Self, "reenter already owner");


1150 }
1151 
1152 // Checks that the current THREAD owns this monitor and causes an
1153 // immediate return if it doesn't. We don't use the CHECK macro
1154 // because we want the IMSE to be the only exception that is thrown
1155 // from the call site when false is returned. Any other pending
1156 // exception is ignored.
1157 #define CHECK_OWNER()                                                  \
1158   do {                                                                 \
1159     if (!check_owner(THREAD)) {                                        \
1160        assert(HAS_PENDING_EXCEPTION, "expected a pending IMSE here."); \
1161        return;                                                         \
1162      }                                                                 \
1163   } while (false)
1164 
1165 // Returns true if the specified thread owns the ObjectMonitor.
1166 // Otherwise returns false and throws IllegalMonitorStateException
1167 // (IMSE). If there is a pending exception and the specified thread
1168 // is not the owner, that exception will be replaced by the IMSE.
1169 bool ObjectMonitor::check_owner(Thread* THREAD) {
1170   if (_owner == THREAD) {

1171     return true;
1172   }
1173   if (THREAD->is_lock_owned((address)_owner)) {
1174     _owner = THREAD;  // convert from BasicLock addr to Thread addr
1175     _recursions = 0;
1176     return true;
1177   }
1178   THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
1179              "current thread is not owner", false);
1180 }
1181 
1182 static void post_monitor_wait_event(EventJavaMonitorWait* event,
1183                                     ObjectMonitor* monitor,
1184                                     jlong notifier_tid,
1185                                     jlong timeout,
1186                                     bool timedout) {
1187   assert(event != NULL, "invariant");
1188   assert(monitor != NULL, "invariant");
1189   event->set_monitorClass(((oop)monitor->object())->klass());
1190   event->set_timeout(timeout);
1191   event->set_address((uintptr_t)monitor->object_addr());
1192   event->set_notifier(notifier_tid);
1193   event->set_timedOut(timedout);
1194   event->commit();


1663     // We periodically check to see if there's a safepoint pending.
1664     if ((ctr & 0xFF) == 0) {
1665       if (SafepointMechanism::should_block(Self)) {
1666         goto Abort;           // abrupt spin egress
1667       }
1668       SpinPause();
1669     }
1670 
1671     // Probe _owner with TATAS
1672     // If this thread observes the monitor transition or flicker
1673     // from locked to unlocked to locked, then the odds that this
1674     // thread will acquire the lock in this spin attempt go down
1675     // considerably.  The same argument applies if the CAS fails
1676     // or if we observe _owner change from one non-null value to
1677     // another non-null value.   In such cases we might abort
1678     // the spin without prejudice or apply a "penalty" to the
1679     // spin count-down variable "ctr", reducing it by 100, say.
1680 
1681     Thread * ox = (Thread *) _owner;
1682     if (ox == NULL) {
1683       ox = (Thread*)Atomic::cmpxchg(&_owner, (void*)NULL, Self);
1684       if (ox == NULL) {
1685         // The CAS succeeded -- this thread acquired ownership
1686         // Take care of some bookkeeping to exit spin state.
1687         if (_succ == Self) {
1688           _succ = NULL;
1689         }
1690 
1691         // Increase _SpinDuration :
1692         // The spin was successful (profitable) so we tend toward
1693         // longer spin attempts in the future.
1694         // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
1695         // If we acquired the lock early in the spin cycle it
1696         // makes sense to increase _SpinDuration proportionally.
1697         // Note that we don't clamp SpinDuration precisely at SpinLimit.
1698         int x = _SpinDuration;
1699         if (x < Knob_SpinLimit) {
1700           if (x < Knob_Poverty) x = Knob_Poverty;
1701           _SpinDuration = x + Knob_Bonus;
1702         }
1703         return 1;




 228   return AllocateHeap(size, mtInternal);
 229 }
 230 void* ObjectMonitor::operator new[] (size_t size) throw() {
 231   return operator new (size);
 232 }
 233 void ObjectMonitor::operator delete(void* p) {
 234   FreeHeap(p);
 235 }
 236 void ObjectMonitor::operator delete[] (void *p) {
 237   operator delete(p);
 238 }
 239 
 240 // -----------------------------------------------------------------------------
 241 // Enter support
 242 
 243 void ObjectMonitor::enter(TRAPS) {
 244   // The following code is ordered to check the most common cases first
 245   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 246   Thread * const Self = THREAD;
 247 
 248   void* cur = try_set_owner_from(NULL, Self);
 249   if (cur == NULL) {
 250     assert(_recursions == 0, "invariant");
 251     return;
 252   }
 253 
 254   if (cur == Self) {
 255     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 256     _recursions++;
 257     return;
 258   }
 259 
 260   if (Self->is_lock_owned((address)cur)) {
 261     assert(_recursions == 0, "internal state error");
 262     _recursions = 1;
 263     set_owner_from_BasicLock(cur, Self);  // Convert from BasicLock* to Thread*.


 264     return;
 265   }
 266 
 267   // We've encountered genuine contention.
 268   assert(Self->_Stalled == 0, "invariant");
 269   Self->_Stalled = intptr_t(this);
 270 
 271   // Try one round of spinning *before* enqueueing Self
 272   // and before going through the awkward and expensive state
 273   // transitions.  The following spin is strictly optional ...
 274   // Note that if we acquire the monitor from an initial spin
 275   // we forgo posting JVMTI events and firing DTRACE probes.
 276   if (TrySpin(Self) > 0) {
 277     assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
 278     assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);
 279     assert(((oop)object())->mark() == markWord::encode(this),
 280            "object mark must match encoded this: mark=" INTPTR_FORMAT
 281            ", encoded this=" INTPTR_FORMAT, ((oop)object())->mark().value(),
 282            markWord::encode(this).value());
 283     Self->_Stalled = 0;


 384 
 385     // The current thread already owns the monitor and is not going to
 386     // call park() for the remainder of the monitor enter protocol. So
 387     // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
 388     // event handler consumed an unpark() issued by the thread that
 389     // just exited the monitor.
 390   }
 391   if (event.should_commit()) {
 392     event.set_previousOwner((uintptr_t)_previous_owner_tid);
 393     event.commit();
 394   }
 395   OM_PERFDATA_OP(ContendedLockAttempts, inc());
 396 }
 397 
 398 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 399 // Callers must compensate as needed.
 400 
 401 int ObjectMonitor::TryLock(Thread * Self) {
 402   void * own = _owner;
 403   if (own != NULL) return 0;
 404   if (try_set_owner_from(NULL, Self) == NULL) {
 405     assert(_recursions == 0, "invariant");
 406     return 1;
 407   }
 408   // The lock had been free momentarily, but we lost the race to the lock.
 409   // Interference -- the CAS failed.
 410   // We can either return -1 or retry.
 411   // Retry doesn't make as much sense because the lock was just acquired.
 412   return -1;
 413 }
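
The tri-state return convention of TryLock() above (1 = acquired, 0 = observed busy, -1 = lost the CAS race) can be illustrated with a small standalone analogue. ToyMonitor below is invented purely for illustration and uses std::atomic instead of the HotSpot Atomic wrappers.

#include <atomic>

// Made-up stand-in type, not part of HotSpot; mirrors the tri-state contract only.
struct ToyMonitor {
  std::atomic<void*> owner{nullptr};

  int try_lock(void* self) {
    void* cur = owner.load(std::memory_order_relaxed);
    if (cur != nullptr) return 0;            // lock is held; don't bother with the CAS
    void* expected = nullptr;
    if (owner.compare_exchange_strong(expected, self)) {
      return 1;                              // CAS succeeded; caller now owns the lock
    }
    return -1;                               // was free momentarily, but we lost the race
  }
};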
 414 
 415 // Convert the fields used by is_busy() to a string that can be
 416 // used for diagnostic output.
 417 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
 418   ss->print("is_busy: contentions=%d, waiters=%d, owner=" INTPTR_FORMAT
 419             ", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, _contentions,
 420             _waiters, p2i(_owner), p2i(_cxq), p2i(_EntryList));
 421   return ss->base();
 422 }
 423 
 424 #define MAX_RECHECK_INTERVAL 1000


 843 // thread acquires the lock and then drops the lock, at which time the
 844 // exiting thread will notice and unpark the stranded thread, or, (b)
 845 // the timer expires.  If the lock is high traffic then the stranding latency
 846 // will be low due to (a).  If the lock is low traffic then the odds of
 847 // stranding are lower, although the worst-case stranding latency
 848 // is longer.  Critically, we don't want to put excessive load in the
 849 // platform's timer subsystem.  We want to minimize both the timer injection
 850 // rate (timers created/sec) as well as the number of timers active at
 851 // any one time.  (more precisely, we want to minimize timer-seconds, which is
 852 // the integral of the # of active timers at any instant over time).
 853 // Both impinge on OS scalability.  Given that, at most one thread parked on
 854 // a monitor will use a timer.
 855 //
 856 // There is also the risk of a futile wake-up. If we drop the lock
 857 // another thread can reacquire the lock immediately, and we can
 858 // then wake a thread unnecessarily. This is benign, and we've
 859 // structured the code so the windows are short and the frequency
 860 // of such futile wakeups is low.
 861 
 862 void ObjectMonitor::exit(bool not_suspended, TRAPS) {
 863   Thread* const Self = THREAD;
 864   void* cur = Atomic::load(&_owner);
 865   if (THREAD != cur) {
 866     if (THREAD->is_lock_owned((address)cur)) {



 867       assert(_recursions == 0, "invariant");
 868       set_owner_from_BasicLock(cur, Self);  // Convert from BasicLock* to Thread*.
 869       _recursions = 0;
 870     } else {
 871       // Apparent unbalanced locking ...
 872       // Naively we'd like to throw IllegalMonitorStateException.
 873       // As a practical matter we can neither allocate nor throw an
 874       // exception as ::exit() can be called from leaf routines.
 875       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
 876       // Upon deeper reflection, however, in a properly run JVM the only
 877       // way we should encounter this situation is in the presence of
 878       // unbalanced JNI locking. TODO: CheckJNICalls.
 879       // See also: CR4414101
 880 #ifdef ASSERT
 881       LogStreamHandle(Error, monitorinflation) lsh;
 882       lsh.print_cr("ERROR: ObjectMonitor::exit(): thread=" INTPTR_FORMAT
 883                     " is exiting an ObjectMonitor it does not own.", p2i(THREAD));
 884       lsh.print_cr("The imbalance is possibly caused by JNI locking.");
 885       print_debug_style_on(&lsh);
 886 #endif
 887       assert(false, "Non-balanced monitor enter/exit!");
 888       return;


 892   if (_recursions != 0) {
 893     _recursions--;        // this is simple recursive enter
 894     return;
 895   }
 896 
 897   // Invariant: after setting Responsible=null a thread must execute
 898   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
 899   _Responsible = NULL;
 900 
 901 #if INCLUDE_JFR
 902   // get the owner's thread id for the MonitorEnter event
 903   // if it is enabled and the thread isn't suspended
 904   if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
 905     _previous_owner_tid = JFR_THREAD_ID(Self);
 906   }
 907 #endif
 908 
 909   for (;;) {
 910     assert(THREAD == _owner, "invariant");
 911 
 912     // Drop the lock.
 913     // release semantics: prior loads and stores from within the critical section
 914     // must not float (reorder) past the following store that drops the lock.
 915     // Uses a storeload to separate release_store(owner) from the
 916     // successor check. The try_set_owner_from() below uses cmpxchg() so
 917     // we get the fence down there.
 918     release_clear_owner(Self);
 919     OrderAccess::storeload();
 920 
 921     if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
 922       return;
 923     }
 924     // Other threads are blocked trying to acquire the lock.
 925 
 926     // Normally the exiting thread is responsible for ensuring succession,
 927     // but if other successors are ready or other entering threads are spinning
 928     // then this thread can simply store NULL into _owner and exit without
 929     // waking a successor.  The existence of spinners or ready successors
 930     // guarantees proper succession (liveness).  Responsibility passes to the
 931     // ready or running successors.  The exiting thread delegates the duty.
 932     // More precisely, if a successor already exists this thread is absolved
 933     // of the responsibility of waking (unparking) one.
 934     //
 935     // The _succ variable is critical to reducing futile wakeup frequency.
 936     // _succ identifies the "heir presumptive" thread that has been made
 937     // ready (unparked) but that has not yet run.  We need only one such
 938     // successor thread to guarantee progress.
 939     // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
 940     // section 3.3 "Futile Wakeup Throttling" for details.


 942     // Note that spinners in Enter() also set _succ non-null.
 943     // In the current implementation spinners opportunistically set
 944     // _succ so that exiting threads might avoid waking a successor.
 945     // Another less appealing alternative would be for the exiting thread
 946     // to drop the lock and then spin briefly to see if a spinner managed
 947     // to acquire the lock.  If so, the exiting thread could exit
 948     // immediately without waking a successor, otherwise the exiting
 949     // thread would need to dequeue and wake a successor.
 950     // (Note that we'd need to make the post-drop spin short, but no
 951     // shorter than the worst-case round-trip cache-line migration time.
 952     // The dropped lock needs to become visible to the spinner, and then
 953     // the acquisition of the lock by the spinner must become visible to
 954     // the exiting thread).
 955 
 956     // It appears that an heir-presumptive (successor) must be made ready.
 957     // Only the current lock owner can manipulate the EntryList or
 958     // drain _cxq, so we need to reacquire the lock.  If we fail
 959     // to reacquire the lock the responsibility for ensuring succession
 960     // falls to the new owner.
 961     //
 962     if (try_set_owner_from(NULL, Self) != NULL) {
 963       return;
 964     }
 965 
 966     guarantee(_owner == THREAD, "invariant");
 967 
 968     ObjectWaiter * w = NULL;
 969 
 970     w = _EntryList;
 971     if (w != NULL) {
 972       // I'd like to write: guarantee (w->_thread != Self).
 973       // But in practice an exiting thread may find itself on the EntryList.
 974       // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
 975       // then calls exit().  Exit releases the lock by setting O._owner to NULL.
 976       // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
 977       // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
 978       // releases the lock "O".  T2 resumes immediately after the ST of null into
 979       // _owner, above.  T2 notices that the EntryList is populated, so it
 980       // reacquires the lock and then finds itself on the EntryList.
 981       // Given all that, we have to tolerate the circumstance where "w" is
 982       // associated with Self.


1075 
1076 
1077 void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
1078   assert(_owner == Self, "invariant");
1079 
1080   // Exit protocol:
1081   // 1. ST _succ = wakee
1082   // 2. membar #loadstore|#storestore;
1083   // 3. ST _owner = NULL
1084   // 4. unpark(wakee)
1085 
1086   _succ = Wakee->_thread;
1087   ParkEvent * Trigger = Wakee->_event;
1088 
1089   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
1090   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
1091   // out-of-scope (non-extant).
1092   Wakee  = NULL;
1093 
1094   // Drop the lock
1095   // Uses a fence to separate release_store(owner) from the LD in unpark().
1096   release_clear_owner(Self);
1097   OrderAccess::fence();
1098 
1099   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
1100   Trigger->unpark();
1101 
1102   // Maintain stats and report events to JVMTI
1103   OM_PERFDATA_OP(Parks, inc());
1104 }
1105 
1106 
1107 // -----------------------------------------------------------------------------
1108 // Class Loader deadlock handling.
1109 //
1110 // complete_exit exits a lock returning recursion count
1111 // complete_exit/reenter operate as a wait without waiting
1112 // complete_exit requires an inflated monitor
1113 // The _owner field is not always the Thread addr even with an
1114 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1115 // thread due to contention.
1116 intx ObjectMonitor::complete_exit(TRAPS) {
1117   Thread * const Self = THREAD;
1118   assert(Self->is_Java_thread(), "Must be Java thread!");
1119   JavaThread *jt = (JavaThread *)THREAD;
1120 
1121   assert(InitDone, "Unexpectedly not initialized");
1122 
1123   void* cur = Atomic::load(&_owner);
1124   if (THREAD != cur) {
1125     if (THREAD->is_lock_owned((address)cur)) {
1126       assert(_recursions == 0, "internal state error");
1127       set_owner_from_BasicLock(cur, Self);  // Convert from BasicLock* to Thread*.
1128       _recursions = 0;
1129     }
1130   }
1131 
1132   guarantee(Self == _owner, "complete_exit not owner");
1133   intx save = _recursions; // record the old recursion count
1134   _recursions = 0;        // set the recursion level to be 0
1135   exit(true, Self);           // exit the monitor
1136   guarantee(_owner != Self, "invariant");
1137   return save;
1138 }
1139 
1140 // reenter() enters a lock and sets recursion count
1141 // complete_exit/reenter operate as a wait without waiting
1142 void ObjectMonitor::reenter(intx recursions, TRAPS) {
1143   Thread * const Self = THREAD;
1144   assert(Self->is_Java_thread(), "Must be Java thread!");
1145   JavaThread *jt = (JavaThread *)THREAD;
1146 
1147   guarantee(_owner != Self, "reenter already owner");


1152 }
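
As a hypothetical illustration of the complete_exit()/reenter() pairing described above (the wrapper function below is invented and not part of this patch), the two calls bracket work that must not run while holding the monitor, preserving the recursion count across the gap:

// Invented caller sketch -- do_without_monitor() does not exist in HotSpot.
static void do_without_monitor(ObjectMonitor* monitor, TRAPS) {
  intx saved = monitor->complete_exit(THREAD);  // fully release; remember recursion count
  // ... work that must not be done while owning the monitor ...
  monitor->reenter(saved, THREAD);              // reacquire and restore the saved count
}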
1153 
1154 // Checks that the current THREAD owns this monitor and causes an
1155 // immediate return if it doesn't. We don't use the CHECK macro
1156 // because we want the IMSE to be the only exception that is thrown
1157 // from the call site when false is returned. Any other pending
1158 // exception is ignored.
1159 #define CHECK_OWNER()                                                  \
1160   do {                                                                 \
1161     if (!check_owner(THREAD)) {                                        \
1162        assert(HAS_PENDING_EXCEPTION, "expected a pending IMSE here."); \
1163        return;                                                         \
1164      }                                                                 \
1165   } while (false)
1166 
1167 // Returns true if the specified thread owns the ObjectMonitor.
1168 // Otherwise returns false and throws IllegalMonitorStateException
1169 // (IMSE). If there is a pending exception and the specified thread
1170 // is not the owner, that exception will be replaced by the IMSE.
1171 bool ObjectMonitor::check_owner(Thread* THREAD) {
1172   void* cur = Atomic::load(&_owner);
1173   if (cur == THREAD) {
1174     return true;
1175   }
1176   if (THREAD->is_lock_owned((address)cur)) {
1177     set_owner_from_BasicLock(cur, THREAD);  // Convert from BasicLock* to Thread*.
1178     _recursions = 0;
1179     return true;
1180   }
1181   THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
1182              "current thread is not owner", false);
1183 }
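
A hypothetical usage sketch for CHECK_OWNER() (the member function below is invented for illustration; the real users are the monitor operations defined elsewhere in this file):

// Invented example, not part of ObjectMonitor's real interface.
void ObjectMonitor::example_op(TRAPS) {
  CHECK_OWNER();  // throws IMSE and returns immediately if THREAD does not own the monitor
  // ... the operation proper runs only once ownership has been established ...
}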
1184 
1185 static void post_monitor_wait_event(EventJavaMonitorWait* event,
1186                                     ObjectMonitor* monitor,
1187                                     jlong notifier_tid,
1188                                     jlong timeout,
1189                                     bool timedout) {
1190   assert(event != NULL, "invariant");
1191   assert(monitor != NULL, "invariant");
1192   event->set_monitorClass(((oop)monitor->object())->klass());
1193   event->set_timeout(timeout);
1194   event->set_address((uintptr_t)monitor->object_addr());
1195   event->set_notifier(notifier_tid);
1196   event->set_timedOut(timedout);
1197   event->commit();


1666     // We periodically check to see if there's a safepoint pending.
1667     if ((ctr & 0xFF) == 0) {
1668       if (SafepointMechanism::should_block(Self)) {
1669         goto Abort;           // abrupt spin egress
1670       }
1671       SpinPause();
1672     }
1673 
1674     // Probe _owner with TATAS
1675     // If this thread observes the monitor transition or flicker
1676     // from locked to unlocked to locked, then the odds that this
1677     // thread will acquire the lock in this spin attempt go down
1678     // considerably.  The same argument applies if the CAS fails
1679     // or if we observe _owner change from one non-null value to
1680     // another non-null value.   In such cases we might abort
1681     // the spin without prejudice or apply a "penalty" to the
1682     // spin count-down variable "ctr", reducing it by 100, say.
1683 
1684     Thread * ox = (Thread *) _owner;
1685     if (ox == NULL) {
1686       ox = (Thread*)try_set_owner_from(NULL, Self);
1687       if (ox == NULL) {
1688         // The CAS succeeded -- this thread acquired ownership
1689         // Take care of some bookkeeping to exit spin state.
1690         if (_succ == Self) {
1691           _succ = NULL;
1692         }
1693 
1694         // Increase _SpinDuration :
1695         // The spin was successful (profitable) so we tend toward
1696         // longer spin attempts in the future.
1697         // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
1698         // If we acquired the lock early in the spin cycle it
1699         // makes sense to increase _SpinDuration proportionally.
1700         // Note that we don't clamp SpinDuration precisely at SpinLimit.
1701         int x = _SpinDuration;
1702         if (x < Knob_SpinLimit) {
1703           if (x < Knob_Poverty) x = Knob_Poverty;
1704           _SpinDuration = x + Knob_Bonus;
1705         }
1706         return 1;

