
src/hotspot/share/runtime/objectMonitor.cpp

rev 54415 : 8222295: more baseline cleanups from Async Monitor Deflation project
rev 54416 : Checkpoint latest preliminary review patches for full OpenJDK review; merge with 8222295.patch.
rev 54417 : imported patch dcubed.monitor_deflate_conc.v2.01


 221 //
 222 // * See also http://blogs.sun.com/dave
 223 
 224 
 225 void* ObjectMonitor::operator new (size_t size) throw() {
 226   return AllocateHeap(size, mtInternal);
 227 }
 228 void* ObjectMonitor::operator new[] (size_t size) throw() {
 229   return operator new (size);
 230 }
 231 void ObjectMonitor::operator delete(void* p) {
 232   FreeHeap(p);
 233 }
 234 void ObjectMonitor::operator delete[] (void *p) {
 235   operator delete(p);
 236 }
 237 
 238 // -----------------------------------------------------------------------------
 239 // Enter support
 240 
 241 void ObjectMonitor::enter(TRAPS) {
 242   // The following code is ordered to check the most common cases first
 243   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 244   Thread * const Self = THREAD;
 245 
 246   void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
 247   if (cur == NULL) {
 248     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
 249     assert(_recursions == 0, "invariant");
 250     assert(_owner == Self, "invariant");
 251     return;
 252   }
 253 
 254   if (cur == Self) {
 255     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 256     _recursions++;
 257     return;
 258   }
 259 
 260   if (Self->is_lock_owned ((address)cur)) {
 261     assert(_recursions == 0, "internal state error");
 262     _recursions = 1;
 263     // Commute owner from a thread-specific on-stack BasicLockObject address to
 264     // a full-fledged "Thread *".
 265     _owner = Self;
 266     return;
 267   }
 268 
 269   // We've encountered genuine contention.
 270   assert(Self->_Stalled == 0, "invariant");
 271   Self->_Stalled = intptr_t(this);
 272 
 273   // Try one round of spinning *before* enqueueing Self
 274   // and before going through the awkward and expensive state
 275   // transitions.  The following spin is strictly optional ...
 276   // Note that if we acquire the monitor from an initial spin
 277   // we forgo posting JVMTI events and firing DTRACE probes.
 278   if (TrySpin(Self) > 0) {
 279     assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
 280     assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT,
 281            _recursions);
 282     assert(((oop)object())->mark() == markOopDesc::encode(this),
 283            "object mark must match encoded this: mark=" INTPTR_FORMAT
 284            ", encoded this=" INTPTR_FORMAT, p2i(((oop)object())->mark()),
 285            p2i(markOopDesc::encode(this)));
 286     Self->_Stalled = 0;
 287     return;
 288   }
 289 
 290   assert(_owner != Self, "invariant");
 291   assert(_succ != Self, "invariant");
 292   assert(Self->is_Java_thread(), "invariant");
 293   JavaThread * jt = (JavaThread *) Self;
 294   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 295   assert(jt->thread_state() != _thread_blocked, "invariant");
 296   assert(this->object() != NULL, "invariant");
 297   assert(_contentions >= 0, "invariant");
 298 
 299   // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
 300   // Ensure the object-monitor relationship remains stable while there's contention.
 301   Atomic::inc(&_contentions);
 302 
 303   JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
 304   EventJavaMonitorEnter event;
 305   if (event.should_commit()) {
 306     event.set_monitorClass(((oop)this->object())->klass());
 307     event.set_address((uintptr_t)(this->object_addr()));
 308   }
 309 
 310   { // Change java thread status to indicate blocked on monitor enter.
 311     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 312 
 313     Self->set_current_pending_monitor(this);
 314 
 315     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
 316     if (JvmtiExport::should_post_monitor_contended_enter()) {
 317       JvmtiExport::post_monitor_contended_enter(jt, this);
 318 
 319       // The current thread does not yet own the monitor and does not
 320       // yet appear on any queues that would get it made the successor.
 321       // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event


 343       //
 344       _recursions = 0;
 345       _succ = NULL;
 346       exit(false, Self);
 347 
 348       jt->java_suspend_self();
 349     }
 350     Self->set_current_pending_monitor(NULL);
 351 
 352     // We cleared the pending monitor info since we've just gotten past
 353     // the enter-check-for-suspend dance and we now own the monitor free
 354     // and clear, i.e., it is no longer pending. The ThreadBlockInVM
 355     // destructor can go to a safepoint at the end of this block. If we
 356     // do a thread dump during that safepoint, then this thread will show
 357     // as having "-locked" the monitor, but the OS and java.lang.Thread
 358     // states will still report that the thread is blocked trying to
 359     // acquire it.
 360   }
 361 
 362   Atomic::dec(&_contentions);
 363   assert(_contentions >= 0, "invariant");
 364   Self->_Stalled = 0;
 365 
 366   // Must either set _recursions = 0 or ASSERT _recursions == 0.
 367   assert(_recursions == 0, "invariant");
 368   assert(_owner == Self, "invariant");
 369   assert(_succ != Self, "invariant");
 370   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 371 
 372   // The thread -- now the owner -- is back in vm mode.
 373   // Report the glorious news via TI,DTrace and jvmstat.
 374   // The probe effect is non-trivial.  All the reportage occurs
 375   // while we hold the monitor, increasing the length of the critical
 376   // section.  Amdahl's parallel speedup law comes vividly into play.
 377   //
 378   // Another option might be to aggregate the events (thread local or
 379   // per-monitor aggregation) and defer reporting until a more opportune
 380   // time -- such as next time some thread encounters contention but has
 381   // yet to acquire the lock.  While that thread is spinning it could
 382   // increment JVMStat counters, etc.
 383 
 384   DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
 385   if (JvmtiExport::should_post_monitor_contended_entered()) {
 386     JvmtiExport::post_monitor_contended_entered(jt, this);
 387 
 388     // The current thread already owns the monitor and is not going to
 389     // call park() for the remainder of the monitor enter protocol. So
 390     // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
 391     // event handler consumed an unpark() issued by the thread that
 392     // just exited the monitor.
 393   }
 394   if (event.should_commit()) {
 395     event.set_previousOwner((uintptr_t)_previous_owner_tid);
 396     event.commit();
 397   }
 398   OM_PERFDATA_OP(ContendedLockAttempts, inc());

 399 }
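
The _contentions increment near the top of enter() above is what keeps deflate_idle_monitors() away: a monitor is only deflated when its is_busy() check sees no activity. A minimal sketch of that check, assuming the baseline field set (not copied from this webrev):

  // Sketch only: deflation is skipped while any of these fields is non-zero,
  // so the Atomic::inc(&_contentions) above pins the monitor for the
  // duration of a contended enter.
  intptr_t ObjectMonitor::is_busy() const {
    return _contentions | _waiters | intptr_t(_owner) |
           intptr_t(_cxq) | intptr_t(_EntryList);
  }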
 400 
 401 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 402 // Callers must compensate as needed.
 403 
 404 int ObjectMonitor::TryLock(Thread * Self) {
 405   void * own = _owner;
 406   if (own != NULL) return 0;
 407   if (Atomic::replace_if_null(Self, &_owner)) {
 408     // Either guarantee _recursions == 0 or set _recursions = 0.
 409     assert(_recursions == 0, "invariant");
 410     assert(_owner == Self, "invariant");
 411     return 1;
 412   }
 413   // The lock had been free momentarily, but we lost the race to the lock.
 414   // Interference -- the CAS failed.
 415   // We can either return -1 or retry.
 416   // Retry doesn't make as much sense because the lock was just acquired.
 417   return -1;
 418 }
 419 
 420 #define MAX_RECHECK_INTERVAL 1000
 421 
 422 void ObjectMonitor::EnterI(TRAPS) {
 423   Thread * const Self = THREAD;
 424   assert(Self->is_Java_thread(), "invariant");
 425   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 426 
 427   // Try the lock - TATAS
 428   if (TryLock (Self) > 0) {
 429     assert(_succ != Self, "invariant");
 430     assert(_owner == Self, "invariant");
 431     assert(_Responsible != Self, "invariant");
 432     return;
 433   }
 434 
 435   assert(InitDone, "Unexpectedly not initialized");
 436 
 437   // We try one round of spinning *before* enqueueing Self.
 438   //
 439   // If the _owner is ready but OFFPROC we could use a YieldTo()
 440   // operation to donate the remainder of this thread's quantum
 441   // to the owner.  This has subtle but beneficial affinity
 442   // effects.
 443 
 444   if (TrySpin(Self) > 0) {
 445     assert(_owner == Self, "invariant");
 446     assert(_succ != Self, "invariant");
 447     assert(_Responsible != Self, "invariant");
 448     return;
 449   }
 450 
 451   // The Spin failed -- Enqueue and park the thread ...
 452   assert(_succ != Self, "invariant");
 453   assert(_owner != Self, "invariant");
 454   assert(_Responsible != Self, "invariant");


 531 
 532   for (;;) {
 533 
 534     if (TryLock(Self) > 0) break;
 535     assert(_owner != Self, "invariant");
 536 
 537     // park self
 538     if (_Responsible == Self) {
 539       Self->_ParkEvent->park((jlong) recheckInterval);
 540       // Increase the recheckInterval, but clamp the value.
 541       recheckInterval *= 8;
 542       if (recheckInterval > MAX_RECHECK_INTERVAL) {
 543         recheckInterval = MAX_RECHECK_INTERVAL;
 544       }
 545     } else {
 546       Self->_ParkEvent->park();
 547     }
 548 
 549     if (TryLock(Self) > 0) break;
 550 
 551     // The lock is still contested.
 552     // Keep a tally of the # of futile wakeups.
 553     // Note that the counter is not protected by a lock or updated by atomics.
 554     // That is by design - we trade "lossy" counters which are exposed to
 555     // races during updates for a lower probe effect.
 556 
 557     // This PerfData object can be used in parallel with a safepoint.
 558     // See the work around in PerfDataManager::destroy().
 559     OM_PERFDATA_OP(FutileWakeups, inc());
 560     ++nWakeups;
 561 
 562     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 563     // We can defer clearing _succ until after the spin completes
 564     // TrySpin() must tolerate being called with _succ == Self.
 565     // Try yet another round of adaptive spinning.
 566     if (TrySpin(Self) > 0) break;
 567 
 568     // We can find that we were unpark()ed and redesignated _succ while
 569     // we were spinning.  That's harmless.  If we iterate and call park(),
 570     // park() will consume the event and return immediately and we'll


 652 // In the future we should reconcile EnterI() and ReenterI().
 653 
 654 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
 655   assert(Self != NULL, "invariant");
 656   assert(SelfNode != NULL, "invariant");
 657   assert(SelfNode->_thread == Self, "invariant");
 658   assert(_waiters > 0, "invariant");
 659   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 660   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 661   JavaThread * jt = (JavaThread *) Self;
 662 
 663   int nWakeups = 0;
 664   for (;;) {
 665     ObjectWaiter::TStates v = SelfNode->TState;
 666     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 667     assert(_owner != Self, "invariant");
 668 
 669     if (TryLock(Self) > 0) break;
 670     if (TrySpin(Self) > 0) break;
 671 
 672     // State transition wrappers around park() ...
 673     // ReenterI() wisely defers state transitions until
 674     // it's clear we must park the thread.
 675     {
 676       OSThreadContendState osts(Self->osthread());
 677       ThreadBlockInVM tbivm(jt);
 678 
 679       // cleared by handle_special_suspend_equivalent_condition()
 680       // or java_suspend_self()
 681       jt->set_suspend_equivalent();
 682       Self->_ParkEvent->park();
 683 
 684       // were we externally suspended while we were waiting?
 685       for (;;) {
 686         if (!ExitSuspendEquivalent(jt)) break;
 687         if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
 688         jt->java_suspend_self();
 689         jt->set_suspend_equivalent();
 690       }
 691     }


 859   Thread * const Self = THREAD;
 860   if (THREAD != _owner) {
 861     if (THREAD->is_lock_owned((address) _owner)) {
 862       // Transmute _owner from a BasicLock pointer to a Thread address.
 863       // We don't need to hold _mutex for this transition.
 864       // Non-null to Non-null is safe as long as all readers can
 865       // tolerate either flavor.
 866       assert(_recursions == 0, "invariant");
 867       _owner = THREAD;
 868       _recursions = 0;
 869     } else {
 870       // Apparent unbalanced locking ...
 871       // Naively we'd like to throw IllegalMonitorStateException.
 872       // As a practical matter we can neither allocate nor throw an
 873       // exception as ::exit() can be called from leaf routines.
 874       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
 875       // Upon deeper reflection, however, in a properly run JVM the only
 876       // way we should encounter this situation is in the presence of
 877       // unbalanced JNI locking. TODO: CheckJNICalls.
 878       // See also: CR4414101
 879       assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");

 880       return;
 881     }
 882   }
 883 
 884   if (_recursions != 0) {
 885     _recursions--;        // this is simple recursive enter
 886     return;
 887   }
 888 
 889   // Invariant: after setting Responsible=null a thread must execute
 890   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
 891   _Responsible = NULL;
 892 
 893 #if INCLUDE_JFR
 894   // get the owner's thread id for the MonitorEnter event
 895   // if it is enabled and the thread isn't suspended
 896   if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
 897     _previous_owner_tid = JFR_THREAD_ID(Self);
 898   }
 899 #endif


1109   assert(InitDone, "Unexpectedly not initialized");
1110 
1111   if (THREAD != _owner) {
1112     if (THREAD->is_lock_owned ((address)_owner)) {
1113       assert(_recursions == 0, "internal state error");
1114       _owner = THREAD;   // Convert from basiclock addr to Thread addr
1115       _recursions = 0;
1116     }
1117   }
1118 
1119   guarantee(Self == _owner, "complete_exit not owner");
1120   intptr_t save = _recursions; // record the old recursion count
1121   _recursions = 0;        // set the recursion level to be 0
1122   exit(true, Self);           // exit the monitor
1123   guarantee(_owner != Self, "invariant");
1124   return save;
1125 }
1126 
1127 // reenter() enters a lock and sets recursion count
1128 // complete_exit/reenter operate as a wait without waiting
1129 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1130   Thread * const Self = THREAD;
1131   assert(Self->is_Java_thread(), "Must be Java thread!");
1132   JavaThread *jt = (JavaThread *)THREAD;
1133 
1134   guarantee(_owner != Self, "reenter already owner");
1135   enter(THREAD);       // enter the monitor
1136   guarantee(_recursions == 0, "reenter recursion");
1137   _recursions = recursions;
1138   return;
1139 }
1140 
1141 
1142 // -----------------------------------------------------------------------------
1143 // A macro is used below because there may already be a pending
1144 // exception which should not abort the execution of the routines
1145 // which use this (which is why we don't put this into check_slow and
1146 // call it with a CHECK argument).
1147 
1148 #define CHECK_OWNER()                                                       \
1149   do {                                                                      \
1150     if (THREAD != _owner) {                                                 \
1151       if (THREAD->is_lock_owned((address) _owner)) {                        \
1152         _owner = THREAD;  /* Convert from basiclock addr to Thread addr */  \
1153         _recursions = 0;                                                    \
1154       } else {                                                              \
1155         THROW(vmSymbols::java_lang_IllegalMonitorStateException());         \
1156       }                                                                     \
1157     }                                                                       \
1158   } while (false)


1346         //
1347         // We redo the unpark() to ensure forward progress, i.e., we
1348         // don't want all pending threads hanging (parked) with none
1349         // entering the unlocked monitor.
1350         node._event->unpark();
1351       }
1352     }
1353 
1354     if (event.should_commit()) {
1355       post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
1356     }
1357 
1358     OrderAccess::fence();
1359 
1360     assert(Self->_Stalled != 0, "invariant");
1361     Self->_Stalled = 0;
1362 
1363     assert(_owner != Self, "invariant");
1364     ObjectWaiter::TStates v = node.TState;
1365     if (v == ObjectWaiter::TS_RUN) {
1366       enter(Self);

1367     } else {
1368       guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
1369       ReenterI(Self, &node);
1370       node.wait_reenter_end(this);
1371     }
1372 
1373     // Self has reacquired the lock.
1374     // Lifecycle - the node representing Self must not appear on any queues.
1375     // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1376     // want residual elements associated with this thread left on any lists.
1377     guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
1378     assert(_owner == Self, "invariant");
1379     assert(_succ != Self, "invariant");
1380   } // OSThreadWaitState()
1381 
1382   jt->set_current_waiting_monitor(NULL);
1383 
1384   guarantee(_recursions == 0, "invariant");
1385   _recursions = save;     // restore the old recursion count
1386   _waiters--;             // decrement the number of waiters


1908     n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,  \
1909                                         CHECK);                          \
1910   }
1911 #define NEWPERFVARIABLE(n)                                                \
1912   {                                                                       \
1913     n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,  \
1914                                          CHECK);                          \
1915   }
1916     NEWPERFCOUNTER(_sync_Inflations);
1917     NEWPERFCOUNTER(_sync_Deflations);
1918     NEWPERFCOUNTER(_sync_ContendedLockAttempts);
1919     NEWPERFCOUNTER(_sync_FutileWakeups);
1920     NEWPERFCOUNTER(_sync_Parks);
1921     NEWPERFCOUNTER(_sync_Notifications);
1922     NEWPERFVARIABLE(_sync_MonExtant);
1923 #undef NEWPERFCOUNTER
1924 #undef NEWPERFVARIABLE
1925   }
1926 
1927   DEBUG_ONLY(InitDone = true;)
1928 }


 221 //
 222 // * See also http://blogs.sun.com/dave
 223 
 224 
 225 void* ObjectMonitor::operator new (size_t size) throw() {
 226   return AllocateHeap(size, mtInternal);
 227 }
 228 void* ObjectMonitor::operator new[] (size_t size) throw() {
 229   return operator new (size);
 230 }
 231 void ObjectMonitor::operator delete(void* p) {
 232   FreeHeap(p);
 233 }
 234 void ObjectMonitor::operator delete[] (void *p) {
 235   operator delete(p);
 236 }
 237 
 238 // -----------------------------------------------------------------------------
 239 // Enter support
 240 
 241 bool ObjectMonitor::enter(TRAPS) {
 242   // The following code is ordered to check the most common cases first
 243   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 244   Thread * const Self = THREAD;
 245 
 246   void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
 247   if (cur == NULL) {
 248     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
 249     assert(_recursions == 0, "invariant");
 250     assert(_owner == Self, "invariant");
 251     return true;
 252   }
 253 
 254   if (cur == Self) {
 255     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 256     _recursions++;
 257     return true;
 258   }
 259 
 260   if (Self->is_lock_owned ((address)cur)) {
 261     assert(_recursions == 0, "internal state error");
 262     _recursions = 1;
 263     // Commute owner from a thread-specific on-stack BasicLockObject address to
 264     // a full-fledged "Thread *".
 265     _owner = Self;
 266     return true;
 267   }
 268 
 269   // We've encountered genuine contention.
 270   assert(Self->_Stalled == 0, "invariant");
 271   Self->_Stalled = intptr_t(this);
 272 
 273   // Try one round of spinning *before* enqueueing Self
 274   // and before going through the awkward and expensive state
 275   // transitions.  The following spin is strictly optional ...
 276   // Note that if we acquire the monitor from an initial spin
 277   // we forgo posting JVMTI events and firing DTRACE probes.
 278   if (TrySpin(Self) > 0) {
 279     assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
 280     assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT,
 281            _recursions);
 282     assert(((oop)object())->mark() == markOopDesc::encode(this),
 283            "object mark must match encoded this: mark=" INTPTR_FORMAT
 284            ", encoded this=" INTPTR_FORMAT, p2i(((oop)object())->mark()),
 285            p2i(markOopDesc::encode(this)));
 286     Self->_Stalled = 0;
 287     return true;
 288   }
 289 
 290   assert(_owner != Self, "invariant");
 291   assert(_succ != Self, "invariant");
 292   assert(Self->is_Java_thread(), "invariant");
 293   JavaThread * jt = (JavaThread *) Self;
 294   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 295   assert(jt->thread_state() != _thread_blocked, "invariant");
 296   assert(AsyncDeflateIdleMonitors || this->object() != NULL, "invariant");
 297   assert(AsyncDeflateIdleMonitors || _contentions >= 0, "invariant");
 298 
 299   // Prevent deflation. See ObjectSynchronizer::deflate_monitor() and is_busy().
 300   // Ensure the object-monitor relationship remains stable while there's contention.
 301   const jint contentions = Atomic::add(1, &_contentions);
 302   if (contentions <= 0 && _owner == DEFLATER_MARKER) {
 303     // Async deflation is in progress. Attempt to restore the
 304     // header/dmw to the object's header so that we only retry once
 305     // if the deflater thread happens to be slow.
 306     const oop obj = (oop) object();
 307     install_displaced_markword_in_object(obj);
 308     Self->_Stalled = 0;
 309     return false;  // Caller should retry. Never mind about _contentions as this monitor has been deflated.
 310   }
 311   // The deflater thread will not deflate this monitor and the monitor is contended; continue.
 312 
 313   JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
 314   EventJavaMonitorEnter event;
 315   if (event.should_commit()) {
 316     event.set_monitorClass(((oop)this->object())->klass());
 317     event.set_address((uintptr_t)(this->object_addr()));
 318   }
 319 
 320   { // Change java thread status to indicate blocked on monitor enter.
 321     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 322 
 323     Self->set_current_pending_monitor(this);
 324 
 325     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
 326     if (JvmtiExport::should_post_monitor_contended_enter()) {
 327       JvmtiExport::post_monitor_contended_enter(jt, this);
 328 
 329       // The current thread does not yet own the monitor and does not
 330       // yet appear on any queues that would get it made the successor.
 331       // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event


 353       //
 354       _recursions = 0;
 355       _succ = NULL;
 356       exit(false, Self);
 357 
 358       jt->java_suspend_self();
 359     }
 360     Self->set_current_pending_monitor(NULL);
 361 
 362     // We cleared the pending monitor info since we've just gotten past
 363     // the enter-check-for-suspend dance and we now own the monitor free
 364     // and clear, i.e., it is no longer pending. The ThreadBlockInVM
 365     // destructor can go to a safepoint at the end of this block. If we
 366     // do a thread dump during that safepoint, then this thread will show
 367     // as having "-locked" the monitor, but the OS and java.lang.Thread
 368     // states will still report that the thread is blocked trying to
 369     // acquire it.
 370   }
 371 
 372   Atomic::dec(&_contentions);
 373   assert(AsyncDeflateIdleMonitors || _contentions >= 0, "invariant");
 374   Self->_Stalled = 0;
 375 
 376   // Must either set _recursions = 0 or ASSERT _recursions == 0.
 377   assert(_recursions == 0, "invariant");
 378   assert(_owner == Self, "invariant");
 379   assert(_succ != Self, "invariant");
 380   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 381 
 382   // The thread -- now the owner -- is back in vm mode.
 383   // Report the glorious news via TI,DTrace and jvmstat.
 384   // The probe effect is non-trivial.  All the reportage occurs
 385   // while we hold the monitor, increasing the length of the critical
 386   // section.  Amdahl's parallel speedup law comes vividly into play.
 387   //
 388   // Another option might be to aggregate the events (thread local or
 389   // per-monitor aggregation) and defer reporting until a more opportune
 390   // time -- such as next time some thread encounters contention but has
 391   // yet to acquire the lock.  While that thread is spinning it could
 392   // increment JVMStat counters, etc.
 393 
 394   DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
 395   if (JvmtiExport::should_post_monitor_contended_entered()) {
 396     JvmtiExport::post_monitor_contended_entered(jt, this);
 397 
 398     // The current thread already owns the monitor and is not going to
 399     // call park() for the remainder of the monitor enter protocol. So
 400     // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
 401     // event handler consumed an unpark() issued by the thread that
 402     // just exited the monitor.
 403   }
 404   if (event.should_commit()) {
 405     event.set_previousOwner((uintptr_t)_previous_owner_tid);
 406     event.commit();
 407   }
 408   OM_PERFDATA_OP(ContendedLockAttempts, inc());
 409   return true;
 410 }
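
With the boolean contract above, enter() now reports when it lost a race with async deflation instead of blocking on a dead monitor. A minimal caller-side sketch, assuming a slow-path enter helper that can re-inflate; the loop shape is an assumption, not code from this patch:

  for (;;) {
    ObjectMonitor* m = /* inflate obj's mark word to an ObjectMonitor* */;
    if (m->enter(THREAD)) {
      break;  // owner (possibly recursively); proceed into the critical section
    }
    // enter() returned false: the monitor was async-deflated and its
    // header/dmw has already been restored, so re-inflate and retry.
  }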
 411 
 412 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 413 // Callers must compensate as needed.
 414 
 415 int ObjectMonitor::TryLock(Thread * Self) {
 416   void * own = _owner;
 417   if (own != NULL) return 0;
 418   if (Atomic::replace_if_null(Self, &_owner)) {
 419     // Either guarantee _recursions == 0 or set _recursions = 0.
 420     assert(_recursions == 0, "invariant");
 421     assert(_owner == Self, "invariant");
 422     return 1;
 423   }
 424   // The lock had been free momentarily, but we lost the race to the lock.
 425   // Interference -- the CAS failed.
 426   // We can either return -1 or retry.
 427   // Retry doesn't make as much sense because the lock was just acquired.
 428   return -1;
 429 }
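
TryLock() deliberately returns three values; a short sketch of how the callers in this file treat them (illustrative only):

  int r = TryLock(Self);
  if (r > 0) {
    // 1: the CAS succeeded and Self now owns the monitor.
  } else {
    // 0: _owner was already non-NULL; -1: the lock looked free but the CAS
    // lost the race. Per the caveat above, a failed TryLock() is not
    // necessarily serializing, so callers keep spinning/parking and rely on
    // their own fences where ordering matters.
  }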
 430 
 431 // Install the displaced mark word (dmw) of a deflating ObjectMonitor
 432 // into the header of the object associated with the monitor. This
 433 // idempotent method is called by a thread that is deflating a
 434 // monitor and by other threads that have detected a race with the
 435 // deflation process.
 436 void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
 437   // This function must only be called when (owner == DEFLATER_MARKER
 438   // && contentions <= 0), but we can't guarantee that here because
 439   // those values could change when the ObjectMonitor gets moved from
 440   // the global free list to a per-thread free list.
 441 
 442   guarantee(obj != NULL, "must be non-NULL");
 443   if (object() != obj) {
 444     // ObjectMonitor's object ref no longer refers to the target object
 445     // so the object's header has already been restored.
 446     return;
 447   }
 448 
 449   markOop dmw = header();
 450   if (dmw == NULL) {
 451     // ObjectMonitor's header/dmw has been cleared by the deflating
 452     // thread so the object's header has already been restored.
 453     return;
 454   }
 455 
 456   // A non-NULL dmw has to be either neutral (not locked and not marked)
 457   // or is already participating in this restoration protocol.
 458   assert(dmw->is_neutral() || (dmw->is_marked() && dmw->hash() == 0),
 459          "failed precondition: dmw=" INTPTR_FORMAT, p2i(dmw));
 460 
 461   markOop marked_dmw = NULL;
 462   if (!dmw->is_marked() && dmw->hash() == 0) {
 463     // This dmw has not yet started the restoration protocol so we
 464     // mark a copy of the dmw to begin the protocol.
 465     // Note: A dmw with a hashcode does not take this code path.
 466     marked_dmw = dmw->set_marked();
 467 
 468     // All of the callers to this function can be racing with each
 469     // other trying to update the _header field.
 470     dmw = (markOop) Atomic::cmpxchg(marked_dmw, &_header, dmw);
 471     if (dmw == NULL) {
 472       // ObjectMonitor's header/dmw has been cleared by the deflating
 473       // thread so the object's header has already been restored.
 474       return;
 475     }
 476     // The _header field is now marked. The winner's 'dmw' variable
 477     // contains the original, unmarked header/dmw value and any
 478     // losers have a marked header/dmw value that will be cleaned
 479     // up below.
 480   }
 481 
 482   if (dmw->is_marked()) {
 483     // Clear the mark from the header/dmw copy in preparation for
 484     // possible restoration from this thread.
 485     assert(dmw->hash() == 0, "hashcode must be 0: dmw=" INTPTR_FORMAT,
 486            p2i(dmw));
 487     dmw = dmw->set_unmarked();
 488   }
 489   assert(dmw->is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, p2i(dmw));
 490 
 491   // Install displaced mark word if the object's header still points
 492   // to this ObjectMonitor. All racing callers to this function will
 493   // reach this point, but only one can win.
 494   obj->cas_set_mark(dmw, markOopDesc::encode(this));
 495 
 496   // Note: It does not matter which thread restored the header/dmw
 497   // into the object's header. The thread deflating the monitor just
 498   // wanted the object's header restored and it is. The threads that
 499   // detected a race with the deflation process also wanted the
 500   // object's header restored before they retry their operation and
 501   // because it is restored they will only retry once.
 502 
 503   if (marked_dmw != NULL) {
 504     // Clear _header to NULL if it is still marked_dmw so a racing
 505     // install_displaced_markword_in_object() can bail out sooner.
 506     Atomic::cmpxchg((markOop)NULL, &_header, marked_dmw);
 507   }
 508 }
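
A sketch of the race install_displaced_markword_in_object() is written for; both calls below are safe because the function is idempotent and only one cas_set_mark() can take effect (thread roles are illustrative):

  // Deflater thread: restoring the object's header is part of deflation.
  monitor->install_displaced_markword_in_object(obj);

  // Contending thread: saw _owner == DEFLATER_MARKER with _contentions <= 0
  // in enter() and restores the header itself so its single retry re-inflates
  // a fresh monitor instead of spinning on the dying one.
  monitor->install_displaced_markword_in_object(obj);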
 509 
 510 #define MAX_RECHECK_INTERVAL 1000
 511 
 512 void ObjectMonitor::EnterI(TRAPS) {
 513   Thread * const Self = THREAD;
 514   assert(Self->is_Java_thread(), "invariant");
 515   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 516 
 517   // Try the lock - TATAS
 518   if (TryLock (Self) > 0) {
 519     assert(_succ != Self, "invariant");
 520     assert(_owner == Self, "invariant");
 521     assert(_Responsible != Self, "invariant");
 522     return;
 523   }
 524 
 525   if (_owner == DEFLATER_MARKER) {
 526     // The deflation protocol finished the first part (setting _owner), but
 527     // it failed the second part (making _contentions negative) and bailed.
 528     // Because we're called from enter() we have at least one contention.
 529     guarantee(_contentions > 0, "owner == DEFLATER_MARKER && contentions <= 0 "
 530               "should have been handled by the caller: contentions=%d",
 531               _contentions);
 532     if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
 533       // Acquired the monitor.
 534       assert(_succ != Self, "invariant");
 535       assert(_Responsible != Self, "invariant");
 536       return;
 537     }
 538   }
 539 
 540   assert(InitDone, "Unexpectedly not initialized");
 541 
 542   // We try one round of spinning *before* enqueueing Self.
 543   //
 544   // If the _owner is ready but OFFPROC we could use a YieldTo()
 545   // operation to donate the remainder of this thread's quantum
 546   // to the owner.  This has subtle but beneficial affinity
 547   // effects.
 548 
 549   if (TrySpin(Self) > 0) {
 550     assert(_owner == Self, "invariant");
 551     assert(_succ != Self, "invariant");
 552     assert(_Responsible != Self, "invariant");
 553     return;
 554   }
 555 
 556   // The Spin failed -- Enqueue and park the thread ...
 557   assert(_succ != Self, "invariant");
 558   assert(_owner != Self, "invariant");
 559   assert(_Responsible != Self, "invariant");


 636 
 637   for (;;) {
 638 
 639     if (TryLock(Self) > 0) break;
 640     assert(_owner != Self, "invariant");
 641 
 642     // park self
 643     if (_Responsible == Self) {
 644       Self->_ParkEvent->park((jlong) recheckInterval);
 645       // Increase the recheckInterval, but clamp the value.
 646       recheckInterval *= 8;
 647       if (recheckInterval > MAX_RECHECK_INTERVAL) {
 648         recheckInterval = MAX_RECHECK_INTERVAL;
 649       }
 650     } else {
 651       Self->_ParkEvent->park();
 652     }
 653 
 654     if (TryLock(Self) > 0) break;
 655 
 656     if (_owner == DEFLATER_MARKER) {
 657       // The deflation protocol finished the first part (setting _owner), but
 658       // it failed the second part (making _contentions negative) and bailed.
 659       // Because we're called from enter() we have at least one contention.
 660       guarantee(_contentions > 0, "owner == DEFLATER_MARKER && contentions <= 0 "
 661                 "should have been handled by the caller: contentions=%d",
 662                 _contentions);
 663       if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
 664         // Acquired the monitor.
 665         break;
 666       }
 667     }
 668 
 669     // The lock is still contested.
 670     // Keep a tally of the # of futile wakeups.
 671     // Note that the counter is not protected by a lock or updated by atomics.
 672     // That is by design - we trade "lossy" counters which are exposed to
 673     // races during updates for a lower probe effect.
 674 
 675     // This PerfData object can be used in parallel with a safepoint.
 676     // See the work around in PerfDataManager::destroy().
 677     OM_PERFDATA_OP(FutileWakeups, inc());
 678     ++nWakeups;
 679 
 680     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 681     // We can defer clearing _succ until after the spin completes
 682     // TrySpin() must tolerate being called with _succ == Self.
 683     // Try yet another round of adaptive spinning.
 684     if (TrySpin(Self) > 0) break;
 685 
 686     // We can find that we were unpark()ed and redesignated _succ while
 687     // we were spinning.  That's harmless.  If we iterate and call park(),
 688     // park() will consume the event and return immediately and we'll


 770 // In the future we should reconcile EnterI() and ReenterI().
 771 
 772 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
 773   assert(Self != NULL, "invariant");
 774   assert(SelfNode != NULL, "invariant");
 775   assert(SelfNode->_thread == Self, "invariant");
 776   assert(_waiters > 0, "invariant");
 777   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 778   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 779   JavaThread * jt = (JavaThread *) Self;
 780 
 781   int nWakeups = 0;
 782   for (;;) {
 783     ObjectWaiter::TStates v = SelfNode->TState;
 784     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 785     assert(_owner != Self, "invariant");
 786 
 787     if (TryLock(Self) > 0) break;
 788     if (TrySpin(Self) > 0) break;
 789 
 790     if (_owner == DEFLATER_MARKER) {
 791       // The deflation protocol finished the first part (setting _owner),
 792       // but it will observe _waiters != 0 and will bail out. Because we're
 793       // called from wait() we may or may not have any contentions.
 794       guarantee(_contentions >= 0, "owner == DEFLATER_MARKER && contentions < 0 "
 795                 "should have been handled by the caller: contentions=%d",
 796                 _contentions);
 797       if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
 798         // Acquired the monitor.
 799         break;
 800       }
 801     }
 802 
 803     // State transition wrappers around park() ...
 804     // ReenterI() wisely defers state transitions until
 805     // it's clear we must park the thread.
 806     {
 807       OSThreadContendState osts(Self->osthread());
 808       ThreadBlockInVM tbivm(jt);
 809 
 810       // cleared by handle_special_suspend_equivalent_condition()
 811       // or java_suspend_self()
 812       jt->set_suspend_equivalent();
 813       Self->_ParkEvent->park();
 814 
 815       // were we externally suspended while we were waiting?
 816       for (;;) {
 817         if (!ExitSuspendEquivalent(jt)) break;
 818         if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
 819         jt->java_suspend_self();
 820         jt->set_suspend_equivalent();
 821       }
 822     }


 990   Thread * const Self = THREAD;
 991   if (THREAD != _owner) {
 992     if (THREAD->is_lock_owned((address) _owner)) {
 993       // Transmute _owner from a BasicLock pointer to a Thread address.
 994       // We don't need to hold _mutex for this transition.
 995       // Non-null to Non-null is safe as long as all readers can
 996       // tolerate either flavor.
 997       assert(_recursions == 0, "invariant");
 998       _owner = THREAD;
 999       _recursions = 0;
1000     } else {
1001       // Apparent unbalanced locking ...
1002       // Naively we'd like to throw IllegalMonitorStateException.
1003       // As a practical matter we can neither allocate nor throw an
1004       // exception as ::exit() can be called from leaf routines.
1005       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
1006       // Upon deeper reflection, however, in a properly run JVM the only
1007       // way we should encounter this situation is in the presence of
1008       // unbalanced JNI locking. TODO: CheckJNICalls.
1009       // See also: CR4414101
1010       assert(false, "Non-balanced monitor enter/exit! Likely JNI locking: "
1011              "owner=" INTPTR_FORMAT, p2i(_owner));
1012       return;
1013     }
1014   }
1015 
1016   if (_recursions != 0) {
1017     _recursions--;        // this is simple recursive enter
1018     return;
1019   }
1020 
1021   // Invariant: after setting Responsible=null a thread must execute
1022   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
1023   _Responsible = NULL;
1024 
1025 #if INCLUDE_JFR
1026   // get the owner's thread id for the MonitorEnter event
1027   // if it is enabled and the thread isn't suspended
1028   if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
1029     _previous_owner_tid = JFR_THREAD_ID(Self);
1030   }
1031 #endif


1241   assert(InitDone, "Unexpectedly not initialized");
1242 
1243   if (THREAD != _owner) {
1244     if (THREAD->is_lock_owned ((address)_owner)) {
1245       assert(_recursions == 0, "internal state error");
1246       _owner = THREAD;   // Convert from basiclock addr to Thread addr
1247       _recursions = 0;
1248     }
1249   }
1250 
1251   guarantee(Self == _owner, "complete_exit not owner");
1252   intptr_t save = _recursions; // record the old recursion count
1253   _recursions = 0;        // set the recursion level to be 0
1254   exit(true, Self);           // exit the monitor
1255   guarantee(_owner != Self, "invariant");
1256   return save;
1257 }
1258 
1259 // reenter() enters a lock and sets recursion count
1260 // complete_exit/reenter operate as a wait without waiting
1261 bool ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1262   Thread * const Self = THREAD;
1263   assert(Self->is_Java_thread(), "Must be Java thread!");
1264   JavaThread *jt = (JavaThread *)THREAD;
1265 
1266   guarantee(_owner != Self, "reenter already owner");
1267   if (!enter(THREAD)) {
1268     // Failed to enter the monitor so return for a retry.
1269     return false;
1270   }
1271   // Entered the monitor.
1272   guarantee(_recursions == 0, "reenter recursion");
1273   _recursions = recursions;
1274   return true;
1275 }
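
An illustrative pairing of complete_exit() with the boolean reenter() above, in the spirit of the "wait without waiting" comment; the retry loop and the re-inflation step are assumptions based on the new return value:

  intptr_t saved = monitor->complete_exit(THREAD);  // fully release, remember the recursion count
  // ... run code that must not hold the monitor ...
  while (!monitor->reenter(saved, THREAD)) {
    // reenter()'s enter() call lost a race with async deflation; re-inflate
    // the object's monitor and retry with the same saved recursion count.
    monitor = /* re-inflate obj's mark word */;
  }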
1276 
1277 
1278 // -----------------------------------------------------------------------------
1279 // A macro is used below because there may already be a pending
1280 // exception which should not abort the execution of the routines
1281 // which use this (which is why we don't put this into check_slow and
1282 // call it with a CHECK argument).
1283 
1284 #define CHECK_OWNER()                                                       \
1285   do {                                                                      \
1286     if (THREAD != _owner) {                                                 \
1287       if (THREAD->is_lock_owned((address) _owner)) {                        \
1288         _owner = THREAD;  /* Convert from basiclock addr to Thread addr */  \
1289         _recursions = 0;                                                    \
1290       } else {                                                              \
1291         THROW(vmSymbols::java_lang_IllegalMonitorStateException());         \
1292       }                                                                     \
1293     }                                                                       \
1294   } while (false)
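
A sketch of how CHECK_OWNER() is used at the top of the Java-facing entry points such as wait() and notify(); the body shown is illustrative:

  void ObjectMonitor::notify(TRAPS) {
    CHECK_OWNER();  // may THROW IllegalMonitorStateException and return early,
                    // without clobbering an already-pending exception
    // ... notification logic runs only once THREAD is known to own the monitor
  }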


1482         //
1483         // We redo the unpark() to ensure forward progress, i.e., we
1484         // don't want all pending threads hanging (parked) with none
1485         // entering the unlocked monitor.
1486         node._event->unpark();
1487       }
1488     }
1489 
1490     if (event.should_commit()) {
1491       post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
1492     }
1493 
1494     OrderAccess::fence();
1495 
1496     assert(Self->_Stalled != 0, "invariant");
1497     Self->_Stalled = 0;
1498 
1499     assert(_owner != Self, "invariant");
1500     ObjectWaiter::TStates v = node.TState;
1501     if (v == ObjectWaiter::TS_RUN) {
1502       const bool success = enter(Self);
1503       ADIM_guarantee(success, "enter signaled for a retry, but monitor should not have been deflated as waiters > 0");
1504     } else {
1505       guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
1506       ReenterI(Self, &node);
1507       node.wait_reenter_end(this);
1508     }
1509 
1510     // Self has reacquired the lock.
1511     // Lifecycle - the node representing Self must not appear on any queues.
1512     // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1513     // want residual elements associated with this thread left on any lists.
1514     guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
1515     assert(_owner == Self, "invariant");
1516     assert(_succ != Self, "invariant");
1517   } // OSThreadWaitState()
1518 
1519   jt->set_current_waiting_monitor(NULL);
1520 
1521   guarantee(_recursions == 0, "invariant");
1522   _recursions = save;     // restore the old recursion count
1523   _waiters--;             // decrement the number of waiters


2045     n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,  \
2046                                         CHECK);                          \
2047   }
2048 #define NEWPERFVARIABLE(n)                                                \
2049   {                                                                       \
2050     n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,  \
2051                                          CHECK);                          \
2052   }
2053     NEWPERFCOUNTER(_sync_Inflations);
2054     NEWPERFCOUNTER(_sync_Deflations);
2055     NEWPERFCOUNTER(_sync_ContendedLockAttempts);
2056     NEWPERFCOUNTER(_sync_FutileWakeups);
2057     NEWPERFCOUNTER(_sync_Parks);
2058     NEWPERFCOUNTER(_sync_Notifications);
2059     NEWPERFVARIABLE(_sync_MonExtant);
2060 #undef NEWPERFCOUNTER
2061 #undef NEWPERFVARIABLE
2062   }
2063 
2064   DEBUG_ONLY(InitDone = true;)
2065 }
2066 
2067 // For internal use by ObjectSynchronizer::monitors_iterate().
2068 ObjectMonitorHandle::ObjectMonitorHandle(ObjectMonitor * om_ptr) {
2069   om_ptr->inc_ref_count();
2070   _om_ptr = om_ptr;
2071 }
2072 
2073 ObjectMonitorHandle::~ObjectMonitorHandle() {
2074   if (_om_ptr != NULL) {
2075     _om_ptr->dec_ref_count();
2076     _om_ptr = NULL;
2077   }
2078 }
2079 
2080 // Save the ObjectMonitor* associated with the specified markOop and
2081 // increment the ref_count. This function should only be called if
2082 // the caller has verified mark->has_monitor() == true. The object
2083 // parameter is needed to verify that ObjectMonitor* has not been
2084 // deflated and reused for another object.
2085 //
2086 // This function returns true if the ObjectMonitor* has been safely
2087 // saved. This function returns false if we have lost a race with
2088 // async deflation; the caller should retry as appropriate.
2089 //
2090 bool ObjectMonitorHandle::save_om_ptr(oop object, markOop mark) {
2091   guarantee(mark->has_monitor(), "sanity check: mark=" INTPTR_FORMAT,
2092             p2i(mark));
2093 
2094   ObjectMonitor * om_ptr = mark->monitor();
2095   om_ptr->inc_ref_count();
2096 
2097   if (AsyncDeflateIdleMonitors) {
2098     // Race here if monitor is not owned! The above ref_count bump
2099     // will cause subsequent async deflation to skip it. However,
2100     // previous or concurrent async deflation is a race.
2101     if (om_ptr->_owner == DEFLATER_MARKER && om_ptr->_contentions <= 0) {
2102       // Async deflation is in progress. Attempt to restore the
2103       // header/dmw to the object's header so that we only retry once
2104       // if the deflater thread happens to be slow.
2105       om_ptr->install_displaced_markword_in_object(object);
2106       om_ptr->dec_ref_count();
2107       return false;
2108     }
2109     // The ObjectMonitor could have been deflated and reused for
2110     // another object before we bumped the ref_count so make sure
2111     // our object still refers to this ObjectMonitor.
2112     const markOop tmp = object->mark();
2113     if (!tmp->has_monitor() || tmp->monitor() != om_ptr) {
2114       // Async deflation and reuse won the race so we have to retry.
2115       // Skip object header restoration since that's already done.
2116       om_ptr->dec_ref_count();
2117       return false;
2118     }
2119   }
2120 
2121   guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
2122             p2i(_om_ptr));
2123   _om_ptr = om_ptr;
2124   return true;
2125 }
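
An illustrative retry idiom for save_om_ptr() callers; the enclosing loop, the om_ptr() accessor and the variable names are assumptions, not code from this webrev:

  ObjectMonitorHandle omh;
  for (;;) {
    markOop mark = obj->mark();
    if (mark->has_monitor() && omh.save_om_ptr(obj, mark)) {
      break;  // ref_count is bumped; async deflation will now skip this monitor
    }
    // Either the mark no longer encodes a monitor or we lost a race with
    // async deflation; the header has been restored, so re-read the mark and
    // retry (typically only once).
  }
  ObjectMonitor* m = omh.om_ptr();  // protected until omh goes out of scope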
2126 
2127 // For internal use by ObjectSynchronizer::inflate().
2128 void ObjectMonitorHandle::set_om_ptr(ObjectMonitor * om_ptr) {
2129   if (_om_ptr == NULL) {
2130     guarantee(om_ptr != NULL, "cannot clear an unset om_ptr");
2131     om_ptr->inc_ref_count();
2132     _om_ptr = om_ptr;
2133   } else {
2134     guarantee(om_ptr == NULL, "can only clear a set om_ptr");
2135     _om_ptr->dec_ref_count();
2136     _om_ptr = NULL;
2137   }
2138 }
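
set_om_ptr() toggles between protecting and releasing a monitor for ObjectSynchronizer::inflate(); a sketch of the intended call pattern, assuming an omh_p handle argument (illustrative only):

  omh_p->set_om_ptr(m);     // publish m and bump its ref_count while racing
  // ... inflate() tries to install m in the object's header ...
  omh_p->set_om_ptr(NULL);  // losing path: drop the ref_count again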