< prev index next >

src/hotspot/share/runtime/objectMonitor.cpp

Print this page
rev 55489 : Checkpoint latest preliminary review patches for full OpenJDK review; merge with 8222295.patch.
rev 55490 : imported patch dcubed.monitor_deflate_conc.v2.01
rev 55491 : imported patch dcubed.monitor_deflate_conc.v2.02
rev 55492 : imported patch dcubed.monitor_deflate_conc.v2.03
rev 55494 : imported patch dcubed.monitor_deflate_conc.v2.05


 222 // * See also http://blogs.sun.com/dave
 223 
 224 
 225 void* ObjectMonitor::operator new (size_t size) throw() {
  // ObjectMonitor storage comes from the C heap (NMT category mtInternal),
  // not from the Java heap. Declared throw(), so callers do not check for
  // NULL; AllocateHeap presumably exits the VM on allocation failure --
  // TODO(review): confirm the default AllocFailStrategy applies here.
 226   return AllocateHeap(size, mtInternal);
 227 }
 228 void* ObjectMonitor::operator new[] (size_t size) throw() {
  // The array form delegates to the scalar operator new above.
 229   return operator new (size);
 230 }
 231 void ObjectMonitor::operator delete(void* p) {
 232   FreeHeap(p);
 233 }
 234 void ObjectMonitor::operator delete[] (void *p) {
  // The array form delegates to the scalar operator delete above.
 235   operator delete(p);
 236 }
 237 
 238 // -----------------------------------------------------------------------------
 239 // Enter support
 240 
 241 void ObjectMonitor::enter(TRAPS) {


 242   // The following code is ordered to check the most common cases first
 243   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 244   Thread * const Self = THREAD;
 245 
 246   void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
 247   if (cur == NULL) {
 248     assert(_recursions == 0, "invariant");
 249     return;
 250   }
 251 
 252   if (cur == Self) {
 253     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 254     _recursions++;
 255     return;
 256   }
 257 
 258   if (Self->is_lock_owned ((address)cur)) {
 259     assert(_recursions == 0, "internal state error");
 260     _recursions = 1;
 261     // Commute owner from a thread-specific on-stack BasicLockObject address to
 262     // a full-fledged "Thread *".
 263     _owner = Self;
 264     return;
 265   }
 266 










 267   // We've encountered genuine contention.
 268   assert(Self->_Stalled == 0, "invariant");
 269   Self->_Stalled = intptr_t(this);
 270 
 271   // Try one round of spinning *before* enqueueing Self
 272   // and before going through the awkward and expensive state
 273   // transitions.  The following spin is strictly optional ...
 274   // Note that if we acquire the monitor from an initial spin
 275   // we forgo posting JVMTI events and firing DTRACE probes.
 276   if (TrySpin(Self) > 0) {
 277     assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
 278     assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT,
 279            _recursions);
 280     assert(((oop)object())->mark() == markOopDesc::encode(this),
 281            "object mark must match encoded this: mark=" INTPTR_FORMAT
 282            ", encoded this=" INTPTR_FORMAT, p2i(((oop)object())->mark()),
 283            p2i(markOopDesc::encode(this)));
 284     Self->_Stalled = 0;
 285     return;
 286   }
 287 
 288   assert(_owner != Self, "invariant");
 289   assert(_succ != Self, "invariant");
 290   assert(Self->is_Java_thread(), "invariant");
 291   JavaThread * jt = (JavaThread *) Self;
 292   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 293   assert(jt->thread_state() != _thread_blocked, "invariant");
 294   assert(this->object() != NULL, "invariant");
 295   assert(_contentions >= 0, "invariant");
 296 
 297   // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
 298   // Ensure the object-monitor relationship remains stable while there's contention.
 299   Atomic::inc(&_contentions);


 300 
 301   JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
 302   EventJavaMonitorEnter event;
 303   if (event.should_commit()) {
 304     event.set_monitorClass(((oop)this->object())->klass());
 305     event.set_address((uintptr_t)(this->object_addr()));
 306   }
 307 
 308   { // Change java thread status to indicate blocked on monitor enter.
 309     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 310 
 311     Self->set_current_pending_monitor(this);
 312 
 313     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
 314     if (JvmtiExport::should_post_monitor_contended_enter()) {
 315       JvmtiExport::post_monitor_contended_enter(jt, this);
 316 
 317       // The current thread does not yet own the monitor and does not
 318       // yet appear on any queues that would get it made the successor.
 319       // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event


 341       //
 342       _recursions = 0;
 343       _succ = NULL;
 344       exit(false, Self);
 345 
 346       jt->java_suspend_self();
 347     }
 348     Self->set_current_pending_monitor(NULL);
 349 
 350     // We cleared the pending monitor info since we've just gotten past
 351     // the enter-check-for-suspend dance and we now own the monitor free
 352     // and clear, i.e., it is no longer pending. The ThreadBlockInVM
 353     // destructor can go to a safepoint at the end of this block. If we
 354     // do a thread dump during that safepoint, then this thread will show
 355     // as having "-locked" the monitor, but the OS and java.lang.Thread
 356     // states will still report that the thread is blocked trying to
 357     // acquire it.
 358   }
 359 
 360   Atomic::dec(&_contentions);
 361   assert(_contentions >= 0, "invariant");
 362   Self->_Stalled = 0;
 363 
 364   // Must either set _recursions = 0 or ASSERT _recursions == 0.
 365   assert(_recursions == 0, "invariant");
 366   assert(_owner == Self, "invariant");
 367   assert(_succ != Self, "invariant");
 368   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 369 
 370   // The thread -- now the owner -- is back in vm mode.
 371   // Report the glorious news via TI,DTrace and jvmstat.
 372   // The probe effect is non-trivial.  All the reportage occurs
 373   // while we hold the monitor, increasing the length of the critical
 374   // section.  Amdahl's parallel speedup law comes vividly into play.
 375   //
 376   // Another option might be to aggregate the events (thread local or
 377   // per-monitor aggregation) and defer reporting until a more opportune
 378   // time -- such as next time some thread encounters contention but has
 379   // yet to acquire the lock.  While spinning that thread could
 380   // spinning we could increment JVMStat counters, etc.
 381 


 396   OM_PERFDATA_OP(ContendedLockAttempts, inc());
 397 }
 398 
 399 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 400 // Callers must compensate as needed.
 401 
 402 int ObjectMonitor::TryLock(Thread * Self) {
  // Non-blocking attempt to acquire the monitor for Self.
  // Returns:
  //   1 -- the CAS succeeded and Self now owns the monitor.
  //   0 -- the monitor appeared to be owned already; no CAS was attempted.
  //  -1 -- the monitor looked free, but the CAS to claim it lost a race.
 403   void * own = _owner;
 404   if (own != NULL) return 0;
 405   if (Atomic::replace_if_null(Self, &_owner)) {
 406     assert(_recursions == 0, "invariant");
 407     return 1;
 408   }
 409   // The lock had been free momentarily, but we lost the race to the lock.
 410   // Interference -- the CAS failed.
 411   // We can either return -1 or retry.
 412   // Retry doesn't make as much sense because the lock was just acquired.
 413   return -1;
 414 }
 415 















































































 416 // Convert the fields used by is_busy() to a string that can be
 417 // used for diagnostic output.
 418 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
  // Note: the returned pointer aliases the caller-supplied stream's
  // internal buffer (ss->base()); it is only valid while 'ss' is live
  // and not reset.
 419   ss->print("is_busy: contentions=%d, waiters=%d, owner=" INTPTR_FORMAT
 420             ", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, _contentions,
 421             _waiters, p2i(_owner), p2i(_cxq), p2i(_EntryList));







 422   return ss->base();
 423 }
 424 
 425 #define MAX_RECHECK_INTERVAL 1000
 426 
 427 void ObjectMonitor::EnterI(TRAPS) {


 428   Thread * const Self = THREAD;
 429   assert(Self->is_Java_thread(), "invariant");
 430   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 431 
 432   // Try the lock - TATAS
 433   if (TryLock (Self) > 0) {
 434     assert(_succ != Self, "invariant");
 435     assert(_owner == Self, "invariant");
 436     assert(_Responsible != Self, "invariant");
 437     return;
 438   }
 439 











 440   assert(InitDone, "Unexpectedly not initialized");
 441 
 442   // We try one round of spinning *before* enqueueing Self.
 443   //
 444   // If the _owner is ready but OFFPROC we could use a YieldTo()
 445   // operation to donate the remainder of this thread's quantum
 446   // to the owner.  This has subtle but beneficial affinity
 447   // effects.
 448 
 449   if (TrySpin(Self) > 0) {
 450     assert(_owner == Self, "invariant");
 451     assert(_succ != Self, "invariant");
 452     assert(_Responsible != Self, "invariant");
 453     return;
 454   }
 455 
 456   // The Spin failed -- Enqueue and park the thread ...
 457   assert(_succ != Self, "invariant");
 458   assert(_owner != Self, "invariant");
 459   assert(_Responsible != Self, "invariant");


 536 
 537   for (;;) {
 538 
 539     if (TryLock(Self) > 0) break;
 540     assert(_owner != Self, "invariant");
 541 
 542     // park self
 543     if (_Responsible == Self) {
 544       Self->_ParkEvent->park((jlong) recheckInterval);
 545       // Increase the recheckInterval, but clamp the value.
 546       recheckInterval *= 8;
 547       if (recheckInterval > MAX_RECHECK_INTERVAL) {
 548         recheckInterval = MAX_RECHECK_INTERVAL;
 549       }
 550     } else {
 551       Self->_ParkEvent->park();
 552     }
 553 
 554     if (TryLock(Self) > 0) break;
 555 









 556     // The lock is still contested.
 557     // Keep a tally of the # of futile wakeups.
 558     // Note that the counter is not protected by a lock or updated by atomics.
 559     // That is by design - we trade "lossy" counters which are exposed to
 560     // races during updates for a lower probe effect.
 561 
 562     // This PerfData object can be used in parallel with a safepoint.
 563     // See the work around in PerfDataManager::destroy().
 564     OM_PERFDATA_OP(FutileWakeups, inc());
 565     ++nWakeups;
 566 
 567     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 568     // We can defer clearing _succ until after the spin completes
 569     // TrySpin() must tolerate being called with _succ == Self.
 570     // Try yet another round of adaptive spinning.
 571     if (TrySpin(Self) > 0) break;
 572 
 573     // We can find that we were unpark()ed and redesignated _succ while
 574     // we were spinning.  That's harmless.  If we iterate and call park(),
 575     // park() will consume the event and return immediately and we'll


 640   // the lock.   The barrier ensures that changes to monitor meta-data and data
 641   // protected by the lock will be visible before we release the lock, and
 642   // therefore before some other thread (CPU) has a chance to acquire the lock.
 643   // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
 644   //
 645   // Critically, any prior STs to _succ or EntryList must be visible before
 646   // the ST of null into _owner in the *subsequent* (following) corresponding
 647   // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
 648   // execute a serializing instruction.
 649 
 650   return;
 651 }
 652 
 653 // ReenterI() is a specialized inline form of the latter half of the
 654 // contended slow-path from EnterI().  We use ReenterI() only for
 655 // monitor reentry in wait().
 656 //
 657 // In the future we should reconcile EnterI() and ReenterI().
 658 
 659 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {


 660   assert(Self != NULL, "invariant");
 661   assert(SelfNode != NULL, "invariant");
 662   assert(SelfNode->_thread == Self, "invariant");
 663   assert(_waiters > 0, "invariant");
 664   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 665   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 666   JavaThread * jt = (JavaThread *) Self;
 667 
 668   int nWakeups = 0;
 669   for (;;) {
 670     ObjectWaiter::TStates v = SelfNode->TState;
 671     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 672     assert(_owner != Self, "invariant");
 673 
 674     if (TryLock(Self) > 0) break;
 675     if (TrySpin(Self) > 0) break;
 676 









 677     // State transition wrappers around park() ...
 678     // ReenterI() wisely defers state transitions until
 679     // it's clear we must park the thread.
 680     {
 681       OSThreadContendState osts(Self->osthread());
 682       ThreadBlockInVM tbivm(jt);
 683 
 684       // cleared by handle_special_suspend_equivalent_condition()
 685       // or java_suspend_self()
 686       jt->set_suspend_equivalent();
 687       Self->_ParkEvent->park();
 688 
 689       // were we externally suspended while we were waiting?
 690       for (;;) {
 691         if (!ExitSuspendEquivalent(jt)) break;
 692         if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
 693         jt->java_suspend_self();
 694         jt->set_suspend_equivalent();
 695       }
 696     }


 864   Thread * const Self = THREAD;
 865   if (THREAD != _owner) {
 866     if (THREAD->is_lock_owned((address) _owner)) {
 867       // Transmute _owner from a BasicLock pointer to a Thread address.
 868       // We don't need to hold _mutex for this transition.
 869       // Non-null to Non-null is safe as long as all readers can
 870       // tolerate either flavor.
 871       assert(_recursions == 0, "invariant");
 872       _owner = THREAD;
 873       _recursions = 0;
 874     } else {
 875       // Apparent unbalanced locking ...
 876       // Naively we'd like to throw IllegalMonitorStateException.
 877       // As a practical matter we can neither allocate nor throw an
 878       // exception as ::exit() can be called from leaf routines.
 879       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
 880       // Upon deeper reflection, however, in a properly run JVM the only
 881       // way we should encounter this situation is in the presence of
 882       // unbalanced JNI locking. TODO: CheckJNICalls.
 883       // See also: CR4414101
 884       assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");

 885       return;
 886     }
 887   }
 888 
 889   if (_recursions != 0) {
 890     _recursions--;        // this is simple recursive enter
 891     return;
 892   }
 893 
 894   // Invariant: after setting Responsible=null an thread must execute
 895   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
 896   _Responsible = NULL;
 897 
 898 #if INCLUDE_JFR
 899   // get the owner's thread id for the MonitorEnter event
 900   // if it is enabled and the thread isn't suspended
 901   if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
 902     _previous_owner_tid = JFR_THREAD_ID(Self);
 903   }
 904 #endif


1120       _recursions = 0;
1121     }
1122   }
1123 
1124   guarantee(Self == _owner, "complete_exit not owner");
1125   intptr_t save = _recursions; // record the old recursion count
1126   _recursions = 0;        // set the recursion level to be 0
1127   exit(true, Self);           // exit the monitor
1128   guarantee(_owner != Self, "invariant");
1129   return save;
1130 }
1131 
1132 // reenter() enters a lock and sets recursion count
1133 // complete_exit/reenter operate as a wait without waiting
// Paired with complete_exit(): complete_exit() saves the recursion count,
// zeroes it and exits the monitor; reenter() re-acquires the monitor and
// restores the saved count.
1134 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1135   Thread * const Self = THREAD;
1136   assert(Self->is_Java_thread(), "Must be Java thread!");
1137   JavaThread *jt = (JavaThread *)THREAD;  // NOTE(review): 'jt' is unused in this body
1138 
1139   guarantee(_owner != Self, "reenter already owner");
1140   enter(THREAD);       // enter the monitor

1141   guarantee(_recursions == 0, "reenter recursion");
1142   _recursions = recursions;
1143   return;
1144 }
1145 
1146 
1147 // -----------------------------------------------------------------------------
1148 // A macro is used below because there may already be a pending
1149 // exception which should not abort the execution of the routines
1150 // which use this (which is why we don't put this into check_slow and
1151 // call it with a CHECK argument).
1152 
// CHECK_OWNER enforces that the calling thread owns this monitor before
// the guarded code runs:
//  - if THREAD == _owner, fall through;
//  - if _owner is an on-stack BasicLock address owned by THREAD, convert
//    _owner from the BasicLock address to the Thread address;
//  - otherwise THROW IllegalMonitorStateException, which returns from the
//    enclosing function (hence a macro rather than a helper taking a
//    CHECK argument -- see the comment above).
// Comments cannot be placed inside the macro body: a // comment would
// swallow the line-continuation backslash.
1153 #define CHECK_OWNER()                                                       \
1154   do {                                                                      \
1155     if (THREAD != _owner) {                                                 \
1156       if (THREAD->is_lock_owned((address) _owner)) {                        \
1157         _owner = THREAD;  /* Convert from basiclock addr to Thread addr */  \
1158         _recursions = 0;                                                    \
1159       } else {                                                              \
1160         THROW(vmSymbols::java_lang_IllegalMonitorStateException());         \
1161       }                                                                     \
1162     }                                                                       \
1163   } while (false)


1913     n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,  \
1914                                         CHECK);                          \
1915   }
1916 #define NEWPERFVARIABLE(n)                                                \
1917   {                                                                       \
1918     n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,  \
1919                                          CHECK);                          \
1920   }
1921     NEWPERFCOUNTER(_sync_Inflations);
1922     NEWPERFCOUNTER(_sync_Deflations);
1923     NEWPERFCOUNTER(_sync_ContendedLockAttempts);
1924     NEWPERFCOUNTER(_sync_FutileWakeups);
1925     NEWPERFCOUNTER(_sync_Parks);
1926     NEWPERFCOUNTER(_sync_Notifications);
1927     NEWPERFVARIABLE(_sync_MonExtant);
1928 #undef NEWPERFCOUNTER
1929 #undef NEWPERFVARIABLE
1930   }
1931 
1932   DEBUG_ONLY(InitDone = true;)










































































1933 }
1934 
// print_on() emits a compact single-line summary of the monitor state;
// the counters are printed in hex (0x%08x), recursions and owner with
// INTPTR_FORMAT.
1935 void ObjectMonitor::print_on(outputStream* st) const {
1936   // The minimal things to print for markOop printing, more can be added for debugging and logging.
1937   st->print("{contentions=0x%08x,waiters=0x%08x"
1938             ",recursions=" INTPTR_FORMAT ",owner=" INTPTR_FORMAT "}",
1939             contentions(), waiters(), recursions(),
1940             p2i(owner()));
1941 }
// Convenience wrapper: print to the default tty stream.
1942 void ObjectMonitor::print() const { print_on(tty); }


 222 // * See also http://blogs.sun.com/dave
 223 
 224 
 225 void* ObjectMonitor::operator new (size_t size) throw() {
  // ObjectMonitor storage comes from the C heap (NMT category mtInternal),
  // not from the Java heap. Declared throw(), so callers do not check for
  // NULL; AllocateHeap presumably exits the VM on allocation failure --
  // TODO(review): confirm the default AllocFailStrategy applies here.
 226   return AllocateHeap(size, mtInternal);
 227 }
 228 void* ObjectMonitor::operator new[] (size_t size) throw() {
  // The array form delegates to the scalar operator new above.
 229   return operator new (size);
 230 }
 231 void ObjectMonitor::operator delete(void* p) {
 232   FreeHeap(p);
 233 }
 234 void ObjectMonitor::operator delete[] (void *p) {
  // The array form delegates to the scalar operator delete above.
 235   operator delete(p);
 236 }
 237 
 238 // -----------------------------------------------------------------------------
 239 // Enter support
 240 
 241 void ObjectMonitor::enter(TRAPS) {
 242   ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count());
 243 
 244   // The following code is ordered to check the most common cases first
 245   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 246   Thread * const Self = THREAD;
 247 
 248   void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
 249   if (cur == NULL) {
 250     assert(_recursions == 0, "invariant");
 251     return;
 252   }
 253 
 254   if (cur == Self) {
 255     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 256     _recursions++;
 257     return;
 258   }
 259 
 260   if (Self->is_lock_owned ((address)cur)) {
 261     assert(_recursions == 0, "internal state error");
 262     _recursions = 1;
 263     // Commute owner from a thread-specific on-stack BasicLockObject address to
 264     // a full-fledged "Thread *".
 265     _owner = Self;
 266     return;
 267   }
 268 
 269   if (AsyncDeflateIdleMonitors &&
 270       Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
 271     // The deflation protocol finished the first part (setting owner),
 272     // but it failed the second part (making ref_count negative) and
 273     // bailed. Or the ObjectMonitor was async deflated and reused.
 274     // Acquired the monitor.
 275     assert(_recursions == 0, "invariant");
 276     return;
 277   }
 278 
 279   // We've encountered genuine contention.
 280   assert(Self->_Stalled == 0, "invariant");
 281   Self->_Stalled = intptr_t(this);
 282 
 283   // Try one round of spinning *before* enqueueing Self
 284   // and before going through the awkward and expensive state
 285   // transitions.  The following spin is strictly optional ...
 286   // Note that if we acquire the monitor from an initial spin
 287   // we forgo posting JVMTI events and firing DTRACE probes.
 288   if (TrySpin(Self) > 0) {
 289     assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
 290     assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT,
 291            _recursions);
 292     assert(((oop)object())->mark() == markOopDesc::encode(this),
 293            "object mark must match encoded this: mark=" INTPTR_FORMAT
 294            ", encoded this=" INTPTR_FORMAT, p2i(((oop)object())->mark()),
 295            p2i(markOopDesc::encode(this)));
 296     Self->_Stalled = 0;
 297     return;
 298   }
 299 
 300   assert(_owner != Self, "invariant");
 301   assert(_succ != Self, "invariant");
 302   assert(Self->is_Java_thread(), "invariant");
 303   JavaThread * jt = (JavaThread *) Self;
 304   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 305   assert(jt->thread_state() != _thread_blocked, "invariant");
 306   assert(AsyncDeflateIdleMonitors || this->object() != NULL, "invariant");
 307   assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions);
 308 
 309   // Prevent deflation. See ObjectSynchronizer::deflate_monitor(),
 310   // ObjectSynchronizer::deflate_monitor_using_JT() and is_busy().
 311   // Ensure the object <-> monitor relationship remains stable while
 312   // there's contention.
 313   Atomic::add(1, &_contentions);
 314 
 315   JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
 316   EventJavaMonitorEnter event;
 317   if (event.should_commit()) {
 318     event.set_monitorClass(((oop)this->object())->klass());
 319     event.set_address((uintptr_t)(this->object_addr()));
 320   }
 321 
 322   { // Change java thread status to indicate blocked on monitor enter.
 323     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 324 
 325     Self->set_current_pending_monitor(this);
 326 
 327     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
 328     if (JvmtiExport::should_post_monitor_contended_enter()) {
 329       JvmtiExport::post_monitor_contended_enter(jt, this);
 330 
 331       // The current thread does not yet own the monitor and does not
 332       // yet appear on any queues that would get it made the successor.
 333       // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event


 355       //
 356       _recursions = 0;
 357       _succ = NULL;
 358       exit(false, Self);
 359 
 360       jt->java_suspend_self();
 361     }
 362     Self->set_current_pending_monitor(NULL);
 363 
 364     // We cleared the pending monitor info since we've just gotten past
 365     // the enter-check-for-suspend dance and we now own the monitor free
 366     // and clear, i.e., it is no longer pending. The ThreadBlockInVM
 367     // destructor can go to a safepoint at the end of this block. If we
 368     // do a thread dump during that safepoint, then this thread will show
 369     // as having "-locked" the monitor, but the OS and java.lang.Thread
 370     // states will still report that the thread is blocked trying to
 371     // acquire it.
 372   }
 373 
 374   Atomic::dec(&_contentions);
 375   assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions);
 376   Self->_Stalled = 0;
 377 
 378   // Must either set _recursions = 0 or ASSERT _recursions == 0.
 379   assert(_recursions == 0, "invariant");
 380   assert(_owner == Self, "invariant");
 381   assert(_succ != Self, "invariant");
 382   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 383 
 384   // The thread -- now the owner -- is back in vm mode.
 385   // Report the glorious news via TI,DTrace and jvmstat.
 386   // The probe effect is non-trivial.  All the reportage occurs
 387   // while we hold the monitor, increasing the length of the critical
 388   // section.  Amdahl's parallel speedup law comes vividly into play.
 389   //
 390   // Another option might be to aggregate the events (thread local or
 391   // per-monitor aggregation) and defer reporting until a more opportune
 392   // time -- such as next time some thread encounters contention but has
 393   // yet to acquire the lock.  While spinning that thread could
 394   // spinning we could increment JVMStat counters, etc.
 395 


 410   OM_PERFDATA_OP(ContendedLockAttempts, inc());
 411 }
 412 
 413 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 414 // Callers must compensate as needed.
 415 
 416 int ObjectMonitor::TryLock(Thread * Self) {
  // Non-blocking attempt to acquire the monitor for Self.
  // Returns:
  //   1 -- the CAS succeeded and Self now owns the monitor.
  //   0 -- the monitor appeared to be owned already; no CAS was attempted.
  //  -1 -- the monitor looked free, but the CAS to claim it lost a race.
 417   void * own = _owner;
 418   if (own != NULL) return 0;
 419   if (Atomic::replace_if_null(Self, &_owner)) {
 420     assert(_recursions == 0, "invariant");
 421     return 1;
 422   }
 423   // The lock had been free momentarily, but we lost the race to the lock.
 424   // Interference -- the CAS failed.
 425   // We can either return -1 or retry.
 426   // Retry doesn't make as much sense because the lock was just acquired.
 427   return -1;
 428 }
 429 
 430 // Install the displaced mark word (dmw) of a deflating ObjectMonitor
 431 // into the header of the object associated with the monitor. This
 432 // idempotent method is called by a thread that is deflating a
 433 // monitor and by other threads that have detected a race with the
 434 // deflation process.
 435 void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
  // Idempotent and race-tolerant: any number of threads may call this
  // concurrently for the same monitor; at most one caller's CAS into the
  // object's header wins, and every caller returns with the header
  // restored (or finding it already restored).
 436   // This function must only be called when (owner == DEFLATER_MARKER
 437   // && ref_count <= 0), but we can't guarantee that here because
 438   // those values could change when the ObjectMonitor gets moved from
 439   // the global free list to a per-thread free list.
 440 
 441   guarantee(obj != NULL, "must be non-NULL");
 442   if (object() != obj) {
 443     // ObjectMonitor's object ref no longer refers to the target object
 444     // so the object's header has already been restored.
 445     return;
 446   }
 447 
 448   markOop dmw = header();
 449   if (dmw == NULL) {
 450     // ObjectMonitor's header/dmw has been cleared so the object's
 451     // header has already been restored.
 452     return;
 453   }
 454 
 455   // A non-NULL dmw has to be either neutral (not locked and not marked)
 456   // or is already participating in this restoration protocol.
 457   assert(dmw->is_neutral() || (dmw->is_marked() && dmw->hash() == 0),
 458          "failed precondition: dmw=" INTPTR_FORMAT, p2i(dmw));
 459 
 460   markOop marked_dmw = NULL;
 461   if (!dmw->is_marked() && dmw->hash() == 0) {
 462     // This dmw has not yet started the restoration protocol so we
 463     // mark a copy of the dmw to begin the protocol.
 464     // Note: A dmw with a hashcode does not take this code path.
 465     marked_dmw = dmw->set_marked();
 466 
 467     // All of the callers to this function can be racing with each
 468     // other trying to update the _header field.
 469     dmw = (markOop) Atomic::cmpxchg(marked_dmw, &_header, dmw);
 470     if (dmw == NULL) {
 471       // ObjectMonitor's header/dmw has been cleared so the object's
 472       // header has already been restored.
 473       return;
 474     }
 475     // The _header field is now marked. The winner's 'dmw' variable
 476     // contains the original, unmarked header/dmw value and any
 477     // losers have a marked header/dmw value that will be cleaned
 478     // up below.
 479   }
 480 
 481   if (dmw->is_marked()) {
 482     // Clear the mark from the header/dmw copy in preparation for
 483     // possible restoration from this thread.
 484     assert(dmw->hash() == 0, "hashcode must be 0: dmw=" INTPTR_FORMAT,
 485            p2i(dmw));
 486     dmw = dmw->set_unmarked();
 487   }
 488   assert(dmw->is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, p2i(dmw));
 489 
 490   // Install displaced mark word if the object's header still points
 491   // to this ObjectMonitor. All racing callers to this function will
 492   // reach this point, but only one can win.
 493   obj->cas_set_mark(dmw, markOopDesc::encode(this));
 494 
 495   // Note: It does not matter which thread restored the header/dmw
 496   // into the object's header. The thread deflating the monitor just
 497   // wanted the object's header restored and it is. The threads that
 498   // detected a race with the deflation process also wanted the
 499   // object's header restored before they retry their operation and
 500   // because it is restored they will only retry once.
 501 
 502   if (marked_dmw != NULL) {
 503     // Clear _header to NULL if it is still marked_dmw so a racing
 504     // install_displaced_markword_in_object() can bail out sooner.
 505     Atomic::cmpxchg((markOop)NULL, &_header, marked_dmw);
 506   }
 507 }
 508 
 509 // Convert the fields used by is_busy() to a string that can be
 510 // used for diagnostic output.
 511 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
 512   ss->print("is_busy: contentions=%d, waiters=%d, ", _contentions, _waiters);
 513   if (!AsyncDeflateIdleMonitors) {
 514     ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
 515   } else if (_owner != DEFLATER_MARKER) {
 516     ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
 517   } else {
 518     ss->print("owner=" INTPTR_FORMAT, NULL);
 519   }
 520   ss->print(", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, p2i(_cxq),
 521             p2i(_EntryList));
 522   return ss->base();
 523 }
 524 
 525 #define MAX_RECHECK_INTERVAL 1000
 526 
 527 void ObjectMonitor::EnterI(TRAPS) {
 528   ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count());
 529 
 530   Thread * const Self = THREAD;
 531   assert(Self->is_Java_thread(), "invariant");
 532   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 533 
 534   // Try the lock - TATAS
 535   if (TryLock (Self) > 0) {
 536     assert(_succ != Self, "invariant");
 537     assert(_owner == Self, "invariant");
 538     assert(_Responsible != Self, "invariant");
 539     return;
 540   }
 541 
 542   if (AsyncDeflateIdleMonitors &&
 543       Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
 544     // The deflation protocol finished the first part (setting owner),
 545     // but it failed the second part (making ref_count negative) and
 546     // bailed. Or the ObjectMonitor was async deflated and reused.
 547     // Acquired the monitor.
 548     assert(_succ != Self, "invariant");
 549     assert(_Responsible != Self, "invariant");
 550     return;
 551   }
 552 
 553   assert(InitDone, "Unexpectedly not initialized");
 554 
 555   // We try one round of spinning *before* enqueueing Self.
 556   //
 557   // If the _owner is ready but OFFPROC we could use a YieldTo()
 558   // operation to donate the remainder of this thread's quantum
 559   // to the owner.  This has subtle but beneficial affinity
 560   // effects.
 561 
 562   if (TrySpin(Self) > 0) {
 563     assert(_owner == Self, "invariant");
 564     assert(_succ != Self, "invariant");
 565     assert(_Responsible != Self, "invariant");
 566     return;
 567   }
 568 
 569   // The Spin failed -- Enqueue and park the thread ...
 570   assert(_succ != Self, "invariant");
 571   assert(_owner != Self, "invariant");
 572   assert(_Responsible != Self, "invariant");


 649 
 650   for (;;) {
 651 
 652     if (TryLock(Self) > 0) break;
 653     assert(_owner != Self, "invariant");
 654 
 655     // park self
 656     if (_Responsible == Self) {
 657       Self->_ParkEvent->park((jlong) recheckInterval);
 658       // Increase the recheckInterval, but clamp the value.
 659       recheckInterval *= 8;
 660       if (recheckInterval > MAX_RECHECK_INTERVAL) {
 661         recheckInterval = MAX_RECHECK_INTERVAL;
 662       }
 663     } else {
 664       Self->_ParkEvent->park();
 665     }
 666 
 667     if (TryLock(Self) > 0) break;
 668 
 669     if (AsyncDeflateIdleMonitors &&
 670         Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
 671       // The deflation protocol finished the first part (setting owner),
 672       // but it failed the second part (making ref_count negative) and
 673       // bailed. Or the ObjectMonitor was async deflated and reused.
 674       // Acquired the monitor.
 675       break;
 676     }
 677 
 678     // The lock is still contested.
 679     // Keep a tally of the # of futile wakeups.
 680     // Note that the counter is not protected by a lock or updated by atomics.
 681     // That is by design - we trade "lossy" counters which are exposed to
 682     // races during updates for a lower probe effect.
 683 
 684     // This PerfData object can be used in parallel with a safepoint.
 685     // See the work around in PerfDataManager::destroy().
 686     OM_PERFDATA_OP(FutileWakeups, inc());
 687     ++nWakeups;
 688 
 689     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 690     // We can defer clearing _succ until after the spin completes
 691     // TrySpin() must tolerate being called with _succ == Self.
 692     // Try yet another round of adaptive spinning.
 693     if (TrySpin(Self) > 0) break;
 694 
 695     // We can find that we were unpark()ed and redesignated _succ while
 696     // we were spinning.  That's harmless.  If we iterate and call park(),
 697     // park() will consume the event and return immediately and we'll


 762   // the lock.   The barrier ensures that changes to monitor meta-data and data
 763   // protected by the lock will be visible before we release the lock, and
 764   // therefore before some other thread (CPU) has a chance to acquire the lock.
 765   // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
 766   //
 767   // Critically, any prior STs to _succ or EntryList must be visible before
 768   // the ST of null into _owner in the *subsequent* (following) corresponding
 769   // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
 770   // execute a serializing instruction.
 771 
 772   return;
 773 }
 774 
 775 // ReenterI() is a specialized inline form of the latter half of the
 776 // contended slow-path from EnterI().  We use ReenterI() only for
 777 // monitor reentry in wait().
 778 //
 779 // In the future we should reconcile EnterI() and ReenterI().
 780 
 781 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
 782   ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count());
 783 
 784   assert(Self != NULL, "invariant");
 785   assert(SelfNode != NULL, "invariant");
 786   assert(SelfNode->_thread == Self, "invariant");
 787   assert(_waiters > 0, "invariant");
 788   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 789   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 790   JavaThread * jt = (JavaThread *) Self;
 791 
 792   int nWakeups = 0;
 793   for (;;) {
 794     ObjectWaiter::TStates v = SelfNode->TState;
 795     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 796     assert(_owner != Self, "invariant");
 797 
 798     if (TryLock(Self) > 0) break;
 799     if (TrySpin(Self) > 0) break;
 800 
 801     if (AsyncDeflateIdleMonitors &&
 802         Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
 803       // The deflation protocol finished the first part (setting owner),
 804       // but it failed the second part (making ref_count negative) and
 805       // bailed. Or the ObjectMonitor was async deflated and reused.
 806       // Acquired the monitor.
 807       break;
 808     }
 809 
 810     // State transition wrappers around park() ...
 811     // ReenterI() wisely defers state transitions until
 812     // it's clear we must park the thread.
 813     {
 814       OSThreadContendState osts(Self->osthread());
 815       ThreadBlockInVM tbivm(jt);
 816 
 817       // cleared by handle_special_suspend_equivalent_condition()
 818       // or java_suspend_self()
 819       jt->set_suspend_equivalent();
 820       Self->_ParkEvent->park();
 821 
 822       // were we externally suspended while we were waiting?
 823       for (;;) {
 824         if (!ExitSuspendEquivalent(jt)) break;
 825         if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
 826         jt->java_suspend_self();
 827         jt->set_suspend_equivalent();
 828       }
 829     }


 997   Thread * const Self = THREAD;
 998   if (THREAD != _owner) {
 999     if (THREAD->is_lock_owned((address) _owner)) {
1000       // Transmute _owner from a BasicLock pointer to a Thread address.
1001       // We don't need to hold _mutex for this transition.
1002       // Non-null to Non-null is safe as long as all readers can
1003       // tolerate either flavor.
1004       assert(_recursions == 0, "invariant");
1005       _owner = THREAD;
1006       _recursions = 0;
1007     } else {
1008       // Apparent unbalanced locking ...
1009       // Naively we'd like to throw IllegalMonitorStateException.
1010       // As a practical matter we can neither allocate nor throw an
1011       // exception as ::exit() can be called from leaf routines.
1012       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
1013       // Upon deeper reflection, however, in a properly run JVM the only
1014       // way we should encounter this situation is in the presence of
1015       // unbalanced JNI locking. TODO: CheckJNICalls.
1016       // See also: CR4414101
1017       assert(false, "Non-balanced monitor enter/exit! Likely JNI locking: "
1018              "owner=" INTPTR_FORMAT, p2i(_owner));
1019       return;
1020     }
1021   }
1022 
1023   if (_recursions != 0) {
1024     _recursions--;        // this is simple recursive enter
1025     return;
1026   }
1027 
1028   // Invariant: after setting Responsible=null an thread must execute
1029   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
1030   _Responsible = NULL;
1031 
1032 #if INCLUDE_JFR
1033   // get the owner's thread id for the MonitorEnter event
1034   // if it is enabled and the thread isn't suspended
1035   if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
1036     _previous_owner_tid = JFR_THREAD_ID(Self);
1037   }
1038 #endif


1254       _recursions = 0;
1255     }
1256   }
1257 
1258   guarantee(Self == _owner, "complete_exit not owner");
1259   intptr_t save = _recursions; // record the old recursion count
1260   _recursions = 0;        // set the recursion level to be 0
1261   exit(true, Self);           // exit the monitor
1262   guarantee(_owner != Self, "invariant");
1263   return save;
1264 }
1265 
1266 // reenter() enters a lock and sets recursion count
1267 // complete_exit/reenter operate as a wait without waiting
1268 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1269   Thread * const Self = THREAD;
1270   assert(Self->is_Java_thread(), "Must be Java thread!");
1271   JavaThread *jt = (JavaThread *)THREAD;
1272 
1273   guarantee(_owner != Self, "reenter already owner");
1274   enter(THREAD);
1275   // Entered the monitor.
1276   guarantee(_recursions == 0, "reenter recursion");
1277   _recursions = recursions;

1278 }
1279 
1280 
// -----------------------------------------------------------------------------
// A macro is used below because there may already be a pending
// exception which should not abort the execution of the routines
// which use this (which is why we don't put this into check_slow and
// call it with a CHECK argument).
//
// CHECK_OWNER() verifies that the calling thread (THREAD) owns this
// monitor. If the owner field still holds a BasicLock address from a
// stack-lock owned by this thread, it is converted to the owning
// Thread* in place. Otherwise, if the thread is not the owner, the
// THROW macro raises IllegalMonitorStateException in the enclosing
// function.

#define CHECK_OWNER()                                                       \
  do {                                                                      \
    if (THREAD != _owner) {                                                 \
      if (THREAD->is_lock_owned((address) _owner)) {                        \
        _owner = THREAD;  /* Convert from basiclock addr to Thread addr */  \
        _recursions = 0;                                                    \
      } else {                                                              \
        THROW(vmSymbols::java_lang_IllegalMonitorStateException());         \
      }                                                                     \
    }                                                                       \
  } while (false)


2047     n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,  \
2048                                         CHECK);                          \
2049   }
2050 #define NEWPERFVARIABLE(n)                                                \
2051   {                                                                       \
2052     n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,  \
2053                                          CHECK);                          \
2054   }
2055     NEWPERFCOUNTER(_sync_Inflations);
2056     NEWPERFCOUNTER(_sync_Deflations);
2057     NEWPERFCOUNTER(_sync_ContendedLockAttempts);
2058     NEWPERFCOUNTER(_sync_FutileWakeups);
2059     NEWPERFCOUNTER(_sync_Parks);
2060     NEWPERFCOUNTER(_sync_Notifications);
2061     NEWPERFVARIABLE(_sync_MonExtant);
2062 #undef NEWPERFCOUNTER
2063 #undef NEWPERFVARIABLE
2064   }
2065 
2066   DEBUG_ONLY(InitDone = true;)
2067 }
2068 
// For internal use by ObjectSynchronizer::monitors_iterate().
ObjectMonitorHandle::ObjectMonitorHandle(ObjectMonitor * om_ptr) {
  // Bump the ref_count before saving the ObjectMonitor* in this handle
  // so that subsequent async deflation will skip the ObjectMonitor
  // while the handle holds it.
  om_ptr->inc_ref_count();
  _om_ptr = om_ptr;
}
2074 
2075 ObjectMonitorHandle::~ObjectMonitorHandle() {
2076   if (_om_ptr != NULL) {
2077     _om_ptr->dec_ref_count();
2078     _om_ptr = NULL;
2079   }
2080 }
2081 
// Save the ObjectMonitor* associated with the specified markOop and
// increment the ref_count. This function should only be called if
// the caller has verified mark->has_monitor() == true. The object
// parameter is needed to verify that ObjectMonitor* has not been
// deflated and reused for another object.
//
// This function returns true if the ObjectMonitor* has been safely
// saved. This function returns false if we have lost a race with
// async deflation; the caller should retry as appropriate.
//
bool ObjectMonitorHandle::save_om_ptr(oop object, markOop mark) {
  guarantee(mark->has_monitor(), "sanity check: mark=" INTPTR_FORMAT,
            p2i(mark));

  // Optimistically bump the ref_count; the checks below may have to
  // undo it if we lost a race with async deflation.
  ObjectMonitor * om_ptr = mark->monitor();
  om_ptr->inc_ref_count();

  if (AsyncDeflateIdleMonitors) {
    // Race here if monitor is not owned! The above ref_count bump
    // will cause subsequent async deflation to skip it. However,
    // previous or concurrent async deflation is a race.
    if (om_ptr->owner_is_DEFLATER_MARKER() && om_ptr->ref_count() <= 0) {
      // Async deflation is in progress and our ref_count increment
      // above lost the race to async deflation. Attempt to restore
      // the header/dmw to the object's header so that we only retry
      // once if the deflater thread happens to be slow.
      om_ptr->install_displaced_markword_in_object(object);
      om_ptr->dec_ref_count();
      return false;
    }
    // The ObjectMonitor could have been deflated and reused for
    // another object before we bumped the ref_count so make sure
    // our object still refers to this ObjectMonitor.
    const markOop tmp = object->mark();
    if (!tmp->has_monitor() || tmp->monitor() != om_ptr) {
      // Async deflation and reuse won the race so we have to retry.
      // Skip object header restoration since that's already done.
      om_ptr->dec_ref_count();
      return false;
    }
  }

  // A handle can only hold one saved ObjectMonitor* at a time.
  ADIM_guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
                 p2i(_om_ptr));
  _om_ptr = om_ptr;
  return true;
}
2129 
2130 // For internal use by ObjectSynchronizer::inflate().
2131 void ObjectMonitorHandle::set_om_ptr(ObjectMonitor * om_ptr) {
2132   if (_om_ptr == NULL) {
2133     ADIM_guarantee(om_ptr != NULL, "cannot clear an unset om_ptr");
2134     om_ptr->inc_ref_count();
2135     _om_ptr = om_ptr;
2136   } else {
2137     ADIM_guarantee(om_ptr == NULL, "can only clear a set om_ptr");
2138     _om_ptr->dec_ref_count();
2139     _om_ptr = NULL;
2140   }
2141 }
2142 
2143 void ObjectMonitor::print_on(outputStream* st) const {
2144   // The minimal things to print for markOop printing, more can be added for debugging and logging.
2145   st->print("{contentions=0x%08x,waiters=0x%08x"
2146             ",recursions=" INTPTR_FORMAT ",owner=" INTPTR_FORMAT "}",
2147             contentions(), waiters(), recursions(),
2148             p2i(owner()));
2149 }
2150 void ObjectMonitor::print() const { print_on(tty); }
< prev index next >