
src/share/vm/runtime/objectMonitor.cpp

rev 13054 : imported patch monitor_deflate_conc


 223 //   the caller onto the WaitSet.
 224 //
 225 // * notify() or notifyAll() simply transfers threads from the WaitSet to
 226 //   either the EntryList or cxq.  Subsequent exit() operations will
 227 //   unpark the notifyee.  Unparking a notifyee in notify() is inefficient -
 228 //   it's likely the notifyee would simply impale itself on the lock held
 229 //   by the notifier.
 230 //
 231 // * An interesting alternative is to encode cxq as (List,LockByte) where
 232 //   the LockByte is non-zero iff the monitor is owned.  _owner is simply an
 233 //   auxiliary variable, like _recursions, in the scheme.  The threads or
 234 //   Events that form the list would have to be aligned on 256-byte boundaries.
 235 //   A thread would try to acquire the lock or enqueue itself with CAS, but
 236 //   exiting threads could use a 1-0 protocol and simply STB to set the
 237 //   LockByte to 0.  Note that this is *not* word-tearing, but it does presume
 238 //   that full-word CAS operations remain coherent when intermixed with STB
 239 //   operations.  That's true on most common processors.
 240 //
 241 // * See also http://blogs.sun.com/dave
 242 
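
// Editor's sketch -- illustrative only, not part of this file.  One way the
// (List,LockByte) encoding above could look in C++, assuming a little-endian
// machine.  Aligning nodes on 256-byte boundaries keeps the low 8 bits of a
// node address zero, so the low byte of the packed word can serve as the
// LockByte.  Aliasing an atomic word through an atomic byte pointer is
// formally undefined in ISO C++; it is shown only to mirror the STB idea.

#include <atomic>
#include <cstdint>

struct alignas(256) LockNode { LockNode* next; };    // enqueued waiters

std::atomic<uintptr_t> cxq_word(0);    // packed (LockNode* | LockByte)
const uintptr_t LOCK_BYTE = 1;         // non-zero low byte == owned

bool try_acquire() {
  uintptr_t v = cxq_word.load(std::memory_order_relaxed);
  if (v & 0xFF) return false;          // LockByte set: already owned
  // Full-word CAS: set the LockByte while preserving the list head.
  return cxq_word.compare_exchange_strong(v, v | LOCK_BYTE,
                                          std::memory_order_acquire);
}

void release() {
  // 1-0 protocol: a single byte store (the "STB") clears the LockByte
  // without disturbing the list head held in the upper bits.
  reinterpret_cast<std::atomic<uint8_t>*>(&cxq_word)
      ->store(0, std::memory_order_release);
}
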

 243 
 244 // -----------------------------------------------------------------------------
 245 // Enter support
 246 
 247 void ObjectMonitor::enter(TRAPS) {
 248   // The following code is ordered to check the most common cases first
 249   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 250   Thread * const Self = THREAD;
 251 
 252   void * cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL);
 253   if (cur == NULL) {
 254     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
 255     assert(_recursions == 0, "invariant");
 256     assert(_owner == Self, "invariant");
 257     return;
 258   }
 259 
 260   if (cur == Self) {
 261     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 262     _recursions++;
 263     return;
 264   }
 265 
 266   if (Self->is_lock_owned ((address)cur)) {
 267     assert(_recursions == 0, "internal state error");
 268     _recursions = 1;
 269     // Commute owner from a thread-specific on-stack BasicLockObject address to
 270     // a full-fledged "Thread *".
 271     _owner = Self;
 272     return;
 273   }
 274 
 275   // We've encountered genuine contention.
 276   assert(Self->_Stalled == 0, "invariant");
 277   Self->_Stalled = intptr_t(this);
 278 
 279   // Try one round of spinning *before* enqueueing Self
 280   // and before going through the awkward and expensive state
 281   // transitions.  The following spin is strictly optional ...
 282   // Note that if we acquire the monitor from an initial spin
 283   // we forgo posting JVMTI events and firing DTRACE probes.
 284   if (Knob_SpinEarly && TrySpin (Self) > 0) {
 285     assert(_owner == Self, "invariant");
 286     assert(_recursions == 0, "invariant");
 287     assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 288     Self->_Stalled = 0;
 289     return;
 290   }
 291 
 292   assert(_owner != Self, "invariant");
 293   assert(_succ != Self, "invariant");
 294   assert(Self->is_Java_thread(), "invariant");
 295   JavaThread * jt = (JavaThread *) Self;
 296   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 297   assert(jt->thread_state() != _thread_blocked, "invariant");
 298   assert(this->object() != NULL, "invariant");
 299   assert(_count >= 0, "invariant");
 300 
 301   // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
 302   // Ensure the object-monitor relationship remains stable while there's contention.
 303   Atomic::inc(&_count);
 304 
 305   EventJavaMonitorEnter event;
 306 
 307   { // Change java thread status to indicate blocked on monitor enter.
 308     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 309 
 310     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
 311     if (JvmtiExport::should_post_monitor_contended_enter()) {
 312       JvmtiExport::post_monitor_contended_enter(jt, this);
 313 
 314       // The current thread does not yet own the monitor and does not
 315       // yet appear on any queues that would get it made the successor.
 316       // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
 317       // handler cannot accidentally consume an unpark() meant for the
 318       // ParkEvent associated with this ObjectMonitor.
 319     }
 320 
 321     OSThreadContendState osts(Self->osthread());
 322     ThreadBlockInVM tbivm(jt);
 323 


 340       //
 341       _recursions = 0;
 342       _succ = NULL;
 343       exit(false, Self);
 344 
 345       jt->java_suspend_self();
 346     }
 347     Self->set_current_pending_monitor(NULL);
 348 
 349     // We cleared the pending monitor info since we've just gotten past
 350     // the enter-check-for-suspend dance and we now own the monitor free
 351     // and clear, i.e., it is no longer pending. The ThreadBlockInVM
 352     // destructor can go to a safepoint at the end of this block. If we
 353     // do a thread dump during that safepoint, then this thread will show
 354     // as having "-locked" the monitor, but the OS and java.lang.Thread
 355     // states will still report that the thread is blocked trying to
 356     // acquire it.
 357   }
 358 
 359   Atomic::dec(&_count);
 360   assert(_count >= 0, "invariant");
 361   Self->_Stalled = 0;
 362 
 363   // Must either set _recursions = 0 or ASSERT _recursions == 0.
 364   assert(_recursions == 0, "invariant");
 365   assert(_owner == Self, "invariant");
 366   assert(_succ != Self, "invariant");
 367   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 368 
 369   // The thread -- now the owner -- is back in vm mode.
 370   // Report the glorious news via TI,DTrace and jvmstat.
 371   // The probe effect is non-trivial.  All the reportage occurs
 372   // while we hold the monitor, increasing the length of the critical
 373   // section.  Amdahl's parallel speedup law comes vividly into play.
 374   //
 375   // Another option might be to aggregate the events (thread local or
 376   // per-monitor aggregation) and defer reporting until a more opportune
 377   // time -- such as next time some thread encounters contention but has
 378 //   yet to acquire the lock.  While spinning, that thread could
 379 //   increment JVMStat counters, etc.
 380 
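
// Editor's sketch -- hypothetical, all names invented -- of the thread-local
// aggregation idea floated above: each thread batches contended-enter counts
// locally and publishes them outside the critical section.

struct ContentionCounters {              // e.g. one instance per thread
  int pending_entries;                   // contended enters not yet reported
  void record() { pending_entries++; }   // cheap; no lock, no atomics
  void flush() {                         // call while spinning or after exit
    if (pending_entries > 0) {
      // publish the aggregate to jvmstat / DTrace here, outside the lock
      pending_entries = 0;
    }
  }
};
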
 381   DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
 382   if (JvmtiExport::should_post_monitor_contended_entered()) {
 383     JvmtiExport::post_monitor_contended_entered(jt, this);
 384 
 385     // The current thread already owns the monitor and is not going to
 386     // call park() for the remainder of the monitor enter protocol. So
 387     // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
 388     // event handler consumed an unpark() issued by the thread that
 389     // just exited the monitor.
 390   }
 391 
 392   if (event.should_commit()) {
 393     event.set_monitorClass(((oop)this->object())->klass());
 394     event.set_previousOwner((TYPE_THREAD)_previous_owner_tid);
 395     event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
 396     event.commit();
 397   }
 398 
 399   OM_PERFDATA_OP(ContendedLockAttempts, inc());

 400 }
 401 
 402 
 403 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 404 // Callers must compensate as needed.
 405 
 406 int ObjectMonitor::TryLock(Thread * Self) {
 407   void * own = _owner;
 408   if (own != NULL) return 0;
 409   if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
 410     // Either guarantee _recursions == 0 or set _recursions = 0.
 411     assert(_recursions == 0, "invariant");
 412     assert(_owner == Self, "invariant");
 413     return 1;
 414   }
 415   // The lock had been free momentarily, but we lost the race to the lock.
 416   // Interference -- the CAS failed.
 417   // We can either return -1 or retry.
 418   // Retry doesn't make as much sense because the lock was just acquired.
 419   return -1;
 420 }
 421 
 422 #define MAX_RECHECK_INTERVAL 1000
 423 
 424 void ObjectMonitor::EnterI(TRAPS) {
 425   Thread * const Self = THREAD;
 426   assert(Self->is_Java_thread(), "invariant");
 427   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 428 
 429   // Try the lock - TATAS
 430   if (TryLock (Self) > 0) {
 431     assert(_succ != Self, "invariant");
 432     assert(_owner == Self, "invariant");
 433     assert(_Responsible != Self, "invariant");
 434     return;
 435   }
 436 
 437   DeferredInitialize();
 438 
 439   // We try one round of spinning *before* enqueueing Self.
 440   //
 441   // If the _owner is ready but OFFPROC we could use a YieldTo()
 442   // operation to donate the remainder of this thread's quantum
 443   // to the owner.  This has subtle but beneficial affinity
 444   // effects.
 445 
 446   if (TrySpin (Self) > 0) {
 447     assert(_owner == Self, "invariant");
 448     assert(_succ != Self, "invariant");
 449     assert(_Responsible != Self, "invariant");
 450     return;
 451   }
 452 
 453   // The Spin failed -- Enqueue and park the thread ...
 454   assert(_succ != Self, "invariant");
 455   assert(_owner != Self, "invariant");
 456   assert(_Responsible != Self, "invariant");


 540     if ((SyncFlags & 2) && _Responsible == NULL) {
 541       Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
 542     }
 543 
 544     // park self
 545     if (_Responsible == Self || (SyncFlags & 1)) {
 546       TEVENT(Inflated enter - park TIMED);
 547       Self->_ParkEvent->park((jlong) recheckInterval);
 548       // Increase the recheckInterval, but clamp the value.
 549       recheckInterval *= 8;
 550       if (recheckInterval > MAX_RECHECK_INTERVAL) {
 551         recheckInterval = MAX_RECHECK_INTERVAL;
 552       }
 553     } else {
 554       TEVENT(Inflated enter - park UNTIMED);
 555       Self->_ParkEvent->park();
 556     }
 557 
 558     if (TryLock(Self) > 0) break;
 559 
 560     // The lock is still contested.
 561     // Keep a tally of the # of futile wakeups.
 562     // Note that the counter is not protected by a lock or updated by atomics.
 563     // That is by design - we trade "lossy" counters which are exposed to
 564     // races during updates for a lower probe effect.
 565     TEVENT(Inflated enter - Futile wakeup);
 566     // This PerfData object can be used in parallel with a safepoint.
 567     // See the work around in PerfDataManager::destroy().
 568     OM_PERFDATA_OP(FutileWakeups, inc());
 569     ++nWakeups;
 570 
 571     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 572     // We can defer clearing _succ until after the spin completes
 573     // TrySpin() must tolerate being called with _succ == Self.
 574     // Try yet another round of adaptive spinning.
 575     if ((Knob_SpinAfterFutile & 1) && TrySpin(Self) > 0) break;
 576 
 577     // We can find that we were unpark()ed and redesignated _succ while
 578     // we were spinning.  That's harmless.  If we iterate and call park(),
 579     // park() will consume the event and return immediately and we'll


 672 // loop accordingly.
 673 
 674 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
 675   assert(Self != NULL, "invariant");
 676   assert(SelfNode != NULL, "invariant");
 677   assert(SelfNode->_thread == Self, "invariant");
 678   assert(_waiters > 0, "invariant");
 679   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 680   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 681   JavaThread * jt = (JavaThread *) Self;
 682 
 683   int nWakeups = 0;
 684   for (;;) {
 685     ObjectWaiter::TStates v = SelfNode->TState;
 686     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 687     assert(_owner != Self, "invariant");
 688 
 689     if (TryLock(Self) > 0) break;
 690     if (TrySpin(Self) > 0) break;
 691 
 692     TEVENT(Wait Reentry - parking);
 693 
 694     // State transition wrappers around park() ...
 695     // ReenterI() wisely defers state transitions until
 696     // it's clear we must park the thread.
 697     {
 698       OSThreadContendState osts(Self->osthread());
 699       ThreadBlockInVM tbivm(jt);
 700 
 701       // cleared by handle_special_suspend_equivalent_condition()
 702       // or java_suspend_self()
 703       jt->set_suspend_equivalent();
 704       if (SyncFlags & 1) {
 705         Self->_ParkEvent->park((jlong)MAX_RECHECK_INTERVAL);
 706       } else {
 707         Self->_ParkEvent->park();
 708       }
 709 
 710       // were we externally suspended while we were waiting?
 711       for (;;) {


1311   DeferredInitialize();
1312 
1313   if (THREAD != _owner) {
1314     if (THREAD->is_lock_owned ((address)_owner)) {
1315       assert(_recursions == 0, "internal state error");
1316       _owner = THREAD;   // Convert from basiclock addr to Thread addr
1317       _recursions = 0;
1318     }
1319   }
1320 
1321   guarantee(Self == _owner, "complete_exit not owner");
1322   intptr_t save = _recursions; // record the old recursion count
1323   _recursions = 0;        // set the recursion level to be 0
1324   exit(true, Self);           // exit the monitor
1325   guarantee(_owner != Self, "invariant");
1326   return save;
1327 }
1328 
1329 // reenter() enters a lock and sets recursion count
1330 // complete_exit/reenter operate as a wait without waiting
1331 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1332   Thread * const Self = THREAD;
1333   assert(Self->is_Java_thread(), "Must be Java thread!");
1334   JavaThread *jt = (JavaThread *)THREAD;
1335 
1336   guarantee(_owner != Self, "reenter already owner");
1337   enter(THREAD);       // enter the monitor
1338   guarantee(_recursions == 0, "reenter recursion");
1339   _recursions = recursions;
1340   return;
1341 }
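
// Editor's sketch -- hypothetical caller, not from this file -- of the
// pairing described above.  complete_exit() fully releases a possibly
// recursive monitor and returns the recursion count it held; reenter()
// reacquires the monitor and restores that count, so the pair acts like a
// wait() that never actually waits.

void with_monitor_released(ObjectMonitor* mon, TRAPS) {
  intptr_t saved = mon->complete_exit(THREAD);   // drop to fully unlocked
  do_work_while_unlocked();                      // placeholder for real work
  mon->reenter(saved, THREAD);                   // reacquire, restore count
}
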
1342 
1343 
1344 // -----------------------------------------------------------------------------
1345 // A macro is used below because there may already be a pending
1346 // exception which should not abort the execution of the routines
1347 // which use this (which is why we don't put this into check_slow and
1348 // call it with a CHECK argument).
1349 
1350 #define CHECK_OWNER()                                                       \
1351   do {                                                                      \
1352     if (THREAD != _owner) {                                                 \
1353       if (THREAD->is_lock_owned((address) _owner)) {                        \
1354         _owner = THREAD;  /* Convert from basiclock addr to Thread addr */  \
1355         _recursions = 0;                                                    \
1356       } else {                                                              \
1357         TEVENT(Throw IMSX);                                                 \
1358         THROW(vmSymbols::java_lang_IllegalMonitorStateException());         \
1359       }                                                                     \
1360     }                                                                       \
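
// Editor's note -- usage is illustrative.  A monitor operation expands
// CHECK_OWNER() at its head; because THROW returns from the enclosing
// function, the caller never proceeds without ownership, and any exception
// already pending is not clobbered the way a CHECK-style call could be:
//
//   void ObjectMonitor::some_op(TRAPS) {
//     CHECK_OWNER();   // throws IllegalMonitorStateException if not owner
//     ...
//   }
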


1559         //
1560         // We redo the unpark() to ensure forward progress, i.e., we
1561         // don't want all pending threads hanging (parked) with none
1562         // entering the unlocked monitor.
1563         node._event->unpark();
1564       }
1565     }
1566 
1567     if (event.should_commit()) {
1568       post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
1569     }
1570 
1571     OrderAccess::fence();
1572 
1573     assert(Self->_Stalled != 0, "invariant");
1574     Self->_Stalled = 0;
1575 
1576     assert(_owner != Self, "invariant");
1577     ObjectWaiter::TStates v = node.TState;
1578     if (v == ObjectWaiter::TS_RUN) {
1579       enter(Self);
1580     } else {
1581       guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
1582       ReenterI(Self, &node);
1583       node.wait_reenter_end(this);
1584     }
1585 
1586     // Self has reacquired the lock.
1587     // Lifecycle - the node representing Self must not appear on any queues.
1588     // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1589     // want residual elements associated with this thread left on any lists.
1590     guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
1591     assert(_owner == Self, "invariant");
1592     assert(_succ != Self, "invariant");
1593   } // OSThreadWaitState()
1594 
1595   jt->set_current_waiting_monitor(NULL);
1596 
1597   guarantee(_recursions == 0, "invariant");
1598   _recursions = save;     // restore the old recursion count
1599   _waiters--;             // decrement the number of waiters




 223 //   the caller onto the WaitSet.
 224 //
 225 // * notify() or notifyAll() simply transfers threads from the WaitSet to
 226 //   either the EntryList or cxq.  Subsequent exit() operations will
 227 //   unpark the notifyee.  Unparking a notifyee in notify() is inefficient -
 228 //   it's likely the notifyee would simply impale itself on the lock held
 229 //   by the notifier.
 230 //
 231 // * An interesting alternative is to encode cxq as (List,LockByte) where
 232 //   the LockByte is non-zero iff the monitor is owned.  _owner is simply an
 233 //   auxiliary variable, like _recursions, in the scheme.  The threads or
 234 //   Events that form the list would have to be aligned on 256-byte boundaries.
 235 //   A thread would try to acquire the lock or enqueue itself with CAS, but
 236 //   exiting threads could use a 1-0 protocol and simply STB to set the
 237 //   LockByte to 0.  Note that this is *not* word-tearing, but it does presume
 238 //   that full-word CAS operations remain coherent when intermixed with STB
 239 //   operations.  That's true on most common processors.
 240 //
 241 // * See also http://blogs.sun.com/dave
 242 
 243 #define DEFLATER_MARKER reinterpret_cast<void*>(-1)
 244 
 245 // -----------------------------------------------------------------------------
 246 // Enter support
 247 
 248 bool ObjectMonitor::enter(TRAPS) {
 249   // The following code is ordered to check the most common cases first
 250   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 251   Thread * const Self = THREAD;
 252 
 253   void * cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL);
 254   if (cur == NULL) {
 255     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
 256     assert(_recursions == 0, "invariant");
 257     assert(_owner == Self, "invariant");
 258     return true;
 259   }
 260 
 261   if (cur == Self) {
 262     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 263     _recursions++;
 264     return true;
 265   }
 266 
 267   if (Self->is_lock_owned ((address)cur)) {
 268     assert(_recursions == 0, "internal state error");
 269     _recursions = 1;
 270     // Commute owner from a thread-specific on-stack BasicLockObject address to
 271     // a full-fledged "Thread *".
 272     _owner = Self;
 273     return true;
 274   }
 275 
 276   // We've encountered genuine contention.
 277   assert(Self->_Stalled == 0, "invariant");
 278   Self->_Stalled = intptr_t(this);
 279 
 280   // Try one round of spinning *before* enqueueing Self
 281   // and before going through the awkward and expensive state
 282   // transitions.  The following spin is strictly optional ...
 283   // Note that if we acquire the monitor from an initial spin
 284   // we forgo posting JVMTI events and firing DTRACE probes.
 285   if (Knob_SpinEarly && TrySpin (Self) > 0) {
 286     assert(_owner == Self, "invariant");
 287     assert(_recursions == 0, "invariant");
 288     assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 289     Self->_Stalled = 0;
 290     return true;
 291   }
 292 
 293   assert(_owner != Self, "invariant");
 294   assert(_succ != Self, "invariant");
 295   assert(Self->is_Java_thread(), "invariant");
 296   JavaThread * jt = (JavaThread *) Self;
 297   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 298   assert(jt->thread_state() != _thread_blocked, "invariant");
 299   assert(this->object() != NULL, "invariant");
 300 
 301   // Prevent deflation.  See deflate_idle_monitors(), try_disable_monitor(), and is_busy().
 302   // Ensure the object-monitor relationship remains stable while there's contention.
 303   const jint count = Atomic::add(1, &_count);
 304   if (count <= 0 && _owner == DEFLATER_MARKER) {
 305     // Deflation in progress.
 306     // Help deflater thread install the mark word (in case deflater thread is slow).
 307     install_displaced_markword_in_object();
 308     Self->_Stalled = 0;
 309     return false; // Caller should retry. Never mind about _count as this monitor has been deflated.
 310   }
 311   // The deflater thread will not deflate this monitor and the monitor is contended, continue.
 312 
 313   EventJavaMonitorEnter event;
 314 
 315   { // Change java thread status to indicate blocked on monitor enter.
 316     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 317 
 318     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
 319     if (JvmtiExport::should_post_monitor_contended_enter()) {
 320       JvmtiExport::post_monitor_contended_enter(jt, this);
 321 
 322       // The current thread does not yet own the monitor and does not
 323       // yet appear on any queues that would get it made the successor.
 324       // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
 325       // handler cannot accidentally consume an unpark() meant for the
 326       // ParkEvent associated with this ObjectMonitor.
 327     }
 328 
 329     OSThreadContendState osts(Self->osthread());
 330     ThreadBlockInVM tbivm(jt);
 331 


 348       //
 349       _recursions = 0;
 350       _succ = NULL;
 351       exit(false, Self);
 352 
 353       jt->java_suspend_self();
 354     }
 355     Self->set_current_pending_monitor(NULL);
 356 
 357     // We cleared the pending monitor info since we've just gotten past
 358     // the enter-check-for-suspend dance and we now own the monitor free
 359     // and clear, i.e., it is no longer pending. The ThreadBlockInVM
 360     // destructor can go to a safepoint at the end of this block. If we
 361     // do a thread dump during that safepoint, then this thread will show
 362     // as having "-locked" the monitor, but the OS and java.lang.Thread
 363     // states will still report that the thread is blocked trying to
 364     // acquire it.
 365   }
 366 
 367   Atomic::dec(&_count);
 368   Self->_Stalled = 0;
 369 
 370   // Must either set _recursions = 0 or ASSERT _recursions == 0.
 371   assert(_recursions == 0, "invariant");
 372   assert(_owner == Self, "invariant");
 373   assert(_succ != Self, "invariant");
 374   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 375 
 376   // The thread -- now the owner -- is back in vm mode.
 377   // Report the glorious news via TI,DTrace and jvmstat.
 378   // The probe effect is non-trivial.  All the reportage occurs
 379   // while we hold the monitor, increasing the length of the critical
 380   // section.  Amdahl's parallel speedup law comes vividly into play.
 381   //
 382   // Another option might be to aggregate the events (thread local or
 383   // per-monitor aggregation) and defer reporting until a more opportune
 384   // time -- such as next time some thread encounters contention but has
 385   // yet to acquire the lock.  While spinning, that thread could
 386   // increment JVMStat counters, etc.
 387 
 388   DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
 389   if (JvmtiExport::should_post_monitor_contended_entered()) {
 390     JvmtiExport::post_monitor_contended_entered(jt, this);
 391 
 392     // The current thread already owns the monitor and is not going to
 393     // call park() for the remainder of the monitor enter protocol. So
 394     // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
 395     // event handler consumed an unpark() issued by the thread that
 396     // just exited the monitor.
 397   }
 398 
 399   if (event.should_commit()) {
 400     event.set_monitorClass(((oop)this->object())->klass());
 401     event.set_previousOwner((TYPE_THREAD)_previous_owner_tid);
 402     event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
 403     event.commit();
 404   }
 405 
 406   OM_PERFDATA_OP(ContendedLockAttempts, inc());
 407   return true;
 408 }
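
// Editor's sketch -- hypothetical caller -- of the retry contract implied by
// the new bool return: enter() returns false only when it observed a
// deflating monitor (after helping restore the object's mark word), in which
// case the caller must re-inflate and try again.  inflate_fn stands in for
// however the caller obtains a fresh monitor, e.g. ObjectSynchronizer's
// inflate path.

void enter_with_retry(ObjectMonitor* (*inflate_fn)(), TRAPS) {
  for (;;) {
    ObjectMonitor* m = inflate_fn();   // re-inflate the lock for the object
    if (m->enter(THREAD)) return;      // true: monitor acquired
    // false: monitor was concurrently deflated; loop and re-inflate.
  }
}
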
 409 
 410 
 411 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 412 // Callers must compensate as needed.
 413 
 414 int ObjectMonitor::TryLock(Thread * Self) {
 415   void * own = _owner;
 416   if (own != NULL) return 0;
 417   if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
 418     // Either guarantee _recursions == 0 or set _recursions = 0.
 419     assert(_recursions == 0, "invariant");
 420     assert(_owner == Self, "invariant");
 421     return 1;
 422   }
 423   // The lock had been free momentarily, but we lost the race to the lock.
 424   // Interference -- the CAS failed.
 425   // We can either return -1 or retry.
 426   // Retry doesn't make as much sense because the lock was just acquired.
 427   return -1;
 428 }
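
// Editor's note -- illustrative.  Callers treat TryLock()'s result as
// tri-state: only a strictly positive value means the lock was taken; 0
// (visibly owned, no CAS attempted) and -1 (CAS lost a race) both send the
// caller to the slow path:
//
//   int r = TryLock(Self);
//   if (r > 0) { /* we own the monitor */ }
//   else       { /* r == 0 or r == -1: spin, enqueue, or park */ }
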
 429 
 430 // Try disabling this monitor. Returns true iff successful.
 431 // The method will install DEFLATER_MARKER (-1) as the owner of the monitor,
 432 // check _waiters == 0, and make _count negative if it is currently 0. The
 433 // monitor is successfully disabled if _count is negative and this monitor is
 434 // still owned by DEFLATER_MARKER.
 435 //
 436 // All threads trying to acquire the monitor must, before parking themselves,
 437 // increment _count and check that _owner != DEFLATER_MARKER. If _owner ==
 438 // DEFLATER_MARKER and _count is positive, then a thread can still win the lock
 439 // by atomically installing its thread pointer in _owner.  If _count is
 440 // negative and _owner == DEFLATER_MARKER, then the monitor has been
 441 // successfully disabled and the acquiring threads should help install the
 442 // displaced mark word back into the object and retry acquiring the lock.
 443 //
 444 // A thread wanting to wait on the monitor must increase _waiters while owning the monitor.
 445 bool ObjectMonitor::try_disable_monitor() {
 446   assert(Thread::current()->is_Java_thread(), "precondition");
 447   // We don't want to disable newly allocated monitors as it could result in an endless inflate/deflate cycle.
 448   assert(is_old(), "precondition");
 449 
 450   // Set _owner to DEFLATER_MARKER if the monitor is not owned by another thread.
 451   // This forces contending threads through the slow path.
 452   if (!is_busy() && Atomic::cmpxchg_ptr(DEFLATER_MARKER, &_owner, NULL) == NULL) {
 453     // Another thread might still enter the monitor.
 454     // Signal that other threads should retry if the owner is DEFLATER_MARKER by making _count negative.
 455     if (_waiters == 0 && Atomic::cmpxchg(- max_jint, &_count, 0) == 0) {
 456       // ABA problem with _count:
 457       //   Another thread might have acquired this monitor and finished using it.
 458       //   Check owner to see if that happened (no other thread installs DEFLATER_MARKER as owner).
 459       if (_owner == DEFLATER_MARKER) {
 460         // We successfully signalled to all threads entering that they should
 461         // retry.
 462         // Nobody acquired this monitor between installing DEFLATER_MARKER into
 463         // _owner and now (such a thread would have changed _owner).  If any
 464         // thread is now waiting on the monitor, then _waiters must have been
 465         // incremented as it was 0 before. _waiters is changed only when owning
 466         // the monitor, but no other thread can have owned the monitor since we
 467         // installed DEFLATER_MARKER, and thus _waiters must still be 0.
 468         guarantee(_waiters == 0, "Not changed since the previous read");
 469         guarantee(_cxq == NULL, "All contending threads should retry");
 470         guarantee(_EntryList == NULL, "All contending threads should retry");
 471         // Install the old mark word if nobody else has already done it.
 472         install_displaced_markword_in_object();
 473         set_allocation_state(Free);
 474         // Leave this monitor locked to ensure acquiring threads take the slow-path and
 475         // leave _count negative to make them retry.
 476         return true; // Success, lock has been deflated.
 477       }
 478       // We do not own the monitor. Do not deflate.
 479       Atomic::add(max_jint, &_count);
 480     }
 481     // Never mind. Another thread managed to acquire this monitor or there were
 482     // threads waiting.  The threads that saw (or will see) 0 <= _count and
 483     // _owner == DEFLATER_MARKER will compete for ownership and one will
 484     // eventually install itself as owner and subsequently run the exit protocol.
 485   }
 486   assert(0 <= _count, "Nobody else should make _count negative");
 487   return false;
 488 }
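
// Editor's sketch -- hypothetical driver, list helpers invented -- of how a
// concurrent deflater thread might apply try_disable_monitor() while walking
// the population of in-use monitors.

void deflate_monitors_concurrently(ObjectMonitor* list_head) {
  // next_om() is a placeholder for whatever list linkage the deflater uses.
  for (ObjectMonitor* m = list_head; m != NULL; m = next_om(m)) {
    if (m->is_old() && !m->is_busy() && m->try_disable_monitor()) {
      // Deflated: the displaced mark word is back in the object, the
      // monitor is marked Free, and _count stays negative so straggling
      // enter()s bail out and retry against the restored mark word.
    }
  }
}
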
 489 
 490 // Install the displaced mark word of a disabled monitor into the object
 491 // associated with the monitor.
 492 // This method is idempotent and is expected to be executed by both mutators
 493 // wanting to acquire a monitor for an object, mutators wanting to install a
 494 // hashcode in an object, and the thread deflating monitors.
 495 void ObjectMonitor::install_displaced_markword_in_object() {
 496   markOop dmw = header();
 497   assert(dmw->is_neutral() || (dmw->hash() == 0 && dmw->is_marked()), "precondition");
 498   if (dmw->hash() == 0 && !dmw->is_marked()) {
 499     // Another thread might update the displaced mark word by computing a hash
 500     // code for the object or running this method.
 501     // Signal to other threads that the object mark word should be installed
 502     // and read from the object by marking the displaced mark word.
 503     markOop marked_dmw = dmw->set_marked();
 504     assert(marked_dmw->hash() == 0 && marked_dmw->is_marked(), "oops");
 505     dmw = (markOop) Atomic::cmpxchg_ptr(marked_dmw, &_header, dmw);
 506     // If the CAS failed because another thread installed a hash value, then dmw
 507     // will contain the hash and be unmarked. If the CAS failed because another thread
 508     // marked the displaced mark word, then dmw->hash() is zero and dmw is marked.
 509   }
 510   if (dmw->is_marked()) {
 511     assert(dmw->hash() == 0, "invariant");
 512     dmw = dmw->set_unmarked();
 513   }
 514   oop const obj = (oop) object();
 515   // Install displaced mark word if object mark word still points to this monitor.
 516   assert(dmw->is_neutral(), "Must not install non-neutral markword into object");
 517   obj->cas_set_mark(dmw, markOopDesc::encode(this));
 518 }
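
// Editor's note -- a summary of the races resolved above, for orientation.
// The displaced mark word (dmw) read from _header is in one of three states:
//
//   neutral, hash != 0 : a hash was installed; install this dmw as-is
//   neutral, hash == 0 : first helper: CAS the marked dmw into _header to
//                        warn others, then install the unmarked original
//   marked,  hash == 0 : another thread already marked it; unmark, install
//
// The final obj->cas_set_mark() succeeds only for the first thread to reach
// it, which is what makes the method idempotent.
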
 519 
 520 #define MAX_RECHECK_INTERVAL 1000
 521 
 522 void ObjectMonitor::EnterI(TRAPS) {
 523   Thread * const Self = THREAD;
 524   assert(Self->is_Java_thread(), "invariant");
 525   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 526 
 527   // Try the lock - TATAS
 528   if (TryLock (Self) > 0) {
 529     assert(_succ != Self, "invariant");
 530     assert(_owner == Self, "invariant");
 531     assert(_Responsible != Self, "invariant");
 532     return;
 533   }
 534 
 535   if (_owner == DEFLATER_MARKER) {
 536     guarantee(0 < _count, "_owner == DEFLATER_MARKER && _count <= 0 should have been handled by the caller");
 537     // Deflater thread tried to lock this monitor, but it failed to make _count negative and gave up.
 538     // Try to acquire monitor.
 539     if (Atomic::cmpxchg_ptr(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
 540       assert(_succ != Self, "invariant");
 541       assert(_owner == Self, "invariant");
 542       assert(_Responsible != Self, "invariant");
 543       return;
 544     }
 545   }
 546 
 547   DeferredInitialize();
 548 
 549   // We try one round of spinning *before* enqueueing Self.
 550   //
 551   // If the _owner is ready but OFFPROC we could use a YieldTo()
 552   // operation to donate the remainder of this thread's quantum
 553   // to the owner.  This has subtle but beneficial affinity
 554   // effects.
 555 
 556   if (TrySpin (Self) > 0) {
 557     assert(_owner == Self, "invariant");
 558     assert(_succ != Self, "invariant");
 559     assert(_Responsible != Self, "invariant");
 560     return;
 561   }
 562 
 563   // The Spin failed -- Enqueue and park the thread ...
 564   assert(_succ != Self, "invariant");
 565   assert(_owner != Self, "invariant");
 566   assert(_Responsible != Self, "invariant");


 650     if ((SyncFlags & 2) && _Responsible == NULL) {
 651       Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
 652     }
 653 
 654     // park self
 655     if (_Responsible == Self || (SyncFlags & 1)) {
 656       TEVENT(Inflated enter - park TIMED);
 657       Self->_ParkEvent->park((jlong) recheckInterval);
 658       // Increase the recheckInterval, but clamp the value.
 659       recheckInterval *= 8;
 660       if (recheckInterval > MAX_RECHECK_INTERVAL) {
 661         recheckInterval = MAX_RECHECK_INTERVAL;
 662       }
 663     } else {
 664       TEVENT(Inflated enter - park UNTIMED);
 665       Self->_ParkEvent->park();
 666     }
 667 
 668     if (TryLock(Self) > 0) break;
 669 
 670     if (_owner == DEFLATER_MARKER) {
 671       guarantee(0 < _count, "_owner == DEFLATER_MARKER && _count <= 0 should have been handled by the caller");
 672       // Deflater thread tried to lock this monitor, but it failed to make _count negative and gave up.
 673       // Try to acquire monitor.
 674       if (Atomic::cmpxchg_ptr(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
 675          break;
 676       }
 677     }
 678 
 679     // The lock is still contested.
 680     // Keep a tally of the # of futile wakeups.
 681     // Note that the counter is not protected by a lock or updated by atomics.
 682     // That is by design - we trade "lossy" counters which are exposed to
 683     // races during updates for a lower probe effect.
 684     TEVENT(Inflated enter - Futile wakeup);
 685     // This PerfData object can be used in parallel with a safepoint.
 686     // See the work around in PerfDataManager::destroy().
 687     OM_PERFDATA_OP(FutileWakeups, inc());
 688     ++nWakeups;
 689 
 690     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 691     // We can defer clearing _succ until after the spin completes
 692     // TrySpin() must tolerate being called with _succ == Self.
 693     // Try yet another round of adaptive spinning.
 694     if ((Knob_SpinAfterFutile & 1) && TrySpin(Self) > 0) break;
 695 
 696     // We can find that we were unpark()ed and redesignated _succ while
 697     // we were spinning.  That's harmless.  If we iterate and call park(),
 698     // park() will consume the event and return immediately and we'll


 791 // loop accordingly.
 792 
 793 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
 794   assert(Self != NULL, "invariant");
 795   assert(SelfNode != NULL, "invariant");
 796   assert(SelfNode->_thread == Self, "invariant");
 797   assert(_waiters > 0, "invariant");
 798   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 799   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 800   JavaThread * jt = (JavaThread *) Self;
 801 
 802   int nWakeups = 0;
 803   for (;;) {
 804     ObjectWaiter::TStates v = SelfNode->TState;
 805     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 806     assert(_owner != Self, "invariant");
 807 
 808     if (TryLock(Self) > 0) break;
 809     if (TrySpin(Self) > 0) break;
 810 
 811     if (_owner == DEFLATER_MARKER) {
 812       guarantee(0 <= _count, "Impossible: _owner == DEFLATER_MARKER && _count < 0, monitor must not be owned by deflater thread here");
 813       if (Atomic::cmpxchg_ptr(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
 814         break;
 815       }
 816     }
 817 
 818     TEVENT(Wait Reentry - parking);
 819 
 820     // State transition wrappers around park() ...
 821     // ReenterI() wisely defers state transitions until
 822     // it's clear we must park the thread.
 823     {
 824       OSThreadContendState osts(Self->osthread());
 825       ThreadBlockInVM tbivm(jt);
 826 
 827       // cleared by handle_special_suspend_equivalent_condition()
 828       // or java_suspend_self()
 829       jt->set_suspend_equivalent();
 830       if (SyncFlags & 1) {
 831         Self->_ParkEvent->park((jlong)MAX_RECHECK_INTERVAL);
 832       } else {
 833         Self->_ParkEvent->park();
 834       }
 835 
 836       // were we externally suspended while we were waiting?
 837       for (;;) {


1437   DeferredInitialize();
1438 
1439   if (THREAD != _owner) {
1440     if (THREAD->is_lock_owned ((address)_owner)) {
1441       assert(_recursions == 0, "internal state error");
1442       _owner = THREAD;   // Convert from basiclock addr to Thread addr
1443       _recursions = 0;
1444     }
1445   }
1446 
1447   guarantee(Self == _owner, "complete_exit not owner");
1448   intptr_t save = _recursions; // record the old recursion count
1449   _recursions = 0;        // set the recursion level to be 0
1450   exit(true, Self);           // exit the monitor
1451   guarantee(_owner != Self, "invariant");
1452   return save;
1453 }
1454 
1455 // reenter() enters a lock and sets recursion count
1456 // complete_exit/reenter operate as a wait without waiting
1457 bool ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1458   Thread * const Self = THREAD;
1459   assert(Self->is_Java_thread(), "Must be Java thread!");
1460   JavaThread *jt = (JavaThread *)THREAD;
1461 
1462   guarantee(_owner != Self, "reenter already owner");
1463   if (!enter(THREAD)) { return false; }      // enter the monitor
1464   guarantee(_recursions == 0, "reenter recursion");
1465   _recursions = recursions;
1466   return true;
1467 }
1468 
1469 
1470 // -----------------------------------------------------------------------------
1471 // A macro is used below because there may already be a pending
1472 // exception which should not abort the execution of the routines
1473 // which use this (which is why we don't put this into check_slow and
1474 // call it with a CHECK argument).
1475 
1476 #define CHECK_OWNER()                                                       \
1477   do {                                                                      \
1478     if (THREAD != _owner) {                                                 \
1479       if (THREAD->is_lock_owned((address) _owner)) {                        \
1480         _owner = THREAD;  /* Convert from basiclock addr to Thread addr */  \
1481         _recursions = 0;                                                    \
1482       } else {                                                              \
1483         TEVENT(Throw IMSX);                                                 \
1484         THROW(vmSymbols::java_lang_IllegalMonitorStateException());         \
1485       }                                                                     \
1486     }                                                                       \


1685         //
1686         // We redo the unpark() to ensure forward progress, i.e., we
1687         // don't want all pending threads hanging (parked) with none
1688         // entering the unlocked monitor.
1689         node._event->unpark();
1690       }
1691     }
1692 
1693     if (event.should_commit()) {
1694       post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
1695     }
1696 
1697     OrderAccess::fence();
1698 
1699     assert(Self->_Stalled != 0, "invariant");
1700     Self->_Stalled = 0;
1701 
1702     assert(_owner != Self, "invariant");
1703     ObjectWaiter::TStates v = node.TState;
1704     if (v == ObjectWaiter::TS_RUN) {
1705       DEBUG_ONLY(const bool success = ) enter(Self);
1706       assert(success, "enter signalled that we should retry, but monitor should not be deflated as waiters > 0");
1707     } else {
1708       guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
1709       ReenterI(Self, &node);
1710       node.wait_reenter_end(this);
1711     }
1712 
1713     // Self has reacquired the lock.
1714     // Lifecycle - the node representing Self must not appear on any queues.
1715     // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1716     // want residual elements associated with this thread left on any lists.
1717     guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
1718     assert(_owner == Self, "invariant");
1719     assert(_succ != Self, "invariant");
1720   } // OSThreadWaitState()
1721 
1722   jt->set_current_waiting_monitor(NULL);
1723 
1724   guarantee(_recursions == 0, "invariant");
1725   _recursions = save;     // restore the old recursion count
1726   _waiters--;             // decrement the number of waiters

