
src/hotspot/share/runtime/objectMonitor.cpp

rev 54612 : Checkpoint latest preliminary review patches for full OpenJDK review; merge with 8222295.patch.
rev 54613 : imported patch dcubed.monitor_deflate_conc.v2.01
rev 54614 : imported patch dcubed.monitor_deflate_conc.v2.02
rev 54615 : imported patch dcubed.monitor_deflate_conc.v2.03


 222 // * See also http://blogs.sun.com/dave
 223 
 224 
 225 void* ObjectMonitor::operator new (size_t size) throw() {
 226   return AllocateHeap(size, mtInternal);
 227 }
 228 void* ObjectMonitor::operator new[] (size_t size) throw() {
 229   return operator new (size);
 230 }
 231 void ObjectMonitor::operator delete(void* p) {
 232   FreeHeap(p);
 233 }
 234 void ObjectMonitor::operator delete[] (void *p) {
 235   operator delete(p);
 236 }
 237 
 238 // -----------------------------------------------------------------------------
 239 // Enter support
 240 
 241 void ObjectMonitor::enter(TRAPS) {


 242   // The following code is ordered to check the most common cases first
 243   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 244   Thread * const Self = THREAD;
 245 
 246   void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
 247   if (cur == NULL) {
 248     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
 249     assert(_recursions == 0, "invariant");
 250     assert(_owner == Self, "invariant");
 251     return;
 252   }
 253 
 254   if (cur == Self) {
 255     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 256     _recursions++;
 257     return;
 258   }
 259 
 260   if (Self->is_lock_owned ((address)cur)) {
 261     assert(_recursions == 0, "internal state error");


 276   // Note that if we acquire the monitor from an initial spin
 277   // we forgo posting JVMTI events and firing DTRACE probes.
 278   if (TrySpin(Self) > 0) {
 279     assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
 280     assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT,
 281            _recursions);
 282     assert(((oop)object())->mark() == markOopDesc::encode(this),
 283            "object mark must match encoded this: mark=" INTPTR_FORMAT
 284            ", encoded this=" INTPTR_FORMAT, p2i(((oop)object())->mark()),
 285            p2i(markOopDesc::encode(this)));
 286     Self->_Stalled = 0;
 287     return;
 288   }
 289 
 290   assert(_owner != Self, "invariant");
 291   assert(_succ != Self, "invariant");
 292   assert(Self->is_Java_thread(), "invariant");
 293   JavaThread * jt = (JavaThread *) Self;
 294   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 295   assert(jt->thread_state() != _thread_blocked, "invariant");
 296   assert(this->object() != NULL, "invariant");
 297   assert(_contentions >= 0, "invariant");
 298 
 299   // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
 300   // Ensure the object-monitor relationship remains stable while there's contention.
 301   Atomic::inc(&_contentions);


 302 
 303   JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
 304   EventJavaMonitorEnter event;
 305   if (event.should_commit()) {
 306     event.set_monitorClass(((oop)this->object())->klass());
 307     event.set_address((uintptr_t)(this->object_addr()));
 308   }
 309 
 310   { // Change java thread status to indicate blocked on monitor enter.
 311     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 312 
 313     Self->set_current_pending_monitor(this);
 314 
 315     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
 316     if (JvmtiExport::should_post_monitor_contended_enter()) {
 317       JvmtiExport::post_monitor_contended_enter(jt, this);
 318 
 319       // The current thread does not yet own the monitor and does not
 320       // yet appear on any queues that would cause it to be made the successor.
 321       // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event


 343       //
 344       _recursions = 0;
 345       _succ = NULL;
 346       exit(false, Self);
 347 
 348       jt->java_suspend_self();
 349     }
 350     Self->set_current_pending_monitor(NULL);
 351 
 352     // We cleared the pending monitor info since we've just gotten past
 353     // the enter-check-for-suspend dance and we now own the monitor free
 354     // and clear, i.e., it is no longer pending. The ThreadBlockInVM
 355     // destructor can go to a safepoint at the end of this block. If we
 356     // do a thread dump during that safepoint, then this thread will show
 357     // as having "-locked" the monitor, but the OS and java.lang.Thread
 358     // states will still report that the thread is blocked trying to
 359     // acquire it.
 360   }
 361 
 362   Atomic::dec(&_contentions);
 363   assert(_contentions >= 0, "invariant");
 364   Self->_Stalled = 0;
 365 
 366   // Must either set _recursions = 0 or ASSERT _recursions == 0.
 367   assert(_recursions == 0, "invariant");
 368   assert(_owner == Self, "invariant");
 369   assert(_succ != Self, "invariant");
 370   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 371 
 372   // The thread -- now the owner -- is back in vm mode.
 373   // Report the glorious news via TI, DTrace and jvmstat.
 374   // The probe effect is non-trivial.  All the reportage occurs
 375   // while we hold the monitor, increasing the length of the critical
 376   // section.  Amdahl's parallel speedup law comes vividly into play.
 377   //
 378   // Another option might be to aggregate the events (thread local or
 379   // per-monitor aggregation) and defer reporting until a more opportune
 380   // time -- such as next time some thread encounters contention but has
 381   // yet to acquire the lock.  While spinning, that thread could
 382   // increment JVMStat counters, etc.
 383 


 400 
 401 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 402 // Callers must compensate as needed.
 403 
 404 int ObjectMonitor::TryLock(Thread * Self) {
 405   void * own = _owner;
 406   if (own != NULL) return 0;
 407   if (Atomic::replace_if_null(Self, &_owner)) {
 408     // Either guarantee _recursions == 0 or set _recursions = 0.
 409     assert(_recursions == 0, "invariant");
 410     assert(_owner == Self, "invariant");
 411     return 1;
 412   }
 413   // The lock had been free momentarily, but we lost the race to the lock.
 414   // Interference -- the CAS failed.
 415   // We can either return -1 or retry.
 416   // Retry doesn't make as much sense because the lock was just acquired by another thread.
 417   return -1;
 418 }
 419 
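A hedged caller sketch of the tri-state contract above (illustrative only, not part of this file; the fence is one way a caller might "compensate as needed" per the caveat before TryLock()):

    int r = TryLock(Self);
    if (r > 0) {
      // 1: acquired -- _owner == Self; proceed into the critical section.
    } else {
      // 0: the lock was visibly held; -1: the CAS raced and lost.
      // Neither failure path serialized, so fence before re-reading
      // monitor fields such as _succ or _EntryList.
      OrderAccess::fence();
    }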
 420 #define MAX_RECHECK_INTERVAL 1000
 421 
 422 void ObjectMonitor::EnterI(TRAPS) {


 423   Thread * const Self = THREAD;
 424   assert(Self->is_Java_thread(), "invariant");
 425   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 426 
 427   // Try the lock - TATAS
 428   if (TryLock (Self) > 0) {
 429     assert(_succ != Self, "invariant");
 430     assert(_owner == Self, "invariant");
 431     assert(_Responsible != Self, "invariant");
 432     return;
 433   }
 434 
 435   assert(InitDone, "Unexpectedly not initialized");
 436 
 437   // We try one round of spinning *before* enqueueing Self.
 438   //
 439   // If the _owner is ready but OFFPROC we could use a YieldTo()
 440   // operation to donate the remainder of this thread's quantum
 441   // to the owner.  This has subtle but beneficial affinity
 442   // effects.
 443 
 444   if (TrySpin(Self) > 0) {
 445     assert(_owner == Self, "invariant");
 446     assert(_succ != Self, "invariant");
 447     assert(_Responsible != Self, "invariant");
 448     return;
 449   }
 450 
 451   // The Spin failed -- Enqueue and park the thread ...
 452   assert(_succ != Self, "invariant");
 453   assert(_owner != Self, "invariant");
 454   assert(_Responsible != Self, "invariant");


 531 
 532   for (;;) {
 533 
 534     if (TryLock(Self) > 0) break;
 535     assert(_owner != Self, "invariant");
 536 
 537     // park self
 538     if (_Responsible == Self) {
 539       Self->_ParkEvent->park((jlong) recheckInterval);
 540       // Increase the recheckInterval, but clamp the value.
 541       recheckInterval *= 8;
 542       if (recheckInterval > MAX_RECHECK_INTERVAL) {
 543         recheckInterval = MAX_RECHECK_INTERVAL;
 544       }
 545     } else {
 546       Self->_ParkEvent->park();
 547     }
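    // Worked example (assuming recheckInterval was initialized to 1
    // earlier in EnterI(), which is its usual starting value): the
    // timed parks use 1, 8, 64, 512, then 1000 ms once clamped at
    // MAX_RECHECK_INTERVAL.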
 548 
 549     if (TryLock(Self) > 0) break;
 550 
 551     // The lock is still contested.
 552     // Keep a tally of the # of futile wakeups.
 553     // Note that the counter is not protected by a lock or updated by atomics.
 554     // That is by design - we trade "lossy" counters which are exposed to
 555     // races during updates for a lower probe effect.
 556 
 557     // This PerfData object can be used in parallel with a safepoint.
 558     // See the work around in PerfDataManager::destroy().
 559     OM_PERFDATA_OP(FutileWakeups, inc());
 560     ++nWakeups;
 561 
 562     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 563     // We can defer clearing _succ until after the spin completes.
 564     // TrySpin() must tolerate being called with _succ == Self.
 565     // Try yet another round of adaptive spinning.
 566     if (TrySpin(Self) > 0) break;
 567 
 568     // We can find that we were unpark()ed and redesignated _succ while
 569     // we were spinning.  That's harmless.  If we iterate and call park(),
 570     // park() will consume the event and return immediately and we'll


 635   // the lock.   The barrier ensures that changes to monitor meta-data and data
 636   // protected by the lock will be visible before we release the lock, and
 637   // therefore before some other thread (CPU) has a chance to acquire the lock.
 638   // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
 639   //
 640   // Critically, any prior STs to _succ or EntryList must be visible before
 641   // the ST of null into _owner in the *subsequent* (following) corresponding
 642   // monitorexit.  Recall, too, that in 1-0 mode monitorexit does not necessarily
 643   // execute a serializing instruction.
 644 
 645   return;
 646 }
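A hedged fragment of the store ordering the comment above demands of the subsequent monitorexit (OrderAccess::release_store matches the exit() path of this file; next_waiter is a hypothetical name, and everything else is elided):

    // Queue-field stores first ...
    _EntryList = next_waiter;  // ST to monitor meta-data (_succ/_EntryList)
    // ... then drop ownership. In 1-0 mode no trailing fence need follow,
    // which is exactly why the earlier stores must already be visible.
    OrderAccess::release_store(&_owner, (void*)NULL);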
 647 
 648 // ReenterI() is a specialized inline form of the latter half of the
 649 // contended slow-path from EnterI().  We use ReenterI() only for
 650 // monitor reentry in wait().
 651 //
 652 // In the future we should reconcile EnterI() and ReenterI().
 653 
 654 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {


 655   assert(Self != NULL, "invariant");
 656   assert(SelfNode != NULL, "invariant");
 657   assert(SelfNode->_thread == Self, "invariant");
 658   assert(_waiters > 0, "invariant");
 659   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 660   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 661   JavaThread * jt = (JavaThread *) Self;
 662 
 663   int nWakeups = 0;
 664   for (;;) {
 665     ObjectWaiter::TStates v = SelfNode->TState;
 666     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 667     assert(_owner != Self, "invariant");
 668 
 669     if (TryLock(Self) > 0) break;
 670     if (TrySpin(Self) > 0) break;
 671 
 672     // State transition wrappers around park() ...
 673     // ReenterI() wisely defers state transitions until
 674     // it's clear we must park the thread.
 675     {
 676       OSThreadContendState osts(Self->osthread());
 677       ThreadBlockInVM tbivm(jt);
 678 
 679       // cleared by handle_special_suspend_equivalent_condition()
 680       // or java_suspend_self()
 681       jt->set_suspend_equivalent();
 682       Self->_ParkEvent->park();
 683 
 684       // were we externally suspended while we were waiting?
 685       for (;;) {
 686         if (!ExitSuspendEquivalent(jt)) break;
 687         if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
 688         jt->java_suspend_self();
 689         jt->set_suspend_equivalent();
 690       }
 691     }


 859   Thread * const Self = THREAD;
 860   if (THREAD != _owner) {
 861     if (THREAD->is_lock_owned((address) _owner)) {
 862       // Transmute _owner from a BasicLock pointer to a Thread address.
 863       // We don't need to hold _mutex for this transition.
 864       // Non-null to Non-null is safe as long as all readers can
 865       // tolerate either flavor.
 866       assert(_recursions == 0, "invariant");
 867       _owner = THREAD;
 868       _recursions = 0;
 869     } else {
 870       // Apparent unbalanced locking ...
 871       // Naively we'd like to throw IllegalMonitorStateException.
 872       // As a practical matter we can neither allocate nor throw an
 873       // exception as ::exit() can be called from leaf routines.
 874       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
 875       // Upon deeper reflection, however, in a properly run JVM the only
 876       // way we should encounter this situation is in the presence of
 877       // unbalanced JNI locking. TODO: CheckJNICalls.
 878       // See also: CR4414101
 879       assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");

 880       return;
 881     }
 882   }
 883 
 884   if (_recursions != 0) {
 885     _recursions--;        // this is simple recursive enter
 886     return;
 887   }
 888 
 889   // Invariant: after setting Responsible=null a thread must execute
 890   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
 891   _Responsible = NULL;
 892 
 893 #if INCLUDE_JFR
 894   // get the owner's thread id for the MonitorEnter event
 895   // if it is enabled and the thread isn't suspended
 896   if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
 897     _previous_owner_tid = JFR_THREAD_ID(Self);
 898   }
 899 #endif


1115       _recursions = 0;
1116     }
1117   }
1118 
1119   guarantee(Self == _owner, "complete_exit not owner");
1120   intptr_t save = _recursions; // record the old recursion count
1121   _recursions = 0;        // set the recursion level to be 0
1122   exit(true, Self);           // exit the monitor
1123   guarantee(_owner != Self, "invariant");
1124   return save;
1125 }
1126 
1127 // reenter() enters a lock and sets recursion count
1128 // complete_exit/reenter operate as a wait without waiting
1129 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1130   Thread * const Self = THREAD;
1131   assert(Self->is_Java_thread(), "Must be Java thread!");
1132   JavaThread *jt = (JavaThread *)THREAD;
1133 
1134   guarantee(_owner != Self, "reenter already owner");
1135   enter(THREAD);       // enter the monitor

1136   guarantee(_recursions == 0, "reenter recursion");
1137   _recursions = recursions;
1138   return;
1139 }
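An illustrative pairing of the two calls (the caller is assumed; "a wait without waiting" means the monitor is fully exited and later reacquired with its recursion count restored):

    // Sketch: fully release a possibly recursively held monitor, run
    // code without owning it, then reacquire and restore the count.
    intptr_t saved = monitor->complete_exit(THREAD);
    // ... the monitor is free here; other threads may acquire it ...
    monitor->reenter(saved, THREAD);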
1140 
1141 
1142 // -----------------------------------------------------------------------------
1143 // A macro is used below because there may already be a pending
1144 // exception which should not abort the execution of the routines
1145 // which use this (which is why we don't put this into check_slow and
1146 // call it with a CHECK argument).
1147 
1148 #define CHECK_OWNER()                                                       \
1149   do {                                                                      \
1150     if (THREAD != _owner) {                                                 \
1151       if (THREAD->is_lock_owned((address) _owner)) {                        \
1152         _owner = THREAD;  /* Convert from basiclock addr to Thread addr */  \
1153         _recursions = 0;                                                    \
1154       } else {                                                              \
1155         THROW(vmSymbols::java_lang_IllegalMonitorStateException());         \
1156       }                                                                     \
1157     }                                                                       \
1158   } while (false)
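An illustrative call shape for the macro (sketch only; wait() is the kind of TRAPS routine the comment has in mind, and its body is elided):

    void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
      CHECK_OWNER();  // THROWs IllegalMonitorStateException if not owner;
                      // a pending exception in our caller is untouched.
      // ... wait body elided ...
    }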


1908     n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,  \
1909                                         CHECK);                          \
1910   }
1911 #define NEWPERFVARIABLE(n)                                                \
1912   {                                                                       \
1913     n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,  \
1914                                          CHECK);                          \
1915   }
1916     NEWPERFCOUNTER(_sync_Inflations);
1917     NEWPERFCOUNTER(_sync_Deflations);
1918     NEWPERFCOUNTER(_sync_ContendedLockAttempts);
1919     NEWPERFCOUNTER(_sync_FutileWakeups);
1920     NEWPERFCOUNTER(_sync_Parks);
1921     NEWPERFCOUNTER(_sync_Notifications);
1922     NEWPERFVARIABLE(_sync_MonExtant);
1923 #undef NEWPERFCOUNTER
1924 #undef NEWPERFVARIABLE
1925   }
1926 
1927   DEBUG_ONLY(InitDone = true;)

1928 }


 222 // * See also http://blogs.sun.com/dave
 223 
 224 
 225 void* ObjectMonitor::operator new (size_t size) throw() {
 226   return AllocateHeap(size, mtInternal);
 227 }
 228 void* ObjectMonitor::operator new[] (size_t size) throw() {
 229   return operator new (size);
 230 }
 231 void ObjectMonitor::operator delete(void* p) {
 232   FreeHeap(p);
 233 }
 234 void ObjectMonitor::operator delete[] (void *p) {
 235   operator delete(p);
 236 }
 237 
 238 // -----------------------------------------------------------------------------
 239 // Enter support
 240 
 241 void ObjectMonitor::enter(TRAPS) {
 242   ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count);
 243 
 244   // The following code is ordered to check the most common cases first
 245   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 246   Thread * const Self = THREAD;
 247 
 248   void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
 249   if (cur == NULL) {
 250     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
 251     assert(_recursions == 0, "invariant");
 252     assert(_owner == Self, "invariant");
 253     return;
 254   }
 255 
 256   if (cur == Self) {
 257     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 258     _recursions++;
 259     return;
 260   }
 261 
 262   if (Self->is_lock_owned ((address)cur)) {
 263     assert(_recursions == 0, "internal state error");


 278   // Note that if we acquire the monitor from an initial spin
 279   // we forgo posting JVMTI events and firing DTRACE probes.
 280   if (TrySpin(Self) > 0) {
 281     assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
 282     assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT,
 283            _recursions);
 284     assert(((oop)object())->mark() == markOopDesc::encode(this),
 285            "object mark must match encoded this: mark=" INTPTR_FORMAT
 286            ", encoded this=" INTPTR_FORMAT, p2i(((oop)object())->mark()),
 287            p2i(markOopDesc::encode(this)));
 288     Self->_Stalled = 0;
 289     return;
 290   }
 291 
 292   assert(_owner != Self, "invariant");
 293   assert(_succ != Self, "invariant");
 294   assert(Self->is_Java_thread(), "invariant");
 295   JavaThread * jt = (JavaThread *) Self;
 296   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 297   assert(jt->thread_state() != _thread_blocked, "invariant");
 298   assert(AsyncDeflateIdleMonitors || this->object() != NULL, "invariant");
 299   assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions);
 300 
 301   // Prevent deflation. See ObjectSynchronizer::deflate_monitor(),
 302   // ObjectSynchronizer::deflate_monitor_using_JT() and is_busy().
 303   // Ensure the object <-> monitor relationship remains stable while
 304   // there's contention.
 305   Atomic::add(1, &_contentions);
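  // For reference, a sketch of the is_busy() check named above (see
  // objectMonitor.hpp for the authoritative version; the async-deflation
  // patch may fold ref_count into it as well):
  //   intptr_t is_busy() const {
  //     return _contentions | _waiters | intptr_t(_owner) |
  //            intptr_t(_cxq) | intptr_t(_EntryList);
  //   }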
 306 
 307   JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
 308   EventJavaMonitorEnter event;
 309   if (event.should_commit()) {
 310     event.set_monitorClass(((oop)this->object())->klass());
 311     event.set_address((uintptr_t)(this->object_addr()));
 312   }
 313 
 314   { // Change java thread status to indicate blocked on monitor enter.
 315     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 316 
 317     Self->set_current_pending_monitor(this);
 318 
 319     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
 320     if (JvmtiExport::should_post_monitor_contended_enter()) {
 321       JvmtiExport::post_monitor_contended_enter(jt, this);
 322 
 323       // The current thread does not yet own the monitor and does not
 324       // yet appear on any queues that would cause it to be made the successor.
 325       // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event


 347       //
 348       _recursions = 0;
 349       _succ = NULL;
 350       exit(false, Self);
 351 
 352       jt->java_suspend_self();
 353     }
 354     Self->set_current_pending_monitor(NULL);
 355 
 356     // We cleared the pending monitor info since we've just gotten past
 357     // the enter-check-for-suspend dance and we now own the monitor free
 358     // and clear, i.e., it is no longer pending. The ThreadBlockInVM
 359     // destructor can go to a safepoint at the end of this block. If we
 360     // do a thread dump during that safepoint, then this thread will show
 361     // as having "-locked" the monitor, but the OS and java.lang.Thread
 362     // states will still report that the thread is blocked trying to
 363     // acquire it.
 364   }
 365 
 366   Atomic::dec(&_contentions);
 367   assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions);
 368   Self->_Stalled = 0;
 369 
 370   // Must either set _recursions = 0 or ASSERT _recursions == 0.
 371   assert(_recursions == 0, "invariant");
 372   assert(_owner == Self, "invariant");
 373   assert(_succ != Self, "invariant");
 374   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 375 
 376   // The thread -- now the owner -- is back in vm mode.
 377   // Report the glorious news via TI, DTrace and jvmstat.
 378   // The probe effect is non-trivial.  All the reportage occurs
 379   // while we hold the monitor, increasing the length of the critical
 380   // section.  Amdahl's parallel speedup law comes vividly into play.
 381   //
 382   // Another option might be to aggregate the events (thread local or
 383   // per-monitor aggregation) and defer reporting until a more opportune
 384   // time -- such as next time some thread encounters contention but has
 385   // yet to acquire the lock.  While spinning, that thread could
 386   // increment JVMStat counters, etc.
 387 


 404 
 405 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 406 // Callers must compensate as needed.
 407 
 408 int ObjectMonitor::TryLock(Thread * Self) {
 409   void * own = _owner;
 410   if (own != NULL) return 0;
 411   if (Atomic::replace_if_null(Self, &_owner)) {
 412     // Either guarantee _recursions == 0 or set _recursions = 0.
 413     assert(_recursions == 0, "invariant");
 414     assert(_owner == Self, "invariant");
 415     return 1;
 416   }
 417   // The lock had been free momentarily, but we lost the race to the lock.
 418   // Interference -- the CAS failed.
 419   // We can either return -1 or retry.
 420   // Retry doesn't make as much sense because the lock was just acquired by another thread.
 421   return -1;
 422 }
 423 
 424 // Install the displaced mark word (dmw) of a deflating ObjectMonitor
 425 // into the header of the object associated with the monitor. This
 426 // idempotent method is called by a thread that is deflating a
 427 // monitor and by other threads that have detected a race with the
 428 // deflation process.
 429 void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
 430   // This function must only be called when (owner == DEFLATER_MARKER
 431   // && ref_count <= 0), but we can't guarantee that here because
 432   // those values could change when the ObjectMonitor gets moved from
 433   // the global free list to a per-thread free list.
 434 
 435   guarantee(obj != NULL, "must be non-NULL");
 436   if (object() != obj) {
 437     // ObjectMonitor's object ref no longer refers to the target object
 438     // so the object's header has already been restored.
 439     return;
 440   }
 441 
 442   markOop dmw = header();
 443   if (dmw == NULL) {
 444     // ObjectMonitor's header/dmw has been cleared by the deflating
 445     // thread so the object's header has already been restored.
 446     return;
 447   }
 448 
 449   // A non-NULL dmw has to be either neutral (not locked and not marked)
 450   // or already participating in this restoration protocol.
 451   assert(dmw->is_neutral() || (dmw->is_marked() && dmw->hash() == 0),
 452          "failed precondition: dmw=" INTPTR_FORMAT, p2i(dmw));
 453 
 454   markOop marked_dmw = NULL;
 455   if (!dmw->is_marked() && dmw->hash() == 0) {
 456     // This dmw has not yet started the restoration protocol so we
 457     // mark a copy of the dmw to begin the protocol.
 458     // Note: A dmw with a hashcode does not take this code path.
 459     marked_dmw = dmw->set_marked();
 460 
 461     // All of the callers to this function can be racing with each
 462     // other trying to update the _header field.
 463     dmw = (markOop) Atomic::cmpxchg(marked_dmw, &_header, dmw);
 464     if (dmw == NULL) {
 465       // ObjectMonitor's header/dmw has been cleared by the deflating
 466       // thread so the object's header has already been restored.
 467       return;
 468     }
 469     // The _header field is now marked. The winner's 'dmw' variable
 470     // contains the original, unmarked header/dmw value and any
 471     // losers have a marked header/dmw value that will be cleaned
 472     // up below.
 473   }
 474 
 475   if (dmw->is_marked()) {
 476     // Clear the mark from the header/dmw copy in preparation for
 477     // possible restoration from this thread.
 478     assert(dmw->hash() == 0, "hashcode must be 0: dmw=" INTPTR_FORMAT,
 479            p2i(dmw));
 480     dmw = dmw->set_unmarked();
 481   }
 482   assert(dmw->is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, p2i(dmw));
 483 
 484   // Install displaced mark word if the object's header still points
 485   // to this ObjectMonitor. All racing callers to this function will
 486   // reach this point, but only one can win.
 487   obj->cas_set_mark(dmw, markOopDesc::encode(this));
 488 
 489   // Note: It does not matter which thread restored the header/dmw
 490   // into the object's header. The thread deflating the monitor just
 491   // wanted the object's header restored and it is. The threads that
 492   // detected a race with the deflation process also wanted the
 493   // object's header restored before they retry their operation and
 494   // because it is restored they will only retry once.
 495 
 496   if (marked_dmw != NULL) {
 497     // Clear _header to NULL if it is still marked_dmw so a racing
 498     // install_displaced_markword_in_object() can bail out sooner.
 499     Atomic::cmpxchg((markOop)NULL, &_header, marked_dmw);
 500   }
 501 }
 502 
 503 #define MAX_RECHECK_INTERVAL 1000
 504 
 505 void ObjectMonitor::EnterI(TRAPS) {
 506   ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count);
 507 
 508   Thread * const Self = THREAD;
 509   assert(Self->is_Java_thread(), "invariant");
 510   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 511 
 512   // Try the lock - TATAS
 513   if (TryLock (Self) > 0) {
 514     assert(_succ != Self, "invariant");
 515     assert(_owner == Self, "invariant");
 516     assert(_Responsible != Self, "invariant");
 517     return;
 518   }
 519 
 520   if (_owner == DEFLATER_MARKER) {
 521     // The deflation protocol finished the first part (setting owner), but
 522     // it failed the second part (making ref_count negative) and bailed.
 523     if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
 524       // Acquired the monitor.
 525       assert(_succ != Self, "invariant");
 526       assert(_Responsible != Self, "invariant");
 527       return;
 528     }
 529   }
 530 
 531   assert(InitDone, "Unexpectedly not initialized");
 532 
 533   // We try one round of spinning *before* enqueueing Self.
 534   //
 535   // If the _owner is ready but OFFPROC we could use a YieldTo()
 536   // operation to donate the remainder of this thread's quantum
 537   // to the owner.  This has subtle but beneficial affinity
 538   // effects.
 539 
 540   if (TrySpin(Self) > 0) {
 541     assert(_owner == Self, "invariant");
 542     assert(_succ != Self, "invariant");
 543     assert(_Responsible != Self, "invariant");
 544     return;
 545   }
 546 
 547   // The Spin failed -- Enqueue and park the thread ...
 548   assert(_succ != Self, "invariant");
 549   assert(_owner != Self, "invariant");
 550   assert(_Responsible != Self, "invariant");
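The DEFLATER_MARKER checks above (and the matching ones in the park loop below and in ReenterI()) are the contender's half of a two-part handshake. A minimal sketch of the assumed deflater half, written as if a member of ObjectMonitor (the real ObjectSynchronizer::deflate_monitor_using_JT() differs in detail):

    bool ObjectMonitor::deflater_handshake_sketch() {
      // Part one: claim ownership of an apparently idle monitor.
      if (Atomic::cmpxchg(DEFLATER_MARKER, &_owner, (void*)NULL) != NULL) {
        return false;  // owned by someone; not idle after all
      }
      // Part two: drive ref_count negative so contenders back off.
      if (Atomic::cmpxchg(-max_jint, &_ref_count, (jint)0) != 0) {
        // A contender holds a reference; bail. _owner is left as
        // DEFLATER_MARKER for EnterI()/ReenterI() to reclaim via CAS.
        return false;
      }
      return true;  // both parts succeeded; deflation can proceed
    }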


 627 
 628   for (;;) {
 629 
 630     if (TryLock(Self) > 0) break;
 631     assert(_owner != Self, "invariant");
 632 
 633     // park self
 634     if (_Responsible == Self) {
 635       Self->_ParkEvent->park((jlong) recheckInterval);
 636       // Increase the recheckInterval, but clamp the value.
 637       recheckInterval *= 8;
 638       if (recheckInterval > MAX_RECHECK_INTERVAL) {
 639         recheckInterval = MAX_RECHECK_INTERVAL;
 640       }
 641     } else {
 642       Self->_ParkEvent->park();
 643     }
 644 
 645     if (TryLock(Self) > 0) break;
 646 
 647     if (_owner == DEFLATER_MARKER) {
 648       // The deflation protocol finished the first part (setting owner), but
 649       // it failed the second part (making ref_count negative) and bailed.
 650       if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
 651         // Acquired the monitor.
 652         break;
 653       }
 654     }
 655 
 656     // The lock is still contested.
 657     // Keep a tally of the # of futile wakeups.
 658     // Note that the counter is not protected by a lock or updated by atomics.
 659     // That is by design - we trade "lossy" counters which are exposed to
 660     // races during updates for a lower probe effect.
 661 
 662     // This PerfData object can be used in parallel with a safepoint.
 663     // See the work around in PerfDataManager::destroy().
 664     OM_PERFDATA_OP(FutileWakeups, inc());
 665     ++nWakeups;
 666 
 667     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 668     // We can defer clearing _succ until after the spin completes.
 669     // TrySpin() must tolerate being called with _succ == Self.
 670     // Try yet another round of adaptive spinning.
 671     if (TrySpin(Self) > 0) break;
 672 
 673     // We can find that we were unpark()ed and redesignated _succ while
 674     // we were spinning.  That's harmless.  If we iterate and call park(),
 675     // park() will consume the event and return immediately and we'll


 740   // the lock.   The barrier ensures that changes to monitor meta-data and data
 741   // protected by the lock will be visible before we release the lock, and
 742   // therefore before some other thread (CPU) has a chance to acquire the lock.
 743   // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
 744   //
 745   // Critically, any prior STs to _succ or EntryList must be visible before
 746   // the ST of null into _owner in the *subsequent* (following) corresponding
 747   // monitorexit.  Recall, too, that in 1-0 mode monitorexit does not necessarily
 748   // execute a serializing instruction.
 749 
 750   return;
 751 }
 752 
 753 // ReenterI() is a specialized inline form of the latter half of the
 754 // contended slow-path from EnterI().  We use ReenterI() only for
 755 // monitor reentry in wait().
 756 //
 757 // In the future we should reconcile EnterI() and ReenterI().
 758 
 759 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
 760   ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count);
 761 
 762   assert(Self != NULL, "invariant");
 763   assert(SelfNode != NULL, "invariant");
 764   assert(SelfNode->_thread == Self, "invariant");
 765   assert(_waiters > 0, "invariant");
 766   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 767   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 768   JavaThread * jt = (JavaThread *) Self;
 769 
 770   int nWakeups = 0;
 771   for (;;) {
 772     ObjectWaiter::TStates v = SelfNode->TState;
 773     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 774     assert(_owner != Self, "invariant");
 775 
 776     if (TryLock(Self) > 0) break;
 777     if (TrySpin(Self) > 0) break;
 778 
 779     if (_owner == DEFLATER_MARKER) {
 780       // The deflation protocol finished the first part (setting owner), but
 781       // it failed the second part (making ref_count negative) and bailed.
 782       if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
 783         // Acquired the monitor.
 784         break;
 785       }
 786     }
 787 
 788     // State transition wrappers around park() ...
 789     // ReenterI() wisely defers state transitions until
 790     // it's clear we must park the thread.
 791     {
 792       OSThreadContendState osts(Self->osthread());
 793       ThreadBlockInVM tbivm(jt);
 794 
 795       // cleared by handle_special_suspend_equivalent_condition()
 796       // or java_suspend_self()
 797       jt->set_suspend_equivalent();
 798       Self->_ParkEvent->park();
 799 
 800       // were we externally suspended while we were waiting?
 801       for (;;) {
 802         if (!ExitSuspendEquivalent(jt)) break;
 803         if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
 804         jt->java_suspend_self();
 805         jt->set_suspend_equivalent();
 806       }
 807     }


 975   Thread * const Self = THREAD;
 976   if (THREAD != _owner) {
 977     if (THREAD->is_lock_owned((address) _owner)) {
 978       // Transmute _owner from a BasicLock pointer to a Thread address.
 979       // We don't need to hold _mutex for this transition.
 980       // Non-null to Non-null is safe as long as all readers can
 981       // tolerate either flavor.
 982       assert(_recursions == 0, "invariant");
 983       _owner = THREAD;
 984       _recursions = 0;
 985     } else {
 986       // Apparent unbalanced locking ...
 987       // Naively we'd like to throw IllegalMonitorStateException.
 988       // As a practical matter we can neither allocate nor throw an
 989       // exception as ::exit() can be called from leaf routines.
 990       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
 991       // Upon deeper reflection, however, in a properly run JVM the only
 992       // way we should encounter this situation is in the presence of
 993       // unbalanced JNI locking. TODO: CheckJNICalls.
 994       // See also: CR4414101
 995       assert(false, "Non-balanced monitor enter/exit! Likely JNI locking: "
 996              "owner=" INTPTR_FORMAT, p2i(_owner));
 997       return;
 998     }
 999   }
1000 
1001   if (_recursions != 0) {
1002     _recursions--;        // this is simple recursive enter
1003     return;
1004   }
1005 
 1006   // Invariant: after setting Responsible=null a thread must execute
1007   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
1008   _Responsible = NULL;
1009 
1010 #if INCLUDE_JFR
1011   // get the owner's thread id for the MonitorEnter event
1012   // if it is enabled and the thread isn't suspended
1013   if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
1014     _previous_owner_tid = JFR_THREAD_ID(Self);
1015   }
1016 #endif


1232       _recursions = 0;
1233     }
1234   }
1235 
1236   guarantee(Self == _owner, "complete_exit not owner");
1237   intptr_t save = _recursions; // record the old recursion count
1238   _recursions = 0;        // set the recursion level to be 0
1239   exit(true, Self);           // exit the monitor
1240   guarantee(_owner != Self, "invariant");
1241   return save;
1242 }
1243 
1244 // reenter() enters a lock and sets recursion count
1245 // complete_exit/reenter operate as a wait without waiting
1246 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1247   Thread * const Self = THREAD;
1248   assert(Self->is_Java_thread(), "Must be Java thread!");
1249   JavaThread *jt = (JavaThread *)THREAD;
1250 
1251   guarantee(_owner != Self, "reenter already owner");
1252   enter(THREAD);
1253   // Entered the monitor.
1254   guarantee(_recursions == 0, "reenter recursion");
1255   _recursions = recursions;

1256 }
1257 
1258 
1259 // -----------------------------------------------------------------------------
1260 // A macro is used below because there may already be a pending
1261 // exception which should not abort the execution of the routines
1262 // which use this (which is why we don't put this into check_slow and
1263 // call it with a CHECK argument).
1264 
1265 #define CHECK_OWNER()                                                       \
1266   do {                                                                      \
1267     if (THREAD != _owner) {                                                 \
1268       if (THREAD->is_lock_owned((address) _owner)) {                        \
1269         _owner = THREAD;  /* Convert from basiclock addr to Thread addr */  \
1270         _recursions = 0;                                                    \
1271       } else {                                                              \
1272         THROW(vmSymbols::java_lang_IllegalMonitorStateException());         \
1273       }                                                                     \
1274     }                                                                       \
1275   } while (false)


2025     n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,  \
2026                                         CHECK);                          \
2027   }
2028 #define NEWPERFVARIABLE(n)                                                \
2029   {                                                                       \
2030     n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,  \
2031                                          CHECK);                          \
2032   }
2033     NEWPERFCOUNTER(_sync_Inflations);
2034     NEWPERFCOUNTER(_sync_Deflations);
2035     NEWPERFCOUNTER(_sync_ContendedLockAttempts);
2036     NEWPERFCOUNTER(_sync_FutileWakeups);
2037     NEWPERFCOUNTER(_sync_Parks);
2038     NEWPERFCOUNTER(_sync_Notifications);
2039     NEWPERFVARIABLE(_sync_MonExtant);
2040 #undef NEWPERFCOUNTER
2041 #undef NEWPERFVARIABLE
2042   }
2043 
2044   DEBUG_ONLY(InitDone = true;)
2045 }
2046 
2047 // For internal use by ObjectSynchronizer::monitors_iterate().
2048 ObjectMonitorHandle::ObjectMonitorHandle(ObjectMonitor * om_ptr) {
2049   om_ptr->inc_ref_count();
2050   _om_ptr = om_ptr;
2051 }
2052 
2053 ObjectMonitorHandle::~ObjectMonitorHandle() {
2054   if (_om_ptr != NULL) {
2055     _om_ptr->dec_ref_count();
2056     _om_ptr = NULL;
2057   }
2058 }
2059 
2060 // Save the ObjectMonitor* associated with the specified markOop and
2061 // increment the ref_count. This function should only be called if
2062 // the caller has verified mark->has_monitor() == true. The object
2063 // parameter is needed to verify that ObjectMonitor* has not been
2064 // deflated and reused for another object.
2065 //
2066 // This function returns true if the ObjectMonitor* has been safely
2067 // saved. This function returns false if we have lost a race with
2068 // async deflation; the caller should retry as appropriate.
2069 //
2070 bool ObjectMonitorHandle::save_om_ptr(oop object, markOop mark) {
2071   guarantee(mark->has_monitor(), "sanity check: mark=" INTPTR_FORMAT,
2072             p2i(mark));
2073 
2074   ObjectMonitor * om_ptr = mark->monitor();
2075   om_ptr->inc_ref_count();
2076 
2077   if (AsyncDeflateIdleMonitors) {
2078     // Race here if monitor is not owned! The above ref_count bump
2079     // will cause subsequent async deflation to skip it. However,
2080     // previous or concurrent async deflation is a race.
2081     if (om_ptr->_owner == DEFLATER_MARKER && om_ptr->ref_count() <= 0) {
2082       // Async deflation is in progress and our ref_count increment
2083       // above lost the race to async deflation. Attempt to restore
2084       // the header/dmw to the object's header so that we only retry
2085       // once if the deflater thread happens to be slow.
2086       om_ptr->install_displaced_markword_in_object(object);
2087       om_ptr->dec_ref_count();
2088       return false;
2089     }
2090     // The ObjectMonitor could have been deflated and reused for
2091     // another object before we bumped the ref_count so make sure
2092     // our object still refers to this ObjectMonitor.
2093     const markOop tmp = object->mark();
2094     if (!tmp->has_monitor() || tmp->monitor() != om_ptr) {
2095       // Async deflation and reuse won the race so we have to retry.
2096       // Skip object header restoration since that's already done.
2097       om_ptr->dec_ref_count();
2098       return false;
2099     }
2100   }
2101 
2102   ADIM_guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
2103                  p2i(_om_ptr));
2104   _om_ptr = om_ptr;
2105   return true;
2106 }
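A hedged caller sketch of the retry contract above (the loop shape and the om_ptr() accessor are assumptions; real callers live in ObjectSynchronizer):

    // Retry until the handle safely pins the monitor or the mark no
    // longer points at one. ~ObjectMonitorHandle() drops the ref_count,
    // re-enabling async deflation.
    while (true) {
      markOop mark = object->mark();
      if (!mark->has_monitor()) break;  // deflated; take another path
      ObjectMonitorHandle omh;
      if (omh.save_om_ptr(object, mark)) {
        ObjectMonitor* m = omh.om_ptr();  // assumed accessor name
        // ... use m; it cannot be async-deflated while pinned ...
        break;
      }
      // Lost a race with async deflation; loop and re-read the mark.
    }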
2107 
2108 // For internal use by ObjectSynchronizer::inflate().
2109 void ObjectMonitorHandle::set_om_ptr(ObjectMonitor * om_ptr) {
2110   if (_om_ptr == NULL) {
2111     ADIM_guarantee(om_ptr != NULL, "cannot clear an unset om_ptr");
2112     om_ptr->inc_ref_count();
2113     _om_ptr = om_ptr;
2114   } else {
2115     ADIM_guarantee(om_ptr == NULL, "can only clear a set om_ptr");
2116     _om_ptr->dec_ref_count();
2117     _om_ptr = NULL;
2118   }
2119 }
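A minimal toggle sketch for set_om_ptr() (usage in ObjectSynchronizer::inflate() is assumed from the comment above):

    ObjectMonitorHandle omh;
    omh.set_om_ptr(m);     // set: pins m by bumping its ref_count
    // ... m is protected from async deflation here ...
    omh.set_om_ptr(NULL);  // clear: unpins m by dropping the ref_count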