
src/hotspot/share/runtime/objectMonitor.cpp

rev 51780 : imported patch syncknobs-00-base
rev 51781 : imported patch syncknobs-01-Knob_ReportSettings
rev 51782 : imported patch syncknobs-02-Knob_SpinBackOff
rev 51783 : imported patch syncknobs-03-BackOffMask
rev 51784 : imported patch syncknobs-04-Knob_ExitRelease
rev 51785 : imported patch syncknobs-05-Knob_InlineNotify
rev 51786 : imported patch syncknobs-06-Knob_Verbose
rev 51787 : imported patch syncknobs-07-Knob_VerifyInUse
rev 51788 : imported patch syncknobs-08-Knob_VerifyMatch
rev 51789 : imported patch syncknobs-09-Knob_SpinBase
rev 51790 : imported patch syncknobs-10-Knob_CASPenalty
rev 51791 : imported patch syncknobs-11-Knob_OXPenalty
rev 51792 : imported patch syncknobs-12-Knob_SpinSetSucc
rev 51793 : imported patch syncknobs-13-Knob_SpinEarly


  86     if (DTraceMonitorProbes) {                                             \
  87       DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
  88       HOTSPOT_MONITOR_##probe(jtid,                                        \
  89                               (uintptr_t)(monitor), bytes, len);           \
  90     }                                                                      \
  91   }
  92 
  93 #else //  ndef DTRACE_ENABLED
  94 
  95 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
  96 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}
  97 
  98 #endif // ndef DTRACE_ENABLED
  99 
 100 // Tunables ...
 101 // The knob* variables are effectively final.  Once set they should
 102 // never be modified thereafter.  Consider using __read_mostly with GCC.
 103 
 104 int ObjectMonitor::Knob_SpinLimit    = 5000;    // derived by an external tool -
 105 
 106 static int Knob_SpinEarly           = 1;
 107 static int Knob_SuccEnabled         = 1;       // futile wake throttling
 108 static int Knob_SuccRestrict        = 0;       // Limit successors + spinners to at-most-one
 109 static int Knob_MaxSpinners         = -1;      // Should be a function of # CPUs
 110 static int Knob_Bonus               = 100;     // spin success bonus
 111 static int Knob_BonusB              = 100;     // spin success bonus
 112 static int Knob_Penalty             = 200;     // spin failure penalty
 113 static int Knob_Poverty             = 1000;
 114 static int Knob_SpinAfterFutile     = 1;       // Spin after returning from park()
 115 static int Knob_FixedSpin           = 0;
 116 static int Knob_OState              = 3;       // Spinner checks thread state of _owner
 117 static int Knob_UsePause            = 1;
 118 static int Knob_ExitPolicy          = 0;
 119 static int Knob_PreSpin             = 10;      // 20-100 likely better
 120 static int Knob_ResetEvent          = 0;
 121 
 122 static int Knob_FastHSSEC           = 0;
 123 static int Knob_MoveNotifyee        = 2;       // notify() - disposition of notifyee
 124 static int Knob_QMode               = 0;       // EntryList-cxq policy - queue discipline
 125 static volatile int InitDone        = 0;
 126 
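
The __read_mostly suggestion in the comment above means placing rarely-written globals in their own data section so they do not share cache lines with hot, frequently-written state. A minimal sketch of that idea, assuming a GCC/Clang toolchain (the READ_MOSTLY macro and Knob_Example are illustrative names, not part of this file):

    #if defined(__GNUC__)
    #define READ_MOSTLY __attribute__((section(".data.read_mostly")))
    #else
    #define READ_MOSTLY
    #endif

    // Hypothetical knob, for illustration only: set once at startup and only
    // read afterwards, so grouping it with other read-mostly data keeps it off
    // the cache lines of frequently-written globals.
    static int Knob_Example READ_MOSTLY = 1;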


 270   }
 271 
 272   if (Self->is_lock_owned ((address)cur)) {
 273     assert(_recursions == 0, "internal state error");
 274     _recursions = 1;
 275     // Commute owner from a thread-specific on-stack BasicLock address to
 276     // a full-fledged "Thread *".
 277     _owner = Self;
 278     return;
 279   }
 280 
 281   // We've encountered genuine contention.
 282   assert(Self->_Stalled == 0, "invariant");
 283   Self->_Stalled = intptr_t(this);
 284 
 285   // Try one round of spinning *before* enqueueing Self
 286   // and before going through the awkward and expensive state
 287   // transitions.  The following spin is strictly optional ...
 288   // Note that if we acquire the monitor from an initial spin
 289   // we forgo posting JVMTI events and firing DTRACE probes.
 290   if (Knob_SpinEarly && TrySpin (Self) > 0) {
 291     assert(_owner == Self, "invariant");
 292     assert(_recursions == 0, "invariant");
 293     assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 294     Self->_Stalled = 0;
 295     return;
 296   }
 297 
 298   assert(_owner != Self, "invariant");
 299   assert(_succ != Self, "invariant");
 300   assert(Self->is_Java_thread(), "invariant");
 301   JavaThread * jt = (JavaThread *) Self;
 302   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 303   assert(jt->thread_state() != _thread_blocked, "invariant");
 304   assert(this->object() != NULL, "invariant");
 305   assert(_count >= 0, "invariant");
 306 
 307   // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
 308   // Ensure the object-monitor relationship remains stable while there's contention.
 309   Atomic::inc(&_count);
 310 
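
The comment above describes the usual shape of contended acquisition here: attempt a bounded, optional spin before paying for the thread-state transition and parking. A self-contained sketch of that shape, not HotSpot code (ToyMonitor and its members are illustrative; std::atomic stands in for the _owner word, and a yield loop stands in for the enqueue-on-_cxq-and-park slow path):

    #include <atomic>
    #include <thread>

    // Illustrative only: "spin briefly, then block" acquisition.
    struct ToyMonitor {
      std::atomic<void*> owner{nullptr};

      bool try_lock(void* self) {
        void* expected = nullptr;                 // free iff owner == nullptr
        return owner.compare_exchange_strong(expected, self);
      }

      void lock(void* self, int spin_limit) {
        for (int i = 0; i < spin_limit; i++) {    // optional spin phase
          if (try_lock(self)) return;             // acquired without blocking
        }
        while (!try_lock(self)) {                 // slow-path stand-in; HotSpot
          std::this_thread::yield();              // enqueues on _cxq and parks
        }
      }

      void unlock(void* self) {
        if (owner.load() == self) owner.store(nullptr);
      }
    };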


 432   assert(Self->is_Java_thread(), "invariant");
 433   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 434 
 435   // Try the lock - TATAS
 436   if (TryLock (Self) > 0) {
 437     assert(_succ != Self, "invariant");
 438     assert(_owner == Self, "invariant");
 439     assert(_Responsible != Self, "invariant");
 440     return;
 441   }
 442 
 443   DeferredInitialize();
 444 
 445   // We try one round of spinning *before* enqueueing Self.
 446   //
 447   // If the _owner is ready but OFFPROC we could use a YieldTo()
 448   // operation to donate the remainder of this thread's quantum
 449   // to the owner.  This has subtle but beneficial affinity
 450   // effects.
 451 
 452   if (TrySpin (Self) > 0) {
 453     assert(_owner == Self, "invariant");
 454     assert(_succ != Self, "invariant");
 455     assert(_Responsible != Self, "invariant");
 456     return;
 457   }
 458 
 459   // The Spin failed -- Enqueue and park the thread ...
 460   assert(_succ != Self, "invariant");
 461   assert(_owner != Self, "invariant");
 462   assert(_Responsible != Self, "invariant");
 463 
 464   // Enqueue "Self" on ObjectMonitor's _cxq.
 465   //
 466   // Node acts as a proxy for Self.
 467   // As an aside, if we were ever to rewrite the synchronization code mostly
 468   // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
 469   // Java objects.  This would avoid awkward lifecycle and liveness issues,
 470   // as well as eliminate a subset of ABA issues.
 471   // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
 472 
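
The enqueue described in the comment above pushes a per-thread node onto _cxq, which is maintained as a lock-free LIFO list. The usual shape of such a CAS push, sketched generically (Node and cxq_head are illustrative names, not the fields of this file):

    #include <atomic>

    struct Node {
      Node* next;
      void* thread;                               // node acts as a proxy for the thread
    };

    static std::atomic<Node*> cxq_head{nullptr};

    // Illustrative only: classic CAS push onto a singly linked LIFO list.
    // compare_exchange_weak reloads old_head on failure, so the loop retries
    // against the latest head until the push takes effect.
    static void cxq_push(Node* n) {
      Node* old_head = cxq_head.load();
      do {
        n->next = old_head;                       // link behind the observed head
      } while (!cxq_head.compare_exchange_weak(old_head, n));
    }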




  86     if (DTraceMonitorProbes) {                                             \
  87       DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
  88       HOTSPOT_MONITOR_##probe(jtid,                                        \
  89                               (uintptr_t)(monitor), bytes, len);           \
  90     }                                                                      \
  91   }
  92 
  93 #else //  ndef DTRACE_ENABLED
  94 
  95 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
  96 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}
  97 
  98 #endif // ndef DTRACE_ENABLED
  99 
 100 // Tunables ...
 101 // The knob* variables are effectively final.  Once set they should
 102 // never be modified thereafter.  Consider using __read_mostly with GCC.
 103 
 104 int ObjectMonitor::Knob_SpinLimit    = 5000;    // derived by an external tool -
 105 

 106 static int Knob_SuccEnabled         = 1;       // futile wake throttling
 107 static int Knob_SuccRestrict        = 0;       // Limit successors + spinners to at-most-one
 108 static int Knob_MaxSpinners         = -1;      // Should be a function of # CPUs
 109 static int Knob_Bonus               = 100;     // spin success bonus
 110 static int Knob_BonusB              = 100;     // spin success bonus
 111 static int Knob_Penalty             = 200;     // spin failure penalty
 112 static int Knob_Poverty             = 1000;
 113 static int Knob_SpinAfterFutile     = 1;       // Spin after returning from park()
 114 static int Knob_FixedSpin           = 0;
 115 static int Knob_OState              = 3;       // Spinner checks thread state of _owner
 116 static int Knob_UsePause            = 1;
 117 static int Knob_ExitPolicy          = 0;
 118 static int Knob_PreSpin             = 10;      // 20-100 likely better
 119 static int Knob_ResetEvent          = 0;
 120 
 121 static int Knob_FastHSSEC           = 0;
 122 static int Knob_MoveNotifyee        = 2;       // notify() - disposition of notifyee
 123 static int Knob_QMode               = 0;       // EntryList-cxq policy - queue discipline
 124 static volatile int InitDone        = 0;
 125 


 269   }
 270 
 271   if (Self->is_lock_owned ((address)cur)) {
 272     assert(_recursions == 0, "internal state error");
 273     _recursions = 1;
 274     // Commute owner from a thread-specific on-stack BasicLock address to
 275     // a full-fledged "Thread *".
 276     _owner = Self;
 277     return;
 278   }
 279 
 280   // We've encountered genuine contention.
 281   assert(Self->_Stalled == 0, "invariant");
 282   Self->_Stalled = intptr_t(this);
 283 
 284   // Try one round of spinning *before* enqueueing Self
 285   // and before going through the awkward and expensive state
 286   // transitions.  The following spin is strictly optional ...
 287   // Note that if we acquire the monitor from an initial spin
 288   // we forgo posting JVMTI events and firing DTRACE probes.
 289   if (TrySpin(Self) > 0) {
 290     assert(_owner == Self, "invariant");
 291     assert(_recursions == 0, "invariant");
 292     assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 293     Self->_Stalled = 0;
 294     return;
 295   }
 296 
 297   assert(_owner != Self, "invariant");
 298   assert(_succ != Self, "invariant");
 299   assert(Self->is_Java_thread(), "invariant");
 300   JavaThread * jt = (JavaThread *) Self;
 301   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 302   assert(jt->thread_state() != _thread_blocked, "invariant");
 303   assert(this->object() != NULL, "invariant");
 304   assert(_count >= 0, "invariant");
 305 
 306   // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
 307   // Ensure the object-monitor relationship remains stable while there's contention.
 308   Atomic::inc(&_count);
 309 
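
The Atomic::inc(&_count) above acts, in effect, as a contention reference count: while a thread is in the contended-enter path the count stays non-zero, the monitor contributes to is_busy(), and deflate_idle_monitors() leaves it alone. The general pattern, sketched generically rather than as HotSpot code (IdleGuard and its members are illustrative names):

    #include <atomic>

    // Illustrative only: pin a shared structure against reclamation while in use.
    struct IdleGuard {
      std::atomic<int> users{0};

      bool is_busy() const { return users.load() != 0; }

      void enter_slow_path() {
        users.fetch_add(1);        // pin: an idle-cleanup pass must now skip us
        // ... contended work: enqueue, park, retry ...
        users.fetch_sub(1);        // unpin once the slow path finishes
      }

      bool try_reclaim() {         // e.g. called from an idle-cleanup sweep
        return !is_busy();         // reclaim only when nothing is pinned here
      }
    };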


 431   assert(Self->is_Java_thread(), "invariant");
 432   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 433 
 434   // Try the lock - TATAS
 435   if (TryLock (Self) > 0) {
 436     assert(_succ != Self, "invariant");
 437     assert(_owner == Self, "invariant");
 438     assert(_Responsible != Self, "invariant");
 439     return;
 440   }
 441 
 442   DeferredInitialize();
 443 
 444   // We try one round of spinning *before* enqueueing Self.
 445   //
 446   // If the _owner is ready but OFFPROC we could use a YieldTo()
 447   // operation to donate the remainder of this thread's quantum
 448   // to the owner.  This has subtle but beneficial affinity
 449   // effects.
 450 
 451   if (TrySpin(Self) > 0) {
 452     assert(_owner == Self, "invariant");
 453     assert(_succ != Self, "invariant");
 454     assert(_Responsible != Self, "invariant");
 455     return;
 456   }
 457 
 458   // The Spin failed -- Enqueue and park the thread ...
 459   assert(_succ != Self, "invariant");
 460   assert(_owner != Self, "invariant");
 461   assert(_Responsible != Self, "invariant");
 462 
 463   // Enqueue "Self" on ObjectMonitor's _cxq.
 464   //
 465   // Node acts as a proxy for Self.
 466   // As an aside, if we were ever to rewrite the synchronization code mostly
 467   // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
 468   // Java objects.  This would avoid awkward lifecycle and liveness issues,
 469   // as well as eliminate a subset of ABA issues.
 470   // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
 471 
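
The "Try the lock - TATAS" step in the excerpt above is test-and-test-and-set: read the lock word with a plain load first and only attempt the atomic operation when the lock looks free, so waiters do not keep invalidating the cache line with failing atomics. A minimal sketch (lock_word and tatas_try_lock are illustrative names, not HotSpot's):

    #include <atomic>

    static std::atomic<int> lock_word{0};         // 0 = free, 1 = held (illustrative)

    // Illustrative only: the plain load filters out attempts that would clearly
    // fail; the atomic exchange runs only when the lock appears free.
    static bool tatas_try_lock() {
      if (lock_word.load(std::memory_order_relaxed) != 0) {
        return false;                             // "test": looks held, skip the atomic
      }
      return lock_word.exchange(1) == 0;          // "test-and-set": try to take it
    }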

