
src/hotspot/share/runtime/objectMonitor.cpp

rev 51780 : imported patch syncknobs-00-base
rev 51781 : imported patch syncknobs-01-Knob_ReportSettings
rev 51782 : imported patch syncknobs-02-Knob_SpinBackOff
rev 51783 : imported patch syncknobs-03-BackOffMask
rev 51784 : imported patch syncknobs-04-Knob_ExitRelease
rev 51785 : imported patch syncknobs-05-Knob_InlineNotify
rev 51786 : imported patch syncknobs-06-Knob_Verbose
rev 51787 : imported patch syncknobs-07-Knob_VerifyInUse
rev 51788 : imported patch syncknobs-08-Knob_VerifyMatch
rev 51789 : imported patch syncknobs-09-Knob_SpinBase
rev 51790 : imported patch syncknobs-10-Knob_CASPenalty
rev 51791 : imported patch syncknobs-11-Knob_OXPenalty
rev 51792 : imported patch syncknobs-12-Knob_SpinSetSucc
rev 51793 : imported patch syncknobs-13-Knob_SpinEarly
rev 51794 : imported patch syncknobs-14-Knob_SuccEnabled
rev 51795 : imported patch syncknobs-15-Knob_SuccRestrict
rev 51796 : imported patch syncknobs-16-Knob_MaxSpinners
rev 51797 : imported patch syncknobs-17-Knob_SpinAfterFutile
rev 51798 : imported patch syncknobs-18-Knob_OState
rev 51799 : imported patch syncknobs-19-Knob_UsePause
rev 51800 : imported patch syncknobs-20-Knob_ExitPolicy
rev 51801 : imported patch syncknobs-21-Knob_ResetEvent


  92 
  93 #else //  ndef DTRACE_ENABLED
  94 
  95 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
  96 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}
  97 
  98 #endif // ndef DTRACE_ENABLED
  99 
 100 // Tunables ...
 101 // The knob* variables are effectively final.  Once set they should
 102 // never be modified hence.  Consider using __read_mostly with GCC.
 103 
 104 int ObjectMonitor::Knob_SpinLimit    = 5000;    // derived by an external tool -
 105 
 106 static int Knob_Bonus               = 100;     // spin success bonus
 107 static int Knob_BonusB              = 100;     // spin success bonus
 108 static int Knob_Penalty             = 200;     // spin failure penalty
 109 static int Knob_Poverty             = 1000;
 110 static int Knob_FixedSpin           = 0;
 111 static int Knob_PreSpin             = 10;      // 20-100 likely better
 112 static int Knob_ResetEvent          = 0;
 113 
 114 static int Knob_FastHSSEC           = 0;
 115 static int Knob_MoveNotifyee        = 2;       // notify() - disposition of notifyee
 116 static int Knob_QMode               = 0;       // EntryList-cxq policy - queue discipline
 117 static volatile int InitDone        = 0;
 118 
 119 // -----------------------------------------------------------------------------
 120 // Theory of operations -- Monitors lists, thread residency, etc:
 121 //
 122 // * A thread acquires ownership of a monitor by successfully
 123 //   CAS()ing the _owner field from null to non-null.
 124 //
 125 // * Invariant: A thread appears on at most one monitor list --
 126 //   cxq, EntryList or WaitSet -- at any one time.
 127 //
 128 // * Contending threads "push" themselves onto the cxq with CAS
 129 //   and then spin/park.
 130 //
 131 // * After a contending thread eventually acquires the lock it must
 132 //   dequeue itself from either the EntryList or the cxq.
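The four bullets above compress the acquisition protocol: CAS _owner from null to non-null, and on failure publish yourself on the cxq with a CAS push before spinning/parking. Below is a minimal sketch of that shape, assuming only std::atomic; SimpleMonitor and Node are hypothetical stand-ins for ObjectMonitor and ObjectWaiter, and yield() stands in for the real TrySpin()/park() machinery.

// Minimal sketch of the acquire/enqueue protocol described above.  This is
// illustrative only, not the real ObjectMonitor code.
#include <atomic>
#include <thread>

struct Node {                          // hypothetical stand-in for ObjectWaiter
  void* thread = nullptr;
  Node* next   = nullptr;
};

struct SimpleMonitor {                 // hypothetical stand-in for ObjectMonitor
  std::atomic<void*> owner{nullptr};   // mirrors _owner: null means unowned
  std::atomic<Node*> cxq{nullptr};     // contention queue, prepend-only via CAS

  bool try_acquire(void* self) {
    void* expected = nullptr;          // acquire = CAS owner from null to self
    return owner.compare_exchange_strong(expected, self);
  }

  void push_cxq(Node* n) {             // "push" onto cxq: lock-free prepend
    Node* head = cxq.load(std::memory_order_relaxed);
    do {
      n->next = head;
    } while (!cxq.compare_exchange_weak(head, n));
  }

  void enter(void* self, Node* n) {
    if (try_acquire(self)) return;     // fast path: uncontended CAS
    n->thread = self;
    push_cxq(n);                       // slow path: publish ourselves, then spin/park
    while (!try_acquire(self)) {
      std::this_thread::yield();       // stands in for TrySpin()/park()
    }
    // Once the lock is held, the waiter must be unlinked from the queue;
    // see the egress sketch at the end of this page.
  }
};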


 552     // Keep a tally of the # of futile wakeups.
 553     // Note that the counter is not protected by a lock or updated by atomics.
 554     // That is by design - we trade "lossy" counters which are exposed to
 555     // races during updates for a lower probe effect.
 556 
 557     // This PerfData object can be used in parallel with a safepoint.
  558     // See the workaround in PerfDataManager::destroy().
 559     OM_PERFDATA_OP(FutileWakeups, inc());
 560     ++nWakeups;
 561 
 562     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 563     // We can defer clearing _succ until after the spin completes
 564     // TrySpin() must tolerate being called with _succ == Self.
 565     // Try yet another round of adaptive spinning.
 566     if (TrySpin(Self) > 0) break;
 567 
 568     // We can find that we were unpark()ed and redesignated _succ while
 569     // we were spinning.  That's harmless.  If we iterate and call park(),
 570     // park() will consume the event and return immediately and we'll
 571     // just spin again.  This pattern can repeat, leaving _succ to simply
 572     // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
 573     // Alternately, we can sample fired() here, and if set, forgo spinning
 574     // in the next iteration.
 575 
 576     if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
 577       Self->_ParkEvent->reset();
 578       OrderAccess::fence();
 579     }
 580     if (_succ == Self) _succ = NULL;
 581 
 582     // Invariant: after clearing _succ a thread *must* retry _owner before parking.
 583     OrderAccess::fence();
 584   }
 585 
  586   // Egress:
  587   // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
  588   // Normally we'll find Self on the EntryList.
 589   // From the perspective of the lock owner (this thread), the
 590   // EntryList is stable and cxq is prepend-only.
 591   // The head of cxq is volatile but the interior is stable.
 592   // In addition, Self.TState is stable.
 593 
 594   assert(_owner == Self, "invariant");
 595   assert(object() != NULL, "invariant");
 596   // I'd like to write:
 597   //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
 598   // but as we're at a safepoint that's not safe.
 599 
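The excerpt above ends each loop iteration with an explicit rule: after clearing _succ a thread must retry _owner before parking. The sketch below illustrates why, using std::atomic plus a condition-variable stand-in for ParkEvent; all names are placeholders, and the real code uses OrderAccess::fence() and ParkEvent directly.

// Why "clear _succ, fence, re-check _owner before parking" matters.  An
// exiting owner that sees _succ set may release the lock without waking
// anyone; if the successor then cleared _succ and parked without looking
// at _owner again, that wakeup would be lost for good.  Hypothetical names.
#include <atomic>
#include <condition_variable>
#include <mutex>

struct ParkEventSketch {               // one-permit park/unpark, like ParkEvent
  std::mutex m;
  std::condition_variable cv;
  bool permit = false;
  void park() {
    std::unique_lock<std::mutex> lk(m);
    cv.wait(lk, [&] { return permit; });
    permit = false;
  }
  void unpark() {
    { std::lock_guard<std::mutex> lk(m); permit = true; }
    cv.notify_one();
  }
};

struct MonitorSketch {
  std::atomic<void*> owner{nullptr};
  std::atomic<void*> succ{nullptr};

  void contended_enter(void* self, ParkEventSketch& ev) {
    for (;;) {
      void* expected = nullptr;
      if (owner.compare_exchange_strong(expected, self)) return;  // acquired

      if (succ.load() == self) succ.store(nullptr);
      std::atomic_thread_fence(std::memory_order_seq_cst);        // full fence
      if (owner.load() == nullptr) continue;  // lock freed meanwhile: retry, don't park
      ev.park();                              // safe to block; exit path will unpark us
    }
  }
};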




  92 
  93 #else //  ndef DTRACE_ENABLED
  94 
  95 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
  96 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}
  97 
  98 #endif // ndef DTRACE_ENABLED
  99 
 100 // Tunables ...
 101 // The knob* variables are effectively final.  Once set they should
 102 // never be modified hence.  Consider using __read_mostly with GCC.
 103 
 104 int ObjectMonitor::Knob_SpinLimit    = 5000;    // derived by an external tool -
 105 
 106 static int Knob_Bonus               = 100;     // spin success bonus
 107 static int Knob_BonusB              = 100;     // spin success bonus
 108 static int Knob_Penalty             = 200;     // spin failure penalty
 109 static int Knob_Poverty             = 1000;
 110 static int Knob_FixedSpin           = 0;
 111 static int Knob_PreSpin             = 10;      // 20-100 likely better

 112 
 113 static int Knob_FastHSSEC           = 0;
 114 static int Knob_MoveNotifyee        = 2;       // notify() - disposition of notifyee
 115 static int Knob_QMode               = 0;       // EntryList-cxq policy - queue discipline
 116 static volatile int InitDone        = 0;
 117 
 118 // -----------------------------------------------------------------------------
 119 // Theory of operations -- Monitors lists, thread residency, etc:
 120 //
 121 // * A thread acquires ownership of a monitor by successfully
 122 //   CAS()ing the _owner field from null to non-null.
 123 //
 124 // * Invariant: A thread appears on at most one monitor list --
 125 //   cxq, EntryList or WaitSet -- at any one time.
 126 //
 127 // * Contending threads "push" themselves onto the cxq with CAS
 128 //   and then spin/park.
 129 //
 130 // * After a contending thread eventually acquires the lock it must
 131 //   dequeue itself from either the EntryList or the cxq.
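The futile-wakeup tally in the surrounding excerpts is deliberately lossy: a plain increment that can drop updates under contention is preferred over an atomic read-modify-write because it has a smaller probe effect. A standalone illustration of that trade-off follows; it assumes nothing about the real OM_PERFDATA_OP/PerfData counters.

// Demonstrates the "lossy counter" trade-off: the cheap variant does a
// separate load and store, so concurrent increments can overwrite each
// other; the exact variant pays for an atomic fetch_add on every update.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

static std::atomic<long> lossy{0};     // increments may be lost under contention
static std::atomic<long> exact{0};     // never loses an increment

int main() {
  std::vector<std::thread> workers;
  for (int t = 0; t < 4; ++t) {
    workers.emplace_back([] {
      for (int i = 0; i < 100000; ++i) {
        // Deliberately non-atomic read-modify-write: two threads can read
        // the same value and both store value + 1, dropping one update.
        lossy.store(lossy.load(std::memory_order_relaxed) + 1,
                    std::memory_order_relaxed);
        exact.fetch_add(1, std::memory_order_relaxed);
      }
    });
  }
  for (auto& w : workers) w.join();
  std::printf("lossy=%ld exact=%ld\n", lossy.load(), exact.load());  // lossy <= exact
  return 0;
}

On a multi-core machine the lossy total typically comes in below the exact total of 400000, which is exactly the imprecision the comment accepts in exchange for a cheaper hot path.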


 551     // Keep a tally of the # of futile wakeups.
 552     // Note that the counter is not protected by a lock or updated by atomics.
 553     // That is by design - we trade "lossy" counters which are exposed to
 554     // races during updates for a lower probe effect.
 555 
 556     // This PerfData object can be used in parallel with a safepoint.
  557     // See the workaround in PerfDataManager::destroy().
 558     OM_PERFDATA_OP(FutileWakeups, inc());
 559     ++nWakeups;
 560 
 561     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 562     // We can defer clearing _succ until after the spin completes
 563     // TrySpin() must tolerate being called with _succ == Self.
 564     // Try yet another round of adaptive spinning.
 565     if (TrySpin(Self) > 0) break;
 566 
 567     // We can find that we were unpark()ed and redesignated _succ while
 568     // we were spinning.  That's harmless.  If we iterate and call park(),
 569     // park() will consume the event and return immediately and we'll
 570     // just spin again.  This pattern can repeat, leaving _succ to simply
 571     // spin on a CPU.


 572 




 573     if (_succ == Self) _succ = NULL;
 574 
 575     // Invariant: after clearing _succ a thread *must* retry _owner before parking.
 576     OrderAccess::fence();
 577   }
 578 
  579   // Egress:
  580   // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
  581   // Normally we'll find Self on the EntryList.
 582   // From the perspective of the lock owner (this thread), the
 583   // EntryList is stable and cxq is prepend-only.
 584   // The head of cxq is volatile but the interior is stable.
 585   // In addition, Self.TState is stable.
 586 
 587   assert(_owner == Self, "invariant");
 588   assert(object() != NULL, "invariant");
 589   // I'd like to write:
 590   //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
 591   // but as we're at a safepoint that's not safe.
 592 
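The egress comment above relies on the cxq being prepend-only: once this thread owns the monitor, the interior of the list is stable, and only the head can still change because new waiters may push themselves concurrently. A rough sketch of the resulting unlink follows, reusing the hypothetical SimpleMonitor/Node shapes from the first sketch (redeclared so the fragment stands alone); the real code also has to handle the EntryList case.

// Rough sketch of unlinking the new owner's node from the cxq.  Interior
// nodes are stable (only the owner edits them), but new waiters can still
// prepend at the head, so popping the head needs a CAS.  Hypothetical types.
#include <atomic>

struct Node {
  void* thread = nullptr;
  Node* next   = nullptr;
};

struct SimpleMonitor {
  std::atomic<void*> owner{nullptr};
  std::atomic<Node*> cxq{nullptr};
};

void unlink_self(SimpleMonitor& mon, Node* self) {
  Node* head = mon.cxq.load(std::memory_order_acquire);
  if (head == self) {
    // Try to pop ourselves off the head; on failure a new waiter was pushed
    // and 'head' now holds the fresh head, with 'self' somewhere inside.
    if (mon.cxq.compare_exchange_strong(head, self->next)) {
      self->next = nullptr;
      return;
    }
  }
  // The interior is stable from the owner's point of view, so a plain
  // walk-and-splice is sufficient.
  for (Node* cur = head; cur != nullptr; cur = cur->next) {
    if (cur->next == self) {
      cur->next = self->next;
      self->next = nullptr;
      return;
    }
  }
}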

