
src/hotspot/share/runtime/objectMonitor.cpp

rev 51780 : imported patch syncknobs-00-base
rev 51781 : imported patch syncknobs-01-Knob_ReportSettings
rev 51782 : imported patch syncknobs-02-Knob_SpinBackOff
rev 51783 : imported patch syncknobs-03-BackOffMask
rev 51784 : imported patch syncknobs-04-Knob_ExitRelease
rev 51785 : imported patch syncknobs-05-Knob_InlineNotify
rev 51786 : imported patch syncknobs-06-Knob_Verbose
rev 51787 : imported patch syncknobs-07-Knob_VerifyInUse
rev 51788 : imported patch syncknobs-08-Knob_VerifyMatch
rev 51789 : imported patch syncknobs-09-Knob_SpinBase
rev 51790 : imported patch syncknobs-10-Knob_CASPenalty
rev 51791 : imported patch syncknobs-11-Knob_OXPenalty
rev 51792 : imported patch syncknobs-12-Knob_SpinSetSucc
rev 51793 : imported patch syncknobs-13-Knob_SpinEarly
rev 51794 : imported patch syncknobs-14-Knob_SuccEnabled
rev 51795 : imported patch syncknobs-15-Knob_SuccRestrict
rev 51796 : imported patch syncknobs-16-Knob_MaxSpinners
rev 51797 : imported patch syncknobs-17-Knob_SpinAfterFutile


  90     }                                                                      \
  91   }
  92 
  93 #else //  ndef DTRACE_ENABLED
  94 
  95 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
  96 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}
  97 
  98 #endif // ndef DTRACE_ENABLED
  99 
 100 // Tunables ...
 101 // The knob* variables are effectively final.  Once set they should
 102 // never be modified hence.  Consider using __read_mostly with GCC.
 103 
 104 int ObjectMonitor::Knob_SpinLimit    = 5000;    // derived by an external tool -
 105 
 106 static int Knob_Bonus               = 100;     // spin success bonus
 107 static int Knob_BonusB              = 100;     // spin success bonus
 108 static int Knob_Penalty             = 200;     // spin failure penalty
 109 static int Knob_Poverty             = 1000;
 110 static int Knob_SpinAfterFutile     = 1;       // Spin after returning from park()
 111 static int Knob_FixedSpin           = 0;
 112 static int Knob_OState              = 3;       // Spinner checks thread state of _owner
 113 static int Knob_UsePause            = 1;
 114 static int Knob_ExitPolicy          = 0;
 115 static int Knob_PreSpin             = 10;      // 20-100 likely better
 116 static int Knob_ResetEvent          = 0;
 117 
 118 static int Knob_FastHSSEC           = 0;
 119 static int Knob_MoveNotifyee        = 2;       // notify() - disposition of notifyee
 120 static int Knob_QMode               = 0;       // EntryList-cxq policy - queue discipline
 121 static volatile int InitDone        = 0;
 122 
 123 // -----------------------------------------------------------------------------
 124 // Theory of operations -- Monitors lists, thread residency, etc:
 125 //
 126 // * A thread acquires ownership of a monitor by successfully
 127 //   CAS()ing the _owner field from null to non-null.
 128 //
 129 // * Invariant: A thread appears on at most one monitor list --
 130 //   cxq, EntryList or WaitSet -- at any one time.

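The tunables comment above also suggests "__read_mostly with GCC". That is not a built-in compiler attribute; the usual approach (the Linux kernel convention, for example) is a section attribute that groups rarely written globals away from frequently written data so they do not share dirty cache lines. A hedged sketch only; the macro, section, and variable names below are illustrative, not HotSpot's:

    #if defined(__GNUC__)
    #define READ_MOSTLY __attribute__((section(".data.read_mostly")))
    #else
    #define READ_MOSTLY
    #endif

    // Hypothetical knob placed in the read-mostly section.
    static int Knob_Example READ_MOSTLY = 100;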

 550       Self->_ParkEvent->park();
 551     }
 552 
 553     if (TryLock(Self) > 0) break;
 554 
 555     // The lock is still contested.
 556     // Keep a tally of the # of futile wakeups.
 557     // Note that the counter is not protected by a lock or updated by atomics.
 558     // That is by design - we trade "lossy" counters which are exposed to
 559     // races during updates for a lower probe effect.
 560 
 561     // This PerfData object can be used in parallel with a safepoint.
 562     // See the work around in PerfDataManager::destroy().
 563     OM_PERFDATA_OP(FutileWakeups, inc());
 564     ++nWakeups;
 565 
 566     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 567     // We can defer clearing _succ until after the spin completes
 568     // TrySpin() must tolerate being called with _succ == Self.
 569     // Try yet another round of adaptive spinning.
 570     if ((Knob_SpinAfterFutile & 1) && TrySpin(Self) > 0) break;
 571 
 572     // We can find that we were unpark()ed and redesignated _succ while
 573     // we were spinning.  That's harmless.  If we iterate and call park(),
 574     // park() will consume the event and return immediately and we'll
 575     // just spin again.  This pattern can repeat, leaving _succ to simply
 576     // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
 577     // Alternately, we can sample fired() here, and if set, forgo spinning
 578     // in the next iteration.
 579 
 580     if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
 581       Self->_ParkEvent->reset();
 582       OrderAccess::fence();
 583     }
 584     if (_succ == Self) _succ = NULL;
 585 
 586     // Invariant: after clearing _succ a thread *must* retry _owner before parking.
 587     OrderAccess::fence();
 588   }
 589 
 590   // Egress :
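
As a rough, self-contained sketch of the loop shape above (park, retry the lock, tally a futile wakeup, spin again, clear _succ, fence before re-checking _owner): the names below are stand-ins for ObjectMonitor, TryLock() and TrySpin(), park() is approximated with yield(), and the futile-wakeup tally is deliberately a plain, racy int, matching the "lossy counter" note in the comment above.

    #include <atomic>
    #include <thread>

    struct ToyEnter {
      std::atomic<void*> owner{nullptr};
      std::atomic<void*> succ{nullptr};
      int futile_wakeups = 0;             // lossy by design: no lock, no atomics

      bool try_lock(void* self) {
        void* expected = nullptr;
        return owner.compare_exchange_strong(expected, self);
      }
      bool try_spin(void* self) {         // placeholder for adaptive spinning
        for (int i = 0; i < 100; i++) if (try_lock(self)) return true;
        return false;
      }

      void enter_slow(void* self) {
        for (;;) {
          std::this_thread::yield();      // stands in for _ParkEvent->park()
          if (try_lock(self)) break;
          ++futile_wakeups;               // racy increment, low probe effect
          if (try_spin(self)) break;      // spin again after a futile wakeup
          // Clear successor status, then re-check _owner before parking again.
          void* expected_self = self;
          succ.compare_exchange_strong(expected_self, nullptr);
          std::atomic_thread_fence(std::memory_order_seq_cst);
        }
      }
    };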


 642   // To that end, the 1-0 exit() operation must have at least STST|LDST
 643   // "release" barrier semantics.  Specifically, there must be at least a
 644   // STST|LDST barrier in exit() before the ST of null into _owner that drops
 645   // the lock.   The barrier ensures that changes to monitor meta-data and data
 646   // protected by the lock will be visible before we release the lock, and
 647   // therefore before some other thread (CPU) has a chance to acquire the lock.
 648   // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
 649   //
 650   // Critically, any prior STs to _succ or EntryList must be visible before
 651   // the ST of null into _owner in the *subsequent* (following) corresponding
 652   // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
 653   // execute a serializing instruction.
 654 
 655   return;
 656 }
 657 
 658 // ReenterI() is a specialized inline form of the latter half of the
 659 // contended slow-path from EnterI().  We use ReenterI() only for
 660 // monitor reentry in wait().
 661 //
 662 // In the future we should reconcile EnterI() and ReenterI(), adding
 663 // Knob_Reset and Knob_SpinAfterFutile support and restructuring the
 664 // loop accordingly.
 665 
 666 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
 667   assert(Self != NULL, "invariant");
 668   assert(SelfNode != NULL, "invariant");
 669   assert(SelfNode->_thread == Self, "invariant");
 670   assert(_waiters > 0, "invariant");
 671   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 672   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 673   JavaThread * jt = (JavaThread *) Self;
 674 
 675   int nWakeups = 0;
 676   for (;;) {
 677     ObjectWaiter::TStates v = SelfNode->TState;
 678     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 679     assert(_owner != Self, "invariant");
 680 
 681     if (TryLock(Self) > 0) break;
 682     if (TrySpin(Self) > 0) break;
 683 
 684     // State transition wrappers around park() ...
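
The 1-0 exit requirement described in the comment above (at least an STST|LDST barrier before the ST of null into _owner) corresponds to release semantics in standard C++. A minimal sketch, not HotSpot's OrderAccess code; the names are illustrative:

    #include <atomic>

    std::atomic<void*> owner{nullptr};   // stand-in for _owner
    int guarded = 0;                     // data protected by the lock

    void toy_exit() {
      guarded++;                         // last write made while holding the lock
      // The release store supplies the STST|LDST ordering: the write above is
      // visible before any other thread can observe owner == nullptr and
      // acquire the lock.
      owner.store(nullptr, std::memory_order_release);
      // Equivalently, with an explicit fence before a relaxed store:
      //   std::atomic_thread_fence(std::memory_order_release);
      //   owner.store(nullptr, std::memory_order_relaxed);
    }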




  90     }                                                                      \
  91   }
  92 
  93 #else //  ndef DTRACE_ENABLED
  94 
  95 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
  96 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}
  97 
  98 #endif // ndef DTRACE_ENABLED
  99 
 100 // Tunables ...
 101 // The knob* variables are effectively final.  Once set they should
 102 // never be modified hence.  Consider using __read_mostly with GCC.
 103 
 104 int ObjectMonitor::Knob_SpinLimit    = 5000;    // derived by an external tool -
 105 
 106 static int Knob_Bonus               = 100;     // spin success bonus
 107 static int Knob_BonusB              = 100;     // spin success bonus
 108 static int Knob_Penalty             = 200;     // spin failure penalty
 109 static int Knob_Poverty             = 1000;

 110 static int Knob_FixedSpin           = 0;
 111 static int Knob_OState              = 3;       // Spinner checks thread state of _owner
 112 static int Knob_UsePause            = 1;
 113 static int Knob_ExitPolicy          = 0;
 114 static int Knob_PreSpin             = 10;      // 20-100 likely better
 115 static int Knob_ResetEvent          = 0;
 116 
 117 static int Knob_FastHSSEC           = 0;
 118 static int Knob_MoveNotifyee        = 2;       // notify() - disposition of notifyee
 119 static int Knob_QMode               = 0;       // EntryList-cxq policy - queue discipline
 120 static volatile int InitDone        = 0;
 121 
 122 // -----------------------------------------------------------------------------
 123 // Theory of operations -- Monitors lists, thread residency, etc:
 124 //
 125 // * A thread acquires ownership of a monitor by successfully
 126 //   CAS()ing the _owner field from null to non-null.
 127 //
 128 // * Invariant: A thread appears on at most one monitor list --
 129 //   cxq, EntryList or WaitSet -- at any one time.


 549       Self->_ParkEvent->park();
 550     }
 551 
 552     if (TryLock(Self) > 0) break;
 553 
 554     // The lock is still contested.
 555     // Keep a tally of the # of futile wakeups.
 556     // Note that the counter is not protected by a lock or updated by atomics.
 557     // That is by design - we trade "lossy" counters which are exposed to
 558     // races during updates for a lower probe effect.
 559 
 560     // This PerfData object can be used in parallel with a safepoint.
 561     // See the work around in PerfDataManager::destroy().
 562     OM_PERFDATA_OP(FutileWakeups, inc());
 563     ++nWakeups;
 564 
 565     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 566     // We can defer clearing _succ until after the spin completes
 567     // TrySpin() must tolerate being called with _succ == Self.
 568     // Try yet another round of adaptive spinning.
 569     if (TrySpin(Self) > 0) break;
 570 
 571     // We can find that we were unpark()ed and redesignated _succ while
 572     // we were spinning.  That's harmless.  If we iterate and call park(),
 573     // park() will consume the event and return immediately and we'll
 574     // just spin again.  This pattern can repeat, leaving _succ to simply
 575     // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
 576     // Alternately, we can sample fired() here, and if set, forgo spinning
 577     // in the next iteration.
 578 
 579     if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
 580       Self->_ParkEvent->reset();
 581       OrderAccess::fence();
 582     }
 583     if (_succ == Self) _succ = NULL;
 584 
 585     // Invariant: after clearing _succ a thread *must* retry _owner before parking.
 586     OrderAccess::fence();
 587   }
 588 
 589   // Egress :


 641   // To that end, the 1-0 exit() operation must have at least STST|LDST
 642   // "release" barrier semantics.  Specifically, there must be at least a
 643   // STST|LDST barrier in exit() before the ST of null into _owner that drops
 644   // the lock.   The barrier ensures that changes to monitor meta-data and data
 645   // protected by the lock will be visible before we release the lock, and
 646   // therefore before some other thread (CPU) has a chance to acquire the lock.
 647   // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
 648   //
 649   // Critically, any prior STs to _succ or EntryList must be visible before
 650   // the ST of null into _owner in the *subsequent* (following) corresponding
 651   // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
 652   // execute a serializing instruction.
 653 
 654   return;
 655 }
 656 
 657 // ReenterI() is a specialized inline form of the latter half of the
 658 // contended slow-path from EnterI().  We use ReenterI() only for
 659 // monitor reentry in wait().
 660 //
 661 // In the future we should reconcile EnterI() and ReenterI().


 662 
 663 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
 664   assert(Self != NULL, "invariant");
 665   assert(SelfNode != NULL, "invariant");
 666   assert(SelfNode->_thread == Self, "invariant");
 667   assert(_waiters > 0, "invariant");
 668   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 669   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 670   JavaThread * jt = (JavaThread *) Self;
 671 
 672   int nWakeups = 0;
 673   for (;;) {
 674     ObjectWaiter::TStates v = SelfNode->TState;
 675     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 676     assert(_owner != Self, "invariant");
 677 
 678     if (TryLock(Self) > 0) break;
 679     if (TrySpin(Self) > 0) break;
 680 
 681     // State transition wrappers around park() ...

