91 }
92
93 #else // ndef DTRACE_ENABLED
94
// DTrace support is compiled out: the probe macros expand to a no-op.
// Use the do { } while (0) idiom rather than {;} so each macro is a single
// statement that swallows the caller's trailing semicolon -- with {;} an
// unbraced "if (c) DTRACE_MONITOR_PROBE(...); else ..." fails to compile
// because {;}; ends the if-statement and orphans the else.
#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) do { } while (0)
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)       do { } while (0)
97
98 #endif // ndef DTRACE_ENABLED
99
// Tunables ...
// The knob* variables are effectively final.  Once set they should
// never be modified thereafter.  Consider using __read_mostly with GCC
// so they do not share cache lines with frequently written data.

// Upper bound on the adaptive spin count.  The only knob with external
// linkage (an ObjectMonitor class static); the rest are file-local.
int ObjectMonitor::Knob_SpinLimit = 5000; // derived by an external tool -

static int Knob_Bonus = 100;      // spin success bonus
static int Knob_BonusB = 100;     // spin success bonus
static int Knob_Penalty = 200;    // spin failure penalty
static int Knob_Poverty = 1000;   // NOTE(review): presumably a floor for the
                                  // adaptive spin budget -- use site not visible here
static int Knob_FixedSpin = 0;    // NOTE(review): nonzero presumably forces a
                                  // fixed, non-adaptive spin count -- confirm at use site
static int Knob_UsePause = 1;     // bitmask (see the spin loop below):
                                  // bit 0 = SpinPause() on the periodic safepoint
                                  // polls; bit 1 = SpinPause() every spin iteration
static int Knob_ExitPolicy = 0;   // NOTE(review): monitor-exit succession policy;
                                  // semantics defined at the use site, not visible here
static int Knob_PreSpin = 10;     // 20-100 likely better
static int Knob_ResetEvent = 0;   // NOTE(review): use site not visible in this chunk

static int Knob_FastHSSEC = 0;    // NOTE(review): use site not visible in this chunk
static int Knob_MoveNotifyee = 2; // notify() - disposition of notifyee
static int Knob_QMode = 0;        // EntryList-cxq policy - queue discipline
static volatile int InitDone = 0; // NOTE(review): looks like a one-shot init guard;
                                  // the initialization code is not visible in this chunk
120
121 // -----------------------------------------------------------------------------
122 // Theory of operations -- Monitors lists, thread residency, etc:
123 //
124 // * A thread acquires ownership of a monitor by successfully
125 // CAS()ing the _owner field from null to non-null.
126 //
127 // * Invariant: A thread appears on at most one monitor list --
128 // cxq, EntryList or WaitSet -- at any one time.
129 //
130 // * Contending threads "push" themselves onto the cxq with CAS
131 // and then spin/park.
  // There are three ways to exit the following loop:
  // 1. A successful spin where this thread has acquired the lock.
  // 2. Spin failure with prejudice
  // 3. Spin failure without prejudice

  while (--ctr >= 0) {

    // Periodic polling -- Check for pending GC
    // Threads may spin while they're unsafe.
    // We don't want spinning threads to delay the JVM from reaching
    // a stop-the-world safepoint or to steal cycles from GC.
    // If we detect a pending safepoint we abort in order that
    // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
    // this thread, if safe, doesn't steal cycles from GC.
    // This is in keeping with the "no loitering in runtime" rule.
    // We periodically check to see if there's a safepoint pending.
    if ((ctr & 0xFF) == 0) {
      if (SafepointMechanism::poll(Self)) {
        goto Abort;           // abrupt spin egress
      }
      // Knob_UsePause bit 0: pause only on these periodic poll iterations
      // (every 256th pass through the loop).
      if (Knob_UsePause & 1) SpinPause();
    }

    // Knob_UsePause bit 1: pause on every iteration of the spin loop.
    if (Knob_UsePause & 2) SpinPause();

    // Probe _owner with TATAS
    // If this thread observes the monitor transition or flicker
    // from locked to unlocked to locked, then the odds that this
    // thread will acquire the lock in this spin attempt go down
    // considerably. The same argument applies if the CAS fails
    // or if we observe _owner change from one non-null value to
    // another non-null value. In such cases we might abort
    // the spin without prejudice or apply a "penalty" to the
    // spin count-down variable "ctr", reducing it by 100, say.

    Thread * ox = (Thread *) _owner;
    if (ox == NULL) {
      // cmpxchg(exchange_value, dest, compare_value) returns the prior
      // value of _owner; NULL therefore means this CAS installed Self.
      ox = (Thread*)Atomic::cmpxchg(Self, &_owner, (void*)NULL);
      if (ox == NULL) {
        // The CAS succeeded -- this thread acquired ownership
        // Take care of some bookkeeping to exit spin state.
        if (_succ == Self) {
          // NOTE(review): _succ appears to designate a wakeup successor;
          // clear the self-designation now that we own the monitor --
          // its exact semantics are defined elsewhere in this file.
          _succ = NULL;
        }
|
91 }
92
93 #else // ndef DTRACE_ENABLED
94
// DTrace support is compiled out: the probe macros expand to a no-op.
// Use the do { } while (0) idiom rather than {;} so each macro is a single
// statement that swallows the caller's trailing semicolon -- with {;} an
// unbraced "if (c) DTRACE_MONITOR_PROBE(...); else ..." fails to compile
// because {;}; ends the if-statement and orphans the else.
#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) do { } while (0)
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)       do { } while (0)
97
98 #endif // ndef DTRACE_ENABLED
99
// Tunables ...
// The knob* variables are effectively final.  Once set they should
// never be modified thereafter.  Consider using __read_mostly with GCC
// so they do not share cache lines with frequently written data.

// Upper bound on the adaptive spin count.  The only knob with external
// linkage (an ObjectMonitor class static); the rest are file-local.
int ObjectMonitor::Knob_SpinLimit = 5000; // derived by an external tool -

static int Knob_Bonus = 100;      // spin success bonus
static int Knob_BonusB = 100;     // spin success bonus
static int Knob_Penalty = 200;    // spin failure penalty
static int Knob_Poverty = 1000;   // NOTE(review): presumably a floor for the
                                  // adaptive spin budget -- use site not visible here
static int Knob_FixedSpin = 0;    // NOTE(review): nonzero presumably forces a
                                  // fixed, non-adaptive spin count -- confirm at use site
static int Knob_ExitPolicy = 0;   // NOTE(review): monitor-exit succession policy;
                                  // semantics defined at the use site, not visible here
static int Knob_PreSpin = 10;     // 20-100 likely better
static int Knob_ResetEvent = 0;   // NOTE(review): use site not visible in this chunk

static int Knob_FastHSSEC = 0;    // NOTE(review): use site not visible in this chunk
static int Knob_MoveNotifyee = 2; // notify() - disposition of notifyee
static int Knob_QMode = 0;        // EntryList-cxq policy - queue discipline
static volatile int InitDone = 0; // NOTE(review): looks like a one-shot init guard;
                                  // the initialization code is not visible in this chunk
119
120 // -----------------------------------------------------------------------------
121 // Theory of operations -- Monitors lists, thread residency, etc:
122 //
123 // * A thread acquires ownership of a monitor by successfully
124 // CAS()ing the _owner field from null to non-null.
125 //
126 // * Invariant: A thread appears on at most one monitor list --
127 // cxq, EntryList or WaitSet -- at any one time.
128 //
129 // * Contending threads "push" themselves onto the cxq with CAS
130 // and then spin/park.
  // There are three ways to exit the following loop:
  // 1. A successful spin where this thread has acquired the lock.
  // 2. Spin failure with prejudice
  // 3. Spin failure without prejudice

  while (--ctr >= 0) {

    // Periodic polling -- Check for pending GC
    // Threads may spin while they're unsafe.
    // We don't want spinning threads to delay the JVM from reaching
    // a stop-the-world safepoint or to steal cycles from GC.
    // If we detect a pending safepoint we abort in order that
    // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
    // this thread, if safe, doesn't steal cycles from GC.
    // This is in keeping with the "no loitering in runtime" rule.
    // We periodically check to see if there's a safepoint pending.
    if ((ctr & 0xFF) == 0) {
      if (SafepointMechanism::poll(Self)) {
        goto Abort;           // abrupt spin egress
      }
      // Pause only on these periodic poll iterations (every 256th pass).
      SpinPause();
    }

    // Probe _owner with TATAS
    // If this thread observes the monitor transition or flicker
    // from locked to unlocked to locked, then the odds that this
    // thread will acquire the lock in this spin attempt go down
    // considerably. The same argument applies if the CAS fails
    // or if we observe _owner change from one non-null value to
    // another non-null value. In such cases we might abort
    // the spin without prejudice or apply a "penalty" to the
    // spin count-down variable "ctr", reducing it by 100, say.

    Thread * ox = (Thread *) _owner;
    if (ox == NULL) {
      // cmpxchg(exchange_value, dest, compare_value) returns the prior
      // value of _owner; NULL therefore means this CAS installed Self.
      ox = (Thread*)Atomic::cmpxchg(Self, &_owner, (void*)NULL);
      if (ox == NULL) {
        // The CAS succeeded -- this thread acquired ownership
        // Take care of some bookkeeping to exit spin state.
        if (_succ == Self) {
          // NOTE(review): _succ appears to designate a wakeup successor;
          // clear the self-designation now that we own the monitor --
          // its exact semantics are defined elsewhere in this file.
          _succ = NULL;
        }
|