110
111 static int Knob_SpinBase = 0; // Floor AKA SpinMin
112 static int Knob_CASPenalty = -1; // Penalty for failed CAS
113 static int Knob_OXPenalty = -1; // Penalty for observed _owner change
114 static int Knob_SpinSetSucc = 1; // spinners set the _succ field
115 static int Knob_SpinEarly = 1;
116 static int Knob_SuccEnabled = 1; // futile wake throttling
117 static int Knob_SuccRestrict = 0; // Limit successors + spinners to at-most-one
118 static int Knob_MaxSpinners = -1; // Should be a function of # CPUs
119 static int Knob_Bonus = 100; // spin success bonus
120 static int Knob_BonusB = 100; // spin success bonus
121 static int Knob_Penalty = 200; // spin failure penalty
122 static int Knob_Poverty = 1000;
123 static int Knob_SpinAfterFutile = 1; // Spin after returning from park()
124 static int Knob_FixedSpin = 0;
125 static int Knob_OState = 3; // Spinner checks thread state of _owner
126 static int Knob_UsePause = 1;
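// (Editorial note, inferred from the spin loop below: bit 0 requests a
//  SpinPause() inside the periodic safepoint-poll block, bit 1 requests a
//  SpinPause() on every spin iteration.)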
127 static int Knob_ExitPolicy = 0;
128 static int Knob_PreSpin = 10; // 20-100 likely better
129 static int Knob_ResetEvent = 0;
130 static int BackOffMask = 0;
131
132 static int Knob_FastHSSEC = 0;
133 static int Knob_MoveNotifyee = 2; // notify() - disposition of notifyee
134 static int Knob_QMode = 0; // EntryList-cxq policy - queue discipline
135 static volatile int InitDone = 0;
136
137 // -----------------------------------------------------------------------------
138 // Theory of operations -- Monitors lists, thread residency, etc:
139 //
140 // * A thread acquires ownership of a monitor by successfully
141 // CAS()ing the _owner field from null to non-null.
142 //
143 // * Invariant: A thread appears on at most one monitor list --
144 // cxq, EntryList or WaitSet -- at any one time.
145 //
146 // * Contending threads "push" themselves onto the cxq with CAS
147 // and then spin/park.
148 //
149 // * After a contending thread eventually acquires the lock it must
150 // dequeue itself from either the EntryList or the cxq.
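
As a rough illustration of the protocol sketched above (and only that: SimpleMonitor, Waiter, try_enter and push_waiter are made-up names, and std::atomic stands in for HotSpot's Atomic::cmpxchg and ObjectWaiter types), the acquire step and the cxq push look roughly like this:

#include <atomic>

struct Waiter {                           // stands in for ObjectWaiter
  Waiter* next = nullptr;
};

struct SimpleMonitor {
  std::atomic<void*>   owner{nullptr};    // plays the role of _owner
  std::atomic<Waiter*> cxq{nullptr};      // plays the role of _cxq (LIFO)

  // Uncontended path: CAS owner from null to a non-null "self" token.
  bool try_enter(void* self) {
    void* expected = nullptr;
    return owner.compare_exchange_strong(expected, self);
  }

  // Contention path: CAS-push ourselves onto the cxq; the caller then
  // spins/parks and, after it finally acquires, dequeues itself again.
  void push_waiter(Waiter* w) {
    Waiter* head = cxq.load(std::memory_order_relaxed);
    do {
      w->next = head;                     // link before publishing
    } while (!cxq.compare_exchange_weak(head, w));
  }
};

The push loop re-links w->next on every failed compare_exchange_weak, so a node is never published with a stale next pointer; the same shape appears in the real code when contending threads enqueue on _cxq.
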
1856 if (ctr <= 0) return 0;
1857
1858 if (Knob_SuccRestrict && _succ != NULL) return 0;
1859 if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
1860 return 0;
1861 }
1862
1863 int MaxSpin = Knob_MaxSpinners;
1864 if (MaxSpin >= 0) {
1865 if (_Spinner > MaxSpin) {
1866 return 0;
1867 }
1868 // Slightly racy, but benign ...
1869 Adjust(&_Spinner, 1);
1870 }
1871
1872 // We're good to spin ... spin ingress.
1873 // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
1874 // when preparing to LD...CAS _owner, etc and the CAS is likely
1875 // to succeed.
1876 int hits = 0;
1877 int msk = 0;
1878 int caspty = Knob_CASPenalty;
1879 int oxpty = Knob_OXPenalty;
1880 int sss = Knob_SpinSetSucc;
1881 if (sss && _succ == NULL) _succ = Self;
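// (Advertising ourselves in _succ tells an exiting owner that a spinner is
//  already poised to take the lock, so it can skip unparking another thread --
//  the futile-wakeup throttling that Knob_SuccEnabled refers to.)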
1882 Thread * prv = NULL;
1883
1884 // There are three ways to exit the following loop:
1885 // 1. A successful spin where this thread has acquired the lock.
1886 // 2. Spin failure with prejudice
1887 // 3. Spin failure without prejudice
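// (Failure "with prejudice" also charges Knob_Penalty against the adaptive
//  spin duration, shortening future spin attempts; failure "without prejudice"
//  abandons only this attempt.)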
1888
1889 while (--ctr >= 0) {
1890
1891 // Periodic polling -- Check for pending GC
1892 // Threads may spin while they're unsafe.
1893 // We don't want spinning threads to delay the JVM from reaching
1894 // a stop-the-world safepoint or to steal cycles from GC.
1895 // If we detect a pending safepoint we abort in order that
1896 // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
1897 // this thread, if safe, doesn't steal cycles from GC.
1898 // This is in keeping with the "no loitering in runtime" rule.
1899 // We periodically check to see if there's a safepoint pending.
1900 if ((ctr & 0xFF) == 0) {
1901 if (SafepointMechanism::poll(Self)) {
1902 goto Abort; // abrupt spin egress
1903 }
1904 if (Knob_UsePause & 1) SpinPause();
1905 }
1906
1907 if (Knob_UsePause & 2) SpinPause();
1908
1909 // Exponential back-off ... Stay off the bus to reduce coherency traffic.
1910 // This is useful on classic SMP systems, but is of less utility on
1911 // N1-style CMT platforms.
1912 //
1913 // Trade-off: lock acquisition latency vs coherency bandwidth.
1914 // Lock hold times are typically short. A histogram
1915 // of successful spin attempts shows that we usually acquire
1916 // the lock early in the spin. That suggests we want to
1917 // sample _owner frequently in the early phase of the spin,
1918 // but then back-off and sample less frequently as the spin
1919 // progresses. The back-off makes us a good citizen on big
1920 // SMP systems. Oversampling _owner can consume excessive
1921 // coherency bandwidth. Relatedly, if we oversample _owner we
1922 // can inadvertently interfere with the ST m->owner=null
1923 // executed by the lock owner.
1924 if (ctr & msk) continue;
1925 ++hits;
1926 if ((hits & 0xF) == 0) {
1927 // The 0xF, above, corresponds to the exponent.
1928 // Consider: (msk+1)|msk
1929 msk = ((msk << 2)|3) & BackOffMask;
1930 }
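// (For example, with a BackOffMask of 0xFF the mask would widen
//  0x0 -> 0x3 -> 0xF -> 0x3F -> 0xFF, so the "if (ctr & msk) continue"
//  above probes _owner only once every msk+1 iterations. Note that
//  DeferredInitialize() below leaves BackOffMask at 0, so the mask never
//  widens and every iteration probes.)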
1931
1932 // Probe _owner with TATAS
1933 // If this thread observes the monitor transition or flicker
1934 // from locked to unlocked to locked, then the odds that this
1935 // thread will acquire the lock in this spin attempt go down
1936 // considerably. The same argument applies if the CAS fails
1937 // or if we observe _owner change from one non-null value to
1938 // another non-null value. In such cases we might abort
1939 // the spin without prejudice or apply a "penalty" to the
1940 // spin count-down variable "ctr", reducing it by 100, say.
1941
1942 Thread * ox = (Thread *) _owner;
1943 if (ox == NULL) {
1944 ox = (Thread*)Atomic::cmpxchg(Self, &_owner, (void*)NULL);
1945 if (ox == NULL) {
1946 // The CAS succeeded -- this thread acquired ownership
1947 // Take care of some bookkeeping to exit spin state.
1948 if (sss && _succ == Self) {
1949 _succ = NULL;
1950 }
1951 if (MaxSpin > 0) Adjust(&_Spinner, -1);
2196 NEWPERFCOUNTER(_sync_FutileWakeups);
2197 NEWPERFCOUNTER(_sync_Parks);
2198 NEWPERFCOUNTER(_sync_Notifications);
2199 NEWPERFVARIABLE(_sync_MonExtant);
2200 #undef NEWPERFCOUNTER
2201 #undef NEWPERFVARIABLE
2202 }
2203 }
2204
2205 void ObjectMonitor::DeferredInitialize() {
2206 if (InitDone > 0) return;
2207 if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
2208 while (InitDone != 1) /* empty */;
2209 return;
2210 }
2211
2212 // One-shot global initialization ...
2213 // The initialization is idempotent, so we don't need locks.
2214 // In the future consider doing this via os::init_2().
2215
2216 if (os::is_MP()) {
2217 BackOffMask = 0;
2218 } else {
2219 Knob_SpinLimit = 0;
2220 Knob_SpinBase = 0;
2221 Knob_PreSpin = 0;
2222 Knob_FixedSpin = -1;
2223 }
2224
2225 OrderAccess::fence();
2226 InitDone = 1;
2227 }
2228
|
110
111 static int Knob_SpinBase = 0; // Floor AKA SpinMin
112 static int Knob_CASPenalty = -1; // Penalty for failed CAS
113 static int Knob_OXPenalty = -1; // Penalty for observed _owner change
114 static int Knob_SpinSetSucc = 1; // spinners set the _succ field
115 static int Knob_SpinEarly = 1;
116 static int Knob_SuccEnabled = 1; // futile wake throttling
117 static int Knob_SuccRestrict = 0; // Limit successors + spinners to at-most-one
118 static int Knob_MaxSpinners = -1; // Should be a function of # CPUs
119 static int Knob_Bonus = 100; // spin success bonus
120 static int Knob_BonusB = 100; // spin success bonus
121 static int Knob_Penalty = 200; // spin failure penalty
122 static int Knob_Poverty = 1000;
123 static int Knob_SpinAfterFutile = 1; // Spin after returning from park()
124 static int Knob_FixedSpin = 0;
125 static int Knob_OState = 3; // Spinner checks thread state of _owner
126 static int Knob_UsePause = 1;
127 static int Knob_ExitPolicy = 0;
128 static int Knob_PreSpin = 10; // 20-100 likely better
129 static int Knob_ResetEvent = 0;
130
131 static int Knob_FastHSSEC = 0;
132 static int Knob_MoveNotifyee = 2; // notify() - disposition of notifyee
133 static int Knob_QMode = 0; // EntryList-cxq policy - queue discipline
134 static volatile int InitDone = 0;
135
136 // -----------------------------------------------------------------------------
137 // Theory of operations -- Monitors lists, thread residency, etc:
138 //
139 // * A thread acquires ownership of a monitor by successfully
140 // CAS()ing the _owner field from null to non-null.
141 //
142 // * Invariant: A thread appears on at most one monitor list --
143 // cxq, EntryList or WaitSet -- at any one time.
144 //
145 // * Contending threads "push" themselves onto the cxq with CAS
146 // and then spin/park.
147 //
148 // * After a contending thread eventually acquires the lock it must
149 // dequeue itself from either the EntryList or the cxq.
1855 if (ctr <= 0) return 0;
1856
1857 if (Knob_SuccRestrict && _succ != NULL) return 0;
1858 if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
1859 return 0;
1860 }
1861
1862 int MaxSpin = Knob_MaxSpinners;
1863 if (MaxSpin >= 0) {
1864 if (_Spinner > MaxSpin) {
1865 return 0;
1866 }
1867 // Slightly racy, but benign ...
1868 Adjust(&_Spinner, 1);
1869 }
1870
1871 // We're good to spin ... spin ingress.
1872 // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
1873 // when preparing to LD...CAS _owner, etc and the CAS is likely
1874 // to succeed.
1875 int caspty = Knob_CASPenalty;
1876 int oxpty = Knob_OXPenalty;
1877 int sss = Knob_SpinSetSucc;
1878 if (sss && _succ == NULL) _succ = Self;
1879 Thread * prv = NULL;
1880
1881 // There are three ways to exit the following loop:
1882 // 1. A successful spin where this thread has acquired the lock.
1883 // 2. Spin failure with prejudice
1884 // 3. Spin failure without prejudice
1885
1886 while (--ctr >= 0) {
1887
1888 // Periodic polling -- Check for pending GC
1889 // Threads may spin while they're unsafe.
1890 // We don't want spinning threads to delay the JVM from reaching
1891 // a stop-the-world safepoint or to steal cycles from GC.
1892 // If we detect a pending safepoint we abort in order that
1893 // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
1894 // this thread, if safe, doesn't steal cycles from GC.
1895 // This is in keeping with the "no loitering in runtime" rule.
1896 // We periodically check to see if there's a safepoint pending.
1897 if ((ctr & 0xFF) == 0) {
1898 if (SafepointMechanism::poll(Self)) {
1899 goto Abort; // abrupt spin egress
1900 }
1901 if (Knob_UsePause & 1) SpinPause();
1902 }
1903
1904 if (Knob_UsePause & 2) SpinPause();
1905
1906 // Probe _owner with TATAS
1907 // If this thread observes the monitor transition or flicker
1908 // from locked to unlocked to locked, then the odds that this
1909 // thread will acquire the lock in this spin attempt go down
1910 // considerably. The same argument applies if the CAS fails
1911 // or if we observe _owner change from one non-null value to
1912 // another non-null value. In such cases we might abort
1913 // the spin without prejudice or apply a "penalty" to the
1914 // spin count-down variable "ctr", reducing it by 100, say.
1915
1916 Thread * ox = (Thread *) _owner;
1917 if (ox == NULL) {
1918 ox = (Thread*)Atomic::cmpxchg(Self, &_owner, (void*)NULL);
1919 if (ox == NULL) {
1920 // The CAS succeeded -- this thread acquired ownership
1921 // Take care of some bookkeeping to exit spin state.
1922 if (sss && _succ == Self) {
1923 _succ = NULL;
1924 }
1925 if (MaxSpin > 0) Adjust(&_Spinner, -1);
2170 NEWPERFCOUNTER(_sync_FutileWakeups);
2171 NEWPERFCOUNTER(_sync_Parks);
2172 NEWPERFCOUNTER(_sync_Notifications);
2173 NEWPERFVARIABLE(_sync_MonExtant);
2174 #undef NEWPERFCOUNTER
2175 #undef NEWPERFVARIABLE
2176 }
2177 }
2178
2179 void ObjectMonitor::DeferredInitialize() {
2180 if (InitDone > 0) return;
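  // (InitDone protocol; this Atomic::cmpxchg takes exchange_value, dest,
  //  compare_value: 0 = not yet initialized, -1 = a winner is initializing,
  //  1 = published. Losers of the cmpxchg spin until the winner stores 1.)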
2181 if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
2182 while (InitDone != 1) /* empty */;
2183 return;
2184 }
2185
2186 // One-shot global initialization ...
2187 // The initialization is idempotent, so we don't need locks.
2188 // In the future consider doing this via os::init_2().
2189
2190 if (!os::is_MP()) {
2191 Knob_SpinLimit = 0;
2192 Knob_SpinBase = 0;
2193 Knob_PreSpin = 0;
2194 Knob_FixedSpin = -1;
2195 }
2196
2197 OrderAccess::fence();
2198 InitDone = 1;
2199 }
2200
|