86 if (DTraceMonitorProbes) { \
87 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \
88 HOTSPOT_MONITOR_##probe(jtid, \
89 (uintptr_t)(monitor), bytes, len); \
90 } \
91 }
92
93 #else // ndef DTRACE_ENABLED
94
// DTrace support is compiled out: the probe macros expand to empty
// statements so call sites need no conditional compilation of their own.
95 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) {;}
96 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon) {;}
97
98 #endif // ndef DTRACE_ENABLED
99
100 // Tunables ...
101 // The knob* variables are effectively final. Once set they should
102 // never be modified thereafter. Consider using __read_mostly with GCC.
103
// Spin-control and queue-policy knobs. Several of these are consumed by
// code outside this excerpt (enter/exit/notify paths); only the uses noted
// below are visible in the spin routine that follows.
104 int ObjectMonitor::Knob_SpinLimit = 5000; // derived by an external tool; upper bound on the adaptive _SpinDuration
105
106 static int Knob_SpinSetSucc = 1; // spinners set the _succ field
107 static int Knob_SpinEarly = 1;
108 static int Knob_SuccEnabled = 1; // futile wake throttling
109 static int Knob_SuccRestrict = 0; // Limit successors + spinners to at-most-one
110 static int Knob_MaxSpinners = -1; // Should be a function of # CPUs; < 0 leaves the spinner count unbounded
111 static int Knob_Bonus = 100; // spin success bonus
112 static int Knob_BonusB = 100; // spin success bonus
113 static int Knob_Penalty = 200; // spin failure penalty
114 static int Knob_Poverty = 1000; // floor applied to _SpinDuration before Knob_Bonus is added
115 static int Knob_SpinAfterFutile = 1; // Spin after returning from park()
116 static int Knob_FixedSpin = 0;
117 static int Knob_OState = 3; // Spinner checks thread state of _owner
118 static int Knob_UsePause = 1; // bit 0x2: issue SpinPause() on each spin iteration
119 static int Knob_ExitPolicy = 0;
120 static int Knob_PreSpin = 10; // 20-100 likely better
121 static int Knob_ResetEvent = 0;
122
123 static int Knob_FastHSSEC = 0;
124 static int Knob_MoveNotifyee = 2; // notify() - disposition of notifyee
125 static int Knob_QMode = 0; // EntryList-cxq policy - queue discipline
126 static volatile int InitDone = 0; // one-shot init guard; volatile for cross-thread visibility (usage not visible in this excerpt)
// NOTE(review): fragment of ObjectMonitor's adaptive spin routine (TrySpin).
// The function's signature lies before this excerpt, and original lines
// 1888-1890 (the body of the safepoint-poll branch) and 1932 are missing
// from this extract, so the braces below do not balance as shown.
// Returns 1 if the lock was acquired by spinning, 0 otherwise.
1846 if (ctr <= 0) return 0;
1847
// Admission checks: bail out before spinning if a successor is already
// designated (Knob_SuccRestrict) or the current owner does not appear to
// be running (Knob_OState gates the NotRunnable heuristic).
1848 if (Knob_SuccRestrict && _succ != NULL) return 0;
1849 if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
1850 return 0;
1851 }
1852
// Optionally bound the number of concurrent spinners; MaxSpin < 0 means
// the _Spinner count is neither checked nor maintained.
1853 int MaxSpin = Knob_MaxSpinners;
1854 if (MaxSpin >= 0) {
1855 if (_Spinner > MaxSpin) {
1856 return 0;
1857 }
1858 // Slightly racy, but benign ...
1859 Adjust(&_Spinner, 1);
1860 }
1861
1862 // We're good to spin ... spin ingress.
1863 // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
1864 // when preparing to LD...CAS _owner, etc and the CAS is likely
1865 // to succeed.
// sss caches Knob_SpinSetSucc: when set, spinners advertise themselves in
// _succ so an exiting owner can throttle futile wakeups.
1866 int sss = Knob_SpinSetSucc;
1867 if (sss && _succ == NULL) _succ = Self;
// prv remembers the previously observed owner so the loop can detect
// ownership changing hands mid-spin.
1868 Thread * prv = NULL;
1869
1870 // There are three ways to exit the following loop:
1871 // 1. A successful spin where this thread has acquired the lock.
1872 // 2. Spin failure with prejudice
1873 // 3. Spin failure without prejudice
1874
1875 while (--ctr >= 0) {
1876
1877 // Periodic polling -- Check for pending GC
1878 // Threads may spin while they're unsafe.
1879 // We don't want spinning threads to delay the JVM from reaching
1880 // a stop-the-world safepoint or to steal cycles from GC.
1881 // If we detect a pending safepoint we abort in order that
1882 // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
1883 // this thread, if safe, doesn't steal cycles from GC.
1884 // This is in keeping with the "no loitering in runtime" rule.
1885 // We periodically check to see if there's a safepoint pending.
1886 if ((ctr & 0xFF) == 0) {
1887 if (SafepointMechanism::poll(Self)) {
// NOTE(review): original lines 1888-1890 are absent from this extract;
// presumably they abort the spin when a safepoint is pending -- confirm
// against the full source.
1891 }
1892
1893 if (Knob_UsePause & 2) SpinPause();
1894
1895 // Probe _owner with TATAS
1896 // If this thread observes the monitor transition or flicker
1897 // from locked to unlocked to locked, then the odds that this
1898 // thread will acquire the lock in this spin attempt go down
1899 // considerably. The same argument applies if the CAS fails
1900 // or if we observe _owner change from one non-null value to
1901 // another non-null value. In such cases we might abort
1902 // the spin without prejudice or apply a "penalty" to the
1903 // spin count-down variable "ctr", reducing it by 100, say.
1904
1905 Thread * ox = (Thread *) _owner;
1906 if (ox == NULL) {
// The lock looks free: try to install Self as owner iff _owner is still NULL.
1907 ox = (Thread*)Atomic::cmpxchg(Self, &_owner, (void*)NULL);
1908 if (ox == NULL) {
1909 // The CAS succeeded -- this thread acquired ownership
1910 // Take care of some bookkeeping to exit spin state.
1911 if (sss && _succ == Self) {
1912 _succ = NULL;
1913 }
// NOTE(review): spin ingress increments _Spinner whenever MaxSpin >= 0,
// but this success path only decrements when MaxSpin > 0, while the Abort
// path below uses >= 0 -- for MaxSpin == 0 the count appears to leak here.
// Confirm intent against the full source.
1914 if (MaxSpin > 0) Adjust(&_Spinner, -1);
1915
1916 // Increase _SpinDuration :
1917 // The spin was successful (profitable) so we tend toward
1918 // longer spin attempts in the future.
1919 // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
1920 // If we acquired the lock early in the spin cycle it
1921 // makes sense to increase _SpinDuration proportionally.
1922 // Note that we don't clamp SpinDuration precisely at SpinLimit.
1923 int x = _SpinDuration;
1924 if (x < Knob_SpinLimit) {
1925 if (x < Knob_Poverty) x = Knob_Poverty;
1926 _SpinDuration = x + Knob_Bonus;
1927 }
1928 return 1;
1929 }
1930
1931 // The CAS failed ... we can take any of the following actions:
1933 // * exit spin with prejudice -- goto Abort;
1934 // * exit spin without prejudice.
1935 // * Since CAS is high-latency, retry again immediately.
1936 prv = ox;
1937 goto Abort;
1938 }
1939
1940 // Did lock ownership change hands ?
1941 if (ox != prv && prv != NULL) {
1942 goto Abort;
1943 }
1944 prv = ox;
1945
1946 // Abort the spin if the owner is not executing.
1947 // The owner must be executing in order to drop the lock.
1948 // Spinning while the owner is OFFPROC is idiocy.
1949 // Consider: ctr -= RunnablePenalty ;
1950 if (Knob_OState && NotRunnable (Self, ox)) {
1951 goto Abort;
1952 }
1953 if (sss && _succ == NULL) _succ = Self;
1954 }
1955
1956 // Spin failed with prejudice -- reduce _SpinDuration.
1957 // TODO: Use an AIMD-like policy to adjust _SpinDuration.
1958 // AIMD is globally stable.
1959 {
1960 int x = _SpinDuration;
1961 if (x > 0) {
1962 // Consider an AIMD scheme like: x -= (x >> 3) + 100
1963 // This is globally stable and tends to damp the response.
1964 x -= Knob_Penalty;
1965 if (x < 0) x = 0;
1966 _SpinDuration = x;
1967 }
1968 }
1969
1970 Abort:
// Spin egress: undo spinner accounting and clear our _succ advertisement.
1971 if (MaxSpin >= 0) Adjust(&_Spinner, -1);
1972 if (sss && _succ == Self) {
1973 _succ = NULL;
1974 // Invariant: after setting succ=null a contending thread
1975 // must recheck-retry _owner before parking. This usually happens
1976 // in the normal usage of TrySpin(), but it's safest
1977 // to make TrySpin() as foolproof as possible.
1978 OrderAccess::fence();
1979 if (TryLock(Self) > 0) return 1;
1980 }
1981 return 0;
1982 }
1983
1984 // NotRunnable() -- informed spinning
1985 //
1986 // Don't bother spinning if the owner is not eligible to drop the lock.
1987 // Spin only if the owner thread is _thread_in_Java or _thread_in_vm.
1988 // The thread must be runnable in order to drop the lock in a timely fashion.
1989 // If the _owner is not runnable then spinning will not likely be
1990 // successful (profitable).
1991 //
1992 // Beware -- the thread referenced by _owner could have died
|
86 if (DTraceMonitorProbes) { \
87 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \
88 HOTSPOT_MONITOR_##probe(jtid, \
89 (uintptr_t)(monitor), bytes, len); \
90 } \
91 }
92
93 #else // ndef DTRACE_ENABLED
94
// DTrace support is compiled out: the probe macros expand to empty
// statements so call sites need no conditional compilation of their own.
95 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) {;}
96 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon) {;}
97
98 #endif // ndef DTRACE_ENABLED
99
100 // Tunables ...
101 // The knob* variables are effectively final. Once set they should
102 // never be modified thereafter. Consider using __read_mostly with GCC.
103
// Spin-control and queue-policy knobs. Several of these are consumed by
// code outside this excerpt (enter/exit/notify paths); only the uses noted
// below are visible in the spin routine that follows.
104 int ObjectMonitor::Knob_SpinLimit = 5000; // derived by an external tool; upper bound on the adaptive _SpinDuration
105
106 static int Knob_SpinEarly = 1;
107 static int Knob_SuccEnabled = 1; // futile wake throttling
108 static int Knob_SuccRestrict = 0; // Limit successors + spinners to at-most-one
109 static int Knob_MaxSpinners = -1; // Should be a function of # CPUs; < 0 leaves the spinner count unbounded
110 static int Knob_Bonus = 100; // spin success bonus
111 static int Knob_BonusB = 100; // spin success bonus
112 static int Knob_Penalty = 200; // spin failure penalty
113 static int Knob_Poverty = 1000; // floor applied to _SpinDuration before Knob_Bonus is added
114 static int Knob_SpinAfterFutile = 1; // Spin after returning from park()
115 static int Knob_FixedSpin = 0;
116 static int Knob_OState = 3; // Spinner checks thread state of _owner
117 static int Knob_UsePause = 1; // bit 0x2: issue SpinPause() on each spin iteration
118 static int Knob_ExitPolicy = 0;
119 static int Knob_PreSpin = 10; // 20-100 likely better
120 static int Knob_ResetEvent = 0;
121
122 static int Knob_FastHSSEC = 0;
123 static int Knob_MoveNotifyee = 2; // notify() - disposition of notifyee
124 static int Knob_QMode = 0; // EntryList-cxq policy - queue discipline
125 static volatile int InitDone = 0; // one-shot init guard; volatile for cross-thread visibility (usage not visible in this excerpt)
// NOTE(review): fragment of ObjectMonitor's adaptive spin routine (TrySpin).
// The function's signature lies before this excerpt, and original lines
// 1888-1890 (the body of the safepoint-poll branch) and 1932 are missing
// from this extract, so the braces below do not balance as shown.
// Returns 1 if the lock was acquired by spinning, 0 otherwise.
1845 if (ctr <= 0) return 0;
1846
// Admission checks: bail out before spinning if a successor is already
// designated (Knob_SuccRestrict) or the current owner does not appear to
// be running (Knob_OState gates the NotRunnable heuristic).
1847 if (Knob_SuccRestrict && _succ != NULL) return 0;
1848 if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
1849 return 0;
1850 }
1851
// Optionally bound the number of concurrent spinners; MaxSpin < 0 means
// the _Spinner count is neither checked nor maintained.
1852 int MaxSpin = Knob_MaxSpinners;
1853 if (MaxSpin >= 0) {
1854 if (_Spinner > MaxSpin) {
1855 return 0;
1856 }
1857 // Slightly racy, but benign ...
1858 Adjust(&_Spinner, 1);
1859 }
1860
1861 // We're good to spin ... spin ingress.
1862 // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
1863 // when preparing to LD...CAS _owner, etc and the CAS is likely
1864 // to succeed.
// Advertise this spinner in _succ so an exiting owner can throttle
// futile wakeups.
1865 if (_succ == NULL) {
1866 _succ = Self;
1867 }
// prv remembers the previously observed owner so the loop can detect
// ownership changing hands mid-spin.
1868 Thread * prv = NULL;
1869
1870 // There are three ways to exit the following loop:
1871 // 1. A successful spin where this thread has acquired the lock.
1872 // 2. Spin failure with prejudice
1873 // 3. Spin failure without prejudice
1874
1875 while (--ctr >= 0) {
1876
1877 // Periodic polling -- Check for pending GC
1878 // Threads may spin while they're unsafe.
1879 // We don't want spinning threads to delay the JVM from reaching
1880 // a stop-the-world safepoint or to steal cycles from GC.
1881 // If we detect a pending safepoint we abort in order that
1882 // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
1883 // this thread, if safe, doesn't steal cycles from GC.
1884 // This is in keeping with the "no loitering in runtime" rule.
1885 // We periodically check to see if there's a safepoint pending.
1886 if ((ctr & 0xFF) == 0) {
1887 if (SafepointMechanism::poll(Self)) {
// NOTE(review): original lines 1888-1890 are absent from this extract;
// presumably they abort the spin when a safepoint is pending -- confirm
// against the full source.
1891 }
1892
1893 if (Knob_UsePause & 2) SpinPause();
1894
1895 // Probe _owner with TATAS
1896 // If this thread observes the monitor transition or flicker
1897 // from locked to unlocked to locked, then the odds that this
1898 // thread will acquire the lock in this spin attempt go down
1899 // considerably. The same argument applies if the CAS fails
1900 // or if we observe _owner change from one non-null value to
1901 // another non-null value. In such cases we might abort
1902 // the spin without prejudice or apply a "penalty" to the
1903 // spin count-down variable "ctr", reducing it by 100, say.
1904
1905 Thread * ox = (Thread *) _owner;
1906 if (ox == NULL) {
// The lock looks free: try to install Self as owner iff _owner is still NULL.
1907 ox = (Thread*)Atomic::cmpxchg(Self, &_owner, (void*)NULL);
1908 if (ox == NULL) {
1909 // The CAS succeeded -- this thread acquired ownership
1910 // Take care of some bookkeeping to exit spin state.
1911 if (_succ == Self) {
1912 _succ = NULL;
1913 }
// NOTE(review): spin ingress increments _Spinner whenever MaxSpin >= 0,
// but this success path only decrements when MaxSpin > 0, while the Abort
// path below uses >= 0 -- for MaxSpin == 0 the count appears to leak here.
// Confirm intent against the full source.
1914 if (MaxSpin > 0) Adjust(&_Spinner, -1);
1915
1916 // Increase _SpinDuration :
1917 // The spin was successful (profitable) so we tend toward
1918 // longer spin attempts in the future.
1919 // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
1920 // If we acquired the lock early in the spin cycle it
1921 // makes sense to increase _SpinDuration proportionally.
1922 // Note that we don't clamp SpinDuration precisely at SpinLimit.
1923 int x = _SpinDuration;
1924 if (x < Knob_SpinLimit) {
1925 if (x < Knob_Poverty) x = Knob_Poverty;
1926 _SpinDuration = x + Knob_Bonus;
1927 }
1928 return 1;
1929 }
1930
1931 // The CAS failed ... we can take any of the following actions:
1933 // * exit spin with prejudice -- goto Abort;
1934 // * exit spin without prejudice.
1935 // * Since CAS is high-latency, retry again immediately.
1936 prv = ox;
1937 goto Abort;
1938 }
1939
1940 // Did lock ownership change hands ?
1941 if (ox != prv && prv != NULL) {
1942 goto Abort;
1943 }
1944 prv = ox;
1945
1946 // Abort the spin if the owner is not executing.
1947 // The owner must be executing in order to drop the lock.
1948 // Spinning while the owner is OFFPROC is idiocy.
1949 // Consider: ctr -= RunnablePenalty ;
1950 if (Knob_OState && NotRunnable (Self, ox)) {
1951 goto Abort;
1952 }
1953 if (_succ == NULL) {
1954 _succ = Self;
1955 }
1956 }
1957
1958 // Spin failed with prejudice -- reduce _SpinDuration.
1959 // TODO: Use an AIMD-like policy to adjust _SpinDuration.
1960 // AIMD is globally stable.
1961 {
1962 int x = _SpinDuration;
1963 if (x > 0) {
1964 // Consider an AIMD scheme like: x -= (x >> 3) + 100
1965 // This is globally stable and tends to damp the response.
1966 x -= Knob_Penalty;
1967 if (x < 0) x = 0;
1968 _SpinDuration = x;
1969 }
1970 }
1971
1972 Abort:
// Spin egress: undo spinner accounting and clear our _succ advertisement.
1973 if (MaxSpin >= 0) Adjust(&_Spinner, -1);
1974 if (_succ == Self) {
1975 _succ = NULL;
1976 // Invariant: after setting succ=null a contending thread
1977 // must recheck-retry _owner before parking. This usually happens
1978 // in the normal usage of TrySpin(), but it's safest
1979 // to make TrySpin() as foolproof as possible.
1980 OrderAccess::fence();
1981 if (TryLock(Self) > 0) return 1;
1982 }
1983 return 0;
1984 }
1985
1986 // NotRunnable() -- informed spinning
1987 //
1988 // Don't bother spinning if the owner is not eligible to drop the lock.
1989 // Spin only if the owner thread is _thread_in_Java or _thread_in_vm.
1990 // The thread must be runnable in order to drop the lock in a timely fashion.
1991 // If the _owner is not runnable then spinning will not likely be
1992 // successful (profitable).
1993 //
1994 // Beware -- the thread referenced by _owner could have died
|