93 #else // ndef DTRACE_ENABLED
94
95 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) {;}
96 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon) {;}
97
98 #endif // ndef DTRACE_ENABLED
99
100 // Tunables ...
101 // The knob* variables are effectively final. Once set they should
102 // never be modified hence. Consider using __read_mostly with GCC.
103
int ObjectMonitor::Knob_SpinLimit = 5000; // derived by an external tool -

static int Knob_Bonus = 100; // spin success bonus
static int Knob_BonusB = 100; // spin success bonus
static int Knob_Penalty = 200; // spin failure penalty
// NOTE(review): Poverty/FixedSpin semantics live in the (not visible here)
// spin loop; presumably Poverty is a floor on the adaptive spin credit and
// FixedSpin forces a constant spin count -- confirm against TrySpin.
static int Knob_Poverty = 1000;
static int Knob_FixedSpin = 0;
static int Knob_PreSpin = 10; // 20-100 likely better

// Tri-state latch driving DeferredInitialize():
//   0 = not yet initialized, -1 = initialization in progress, 1 = done.
static volatile int InitDone = 0;
114
115 // -----------------------------------------------------------------------------
116 // Theory of operations -- Monitors lists, thread residency, etc:
117 //
118 // * A thread acquires ownership of a monitor by successfully
119 // CAS()ing the _owner field from null to non-null.
120 //
121 // * Invariant: A thread appears on at most one monitor list --
122 // cxq, EntryList or WaitSet -- at any one time.
123 //
124 // * Contending threads "push" themselves onto the cxq with CAS
125 // and then spin/park.
126 //
127 // * After a contending thread eventually acquires the lock it must
128 // dequeue itself from either the EntryList or the cxq.
129 //
130 // * The exiting thread identifies and unparks an "heir presumptive"
131 // tentative successor thread on the EntryList. Critically, the
132 // exiting thread doesn't unlink the successor thread from the EntryList.
133 // After having been unparked, the wakee will recontend for ownership of
411 // We can either return -1 or retry.
412 // Retry doesn't make as much sense because the lock was just acquired.
413 return -1;
414 }
415
416 #define MAX_RECHECK_INTERVAL 1000
417
418 void ObjectMonitor::EnterI(TRAPS) {
419 Thread * const Self = THREAD;
420 assert(Self->is_Java_thread(), "invariant");
421 assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
422
423 // Try the lock - TATAS
424 if (TryLock (Self) > 0) {
425 assert(_succ != Self, "invariant");
426 assert(_owner == Self, "invariant");
427 assert(_Responsible != Self, "invariant");
428 return;
429 }
430
431 DeferredInitialize();
432
433 // We try one round of spinning *before* enqueueing Self.
434 //
435 // If the _owner is ready but OFFPROC we could use a YieldTo()
436 // operation to donate the remainder of this thread's quantum
437 // to the owner. This has subtle but beneficial affinity
438 // effects.
439
440 if (TrySpin(Self) > 0) {
441 assert(_owner == Self, "invariant");
442 assert(_succ != Self, "invariant");
443 assert(_Responsible != Self, "invariant");
444 return;
445 }
446
447 // The Spin failed -- Enqueue and park the thread ...
448 assert(_succ != Self, "invariant");
449 assert(_owner != Self, "invariant");
450 assert(_Responsible != Self, "invariant");
451
1085
1086 // Maintain stats and report events to JVMTI
1087 OM_PERFDATA_OP(Parks, inc());
1088 }
1089
1090
1091 // -----------------------------------------------------------------------------
1092 // Class Loader deadlock handling.
1093 //
1094 // complete_exit exits a lock returning recursion count
1095 // complete_exit/reenter operate as a wait without waiting
1096 // complete_exit requires an inflated monitor
1097 // The _owner field is not always the Thread addr even with an
1098 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1099 // thread due to contention.
1100 intptr_t ObjectMonitor::complete_exit(TRAPS) {
1101 Thread * const Self = THREAD;
1102 assert(Self->is_Java_thread(), "Must be Java thread!");
1103 JavaThread *jt = (JavaThread *)THREAD;
1104
1105 DeferredInitialize();
1106
1107 if (THREAD != _owner) {
1108 if (THREAD->is_lock_owned ((address)_owner)) {
1109 assert(_recursions == 0, "internal state error");
1110 _owner = THREAD; // Convert from basiclock addr to Thread addr
1111 _recursions = 0;
1112 }
1113 }
1114
1115 guarantee(Self == _owner, "complete_exit not owner");
1116 intptr_t save = _recursions; // record the old recursion count
1117 _recursions = 0; // set the recursion level to be 0
1118 exit(true, Self); // exit the monitor
1119 guarantee(_owner != Self, "invariant");
1120 return save;
1121 }
1122
1123 // reenter() enters a lock and sets recursion count
1124 // complete_exit/reenter operate as a wait without waiting
1125 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1169 assert(event != NULL, "invariant");
1170 assert(monitor != NULL, "invariant");
1171 event->set_monitorClass(((oop)monitor->object())->klass());
1172 event->set_timeout(timeout);
1173 event->set_address((uintptr_t)monitor->object_addr());
1174 event->set_notifier(notifier_tid);
1175 event->set_timedOut(timedout);
1176 event->commit();
1177 }
1178
1179 // -----------------------------------------------------------------------------
1180 // Wait/Notify/NotifyAll
1181 //
1182 // Note: a subset of changes to ObjectMonitor::wait()
1183 // will need to be replicated in complete_exit
1184 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
1185 Thread * const Self = THREAD;
1186 assert(Self->is_Java_thread(), "Must be Java thread!");
1187 JavaThread *jt = (JavaThread *)THREAD;
1188
1189 DeferredInitialize();
1190
1191 // Throw IMSX or IEX.
1192 CHECK_OWNER();
1193
1194 EventJavaMonitorWait event;
1195
1196 // check for a pending interrupt
1197 if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1198 // post monitor waited event. Note that this is past-tense, we are done waiting.
1199 if (JvmtiExport::should_post_monitor_waited()) {
1200 // Note: 'false' parameter is passed here because the
1201 // wait was not timed out due to thread interrupt.
1202 JvmtiExport::post_monitor_waited(jt, this, false);
1203
1204 // In this short circuit of the monitor wait protocol, the
1205 // current thread never drops ownership of the monitor and
1206 // never gets added to the wait queue so the current thread
1207 // cannot be made the successor. This means that the
1208 // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
1209 // consume an unpark() meant for the ParkEvent associated with
1871 node->_next = NULL;
1872 node->_prev = NULL;
1873 }
1874
1875 // -----------------------------------------------------------------------------
1876 // PerfData support
// Created by Initialize() only when UsePerfData is set; otherwise these
// remain NULL (updates elsewhere go through OM_PERFDATA_OP, which
// presumably tolerates the NULL case -- confirm at its definition).
PerfCounter * ObjectMonitor::_sync_ContendedLockAttempts = NULL;
PerfCounter * ObjectMonitor::_sync_FutileWakeups = NULL;
PerfCounter * ObjectMonitor::_sync_Parks = NULL;
PerfCounter * ObjectMonitor::_sync_Notifications = NULL;
PerfCounter * ObjectMonitor::_sync_Inflations = NULL;
PerfCounter * ObjectMonitor::_sync_Deflations = NULL;
PerfLongVariable * ObjectMonitor::_sync_MonExtant = NULL;
1884
1885 // One-shot global initialization for the sync subsystem.
1886 // We could also defer initialization and initialize on-demand
1887 // the first time we call inflate(). Initialization would
1888 // be protected - like so many things - by the MonitorCache_lock.
1889
void ObjectMonitor::Initialize() {
  // One-shot guard: catches a second call in debug builds. In product
  // builds the assert compiles away and the store is a benign dead write.
  static int InitializationCompleted = 0;
  assert(InitializationCompleted == 0, "invariant");
  InitializationCompleted = 1;
  if (UsePerfData) {
    // CHECK propagates any exception raised while creating a counter,
    // aborting the remaining creations.
    EXCEPTION_MARK;
#define NEWPERFCOUNTER(n) \
  { \
    n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events, \
                                        CHECK); \
  }
#define NEWPERFVARIABLE(n) \
  { \
    n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events, \
                                         CHECK); \
  }
    NEWPERFCOUNTER(_sync_Inflations);
    NEWPERFCOUNTER(_sync_Deflations);
    NEWPERFCOUNTER(_sync_ContendedLockAttempts);
    NEWPERFCOUNTER(_sync_FutileWakeups);
    NEWPERFCOUNTER(_sync_Parks);
    NEWPERFCOUNTER(_sync_Notifications);
    NEWPERFVARIABLE(_sync_MonExtant);
#undef NEWPERFCOUNTER
#undef NEWPERFVARIABLE
  }
}
1917
// Lazy, race-safe one-shot initialization, callable from any monitor
// operation. InitDone encodes a tiny state machine:
//   0 = not started, -1 = some thread is initializing, 1 = complete.
// Exactly one thread wins the CAS(0 -> -1) and performs the work; it then
// publishes completion (InitDone = 1) behind a full fence. Losers busy-wait
// until they observe 1.
// NOTE(review): the wait loop spins without yielding or pausing -- assumed
// tolerable because initialization is short; confirm on contended startup.
void ObjectMonitor::DeferredInitialize() {
  if (InitDone > 0) return; // fast path: already initialized
  if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
    // Another thread won the race; wait for it to publish InitDone = 1.
    while (InitDone != 1) /* empty */;
    return;
  }

  // One-shot global initialization ...
  // The initialization is idempotent, so we don't need locks.
  // In the future consider doing this via os::init_2().

  // On a uniprocessor, spinning cannot help: the lock owner cannot make
  // progress while we occupy the CPU, so disable adaptive spinning.
  if (!os::is_MP()) {
    Knob_SpinLimit = 0;
    Knob_PreSpin = 0;
    Knob_FixedSpin = -1;
  }

  // Make all initialization stores visible before announcing completion
  // to the spinning waiters above.
  OrderAccess::fence();
  InitDone = 1;
}
1938
|
93 #else // ndef DTRACE_ENABLED
94
95 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) {;}
96 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon) {;}
97
98 #endif // ndef DTRACE_ENABLED
99
100 // Tunables ...
101 // The knob* variables are effectively final. Once set they should
102 // never be modified hence. Consider using __read_mostly with GCC.
103
int ObjectMonitor::Knob_SpinLimit = 5000; // derived by an external tool -

static int Knob_Bonus = 100; // spin success bonus
static int Knob_BonusB = 100; // spin success bonus
static int Knob_Penalty = 200; // spin failure penalty
// NOTE(review): Poverty/FixedSpin semantics live in the (not visible here)
// spin loop; presumably Poverty is a floor on the adaptive spin credit and
// FixedSpin forces a constant spin count -- confirm against TrySpin.
static int Knob_Poverty = 1000;
static int Knob_FixedSpin = 0;
static int Knob_PreSpin = 10; // 20-100 likely better

// Debug-only flag set by Initialize(); used solely to assert that the
// one-shot initialization ran before any monitor operation.
DEBUG_ONLY(static volatile bool InitDone = false;)
114
115 // -----------------------------------------------------------------------------
116 // Theory of operations -- Monitors lists, thread residency, etc:
117 //
118 // * A thread acquires ownership of a monitor by successfully
119 // CAS()ing the _owner field from null to non-null.
120 //
121 // * Invariant: A thread appears on at most one monitor list --
122 // cxq, EntryList or WaitSet -- at any one time.
123 //
124 // * Contending threads "push" themselves onto the cxq with CAS
125 // and then spin/park.
126 //
127 // * After a contending thread eventually acquires the lock it must
128 // dequeue itself from either the EntryList or the cxq.
129 //
130 // * The exiting thread identifies and unparks an "heir presumptive"
131 // tentative successor thread on the EntryList. Critically, the
132 // exiting thread doesn't unlink the successor thread from the EntryList.
133 // After having been unparked, the wakee will recontend for ownership of
411 // We can either return -1 or retry.
412 // Retry doesn't make as much sense because the lock was just acquired.
413 return -1;
414 }
415
416 #define MAX_RECHECK_INTERVAL 1000
417
418 void ObjectMonitor::EnterI(TRAPS) {
419 Thread * const Self = THREAD;
420 assert(Self->is_Java_thread(), "invariant");
421 assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
422
423 // Try the lock - TATAS
424 if (TryLock (Self) > 0) {
425 assert(_succ != Self, "invariant");
426 assert(_owner == Self, "invariant");
427 assert(_Responsible != Self, "invariant");
428 return;
429 }
430
431 assert(InitDone, "Unexpectedly not initialized");
432
433 // We try one round of spinning *before* enqueueing Self.
434 //
435 // If the _owner is ready but OFFPROC we could use a YieldTo()
436 // operation to donate the remainder of this thread's quantum
437 // to the owner. This has subtle but beneficial affinity
438 // effects.
439
440 if (TrySpin(Self) > 0) {
441 assert(_owner == Self, "invariant");
442 assert(_succ != Self, "invariant");
443 assert(_Responsible != Self, "invariant");
444 return;
445 }
446
447 // The Spin failed -- Enqueue and park the thread ...
448 assert(_succ != Self, "invariant");
449 assert(_owner != Self, "invariant");
450 assert(_Responsible != Self, "invariant");
451
1085
1086 // Maintain stats and report events to JVMTI
1087 OM_PERFDATA_OP(Parks, inc());
1088 }
1089
1090
1091 // -----------------------------------------------------------------------------
1092 // Class Loader deadlock handling.
1093 //
1094 // complete_exit exits a lock returning recursion count
1095 // complete_exit/reenter operate as a wait without waiting
1096 // complete_exit requires an inflated monitor
1097 // The _owner field is not always the Thread addr even with an
1098 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1099 // thread due to contention.
1100 intptr_t ObjectMonitor::complete_exit(TRAPS) {
1101 Thread * const Self = THREAD;
1102 assert(Self->is_Java_thread(), "Must be Java thread!");
1103 JavaThread *jt = (JavaThread *)THREAD;
1104
1105 assert(InitDone, "Unexpectedly not initialized");
1106
1107 if (THREAD != _owner) {
1108 if (THREAD->is_lock_owned ((address)_owner)) {
1109 assert(_recursions == 0, "internal state error");
1110 _owner = THREAD; // Convert from basiclock addr to Thread addr
1111 _recursions = 0;
1112 }
1113 }
1114
1115 guarantee(Self == _owner, "complete_exit not owner");
1116 intptr_t save = _recursions; // record the old recursion count
1117 _recursions = 0; // set the recursion level to be 0
1118 exit(true, Self); // exit the monitor
1119 guarantee(_owner != Self, "invariant");
1120 return save;
1121 }
1122
1123 // reenter() enters a lock and sets recursion count
1124 // complete_exit/reenter operate as a wait without waiting
1125 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1169 assert(event != NULL, "invariant");
1170 assert(monitor != NULL, "invariant");
1171 event->set_monitorClass(((oop)monitor->object())->klass());
1172 event->set_timeout(timeout);
1173 event->set_address((uintptr_t)monitor->object_addr());
1174 event->set_notifier(notifier_tid);
1175 event->set_timedOut(timedout);
1176 event->commit();
1177 }
1178
1179 // -----------------------------------------------------------------------------
1180 // Wait/Notify/NotifyAll
1181 //
1182 // Note: a subset of changes to ObjectMonitor::wait()
1183 // will need to be replicated in complete_exit
1184 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
1185 Thread * const Self = THREAD;
1186 assert(Self->is_Java_thread(), "Must be Java thread!");
1187 JavaThread *jt = (JavaThread *)THREAD;
1188
1189 assert(InitDone, "Unexpectedly not initialized");
1190
1191 // Throw IMSX or IEX.
1192 CHECK_OWNER();
1193
1194 EventJavaMonitorWait event;
1195
1196 // check for a pending interrupt
1197 if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1198 // post monitor waited event. Note that this is past-tense, we are done waiting.
1199 if (JvmtiExport::should_post_monitor_waited()) {
1200 // Note: 'false' parameter is passed here because the
1201 // wait was not timed out due to thread interrupt.
1202 JvmtiExport::post_monitor_waited(jt, this, false);
1203
1204 // In this short circuit of the monitor wait protocol, the
1205 // current thread never drops ownership of the monitor and
1206 // never gets added to the wait queue so the current thread
1207 // cannot be made the successor. This means that the
1208 // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
1209 // consume an unpark() meant for the ParkEvent associated with
1871 node->_next = NULL;
1872 node->_prev = NULL;
1873 }
1874
1875 // -----------------------------------------------------------------------------
1876 // PerfData support
// Created by Initialize() only when UsePerfData is set; otherwise these
// remain NULL (updates elsewhere go through OM_PERFDATA_OP, which
// presumably tolerates the NULL case -- confirm at its definition).
PerfCounter * ObjectMonitor::_sync_ContendedLockAttempts = NULL;
PerfCounter * ObjectMonitor::_sync_FutileWakeups = NULL;
PerfCounter * ObjectMonitor::_sync_Parks = NULL;
PerfCounter * ObjectMonitor::_sync_Notifications = NULL;
PerfCounter * ObjectMonitor::_sync_Inflations = NULL;
PerfCounter * ObjectMonitor::_sync_Deflations = NULL;
PerfLongVariable * ObjectMonitor::_sync_MonExtant = NULL;
1884
1885 // One-shot global initialization for the sync subsystem.
1886 // We could also defer initialization and initialize on-demand
1887 // the first time we call inflate(). Initialization would
1888 // be protected - like so many things - by the MonitorCache_lock.
1889
void ObjectMonitor::Initialize() {
  // Must run exactly once, before any monitor operation that asserts
  // InitDone. The flag (and this check) exist only in debug builds.
  assert(!InitDone, "invariant");

  // On a uniprocessor, spinning cannot help: the lock owner cannot make
  // progress while we occupy the CPU, so disable adaptive spinning.
  if (!os::is_MP()) {
    Knob_SpinLimit = 0;
    Knob_PreSpin = 0;
    Knob_FixedSpin = -1;
  }

  if (UsePerfData) {
    // CHECK propagates any exception raised while creating a counter,
    // aborting the remaining creations.
    EXCEPTION_MARK;
#define NEWPERFCOUNTER(n) \
  { \
    n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events, \
                                        CHECK); \
  }
#define NEWPERFVARIABLE(n) \
  { \
    n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events, \
                                         CHECK); \
  }
    NEWPERFCOUNTER(_sync_Inflations);
    NEWPERFCOUNTER(_sync_Deflations);
    NEWPERFCOUNTER(_sync_ContendedLockAttempts);
    NEWPERFCOUNTER(_sync_FutileWakeups);
    NEWPERFCOUNTER(_sync_Parks);
    NEWPERFCOUNTER(_sync_Notifications);
    NEWPERFVARIABLE(_sync_MonExtant);
#undef NEWPERFCOUNTER
#undef NEWPERFVARIABLE
  }

  // Publish (debug builds only) that initialization has completed.
  DEBUG_ONLY(InitDone = true;)
}
|