119 // _recursions, _EntryList, _cxq, and _succ, all of which may be
120 // fetched in the inflated unlock path, on a different cache line
121 // would make them immune to CAS-based invalidation from the _owner
122 // field.
123 //
124 // - The _recursions field should be of type int, or int32_t but not
125 // intptr_t. There's no reason to use a 64-bit type for this field
126 // in a 64-bit JVM.
127
// ObjectMonitor: the "heavyweight" (inflated) monitor used for Java-level
// synchronization (contended enter/exit, Object.wait, notify/notifyAll).
// NOTE(review): interior line ranges are elided from this listing (the
// destructor body and part of the PerfCounter section), so this is not the
// complete class body.
128 class ObjectMonitor {
129 public:
// Status codes returned by the monitor operations.
130 enum {
131 OM_OK, // no error
132 OM_SYSTEM_ERROR, // operating system error
133 OM_ILLEGAL_MONITOR_STATE, // IllegalMonitorStateException
134 OM_INTERRUPTED, // Thread.interrupt()
135 OM_TIMED_OUT // Object.wait() timed out
136 };
137
138 private:
139 friend class ObjectSynchronizer;
140 friend class ObjectWaiter;
141 friend class VMStructs;
142
143 volatile markOop _header; // displaced object header word - mark
144 void* volatile _object; // backward object pointer - strong root
145 public:
146 ObjectMonitor* FreeNext; // Free list linkage
147 private:
// Pad out to a cache-line boundary so the fields above sit apart from the
// frequently CAS'ed _owner field that follows.
148 DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE,
149 sizeof(volatile markOop) + sizeof(void * volatile) +
150 sizeof(ObjectMonitor *));
151 protected: // protected for JvmtiRawMonitor
152 void * volatile _owner; // pointer to owning thread OR BasicLock
153 volatile jlong _previous_owner_tid; // thread id of the previous owner of the monitor
154 volatile intptr_t _recursions; // recursion count, 0 for first entry
155 ObjectWaiter * volatile _EntryList; // Threads blocked on entry or reentry.
156 // The list is actually composed of WaitNodes,
157 // acting as proxies for Threads.
158 private:
159 ObjectWaiter * volatile _cxq; // LL of recently-arrived threads blocked on entry.
160 Thread * volatile _succ; // Heir presumptive thread - used for futile wakeup throttling
161 Thread * volatile _Responsible; // NOTE(review): undocumented here; role not evident from this listing.
162
163 volatile int _Spinner; // for exit->spinner handoff optimization
164 volatile int _SpinDuration;
165
166 volatile jint _count; // reference count to prevent reclamation/deflation
167 // at stop-the-world time. See ObjectSynchronizer::deflate_monitor().
168 // _count is approximately |_WaitSet| + |_EntryList|
169 protected:
170 ObjectWaiter * volatile _WaitSet; // LL of threads wait()ing on the monitor
171 volatile jint _waiters; // number of waiting threads
172 private:
173 volatile int _WaitSetLock; // protects Wait Queue - simple spinlock
174
175 public:
176 static void Initialize();
177
178 // Only perform a PerfData operation if the PerfData object has been
179 // allocated and if the PerfDataManager has not freed the PerfData
180 // objects which can happen at normal VM shutdown.
181 //
182 #define OM_PERFDATA_OP(f, op_str) \
183 do { \
184 if (ObjectMonitor::_sync_ ## f != NULL && \
185 PerfDataManager::has_PerfData()) { \
186 ObjectMonitor::_sync_ ## f->op_str; \
187 } \
188 } while (0)
189
190 static PerfCounter * _sync_ContendedLockAttempts;
191 static PerfCounter * _sync_FutileWakeups;
192 static PerfCounter * _sync_Parks;
193 static PerfCounter * _sync_Notifications;
// NOTE(review): original lines 194-217 are elided from this listing.
218 // ObjectMonitor reference with an offset, we need to remove the tag
219 // value in order to generate the proper address.
220 //
221 // We can either adjust the ObjectMonitor reference and then add the
222 // offset or we can adjust the offset that is added to the ObjectMonitor
223 // reference. The latter avoids an AGI (Address Generation Interlock)
224 // stall so the helper macro adjusts the offset value that is returned
225 // to the ObjectMonitor reference manipulation code:
226 //
227 #define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
228 ((ObjectMonitor::f ## _offset_in_bytes()) - markOopDesc::monitor_value)
229
230 markOop header() const;
231 volatile markOop* header_addr();
232 void set_header(markOop hdr);
233
// Nonzero iff the monitor is in use: owned, contended, or has waiters.
234 intptr_t is_busy() const {
235 // TODO-FIXME: merge _count and _waiters.
236 // TODO-FIXME: assert _owner == null implies _recursions = 0
237 // TODO-FIXME: assert _WaitSet != null implies _count > 0
238 return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList);
239 }
240
241 intptr_t is_entered(Thread* current) const;
242
243 void* owner() const;
244 void set_owner(void* owner);
245
246 jint waiters() const;
247
248 jint count() const;
249 void set_count(jint count);
250 jint contentions() const;
251 intptr_t recursions() const { return _recursions; }
252
253 // JVM/TI GetObjectMonitorUsage() needs this:
254 ObjectWaiter* first_waiter() { return _WaitSet; }
255 ObjectWaiter* next_waiter(ObjectWaiter* o) { return o->_next; }
256 Thread* thread_of_waiter(ObjectWaiter* o) { return o->_thread; }
257
258 protected:
259 // We don't typically expect or want the ctors or dtors to run.
260 // normal ObjectMonitors are type-stable and immortal.
261 ObjectMonitor() { ::memset((void *)this, 0, sizeof(*this)); }
262
263 ~ObjectMonitor() {
// NOTE(review): destructor body (original lines 264-267) elided from this listing.
268
269 private:
// Reset state so the monitor can be returned to the free list.
270 void Recycle() {
271 // TODO: add stronger asserts ...
272 // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
273 // _count == 0 EntryList == NULL
274 // _recursions == 0 _WaitSet == NULL
275 assert(((is_busy()|_recursions) == 0), "freeing inuse monitor");
276 _succ = NULL;
277 _EntryList = NULL;
278 _cxq = NULL;
279 _WaitSet = NULL;
280 _recursions = 0;
281 }
282
283 public:
284
285 void* object() const;
286 void* object_addr();
287 void set_object(void* obj);
288
289 bool check(TRAPS); // returns true if the calling thread owns the monitor.
290 void check_slow(TRAPS);
291 void clear();
292
293 void enter(TRAPS);
294 void exit(bool not_suspended, TRAPS);
295 void wait(jlong millis, bool interruptable, TRAPS);
296 void notify(TRAPS);
297 void notifyAll(TRAPS);
298
299 // Use the following at your own risk
300 intptr_t complete_exit(TRAPS);
301 void reenter(intptr_t recursions, TRAPS);
302
303 private:
304 void AddWaiter(ObjectWaiter * waiter);
305 void INotify(Thread * Self);
306 ObjectWaiter * DequeueWaiter();
307 void DequeueSpecificWaiter(ObjectWaiter * waiter);
308 void EnterI(TRAPS);
309 void ReenterI(Thread * Self, ObjectWaiter * SelfNode);
310 void UnlinkAfterAcquire(Thread * Self, ObjectWaiter * SelfNode);
311 int TryLock(Thread * Self);
312 int NotRunnable(Thread * Self, Thread * Owner);
313 int TrySpin(Thread * Self);
314 void ExitEpilog(Thread * Self, ObjectWaiter * Wakee);
315 bool ExitSuspendEquivalent(JavaThread * Self);
316 };
317
318 #endif // SHARE_RUNTIME_OBJECTMONITOR_HPP
|
119 // _recursions, _EntryList, _cxq, and _succ, all of which may be
120 // fetched in the inflated unlock path, on a different cache line
121 // would make them immune to CAS-based invalidation from the _owner
122 // field.
123 //
124 // - The _recursions field should be of type int, or int32_t but not
125 // intptr_t. There's no reason to use a 64-bit type for this field
126 // in a 64-bit JVM.
127
// ObjectMonitor (async-deflation variant): the "heavyweight" (inflated)
// monitor, extended to support asynchronous idle-monitor deflation —
// adds DEFLATER_MARKER, _ref_count, AllocationState, is_busy_async(),
// and bool-returning enter()/reenter().
// NOTE(review): interior line ranges are elided from this listing (the
// destructor body and part of the PerfCounter section), so this is not the
// complete class body.
128 class ObjectMonitor {
129 public:
// Status codes returned by the monitor operations.
130 enum {
131 OM_OK, // no error
132 OM_SYSTEM_ERROR, // operating system error
133 OM_ILLEGAL_MONITOR_STATE, // IllegalMonitorStateException
134 OM_INTERRUPTED, // Thread.interrupt()
135 OM_TIMED_OUT // Object.wait() timed out
136 };
137
138 private:
139 friend class ObjectMonitorHandle;
140 friend class ObjectSynchronizer;
141 friend class ObjectWaiter;
142 friend class VMStructs;
143
144 volatile markOop _header; // displaced object header word - mark
145 void* volatile _object; // backward object pointer - strong root
146 public:
147 ObjectMonitor* FreeNext; // Free list linkage
148 private:
// Pad out to a cache-line boundary so the fields above sit apart from the
// frequently CAS'ed _owner field that follows.
149 DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE,
150 sizeof(volatile markOop) + sizeof(void * volatile) +
151 sizeof(ObjectMonitor *));
152 protected: // protected for JvmtiRawMonitor
153 // Used by async monitor deflation as a marker in the _owner field:
154 #define DEFLATER_MARKER reinterpret_cast<void*>(-1)
155 void * volatile _owner; // pointer to owning thread OR BasicLock
156 volatile jlong _previous_owner_tid; // thread id of the previous owner of the monitor
157 volatile intptr_t _recursions; // recursion count, 0 for first entry
158 ObjectWaiter * volatile _EntryList; // Threads blocked on entry or reentry.
159 // The list is actually composed of WaitNodes,
160 // acting as proxies for Threads.
161 private:
162 ObjectWaiter * volatile _cxq; // LL of recently-arrived threads blocked on entry.
163 Thread * volatile _succ; // Heir presumptive thread - used for futile wakeup throttling
164 Thread * volatile _Responsible; // NOTE(review): undocumented here; role not evident from this listing.
165
166 volatile int _Spinner; // for exit->spinner handoff optimization
167 volatile int _SpinDuration;
168
169 volatile jint _count; // reference count to prevent reclamation/deflation
170 // at stop-the-world time. See ObjectSynchronizer::deflate_monitor().
171 // _count is approximately |_WaitSet| + |_EntryList|
172 protected:
173 ObjectWaiter * volatile _WaitSet; // LL of threads wait()ing on the monitor
174 volatile jint _waiters; // number of waiting threads
175 private:
176 volatile int _WaitSetLock; // protects Wait Queue - simple spinlock
177 volatile jint _ref_count; // ref count for ObjectMonitor* (see ObjectMonitorHandle)
// Lifecycle state of this monitor on the allocation/free lists.
178 typedef enum {
179 Free = 0, // Free must be 0 for monitor to be free after memset(..,0,..).
180 New,
181 Old
182 } AllocationState;
183 AllocationState _allocation_state;
184
185 public:
186 static void Initialize();
187
188 // Only perform a PerfData operation if the PerfData object has been
189 // allocated and if the PerfDataManager has not freed the PerfData
190 // objects which can happen at normal VM shutdown.
191 //
192 #define OM_PERFDATA_OP(f, op_str) \
193 do { \
194 if (ObjectMonitor::_sync_ ## f != NULL && \
195 PerfDataManager::has_PerfData()) { \
196 ObjectMonitor::_sync_ ## f->op_str; \
197 } \
198 } while (0)
199
200 static PerfCounter * _sync_ContendedLockAttempts;
201 static PerfCounter * _sync_FutileWakeups;
202 static PerfCounter * _sync_Parks;
203 static PerfCounter * _sync_Notifications;
// NOTE(review): original lines 204-227 are elided from this listing.
228 // ObjectMonitor reference with an offset, we need to remove the tag
229 // value in order to generate the proper address.
230 //
231 // We can either adjust the ObjectMonitor reference and then add the
232 // offset or we can adjust the offset that is added to the ObjectMonitor
233 // reference. The latter avoids an AGI (Address Generation Interlock)
234 // stall so the helper macro adjusts the offset value that is returned
235 // to the ObjectMonitor reference manipulation code:
236 //
237 #define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
238 ((ObjectMonitor::f ## _offset_in_bytes()) - markOopDesc::monitor_value)
239
240 markOop header() const;
241 volatile markOop* header_addr();
242 void set_header(markOop hdr);
243
// Nonzero iff the monitor is in use: owned, contended, or has waiters.
244 intptr_t is_busy() const {
245 // TODO-FIXME: merge _count and _waiters.
246 // TODO-FIXME: assert _owner == null implies _recursions = 0
247 // TODO-FIXME: assert _WaitSet != null implies _count > 0
248 // We do not include _ref_count in the is_busy() check because
249 // _ref_count is for indicating that the ObjectMonitor* is in
250 // use which is orthogonal to whether the ObjectMonitor itself
251 // is in use for a locking operation.
252 return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList);
253 }
254
255 // Version of is_busy() that accounts for special values in
256 // _count and _owner when AsyncDeflateIdleMonitors is enabled.
257 intptr_t is_busy_async() const {
258 intptr_t ret_code = _waiters | intptr_t(_cxq) | intptr_t(_EntryList);
259 if (!AsyncDeflateIdleMonitors) {
260 ret_code |= _count | intptr_t(_owner);
261 } else {
// Only a positive _count marks the monitor busy; the special
// non-positive values mentioned above are ignored.
262 if (_count > 0) {
263 ret_code |= _count;
264 }
// DEFLATER_MARKER in _owner means "being deflated", not "owned".
265 if (_owner != DEFLATER_MARKER) {
266 ret_code |= intptr_t(_owner);
267 }
268 }
269 return ret_code;
270 }
271
272 intptr_t is_entered(Thread* current) const;
273
274 void* owner() const; // Returns NULL if DEFLATER_MARKER is observed.
275 void set_owner(void* owner);
276
277 jint waiters() const;
278
279 jint count() const;
280 void set_count(jint count);
281 jint contentions() const;
282 intptr_t recursions() const { return _recursions; }
283
284 // JVM/TI GetObjectMonitorUsage() needs this:
285 ObjectWaiter* first_waiter() { return _WaitSet; }
286 ObjectWaiter* next_waiter(ObjectWaiter* o) { return o->_next; }
287 Thread* thread_of_waiter(ObjectWaiter* o) { return o->_thread; }
288
289 protected:
290 // We don't typically expect or want the ctors or dtors to run.
291 // normal ObjectMonitors are type-stable and immortal.
292 ObjectMonitor() { ::memset((void *)this, 0, sizeof(*this)); }
293
294 ~ObjectMonitor() {
// NOTE(review): destructor body (original lines 295-298) elided from this listing.
299
300 private:
// Reset state so the monitor can be returned to the free list.
301 void Recycle() {
302 // TODO: add stronger asserts ...
303 // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
304 // _count == 0 EntryList == NULL
305 // _recursions == 0 _WaitSet == NULL
306 assert(((is_busy()|_recursions) == 0), "freeing inuse monitor");
307 _succ = NULL;
308 _EntryList = NULL;
309 _cxq = NULL;
310 _WaitSet = NULL;
311 _recursions = 0;
312 }
313
314 public:
315
316 void* object() const;
317 void* object_addr();
318 void set_object(void* obj);
// Allocation-state accessors used by the monitor list management code.
319 void set_allocation_state(AllocationState s);
320 AllocationState allocation_state() const;
321 bool is_free() const;
322 bool is_active() const;
323 bool is_old() const;
324 bool is_new() const;
// _ref_count accessors; see ObjectMonitorHandle for the usage pattern.
325 void dec_ref_count();
326 void inc_ref_count();
327 jint ref_count() const;
328
329 bool check(TRAPS); // returns true if the calling thread owns the monitor.
330 void check_slow(TRAPS);
331 void clear();
332 void clear_using_JT();
333
334 bool enter(TRAPS); // Returns false if monitor is being async deflated and caller should retry locking the object.
335 void exit(bool not_suspended, TRAPS);
336 void wait(jlong millis, bool interruptable, TRAPS);
337 void notify(TRAPS);
338 void notifyAll(TRAPS);
339
340 // Use the following at your own risk
341 intptr_t complete_exit(TRAPS);
342 bool reenter(intptr_t recursions, TRAPS); // Returns false if monitor is being async deflated and caller should retry locking the object.
343
344 private:
345 void AddWaiter(ObjectWaiter * waiter);
346 void INotify(Thread * Self);
347 ObjectWaiter * DequeueWaiter();
348 void DequeueSpecificWaiter(ObjectWaiter * waiter);
349 void EnterI(TRAPS);
350 void ReenterI(Thread * Self, ObjectWaiter * SelfNode);
351 void UnlinkAfterAcquire(Thread * Self, ObjectWaiter * SelfNode);
352 int TryLock(Thread * Self);
353 int NotRunnable(Thread * Self, Thread * Owner);
354 int TrySpin(Thread * Self);
355 void ExitEpilog(Thread * Self, ObjectWaiter * Wakee);
356 bool ExitSuspendEquivalent(JavaThread * Self);
357 void install_displaced_markword_in_object();
358 };
359
360 // A helper object for managing an ObjectMonitor*'s ref_count. There
361 // are special safety considerations when async deflation is used.
// Stack-allocated (StackObj) holder; the out-of-line destructor presumably
// drops the ref_count taken by save_om_ptr()/set_om_ptr() — TODO confirm
// against the definition in the .cpp file.
362 class ObjectMonitorHandle : public StackObj {
363 private:
364 ObjectMonitor * _om_ptr; // the managed monitor, or NULL if none saved
365 public:
366 ObjectMonitorHandle() { _om_ptr = NULL; }
367 ~ObjectMonitorHandle();
368
369 ObjectMonitor * om_ptr() const { return _om_ptr; }
370 // Save the ObjectMonitor* associated with the specified markOop and
371 // increment the ref_count.
372 bool save_om_ptr(oop object, markOop mark);
373
374 // For internal use by ObjectSynchronizer::monitors_iterate().
// NOTE(review): the parameter name '_om_ptr' shadows the data member of
// the same name; consider renaming it (e.g. to 'om_ptr') for clarity.
375 ObjectMonitorHandle(ObjectMonitor * _om_ptr);
376 // For internal use by ObjectSynchronizer::inflate().
377 void set_om_ptr(ObjectMonitor * om_ptr);
378 };
379
380 #endif // SHARE_RUNTIME_OBJECTMONITOR_HPP
|