110 // http://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
111 // we know that the CAS in monitorenter will invalidate the line
112 // underlying _owner. We want to avoid an L1 data cache miss on that
113 // same line for monitorexit. Putting these remaining fields:
114 // _recursions, _EntryList, _cxq, and _succ, all of which may be
115 // fetched in the inflated unlock path, on a different cache line
116 // would make them immune to CAS-based invalidation from the _owner
117 // field.
118 //
119 // - The _recursions field should be of type int, or int32_t but not
120 // intptr_t. There's no reason to use a 64-bit type for this field
121 // in a 64-bit JVM.
122
123 #ifndef OM_CACHE_LINE_SIZE
124 // Use DEFAULT_CACHE_LINE_SIZE if not already specified for
125 // the current build platform.
126 #define OM_CACHE_LINE_SIZE DEFAULT_CACHE_LINE_SIZE
127 #endif
128
// ObjectMonitor is the inflated ("heavyweight") monitor used by the VM's
// synchronization code: enter()/exit() implement contended monitorenter/
// monitorexit, and wait()/notify()/notifyAll() back Object.wait and
// friends. Field order is deliberate: the DEFINE_PAD_MINUS_SIZE macros
// keep hot fields on separate cache lines and the sync code requires
// _header at offset zero, so do not reorder or insert fields casually.
129 class ObjectMonitor {
130   friend class ObjectSynchronizer;
131   friend class ObjectWaiter;
132   friend class VMStructs;
133   JVMCI_ONLY(friend class JVMCIVMStructs;)
134
135   // The sync code expects the header field to be at offset zero (0).
136   // Enforced by the assert() in header_addr().
137   volatile markWord _header; // displaced object header word - mark
138   void* volatile _object; // backward object pointer - strong root
139 private:
140   // Separate _header and _owner on different cache lines since both can
141   // have busy multi-threaded access. _header and _object are set at
142   // initial inflation and _object doesn't change until deflation so
143   // _object is a good choice to share the cache line with _header.
144   DEFINE_PAD_MINUS_SIZE(0, OM_CACHE_LINE_SIZE,
145 sizeof(volatile markWord) + sizeof(void* volatile));
146   void* volatile _owner; // pointer to owning thread OR BasicLock
147   volatile jlong _previous_owner_tid; // thread id of the previous owner of the monitor
148   // Separate _owner and _next_om on different cache lines since
149   // both can have busy multi-threaded access. _previous_owner_tid is only
150   // changed by ObjectMonitor::exit() so it is a good choice to share the
151   // cache line with _owner.
152   DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(void* volatile) +
153 sizeof(volatile jlong));
154   ObjectMonitor* _next_om; // Next ObjectMonitor* linkage
155   volatile intx _recursions; // recursion count, 0 for first entry
156   ObjectWaiter* volatile _EntryList; // Threads blocked on entry or reentry.
157 // The list is actually composed of WaitNodes,
158 // acting as proxies for Threads.
159
160   ObjectWaiter* volatile _cxq; // LL of recently-arrived threads blocked on entry.
161   Thread* volatile _succ; // Heir presumptive thread - used for futile wakeup throttling
// NOTE(review): _Responsible is undocumented here; presumably the thread
// designated to periodically retry the lock so a wakeup is never lost
// (see enter()/EnterI() in the .cpp) - confirm before relying on this.
162   Thread* volatile _Responsible;
163
164   volatile int _Spinner; // for exit->spinner handoff optimization
// Adaptive spin state; exact units/policy are not visible in this header
// (see TrySpin() in the .cpp).
165   volatile int _SpinDuration;
166
167   volatile jint _contentions; // Number of active contentions in enter(). It is used by is_busy()
168 // along with other fields to determine if an ObjectMonitor can be
169 // deflated. See ObjectSynchronizer::deflate_monitor().
170 protected:
171   ObjectWaiter* volatile _WaitSet; // LL of threads wait()ing on the monitor
172   volatile jint _waiters; // number of waiting threads
173 private:
174   volatile int _WaitSetLock; // protects Wait Queue - simple spinlock
175
176 public:
// One-time static setup (e.g. the PerfData counters below); definition
// is out of line.
177   static void Initialize();
178
179   // Only perform a PerfData operation if the PerfData object has been
180   // allocated and if the PerfDataManager has not freed the PerfData
181   // objects which can happen at normal VM shutdown.
182   //
183 #define OM_PERFDATA_OP(f, op_str) \
184 do { \
185 if (ObjectMonitor::_sync_ ## f != NULL && \
186 PerfDataManager::has_PerfData()) { \
187 ObjectMonitor::_sync_ ## f->op_str; \
188 } \
189 } while (0)
// Performance counters; intended to be accessed via OM_PERFDATA_OP above.
191   static PerfCounter * _sync_ContendedLockAttempts;
192   static PerfCounter * _sync_FutileWakeups;
193   static PerfCounter * _sync_Parks;
194   static PerfCounter * _sync_Notifications;
195   static PerfCounter * _sync_Inflations;
196   static PerfCounter * _sync_Deflations;
197   static PerfLongVariable * _sync_MonExtant;
198
199   static int Knob_SpinLimit;
200
201   void* operator new (size_t size) throw();
202   void* operator new[] (size_t size) throw();
203   void operator delete(void* p);
204   void operator delete[] (void* p);
205
206   // TODO-FIXME: the "offset" routines should return a type of off_t instead of int ...
207   // ByteSize would also be an appropriate type.
208   static int header_offset_in_bytes() { return offset_of(ObjectMonitor, _header); }
209   static int object_offset_in_bytes() { return offset_of(ObjectMonitor, _object); }
210   static int owner_offset_in_bytes() { return offset_of(ObjectMonitor, _owner); }
211   static int recursions_offset_in_bytes() { return offset_of(ObjectMonitor, _recursions); }
212   static int cxq_offset_in_bytes() { return offset_of(ObjectMonitor, _cxq); }
213   static int succ_offset_in_bytes() { return offset_of(ObjectMonitor, _succ); }
214   static int EntryList_offset_in_bytes() { return offset_of(ObjectMonitor, _EntryList); }
215
216   // ObjectMonitor references can be ORed with markWord::monitor_value
217   // as part of the ObjectMonitor tagging mechanism. When we combine an
218   // ObjectMonitor reference with an offset, we need to remove the tag
219   // value in order to generate the proper address.
220   //
221   // We can either adjust the ObjectMonitor reference and then add the
222   // offset or we can adjust the offset that is added to the ObjectMonitor
223   // reference. The latter avoids an AGI (Address Generation Interlock)
224   // stall so the helper macro adjusts the offset value that is returned
225   // to the ObjectMonitor reference manipulation code:
226   //
227 #define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
228 ((ObjectMonitor::f ## _offset_in_bytes()) - markWord::monitor_value)
229
230   markWord header() const;
231   volatile markWord* header_addr();
232   void set_header(markWord hdr);
233
// Returns non-zero iff the monitor is in use: any contentions, waiters,
// owner, or queued threads. Counter values and pointer bits are simply
// OR-ed together, so treat the result strictly as zero vs. non-zero.
234   intptr_t is_busy() const {
235     // TODO-FIXME: assert _owner == null implies _recursions = 0
236     return _contentions|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList);
237   }
238   const char* is_busy_to_string(stringStream* ss);
239
240   intptr_t is_entered(Thread* current) const;
241
242   void* owner() const;
243   // Clear _owner field; current value must match old_value.
244   void release_clear_owner(void* old_value);
245   // Simply set _owner field to new_value; current value must match old_value.
246   void set_owner_from(void* old_value, void* new_value);
247   // Simply set _owner field to self; current value must match basic_lock_p.
248   void set_owner_from_BasicLock(void* basic_lock_p, Thread* self);
249   // Try to set _owner field to new_value if the current value matches
250   // old_value, using Atomic::cmpxchg(). Otherwise, does not change the
251   // _owner field. Returns the prior value of the _owner field.
252   void* try_set_owner_from(void* old_value, void* new_value);
253
254   ObjectMonitor* next_om() const;
255   // Simply set _next_om field to new_value.
256   void set_next_om(ObjectMonitor* new_value);
257   // Try to set _next_om field to new_value if the current value matches
258   // old_value, using Atomic::cmpxchg(). Otherwise, does not change the
259   // _next_om field. Returns the prior value of the _next_om field.
260   ObjectMonitor* try_set_next_om(ObjectMonitor* old_value, ObjectMonitor* new_value);
261
262   jint waiters() const;
263
264   jint contentions() const;
265   intx recursions() const { return _recursions; }
266
// Accessors for walking the linked list of ObjectWaiter nodes.
269   ObjectWaiter* next_waiter(ObjectWaiter* o) { return o->_next; }
270   Thread* thread_of_waiter(ObjectWaiter* o) { return o->_thread; }
271
272 protected:
273   // We don't typically expect or want the ctors or dtors to run.
274   // normal ObjectMonitors are type-stable and immortal.
// The ctor's only job is to zero-fill the instance.
275   ObjectMonitor() { ::memset((void*)this, 0, sizeof(*this)); }
276
277   ~ObjectMonitor() {
278     // TODO: Add asserts ...
279     // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
280     // _contentions == 0 _EntryList == NULL etc
281   }
282
283 private:
// Reset the monitor to its free state. The assert covers _owner,
// _contentions and _waiters via is_busy(); only the queue/recursion
// fields need explicit clearing here.
284   void Recycle() {
285     // TODO: add stronger asserts ...
286     // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
287     // _contentions == 0 EntryList == NULL
288     // _recursions == 0 _WaitSet == NULL
289     DEBUG_ONLY(stringStream ss;)
290     assert((is_busy() | _recursions) == 0, "freeing in-use monitor: %s, "
291 "recursions=" INTX_FORMAT, is_busy_to_string(&ss), _recursions);
292     _succ = NULL;
293     _EntryList = NULL;
294     _cxq = NULL;
295     _WaitSet = NULL;
296     _recursions = 0;
297   }
298
299 public:
300
301   void* object() const;
302   void* object_addr();
303   void set_object(void* obj);
304
305   // Returns true if the specified thread owns the ObjectMonitor. Otherwise
306   // returns false and throws IllegalMonitorStateException (IMSE).
307   bool check_owner(Thread* THREAD);
308   void clear();
309
// Core monitor operations; definitions are out of line.
310   void enter(TRAPS);
311   void exit(bool not_suspended, TRAPS);
312   void wait(jlong millis, bool interruptable, TRAPS);
313   void notify(TRAPS);
314   void notifyAll(TRAPS);
315
316   void print() const;
317 #ifdef ASSERT
318   void print_debug_style_on(outputStream* st) const;
319 #endif
320   void print_on(outputStream* st) const;
321
322   // Use the following at your own risk
323   intx complete_exit(TRAPS);
324   void reenter(intx recursions, TRAPS);
325
326 private:
// Internal enter/exit/wait machinery; definitions are out of line.
327   void AddWaiter(ObjectWaiter* waiter);
328   void INotify(Thread* self);
329   ObjectWaiter* DequeueWaiter();
330   void DequeueSpecificWaiter(ObjectWaiter* waiter);
331   void EnterI(TRAPS);
332   void ReenterI(Thread* self, ObjectWaiter* self_node);
333   void UnlinkAfterAcquire(Thread* self, ObjectWaiter* self_node);
334   int TryLock(Thread* self);
335   int NotRunnable(Thread* self, Thread * Owner);
336   int TrySpin(Thread* self);
337   void ExitEpilog(Thread* self, ObjectWaiter* Wakee);
338   bool ExitSuspendEquivalent(JavaThread* self);
339 };
340
341 #endif // SHARE_RUNTIME_OBJECTMONITOR_HPP
|
110 // http://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
111 // we know that the CAS in monitorenter will invalidate the line
112 // underlying _owner. We want to avoid an L1 data cache miss on that
113 // same line for monitorexit. Putting these remaining fields:
114 // _recursions, _EntryList, _cxq, and _succ, all of which may be
115 // fetched in the inflated unlock path, on a different cache line
116 // would make them immune to CAS-based invalidation from the _owner
117 // field.
118 //
119 // - The _recursions field should be of type int, or int32_t but not
120 // intptr_t. There's no reason to use a 64-bit type for this field
121 // in a 64-bit JVM.
122
123 #ifndef OM_CACHE_LINE_SIZE
124 // Use DEFAULT_CACHE_LINE_SIZE if not already specified for
125 // the current build platform.
126 #define OM_CACHE_LINE_SIZE DEFAULT_CACHE_LINE_SIZE
127 #endif
128
// ObjectMonitor is the inflated ("heavyweight") monitor used by the VM's
// synchronization code. This revision also carries the async-deflation
// state: _allocation_state, the DEFLATER_MARKER owner sentinel, and
// _ref_count (see ObjectSynchronizer::deflate_monitor_using_JT()).
// Field order is deliberate: the DEFINE_PAD_MINUS_SIZE macros keep hot
// fields on separate cache lines and the sync code requires _header at
// offset zero, so do not reorder or insert fields casually.
129 class ObjectMonitor {
130   friend class ObjectMonitorHandle;
131   friend class ObjectSynchronizer;
132   friend class ObjectWaiter;
133   friend class VMStructs;
134   JVMCI_ONLY(friend class JVMCIVMStructs;)
135
136   // The sync code expects the header field to be at offset zero (0).
137   // Enforced by the assert() in header_addr().
138   volatile markWord _header; // displaced object header word - mark
139   void* volatile _object; // backward object pointer - strong root
// Lifecycle state used by async deflation; kept on the _header cache
// line (see the padding comment below).
140   typedef enum {
141     Free = 0, // Free must be 0 for monitor to be free after memset(..,0,..).
142     New,
143     Old
144   } AllocationState;
145   AllocationState _allocation_state;
146   // Separate _header and _owner on different cache lines since both can
147   // have busy multi-threaded access. _header, _object and _allocation_state
148   // are set at initial inflation. _object and _allocation_state don't
149   // change until deflation so _object and _allocation_state are good
150   // choices to share the cache line with _header.
151   DEFINE_PAD_MINUS_SIZE(0, OM_CACHE_LINE_SIZE, sizeof(volatile markWord) +
152 sizeof(void* volatile) + sizeof(AllocationState));
153   // Used by async deflation as a marker in the _owner field:
154 #define DEFLATER_MARKER reinterpret_cast<void*>(-1)
155   void* volatile _owner; // pointer to owning thread OR BasicLock
156   volatile jlong _previous_owner_tid; // thread id of the previous owner of the monitor
157   // Separate _owner and _ref_count on different cache lines since both
158   // can have busy multi-threaded access. _previous_owner_tid is only
159   // changed by ObjectMonitor::exit() so it is a good choice to share the
160   // cache line with _owner.
161   DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(void* volatile) +
162 sizeof(volatile jlong));
163   jint _ref_count; // ref count for ObjectMonitor* and used by the async deflation
164 // protocol. See ObjectSynchronizer::deflate_monitor_using_JT().
165   // Separate _ref_count and _next_om on different cache lines since
166   // both can have busy multi-threaded access.
167   DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile jint));
168   ObjectMonitor* _next_om; // Next ObjectMonitor* linkage
169   volatile intx _recursions; // recursion count, 0 for first entry
170   ObjectWaiter* volatile _EntryList; // Threads blocked on entry or reentry.
171 // The list is actually composed of WaitNodes,
172 // acting as proxies for Threads.
173
174   ObjectWaiter* volatile _cxq; // LL of recently-arrived threads blocked on entry.
175   Thread* volatile _succ; // Heir presumptive thread - used for futile wakeup throttling
// NOTE(review): _Responsible is undocumented here; presumably the thread
// designated to periodically retry the lock so a wakeup is never lost
// (see enter()/EnterI() in the .cpp) - confirm before relying on this.
176   Thread* volatile _Responsible;
177
178   volatile int _Spinner; // for exit->spinner handoff optimization
// Adaptive spin state; exact units/policy are not visible in this header
// (see TrySpin() in the .cpp).
179   volatile int _SpinDuration;
180
181   volatile jint _contentions; // Number of active contentions in enter(). It is used by is_busy()
182 // along with other fields to determine if an ObjectMonitor can be
183 // deflated. See ObjectSynchronizer::deflate_monitor() and
184 // ObjectSynchronizer::deflate_monitor_using_JT().
185 protected:
186   ObjectWaiter* volatile _WaitSet; // LL of threads wait()ing on the monitor
187   volatile jint _waiters; // number of waiting threads
188 private:
189   volatile int _WaitSetLock; // protects Wait Queue - simple spinlock
190
191 public:
// One-time static setup (e.g. the PerfData counters below); definition
// is out of line.
192   static void Initialize();
193
194   // Only perform a PerfData operation if the PerfData object has been
195   // allocated and if the PerfDataManager has not freed the PerfData
196   // objects which can happen at normal VM shutdown.
197   //
198 #define OM_PERFDATA_OP(f, op_str) \
199 do { \
200 if (ObjectMonitor::_sync_ ## f != NULL && \
201 PerfDataManager::has_PerfData()) { \
202 ObjectMonitor::_sync_ ## f->op_str; \
203 } \
204 } while (0)
// Performance counters; intended to be accessed via OM_PERFDATA_OP above.
206   static PerfCounter * _sync_ContendedLockAttempts;
207   static PerfCounter * _sync_FutileWakeups;
208   static PerfCounter * _sync_Parks;
209   static PerfCounter * _sync_Notifications;
210   static PerfCounter * _sync_Inflations;
211   static PerfCounter * _sync_Deflations;
212   static PerfLongVariable * _sync_MonExtant;
213
214   static int Knob_SpinLimit;
215
216   void* operator new (size_t size) throw();
217   void* operator new[] (size_t size) throw();
218   void operator delete(void* p);
219   void operator delete[] (void* p);
220
221   // TODO-FIXME: the "offset" routines should return a type of off_t instead of int ...
222   // ByteSize would also be an appropriate type.
223   static int header_offset_in_bytes() { return offset_of(ObjectMonitor, _header); }
224   static int object_offset_in_bytes() { return offset_of(ObjectMonitor, _object); }
225   static int owner_offset_in_bytes() { return offset_of(ObjectMonitor, _owner); }
226   static int ref_count_offset_in_bytes() { return offset_of(ObjectMonitor, _ref_count); }
227   static int recursions_offset_in_bytes() { return offset_of(ObjectMonitor, _recursions); }
228   static int cxq_offset_in_bytes() { return offset_of(ObjectMonitor, _cxq); }
229   static int succ_offset_in_bytes() { return offset_of(ObjectMonitor, _succ); }
230   static int EntryList_offset_in_bytes() { return offset_of(ObjectMonitor, _EntryList); }
231
232   // ObjectMonitor references can be ORed with markWord::monitor_value
233   // as part of the ObjectMonitor tagging mechanism. When we combine an
234   // ObjectMonitor reference with an offset, we need to remove the tag
235   // value in order to generate the proper address.
236   //
237   // We can either adjust the ObjectMonitor reference and then add the
238   // offset or we can adjust the offset that is added to the ObjectMonitor
239   // reference. The latter avoids an AGI (Address Generation Interlock)
240   // stall so the helper macro adjusts the offset value that is returned
241   // to the ObjectMonitor reference manipulation code:
242   //
243 #define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
244 ((ObjectMonitor::f ## _offset_in_bytes()) - markWord::monitor_value)
245
246   markWord header() const;
247   volatile markWord* header_addr();
248   void set_header(markWord hdr);
249
// Returns non-zero iff the monitor is in use. Counter values and pointer
// bits are OR-ed together, so treat the result strictly as zero vs.
// non-zero. Under AsyncDeflateIdleMonitors a DEFLATER_MARKER owner does
// not count as busy.
250   intptr_t is_busy() const {
251     // TODO-FIXME: assert _owner == null implies _recursions = 0
252     // We do not include _ref_count in the is_busy() check because
253     // _ref_count is for indicating that the ObjectMonitor* is in
254     // use which is orthogonal to whether the ObjectMonitor itself
255     // is in use for a locking operation.
256     intptr_t ret_code = _contentions | _waiters | intptr_t(_cxq) | intptr_t(_EntryList);
257     if (!AsyncDeflateIdleMonitors) {
258       ret_code |= intptr_t(_owner);
259     } else {
260       if (_owner != DEFLATER_MARKER) {
261         ret_code |= intptr_t(_owner);
262       }
263     }
264     return ret_code;
265   }
266   const char* is_busy_to_string(stringStream* ss);
267
268   intptr_t is_entered(Thread* current) const;
269
270   void* owner() const; // Returns NULL if DEFLATER_MARKER is observed.
271   // Returns true if owner field == DEFLATER_MARKER and false otherwise.
272   bool owner_is_DEFLATER_MARKER();
273   // Clear _owner field; current value must match old_value.
274   void release_clear_owner(void* old_value);
275   // Simply set _owner field to new_value; current value must match old_value.
276   void set_owner_from(void* old_value, void* new_value);
277   // Simply set _owner field to new_value; current value must match old_value1 or old_value2.
278   void set_owner_from(void* old_value1, void* old_value2, void* new_value);
279   // Simply set _owner field to self; current value must match basic_lock_p.
280   void set_owner_from_BasicLock(void* basic_lock_p, Thread* self);
281   // Try to set _owner field to new_value if the current value matches
282   // old_value, using Atomic::cmpxchg(). Otherwise, does not change the
283   // _owner field. Returns the prior value of the _owner field.
284   void* try_set_owner_from(void* old_value, void* new_value);
285
286   ObjectMonitor* next_om() const;
287   // Simply set _next_om field to new_value.
288   void set_next_om(ObjectMonitor* new_value);
289   // Try to set _next_om field to new_value if the current value matches
290   // old_value, using Atomic::cmpxchg(). Otherwise, does not change the
291   // _next_om field. Returns the prior value of the _next_om field.
292   ObjectMonitor* try_set_next_om(ObjectMonitor* old_value, ObjectMonitor* new_value);
293
294   jint waiters() const;
295
296   jint contentions() const;
297   intx recursions() const { return _recursions; }
298
// Accessors for walking the linked list of ObjectWaiter nodes.
301   ObjectWaiter* next_waiter(ObjectWaiter* o) { return o->_next; }
302   Thread* thread_of_waiter(ObjectWaiter* o) { return o->_thread; }
303
304 protected:
305   // We don't typically expect or want the ctors or dtors to run.
306   // normal ObjectMonitors are type-stable and immortal.
// Zero-fill the instance; AllocationState relies on Free == 0.
307   ObjectMonitor() { ::memset((void*)this, 0, sizeof(*this)); }
308
309   ~ObjectMonitor() {
310     // TODO: Add asserts ...
311     // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
312     // _contentions == 0 _EntryList == NULL etc
313   }
314
315 private:
// Reset the monitor to its free state. The assert covers _owner,
// _contentions and _waiters via is_busy(); only the queue/recursion
// fields need explicit clearing here.
316   void Recycle() {
317     // TODO: add stronger asserts ...
318     // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
319     // _contentions == 0 EntryList == NULL
320     // _recursions == 0 _WaitSet == NULL
321 #ifdef ASSERT
322     stringStream ss;
323 #endif
324     assert((is_busy() | _recursions) == 0, "freeing in-use monitor: %s, "
325 "recursions=" INTX_FORMAT, is_busy_to_string(&ss), _recursions);
326     _succ = NULL;
327     _EntryList = NULL;
328     _cxq = NULL;
329     _WaitSet = NULL;
330     _recursions = 0;
331   }
332
333 public:
334
335   void* object() const;
336   void* object_addr();
337   void set_object(void* obj);
// Async-deflation lifecycle and ref_count accessors; definitions are out
// of line.
338   void set_allocation_state(AllocationState s);
339   AllocationState allocation_state() const;
340   bool is_free() const;
341   bool is_old() const;
342   bool is_new() const;
343   void dec_ref_count();
344   void inc_ref_count();
345   jint ref_count() const;
346
347   // Returns true if the specified thread owns the ObjectMonitor. Otherwise
348   // returns false and throws IllegalMonitorStateException (IMSE).
349   bool check_owner(Thread* THREAD);
350   void clear();
351   void clear_using_JT();
352
// Core monitor operations; definitions are out of line.
353   void enter(TRAPS);
354   void exit(bool not_suspended, TRAPS);
355   void wait(jlong millis, bool interruptable, TRAPS);
356   void notify(TRAPS);
357   void notifyAll(TRAPS);
358
359   void print() const;
360 #ifdef ASSERT
361   void print_debug_style_on(outputStream* st) const;
362 #endif
363   void print_on(outputStream* st) const;
364
365   // Use the following at your own risk
366   intx complete_exit(TRAPS);
367   void reenter(intx recursions, TRAPS);
368
369 private:
// Internal enter/exit/wait machinery; definitions are out of line.
370   void AddWaiter(ObjectWaiter* waiter);
371   void INotify(Thread* self);
372   ObjectWaiter* DequeueWaiter();
373   void DequeueSpecificWaiter(ObjectWaiter* waiter);
374   void EnterI(TRAPS);
375   void ReenterI(Thread* self, ObjectWaiter* self_node);
376   void UnlinkAfterAcquire(Thread* self, ObjectWaiter* self_node);
377   int TryLock(Thread* self);
378   int NotRunnable(Thread* self, Thread* Owner);
379   int TrySpin(Thread* self);
380   void ExitEpilog(Thread* self, ObjectWaiter* Wakee);
381   bool ExitSuspendEquivalent(JavaThread* self);
382   void install_displaced_markword_in_object(const oop obj);
383 };
384
385 // A stack-allocated (StackObj) guard that holds an ObjectMonitor* and
386 // manages its ref_count; async deflation adds extra safety rules here.
387 class ObjectMonitorHandle : public StackObj {
388 private:
389   ObjectMonitor* _om_ptr; // the managed monitor, or NULL when unset
390 public:
391   ObjectMonitorHandle() : _om_ptr(NULL) {}
392   ~ObjectMonitorHandle();
393
394   ObjectMonitor* om_ptr() const { return _om_ptr; }
395   // Capture the ObjectMonitor* associated with the given markWord and
396   // bump its ref_count.
397   bool save_om_ptr(oop object, markWord mark);
398   // Capture the given ObjectMonitor* if deemed safe and bump its ref_count.
399   bool set_om_ptr_if_safe(ObjectMonitor* om_ptr);
400   // Forget the saved ObjectMonitor* and decrement its ref_count.
401   void unset_om_ptr();
402
403   // Reserved for internal use by ObjectSynchronizer::inflate().
404   void set_om_ptr(ObjectMonitor* om_ptr);
405 };
406
407 // Macro to use guarantee() for more strict AsyncDeflateIdleMonitors
408 // checks and assert() otherwise.
// NOTE(review): in the assert() branch the predicate may compile away in
// product builds, so callers should keep 'p' side-effect free - confirm.
409 #define ADIM_guarantee(p, ...) \
410 do { \
411 if (AsyncDeflateIdleMonitors) { \
412 guarantee(p, __VA_ARGS__); \
413 } else { \
414 assert(p, __VA_ARGS__); \
415 } \
416 } while (0)
417
418 #endif // SHARE_RUNTIME_OBJECTMONITOR_HPP
|