  // changed by ObjectMonitor::exit() so it is a good choice to share the
  // cache line with _owner.
  // Padding sized so that the fields above plus this pad fill out an
  // OM_CACHE_LINE_SIZE cache line (avoids false sharing with what follows).
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(void* volatile) +
                        sizeof(volatile jlong));
  ObjectMonitor* _next_om;            // Next ObjectMonitor* linkage
  volatile intx _recursions;          // recursion count, 0 for first entry
  ObjectWaiter* volatile _EntryList;  // Threads blocked on entry or reentry.
                                      // The list is actually composed of WaitNodes,
                                      // acting as proxies for Threads.

  ObjectWaiter* volatile _cxq;        // LL of recently-arrived threads blocked on entry.
  Thread* volatile _succ;             // Heir presumptive thread - used for futile wakeup throttling
  Thread* volatile _Responsible;      // NOTE(review): undocumented in original; presumably the
                                      // thread responsible for progress on the entry lists — confirm.

  volatile int _Spinner;              // for exit->spinner handoff optimization
  volatile int _SpinDuration;         // adaptive spin duration; presumably tuned by TrySpin() — confirm

  jint _contentions;                  // Number of active contentions in enter(). It is used by is_busy()
                                      // along with other fields to determine if an ObjectMonitor can be
                                      // deflated. It is also used by the async deflation protocol. See
                                      // ObjectSynchronizer::deflate_monitor() and deflate_monitor_using_JT().
 protected:
  ObjectWaiter* volatile _WaitSet;    // LL of threads wait()ing on the monitor
  volatile jint _waiters;             // number of waiting threads
 private:
  volatile int _WaitSetLock;          // protects Wait Queue - simple spinlock
 public:
  // One-time static setup; presumably allocates the _sync_* PerfData
  // counters referenced by OM_PERFDATA_OP below — confirm at definition.
  static void Initialize();

  // Only perform a PerfData operation if the PerfData object has been
  // allocated and if the PerfDataManager has not freed the PerfData
  // objects which can happen at normal VM shutdown.
  //
#define OM_PERFDATA_OP(f, op_str)             \
  do {                                        \
    if (ObjectMonitor::_sync_ ## f != NULL && \
        PerfDataManager::has_PerfData()) {    \
      ObjectMonitor::_sync_ ## f->op_str;     \
    }                                         \
  } while (0)
  // as part of the ObjectMonitor tagging mechanism. When we combine an
  // ObjectMonitor reference with an offset, we need to remove the tag
  // value in order to generate the proper address.
  //
  // We can either adjust the ObjectMonitor reference and then add the
  // offset or we can adjust the offset that is added to the ObjectMonitor
  // reference. The latter avoids an AGI (Address Generation Interlock)
  // stall so the helper macro adjusts the offset value that is returned
  // to the ObjectMonitor reference manipulation code:
  //
#define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
  ((ObjectMonitor::f ## _offset_in_bytes()) - markWord::monitor_value)

  // Accessors for the monitor's stored object header markWord.
  // NOTE(review): backing field is not visible in this excerpt.
  markWord header() const;
  volatile markWord* header_addr();
  void set_header(markWord hdr);
243 intptr_t is_busy() const {
244 // TODO-FIXME: assert _owner == null implies _recursions = 0
245 intptr_t ret_code = _waiters | intptr_t(_cxq) | intptr_t(_EntryList);
246 if (!AsyncDeflateIdleMonitors) {
247 ret_code |= contentions() | intptr_t(_owner);
248 } else {
249 if (contentions() > 0) {
250 ret_code |= contentions();
251 }
252 if (_owner != DEFLATER_MARKER) {
253 ret_code |= intptr_t(_owner);
254 }
255 }
256 return ret_code;
257 }
  // Pretty-prints the busy state into ss; companion to is_busy().
  const char* is_busy_to_string(stringStream* ss);

  // Non-zero iff 'current' holds this monitor (declaration only here).
  intptr_t is_entered(Thread* current) const;

  void* owner() const;  // Returns NULL if DEFLATER_MARKER is observed.
  // Returns true if owner field == DEFLATER_MARKER and false otherwise.
  bool owner_is_DEFLATER_MARKER();
  // Returns true if 'this' is being async deflated and false otherwise.
  bool is_being_async_deflated();
  // Clear _owner field; current value must match old_value.
  void release_clear_owner(void* old_value);
  // Simply set _owner field to new_value; current value must match old_value.
  void set_owner_from(void* old_value, void* new_value);
  // Simply set _owner field to new_value; current value must match old_value1 or old_value2.
  void set_owner_from(void* old_value1, void* old_value2, void* new_value);
  // Simply set _owner field to self; current value must match basic_lock_p.
  void set_owner_from_BasicLock(void* basic_lock_p, Thread* self);
  // Try to set _owner field to new_value if the current value matches
  // Use the following at your own risk
  intx complete_exit(TRAPS);
  bool reenter(intx recursions, TRAPS);

 private:
  // Internal queue / slow-path helpers. NOTE(review): bodies are not in
  // this excerpt; comments below reflect only what the names/signatures show.
  void AddWaiter(ObjectWaiter* waiter);       // append to _WaitSet
  void INotify(Thread* self);                 // notify implementation
  ObjectWaiter* DequeueWaiter();              // pop from _WaitSet
  void DequeueSpecificWaiter(ObjectWaiter* waiter);
  void EnterI(TRAPS);                         // contended-enter slow path
  void ReenterI(Thread* self, ObjectWaiter* self_node);
  void UnlinkAfterAcquire(Thread* self, ObjectWaiter* self_node);
  int TryLock(Thread* self);
  int NotRunnable(Thread* self, Thread* Owner);
  int TrySpin(Thread* self);                  // adaptive spinning before blocking
  void ExitEpilog(Thread* self, ObjectWaiter* Wakee);
  bool ExitSuspendEquivalent(JavaThread* self);
  // Restore the displaced header markWord into obj (used during deflation —
  // presumably; confirm at definition).
  void install_displaced_markword_in_object(const oop obj);
};
376
// Macro to use guarantee() for more strict AsyncDeflateIdleMonitors
// checks and assert() otherwise.
#define ADIM_guarantee(p, ...)         \
  do {                                 \
    if (!AsyncDeflateIdleMonitors) {   \
      assert(p, __VA_ARGS__);          \
    } else {                           \
      guarantee(p, __VA_ARGS__);       \
    }                                  \
  } while (0)
387
388 #endif // SHARE_RUNTIME_OBJECTMONITOR_HPP
|
  // changed by ObjectMonitor::exit() so it is a good choice to share the
  // cache line with _owner.
  // Padding sized so that the fields above plus this pad fill out an
  // OM_CACHE_LINE_SIZE cache line (avoids false sharing with what follows).
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(void* volatile) +
                        sizeof(volatile jlong));
  ObjectMonitor* _next_om;            // Next ObjectMonitor* linkage
  volatile intx _recursions;          // recursion count, 0 for first entry
  ObjectWaiter* volatile _EntryList;  // Threads blocked on entry or reentry.
                                      // The list is actually composed of WaitNodes,
                                      // acting as proxies for Threads.

  ObjectWaiter* volatile _cxq;        // LL of recently-arrived threads blocked on entry.
  Thread* volatile _succ;             // Heir presumptive thread - used for futile wakeup throttling
  Thread* volatile _Responsible;      // NOTE(review): undocumented in original; presumably the
                                      // thread responsible for progress on the entry lists — confirm.

  volatile int _Spinner;              // for exit->spinner handoff optimization
  volatile int _SpinDuration;         // adaptive spin duration; presumably tuned by TrySpin() — confirm

  jint _contentions;                  // Number of active contentions in enter(). It is used by is_busy()
                                      // along with other fields to determine if an ObjectMonitor can be
                                      // deflated. It is also used by the async deflation protocol. See
                                      // ObjectSynchronizer::deflate_monitor_using_JT().
 protected:
  ObjectWaiter* volatile _WaitSet;    // LL of threads wait()ing on the monitor
  volatile jint _waiters;             // number of waiting threads
 private:
  volatile int _WaitSetLock;          // protects Wait Queue - simple spinlock
 public:
  // One-time static setup; presumably allocates the _sync_* PerfData
  // counters referenced by OM_PERFDATA_OP below — confirm at definition.
  static void Initialize();

  // Only perform a PerfData operation if the PerfData object has been
  // allocated and if the PerfDataManager has not freed the PerfData
  // objects which can happen at normal VM shutdown.
  //
#define OM_PERFDATA_OP(f, op_str)             \
  do {                                        \
    if (ObjectMonitor::_sync_ ## f != NULL && \
        PerfDataManager::has_PerfData()) {    \
      ObjectMonitor::_sync_ ## f->op_str;     \
    }                                         \
  } while (0)
  // as part of the ObjectMonitor tagging mechanism. When we combine an
  // ObjectMonitor reference with an offset, we need to remove the tag
  // value in order to generate the proper address.
  //
  // We can either adjust the ObjectMonitor reference and then add the
  // offset or we can adjust the offset that is added to the ObjectMonitor
  // reference. The latter avoids an AGI (Address Generation Interlock)
  // stall so the helper macro adjusts the offset value that is returned
  // to the ObjectMonitor reference manipulation code:
  //
#define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
  ((ObjectMonitor::f ## _offset_in_bytes()) - markWord::monitor_value)

  // Accessors for the monitor's stored object header markWord.
  // NOTE(review): backing field is not visible in this excerpt.
  markWord header() const;
  volatile markWord* header_addr();
  void set_header(markWord hdr);
243 intptr_t is_busy() const {
244 // TODO-FIXME: assert _owner == null implies _recursions = 0
245 intptr_t ret_code = _waiters | intptr_t(_cxq) | intptr_t(_EntryList);
246 if (contentions() > 0) {
247 ret_code |= contentions();
248 }
249 if (_owner != DEFLATER_MARKER) {
250 ret_code |= intptr_t(_owner);
251 }
252 return ret_code;
253 }
  // Pretty-prints the busy state into ss; companion to is_busy().
  const char* is_busy_to_string(stringStream* ss);

  // Non-zero iff 'current' holds this monitor (declaration only here).
  intptr_t is_entered(Thread* current) const;

  void* owner() const;  // Returns NULL if DEFLATER_MARKER is observed.
  // Returns true if owner field == DEFLATER_MARKER and false otherwise.
  bool owner_is_DEFLATER_MARKER();
  // Returns true if 'this' is being async deflated and false otherwise.
  bool is_being_async_deflated();
  // Clear _owner field; current value must match old_value.
  void release_clear_owner(void* old_value);
  // Simply set _owner field to new_value; current value must match old_value.
  void set_owner_from(void* old_value, void* new_value);
  // Simply set _owner field to new_value; current value must match old_value1 or old_value2.
  void set_owner_from(void* old_value1, void* old_value2, void* new_value);
  // Simply set _owner field to self; current value must match basic_lock_p.
  void set_owner_from_BasicLock(void* basic_lock_p, Thread* self);
  // Try to set _owner field to new_value if the current value matches
  // Use the following at your own risk
  intx complete_exit(TRAPS);
  bool reenter(intx recursions, TRAPS);

 private:
  // Internal queue / slow-path helpers. NOTE(review): bodies are not in
  // this excerpt; comments below reflect only what the names/signatures show.
  void AddWaiter(ObjectWaiter* waiter);       // append to _WaitSet
  void INotify(Thread* self);                 // notify implementation
  ObjectWaiter* DequeueWaiter();              // pop from _WaitSet
  void DequeueSpecificWaiter(ObjectWaiter* waiter);
  void EnterI(TRAPS);                         // contended-enter slow path
  void ReenterI(Thread* self, ObjectWaiter* self_node);
  void UnlinkAfterAcquire(Thread* self, ObjectWaiter* self_node);
  int TryLock(Thread* self);
  int NotRunnable(Thread* self, Thread* Owner);
  int TrySpin(Thread* self);                  // adaptive spinning before blocking
  void ExitEpilog(Thread* self, ObjectWaiter* Wakee);
  bool ExitSuspendEquivalent(JavaThread* self);
  // Restore the displaced header markWord into obj (used during deflation —
  // presumably; confirm at definition).
  void install_displaced_markword_in_object(const oop obj);
};
372
373 #endif // SHARE_RUNTIME_OBJECTMONITOR_HPP
|