< prev index next >

src/hotspot/share/runtime/objectMonitor.hpp

Print this page
rev 54415 : 8222295: more baseline cleanups from Async Monitor Deflation project
rev 54416 : Checkpoint latest preliminary review patches for full OpenJDK review; merge with 8222295.patch.
rev 54417 : imported patch dcubed.monitor_deflate_conc.v2.01


 119 //     _recursions, _EntryList, _cxq, and _succ, all of which may be
 120 //     fetched in the inflated unlock path, on a different cache line
 121 //     would make them immune to CAS-based invalidation from the _owner
 122 //     field.
 123 //
 124 //   - The _recursions field should be of type int, or int32_t but not
 125 //     intptr_t. There's no reason to use a 64-bit type for this field
 126 //     in a 64-bit JVM.
 127 
 128 class ObjectMonitor {
 129  public:
 130   enum {
 131     OM_OK,                    // no error
 132     OM_SYSTEM_ERROR,          // operating system error
 133     OM_ILLEGAL_MONITOR_STATE, // IllegalMonitorStateException
 134     OM_INTERRUPTED,           // Thread.interrupt()
 135     OM_TIMED_OUT              // Object.wait() timed out
 136   };
 137 
 138  private:

 139   friend class ObjectSynchronizer;
 140   friend class ObjectWaiter;
 141   friend class VMStructs;
 142 
 143   volatile markOop   _header;       // displaced object header word - mark
 144   void*     volatile _object;       // backward object pointer - strong root
 145  public:
 146   ObjectMonitor*     FreeNext;      // Free list linkage
 147  private:
 148   DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE,
 149                         sizeof(volatile markOop) + sizeof(void * volatile) +
 150                         sizeof(ObjectMonitor *));
 151  protected:                         // protected for JvmtiRawMonitor


 152   void *  volatile _owner;          // pointer to owning thread OR BasicLock
 153   volatile jlong _previous_owner_tid;  // thread id of the previous owner of the monitor
 154   volatile intptr_t  _recursions;   // recursion count, 0 for first entry
 155   ObjectWaiter * volatile _EntryList; // Threads blocked on entry or reentry.
 156                                       // The list is actually composed of WaitNodes,
 157                                       // acting as proxies for Threads.
 158  private:
 159   ObjectWaiter * volatile _cxq;     // LL of recently-arrived threads blocked on entry.
 160   Thread * volatile _succ;          // Heir presumptive thread - used for futile wakeup throttling
 161   Thread * volatile _Responsible;
 162 
 163   volatile int _Spinner;            // for exit->spinner handoff optimization
 164   volatile int _SpinDuration;
 165 
 166   volatile jint  _contentions;      // Number of active contentions in enter(). It is used by is_busy()
 167                                     // along with other fields to determine if an ObjectMonitor can be
 168                                     // deflated. See ObjectSynchronizer::deflate_monitor().
 169  protected:
 170   ObjectWaiter * volatile _WaitSet; // LL of threads wait()ing on the monitor
 171   volatile jint  _waiters;          // number of waiting threads
 172  private:
 173   volatile int _WaitSetLock;        // protects Wait Queue - simple spinlock







 174 
 175  public:
 176   static void Initialize();
 177 
 178   // Only perform a PerfData operation if the PerfData object has been
 179   // allocated and if the PerfDataManager has not freed the PerfData
 180   // objects which can happen at normal VM shutdown.
 181   //
 182   #define OM_PERFDATA_OP(f, op_str)              \
 183     do {                                         \
 184       if (ObjectMonitor::_sync_ ## f != NULL &&  \
 185           PerfDataManager::has_PerfData()) {     \
 186         ObjectMonitor::_sync_ ## f->op_str;      \
 187       }                                          \
 188     } while (0)
 189 
 190   static PerfCounter * _sync_ContendedLockAttempts;
 191   static PerfCounter * _sync_FutileWakeups;
 192   static PerfCounter * _sync_Parks;
 193   static PerfCounter * _sync_Notifications;


 215   // ObjectMonitor references can be ORed with markOopDesc::monitor_value
 216   // as part of the ObjectMonitor tagging mechanism. When we combine an
 217   // ObjectMonitor reference with an offset, we need to remove the tag
 218   // value in order to generate the proper address.
 219   //
 220   // We can either adjust the ObjectMonitor reference and then add the
 221   // offset or we can adjust the offset that is added to the ObjectMonitor
 222   // reference. The latter avoids an AGI (Address Generation Interlock)
 223   // stall so the helper macro adjusts the offset value that is returned
 224   // to the ObjectMonitor reference manipulation code:
 225   //
 226   #define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
 227     ((ObjectMonitor::f ## _offset_in_bytes()) - markOopDesc::monitor_value)
 228 
 229   markOop   header() const;
 230   volatile markOop* header_addr();
 231   void      set_header(markOop hdr);
 232 
 233   intptr_t is_busy() const {
 234     // TODO-FIXME: assert _owner == null implies _recursions = 0




 235     return _contentions|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList);
 236   }
 237 

















 238   intptr_t  is_entered(Thread* current) const;
 239 
 240   void*     owner() const;
 241   void      set_owner(void* owner);
 242 
 243   jint      waiters() const;
 244 
 245   jint      contentions() const;
 246   intptr_t  recursions() const                                         { return _recursions; }
 247 
 248   // JVM/TI GetObjectMonitorUsage() needs this:
 249   ObjectWaiter* first_waiter()                                         { return _WaitSet; }
 250   ObjectWaiter* next_waiter(ObjectWaiter* o)                           { return o->_next; }
 251   Thread* thread_of_waiter(ObjectWaiter* o)                            { return o->_thread; }
 252 
 253  protected:
 254   // We don't typically expect or want the ctors or dtors to run.
 255   // normal ObjectMonitors are type-stable and immortal.
 256   ObjectMonitor() { ::memset((void *)this, 0, sizeof(*this)); }
 257 
 258   ~ObjectMonitor() {
 259     // TODO: Add asserts ...
 260     // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0


 263 
 264  private:
 265   void Recycle() {
 266     // TODO: add stronger asserts ...
 267     // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
 268     // _contentions == 0 EntryList  == NULL
 269     // _recursions == 0 _WaitSet == NULL
 270     assert(((is_busy()|_recursions) == 0), "freeing inuse monitor");
 271     _succ          = NULL;
 272     _EntryList     = NULL;
 273     _cxq           = NULL;
 274     _WaitSet       = NULL;
 275     _recursions    = 0;
 276   }
 277 
 278  public:
 279 
 280   void*     object() const;
 281   void*     object_addr();
 282   void      set_object(void* obj);









 283 
 284   bool      check(TRAPS);       // true if the thread owns the monitor.
 285   void      check_slow(TRAPS);
 286   void      clear();

 287 
 288   void      enter(TRAPS);
 289   void      exit(bool not_suspended, TRAPS);
 290   void      wait(jlong millis, bool interruptable, TRAPS);
 291   void      notify(TRAPS);
 292   void      notifyAll(TRAPS);
 293 
 294 // Use the following at your own risk
 295   intptr_t  complete_exit(TRAPS);
 296   void      reenter(intptr_t recursions, TRAPS);
 297 
 298  private:
 299   void      AddWaiter(ObjectWaiter * waiter);
 300   void      INotify(Thread * Self);
 301   ObjectWaiter * DequeueWaiter();
 302   void      DequeueSpecificWaiter(ObjectWaiter * waiter);
 303   void      EnterI(TRAPS);
 304   void      ReenterI(Thread * Self, ObjectWaiter * SelfNode);
 305   void      UnlinkAfterAcquire(Thread * Self, ObjectWaiter * SelfNode);
 306   int       TryLock(Thread * Self);
 307   int       NotRunnable(Thread * Self, Thread * Owner);
 308   int       TrySpin(Thread * Self);
 309   void      ExitEpilog(Thread * Self, ObjectWaiter * Wakee);
 310   bool      ExitSuspendEquivalent(JavaThread * Self);





















 311 };











 312 
 313 #endif // SHARE_RUNTIME_OBJECTMONITOR_HPP


 119 //     _recursions, _EntryList, _cxq, and _succ, all of which may be
 120 //     fetched in the inflated unlock path, on a different cache line
 121 //     would make them immune to CAS-based invalidation from the _owner
 122 //     field.
 123 //
 124 //   - The _recursions field should be of type int, or int32_t but not
 125 //     intptr_t. There's no reason to use a 64-bit type for this field
 126 //     in a 64-bit JVM.
 127 
 128 class ObjectMonitor {
 129  public:
 130   enum {
 131     OM_OK,                    // no error
 132     OM_SYSTEM_ERROR,          // operating system error
 133     OM_ILLEGAL_MONITOR_STATE, // IllegalMonitorStateException
 134     OM_INTERRUPTED,           // Thread.interrupt()
 135     OM_TIMED_OUT              // Object.wait() timed out
 136   };
 137 
 138  private:
 139   friend class ObjectMonitorHandle;
 140   friend class ObjectSynchronizer;
 141   friend class ObjectWaiter;
 142   friend class VMStructs;
 143 
 144   volatile markOop   _header;       // displaced object header word - mark
 145   void*     volatile _object;       // backward object pointer - strong root
 146  public:
 147   ObjectMonitor*     FreeNext;      // Free list linkage
 148  private:
 149   DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE,
 150                         sizeof(volatile markOop) + sizeof(void * volatile) +
 151                         sizeof(ObjectMonitor *));
 152  protected:                         // protected for JvmtiRawMonitor
 153   // Used by async deflation as a marker in the _owner field:
 154   #define DEFLATER_MARKER reinterpret_cast<void*>(-1)
 155   void *  volatile _owner;          // pointer to owning thread OR BasicLock
 156   volatile jlong _previous_owner_tid;  // thread id of the previous owner of the monitor
 157   volatile intptr_t  _recursions;   // recursion count, 0 for first entry
 158   ObjectWaiter * volatile _EntryList; // Threads blocked on entry or reentry.
 159                                       // The list is actually composed of WaitNodes,
 160                                       // acting as proxies for Threads.
 161  private:
 162   ObjectWaiter * volatile _cxq;     // LL of recently-arrived threads blocked on entry.
 163   Thread * volatile _succ;          // Heir presumptive thread - used for futile wakeup throttling
 164   Thread * volatile _Responsible;
 165 
 166   volatile int _Spinner;            // for exit->spinner handoff optimization
 167   volatile int _SpinDuration;
 168 
 169   volatile jint  _contentions;      // Number of active contentions in enter(). It is used by is_busy()
 170                                     // along with other fields to determine if an ObjectMonitor can be
 171                                     // deflated. See ObjectSynchronizer::deflate_monitor().
 172  protected:
 173   ObjectWaiter * volatile _WaitSet; // LL of threads wait()ing on the monitor
 174   volatile jint  _waiters;          // number of waiting threads
 175  private:
 176   volatile int _WaitSetLock;        // protects Wait Queue - simple spinlock
 177   volatile jint _ref_count;         // ref count for ObjectMonitor*
 178   typedef enum {
 179     Free = 0,  // Free must be 0 for monitor to be free after memset(..,0,..).
 180     New,
 181     Old
 182   } AllocationState;
 183   AllocationState _allocation_state;
 184 
 185  public:
 186   static void Initialize();
 187 
 188   // Only perform a PerfData operation if the PerfData object has been
 189   // allocated and if the PerfDataManager has not freed the PerfData
 190   // objects which can happen at normal VM shutdown.
 191   //
 192   #define OM_PERFDATA_OP(f, op_str)              \
 193     do {                                         \
 194       if (ObjectMonitor::_sync_ ## f != NULL &&  \
 195           PerfDataManager::has_PerfData()) {     \
 196         ObjectMonitor::_sync_ ## f->op_str;      \
 197       }                                          \
 198     } while (0)
 199 
 200   static PerfCounter * _sync_ContendedLockAttempts;
 201   static PerfCounter * _sync_FutileWakeups;
 202   static PerfCounter * _sync_Parks;
 203   static PerfCounter * _sync_Notifications;


 225   // ObjectMonitor references can be ORed with markOopDesc::monitor_value
 226   // as part of the ObjectMonitor tagging mechanism. When we combine an
 227   // ObjectMonitor reference with an offset, we need to remove the tag
 228   // value in order to generate the proper address.
 229   //
 230   // We can either adjust the ObjectMonitor reference and then add the
 231   // offset or we can adjust the offset that is added to the ObjectMonitor
 232   // reference. The latter avoids an AGI (Address Generation Interlock)
 233   // stall so the helper macro adjusts the offset value that is returned
 234   // to the ObjectMonitor reference manipulation code:
 235   //
 236   #define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
 237     ((ObjectMonitor::f ## _offset_in_bytes()) - markOopDesc::monitor_value)
 238 
 239   markOop   header() const;
 240   volatile markOop* header_addr();
 241   void      set_header(markOop hdr);
 242 
 243   intptr_t is_busy() const {
 244     // TODO-FIXME: assert _owner == null implies _recursions = 0
 245     // We do not include _ref_count in the is_busy() check because
 246     // _ref_count is for indicating that the ObjectMonitor* is in
 247     // use which is orthogonal to whether the ObjectMonitor itself
 248     // is in use for a locking operation.
      // "Busy" is the bitwise OR of the contention count, waiter count
      // and the owner/cxq/EntryList pointers; callers only test != 0.
 249     return _contentions|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList);
 250   }
 251 
 252   // Version of is_busy() that accounts for special values in
 253   // _contentions and _owner when AsyncDeflateIdleMonitors is enabled.
 254   intptr_t is_busy_async() const {
      // Fields with no async-deflation special values are always included.
 255     intptr_t ret_code = _waiters | intptr_t(_cxq) | intptr_t(_EntryList);
 256     if (!AsyncDeflateIdleMonitors) {
      // Without async deflation this degenerates to the is_busy() check.
 257       ret_code |= _contentions | intptr_t(_owner);
 258     } else {
      // With async deflation, a non-positive _contentions value does not
      // count as busy (NOTE(review): presumably a negative value is the
      // async-deflation marker — confirm in objectMonitor.cpp), ...
 259       if (_contentions > 0) {
 260         ret_code |= _contentions;
 261       }
      // ... and an _owner field holding DEFLATER_MARKER does not either.
 262       if (_owner != DEFLATER_MARKER) {
 263         ret_code |= intptr_t(_owner);
 264       }
 265     }
 266     return ret_code;
 267   }
 268 
 269   intptr_t  is_entered(Thread* current) const;
 270 
 271   void*     owner() const;  // Returns NULL if DEFLATER_MARKER is observed.
 272   void      set_owner(void* owner);
 273 
 274   jint      waiters() const;
 275 
 276   jint      contentions() const;
 277   intptr_t  recursions() const                                         { return _recursions; }
 278 
 279   // JVM/TI GetObjectMonitorUsage() needs this:
 280   ObjectWaiter* first_waiter()                                         { return _WaitSet; }
 281   ObjectWaiter* next_waiter(ObjectWaiter* o)                           { return o->_next; }
 282   Thread* thread_of_waiter(ObjectWaiter* o)                            { return o->_thread; }
 283 
 284  protected:
 285   // We don't typically expect or want the ctors or dtors to run.
 286   // normal ObjectMonitors are type-stable and immortal.
 287   ObjectMonitor() { ::memset((void *)this, 0, sizeof(*this)); }
 288 
 289   ~ObjectMonitor() {
 290     // TODO: Add asserts ...
 291     // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0


 294 
 295  private:
 296   void Recycle() {
 297     // TODO: add stronger asserts ...
 298     // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
 299     // _contentions == 0 EntryList  == NULL
 300     // _recursions == 0 _WaitSet == NULL
      // The assert covers _contentions, _waiters, _owner, _cxq and
      // _EntryList via is_busy(); they are expected to already be clear.
 301     assert(((is_busy()|_recursions) == 0), "freeing inuse monitor");
      // Reset the list/recursion state so the monitor is clean when it
      // is returned to the free list.
 302     _succ          = NULL;
 303     _EntryList     = NULL;
 304     _cxq           = NULL;
 305     _WaitSet       = NULL;
 306     _recursions    = 0;
 307   }
 308 
 309  public:
 310 
 311   void*     object() const;
 312   void*     object_addr();
 313   void      set_object(void* obj);
 314   void      set_allocation_state(AllocationState s);
 315   AllocationState allocation_state() const;
 316   bool      is_free() const;
 317   bool      is_active() const;
 318   bool      is_old() const;
 319   bool      is_new() const;
 320   void      dec_ref_count();
 321   void      inc_ref_count();
 322   jint      ref_count() const;
 323 
 324   bool      check(TRAPS);       // true if the thread owns the monitor.
 325   void      check_slow(TRAPS);
 326   void      clear();
 327   void      clear_using_JT();
 328 
 329   bool      enter(TRAPS);  // Returns false if monitor is being async deflated and caller should retry locking the object.
 330   void      exit(bool not_suspended, TRAPS);
 331   void      wait(jlong millis, bool interruptable, TRAPS);
 332   void      notify(TRAPS);
 333   void      notifyAll(TRAPS);
 334 
 335 // Use the following at your own risk
 336   intptr_t  complete_exit(TRAPS);
 337   bool      reenter(intptr_t recursions, TRAPS);  // Returns false if monitor is being async deflated and caller should retry locking the object.
 338 
 339  private:
 340   void      AddWaiter(ObjectWaiter * waiter);
 341   void      INotify(Thread * Self);
 342   ObjectWaiter * DequeueWaiter();
 343   void      DequeueSpecificWaiter(ObjectWaiter * waiter);
 344   void      EnterI(TRAPS);
 345   void      ReenterI(Thread * Self, ObjectWaiter * SelfNode);
 346   void      UnlinkAfterAcquire(Thread * Self, ObjectWaiter * SelfNode);
 347   int       TryLock(Thread * Self);
 348   int       NotRunnable(Thread * Self, Thread * Owner);
 349   int       TrySpin(Thread * Self);
 350   void      ExitEpilog(Thread * Self, ObjectWaiter * Wakee);
 351   bool      ExitSuspendEquivalent(JavaThread * Self);
 352   void      install_displaced_markword_in_object(const oop obj);
 353 };
 354 
 355 // A helper object for managing an ObjectMonitor*'s ref_count. There
 356 // are special safety considerations when async deflation is used.
 357 class ObjectMonitorHandle : public StackObj {
 358  private:
 359   ObjectMonitor * _om_ptr;
 360  public:
 361   ObjectMonitorHandle() { _om_ptr = NULL; }
      // NOTE(review): presumably drops the ref_count taken by
      // save_om_ptr()/set_om_ptr() — confirm in objectMonitor.cpp.
 362   ~ObjectMonitorHandle();
 363 
 364   ObjectMonitor * om_ptr() const { return _om_ptr; }
 365   // Save the ObjectMonitor* associated with the specified markOop and
 366   // increment the ref_count.
 367   bool save_om_ptr(oop object, markOop mark);
 368 
 369   // For internal use by ObjectSynchronizer::monitors_iterate().
 370   ObjectMonitorHandle(ObjectMonitor * _om_ptr);
 371   // For internal use by ObjectSynchronizer::inflate().
 372   void set_om_ptr(ObjectMonitor * om_ptr);
 373 };
 374 
 375 // Macro to use guarantee() for more strict AsyncDeflateIdleMonitors
 376 // checks and assert() otherwise.
// Only the branch selected by the runtime flag evaluates 'p'.
// NOTE(review): assert() is debug-only in HotSpot builds, so 'p' may go
// unevaluated in product builds when AsyncDeflateIdleMonitors is false —
// keep 'p' free of side effects.
 377 #define ADIM_guarantee(p, ...)       \
 378   do {                               \
 379     if (AsyncDeflateIdleMonitors) {  \
 380       guarantee(p, __VA_ARGS__);     \
 381     } else {                         \
 382       assert(p, __VA_ARGS__);        \
 383     }                                \
 384   } while (0)
 385 
 386 #endif // SHARE_RUNTIME_OBJECTMONITOR_HPP
< prev index next >