
src/hotspot/share/runtime/objectMonitor.hpp

rev 51882 : imported patch 8211176-eager-object-monitor-init


 110 //   in synchronizer.cpp. Also see TEST_VM(SynchronizerTest, sanity) gtest.
 111 //
 112 // Futures notes:
 113 //   - Separating _owner from the <remaining_fields> by enough space to
 114 //     avoid false sharing might be profitable. Given
 115 //     http://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
 116 //     we know that the CAS in monitorenter will invalidate the line
 117 //     underlying _owner. We want to avoid an L1 data cache miss on that
 118 //     same line for monitorexit. Putting these <remaining_fields>:
 119 //     _recursions, _EntryList, _cxq, and _succ, all of which may be
 120 //     fetched in the inflated unlock path, on a different cache line
 121 //     would make them immune to CAS-based invalidation from the _owner
 122 //     field.
 123 //
 124 //   - The _recursions field should be of type int, or int32_t but not
 125 //     intptr_t. There's no reason to use a 64-bit type for this field
 126 //     in a 64-bit JVM.
 127 
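[Editor's sketch, not part of the file: one way to realize the separation that the futures note above suggests, reusing the DEFINE_PAD_MINUS_SIZE macro and DEFAULT_CACHE_LINE_SIZE constant this class already uses. The struct name and field ordering are illustrative assumptions, not the file's actual layout.]

     struct OwnerPaddingSketch {                   // hypothetical name, for illustration only
       void * volatile _owner;                     // CAS'd in monitorenter
       // Pad out the rest of _owner's cache line so the fields below land on a
       // different line and are not invalidated by that CAS.
       DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(void * volatile));
       volatile intptr_t       _recursions;        // fetched on the inflated unlock path
       ObjectWaiter * volatile _EntryList;         // likewise kept off _owner's line
       ObjectWaiter * volatile _cxq;
       Thread * volatile       _succ;
     };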
 128 class ObjectMonitor {
 129  public:



 130   enum {
 131     OM_OK,                    // no error
 132     OM_SYSTEM_ERROR,          // operating system error
 133     OM_ILLEGAL_MONITOR_STATE, // IllegalMonitorStateException
 134     OM_INTERRUPTED,           // Thread.interrupt()
 135     OM_TIMED_OUT              // Object.wait() timed out
 136   };
 137 
 138  private:
 139   friend class ObjectSynchronizer;
 140   friend class ObjectWaiter;
 141   friend class VMStructs;
 142 
 143   volatile markOop   _header;       // displaced object header word - mark
 144   void*     volatile _object;       // backward object pointer - strong root
 145  public:
 146   ObjectMonitor*     FreeNext;      // Free list linkage
 147  private:
 148   DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE,
 149                         sizeof(volatile markOop) + sizeof(void * volatile) +


 156                                       // The list is actually composed of WaitNodes,
 157                                       // acting as proxies for Threads.
 158  private:
 159   ObjectWaiter * volatile _cxq;     // LL of recently-arrived threads blocked on entry.
 160   Thread * volatile _succ;          // Heir presumptive thread - used for futile wakeup throttling
 161   Thread * volatile _Responsible;
 162 
 163   volatile int _Spinner;            // for exit->spinner handoff optimization
 164   volatile int _SpinDuration;
 165 
 166   volatile jint  _count;            // reference count to prevent reclamation/deflation
 167                                     // at stop-the-world time.  See deflate_idle_monitors().
 168                                     // _count is approximately |_WaitSet| + |_EntryList|
 169  protected:
 170   ObjectWaiter * volatile _WaitSet; // LL of threads wait()ing on the monitor
 171   volatile jint  _waiters;          // number of waiting threads
 172  private:
 173   volatile int _WaitSetLock;        // protects Wait Queue - simple spinlock
 174 
 175  public:
 176   static void Initialize();
 177 
 178   // Only perform a PerfData operation if the PerfData object has been
 179   // allocated and if the PerfDataManager has not freed the PerfData
 180   // objects which can happen at normal VM shutdown.
 181   //
 182   #define OM_PERFDATA_OP(f, op_str)              \
 183     do {                                         \
 184       if (ObjectMonitor::_sync_ ## f != NULL &&  \
 185           PerfDataManager::has_PerfData()) {     \
 186         ObjectMonitor::_sync_ ## f->op_str;      \
 187       }                                          \
 188     } while (0)
 189 
 190   static PerfCounter * _sync_ContendedLockAttempts;
 191   static PerfCounter * _sync_FutileWakeups;
 192   static PerfCounter * _sync_Parks;
 193   static PerfCounter * _sync_Notifications;
 194   static PerfCounter * _sync_Inflations;
 195   static PerfCounter * _sync_Deflations;
 196   static PerfLongVariable * _sync_MonExtant;
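[Editor's sketch: a typical use of the macro above. The real call sites are elsewhere in the runtime (e.g. objectMonitor.cpp and synchronizer.cpp) and are not part of this page; with f = Inflations and op_str = inc(), OM_PERFDATA_OP expands to a counter update guarded by both the NULL check and PerfDataManager::has_PerfData().]

     // OM_PERFDATA_OP(Inflations, inc()); expands to:
     do {
       if (ObjectMonitor::_sync_Inflations != NULL &&
           PerfDataManager::has_PerfData()) {
         ObjectMonitor::_sync_Inflations->inc();
       }
     } while (0);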
 197 


 285   void*     object() const;
 286   void*     object_addr();
 287   void      set_object(void* obj);
 288 
 289   bool      check(TRAPS);       // true if the thread owns the monitor.
 290   void      check_slow(TRAPS);
 291   void      clear();
 292 
 293   void      enter(TRAPS);
 294   void      exit(bool not_suspended, TRAPS);
 295   void      wait(jlong millis, bool interruptable, TRAPS);
 296   void      notify(TRAPS);
 297   void      notifyAll(TRAPS);
 298 
 299 // Use the following at your own risk
 300   intptr_t  complete_exit(TRAPS);
 301   void      reenter(intptr_t recursions, TRAPS);
 302 
 303  private:
 304   void      AddWaiter(ObjectWaiter * waiter);
 305   static    void DeferredInitialize();
 306   void      INotify(Thread * Self);
 307   ObjectWaiter * DequeueWaiter();
 308   void      DequeueSpecificWaiter(ObjectWaiter * waiter);
 309   void      EnterI(TRAPS);
 310   void      ReenterI(Thread * Self, ObjectWaiter * SelfNode);
 311   void      UnlinkAfterAcquire(Thread * Self, ObjectWaiter * SelfNode);
 312   int       TryLock(Thread * Self);
 313   int       NotRunnable(Thread * Self, Thread * Owner);
 314   int       TrySpin(Thread * Self);
 315   void      ExitEpilog(Thread * Self, ObjectWaiter * Wakee);
 316   bool      ExitSuspendEquivalent(JavaThread * Self);
 317 };
 318 
 319 #endif // SHARE_VM_RUNTIME_OBJECTMONITOR_HPP

[Old version of the file above; patched version (rev 51882: eager ObjectMonitor initialization) below.]

 110 //   in synchronizer.cpp. Also see TEST_VM(SynchronizerTest, sanity) gtest.
 111 //
 112 // Futures notes:
 113 //   - Separating _owner from the <remaining_fields> by enough space to
 114 //     avoid false sharing might be profitable. Given
 115 //     http://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
 116 //     we know that the CAS in monitorenter will invalidate the line
 117 //     underlying _owner. We want to avoid an L1 data cache miss on that
 118 //     same line for monitorexit. Putting these <remaining_fields>:
 119 //     _recursions, _EntryList, _cxq, and _succ, all of which may be
 120 //     fetched in the inflated unlock path, on a different cache line
 121 //     would make them immune to CAS-based invalidation from the _owner
 122 //     field.
 123 //
 124 //   - The _recursions field should be of type int, or int32_t but not
 125 //     intptr_t. There's no reason to use a 64-bit type for this field
 126 //     in a 64-bit JVM.
 127 
 128 class ObjectMonitor {
 129  public:
 130   static void init();
 131   static void init_2();
 132 
 133   enum {
 134     OM_OK,                    // no error
 135     OM_SYSTEM_ERROR,          // operating system error
 136     OM_ILLEGAL_MONITOR_STATE, // IllegalMonitorStateException
 137     OM_INTERRUPTED,           // Thread.interrupt()
 138     OM_TIMED_OUT              // Object.wait() timed out
 139   };
 140 
 141  private:
 142   friend class ObjectSynchronizer;
 143   friend class ObjectWaiter;
 144   friend class VMStructs;
 145 
 146   volatile markOop   _header;       // displaced object header word - mark
 147   void*     volatile _object;       // backward object pointer - strong root
 148  public:
 149   ObjectMonitor*     FreeNext;      // Free list linkage
 150  private:
 151   DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE,
 152                         sizeof(volatile markOop) + sizeof(void * volatile) +


 159                                       // The list is actually composed of WaitNodes,
 160                                       // acting as proxies for Threads.
 161  private:
 162   ObjectWaiter * volatile _cxq;     // LL of recently-arrived threads blocked on entry.
 163   Thread * volatile _succ;          // Heir presumptive thread - used for futile wakeup throttling
 164   Thread * volatile _Responsible;
 165 
 166   volatile int _Spinner;            // for exit->spinner handoff optimization
 167   volatile int _SpinDuration;
 168 
 169   volatile jint  _count;            // reference count to prevent reclamation/deflation
 170                                     // at stop-the-world time.  See deflate_idle_monitors().
 171                                     // _count is approximately |_WaitSet| + |_EntryList|
 172  protected:
 173   ObjectWaiter * volatile _WaitSet; // LL of threads wait()ing on the monitor
 174   volatile jint  _waiters;          // number of waiting threads
 175  private:
 176   volatile int _WaitSetLock;        // protects Wait Queue - simple spinlock
 177 
 178  public:


 179   // Only perform a PerfData operation if the PerfData object has been
 180   // allocated and if the PerfDataManager has not freed the PerfData
 181   // objects which can happen at normal VM shutdown.
 182   //
 183   #define OM_PERFDATA_OP(f, op_str)              \
 184     do {                                         \
 185       if (ObjectMonitor::_sync_ ## f != NULL &&  \
 186           PerfDataManager::has_PerfData()) {     \
 187         ObjectMonitor::_sync_ ## f->op_str;      \
 188       }                                          \
 189     } while (0)
 190 
 191   static PerfCounter * _sync_ContendedLockAttempts;
 192   static PerfCounter * _sync_FutileWakeups;
 193   static PerfCounter * _sync_Parks;
 194   static PerfCounter * _sync_Notifications;
 195   static PerfCounter * _sync_Inflations;
 196   static PerfCounter * _sync_Deflations;
 197   static PerfLongVariable * _sync_MonExtant;
 198 


 286   void*     object() const;
 287   void*     object_addr();
 288   void      set_object(void* obj);
 289 
 290   bool      check(TRAPS);       // true if the thread owns the monitor.
 291   void      check_slow(TRAPS);
 292   void      clear();
 293 
 294   void      enter(TRAPS);
 295   void      exit(bool not_suspended, TRAPS);
 296   void      wait(jlong millis, bool interruptable, TRAPS);
 297   void      notify(TRAPS);
 298   void      notifyAll(TRAPS);
 299 
 300 // Use the following at your own risk
 301   intptr_t  complete_exit(TRAPS);
 302   void      reenter(intptr_t recursions, TRAPS);
 303 
 304  private:
 305   void      AddWaiter(ObjectWaiter * waiter);

 306   void      INotify(Thread * Self);
 307   ObjectWaiter * DequeueWaiter();
 308   void      DequeueSpecificWaiter(ObjectWaiter * waiter);
 309   void      EnterI(TRAPS);
 310   void      ReenterI(Thread * Self, ObjectWaiter * SelfNode);
 311   void      UnlinkAfterAcquire(Thread * Self, ObjectWaiter * SelfNode);
 312   int       TryLock(Thread * Self);
 313   int       NotRunnable(Thread * Self, Thread * Owner);
 314   int       TrySpin(Thread * Self);
 315   void      ExitEpilog(Thread * Self, ObjectWaiter * Wakee);
 316   bool      ExitSuspendEquivalent(JavaThread * Self);
 317 };
 318 
 319 #endif // SHARE_VM_RUNTIME_OBJECTMONITOR_HPP