src/share/vm/opto/locknode.cpp


rev 5968 : 8031320: Use Intel RTM instructions for locks
Summary: Use RTM for inflated locks and stack locks.
Reviewed-by: iveresov, twisti, roland, dcubed
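For background, RTM (Restricted Transactional Memory, part of Intel TSX) lets a thread run a critical section speculatively as a hardware transaction and fall back to the real lock only if the transaction aborts. The sketch below is not HotSpot code; it is a minimal illustration of that lock-elision pattern using the <immintrin.h> intrinsics (_xbegin, _xend, _xabort). The SpinLock type and with_lock helper are hypothetical, and a real implementation would also check CPUID for RTM support and bound the number of retries. Build with RTM enabled, e.g. g++ -O2 -mrtm.

// Conceptual sketch of RTM-based lock elision; not HotSpot's implementation.
#include <immintrin.h>   // _xbegin, _xend, _xabort, _XBEGIN_STARTED
#include <atomic>

// Hypothetical spin lock, used only as the fall-back (slow) path.
struct SpinLock {
  std::atomic<bool> held{false};
  bool is_held() const { return held.load(std::memory_order_acquire); }
  void lock()   { while (held.exchange(true, std::memory_order_acquire)) { /* spin */ } }
  void unlock() { held.store(false, std::memory_order_release); }
};

template <typename F>
void with_lock(SpinLock& lock, F critical_section) {
  // Fast path: execute the critical section as a hardware transaction.
  if (_xbegin() == _XBEGIN_STARTED) {
    if (!lock.is_held()) {     // lock word joins the read set: a real acquisition
      critical_section();      // by another thread aborts this transaction
      _xend();                 // commit
      return;
    }
    _xabort(0xff);             // lock already held; abort to the slow path
  }
  // Slow path: the transaction aborted, so take the lock for real.
  lock.lock();
  critical_section();
  lock.unlock();
}

int main() {
  SpinLock lock;
  int counter = 0;
  with_lock(lock, [&] { ++counter; });   // single-threaded demo of the wrapper
  return counter == 1 ? 0 : 1;
}

Reading the lock word inside the transaction is what makes elision safe: if another thread actually acquires the lock, that write conflicts with the transactional read and forces an abort onto the slow path.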

Old version:
 119       assert((flock->box_node() == this) && flock->obj_node()->eqv_uncast(obj),"");
 120     }
 121     // Don't check monitor info in safepoints since the referenced object could
 122     // be different from the locked object. It could be a Phi node of different
 123     // cast nodes which point to this locked object.
 124     // We assume that no other objects could be referenced in monitor info
 125     // associated with this BoxLock node because all associated locks and
 126     // unlocks reference only this one object.
 127   }
 128 #endif
 129   if (unique_lock != NULL && has_one_lock) {
 130     *unique_lock = lock;
 131   }
 132   return true;
 133 }
 134 
 135 //=============================================================================
 136 //-----------------------------hash--------------------------------------------
 137 uint FastLockNode::hash() const { return NO_HASH; }
 138 
 139 //------------------------------cmp--------------------------------------------
 140 uint FastLockNode::cmp( const Node &n ) const {
 141   return (&n == this);                // Always fail except on self
 142 }
 143 
 144 //=============================================================================
 145 //-----------------------------hash--------------------------------------------
 146 uint FastUnlockNode::hash() const { return NO_HASH; }
 147 
 148 //------------------------------cmp--------------------------------------------
 149 uint FastUnlockNode::cmp( const Node &n ) const {
 150   return (&n == this);                // Always fail except on self
 151 }
 152 
 153 //
 154 // Create a counter which counts the number of times this lock is acquired
 155 //
 156 void FastLockNode::create_lock_counter(JVMState* state) {
 157   BiasedLockingNamedCounter* blnc = (BiasedLockingNamedCounter*)
 158            OptoRuntime::new_named_counter(state, NamedCounter::BiasedLockingCounter);
 159   _counters = blnc->counters();
 160 }
 161 
 162 //=============================================================================
 163 //------------------------------do_monitor_enter-------------------------------
 164 void Parse::do_monitor_enter() {
 165   kill_dead_locals();
 166 
 167   // Null check; get casted pointer.
 168   Node* obj = null_check(peek());
 169   // Check for locking null object
 170   if (stopped()) return;
 171 
 172   // the monitor object is not part of debug info expression stack
 173   pop();
 174 
 175   // Insert a FastLockNode which takes as arguments the current thread pointer,
 176   // the obj pointer & the address of the stack slot pair used for the lock.
 177   shared_lock(obj);
 178 }
 179 
 180 //------------------------------do_monitor_exit--------------------------------
 181 void Parse::do_monitor_exit() {

New version:
 119       assert((flock->box_node() == this) && flock->obj_node()->eqv_uncast(obj),"");
 120     }
 121     // Don't check monitor info in safepoints since the referenced object could
 122     // be different from the locked object. It could be a Phi node of different
 123     // cast nodes which point to this locked object.
 124     // We assume that no other objects could be referenced in monitor info
 125     // associated with this BoxLock node because all associated locks and
 126     // unlocks reference only this one object.
 127   }
 128 #endif
 129   if (unique_lock != NULL && has_one_lock) {
 130     *unique_lock = lock;
 131   }
 132   return true;
 133 }
 134 
 135 //=============================================================================
 136 //-----------------------------hash--------------------------------------------
 137 uint FastLockNode::hash() const { return NO_HASH; }
 138 
 139 uint FastLockNode::size_of() const { return sizeof(*this); }
 140 
 141 //------------------------------cmp--------------------------------------------
 142 uint FastLockNode::cmp( const Node &n ) const {
 143   return (&n == this);                // Always fail except on self
 144 }
 145 
 146 //=============================================================================
 147 //-----------------------------hash--------------------------------------------
 148 uint FastUnlockNode::hash() const { return NO_HASH; }
 149 
 150 //------------------------------cmp--------------------------------------------
 151 uint FastUnlockNode::cmp( const Node &n ) const {
 152   return (&n == this);                // Always fail except on self
 153 }
 154 
 155 //
 156 // Create a counter which counts the number of times this lock is acquired
 157 //
 158 void FastLockNode::create_lock_counter(JVMState* state) {
 159   BiasedLockingNamedCounter* blnc = (BiasedLockingNamedCounter*)
 160            OptoRuntime::new_named_counter(state, NamedCounter::BiasedLockingCounter);
 161   _counters = blnc->counters();
 162 }
 163 
 164 void FastLockNode::create_rtm_lock_counter(JVMState* state) {
 165 #if INCLUDE_RTM_OPT
 166   Compile* C = Compile::current();
 167   if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
 168     RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
 169            OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
 170     _rtm_counters = rlnc->counters();
 171     if (UseRTMForStackLocks) {
 172       rlnc = (RTMLockingNamedCounter*)
 173            OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
 174       _stack_rtm_counters = rlnc->counters();
 175     }
 176   }
 177 #endif
 178 }
 179 
 180 //=============================================================================
 181 //------------------------------do_monitor_enter-------------------------------
 182 void Parse::do_monitor_enter() {
 183   kill_dead_locals();
 184 
 185   // Null check; get casted pointer.
 186   Node* obj = null_check(peek());
 187   // Check for locking null object
 188   if (stopped()) return;
 189 
 190   // the monitor object is not part of debug info expression stack
 191   pop();
 192 
 193   // Insert a FastLockNode which takes as arguments the current thread pointer,
 194   // the obj pointer & the address of the stack slot pair used for the lock.
 195   shared_lock(obj);
 196 }
 197 
 198 //------------------------------do_monitor_exit--------------------------------
 199 void Parse::do_monitor_exit() {