
src/hotspot/share/opto/locknode.cpp

--- old/src/hotspot/share/opto/locknode.cpp

 165 #if INCLUDE_RTM_OPT
 166   Compile* C = Compile::current();
 167   if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
 168     RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
 169            OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
 170     _rtm_counters = rlnc->counters();
 171     if (UseRTMForStackLocks) {
 172       rlnc = (RTMLockingNamedCounter*)
 173            OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
 174       _stack_rtm_counters = rlnc->counters();
 175     }
 176   }
 177 #endif
 178 }
 179 
 180 //=============================================================================
 181 //------------------------------do_monitor_enter-------------------------------
 182 void Parse::do_monitor_enter() {
 183   kill_dead_locals();
 184 
 185   // Null check; get casted pointer.
 186   Node* obj = null_check(peek());
 187   // Check for locking null object
 188   if (stopped()) return;
 189 
 190   // the monitor object is not part of debug info expression stack
 191   pop();
 192 
 193   // Insert a FastLockNode which takes as arguments the current thread pointer,
 194   // the obj pointer & the address of the stack slot pair used for the lock.
 195   shared_lock(obj);
 196 }
 197 
 198 //------------------------------do_monitor_exit--------------------------------
 199 void Parse::do_monitor_exit() {
 200   kill_dead_locals();
 201 
 202   pop();                        // Pop oop to unlock
 203   // Because monitors are guaranteed paired (else we bail out), we know
 204   // the matching Lock for this Unlock.  Hence we know there is no need
 205   // for a null check on Unlock.
 206   shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());

+++ new/src/hotspot/share/opto/locknode.cpp

 165 #if INCLUDE_RTM_OPT
 166   Compile* C = Compile::current();
 167   if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
 168     RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
 169            OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
 170     _rtm_counters = rlnc->counters();
 171     if (UseRTMForStackLocks) {
 172       rlnc = (RTMLockingNamedCounter*)
 173            OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
 174       _stack_rtm_counters = rlnc->counters();
 175     }
 176   }
 177 #endif
 178 }
 179 
 180 //=============================================================================
 181 //------------------------------do_monitor_enter-------------------------------
 182 void Parse::do_monitor_enter() {
 183   kill_dead_locals();
 184 
 185   Node* obj = peek();
 186 
 187   if (obj->is_ValueType()) {
 188     uncommon_trap(Deoptimization::Reason_class_check,
 189                   Deoptimization::Action_none);
 190     return;
 191   }
 192 
 193   // Null check; get casted pointer.
 194   obj = null_check(obj);
 195   // Check for locking null object
 196   if (stopped()) return;
 197 
 198   // the monitor object is not part of debug info expression stack
 199   pop();
 200 
 201   // Insert a FastLockNode which takes as arguments the current thread pointer,
 202   // the obj pointer & the address of the stack slot pair used for the lock.
 203   shared_lock(obj);
 204 }
 205 
 206 //------------------------------do_monitor_exit--------------------------------
 207 void Parse::do_monitor_exit() {
 208   kill_dead_locals();
 209 
 210   pop();                        // Pop oop to unlock
 211   // Because monitors are guaranteed paired (else we bail out), we know
 212   // the matching Lock for this Unlock.  Hence we know there is no need
 213   // for a null check on Unlock.
 214   shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
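
Taken together, the value type check at new lines 187-191 makes C2's parser
refuse to compile a monitorenter whose operand is a value type node: value
types have no identity, so there is no monitor to acquire, and the parser
instead issues an uncommon trap (Reason_class_check, Action_none) and leaves
the bytecode to the interpreter. The unlock path is unchanged; its comment
relies on javac emitting structurally paired monitorenter/monitorexit
instructions for a synchronized block (one exit on the normal fall-through
path, one in the implicit exception handler), and on C2 bailing out of methods
where that pairing cannot be shown. A minimal, illustrative Java snippet of the
pairing guarantee (the class and field names below are mine, not part of this
patch):

    public class MonitorPairing {
        private static final Object LOCK = new Object();
        private static int counter;

        static void bump() {
            // javac compiles this block to one monitorenter followed by two
            // structurally paired monitorexit instructions: one on the normal
            // fall-through path and one in an implicit exception handler, so
            // the monitor is released exactly once per acquisition.
            synchronized (LOCK) {
                counter++;
            }
        }

        public static void main(String[] args) {
            bump();
            System.out.println("counter = " + counter);
        }
    }

Running javap -c MonitorPairing on the compiled class shows the single
monitorenter and the paired monitorexit instructions that the comment in
do_monitor_exit refers to.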