< prev index next >

src/hotspot/share/runtime/sharedRuntime.cpp

 
   nmethodLocker caller_lock(caller);
 
   address pc = vfst.frame_pc();
   { // Get call instruction under lock because another thread may be busy patching it.
-    MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
+    CompiledICLocker ic_locker(caller);
     return caller->attached_method_before_pc(pc);
   }
   return NULL;
 }
 

@@ -1719,18 +1719,15 @@
  // 2 - a racing deoptimization. We could be doing a vanilla vtable
  // call and between the time we fetch the entry address and
  // we jump to it the target gets deoptimized. Similar to 1
  // we will wind up in the interprter (thru a c2i with c2).
  //
-   address call_addr = NULL;
-   {
-     // Get call instruction under lock because another thread may be
-     // busy patching it.
-     MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
-     // Location of call instruction
-     call_addr = caller_nm->call_instruction_address(pc);
-   }
+   CompiledICLocker ml(caller_nm);
+   // Get call instruction under lock because another thread may be
+   // busy patching it.
+   // Location of call instruction
+   address call_addr = caller_nm->call_instruction_address(pc);
 
   // Make sure nmethod doesn't get deoptimized and removed until
   // this is done with it.
   // CLEANUP - with lazy deopt shouldn't need this lock
   nmethodLocker nmlock(caller_nm);
@@ -1755,11 +1752,10 @@
  // is always done through the same code path. (experience shows that it
  // leads to very hard to track down bugs, if an inline cache gets updated
  // to a wrong method). It should not be performance critical, since the
  // resolve is only done once.
-   CompiledICLocker ml(caller_nm);
 
   if (is_static_call) {
     CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
     ssc->set_to_clean();
   } else {
     // compiled, dispatched call (which used to call an interpreted method)
@@ -1900,13 +1896,12 @@
  // and patch the code with the same old data. Asi es la vida.
 
   if (moop->code() == NULL) return;
 
   if (nm->is_in_use()) {
-     // Expect to find a native call there (unless it was no-inline cache vtable dispatch)
-     MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
+     CompiledICLocker ic_locker(nm);
     if (NativeCall::is_call_before(return_pc)) {
       ResourceMark mark;
       NativeCallWrapper* call = nm->call_wrapper_before(return_pc);
       //
       // bug 6281185. We might get here after resolving a call site to a vanilla
< prev index next >