diff --git a/src/hotspot/share/code/compiledIC.cpp b/src/hotspot/share/code/compiledIC.cpp
index 794e21a..455c286 100644
--- a/src/hotspot/share/code/compiledIC.cpp
+++ b/src/hotspot/share/code/compiledIC.cpp
@@ -126,7 +126,6 @@ void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub
 
   {
     CodeBlob* cb = CodeCache::find_blob_unsafe(_call->instruction_address());
-    MutexLockerEx pl(CompiledICLocker::is_safe(cb->as_compiled_method()) ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
     assert(cb != NULL && cb->is_compiled(), "must be compiled");
     _call->set_destination_mt_safe(entry_point);
   }
@@ -366,7 +365,7 @@ bool CompiledIC::set_to_clean(bool in_use) {
 
   // A zombie transition will always be safe, since the metadata has already been set to NULL, so
   // we only need to patch the destination
-  bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || CompiledICLocker::is_safe(_method);
+  bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();
 
   if (safe_transition) {
     // Kill any leftover stub we might have too
@@ -419,8 +418,7 @@ bool CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
   if (info.to_interpreter() || info.to_aot()) {
     // Call to interpreter
     if (info.is_optimized() && is_optimized()) {
-       assert(is_clean(), "unsafe IC path");
-       MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+      assert(is_clean(), "unsafe IC path");
       // the call analysis (callee structure) specifies that the call is optimized
       // (either because of CHA or the static target is final)
       // At code generation time, this call has been emitted as static call
@@ -594,7 +592,6 @@ bool CompiledStaticCall::set_to_clean(bool in_use) {
   // in_use is unused but needed to match template function in CompiledMethod
   assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
   // Reset call site
-  MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
   set_destination_mt_safe(resolve_call_stub());
 
   // Do not reset stub here: It is too expensive to call find_stub.
@@ -640,7 +637,6 @@ void CompiledStaticCall::set_to_compiled(address entry) {
 
 void CompiledStaticCall::set(const StaticCallInfo& info) {
   assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
-  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
   // Updating a cache to the wrong entry can cause bugs that are very hard
   // to track down - if cache entry gets invalid - we just clean it. In
   // this way it is always the same code path that is responsible for
diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp
index 8bf1b9a..977050e 100644
--- a/src/hotspot/share/runtime/sharedRuntime.cpp
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp
@@ -1059,7 +1059,7 @@ methodHandle SharedRuntime::extract_attached_method(vframeStream& vfst) {
 
   address pc = vfst.frame_pc();
   { // Get call instruction under lock because another thread may be busy patching it.
-    MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
+    CompiledICLocker ic_locker(caller);
     return caller->attached_method_before_pc(pc);
   }
   return NULL;
@@ -1721,14 +1721,11 @@ methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
   // we jump to it the target gets deoptimized. Similar to 1
   // we will wind up in the interprter (thru a c2i with c2).
   //
-  address call_addr = NULL;
-  {
-    // Get call instruction under lock because another thread may be
-    // busy patching it.
-    MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
-    // Location of call instruction
-    call_addr = caller_nm->call_instruction_address(pc);
-  }
+  CompiledICLocker ml(caller_nm);
+  // Get call instruction under lock because another thread may be
+  // busy patching it.
+  // Location of call instruction
+  address call_addr = caller_nm->call_instruction_address(pc);
   // Make sure nmethod doesn't get deoptimized and removed until
   // this is done with it.
   // CLEANUP - with lazy deopt shouldn't need this lock
@@ -1757,7 +1754,6 @@ methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
       // to a wrong method). It should not be performance critical, since the
       // resolve is only done once.
 
-      CompiledICLocker ml(caller_nm);
       if (is_static_call) {
         CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
         ssc->set_to_clean();
@@ -1902,9 +1898,8 @@ IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address cal
 
   if (moop->code() == NULL) return;
 
   if (nm->is_in_use()) {
-    // Expect to find a native call there (unless it was no-inline cache vtable dispatch)
-    MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
+    CompiledICLocker ic_locker(nm);
    if (NativeCall::is_call_before(return_pc)) {
      ResourceMark mark;
      NativeCallWrapper* call = nm->call_wrapper_before(return_pc);
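For reference, the common pattern in these hunks: critical sections on the global Patching_lock become a CompiledICLocker held for the whole scope that reads and patches a call site, and CompiledIC::set_to_clean now keys its safe-transition check on SafepointSynchronize::is_at_safepoint(). The sketch below is a minimal standalone model of that RAII scoping, not HotSpot code; CompiledMethodModel and IcLocker are invented stand-ins for CompiledMethod and CompiledICLocker.

// Standalone model of the scoped per-method lock -- NOT HotSpot code.
#include <cassert>
#include <mutex>
#include <thread>
#include <vector>

struct CompiledMethodModel {
  std::mutex ic_lock;       // stands in for the per-method IC lock
  long ic_destination = 0;  // stands in for a call-site destination
};

// RAII guard in the spirit of CompiledICLocker: hold the lock for the
// whole scope that inspects and patches a call site of this method.
class IcLocker {
  CompiledMethodModel& _m;
 public:
  explicit IcLocker(CompiledMethodModel& m) : _m(m) { _m.ic_lock.lock(); }
  ~IcLocker() { _m.ic_lock.unlock(); }
};

static void patch(CompiledMethodModel& m, long dest) {
  IcLocker locked(m);       // scoped, per-method; no global Patching_lock
  m.ic_destination = dest;  // the guarded read-modify-write
}

int main() {
  CompiledMethodModel m;
  std::vector<std::thread> threads;
  for (long i = 1; i <= 4; i++) {
    threads.emplace_back([&m, i] { for (int j = 0; j < 1000; j++) patch(m, i); });
  }
  for (auto& t : threads) t.join();
  assert(m.ic_destination >= 1 && m.ic_destination <= 4);
  return 0;
}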