--- old/src/hotspot/share/code/nmethod.cpp	2019-04-25 11:54:02.141401919 +0200
+++ new/src/hotspot/share/code/nmethod.cpp	2019-04-25 11:54:01.418377569 +0200
@@ -48,6 +48,7 @@
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiImpl.hpp"
 #include "runtime/atomic.hpp"
+#include "runtime/deoptimization.hpp"
 #include "runtime/flags/flagSetting.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
@@ -1083,13 +1084,7 @@
   // so we don't have to break the cycle. Note that it is possible to
   // have the Method* live here, in case we unload the nmethod because
   // it is pointing to some oop (other than the Method*) being unloaded.
-  if (_method != NULL) {
-    // OSR methods point to the Method*, but the Method* does not
-    // point back!
-    if (_method->code() == this) {
-      _method->clear_code(); // Break a cycle
-    }
-  }
+  Method::unlink_code(_method, this); // Break a cycle
 
   // Make the class unloaded - i.e., change state and notify sweeper
   assert(SafepointSynchronize::is_at_safepoint() || Thread::current()->is_ConcurrentGC_thread(),
@@ -1169,17 +1164,14 @@
   }
 }
 
-void nmethod::unlink_from_method(bool acquire_lock) {
+void nmethod::unlink_from_method() {
   // We need to check if both the _code and _from_compiled_code_entry_point
   // refer to this nmethod because there is a race in setting these two fields
   // in Method* as seen in bugid 4947125.
   // If the vep() points to the zombie nmethod, the memory for the nmethod
   // could be flushed and the compiler and vtable stubs could still call
   // through it.
-  if (method() != NULL && (method()->code() == this ||
-      method()->from_compiled_entry() == verified_entry_point())) {
-    method()->clear_code(acquire_lock);
-  }
+  Method::unlink_code(method(), this);
 }
 
 /**
@@ -1205,24 +1197,24 @@
 
   // during patching, depending on the nmethod state we must notify the GC that
   // code has been unloaded, unregistering it. We cannot do this right while
-  // holding the Patching_lock because we need to use the CodeCache_lock. This
+  // holding the CompiledMethod_lock because we need to use the CodeCache_lock. This
   // would be prone to deadlocks.
   // This flag is used to remember whether we need to later lock and unregister.
   bool nmethod_needs_unregister = false;
 
-  {
-    // invalidate osr nmethod before acquiring the patching lock since
-    // they both acquire leaf locks and we don't want a deadlock.
-    // This logic is equivalent to the logic below for patching the
-    // verified entry point of regular methods. We check that the
-    // nmethod is in use to ensure that it is invalidated only once.
-    if (is_osr_method() && is_in_use()) {
-      // this effectively makes the osr nmethod not entrant
-      invalidate_osr_method();
-    }
+  // invalidate osr nmethod before acquiring the patching lock since
+  // they both acquire leaf locks and we don't want a deadlock.
+  // This logic is equivalent to the logic below for patching the
+  // verified entry point of regular methods. We check that the
+  // nmethod is in use to ensure that it is invalidated only once.
+  if (is_osr_method() && is_in_use()) {
+    // this effectively makes the osr nmethod not entrant
+    invalidate_osr_method();
+  }
 
+  {
     // Enter critical section. Does not block for safepoint.
-    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+    MutexLockerEx pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
 
     if (_state == state) {
       // another thread already performed this transition so nothing
@@ -1269,8 +1261,9 @@
     JVMCI_ONLY(maybe_invalidate_installed_code());
 
     // Remove nmethod from method.
-    unlink_from_method(false /* already owns Patching_lock */);
-  } // leave critical region under Patching_lock
+    unlink_from_method();
+
+  } // leave critical region under CompiledMethod_lock
 
 #ifdef ASSERT
   if (is_osr_method() && method() != NULL) {
@@ -2858,7 +2851,7 @@
 
 #if INCLUDE_JVMCI
 void nmethod::clear_jvmci_installed_code() {
-  assert_locked_or_safepoint(Patching_lock);
+  assert_locked_or_safepoint(CompiledMethod_lock);
   if (_jvmci_installed_code != NULL) {
     JNIHandles::destroy_weak_global(_jvmci_installed_code);
     _jvmci_installed_code = NULL;
@@ -2866,7 +2859,7 @@
 }
 
 void nmethod::clear_speculation_log() {
-  assert_locked_or_safepoint(Patching_lock);
+  assert_locked_or_safepoint(CompiledMethod_lock);
   if (_speculation_log != NULL) {
     JNIHandles::destroy_weak_global(_speculation_log);
     _speculation_log = NULL;
@@ -2878,7 +2871,7 @@
     return;
   }
 
-  assert(Patching_lock->is_locked() ||
+  assert(CompiledMethod_lock->is_locked() ||
          SafepointSynchronize::is_at_safepoint(), "should be performed under a lock for consistency");
   oop installed_code = JNIHandles::resolve(_jvmci_installed_code);
   if (installed_code != NULL) {
@@ -2922,7 +2915,7 @@
   nmethodLocker nml(nm);
 #ifdef ASSERT
   {
-    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+    MutexLockerEx pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
     // This relationship can only be checked safely under a lock
     assert(!nm->is_alive() || nm->is_unloading() || nm->jvmci_installed_code() == installedCode(), "sanity check");
   }
@@ -2932,14 +2925,13 @@
     // Invalidating the InstalledCode means we want the nmethod
     // to be deoptimized.
     nm->mark_for_deoptimization();
-    VM_Deoptimize op;
-    VMThread::execute(&op);
+    Deoptimization::deoptimize_all_marked();
   }
 
   // Multiple threads could reach this point so we now need to
   // lock and re-check the link to the nmethod so that only one
   // thread clears it.
-  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+  MutexLockerEx pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
   if (InstalledCode::address(installedCode) == nativeMethod) {
     InstalledCode::set_address(installedCode, 0);
   }
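---

Note for reviewers: the new Method::unlink_code(Method*, CompiledMethod*) helper that replaces the two inlined code paths above is defined in method.cpp, which is not part of this file's diff. The following is a minimal sketch of what it is expected to do, reconstructed from the deleted bodies of nmethod::make_unloaded() (NULL Method* tolerated) and nmethod::unlink_from_method() (the bugid 4947125 double check); the exact body, the conditional-locking idiom, and the new clear_code() signature are assumptions here, not part of this patch.

    // Sketch only -- not the actual method.cpp change.
    void Method::unlink_code(Method* method, CompiledMethod* compare) {
      // Take CompiledMethod_lock (the lock that replaces Patching_lock in this
      // change) unless the caller already holds it, e.g. when called from
      // nmethod::make_not_entrant_or_zombie() inside the critical section.
      MutexLockerEx ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock,
                       Mutex::_no_safepoint_check_flag);
      // make_unloaded() may race with Method* unloading, so _method can be NULL.
      if (method == NULL) {
        return;
      }
      // Check both _code and _from_compiled_code_entry_point, mirroring the
      // race described in bugid 4947125 in the deleted unlink_from_method body.
      if (method->code() == compare ||
          method->from_compiled_entry() == compare->verified_entry_point()) {
        method->clear_code();
      }
    }

Centralizing the check under CompiledMethod_lock is what lets make_not_entrant_or_zombie() drop the old acquire_lock flag from unlink_from_method().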