
src/hotspot/share/code/nmethod.cpp

rev 54621 : imported patch 8221734-v1

*** 46,55 ****
--- 46,56 ----
  #include "oops/method.inline.hpp"
  #include "oops/methodData.hpp"
  #include "oops/oop.inline.hpp"
  #include "prims/jvmtiImpl.hpp"
  #include "runtime/atomic.hpp"
+ #include "runtime/deoptimization.hpp"
  #include "runtime/flags/flagSetting.hpp"
  #include "runtime/frame.inline.hpp"
  #include "runtime/handles.inline.hpp"
  #include "runtime/jniHandles.inline.hpp"
  #include "runtime/orderAccess.hpp"
*** 1081,1097 ****
    // If _method is already NULL the Method* is about to be unloaded,
    // so we don't have to break the cycle. Note that it is possible to
    // have the Method* live here, in case we unload the nmethod because
    // it is pointing to some oop (other than the Method*) being unloaded.
!   if (_method != NULL) {
!     // OSR methods point to the Method*, but the Method* does not
!     // point back!
!     if (_method->code() == this) {
!       _method->clear_code(); // Break a cycle
!     }
!   }

    // Make the class unloaded - i.e., change state and notify sweeper
    assert(SafepointSynchronize::is_at_safepoint() || Thread::current()->is_ConcurrentGC_thread(),
           "must be at safepoint");
--- 1082,1092 ----
    // If _method is already NULL the Method* is about to be unloaded,
    // so we don't have to break the cycle. Note that it is possible to
    // have the Method* live here, in case we unload the nmethod because
    // it is pointing to some oop (other than the Method*) being unloaded.
!   Method::unlink_code(_method, this); // Break a cycle

    // Make the class unloaded - i.e., change state and notify sweeper
    assert(SafepointSynchronize::is_at_safepoint() || Thread::current()->is_ConcurrentGC_thread(),
           "must be at safepoint");
*** 1167,1187 ****
    if (PrintCompilation && _state != unloaded) {
      print_on(tty, state_msg);
    }
  }

! void nmethod::unlink_from_method(bool acquire_lock) {
    // We need to check if both the _code and _from_compiled_code_entry_point
    // refer to this nmethod because there is a race in setting these two fields
    // in Method* as seen in bugid 4947125.
    // If the vep() points to the zombie nmethod, the memory for the nmethod
    // could be flushed and the compiler and vtable stubs could still call
    // through it.
!   if (method() != NULL && (method()->code() == this ||
!       method()->from_compiled_entry() == verified_entry_point())) {
!     method()->clear_code(acquire_lock);
!   }
  }

  /**
   * Common functionality for both make_not_entrant and make_zombie
   */
--- 1162,1179 ----
    if (PrintCompilation && _state != unloaded) {
      print_on(tty, state_msg);
    }
  }

! void nmethod::unlink_from_method() {
    // We need to check if both the _code and _from_compiled_code_entry_point
    // refer to this nmethod because there is a race in setting these two fields
    // in Method* as seen in bugid 4947125.
    // If the vep() points to the zombie nmethod, the memory for the nmethod
    // could be flushed and the compiler and vtable stubs could still call
    // through it.
!   Method::unlink_code(method(), this);
  }

  /**
   * Common functionality for both make_not_entrant and make_zombie
   */
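The two hunks above replace hand-rolled "clear the link only if it still points at this nmethod" logic with a single call to Method::unlink_code(). That helper is declared and defined in method.hpp/method.cpp, which are not part of this page, so the sketch below is only an assumption reconstructed from the two replaced call sites; its signature, conditional locking, and body are guesses, not the patch's actual definition.

// Hypothetical sketch of the consolidated helper, reconstructed from the two
// call sites above. The real Method::unlink_code lives in method.cpp and may
// differ; the conditional re-entrant locking in particular is an assumption,
// needed because unlink_from_method() is now called with CompiledMethod_lock
// already held.
void Method::unlink_code(Method* method, CompiledMethod* compare) {
  if (method == NULL) {
    return;  // Method* already gone, nothing to unlink
  }
  MutexLockerEx ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock,
                   Mutex::_no_safepoint_check_flag);
  // Break the Method* -> code link only if it still refers to the compiled
  // method being unlinked; both fields are checked because of the race in
  // setting them noted above (bugid 4947125).
  if (method->code() == compare ||
      method->from_compiled_entry() == compare->verified_entry_point()) {
    method->clear_code();
  }
}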
*** 1203,1230 ****
    // This can be called while the system is already at a safepoint which is ok
    NoSafepointVerifier nsv(true, !SafepointSynchronize::is_at_safepoint());

    // during patching, depending on the nmethod state we must notify the GC that
    // code has been unloaded, unregistering it. We cannot do this right while
!   // holding the Patching_lock because we need to use the CodeCache_lock. This
    // would be prone to deadlocks.
    // This flag is used to remember whether we need to later lock and unregister.
    bool nmethod_needs_unregister = false;

-   {
    // invalidate osr nmethod before acquiring the patching lock since
    // they both acquire leaf locks and we don't want a deadlock.
    // This logic is equivalent to the logic below for patching the
    // verified entry point of regular methods. We check that the
    // nmethod is in use to ensure that it is invalidated only once.
    if (is_osr_method() && is_in_use()) {
      // this effectively makes the osr nmethod not entrant
      invalidate_osr_method();
    }

    // Enter critical section. Does not block for safepoint.
!   MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);

    if (_state == state) {
      // another thread already performed this transition so nothing
      // to do, but return false to indicate this.
      return false;
--- 1195,1222 ----
    // This can be called while the system is already at a safepoint which is ok
    NoSafepointVerifier nsv(true, !SafepointSynchronize::is_at_safepoint());

    // during patching, depending on the nmethod state we must notify the GC that
    // code has been unloaded, unregistering it. We cannot do this right while
!   // holding the CompiledMethod_lock because we need to use the CodeCache_lock. This
    // would be prone to deadlocks.
    // This flag is used to remember whether we need to later lock and unregister.
    bool nmethod_needs_unregister = false;

    // invalidate osr nmethod before acquiring the patching lock since
    // they both acquire leaf locks and we don't want a deadlock.
    // This logic is equivalent to the logic below for patching the
    // verified entry point of regular methods. We check that the
    // nmethod is in use to ensure that it is invalidated only once.
    if (is_osr_method() && is_in_use()) {
      // this effectively makes the osr nmethod not entrant
      invalidate_osr_method();
    }

+   {
    // Enter critical section. Does not block for safepoint.
!   MutexLockerEx pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);

    if (_state == state) {
      // another thread already performed this transition so nothing
      // to do, but return false to indicate this.
      return false;
*** 1267,1278 ****
    // Invalidate while holding the patching lock
    JVMCI_ONLY(maybe_invalidate_installed_code());

    // Remove nmethod from method.
!   unlink_from_method(false /* already owns Patching_lock */);
! } // leave critical region under Patching_lock

  #ifdef ASSERT
    if (is_osr_method() && method() != NULL) {
      // Make sure osr nmethod is invalidated, i.e. not on the list
      bool found = method()->method_holder()->remove_osr_nmethod(this);
--- 1259,1271 ----
    // Invalidate while holding the patching lock
    JVMCI_ONLY(maybe_invalidate_installed_code());

    // Remove nmethod from method.
!   unlink_from_method();
!
! } // leave critical region under CompiledMethod_lock

  #ifdef ASSERT
    if (is_osr_method() && method() != NULL) {
      // Make sure osr nmethod is invalidated, i.e. not on the list
      bool found = method()->method_holder()->remove_osr_nmethod(this);
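Taken together, the two hunks above rename the critical region from Patching_lock to CompiledMethod_lock and move its opening brace so that OSR invalidation happens before the lock is taken. A condensed, illustrative skeleton of the resulting control flow is sketched below; most of the body is elided in comments and the details are this reader's reconstruction, not the full function as patched.

// Condensed, illustrative skeleton of nmethod::make_not_entrant_or_zombie()
// after the two hunks above; the elided parts (marked with comments) do the
// actual entry-point patching, GC bookkeeping and sweeper notification.
bool nmethod::make_not_entrant_or_zombie(int state) {
  // Invalidate the OSR nmethod *before* taking CompiledMethod_lock: both
  // paths acquire leaf locks, and doing it here avoids a lock-order deadlock.
  if (is_osr_method() && is_in_use()) {
    invalidate_osr_method();
  }
  {
    // Enter critical section. Does not block for safepoint.
    MutexLockerEx pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
    if (_state == state) {
      return false;  // another thread already performed this transition
    }
    // ... patch the verified entry point, update _state, decide whether the
    //     GC later needs to unregister this nmethod ...
    JVMCI_ONLY(maybe_invalidate_installed_code());
    unlink_from_method();  // the acquire_lock flag is gone
  } // leave critical region under CompiledMethod_lock
  // ... OSR list assertion, CodeCache_lock work, sweeper notification ...
  return true;
}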
*** 2856,2874 ****
  #endif // !PRODUCT

  #if INCLUDE_JVMCI
  void nmethod::clear_jvmci_installed_code() {
!   assert_locked_or_safepoint(Patching_lock);
    if (_jvmci_installed_code != NULL) {
      JNIHandles::destroy_weak_global(_jvmci_installed_code);
      _jvmci_installed_code = NULL;
    }
  }

  void nmethod::clear_speculation_log() {
!   assert_locked_or_safepoint(Patching_lock);
    if (_speculation_log != NULL) {
      JNIHandles::destroy_weak_global(_speculation_log);
      _speculation_log = NULL;
    }
  }
--- 2849,2867 ----
  #endif // !PRODUCT

  #if INCLUDE_JVMCI
  void nmethod::clear_jvmci_installed_code() {
!   assert_locked_or_safepoint(CompiledMethod_lock);
    if (_jvmci_installed_code != NULL) {
      JNIHandles::destroy_weak_global(_jvmci_installed_code);
      _jvmci_installed_code = NULL;
    }
  }

  void nmethod::clear_speculation_log() {
!   assert_locked_or_safepoint(CompiledMethod_lock);
    if (_speculation_log != NULL) {
      JNIHandles::destroy_weak_global(_speculation_log);
      _speculation_log = NULL;
    }
  }
*** 2876,2886 ****
  void nmethod::maybe_invalidate_installed_code() {
    if (!is_compiled_by_jvmci()) {
      return;
    }

!   assert(Patching_lock->is_locked() ||
           SafepointSynchronize::is_at_safepoint(), "should be performed under a lock for consistency");
    oop installed_code = JNIHandles::resolve(_jvmci_installed_code);
    if (installed_code != NULL) {
      // Update the values in the InstalledCode instance if it still refers to this nmethod
      nmethod* nm = (nmethod*)InstalledCode::address(installed_code);
--- 2869,2879 ----
  void nmethod::maybe_invalidate_installed_code() {
    if (!is_compiled_by_jvmci()) {
      return;
    }

!   assert(CompiledMethod_lock->is_locked() ||
           SafepointSynchronize::is_at_safepoint(), "should be performed under a lock for consistency");
    oop installed_code = JNIHandles::resolve(_jvmci_installed_code);
    if (installed_code != NULL) {
      // Update the values in the InstalledCode instance if it still refers to this nmethod
      nmethod* nm = (nmethod*)InstalledCode::address(installed_code);
*** 2920,2947 ****
    }

    nmethodLocker nml(nm);
  #ifdef ASSERT
    {
!     MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // This relationship can only be checked safely under a lock
      assert(!nm->is_alive() || nm->is_unloading() || nm->jvmci_installed_code() == installedCode(), "sanity check");
    }
  #endif

    if (nm->is_alive()) {
      // Invalidating the InstalledCode means we want the nmethod
      // to be deoptimized.
      nm->mark_for_deoptimization();
!     VM_Deoptimize op;
!     VMThread::execute(&op);
    }

    // Multiple threads could reach this point so we now need to
    // lock and re-check the link to the nmethod so that only one
    // thread clears it.
!   MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
    if (InstalledCode::address(installedCode) == nativeMethod) {
      InstalledCode::set_address(installedCode, 0);
    }
  }
--- 2913,2939 ----
    }

    nmethodLocker nml(nm);
  #ifdef ASSERT
    {
!     MutexLockerEx pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
      // This relationship can only be checked safely under a lock
      assert(!nm->is_alive() || nm->is_unloading() || nm->jvmci_installed_code() == installedCode(), "sanity check");
    }
  #endif

    if (nm->is_alive()) {
      // Invalidating the InstalledCode means we want the nmethod
      // to be deoptimized.
      nm->mark_for_deoptimization();
!     Deoptimization::deoptimize_all_marked();
    }

    // Multiple threads could reach this point so we now need to
    // lock and re-check the link to the nmethod so that only one
    // thread clears it.
!   MutexLockerEx pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
    if (InstalledCode::address(installedCode) == nativeMethod) {
      InstalledCode::set_address(installedCode, 0);
    }
  }
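The last hunk is where the newly added runtime/deoptimization.hpp include comes into play: instead of constructing a VM_Deoptimize operation and handing it to the VMThread, the InstalledCode invalidation path now calls Deoptimization::deoptimize_all_marked() directly after marking the nmethod. The sketch below only restates that call-site pattern; the wrapper name deoptimize_nmethod is invented for illustration and does not appear in the patch, whereas mark_for_deoptimization() and deoptimize_all_marked() do.

#include "runtime/deoptimization.hpp"  // added at the top of nmethod.cpp by this patch

// Illustrative-only helper showing the new pattern used in the InstalledCode
// invalidation path above: mark first, then let the deoptimization code
// process everything that has been marked, rather than scheduling a
// VM_Deoptimize operation on the VMThread.
static void deoptimize_nmethod(nmethod* nm) {
  if (nm != NULL && nm->is_alive()) {
    nm->mark_for_deoptimization();
    Deoptimization::deoptimize_all_marked();
  }
}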