
src/hotspot/share/code/nmethod.cpp

rev 56251 : imported patch 8226705-v1
rev 56252 : imported patch 8226705-v2

*** 48,57 ****
--- 48,58 ----
  #include "oops/method.inline.hpp"
  #include "oops/methodData.hpp"
  #include "oops/oop.inline.hpp"
  #include "prims/jvmtiImpl.hpp"
  #include "runtime/atomic.hpp"
+ #include "runtime/deoptimization.hpp"
  #include "runtime/flags/flagSetting.hpp"
  #include "runtime/frame.inline.hpp"
  #include "runtime/handles.inline.hpp"
  #include "runtime/jniHandles.inline.hpp"
  #include "runtime/orderAccess.hpp"
*** 474,484 ****
    if (nm != NULL) {
      // verify nmethod
      debug_only(nm->verify();) // might block

      nm->log_new_nmethod();
-     nm->make_in_use();
    }
    return nm;
  }

  nmethod* nmethod::new_nmethod(const methodHandle& method,
--- 475,484 ----
*** 1136,1145 ****
--- 1136,1150 ----
    mdo->inc_decompile_count();
  }

  bool nmethod::try_transition(int new_state_int) {
    signed char new_state = new_state_int;
+ #ifdef DEBUG
+   if (new_state != unloaded) {
+     assert_lock_strong(CompiledMethod_lock);
+   }
+ #endif
    for (;;) {
      signed char old_state = Atomic::load(&_state);
      if (old_state >= new_state) {
        // Ensure monotonicity of transitions.
        return false;
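
Note on the hunk above: try_transition() only ever moves _state forward, so a stronger state (not_entrant, zombie, unloaded) can never be undone by a racing caller, and the new DEBUG block records the patch's invariant that every transition except to unloaded now happens under CompiledMethod_lock. Below is a minimal, self-contained sketch of that monotonic CAS-loop pattern using std::atomic; the enum values, g_state and main() are illustrative only and are not part of the JDK sources.

    #include <atomic>
    #include <cstdio>

    // Illustrative states, ordered so that "later" states compare greater.
    enum State : signed char { in_use = 0, not_entrant = 1, zombie = 2, unloaded = 3 };

    static std::atomic<signed char> g_state{in_use};

    // Returns true if this call performed the transition, false if the state
    // was already at or beyond new_state (monotonicity).
    static bool try_transition(signed char new_state) {
      for (;;) {
        signed char old_state = g_state.load();
        if (old_state >= new_state) {
          return false;                       // never go backwards
        }
        if (g_state.compare_exchange_weak(old_state, new_state)) {
          return true;                        // we won the race
        }
        // CAS failed: another thread moved the state; re-evaluate and retry.
      }
    }

    int main() {
      std::printf("%d\n", try_transition(not_entrant)); // 1: performed
      std::printf("%d\n", try_transition(zombie));      // 1: performed
      std::printf("%d\n", try_transition(not_entrant)); // 0: already past it
    }

The hunk itself shows only the Atomic::load and the monotonicity check; the compare-and-swap that completes the loop sits below the lines included in this hunk.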
*** 1191,1205 ****
    // If _method is already NULL the Method* is about to be unloaded,
    // so we don't have to break the cycle. Note that it is possible to
    // have the Method* live here, in case we unload the nmethod because
    // it is pointing to some oop (other than the Method*) being unloaded.
    if (_method != NULL) {
!     // OSR methods point to the Method*, but the Method* does not
!     // point back!
!     if (_method->code() == this) {
!       _method->clear_code(); // Break a cycle
!     }
    }

    // Make the class unloaded - i.e., change state and notify sweeper
    assert(SafepointSynchronize::is_at_safepoint() || Thread::current()->is_ConcurrentGC_thread(),
           "must be at safepoint");
--- 1196,1206 ----
    // If _method is already NULL the Method* is about to be unloaded,
    // so we don't have to break the cycle. Note that it is possible to
    // have the Method* live here, in case we unload the nmethod because
    // it is pointing to some oop (other than the Method*) being unloaded.
    if (_method != NULL) {
!     _method->unlink_code(this);
    }

    // Make the class unloaded - i.e., change state and notify sweeper
    assert(SafepointSynchronize::is_at_safepoint() || Thread::current()->is_ConcurrentGC_thread(),
           "must be at safepoint");
*** 1279,1298 ****
    if (PrintCompilation && _state != unloaded) {
      print_on(tty, state_msg);
    }
  }

! void nmethod::unlink_from_method(bool acquire_lock) {
!   // We need to check if both the _code and _from_compiled_code_entry_point
!   // refer to this nmethod because there is a race in setting these two fields
!   // in Method* as seen in bugid 4947125.
!   // If the vep() points to the zombie nmethod, the memory for the nmethod
!   // could be flushed and the compiler and vtable stubs could still call
!   // through it.
!   if (method() != NULL && (method()->code() == this ||
!       method()->from_compiled_entry() == verified_entry_point())) {
!     method()->clear_code(acquire_lock);
    }
  }

  /**
   * Common functionality for both make_not_entrant and make_zombie
--- 1280,1292 ----
    if (PrintCompilation && _state != unloaded) {
      print_on(tty, state_msg);
    }
  }

! void nmethod::unlink_from_method() {
!   if (method() != NULL) {
!     method()->unlink_code(this);
    }
  }

  /**
   * Common functionality for both make_not_entrant and make_zombie
*** 1315,1342 ****
    // This can be called while the system is already at a safepoint which is ok
    NoSafepointVerifier nsv;

    // during patching, depending on the nmethod state we must notify the GC that
    // code has been unloaded, unregistering it. We cannot do this right while
!   // holding the Patching_lock because we need to use the CodeCache_lock. This
    // would be prone to deadlocks.
    // This flag is used to remember whether we need to later lock and unregister.
    bool nmethod_needs_unregister = false;

-   {
      // invalidate osr nmethod before acquiring the patching lock since
      // they both acquire leaf locks and we don't want a deadlock.
      // This logic is equivalent to the logic below for patching the
      // verified entry point of regular methods. We check that the
      // nmethod is in use to ensure that it is invalidated only once.
      if (is_osr_method() && is_in_use()) {
        // this effectively makes the osr nmethod not entrant
        invalidate_osr_method();
      }

      // Enter critical section. Does not block for safepoint.
!     MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);

      if (Atomic::load(&_state) >= state) {
        // another thread already performed this transition so nothing
        // to do, but return false to indicate this.
        return false;
--- 1309,1336 ----
    // This can be called while the system is already at a safepoint which is ok
    NoSafepointVerifier nsv;

    // during patching, depending on the nmethod state we must notify the GC that
    // code has been unloaded, unregistering it. We cannot do this right while
!   // holding the CompiledMethod_lock because we need to use the CodeCache_lock. This
    // would be prone to deadlocks.
    // This flag is used to remember whether we need to later lock and unregister.
    bool nmethod_needs_unregister = false;

      // invalidate osr nmethod before acquiring the patching lock since
      // they both acquire leaf locks and we don't want a deadlock.
      // This logic is equivalent to the logic below for patching the
      // verified entry point of regular methods. We check that the
      // nmethod is in use to ensure that it is invalidated only once.
      if (is_osr_method() && is_in_use()) {
        // this effectively makes the osr nmethod not entrant
        invalidate_osr_method();
      }

+   {
      // Enter critical section. Does not block for safepoint.
!     MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);

      if (Atomic::load(&_state) >= state) {
        // another thread already performed this transition so nothing
        // to do, but return false to indicate this.
        return false;
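
Note on the hunk above: the locking change replaces Patching_lock with CompiledMethod_lock and uses the idiom MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, ...), i.e. the lock is acquired only when the current thread does not already hold it, relying on the fact that HotSpot's MutexLocker does nothing when given a NULL mutex. Below is a standalone sketch of that idiom in portable C++; OwnedMutex, Locker, do_transition() and g_lock are invented names for illustration and do not correspond to HotSpot classes.

    #include <atomic>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    // Toy mutex that remembers its owner so a caller can ask "do I hold this?".
    class OwnedMutex {
      std::mutex _m;
      std::atomic<std::thread::id> _owner{};
    public:
      bool owned_by_self() const { return _owner.load() == std::this_thread::get_id(); }
      void lock()   { _m.lock(); _owner.store(std::this_thread::get_id()); }
      void unlock() { _owner.store(std::thread::id()); _m.unlock(); }
    };

    // Scoped locker that does nothing when given a null mutex, mirroring how
    // HotSpot's MutexLocker skips a NULL argument.
    class Locker {
      OwnedMutex* _m;
    public:
      explicit Locker(OwnedMutex* m) : _m(m) { if (_m != nullptr) _m->lock(); }
      ~Locker() { if (_m != nullptr) _m->unlock(); }
    };

    static OwnedMutex g_lock;

    // Safe to call whether or not the caller already holds g_lock.
    static void do_transition(const char* who) {
      Locker l(g_lock.owned_by_self() ? nullptr : &g_lock);
      std::printf("%s: state changed under the lock\n", who);
    }

    int main() {
      do_transition("caller without the lock");
      Locker outer(&g_lock);                         // now we own the lock
      do_transition("caller that already holds it"); // no self-deadlock
    }

HotSpot mutexes are not recursive, so re-acquiring a lock the thread already owns would assert; skipping acquisition via the NULL trick, as in the hunk above, is the established workaround rather than switching to a recursive lock.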
*** 1387,1398 ****

      // Log the transition once
      log_state_change();

      // Remove nmethod from method.
!     unlink_from_method(false /* already owns Patching_lock */);
!   } // leave critical region under Patching_lock

  #if INCLUDE_JVMCI
    // Invalidate can't occur while holding the Patching lock
    JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
    if (nmethod_data != NULL) {
--- 1381,1393 ----

      // Log the transition once
      log_state_change();

      // Remove nmethod from method.
!     unlink_from_method();
!
!   } // leave critical region under CompiledMethod_lock

  #if INCLUDE_JVMCI
    // Invalidate can't occur while holding the Patching lock
    JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
    if (nmethod_data != NULL) {