< prev index next >

src/hotspot/share/code/nmethod.cpp

Print this page
rev 54621 : imported patch 8221734-v1


  31 #include "code/nativeInst.hpp"
  32 #include "code/nmethod.hpp"
  33 #include "code/scopeDesc.hpp"
  34 #include "compiler/abstractCompiler.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/compileLog.hpp"
  37 #include "compiler/compilerDirectives.hpp"
  38 #include "compiler/directivesParser.hpp"
  39 #include "compiler/disassembler.hpp"
  40 #include "interpreter/bytecode.hpp"
  41 #include "logging/log.hpp"
  42 #include "logging/logStream.hpp"
  43 #include "memory/allocation.inline.hpp"
  44 #include "memory/resourceArea.hpp"
  45 #include "oops/access.inline.hpp"
  46 #include "oops/method.inline.hpp"
  47 #include "oops/methodData.hpp"
  48 #include "oops/oop.inline.hpp"
  49 #include "prims/jvmtiImpl.hpp"
  50 #include "runtime/atomic.hpp"

  51 #include "runtime/flags/flagSetting.hpp"
  52 #include "runtime/frame.inline.hpp"
  53 #include "runtime/handles.inline.hpp"
  54 #include "runtime/jniHandles.inline.hpp"
  55 #include "runtime/orderAccess.hpp"
  56 #include "runtime/os.hpp"
  57 #include "runtime/safepointVerifiers.hpp"
  58 #include "runtime/sharedRuntime.hpp"
  59 #include "runtime/sweeper.hpp"
  60 #include "runtime/vmThread.hpp"
  61 #include "utilities/align.hpp"
  62 #include "utilities/dtrace.hpp"
  63 #include "utilities/events.hpp"
  64 #include "utilities/resourceHash.hpp"
  65 #include "utilities/xmlstream.hpp"
  66 #if INCLUDE_JVMCI
  67 #include "jvmci/jvmciJavaClasses.hpp"
  68 #endif
  69 
  70 #ifdef DTRACE_ENABLED


1066   }
1067   // Unlink the osr method, so we do not look this up again
1068   if (is_osr_method()) {
1069     // Invalidate the osr nmethod only once
1070     if (is_in_use()) {
1071       invalidate_osr_method();
1072     }
1073 #ifdef ASSERT
1074     if (method() != NULL) {
1075       // Make sure osr nmethod is invalidated, i.e. not on the list
1076       bool found = method()->method_holder()->remove_osr_nmethod(this);
1077       assert(!found, "osr nmethod should have been invalidated");
1078     }
1079 #endif
1080   }
1081 
1082   // If _method is already NULL the Method* is about to be unloaded,
1083   // so we don't have to break the cycle. Note that it is possible to
1084   // have the Method* live here, in case we unload the nmethod because
1085   // it is pointing to some oop (other than the Method*) being unloaded.
1086   if (_method != NULL) {
1087     // OSR methods point to the Method*, but the Method* does not
1088     // point back!
1089     if (_method->code() == this) {
1090       _method->clear_code(); // Break a cycle
1091     }
1092   }
1093 
1094   // Make the class unloaded - i.e., change state and notify sweeper
1095   assert(SafepointSynchronize::is_at_safepoint() || Thread::current()->is_ConcurrentGC_thread(),
1096          "must be at safepoint");
1097 
1098   {
1099     // Clear ICStubs and release any CompiledICHolders.
1100     CompiledICLocker ml(this);
1101     clear_ic_callsites();
1102   }
1103 
1104   // Unregister must be done before the state change
1105   {
1106     MutexLockerEx ml(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock,
1107                      Mutex::_no_safepoint_check_flag);
1108     Universe::heap()->unregister_nmethod(this);
1109     CodeCache::unregister_old_nmethod(this);
1110   }
1111 
1112   // Clear the method of this dead nmethod


1152         xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
1153                          os::current_thread_id());
1154       } else {
1155         xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
1156                          os::current_thread_id(),
1157                          (_state == zombie ? " zombie='1'" : ""));
1158       }
1159       log_identity(xtty);
1160       xtty->stamp();
1161       xtty->end_elem();
1162     }
1163   }
1164 
1165   const char *state_msg = _state == zombie ? "made zombie" : "made not entrant";
1166   CompileTask::print_ul(this, state_msg);
1167   if (PrintCompilation && _state != unloaded) {
1168     print_on(tty, state_msg);
1169   }
1170 }
1171 
     // Break the Method* -> nmethod links (Method::_code and the compiled
     // entry point) so new calls can no longer dispatch into this nmethod.
     // 'acquire_lock' is passed false when the caller already holds the
     // Patching_lock (see the make_not_entrant_or_zombie call site);
     // presumably clear_code() then skips taking it — confirm in method.cpp.
1172 void nmethod::unlink_from_method(bool acquire_lock) {
1173   // We need to check if both the _code and _from_compiled_code_entry_point
1174   // refer to this nmethod because there is a race in setting these two fields
1175   // in Method* as seen in bugid 4947125.
1176   // If the vep() points to the zombie nmethod, the memory for the nmethod
1177   // could be flushed and the compiler and vtable stubs could still call
1178   // through it.
1179   if (method() != NULL && (method()->code() == this ||
1180                            method()->from_compiled_entry() == verified_entry_point())) {
1181     method()->clear_code(acquire_lock);
1182   }
1183 }
1184 
1185 /**
1186  * Common functionality for both make_not_entrant and make_zombie
1187  */
1188 bool nmethod::make_not_entrant_or_zombie(int state) {
1189   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
1190   assert(!is_zombie(), "should not already be a zombie");
1191 
1192   if (_state == state) {
1193     // Avoid taking the lock if already in required state.
1194     // This is safe from races because the state is an end-state,
1195     // which the nmethod cannot back out of once entered.
1196     // No need for fencing either.
1197     return false;
1198   }
1199 
1200   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
1201   nmethodLocker nml(this);
1202   methodHandle the_method(method());
1203   // This can be called while the system is already at a safepoint which is ok
1204   NoSafepointVerifier nsv(true, !SafepointSynchronize::is_at_safepoint());
1205 
1206   // during patching, depending on the nmethod state we must notify the GC that
1207   // code has been unloaded, unregistering it. We cannot do this right while
1208   // holding the Patching_lock because we need to use the CodeCache_lock. This
1209   // would be prone to deadlocks.
1210   // This flag is used to remember whether we need to later lock and unregister.
1211   bool nmethod_needs_unregister = false;
1212 
1213   {
1214     // invalidate osr nmethod before acquiring the patching lock since
1215     // they both acquire leaf locks and we don't want a deadlock.
1216     // This logic is equivalent to the logic below for patching the
1217     // verified entry point of regular methods. We check that the
1218     // nmethod is in use to ensure that it is invalidated only once.
1219     if (is_osr_method() && is_in_use()) {
1220       // this effectively makes the osr nmethod not entrant
1221       invalidate_osr_method();
1222     }
1223 

1224     // Enter critical section.  Does not block for safepoint.
1225     MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
1226 
1227     if (_state == state) {
1228       // another thread already performed this transition so nothing
1229       // to do, but return false to indicate this.
1230       return false;
1231     }
1232 
1233     // The caller can be calling the method statically or through an inline
1234     // cache call.
1235     if (!is_osr_method() && !is_not_entrant()) {
1236       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
1237                   SharedRuntime::get_handle_wrong_method_stub());
1238     }
1239 
1240     if (is_in_use() && update_recompile_counts()) {
1241       // It's a true state change, so mark the method as decompiled.
1242       // Do it only for transition from alive.
1243       inc_decompile_count();
1244     }
1245 


1252 
1253     // Must happen before state change. Otherwise we have a race condition in
1254     // nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
1255     // transition its state from 'not_entrant' to 'zombie' without having to wait
1256     // for stack scanning.
1257     if (state == not_entrant) {
1258       mark_as_seen_on_stack();
1259       OrderAccess::storestore(); // _stack_traversal_mark and _state
1260     }
1261 
1262     // Change state
1263     _state = state;
1264 
1265     // Log the transition once
1266     log_state_change();
1267 
1268     // Invalidate while holding the patching lock
1269     JVMCI_ONLY(maybe_invalidate_installed_code());
1270 
1271     // Remove nmethod from method.
1272     unlink_from_method(false /* already owns Patching_lock */);
1273   } // leave critical region under Patching_lock

1274 
1275 #ifdef ASSERT
1276   if (is_osr_method() && method() != NULL) {
1277     // Make sure osr nmethod is invalidated, i.e. not on the list
1278     bool found = method()->method_holder()->remove_osr_nmethod(this);
1279     assert(!found, "osr nmethod should have been invalidated");
1280   }
1281 #endif
1282 
1283   // When the nmethod becomes zombie it is no longer alive so the
1284   // dependencies must be flushed.  nmethods in the not_entrant
1285   // state will be flushed later when the transition to zombie
1286   // happens or they get unloaded.
1287   if (state == zombie) {
1288     {
1289       // Flushing dependencies must be done before any possible
1290       // safepoint can sneak in, otherwise the oops used by the
1291       // dependency logic could have become stale.
1292       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1293       if (nmethod_needs_unregister) {


2841 #endif
2842 #ifdef COMPILER2
2843   c2_java_nmethod_stats.print_nmethod_stats("C2");
2844 #endif
2845 #if INCLUDE_JVMCI
2846   jvmci_java_nmethod_stats.print_nmethod_stats("JVMCI");
2847 #endif
2848   unknown_java_nmethod_stats.print_nmethod_stats("Unknown");
2849   DebugInformationRecorder::print_statistics();
2850 #ifndef PRODUCT
2851   pc_nmethod_stats.print_pc_stats();
2852 #endif
2853   Dependencies::print_statistics();
2854   if (xtty != NULL)  xtty->tail("statistics");
2855 }
2856 
2857 #endif // !PRODUCT
2858 
2859 #if INCLUDE_JVMCI
     // Destroy the weak JNI handle linking this nmethod to its JVMCI
     // InstalledCode mirror.  Caller must hold the Patching_lock or be at a
     // safepoint (asserted below); idempotent once the handle is NULL.
2860 void nmethod::clear_jvmci_installed_code() {
2861   assert_locked_or_safepoint(Patching_lock);
2862   if (_jvmci_installed_code != NULL) {
2863     JNIHandles::destroy_weak_global(_jvmci_installed_code);
2864     _jvmci_installed_code = NULL;
2865   }
2866 }
2867 
     // Destroy the weak JNI handle to the JVMCI speculation log.  Same
     // locking contract as clear_jvmci_installed_code(): Patching_lock held
     // or at a safepoint; idempotent once the handle is NULL.
2868 void nmethod::clear_speculation_log() {
2869   assert_locked_or_safepoint(Patching_lock);
2870   if (_speculation_log != NULL) {
2871     JNIHandles::destroy_weak_global(_speculation_log);
2872     _speculation_log = NULL;
2873   }
2874 }
2875 
2876 void nmethod::maybe_invalidate_installed_code() {
2877   if (!is_compiled_by_jvmci()) {
2878     return;
2879   }
2880 
2881   assert(Patching_lock->is_locked() ||
2882          SafepointSynchronize::is_at_safepoint(), "should be performed under a lock for consistency");
2883   oop installed_code = JNIHandles::resolve(_jvmci_installed_code);
2884   if (installed_code != NULL) {
2885     // Update the values in the InstalledCode instance if it still refers to this nmethod
2886     nmethod* nm = (nmethod*)InstalledCode::address(installed_code);
2887     if (nm == this) {
2888       if (!is_alive() || is_unloading()) {
2889         // Break the link between nmethod and InstalledCode such that the nmethod
2890         // can subsequently be flushed safely.  The link must be maintained while
2891         // the method could have live activations since invalidateInstalledCode
2892         // might want to invalidate all existing activations.
2893         InstalledCode::set_address(installed_code, 0);
2894         InstalledCode::set_entryPoint(installed_code, 0);
2895       } else if (is_not_entrant()) {
2896         // Remove the entry point so any invocation will fail but keep
2897         // the address link around that so that existing activations can
2898         // be invalidated.
2899         InstalledCode::set_entryPoint(installed_code, 0);
2900       }
2901     }


2905     // updates to the InstalledCode instance have been performed.
2906     clear_jvmci_installed_code();
2907     clear_speculation_log();
2908   }
2909 }
2910 
     // Invalidate the nmethod referenced by a JVMCI InstalledCode object:
     // throws NullPointerException for a null InstalledCode; if the
     // referenced nmethod is still alive it is marked for deoptimization
     // and a VM_Deoptimize operation is executed on the VMThread; finally
     // the InstalledCode.address link is cleared under the Patching_lock so
     // that exactly one of possibly many racing threads performs the clear.
2911 void nmethod::invalidate_installed_code(Handle installedCode, TRAPS) {
2912   if (installedCode() == NULL) {
2913     THROW(vmSymbols::java_lang_NullPointerException());
2914   }
2915   jlong nativeMethod = InstalledCode::address(installedCode);
2916   nmethod* nm = (nmethod*)nativeMethod;
2917   if (nm == NULL) {
2918     // Nothing to do
2919     return;
2920   }
2921 
     // Keep the nmethod from being flushed while we operate on it.
2922   nmethodLocker nml(nm);
2923 #ifdef ASSERT
2924   {
2925     MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
2926     // This relationship can only be checked safely under a lock
2927     assert(!nm->is_alive() || nm->is_unloading() || nm->jvmci_installed_code() == installedCode(), "sanity check");
2928   }
2929 #endif
2930 
2931   if (nm->is_alive()) {
2932     // Invalidating the InstalledCode means we want the nmethod
2933     // to be deoptimized.
2934     nm->mark_for_deoptimization();
2935     VM_Deoptimize op;
2936     VMThread::execute(&op);
2937   }
2938 
2939   // Multiple threads could reach this point so we now need to
2940   // lock and re-check the link to the nmethod so that only one
2941   // thread clears it.
2942   MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
2943   if (InstalledCode::address(installedCode) == nativeMethod) {
2944       InstalledCode::set_address(installedCode, 0);
2945   }
2946 }
2947 
     // Resolve the weak handle to the JVMCI InstalledCode mirror.  Returns
     // NULL once cleared via clear_jvmci_installed_code() (weak handles may
     // also resolve to NULL if the referent has been collected).
2948 oop nmethod::jvmci_installed_code() {
2949   return JNIHandles::resolve(_jvmci_installed_code);
2950 }
2951 
     // Resolve the weak handle to the JVMCI speculation log oop; NULL once
     // cleared via clear_speculation_log() or if the referent died.
2952 oop nmethod::speculation_log() {
2953   return JNIHandles::resolve(_speculation_log);
2954 }
2955 
2956 char* nmethod::jvmci_installed_code_name(char* buf, size_t buflen) const {
2957   if (!this->is_compiled_by_jvmci()) {
2958     return NULL;
2959   }
2960   oop installed_code = JNIHandles::resolve(_jvmci_installed_code);
2961   if (installed_code != NULL) {
2962     oop installed_code_name = NULL;


  31 #include "code/nativeInst.hpp"
  32 #include "code/nmethod.hpp"
  33 #include "code/scopeDesc.hpp"
  34 #include "compiler/abstractCompiler.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/compileLog.hpp"
  37 #include "compiler/compilerDirectives.hpp"
  38 #include "compiler/directivesParser.hpp"
  39 #include "compiler/disassembler.hpp"
  40 #include "interpreter/bytecode.hpp"
  41 #include "logging/log.hpp"
  42 #include "logging/logStream.hpp"
  43 #include "memory/allocation.inline.hpp"
  44 #include "memory/resourceArea.hpp"
  45 #include "oops/access.inline.hpp"
  46 #include "oops/method.inline.hpp"
  47 #include "oops/methodData.hpp"
  48 #include "oops/oop.inline.hpp"
  49 #include "prims/jvmtiImpl.hpp"
  50 #include "runtime/atomic.hpp"
  51 #include "runtime/deoptimization.hpp"
  52 #include "runtime/flags/flagSetting.hpp"
  53 #include "runtime/frame.inline.hpp"
  54 #include "runtime/handles.inline.hpp"
  55 #include "runtime/jniHandles.inline.hpp"
  56 #include "runtime/orderAccess.hpp"
  57 #include "runtime/os.hpp"
  58 #include "runtime/safepointVerifiers.hpp"
  59 #include "runtime/sharedRuntime.hpp"
  60 #include "runtime/sweeper.hpp"
  61 #include "runtime/vmThread.hpp"
  62 #include "utilities/align.hpp"
  63 #include "utilities/dtrace.hpp"
  64 #include "utilities/events.hpp"
  65 #include "utilities/resourceHash.hpp"
  66 #include "utilities/xmlstream.hpp"
  67 #if INCLUDE_JVMCI
  68 #include "jvmci/jvmciJavaClasses.hpp"
  69 #endif
  70 
  71 #ifdef DTRACE_ENABLED


1067   }
1068   // Unlink the osr method, so we do not look this up again
1069   if (is_osr_method()) {
1070     // Invalidate the osr nmethod only once
1071     if (is_in_use()) {
1072       invalidate_osr_method();
1073     }
1074 #ifdef ASSERT
1075     if (method() != NULL) {
1076       // Make sure osr nmethod is invalidated, i.e. not on the list
1077       bool found = method()->method_holder()->remove_osr_nmethod(this);
1078       assert(!found, "osr nmethod should have been invalidated");
1079     }
1080 #endif
1081   }
1082 
1083   // If _method is already NULL the Method* is about to be unloaded,
1084   // so we don't have to break the cycle. Note that it is possible to
1085   // have the Method* live here, in case we unload the nmethod because
1086   // it is pointing to some oop (other than the Method*) being unloaded.
1087   Method::unlink_code(_method, this); // Break a cycle






1088 
1089   // Make the class unloaded - i.e., change state and notify sweeper
1090   assert(SafepointSynchronize::is_at_safepoint() || Thread::current()->is_ConcurrentGC_thread(),
1091          "must be at safepoint");
1092 
1093   {
1094     // Clear ICStubs and release any CompiledICHolders.
1095     CompiledICLocker ml(this);
1096     clear_ic_callsites();
1097   }
1098 
1099   // Unregister must be done before the state change
1100   {
1101     MutexLockerEx ml(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock,
1102                      Mutex::_no_safepoint_check_flag);
1103     Universe::heap()->unregister_nmethod(this);
1104     CodeCache::unregister_old_nmethod(this);
1105   }
1106 
1107   // Clear the method of this dead nmethod


1147         xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
1148                          os::current_thread_id());
1149       } else {
1150         xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
1151                          os::current_thread_id(),
1152                          (_state == zombie ? " zombie='1'" : ""));
1153       }
1154       log_identity(xtty);
1155       xtty->stamp();
1156       xtty->end_elem();
1157     }
1158   }
1159 
1160   const char *state_msg = _state == zombie ? "made zombie" : "made not entrant";
1161   CompileTask::print_ul(this, state_msg);
1162   if (PrintCompilation && _state != unloaded) {
1163     print_on(tty, state_msg);
1164   }
1165 }
1166 
     // Break the Method* -> nmethod links.  The dual check of _code and the
     // from-compiled entry point described below is now delegated to
     // Method::unlink_code(), so this overload no longer needs the old
     // 'acquire_lock' flag; presumably unlink_code() handles its own
     // locking — confirm in method.cpp.
1167 void nmethod::unlink_from_method() {
1168   // We need to check if both the _code and _from_compiled_code_entry_point
1169   // refer to this nmethod because there is a race in setting these two fields
1170   // in Method* as seen in bugid 4947125.
1171   // If the vep() points to the zombie nmethod, the memory for the nmethod
1172   // could be flushed and the compiler and vtable stubs could still call
1173   // through it.
1174   Method::unlink_code(method(), this);



1175 }
1176 
1177 /**
1178  * Common functionality for both make_not_entrant and make_zombie
1179  */
1180 bool nmethod::make_not_entrant_or_zombie(int state) {
1181   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
1182   assert(!is_zombie(), "should not already be a zombie");
1183 
1184   if (_state == state) {
1185     // Avoid taking the lock if already in required state.
1186     // This is safe from races because the state is an end-state,
1187     // which the nmethod cannot back out of once entered.
1188     // No need for fencing either.
1189     return false;
1190   }
1191 
1192   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
1193   nmethodLocker nml(this);
1194   methodHandle the_method(method());
1195   // This can be called while the system is already at a safepoint which is ok
1196   NoSafepointVerifier nsv(true, !SafepointSynchronize::is_at_safepoint());
1197 
1198   // during patching, depending on the nmethod state we must notify the GC that
1199   // code has been unloaded, unregistering it. We cannot do this right while
1200   // holding the CompiledMethod_lock because we need to use the CodeCache_lock. This
1201   // would be prone to deadlocks.
1202   // This flag is used to remember whether we need to later lock and unregister.
1203   bool nmethod_needs_unregister = false;
1204 

1205   // invalidate osr nmethod before acquiring the patching lock since
1206   // they both acquire leaf locks and we don't want a deadlock.
1207   // This logic is equivalent to the logic below for patching the
1208   // verified entry point of regular methods. We check that the
1209   // nmethod is in use to ensure that it is invalidated only once.
1210   if (is_osr_method() && is_in_use()) {
1211     // this effectively makes the osr nmethod not entrant
1212     invalidate_osr_method();
1213   }
1214 
1215   {
1216     // Enter critical section.  Does not block for safepoint.
1217     MutexLockerEx pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
1218 
1219     if (_state == state) {
1220       // another thread already performed this transition so nothing
1221       // to do, but return false to indicate this.
1222       return false;
1223     }
1224 
1225     // The caller can be calling the method statically or through an inline
1226     // cache call.
1227     if (!is_osr_method() && !is_not_entrant()) {
1228       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
1229                   SharedRuntime::get_handle_wrong_method_stub());
1230     }
1231 
1232     if (is_in_use() && update_recompile_counts()) {
1233       // It's a true state change, so mark the method as decompiled.
1234       // Do it only for transition from alive.
1235       inc_decompile_count();
1236     }
1237 


1244 
1245     // Must happen before state change. Otherwise we have a race condition in
1246     // nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
1247     // transition its state from 'not_entrant' to 'zombie' without having to wait
1248     // for stack scanning.
1249     if (state == not_entrant) {
1250       mark_as_seen_on_stack();
1251       OrderAccess::storestore(); // _stack_traversal_mark and _state
1252     }
1253 
1254     // Change state
1255     _state = state;
1256 
1257     // Log the transition once
1258     log_state_change();
1259 
1260     // Invalidate while holding the patching lock
1261     JVMCI_ONLY(maybe_invalidate_installed_code());
1262 
1263     // Remove nmethod from method.
1264     unlink_from_method();
1265 
1266   } // leave critical region under CompiledMethod_lock
1267 
1268 #ifdef ASSERT
1269   if (is_osr_method() && method() != NULL) {
1270     // Make sure osr nmethod is invalidated, i.e. not on the list
1271     bool found = method()->method_holder()->remove_osr_nmethod(this);
1272     assert(!found, "osr nmethod should have been invalidated");
1273   }
1274 #endif
1275 
1276   // When the nmethod becomes zombie it is no longer alive so the
1277   // dependencies must be flushed.  nmethods in the not_entrant
1278   // state will be flushed later when the transition to zombie
1279   // happens or they get unloaded.
1280   if (state == zombie) {
1281     {
1282       // Flushing dependencies must be done before any possible
1283       // safepoint can sneak in, otherwise the oops used by the
1284       // dependency logic could have become stale.
1285       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1286       if (nmethod_needs_unregister) {


2834 #endif
2835 #ifdef COMPILER2
2836   c2_java_nmethod_stats.print_nmethod_stats("C2");
2837 #endif
2838 #if INCLUDE_JVMCI
2839   jvmci_java_nmethod_stats.print_nmethod_stats("JVMCI");
2840 #endif
2841   unknown_java_nmethod_stats.print_nmethod_stats("Unknown");
2842   DebugInformationRecorder::print_statistics();
2843 #ifndef PRODUCT
2844   pc_nmethod_stats.print_pc_stats();
2845 #endif
2846   Dependencies::print_statistics();
2847   if (xtty != NULL)  xtty->tail("statistics");
2848 }
2849 
2850 #endif // !PRODUCT
2851 
2852 #if INCLUDE_JVMCI
     // Destroy the weak JNI handle linking this nmethod to its JVMCI
     // InstalledCode mirror.  In this revision the lock guarding the JVMCI
     // link fields is CompiledMethod_lock (was Patching_lock); caller must
     // hold it or be at a safepoint.  Idempotent once the handle is NULL.
2853 void nmethod::clear_jvmci_installed_code() {
2854   assert_locked_or_safepoint(CompiledMethod_lock);
2855   if (_jvmci_installed_code != NULL) {
2856     JNIHandles::destroy_weak_global(_jvmci_installed_code);
2857     _jvmci_installed_code = NULL;
2858   }
2859 }
2860 
     // Destroy the weak JNI handle to the JVMCI speculation log.  Same
     // contract as clear_jvmci_installed_code(): CompiledMethod_lock held
     // or at a safepoint; idempotent once the handle is NULL.
2861 void nmethod::clear_speculation_log() {
2862   assert_locked_or_safepoint(CompiledMethod_lock);
2863   if (_speculation_log != NULL) {
2864     JNIHandles::destroy_weak_global(_speculation_log);
2865     _speculation_log = NULL;
2866   }
2867 }
2868 
2869 void nmethod::maybe_invalidate_installed_code() {
2870   if (!is_compiled_by_jvmci()) {
2871     return;
2872   }
2873 
2874   assert(CompiledMethod_lock->is_locked() ||
2875          SafepointSynchronize::is_at_safepoint(), "should be performed under a lock for consistency");
2876   oop installed_code = JNIHandles::resolve(_jvmci_installed_code);
2877   if (installed_code != NULL) {
2878     // Update the values in the InstalledCode instance if it still refers to this nmethod
2879     nmethod* nm = (nmethod*)InstalledCode::address(installed_code);
2880     if (nm == this) {
2881       if (!is_alive() || is_unloading()) {
2882         // Break the link between nmethod and InstalledCode such that the nmethod
2883         // can subsequently be flushed safely.  The link must be maintained while
2884         // the method could have live activations since invalidateInstalledCode
2885         // might want to invalidate all existing activations.
2886         InstalledCode::set_address(installed_code, 0);
2887         InstalledCode::set_entryPoint(installed_code, 0);
2888       } else if (is_not_entrant()) {
2889         // Remove the entry point so any invocation will fail but keep
2890         // the address link around that so that existing activations can
2891         // be invalidated.
2892         InstalledCode::set_entryPoint(installed_code, 0);
2893       }
2894     }


2898     // updates to the InstalledCode instance have been performed.
2899     clear_jvmci_installed_code();
2900     clear_speculation_log();
2901   }
2902 }
2903 
     // Invalidate the nmethod referenced by a JVMCI InstalledCode object:
     // throws NullPointerException for a null InstalledCode; if the
     // referenced nmethod is still alive it is marked for deoptimization
     // and deoptimized via Deoptimization::deoptimize_all_marked() (this
     // revision replaces the old VMThread-executed VM_Deoptimize — the
     // handshake-based scheme of JDK-8221734); finally the
     // InstalledCode.address link is cleared under CompiledMethod_lock so
     // that exactly one of possibly many racing threads performs the clear.
2904 void nmethod::invalidate_installed_code(Handle installedCode, TRAPS) {
2905   if (installedCode() == NULL) {
2906     THROW(vmSymbols::java_lang_NullPointerException());
2907   }
2908   jlong nativeMethod = InstalledCode::address(installedCode);
2909   nmethod* nm = (nmethod*)nativeMethod;
2910   if (nm == NULL) {
2911     // Nothing to do
2912     return;
2913   }
2914 
     // Keep the nmethod from being flushed while we operate on it.
2915   nmethodLocker nml(nm);
2916 #ifdef ASSERT
2917   {
2918     MutexLockerEx pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
2919     // This relationship can only be checked safely under a lock
2920     assert(!nm->is_alive() || nm->is_unloading() || nm->jvmci_installed_code() == installedCode(), "sanity check");
2921   }
2922 #endif
2923 
2924   if (nm->is_alive()) {
2925     // Invalidating the InstalledCode means we want the nmethod
2926     // to be deoptimized.
2927     nm->mark_for_deoptimization();
2928     Deoptimization::deoptimize_all_marked();

2929   }
2930 
2931   // Multiple threads could reach this point so we now need to
2932   // lock and re-check the link to the nmethod so that only one
2933   // thread clears it.
2934   MutexLockerEx pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
2935   if (InstalledCode::address(installedCode) == nativeMethod) {
2936       InstalledCode::set_address(installedCode, 0);
2937   }
2938 }
2939 
     // Resolve the weak handle to the JVMCI InstalledCode mirror.  Returns
     // NULL once cleared via clear_jvmci_installed_code() (weak handles may
     // also resolve to NULL if the referent has been collected).
2940 oop nmethod::jvmci_installed_code() {
2941   return JNIHandles::resolve(_jvmci_installed_code);
2942 }
2943 
     // Resolve the weak handle to the JVMCI speculation log oop; NULL once
     // cleared via clear_speculation_log() or if the referent died.
2944 oop nmethod::speculation_log() {
2945   return JNIHandles::resolve(_speculation_log);
2946 }
2947 
2948 char* nmethod::jvmci_installed_code_name(char* buf, size_t buflen) const {
2949   if (!this->is_compiled_by_jvmci()) {
2950     return NULL;
2951   }
2952   oop installed_code = JNIHandles::resolve(_jvmci_installed_code);
2953   if (installed_code != NULL) {
2954     oop installed_code_name = NULL;
< prev index next >