< prev index next >

src/hotspot/share/code/nmethod.cpp

Print this page
rev 54697 : imported patch 8221734-v2-merge


  31 #include "code/nativeInst.hpp"
  32 #include "code/nmethod.hpp"
  33 #include "code/scopeDesc.hpp"
  34 #include "compiler/abstractCompiler.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/compileLog.hpp"
  37 #include "compiler/compilerDirectives.hpp"
  38 #include "compiler/directivesParser.hpp"
  39 #include "compiler/disassembler.hpp"
  40 #include "interpreter/bytecode.hpp"
  41 #include "logging/log.hpp"
  42 #include "logging/logStream.hpp"
  43 #include "memory/allocation.inline.hpp"
  44 #include "memory/resourceArea.hpp"
  45 #include "oops/access.inline.hpp"
  46 #include "oops/method.inline.hpp"
  47 #include "oops/methodData.hpp"
  48 #include "oops/oop.inline.hpp"
  49 #include "prims/jvmtiImpl.hpp"
  50 #include "runtime/atomic.hpp"

  51 #include "runtime/flags/flagSetting.hpp"
  52 #include "runtime/frame.inline.hpp"
  53 #include "runtime/handles.inline.hpp"
  54 #include "runtime/jniHandles.inline.hpp"
  55 #include "runtime/orderAccess.hpp"
  56 #include "runtime/os.hpp"
  57 #include "runtime/safepointVerifiers.hpp"
  58 #include "runtime/sharedRuntime.hpp"
  59 #include "runtime/sweeper.hpp"
  60 #include "runtime/vmThread.hpp"
  61 #include "utilities/align.hpp"
  62 #include "utilities/dtrace.hpp"
  63 #include "utilities/events.hpp"
  64 #include "utilities/resourceHash.hpp"
  65 #include "utilities/xmlstream.hpp"
  66 #if INCLUDE_JVMCI
  67 #include "jvmci/jvmciRuntime.hpp"
  68 #endif
  69 
  70 #ifdef DTRACE_ENABLED


1102   }
1103   // Unlink the osr method, so we do not look this up again
1104   if (is_osr_method()) {
1105     // Invalidate the osr nmethod only once
1106     if (is_in_use()) {
1107       invalidate_osr_method();
1108     }
1109 #ifdef ASSERT
1110     if (method() != NULL) {
1111       // Make sure osr nmethod is invalidated, i.e. not on the list
1112       bool found = method()->method_holder()->remove_osr_nmethod(this);
1113       assert(!found, "osr nmethod should have been invalidated");
1114     }
1115 #endif
1116   }
1117 
1118   // If _method is already NULL the Method* is about to be unloaded,
1119   // so we don't have to break the cycle. Note that it is possible to
1120   // have the Method* live here, in case we unload the nmethod because
1121   // it is pointing to some oop (other than the Method*) being unloaded.
1122   if (_method != NULL) {
1123     // OSR methods point to the Method*, but the Method* does not
1124     // point back!
1125     if (_method->code() == this) {
1126       _method->clear_code(); // Break a cycle
1127     }
1128   }
1129 
1130   // Make the class unloaded - i.e., change state and notify sweeper
1131   assert(SafepointSynchronize::is_at_safepoint() || Thread::current()->is_ConcurrentGC_thread(),
1132          "must be at safepoint");
1133 
1134   {
1135     // Clear ICStubs and release any CompiledICHolders.
1136     CompiledICLocker ml(this);
1137     clear_ic_callsites();
1138   }
1139 
1140   // Unregister must be done before the state change
1141   {
1142     MutexLocker ml(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock,
1143                      Mutex::_no_safepoint_check_flag);
1144     Universe::heap()->unregister_nmethod(this);
1145     CodeCache::unregister_old_nmethod(this);
1146   }
1147 
1148   // Clear the method of this dead nmethod


1190         xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
1191                          os::current_thread_id());
1192       } else {
1193         xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
1194                          os::current_thread_id(),
1195                          (_state == zombie ? " zombie='1'" : ""));
1196       }
1197       log_identity(xtty);
1198       xtty->stamp();
1199       xtty->end_elem();
1200     }
1201   }
1202 
1203   const char *state_msg = _state == zombie ? "made zombie" : "made not entrant";
1204   CompileTask::print_ul(this, state_msg);
1205   if (PrintCompilation && _state != unloaded) {
1206     print_on(tty, state_msg);
1207   }
1208 }
1209 
// Detach this nmethod from its Method* so the method no longer dispatches
// into this (dying) compiled code. Both Method::_code and the from-compiled
// entry point are checked — see the race note below.
// acquire_lock: pass false only when the caller already holds the
// Patching_lock (see the call site in make_not_entrant_or_zombie, which
// passes false /* already owns Patching_lock */).
1210 void nmethod::unlink_from_method(bool acquire_lock) {
1211   // We need to check if both the _code and _from_compiled_code_entry_point
1212   // refer to this nmethod because there is a race in setting these two fields
1213   // in Method* as seen in bugid 4947125.
1214   // If the vep() points to the zombie nmethod, the memory for the nmethod
1215   // could be flushed and the compiler and vtable stubs could still call
1216   // through it.
1217   if (method() != NULL && (method()->code() == this ||
1218                            method()->from_compiled_entry() == verified_entry_point())) {
1219     method()->clear_code(acquire_lock);
1220   }
1221 }
1222 
1223 /**
1224  * Common functionality for both make_not_entrant and make_zombie
1225  */
1226 bool nmethod::make_not_entrant_or_zombie(int state) {
1227   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
1228   assert(!is_zombie(), "should not already be a zombie");
1229 
1230   if (_state == state) {
1231     // Avoid taking the lock if already in required state.
1232     // This is safe from races because the state is an end-state,
1233     // which the nmethod cannot back out of once entered.
1234     // No need for fencing either.
1235     return false;
1236   }
1237 
1238   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
1239   nmethodLocker nml(this);
1240   methodHandle the_method(method());
1241   // This can be called while the system is already at a safepoint which is ok
1242   NoSafepointVerifier nsv(true, !SafepointSynchronize::is_at_safepoint());
1243 
1244   // during patching, depending on the nmethod state we must notify the GC that
1245   // code has been unloaded, unregistering it. We cannot do this right while
1246   // holding the Patching_lock because we need to use the CodeCache_lock. This
1247   // would be prone to deadlocks.
1248   // This flag is used to remember whether we need to later lock and unregister.
1249   bool nmethod_needs_unregister = false;
1250 
1251   {
1252     // invalidate osr nmethod before acquiring the patching lock since
1253     // they both acquire leaf locks and we don't want a deadlock.
1254     // This logic is equivalent to the logic below for patching the
1255     // verified entry point of regular methods. We check that the
1256     // nmethod is in use to ensure that it is invalidated only once.
1257     if (is_osr_method() && is_in_use()) {
1258       // this effectively makes the osr nmethod not entrant
1259       invalidate_osr_method();
1260     }
1261 

1262     // Enter critical section.  Does not block for safepoint.
1263     MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
1264 
1265     if (_state == state) {
1266       // another thread already performed this transition so nothing
1267       // to do, but return false to indicate this.
1268       return false;
1269     }
1270 
1271     // The caller can be calling the method statically or through an inline
1272     // cache call.
1273     if (!is_osr_method() && !is_not_entrant()) {
1274       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
1275                   SharedRuntime::get_handle_wrong_method_stub());
1276     }
1277 
1278     if (is_in_use() && update_recompile_counts()) {
1279       // It's a true state change, so mark the method as decompiled.
1280       // Do it only for transition from alive.
1281       inc_decompile_count();
1282     }
1283 


1287     if ((state == zombie) && !is_unloaded()) {
1288       nmethod_needs_unregister = true;
1289     }
1290 
1291     // Must happen before state change. Otherwise we have a race condition in
1292     // nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
1293     // transition its state from 'not_entrant' to 'zombie' without having to wait
1294     // for stack scanning.
1295     if (state == not_entrant) {
1296       mark_as_seen_on_stack();
1297       OrderAccess::storestore(); // _stack_traversal_mark and _state
1298     }
1299 
1300     // Change state
1301     _state = state;
1302 
1303     // Log the transition once
1304     log_state_change();
1305 
1306     // Remove nmethod from method.
1307     unlink_from_method(false /* already owns Patching_lock */);
1308   } // leave critical region under Patching_lock

1309 
1310 #if INCLUDE_JVMCI
1311   // Invalidate can't occur while holding the Patching lock
1312   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
1313   if (nmethod_data != NULL) {
1314     nmethod_data->invalidate_nmethod_mirror(this);
1315   }
1316 #endif
1317 
1318 #ifdef ASSERT
1319   if (is_osr_method() && method() != NULL) {
1320     // Make sure osr nmethod is invalidated, i.e. not on the list
1321     bool found = method()->method_holder()->remove_osr_nmethod(this);
1322     assert(!found, "osr nmethod should have been invalidated");
1323   }
1324 #endif
1325 
1326   // When the nmethod becomes zombie it is no longer alive so the
1327   // dependencies must be flushed.  nmethods in the not_entrant
1328   // state will be flushed later when the transition to zombie




  31 #include "code/nativeInst.hpp"
  32 #include "code/nmethod.hpp"
  33 #include "code/scopeDesc.hpp"
  34 #include "compiler/abstractCompiler.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/compileLog.hpp"
  37 #include "compiler/compilerDirectives.hpp"
  38 #include "compiler/directivesParser.hpp"
  39 #include "compiler/disassembler.hpp"
  40 #include "interpreter/bytecode.hpp"
  41 #include "logging/log.hpp"
  42 #include "logging/logStream.hpp"
  43 #include "memory/allocation.inline.hpp"
  44 #include "memory/resourceArea.hpp"
  45 #include "oops/access.inline.hpp"
  46 #include "oops/method.inline.hpp"
  47 #include "oops/methodData.hpp"
  48 #include "oops/oop.inline.hpp"
  49 #include "prims/jvmtiImpl.hpp"
  50 #include "runtime/atomic.hpp"
  51 #include "runtime/deoptimization.hpp"
  52 #include "runtime/flags/flagSetting.hpp"
  53 #include "runtime/frame.inline.hpp"
  54 #include "runtime/handles.inline.hpp"
  55 #include "runtime/jniHandles.inline.hpp"
  56 #include "runtime/orderAccess.hpp"
  57 #include "runtime/os.hpp"
  58 #include "runtime/safepointVerifiers.hpp"
  59 #include "runtime/sharedRuntime.hpp"
  60 #include "runtime/sweeper.hpp"
  61 #include "runtime/vmThread.hpp"
  62 #include "utilities/align.hpp"
  63 #include "utilities/dtrace.hpp"
  64 #include "utilities/events.hpp"
  65 #include "utilities/resourceHash.hpp"
  66 #include "utilities/xmlstream.hpp"
  67 #if INCLUDE_JVMCI
  68 #include "jvmci/jvmciRuntime.hpp"
  69 #endif
  70 
  71 #ifdef DTRACE_ENABLED


1103   }
1104   // Unlink the osr method, so we do not look this up again
1105   if (is_osr_method()) {
1106     // Invalidate the osr nmethod only once
1107     if (is_in_use()) {
1108       invalidate_osr_method();
1109     }
1110 #ifdef ASSERT
1111     if (method() != NULL) {
1112       // Make sure osr nmethod is invalidated, i.e. not on the list
1113       bool found = method()->method_holder()->remove_osr_nmethod(this);
1114       assert(!found, "osr nmethod should have been invalidated");
1115     }
1116 #endif
1117   }
1118 
1119   // If _method is already NULL the Method* is about to be unloaded,
1120   // so we don't have to break the cycle. Note that it is possible to
1121   // have the Method* live here, in case we unload the nmethod because
1122   // it is pointing to some oop (other than the Method*) being unloaded.
1123   Method::unlink_code(_method, this); // Break a cycle






1124 
1125   // Make the class unloaded - i.e., change state and notify sweeper
1126   assert(SafepointSynchronize::is_at_safepoint() || Thread::current()->is_ConcurrentGC_thread(),
1127          "must be at safepoint");
1128 
1129   {
1130     // Clear ICStubs and release any CompiledICHolders.
1131     CompiledICLocker ml(this);
1132     clear_ic_callsites();
1133   }
1134 
1135   // Unregister must be done before the state change
1136   {
1137     MutexLocker ml(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock,
1138                      Mutex::_no_safepoint_check_flag);
1139     Universe::heap()->unregister_nmethod(this);
1140     CodeCache::unregister_old_nmethod(this);
1141   }
1142 
1143   // Clear the method of this dead nmethod


1185         xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
1186                          os::current_thread_id());
1187       } else {
1188         xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
1189                          os::current_thread_id(),
1190                          (_state == zombie ? " zombie='1'" : ""));
1191       }
1192       log_identity(xtty);
1193       xtty->stamp();
1194       xtty->end_elem();
1195     }
1196   }
1197 
1198   const char *state_msg = _state == zombie ? "made zombie" : "made not entrant";
1199   CompileTask::print_ul(this, state_msg);
1200   if (PrintCompilation && _state != unloaded) {
1201     print_on(tty, state_msg);
1202   }
1203 }
1204 
// Detach this nmethod from its Method* so the method no longer dispatches
// into this (dying) compiled code. In this revision the _code /
// from-compiled-entry consistency check and the clearing are delegated to
// Method::unlink_code, which also tolerates a NULL method().
// NOTE(review): presumably Method::unlink_code performs the check/clear under
// the CompiledMethod_lock, replacing the old acquire_lock parameter — confirm
// against method.cpp in this patch.
1205 void nmethod::unlink_from_method() {
1206   // We need to check if both the _code and _from_compiled_code_entry_point
1207   // refer to this nmethod because there is a race in setting these two fields
1208   // in Method* as seen in bugid 4947125.
1209   // If the vep() points to the zombie nmethod, the memory for the nmethod
1210   // could be flushed and the compiler and vtable stubs could still call
1211   // through it.
1212   Method::unlink_code(method(), this);
1213 }
1214 
1215 /**
1216  * Common functionality for both make_not_entrant and make_zombie
1217  */
1218 bool nmethod::make_not_entrant_or_zombie(int state) {
1219   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
1220   assert(!is_zombie(), "should not already be a zombie");
1221 
1222   if (_state == state) {
1223     // Avoid taking the lock if already in required state.
1224     // This is safe from races because the state is an end-state,
1225     // which the nmethod cannot back out of once entered.
1226     // No need for fencing either.
1227     return false;
1228   }
1229 
1230   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
1231   nmethodLocker nml(this);
1232   methodHandle the_method(method());
1233   // This can be called while the system is already at a safepoint which is ok
1234   NoSafepointVerifier nsv(true, !SafepointSynchronize::is_at_safepoint());
1235 
1236   // during patching, depending on the nmethod state we must notify the GC that
1237   // code has been unloaded, unregistering it. We cannot do this right while
1238   // holding the CompiledMethod_lock because we need to use the CodeCache_lock. This
1239   // would be prone to deadlocks.
1240   // This flag is used to remember whether we need to later lock and unregister.
1241   bool nmethod_needs_unregister = false;
1242 

1243   // invalidate osr nmethod before acquiring the CompiledMethod_lock since
1244   // they both acquire leaf locks and we don't want a deadlock.
1245   // This logic is equivalent to the logic below for patching the
1246   // verified entry point of regular methods. We check that the
1247   // nmethod is in use to ensure that it is invalidated only once.
1248   if (is_osr_method() && is_in_use()) {
1249     // this effectively makes the osr nmethod not entrant
1250     invalidate_osr_method();
1251   }
1252 
1253   {
1254     // Enter critical section.  Does not block for safepoint.
1255     MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
1256 
1257     if (_state == state) {
1258       // another thread already performed this transition so nothing
1259       // to do, but return false to indicate this.
1260       return false;
1261     }
1262 
1263     // The caller can be calling the method statically or through an inline
1264     // cache call.
1265     if (!is_osr_method() && !is_not_entrant()) {
1266       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
1267                   SharedRuntime::get_handle_wrong_method_stub());
1268     }
1269 
1270     if (is_in_use() && update_recompile_counts()) {
1271       // It's a true state change, so mark the method as decompiled.
1272       // Do it only for transition from alive.
1273       inc_decompile_count();
1274     }
1275 


1279     if ((state == zombie) && !is_unloaded()) {
1280       nmethod_needs_unregister = true;
1281     }
1282 
1283     // Must happen before state change. Otherwise we have a race condition in
1284     // nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
1285     // transition its state from 'not_entrant' to 'zombie' without having to wait
1286     // for stack scanning.
1287     if (state == not_entrant) {
1288       mark_as_seen_on_stack();
1289       OrderAccess::storestore(); // _stack_traversal_mark and _state
1290     }
1291 
1292     // Change state
1293     _state = state;
1294 
1295     // Log the transition once
1296     log_state_change();
1297 
1298     // Remove nmethod from method.
1299     unlink_from_method();
1300 
1301   } // leave critical region under CompiledMethod_lock
1302 
1303 #if INCLUDE_JVMCI
1304   // Invalidate can't occur while holding the CompiledMethod_lock
1305   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
1306   if (nmethod_data != NULL) {
1307     nmethod_data->invalidate_nmethod_mirror(this);
1308   }
1309 #endif
1310 
1311 #ifdef ASSERT
1312   if (is_osr_method() && method() != NULL) {
1313     // Make sure osr nmethod is invalidated, i.e. not on the list
1314     bool found = method()->method_holder()->remove_osr_nmethod(this);
1315     assert(!found, "osr nmethod should have been invalidated");
1316   }
1317 #endif
1318 
1319   // When the nmethod becomes zombie it is no longer alive so the
1320   // dependencies must be flushed.  nmethods in the not_entrant
1321   // state will be flushed later when the transition to zombie


< prev index next >