< prev index next >

src/hotspot/share/code/nmethod.cpp

Print this page
rev 54936 : [mq]: 8221734-v3


  32 #include "code/nmethod.hpp"
  33 #include "code/scopeDesc.hpp"
  34 #include "compiler/abstractCompiler.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/compileLog.hpp"
  37 #include "compiler/compilerDirectives.hpp"
  38 #include "compiler/directivesParser.hpp"
  39 #include "compiler/disassembler.hpp"
  40 #include "interpreter/bytecode.hpp"
  41 #include "logging/log.hpp"
  42 #include "logging/logStream.hpp"
  43 #include "memory/allocation.inline.hpp"
  44 #include "memory/resourceArea.hpp"
  45 #include "memory/universe.hpp"
  46 #include "oops/access.inline.hpp"
  47 #include "oops/method.inline.hpp"
  48 #include "oops/methodData.hpp"
  49 #include "oops/oop.inline.hpp"
  50 #include "prims/jvmtiImpl.hpp"
  51 #include "runtime/atomic.hpp"

  52 #include "runtime/flags/flagSetting.hpp"
  53 #include "runtime/frame.inline.hpp"
  54 #include "runtime/handles.inline.hpp"
  55 #include "runtime/jniHandles.inline.hpp"
  56 #include "runtime/orderAccess.hpp"
  57 #include "runtime/os.hpp"
  58 #include "runtime/safepointVerifiers.hpp"
  59 #include "runtime/sharedRuntime.hpp"
  60 #include "runtime/sweeper.hpp"
  61 #include "runtime/vmThread.hpp"
  62 #include "utilities/align.hpp"
  63 #include "utilities/dtrace.hpp"
  64 #include "utilities/events.hpp"
  65 #include "utilities/resourceHash.hpp"
  66 #include "utilities/xmlstream.hpp"
  67 #if INCLUDE_JVMCI
  68 #include "jvmci/jvmciRuntime.hpp"
  69 #endif
  70 
  71 #ifdef DTRACE_ENABLED


1104   // Unlink the osr method, so we do not look this up again
1105   if (is_osr_method()) {
1106     // Invalidate the osr nmethod only once
1107     if (is_in_use()) {
1108       invalidate_osr_method();
1109     }
1110 #ifdef ASSERT
1111     if (method() != NULL) {
1112       // Make sure osr nmethod is invalidated, i.e. not on the list
1113       bool found = method()->method_holder()->remove_osr_nmethod(this);
1114       assert(!found, "osr nmethod should have been invalidated");
1115     }
1116 #endif
1117   }
1118 
1119   // If _method is already NULL the Method* is about to be unloaded,
1120   // so we don't have to break the cycle. Note that it is possible to
1121   // have the Method* live here, in case we unload the nmethod because
1122   // it is pointing to some oop (other than the Method*) being unloaded.
1123   if (_method != NULL) {
1124     // OSR methods point to the Method*, but the Method* does not
1125     // point back!
1126     if (_method->code() == this) {
1127       _method->clear_code(); // Break a cycle
1128     }
1129   }
1130 
1131   // Make the class unloaded - i.e., change state and notify sweeper
1132   assert(SafepointSynchronize::is_at_safepoint() || Thread::current()->is_ConcurrentGC_thread(),
1133          "must be at safepoint");
1134 
1135   {
1136     // Clear ICStubs and release any CompiledICHolders.
1137     CompiledICLocker ml(this);
1138     clear_ic_callsites();
1139   }
1140 
1141   // Unregister must be done before the state change
1142   {
1143     MutexLocker ml(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock,
1144                      Mutex::_no_safepoint_check_flag);
1145     Universe::heap()->unregister_nmethod(this);
1146   }
1147 
1148   // Clear the method of this dead nmethod


1190         xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
1191                          os::current_thread_id());
1192       } else {
1193         xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
1194                          os::current_thread_id(),
1195                          (_state == zombie ? " zombie='1'" : ""));
1196       }
1197       log_identity(xtty);
1198       xtty->stamp();
1199       xtty->end_elem();
1200     }
1201   }
1202 
1203   const char *state_msg = _state == zombie ? "made zombie" : "made not entrant";
1204   CompileTask::print_ul(this, state_msg);
1205   if (PrintCompilation && _state != unloaded) {
1206     print_on(tty, state_msg);
1207   }
1208 }
1209 
1210 void nmethod::unlink_from_method(bool acquire_lock) {
       // Sever the Method* -> nmethod link for this nmethod so callers can no
       // longer reach it through Method::code() or the compiled entry point.
       // acquire_lock == false means the caller already holds the Patching_lock
       // (see the call in make_not_entrant_or_zombie, which passes
       // false /* already owns Patching_lock */).
1211   // We need to check if both the _code and _from_compiled_code_entry_point
1212   // refer to this nmethod because there is a race in setting these two fields
1213   // in Method* as seen in bugid 4947125.
1214   // If the vep() points to the zombie nmethod, the memory for the nmethod
1215   // could be flushed and the compiler and vtable stubs could still call
1216   // through it.
1217   if (method() != NULL && (method()->code() == this ||
1218                            method()->from_compiled_entry() == verified_entry_point())) {
1219     method()->clear_code(acquire_lock);
1220   }
1221 }
1222 
1223 /**
1224  * Common functionality for both make_not_entrant and make_zombie
1225  */
1226 bool nmethod::make_not_entrant_or_zombie(int state) {
1227   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
1228   assert(!is_zombie(), "should not already be a zombie");
1229 
1230   if (_state == state) {
1231     // Avoid taking the lock if already in required state.
1232     // This is safe from races because the state is an end-state,
1233     // which the nmethod cannot back out of once entered.
1234     // No need for fencing either.
1235     return false;
1236   }
1237 
1238   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
1239   nmethodLocker nml(this);
1240   methodHandle the_method(method());
1241   // This can be called while the system is already at a safepoint which is ok
1242   NoSafepointVerifier nsv(true, !SafepointSynchronize::is_at_safepoint());
1243 
1244   // during patching, depending on the nmethod state we must notify the GC that
1245   // code has been unloaded, unregistering it. We cannot do this right while
1246   // holding the Patching_lock because we need to use the CodeCache_lock. This
1247   // would be prone to deadlocks.
1248   // This flag is used to remember whether we need to later lock and unregister.
1249   bool nmethod_needs_unregister = false;
1250 
1251   {
1252     // invalidate osr nmethod before acquiring the patching lock since
1253     // they both acquire leaf locks and we don't want a deadlock.
1254     // This logic is equivalent to the logic below for patching the
1255     // verified entry point of regular methods. We check that the
1256     // nmethod is in use to ensure that it is invalidated only once.
1257     if (is_osr_method() && is_in_use()) {
1258       // this effectively makes the osr nmethod not entrant
1259       invalidate_osr_method();
1260     }
1261 

1262     // Enter critical section.  Does not block for safepoint.
1263     MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
1264 
1265     if (_state == state) {
1266       // another thread already performed this transition so nothing
1267       // to do, but return false to indicate this.
1268       return false;
1269     }
1270 
1271     // The caller can be calling the method statically or through an inline
1272     // cache call.
1273     if (!is_osr_method() && !is_not_entrant()) {
1274       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
1275                   SharedRuntime::get_handle_wrong_method_stub());
1276     }
1277 
1278     if (is_in_use() && update_recompile_counts()) {
1279       // It's a true state change, so mark the method as decompiled.
1280       // Do it only for transition from alive.
1281       inc_decompile_count();
1282     }
1283 


1287     if ((state == zombie) && !is_unloaded()) {
1288       nmethod_needs_unregister = true;
1289     }
1290 
1291     // Must happen before state change. Otherwise we have a race condition in
1292     // nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
1293     // transition its state from 'not_entrant' to 'zombie' without having to wait
1294     // for stack scanning.
1295     if (state == not_entrant) {
1296       mark_as_seen_on_stack();
1297       OrderAccess::storestore(); // _stack_traversal_mark and _state
1298     }
1299 
1300     // Change state
1301     _state = state;
1302 
1303     // Log the transition once
1304     log_state_change();
1305 
1306     // Remove nmethod from method.
1307     unlink_from_method(false /* already owns Patching_lock */);
1308   } // leave critical region under Patching_lock

1309 
1310 #if INCLUDE_JVMCI
1311   // Invalidate can't occur while holding the Patching lock
1312   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
1313   if (nmethod_data != NULL) {
1314     nmethod_data->invalidate_nmethod_mirror(this);
1315   }
1316 #endif
1317 
1318 #ifdef ASSERT
1319   if (is_osr_method() && method() != NULL) {
1320     // Make sure osr nmethod is invalidated, i.e. not on the list
1321     bool found = method()->method_holder()->remove_osr_nmethod(this);
1322     assert(!found, "osr nmethod should have been invalidated");
1323   }
1324 #endif
1325 
1326   // When the nmethod becomes zombie it is no longer alive so the
1327   // dependencies must be flushed.  nmethods in the not_entrant
1328   // state will be flushed later when the transition to zombie




  32 #include "code/nmethod.hpp"
  33 #include "code/scopeDesc.hpp"
  34 #include "compiler/abstractCompiler.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/compileLog.hpp"
  37 #include "compiler/compilerDirectives.hpp"
  38 #include "compiler/directivesParser.hpp"
  39 #include "compiler/disassembler.hpp"
  40 #include "interpreter/bytecode.hpp"
  41 #include "logging/log.hpp"
  42 #include "logging/logStream.hpp"
  43 #include "memory/allocation.inline.hpp"
  44 #include "memory/resourceArea.hpp"
  45 #include "memory/universe.hpp"
  46 #include "oops/access.inline.hpp"
  47 #include "oops/method.inline.hpp"
  48 #include "oops/methodData.hpp"
  49 #include "oops/oop.inline.hpp"
  50 #include "prims/jvmtiImpl.hpp"
  51 #include "runtime/atomic.hpp"
  52 #include "runtime/deoptimization.hpp"
  53 #include "runtime/flags/flagSetting.hpp"
  54 #include "runtime/frame.inline.hpp"
  55 #include "runtime/handles.inline.hpp"
  56 #include "runtime/jniHandles.inline.hpp"
  57 #include "runtime/orderAccess.hpp"
  58 #include "runtime/os.hpp"
  59 #include "runtime/safepointVerifiers.hpp"
  60 #include "runtime/sharedRuntime.hpp"
  61 #include "runtime/sweeper.hpp"
  62 #include "runtime/vmThread.hpp"
  63 #include "utilities/align.hpp"
  64 #include "utilities/dtrace.hpp"
  65 #include "utilities/events.hpp"
  66 #include "utilities/resourceHash.hpp"
  67 #include "utilities/xmlstream.hpp"
  68 #if INCLUDE_JVMCI
  69 #include "jvmci/jvmciRuntime.hpp"
  70 #endif
  71 
  72 #ifdef DTRACE_ENABLED


1105   // Unlink the osr method, so we do not look this up again
1106   if (is_osr_method()) {
1107     // Invalidate the osr nmethod only once
1108     if (is_in_use()) {
1109       invalidate_osr_method();
1110     }
1111 #ifdef ASSERT
1112     if (method() != NULL) {
1113       // Make sure osr nmethod is invalidated, i.e. not on the list
1114       bool found = method()->method_holder()->remove_osr_nmethod(this);
1115       assert(!found, "osr nmethod should have been invalidated");
1116     }
1117 #endif
1118   }
1119 
1120   // If _method is already NULL the Method* is about to be unloaded,
1121   // so we don't have to break the cycle. Note that it is possible to
1122   // have the Method* live here, in case we unload the nmethod because
1123   // it is pointing to some oop (other than the Method*) being unloaded.
1124   if (_method != NULL) {
1125     _method->unlink_code(this);




1126   }
1127 
1128   // Make the class unloaded - i.e., change state and notify sweeper
1129   assert(SafepointSynchronize::is_at_safepoint() || Thread::current()->is_ConcurrentGC_thread(),
1130          "must be at safepoint");
1131 
1132   {
1133     // Clear ICStubs and release any CompiledICHolders.
1134     CompiledICLocker ml(this);
1135     clear_ic_callsites();
1136   }
1137 
1138   // Unregister must be done before the state change
1139   {
1140     MutexLocker ml(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock,
1141                      Mutex::_no_safepoint_check_flag);
1142     Universe::heap()->unregister_nmethod(this);
1143   }
1144 
1145   // Clear the method of this dead nmethod


1187         xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
1188                          os::current_thread_id());
1189       } else {
1190         xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
1191                          os::current_thread_id(),
1192                          (_state == zombie ? " zombie='1'" : ""));
1193       }
1194       log_identity(xtty);
1195       xtty->stamp();
1196       xtty->end_elem();
1197     }
1198   }
1199 
1200   const char *state_msg = _state == zombie ? "made zombie" : "made not entrant";
1201   CompileTask::print_ul(this, state_msg);
1202   if (PrintCompilation && _state != unloaded) {
1203     print_on(tty, state_msg);
1204   }
1205 }
1206 
1207 void nmethod::unlink_from_method() {
       // Sever the Method* -> nmethod link. In this revision the race-safe
       // checks that the old implementation performed inline (comparing
       // method()->code() and from_compiled_entry() against this nmethod
       // before clearing) are presumably delegated to Method::unlink_code()
       // -- NOTE(review): unlink_code() is called here with no argument,
       // while make_unloaded calls _method->unlink_code(this); confirm both
       // overloads guard against unlinking a different installed nmethod.
1208   if (method() != NULL) {
1209     method()->unlink_code();




 



1210   }
1211 }
1212 
1213 /**
1214  * Common functionality for both make_not_entrant and make_zombie
1215  */
1216 bool nmethod::make_not_entrant_or_zombie(int state) {
1217   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
1218   assert(!is_zombie(), "should not already be a zombie");
1219 
1220   if (_state == state) {
1221     // Avoid taking the lock if already in required state.
1222     // This is safe from races because the state is an end-state,
1223     // which the nmethod cannot back out of once entered.
1224     // No need for fencing either.
1225     return false;
1226   }
1227 
1228   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
1229   nmethodLocker nml(this);
1230   methodHandle the_method(method());
1231   // This can be called while the system is already at a safepoint which is ok
1232   NoSafepointVerifier nsv(true, !SafepointSynchronize::is_at_safepoint());
1233 
1234   // during patching, depending on the nmethod state we must notify the GC that
1235   // code has been unloaded, unregistering it. We cannot do this right while
1236   // holding the CompiledMethod_lock because we need to use the CodeCache_lock. This
1237   // would be prone to deadlocks.
1238   // This flag is used to remember whether we need to later lock and unregister.
1239   bool nmethod_needs_unregister = false;
1240 

1241   // invalidate osr nmethod before acquiring the patching lock since
1242   // they both acquire leaf locks and we don't want a deadlock.
1243   // This logic is equivalent to the logic below for patching the
1244   // verified entry point of regular methods. We check that the
1245   // nmethod is in use to ensure that it is invalidated only once.
1246   if (is_osr_method() && is_in_use()) {
1247     // this effectively makes the osr nmethod not entrant
1248     invalidate_osr_method();
1249   }
1250 
1251   {
1252     // Enter critical section.  Does not block for safepoint.
1253     MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
1254 
1255     if (_state == state) {
1256       // another thread already performed this transition so nothing
1257       // to do, but return false to indicate this.
1258       return false;
1259     }
1260 
1261     // The caller can be calling the method statically or through an inline
1262     // cache call.
1263     if (!is_osr_method() && !is_not_entrant()) {
1264       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
1265                   SharedRuntime::get_handle_wrong_method_stub());
1266     }
1267 
1268     if (is_in_use() && update_recompile_counts()) {
1269       // It's a true state change, so mark the method as decompiled.
1270       // Do it only for transition from alive.
1271       inc_decompile_count();
1272     }
1273 


1277     if ((state == zombie) && !is_unloaded()) {
1278       nmethod_needs_unregister = true;
1279     }
1280 
1281     // Must happen before state change. Otherwise we have a race condition in
1282     // nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
1283     // transition its state from 'not_entrant' to 'zombie' without having to wait
1284     // for stack scanning.
1285     if (state == not_entrant) {
1286       mark_as_seen_on_stack();
1287       OrderAccess::storestore(); // _stack_traversal_mark and _state
1288     }
1289 
1290     // Change state
1291     _state = state;
1292 
1293     // Log the transition once
1294     log_state_change();
1295 
1296     // Remove nmethod from method.
1297     unlink_from_method();
1298 
1299   } // leave critical region under CompiledMethod_lock
1300 
1301 #if INCLUDE_JVMCI
1302   // Invalidate can't occur while holding the Patching lock
1303   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
1304   if (nmethod_data != NULL) {
1305     nmethod_data->invalidate_nmethod_mirror(this);
1306   }
1307 #endif
1308 
1309 #ifdef ASSERT
1310   if (is_osr_method() && method() != NULL) {
1311     // Make sure osr nmethod is invalidated, i.e. not on the list
1312     bool found = method()->method_holder()->remove_osr_nmethod(this);
1313     assert(!found, "osr nmethod should have been invalidated");
1314   }
1315 #endif
1316 
1317   // When the nmethod becomes zombie it is no longer alive so the
1318   // dependencies must be flushed.  nmethods in the not_entrant
1319   // state will be flushed later when the transition to zombie


< prev index next >