< prev index next >

src/hotspot/share/code/nmethod.cpp

Print this page
rev 56098 : imported patch 8226705-8221734-baseline
rev 56099 : imported patch 8226705-rebase


  33 #include "code/nmethod.hpp"
  34 #include "code/scopeDesc.hpp"
  35 #include "compiler/abstractCompiler.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "compiler/compileLog.hpp"
  38 #include "compiler/compilerDirectives.hpp"
  39 #include "compiler/directivesParser.hpp"
  40 #include "compiler/disassembler.hpp"
  41 #include "interpreter/bytecode.hpp"
  42 #include "logging/log.hpp"
  43 #include "logging/logStream.hpp"
  44 #include "memory/allocation.inline.hpp"
  45 #include "memory/resourceArea.hpp"
  46 #include "memory/universe.hpp"
  47 #include "oops/access.inline.hpp"
  48 #include "oops/method.inline.hpp"
  49 #include "oops/methodData.hpp"
  50 #include "oops/oop.inline.hpp"
  51 #include "prims/jvmtiImpl.hpp"
  52 #include "runtime/atomic.hpp"

  53 #include "runtime/flags/flagSetting.hpp"
  54 #include "runtime/frame.inline.hpp"
  55 #include "runtime/handles.inline.hpp"
  56 #include "runtime/jniHandles.inline.hpp"
  57 #include "runtime/orderAccess.hpp"
  58 #include "runtime/os.hpp"
  59 #include "runtime/safepointVerifiers.hpp"
  60 #include "runtime/sharedRuntime.hpp"
  61 #include "runtime/sweeper.hpp"
  62 #include "runtime/vmThread.hpp"
  63 #include "utilities/align.hpp"
  64 #include "utilities/dtrace.hpp"
  65 #include "utilities/events.hpp"
  66 #include "utilities/resourceHash.hpp"
  67 #include "utilities/xmlstream.hpp"
  68 #if INCLUDE_JVMCI
  69 #include "jvmci/jvmciRuntime.hpp"
  70 #endif
  71 
  72 #ifdef DTRACE_ENABLED


1176     // Invalidate the osr nmethod only once. Note that with concurrent
1177     // code cache unloading, OSR nmethods are invalidated before they
1178     // are made unloaded. Therefore, this becomes a no-op then.
1179     if (is_in_use()) {
1180       invalidate_osr_method();
1181     }
1182 #ifdef ASSERT
1183     if (method() != NULL) {
1184       // Make sure osr nmethod is invalidated, i.e. not on the list
1185       bool found = method()->method_holder()->remove_osr_nmethod(this);
1186       assert(!found, "osr nmethod should have been invalidated");
1187     }
1188 #endif
1189   }
1190 
1191   // If _method is already NULL the Method* is about to be unloaded,
1192   // so we don't have to break the cycle. Note that it is possible to
1193   // have the Method* live here, in case we unload the nmethod because
1194   // it is pointing to some oop (other than the Method*) being unloaded.
1195   if (_method != NULL) {
1196     // OSR methods point to the Method*, but the Method* does not
1197     // point back!
1198     if (_method->code() == this) {
1199       _method->clear_code(); // Break a cycle
1200     }
1201   }
1202 
1203   // Make the class unloaded - i.e., change state and notify sweeper
1204   assert(SafepointSynchronize::is_at_safepoint() || Thread::current()->is_ConcurrentGC_thread(),
1205          "must be at safepoint");
1206 
1207   {
1208     // Clear ICStubs and release any CompiledICHolders.
1209     CompiledICLocker ml(this);
1210     clear_ic_callsites();
1211   }
1212 
1213   // Unregister must be done before the state change
1214   {
1215     MutexLocker ml(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock,
1216                      Mutex::_no_safepoint_check_flag);
1217     Universe::heap()->unregister_nmethod(this);
1218   }
1219 
1220   // Clear the method of this dead nmethod


1264         xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
1265                          os::current_thread_id());
1266       } else {
1267         xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
1268                          os::current_thread_id(),
1269                          (_state == zombie ? " zombie='1'" : ""));
1270       }
1271       log_identity(xtty);
1272       xtty->stamp();
1273       xtty->end_elem();
1274     }
1275   }
1276 
1277   const char *state_msg = _state == zombie ? "made zombie" : "made not entrant";
1278   CompileTask::print_ul(this, state_msg);
1279   if (PrintCompilation && _state != unloaded) {
1280     print_on(tty, state_msg);
1281   }
1282 }
1283 
1284 void nmethod::unlink_from_method(bool acquire_lock) {
1285   // We need to check if both the _code and _from_compiled_code_entry_point
1286   // refer to this nmethod because there is a race in setting these two fields
1287   // in Method* as seen in bugid 4947125.
1288   // If the vep() points to the zombie nmethod, the memory for the nmethod
1289   // could be flushed and the compiler and vtable stubs could still call
1290   // through it.
       // acquire_lock is forwarded to Method::clear_code(): callers pass false
       // when they already hold the Patching_lock (see the call in
       // make_not_entrant_or_zombie), true otherwise.
1291   if (method() != NULL && (method()->code() == this ||
1292                            method()->from_compiled_entry() == verified_entry_point())) {
1293     method()->clear_code(acquire_lock);
1294   }
1295 }
1296 
1297 /**
1298  * Common functionality for both make_not_entrant and make_zombie
1299  */
1300 bool nmethod::make_not_entrant_or_zombie(int state) {
1301   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
1302   assert(!is_zombie(), "should not already be a zombie");
1303 
1304   if (Atomic::load(&_state) >= state) {
1305     // Avoid taking the lock if already in required state.
1306     // This is safe from races because the state is an end-state,
1307     // which the nmethod cannot back out of once entered.
1308     // No need for fencing either.
1309     return false;
1310   }
1311 
1312   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
1313   nmethodLocker nml(this);
1314   methodHandle the_method(method());
1315   // This can be called while the system is already at a safepoint which is ok
1316   NoSafepointVerifier nsv;
1317 
1318   // during patching, depending on the nmethod state we must notify the GC that
1319   // code has been unloaded, unregistering it. We cannot do this right while
1320   // holding the Patching_lock because we need to use the CodeCache_lock. This
1321   // would be prone to deadlocks.
1322   // This flag is used to remember whether we need to later lock and unregister.
1323   bool nmethod_needs_unregister = false;
1324 
1325   {
1326     // invalidate osr nmethod before acquiring the patching lock since
1327     // they both acquire leaf locks and we don't want a deadlock.
1328     // This logic is equivalent to the logic below for patching the
1329     // verified entry point of regular methods. We check that the
1330     // nmethod is in use to ensure that it is invalidated only once.
1331     if (is_osr_method() && is_in_use()) {
1332       // this effectively makes the osr nmethod not entrant
1333       invalidate_osr_method();
1334     }
1335 

1336     // Enter critical section.  Does not block for safepoint.
1337     MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
1338 
1339     if (Atomic::load(&_state) >= state) {
1340       // another thread already performed this transition so nothing
1341       // to do, but return false to indicate this.
1342       return false;
1343     }
1344 
1345     // The caller can be calling the method statically or through an inline
1346     // cache call.
1347     if (!is_osr_method() && !is_not_entrant()) {
1348       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
1349                   SharedRuntime::get_handle_wrong_method_stub());
1350     }
1351 
1352     if (is_in_use() && update_recompile_counts()) {
1353       // It's a true state change, so mark the method as decompiled.
1354       // Do it only for transition from alive.
1355       inc_decompile_count();
1356     }
1357 


1372     }
1373 
1374     // Change state
1375     if (!try_transition(state)) {
1376       // If the transition fails, it is due to another thread making the nmethod more
1377       // dead. In particular, one thread might be making the nmethod unloaded concurrently.
1378       // If so, having patched in the jump in the verified entry unnecessarily is fine.
1379       // The nmethod is no longer possible to call by Java threads.
1380       // Incrementing the decompile count is also fine as the caller of make_not_entrant()
1381       // had a valid reason to deoptimize the nmethod.
1382       // Marking the nmethod as seen on stack also has no effect, as the nmethod is now
1383       // !is_alive(), and the seen on stack value is only used to convert not_entrant
1384       // nmethods to zombie in can_convert_to_zombie().
1385       return false;
1386     }
1387 
1388     // Log the transition once
1389     log_state_change();
1390 
1391     // Remove nmethod from method.
1392     unlink_from_method(false /* already owns Patching_lock */);
1393   } // leave critical region under Patching_lock

1394 
1395 #if INCLUDE_JVMCI
1396   // Invalidate can't occur while holding the Patching lock
1397   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
1398   if (nmethod_data != NULL) {
1399     nmethod_data->invalidate_nmethod_mirror(this);
1400   }
1401 #endif
1402 
1403 #ifdef ASSERT
1404   if (is_osr_method() && method() != NULL) {
1405     // Make sure osr nmethod is invalidated, i.e. not on the list
1406     bool found = method()->method_holder()->remove_osr_nmethod(this);
1407     assert(!found, "osr nmethod should have been invalidated");
1408   }
1409 #endif
1410 
1411   // When the nmethod becomes zombie it is no longer alive so the
1412   // dependencies must be flushed.  nmethods in the not_entrant
1413   // state will be flushed later when the transition to zombie




  33 #include "code/nmethod.hpp"
  34 #include "code/scopeDesc.hpp"
  35 #include "compiler/abstractCompiler.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "compiler/compileLog.hpp"
  38 #include "compiler/compilerDirectives.hpp"
  39 #include "compiler/directivesParser.hpp"
  40 #include "compiler/disassembler.hpp"
  41 #include "interpreter/bytecode.hpp"
  42 #include "logging/log.hpp"
  43 #include "logging/logStream.hpp"
  44 #include "memory/allocation.inline.hpp"
  45 #include "memory/resourceArea.hpp"
  46 #include "memory/universe.hpp"
  47 #include "oops/access.inline.hpp"
  48 #include "oops/method.inline.hpp"
  49 #include "oops/methodData.hpp"
  50 #include "oops/oop.inline.hpp"
  51 #include "prims/jvmtiImpl.hpp"
  52 #include "runtime/atomic.hpp"
  53 #include "runtime/deoptimization.hpp"
  54 #include "runtime/flags/flagSetting.hpp"
  55 #include "runtime/frame.inline.hpp"
  56 #include "runtime/handles.inline.hpp"
  57 #include "runtime/jniHandles.inline.hpp"
  58 #include "runtime/orderAccess.hpp"
  59 #include "runtime/os.hpp"
  60 #include "runtime/safepointVerifiers.hpp"
  61 #include "runtime/sharedRuntime.hpp"
  62 #include "runtime/sweeper.hpp"
  63 #include "runtime/vmThread.hpp"
  64 #include "utilities/align.hpp"
  65 #include "utilities/dtrace.hpp"
  66 #include "utilities/events.hpp"
  67 #include "utilities/resourceHash.hpp"
  68 #include "utilities/xmlstream.hpp"
  69 #if INCLUDE_JVMCI
  70 #include "jvmci/jvmciRuntime.hpp"
  71 #endif
  72 
  73 #ifdef DTRACE_ENABLED


1177     // Invalidate the osr nmethod only once. Note that with concurrent
1178     // code cache unloading, OSR nmethods are invalidated before they
1179     // are made unloaded. Therefore, this becomes a no-op then.
1180     if (is_in_use()) {
1181       invalidate_osr_method();
1182     }
1183 #ifdef ASSERT
1184     if (method() != NULL) {
1185       // Make sure osr nmethod is invalidated, i.e. not on the list
1186       bool found = method()->method_holder()->remove_osr_nmethod(this);
1187       assert(!found, "osr nmethod should have been invalidated");
1188     }
1189 #endif
1190   }
1191 
1192   // If _method is already NULL the Method* is about to be unloaded,
1193   // so we don't have to break the cycle. Note that it is possible to
1194   // have the Method* live here, in case we unload the nmethod because
1195   // it is pointing to some oop (other than the Method*) being unloaded.
1196   if (_method != NULL) {
1197     _method->unlink_code(this);




1198   }
1199 
1200   // Make the class unloaded - i.e., change state and notify sweeper
1201   assert(SafepointSynchronize::is_at_safepoint() || Thread::current()->is_ConcurrentGC_thread(),
1202          "must be at safepoint");
1203 
1204   {
1205     // Clear ICStubs and release any CompiledICHolders.
1206     CompiledICLocker ml(this);
1207     clear_ic_callsites();
1208   }
1209 
1210   // Unregister must be done before the state change
1211   {
1212     MutexLocker ml(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock,
1213                      Mutex::_no_safepoint_check_flag);
1214     Universe::heap()->unregister_nmethod(this);
1215   }
1216 
1217   // Clear the method of this dead nmethod


1261         xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
1262                          os::current_thread_id());
1263       } else {
1264         xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
1265                          os::current_thread_id(),
1266                          (_state == zombie ? " zombie='1'" : ""));
1267       }
1268       log_identity(xtty);
1269       xtty->stamp();
1270       xtty->end_elem();
1271     }
1272   }
1273 
1274   const char *state_msg = _state == zombie ? "made zombie" : "made not entrant";
1275   CompileTask::print_ul(this, state_msg);
1276   if (PrintCompilation && _state != unloaded) {
1277     print_on(tty, state_msg);
1278   }
1279 }
1280 
1281 void nmethod::unlink_from_method() {
       // Break the Method* -> nmethod association by delegating to
       // Method::unlink_code(this). NOTE(review): the old variant took an
       // acquire_lock flag and checked _code/from_compiled_entry here;
       // presumably unlink_code() now performs that check and any required
       // locking internally — confirm against Method::unlink_code in method.cpp.
1282   if (method() != NULL) {
1283     method()->unlink_code(this);




1284   }
1285 }
1286 
1287 /**
1288  * Common functionality for both make_not_entrant and make_zombie
1289  */
1290 bool nmethod::make_not_entrant_or_zombie(int state) {
1291   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
1292   assert(!is_zombie(), "should not already be a zombie");
1293 
1294   if (Atomic::load(&_state) >= state) {
1295     // Avoid taking the lock if already in required state.
1296     // This is safe from races because the state is an end-state,
1297     // which the nmethod cannot back out of once entered.
1298     // No need for fencing either.
1299     return false;
1300   }
1301 
1302   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
1303   nmethodLocker nml(this);
1304   methodHandle the_method(method());
1305   // This can be called while the system is already at a safepoint which is ok
1306   NoSafepointVerifier nsv;
1307 
1308   // during patching, depending on the nmethod state we must notify the GC that
1309   // code has been unloaded, unregistering it. We cannot do this right while
1310   // holding the CompiledMethod_lock because we need to use the CodeCache_lock. This
1311   // would be prone to deadlocks.
1312   // This flag is used to remember whether we need to later lock and unregister.
1313   bool nmethod_needs_unregister = false;
1314 

1315   // invalidate osr nmethod before acquiring the patching lock since
1316   // they both acquire leaf locks and we don't want a deadlock.
1317   // This logic is equivalent to the logic below for patching the
1318   // verified entry point of regular methods. We check that the
1319   // nmethod is in use to ensure that it is invalidated only once.
1320   if (is_osr_method() && is_in_use()) {
1321     // this effectively makes the osr nmethod not entrant
1322     invalidate_osr_method();
1323   }
1324 
1325   {
1326     // Enter critical section.  Does not block for safepoint.
1327     MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
1328 
1329     if (Atomic::load(&_state) >= state) {
1330       // another thread already performed this transition so nothing
1331       // to do, but return false to indicate this.
1332       return false;
1333     }
1334 
1335     // The caller can be calling the method statically or through an inline
1336     // cache call.
1337     if (!is_osr_method() && !is_not_entrant()) {
1338       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
1339                   SharedRuntime::get_handle_wrong_method_stub());
1340     }
1341 
1342     if (is_in_use() && update_recompile_counts()) {
1343       // It's a true state change, so mark the method as decompiled.
1344       // Do it only for transition from alive.
1345       inc_decompile_count();
1346     }
1347 


1362     }
1363 
1364     // Change state
1365     if (!try_transition(state)) {
1366       // If the transition fails, it is due to another thread making the nmethod more
1367       // dead. In particular, one thread might be making the nmethod unloaded concurrently.
1368       // If so, having patched in the jump in the verified entry unnecessarily is fine.
1369       // The nmethod is no longer possible to call by Java threads.
1370       // Incrementing the decompile count is also fine as the caller of make_not_entrant()
1371       // had a valid reason to deoptimize the nmethod.
1372       // Marking the nmethod as seen on stack also has no effect, as the nmethod is now
1373       // !is_alive(), and the seen on stack value is only used to convert not_entrant
1374       // nmethods to zombie in can_convert_to_zombie().
1375       return false;
1376     }
1377 
1378     // Log the transition once
1379     log_state_change();
1380 
1381     // Remove nmethod from method.
1382     unlink_from_method();
1383 
1384   } // leave critical region under CompiledMethod_lock
1385 
1386 #if INCLUDE_JVMCI
1387   // Invalidate can't occur while holding the Patching lock
1388   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
1389   if (nmethod_data != NULL) {
1390     nmethod_data->invalidate_nmethod_mirror(this);
1391   }
1392 #endif
1393 
1394 #ifdef ASSERT
1395   if (is_osr_method() && method() != NULL) {
1396     // Make sure osr nmethod is invalidated, i.e. not on the list
1397     bool found = method()->method_holder()->remove_osr_nmethod(this);
1398     assert(!found, "osr nmethod should have been invalidated");
1399   }
1400 #endif
1401 
1402   // When the nmethod becomes zombie it is no longer alive so the
1403   // dependencies must be flushed.  nmethods in the not_entrant
1404   // state will be flushed later when the transition to zombie


< prev index next >