< prev index next >

src/hotspot/share/code/nmethod.cpp

Print this page
rev 56251 : imported patch 8226705-v1
rev 56252 : imported patch 8226705-v2


  33 #include "code/nmethod.hpp"
  34 #include "code/scopeDesc.hpp"
  35 #include "compiler/abstractCompiler.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "compiler/compileLog.hpp"
  38 #include "compiler/compilerDirectives.hpp"
  39 #include "compiler/directivesParser.hpp"
  40 #include "compiler/disassembler.hpp"
  41 #include "interpreter/bytecode.hpp"
  42 #include "logging/log.hpp"
  43 #include "logging/logStream.hpp"
  44 #include "memory/allocation.inline.hpp"
  45 #include "memory/resourceArea.hpp"
  46 #include "memory/universe.hpp"
  47 #include "oops/access.inline.hpp"
  48 #include "oops/method.inline.hpp"
  49 #include "oops/methodData.hpp"
  50 #include "oops/oop.inline.hpp"
  51 #include "prims/jvmtiImpl.hpp"
  52 #include "runtime/atomic.hpp"

  53 #include "runtime/flags/flagSetting.hpp"
  54 #include "runtime/frame.inline.hpp"
  55 #include "runtime/handles.inline.hpp"
  56 #include "runtime/jniHandles.inline.hpp"
  57 #include "runtime/orderAccess.hpp"
  58 #include "runtime/os.hpp"
  59 #include "runtime/safepointVerifiers.hpp"
  60 #include "runtime/sharedRuntime.hpp"
  61 #include "runtime/sweeper.hpp"
  62 #include "runtime/vmThread.hpp"
  63 #include "utilities/align.hpp"
  64 #include "utilities/dtrace.hpp"
  65 #include "utilities/events.hpp"
  66 #include "utilities/resourceHash.hpp"
  67 #include "utilities/xmlstream.hpp"
  68 #if INCLUDE_JVMCI
  69 #include "jvmci/jvmciRuntime.hpp"
  70 #endif
  71 
  72 #ifdef DTRACE_ENABLED


 459     int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
 460 
 461     CodeOffsets offsets;
 462     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
 463     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
 464     nm = new (native_nmethod_size, CompLevel_none)
 465     nmethod(method(), compiler_none, native_nmethod_size,
 466             compile_id, &offsets,
 467             code_buffer, frame_size,
 468             basic_lock_owner_sp_offset,
 469             basic_lock_sp_offset,
 470             oop_maps);
 471     NOT_PRODUCT(if (nm != NULL)  native_nmethod_stats.note_native_nmethod(nm));
 472   }
 473 
 474   if (nm != NULL) {
 475     // verify nmethod
 476     debug_only(nm->verify();) // might block
 477 
 478     nm->log_new_nmethod();
 479     nm->make_in_use();
 480   }
 481   return nm;
 482 }
 483 
 484 nmethod* nmethod::new_nmethod(const methodHandle& method,
 485   int compile_id,
 486   int entry_bci,
 487   CodeOffsets* offsets,
 488   int orig_pc_offset,
 489   DebugInformationRecorder* debug_info,
 490   Dependencies* dependencies,
 491   CodeBuffer* code_buffer, int frame_size,
 492   OopMapSet* oop_maps,
 493   ExceptionHandlerTable* handler_table,
 494   ImplicitExceptionTable* nul_chk_table,
 495   AbstractCompiler* compiler,
 496   int comp_level
 497 #if INCLUDE_JVMCI
 498   , char* speculations,
 499   int speculations_len,


1121   // If an is_unloading() nmethod is still not_entrant, then it is not safe to
1122   // convert it to zombie due to GC unloading interactions. However, if it
1123   // has become unloaded, then it is okay to convert such nmethods to zombie.
1124   return stack_traversal_mark() + 1 < NMethodSweeper::traversal_count() &&
1125          !is_locked_by_vm() && (!is_unloading() || is_unloaded());
1126 }
1127 
// Record one decompilation in this nmethod's MethodData, if present.
1128 void nmethod::inc_decompile_count() {
       // Only C2- and JVMCI-compiled nmethods maintain a decompile count.
1129   if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
1130   // Could be gated by ProfileTraps, but do not bother...
1131   Method* m = method();
       // The Method* may already have been cleared (e.g. during unloading).
1132   if (m == NULL)  return;
1133   MethodData* mdo = m->method_data();
       // No profiling data allocated -- nothing to record.
1134   if (mdo == NULL)  return;
1135   // There is a benign race here.  See comments in methodData.hpp.
1136   mdo->inc_decompile_count();
1137 }
1138 
// Atomically advance _state to new_state. Transitions are monotonic:
// the state only moves forward, and the attempt fails (returns false)
// when another thread has already advanced _state at least that far.
1139 bool nmethod::try_transition(int new_state_int) {
       // _state is stored as a signed char; narrow the argument to match.
1140   signed char new_state = new_state_int;




       // CAS retry loop: reload, check monotonicity, try to install.
1141   for (;;) {
1142     signed char old_state = Atomic::load(&_state);
1143     if (old_state >= new_state) {
1144       // Ensure monotonicity of transitions.
1145       return false;
1146     }
         // cmpxchg failure means another thread changed _state; re-check.
1147     if (Atomic::cmpxchg(new_state, &_state, old_state) == old_state) {
1148       return true;
1149     }
1150   }
1151 }
1152 
1153 void nmethod::make_unloaded() {
1154   post_compiled_method_unload();
1155 
1156   // This nmethod is being unloaded, make sure that dependencies
1157   // recorded in instanceKlasses get flushed.
1158   // Since this work is being done during a GC, defer deleting dependencies from the
1159   // InstanceKlass.
1160   assert(Universe::heap()->is_gc_active() || Thread::current()->is_ConcurrentGC_thread(),


1176     // Invalidate the osr nmethod only once. Note that with concurrent
1177     // code cache unloading, OSR nmethods are invalidated before they
1178     // are made unloaded. Therefore, this becomes a no-op then.
1179     if (is_in_use()) {
1180       invalidate_osr_method();
1181     }
1182 #ifdef ASSERT
1183     if (method() != NULL) {
1184       // Make sure osr nmethod is invalidated, i.e. not on the list
1185       bool found = method()->method_holder()->remove_osr_nmethod(this);
1186       assert(!found, "osr nmethod should have been invalidated");
1187     }
1188 #endif
1189   }
1190 
1191   // If _method is already NULL the Method* is about to be unloaded,
1192   // so we don't have to break the cycle. Note that it is possible to
1193   // have the Method* live here, in case we unload the nmethod because
1194   // it is pointing to some oop (other than the Method*) being unloaded.
1195   if (_method != NULL) {
1196     // OSR methods point to the Method*, but the Method* does not
1197     // point back!
1198     if (_method->code() == this) {
1199       _method->clear_code(); // Break a cycle
1200     }
1201   }
1202 
1203   // Make the class unloaded - i.e., change state and notify sweeper
1204   assert(SafepointSynchronize::is_at_safepoint() || Thread::current()->is_ConcurrentGC_thread(),
1205          "must be at safepoint");
1206 
1207   {
1208     // Clear ICStubs and release any CompiledICHolders.
1209     CompiledICLocker ml(this);
1210     clear_ic_callsites();
1211   }
1212 
1213   // Unregister must be done before the state change
1214   {
1215     MutexLocker ml(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock,
1216                      Mutex::_no_safepoint_check_flag);
1217     Universe::heap()->unregister_nmethod(this);
1218   }
1219 
1220   // Clear the method of this dead nmethod


1264         xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
1265                          os::current_thread_id());
1266       } else {
1267         xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
1268                          os::current_thread_id(),
1269                          (_state == zombie ? " zombie='1'" : ""));
1270       }
1271       log_identity(xtty);
1272       xtty->stamp();
1273       xtty->end_elem();
1274     }
1275   }
1276 
1277   const char *state_msg = _state == zombie ? "made zombie" : "made not entrant";
1278   CompileTask::print_ul(this, state_msg);
1279   if (PrintCompilation && _state != unloaded) {
1280     print_on(tty, state_msg);
1281   }
1282 }
1283 
// Detach this nmethod from its Method* so the method no longer
// dispatches into this (dying) code. acquire_lock tells clear_code()
// whether it must take the lock itself; callers that already own the
// Patching_lock pass false (see make_not_entrant_or_zombie).
1284 void nmethod::unlink_from_method(bool acquire_lock) {
1285   // We need to check if both the _code and _from_compiled_code_entry_point
1286   // refer to this nmethod because there is a race in setting these two fields
1287   // in Method* as seen in bugid 4947125.
1288   // If the vep() points to the zombie nmethod, the memory for the nmethod
1289   // could be flushed and the compiler and vtable stubs could still call
1290   // through it.
1291   if (method() != NULL && (method()->code() == this ||
1292                            method()->from_compiled_entry() == verified_entry_point())) {
1293     method()->clear_code(acquire_lock);
1294   }
1295 }
1296 
1297 /**
1298  * Common functionality for both make_not_entrant and make_zombie
1299  */
1300 bool nmethod::make_not_entrant_or_zombie(int state) {
1301   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
1302   assert(!is_zombie(), "should not already be a zombie");
1303 
1304   if (Atomic::load(&_state) >= state) {
1305     // Avoid taking the lock if already in required state.
1306     // This is safe from races because the state is an end-state,
1307     // which the nmethod cannot back out of once entered.
1308     // No need for fencing either.
1309     return false;
1310   }
1311 
1312   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
1313   nmethodLocker nml(this);
1314   methodHandle the_method(method());
1315   // This can be called while the system is already at a safepoint which is ok
1316   NoSafepointVerifier nsv;
1317 
1318   // during patching, depending on the nmethod state we must notify the GC that
1319   // code has been unloaded, unregistering it. We cannot do this right while
1320   // holding the Patching_lock because we need to use the CodeCache_lock. This
1321   // would be prone to deadlocks.
1322   // This flag is used to remember whether we need to later lock and unregister.
1323   bool nmethod_needs_unregister = false;
1324 
1325   {
1326     // invalidate osr nmethod before acquiring the patching lock since
1327     // they both acquire leaf locks and we don't want a deadlock.
1328     // This logic is equivalent to the logic below for patching the
1329     // verified entry point of regular methods. We check that the
1330     // nmethod is in use to ensure that it is invalidated only once.
1331     if (is_osr_method() && is_in_use()) {
1332       // this effectively makes the osr nmethod not entrant
1333       invalidate_osr_method();
1334     }
1335 

1336     // Enter critical section.  Does not block for safepoint.
1337     MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
1338 
1339     if (Atomic::load(&_state) >= state) {
1340       // another thread already performed this transition so nothing
1341       // to do, but return false to indicate this.
1342       return false;
1343     }
1344 
1345     // The caller can be calling the method statically or through an inline
1346     // cache call.
1347     if (!is_osr_method() && !is_not_entrant()) {
1348       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
1349                   SharedRuntime::get_handle_wrong_method_stub());
1350     }
1351 
1352     if (is_in_use() && update_recompile_counts()) {
1353       // It's a true state change, so mark the method as decompiled.
1354       // Do it only for transition from alive.
1355       inc_decompile_count();
1356     }
1357 


1372     }
1373 
1374     // Change state
1375     if (!try_transition(state)) {
1376       // If the transition fails, it is due to another thread making the nmethod more
1377       // dead. In particular, one thread might be making the nmethod unloaded concurrently.
1378       // If so, having patched in the jump in the verified entry unnecessarily is fine.
1379       // The nmethod is no longer possible to call by Java threads.
1380       // Incrementing the decompile count is also fine as the caller of make_not_entrant()
1381       // had a valid reason to deoptimize the nmethod.
1382       // Marking the nmethod as seen on stack also has no effect, as the nmethod is now
1383       // !is_alive(), and the seen on stack value is only used to convert not_entrant
1384       // nmethods to zombie in can_convert_to_zombie().
1385       return false;
1386     }
1387 
1388     // Log the transition once
1389     log_state_change();
1390 
1391     // Remove nmethod from method.
1392     unlink_from_method(false /* already owns Patching_lock */);
1393   } // leave critical region under Patching_lock

1394 
1395 #if INCLUDE_JVMCI
1396   // Invalidate can't occur while holding the Patching lock
1397   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
1398   if (nmethod_data != NULL) {
1399     nmethod_data->invalidate_nmethod_mirror(this);
1400   }
1401 #endif
1402 
1403 #ifdef ASSERT
1404   if (is_osr_method() && method() != NULL) {
1405     // Make sure osr nmethod is invalidated, i.e. not on the list
1406     bool found = method()->method_holder()->remove_osr_nmethod(this);
1407     assert(!found, "osr nmethod should have been invalidated");
1408   }
1409 #endif
1410 
1411   // When the nmethod becomes zombie it is no longer alive so the
1412   // dependencies must be flushed.  nmethods in the not_entrant
1413   // state will be flushed later when the transition to zombie




  33 #include "code/nmethod.hpp"
  34 #include "code/scopeDesc.hpp"
  35 #include "compiler/abstractCompiler.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "compiler/compileLog.hpp"
  38 #include "compiler/compilerDirectives.hpp"
  39 #include "compiler/directivesParser.hpp"
  40 #include "compiler/disassembler.hpp"
  41 #include "interpreter/bytecode.hpp"
  42 #include "logging/log.hpp"
  43 #include "logging/logStream.hpp"
  44 #include "memory/allocation.inline.hpp"
  45 #include "memory/resourceArea.hpp"
  46 #include "memory/universe.hpp"
  47 #include "oops/access.inline.hpp"
  48 #include "oops/method.inline.hpp"
  49 #include "oops/methodData.hpp"
  50 #include "oops/oop.inline.hpp"
  51 #include "prims/jvmtiImpl.hpp"
  52 #include "runtime/atomic.hpp"
  53 #include "runtime/deoptimization.hpp"
  54 #include "runtime/flags/flagSetting.hpp"
  55 #include "runtime/frame.inline.hpp"
  56 #include "runtime/handles.inline.hpp"
  57 #include "runtime/jniHandles.inline.hpp"
  58 #include "runtime/orderAccess.hpp"
  59 #include "runtime/os.hpp"
  60 #include "runtime/safepointVerifiers.hpp"
  61 #include "runtime/sharedRuntime.hpp"
  62 #include "runtime/sweeper.hpp"
  63 #include "runtime/vmThread.hpp"
  64 #include "utilities/align.hpp"
  65 #include "utilities/dtrace.hpp"
  66 #include "utilities/events.hpp"
  67 #include "utilities/resourceHash.hpp"
  68 #include "utilities/xmlstream.hpp"
  69 #if INCLUDE_JVMCI
  70 #include "jvmci/jvmciRuntime.hpp"
  71 #endif
  72 
  73 #ifdef DTRACE_ENABLED


 460     int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
 461 
 462     CodeOffsets offsets;
 463     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
 464     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
 465     nm = new (native_nmethod_size, CompLevel_none)
 466     nmethod(method(), compiler_none, native_nmethod_size,
 467             compile_id, &offsets,
 468             code_buffer, frame_size,
 469             basic_lock_owner_sp_offset,
 470             basic_lock_sp_offset,
 471             oop_maps);
 472     NOT_PRODUCT(if (nm != NULL)  native_nmethod_stats.note_native_nmethod(nm));
 473   }
 474 
 475   if (nm != NULL) {
 476     // verify nmethod
 477     debug_only(nm->verify();) // might block
 478 
 479     nm->log_new_nmethod();

 480   }
 481   return nm;
 482 }
 483 
 484 nmethod* nmethod::new_nmethod(const methodHandle& method,
 485   int compile_id,
 486   int entry_bci,
 487   CodeOffsets* offsets,
 488   int orig_pc_offset,
 489   DebugInformationRecorder* debug_info,
 490   Dependencies* dependencies,
 491   CodeBuffer* code_buffer, int frame_size,
 492   OopMapSet* oop_maps,
 493   ExceptionHandlerTable* handler_table,
 494   ImplicitExceptionTable* nul_chk_table,
 495   AbstractCompiler* compiler,
 496   int comp_level
 497 #if INCLUDE_JVMCI
 498   , char* speculations,
 499   int speculations_len,


1121   // If an is_unloading() nmethod is still not_entrant, then it is not safe to
1122   // convert it to zombie due to GC unloading interactions. However, if it
1123   // has become unloaded, then it is okay to convert such nmethods to zombie.
1124   return stack_traversal_mark() + 1 < NMethodSweeper::traversal_count() &&
1125          !is_locked_by_vm() && (!is_unloading() || is_unloaded());
1126 }
1127 
// Record one decompilation in this nmethod's MethodData, if present.
1128 void nmethod::inc_decompile_count() {
       // Only C2- and JVMCI-compiled nmethods maintain a decompile count.
1129   if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
1130   // Could be gated by ProfileTraps, but do not bother...
1131   Method* m = method();
       // The Method* may already have been cleared (e.g. during unloading).
1132   if (m == NULL)  return;
1133   MethodData* mdo = m->method_data();
       // No profiling data allocated -- nothing to record.
1134   if (mdo == NULL)  return;
1135   // There is a benign race here.  See comments in methodData.hpp.
1136   mdo->inc_decompile_count();
1137 }
1138 
// Atomically advance _state to new_state. Transitions are monotonic:
// the state only moves forward, and the attempt fails (returns false)
// when another thread has already advanced _state at least that far.
1139 bool nmethod::try_transition(int new_state_int) {
       // _state is stored as a signed char; narrow the argument to match.
1140   signed char new_state = new_state_int;
       // HotSpot debug-only code is guarded by ASSERT, not DEBUG (which is
       // not defined by the HotSpot build); with #ifdef DEBUG this check
       // would silently compile out even in debug builds. This also matches
       // the other #ifdef ASSERT blocks in this file.
1141 #ifdef ASSERT
       // Every transition except to unloaded must hold CompiledMethod_lock.
1142   if (new_state != unloaded) {
1143     assert_lock_strong(CompiledMethod_lock);
1144   }
1145 #endif
       // CAS retry loop: reload, check monotonicity, try to install.
1146   for (;;) {
1147     signed char old_state = Atomic::load(&_state);
1148     if (old_state >= new_state) {
1149       // Ensure monotonicity of transitions.
1150       return false;
1151     }
         // cmpxchg failure means another thread changed _state; re-check.
1152     if (Atomic::cmpxchg(new_state, &_state, old_state) == old_state) {
1153       return true;
1154     }
1155   }
1156 }
1157 
1158 void nmethod::make_unloaded() {
1159   post_compiled_method_unload();
1160 
1161   // This nmethod is being unloaded, make sure that dependencies
1162   // recorded in instanceKlasses get flushed.
1163   // Since this work is being done during a GC, defer deleting dependencies from the
1164   // InstanceKlass.
1165   assert(Universe::heap()->is_gc_active() || Thread::current()->is_ConcurrentGC_thread(),


1181     // Invalidate the osr nmethod only once. Note that with concurrent
1182     // code cache unloading, OSR nmethods are invalidated before they
1183     // are made unloaded. Therefore, this becomes a no-op then.
1184     if (is_in_use()) {
1185       invalidate_osr_method();
1186     }
1187 #ifdef ASSERT
1188     if (method() != NULL) {
1189       // Make sure osr nmethod is invalidated, i.e. not on the list
1190       bool found = method()->method_holder()->remove_osr_nmethod(this);
1191       assert(!found, "osr nmethod should have been invalidated");
1192     }
1193 #endif
1194   }
1195 
1196   // If _method is already NULL the Method* is about to be unloaded,
1197   // so we don't have to break the cycle. Note that it is possible to
1198   // have the Method* live here, in case we unload the nmethod because
1199   // it is pointing to some oop (other than the Method*) being unloaded.
1200   if (_method != NULL) {
1201     _method->unlink_code(this);




1202   }
1203 
1204   // Make the class unloaded - i.e., change state and notify sweeper
1205   assert(SafepointSynchronize::is_at_safepoint() || Thread::current()->is_ConcurrentGC_thread(),
1206          "must be at safepoint");
1207 
1208   {
1209     // Clear ICStubs and release any CompiledICHolders.
1210     CompiledICLocker ml(this);
1211     clear_ic_callsites();
1212   }
1213 
1214   // Unregister must be done before the state change
1215   {
1216     MutexLocker ml(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock,
1217                      Mutex::_no_safepoint_check_flag);
1218     Universe::heap()->unregister_nmethod(this);
1219   }
1220 
1221   // Clear the method of this dead nmethod


1265         xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
1266                          os::current_thread_id());
1267       } else {
1268         xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
1269                          os::current_thread_id(),
1270                          (_state == zombie ? " zombie='1'" : ""));
1271       }
1272       log_identity(xtty);
1273       xtty->stamp();
1274       xtty->end_elem();
1275     }
1276   }
1277 
1278   const char *state_msg = _state == zombie ? "made zombie" : "made not entrant";
1279   CompileTask::print_ul(this, state_msg);
1280   if (PrintCompilation && _state != unloaded) {
1281     print_on(tty, state_msg);
1282   }
1283 }
1284 
// Detach this nmethod from its Method* so the method no longer
// dispatches into this (dying) code. The code==this / entry-point
// checks and any required locking are presumably performed inside
// Method::unlink_code -- confirm against method.cpp.
1285 void nmethod::unlink_from_method() {
       // method() may already be NULL if the Method* is being unloaded.
1286   if (method() != NULL) {
1287     method()->unlink_code(this);







1288   }
1289 }
1290 
1291 /**
1292  * Common functionality for both make_not_entrant and make_zombie
1293  */
1294 bool nmethod::make_not_entrant_or_zombie(int state) {
1295   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
1296   assert(!is_zombie(), "should not already be a zombie");
1297 
1298   if (Atomic::load(&_state) >= state) {
1299     // Avoid taking the lock if already in required state.
1300     // This is safe from races because the state is an end-state,
1301     // which the nmethod cannot back out of once entered.
1302     // No need for fencing either.
1303     return false;
1304   }
1305 
1306   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
1307   nmethodLocker nml(this);
1308   methodHandle the_method(method());
1309   // This can be called while the system is already at a safepoint which is ok
1310   NoSafepointVerifier nsv;
1311 
1312   // during patching, depending on the nmethod state we must notify the GC that
1313   // code has been unloaded, unregistering it. We cannot do this right while
1314   // holding the CompiledMethod_lock because we need to use the CodeCache_lock. This
1315   // would be prone to deadlocks.
1316   // This flag is used to remember whether we need to later lock and unregister.
1317   bool nmethod_needs_unregister = false;
1318 

1319   // invalidate osr nmethod before acquiring the patching lock since
1320   // they both acquire leaf locks and we don't want a deadlock.
1321   // This logic is equivalent to the logic below for patching the
1322   // verified entry point of regular methods. We check that the
1323   // nmethod is in use to ensure that it is invalidated only once.
1324   if (is_osr_method() && is_in_use()) {
1325     // this effectively makes the osr nmethod not entrant
1326     invalidate_osr_method();
1327   }
1328 
1329   {
1330     // Enter critical section.  Does not block for safepoint.
1331     MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
1332 
1333     if (Atomic::load(&_state) >= state) {
1334       // another thread already performed this transition so nothing
1335       // to do, but return false to indicate this.
1336       return false;
1337     }
1338 
1339     // The caller can be calling the method statically or through an inline
1340     // cache call.
1341     if (!is_osr_method() && !is_not_entrant()) {
1342       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
1343                   SharedRuntime::get_handle_wrong_method_stub());
1344     }
1345 
1346     if (is_in_use() && update_recompile_counts()) {
1347       // It's a true state change, so mark the method as decompiled.
1348       // Do it only for transition from alive.
1349       inc_decompile_count();
1350     }
1351 


1366     }
1367 
1368     // Change state
1369     if (!try_transition(state)) {
1370       // If the transition fails, it is due to another thread making the nmethod more
1371       // dead. In particular, one thread might be making the nmethod unloaded concurrently.
1372       // If so, having patched in the jump in the verified entry unnecessarily is fine.
1373       // The nmethod is no longer possible to call by Java threads.
1374       // Incrementing the decompile count is also fine as the caller of make_not_entrant()
1375       // had a valid reason to deoptimize the nmethod.
1376       // Marking the nmethod as seen on stack also has no effect, as the nmethod is now
1377       // !is_alive(), and the seen on stack value is only used to convert not_entrant
1378       // nmethods to zombie in can_convert_to_zombie().
1379       return false;
1380     }
1381 
1382     // Log the transition once
1383     log_state_change();
1384 
1385     // Remove nmethod from method.
1386     unlink_from_method();
1387 
1388   } // leave critical region under CompiledMethod_lock
1389 
1390 #if INCLUDE_JVMCI
1391   // Invalidate can't occur while holding the Patching lock
1392   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
1393   if (nmethod_data != NULL) {
1394     nmethod_data->invalidate_nmethod_mirror(this);
1395   }
1396 #endif
1397 
1398 #ifdef ASSERT
1399   if (is_osr_method() && method() != NULL) {
1400     // Make sure osr nmethod is invalidated, i.e. not on the list
1401     bool found = method()->method_holder()->remove_osr_nmethod(this);
1402     assert(!found, "osr nmethod should have been invalidated");
1403   }
1404 #endif
1405 
1406   // When the nmethod becomes zombie it is no longer alive so the
1407   // dependencies must be flushed.  nmethods in the not_entrant
1408   // state will be flushed later when the transition to zombie


< prev index next >