src/share/vm/code/nmethod.cpp (webrev 8058737, new version)

1113   PcDesc* pd = pc_desc_at(pc);
1114   guarantee(pd != NULL, "scope must be present");
1115   return new ScopeDesc(this, pd->scope_decode_offset(),
1116                        pd->obj_decode_offset(), pd->should_reexecute(),
1117                        pd->return_oop());
1118 }
1119 
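For context on the fragment above: pc_desc_at(pc) maps a compiled PC back to its debug-info record, and the returned ScopeDesc decodes one inlining scope. A hedged sketch of typical use when walking a compiled frame (hypothetical caller; nm and pc are assumed to be a live nmethod and a PC inside it):

// Sketch only: walking the inlining chain at a PC via scope_desc_at().
ScopeDesc* sd = nm->scope_desc_at(pc);  // guarantees a PcDesc exists
while (sd != NULL) {
  // Each level describes one inlined method: sd->method(), sd->bci().
  sd = sd->sender();                    // next scope outward, or NULL
}
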
1120 
1121 void nmethod::clear_inline_caches() {
1122   assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
1123   if (is_zombie()) {
1124     return;
1125   }
1126 
1127   RelocIterator iter(this);
1128   while (iter.next()) {
1129     iter.reloc()->clear_inline_cache();
1130   }
1131 }
1132 
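For readers outside HotSpot: iter.reloc()->clear_inline_cache() above dispatches through a virtual hook on the relocation classes. A minimal sketch of that pattern (class names follow relocInfo.hpp; the bodies are illustrative assumptions, not the real implementation):

// Sketch only: the shape of the virtual dispatch behind
// iter.reloc()->clear_inline_cache(). Real bodies live in relocInfo.cpp.
class Relocation {
 public:
  virtual ~Relocation() {}
  // Most relocation types carry no inline-cache state, so the
  // default is a no-op.
  virtual void clear_inline_cache() {}
};

class virtual_call_Relocation : public Relocation {
 public:
  // A virtual call site is reset to the "clean" state so the next
  // invocation re-resolves its callee (illustrative body).
  virtual void clear_inline_cache() { /* set call site to clean */ }
};
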
1133 // Clear ICStubs of all compiled ICs
1134 void nmethod::clear_ic_stubs() {
1135   assert_locked_or_safepoint(CompiledIC_lock);
1136   RelocIterator iter(this);
1137   while (iter.next()) {
1138     if (iter.type() == relocInfo::virtual_call_type) {
1139       CompiledIC* ic = CompiledIC_at(&iter);
1140       ic->clear_ic_stub();
1141     }
1142   }
1143 }
1144 
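A hedged sketch of how the new loop would generalize if optimized virtual call sites also needed their stubs cleared (hypothetical variant, not part of this change):

// Hypothetical variant of the loop above that also visits optimized
// virtual call sites; sketch only, using the same iterator pattern.
RelocIterator iter(this);
while (iter.next()) {
  switch (iter.type()) {
    case relocInfo::virtual_call_type:
    case relocInfo::opt_virtual_call_type: {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
      break;
    }
    default:
      break;
  }
}
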
1145 
1146 void nmethod::cleanup_inline_caches() {
1147 
1148   assert_locked_or_safepoint(CompiledIC_lock);
1149 
1150   // If the method is not entrant or zombie, then a JMP is plastered over
1151   // the first few bytes.  If an oop in the old code was there, that oop
1152   // should not get GC'd.  Skip any oops in the first few bytes of
1153   // not-entrant methods.
1154   address low_boundary = verified_entry_point();
1155   if (!is_in_use()) {
1156     low_boundary += NativeJump::instruction_size;
1157     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1158     // This means that the low_boundary is going to be a little too high.
1159     // This shouldn't matter, since oops of non-entrant methods are never used.
1160     // In fact, why are we bothering to look at oops in a non-entrant method??
1161   }
1162 
1163   // Find all calls in an nmethod, and clear the ones that point to zombie methods
1164   ResourceMark rm;
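The hunk is truncated here; in the full function, low_boundary feeds the relocation walk so that the patched first bytes are never inspected. A sketch of that continuation, assuming RelocIterator's bounded constructor (illustrative, not the verbatim source):

// Sketch: consume low_boundary by starting the walk past the patch.
RelocIterator iter(this, low_boundary);
while (iter.next()) {
  // ... examine call sites and clean ICs whose targets are zombies ...
}
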


1487       HandleMark hm;
1488       method()->clear_code();
1489     }
1490   } // leave critical region under Patching_lock
1491 
1492   // When the nmethod becomes zombie it is no longer alive so the
1493   // dependencies must be flushed.  nmethods in the not_entrant
1494   // state will be flushed later when the transition to zombie
1495   // happens or they get unloaded.
1496   if (state == zombie) {
1497     {
1498       // Flushing dependencies must be done before any possible
1499       // safepoint can sneak in, otherwise the oops used by the
1500       // dependency logic could have become stale.
1501       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1502       if (nmethod_needs_unregister) {
1503         Universe::heap()->unregister_nmethod(this);
1504       }
1505       flush_dependencies(NULL);
1506     }
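The ordering constraint in the comment is what motivates the flag on the lock; a minimal sketch of the idiom, spelling out the assumption:

// Sketch of the idiom above: _no_safepoint_check_flag acquires the
// lock without polling for a safepoint, so no safepoint (and hence no
// GC) can interleave before the scoped block ends; the oops read by
// the dependency logic therefore cannot go stale mid-flush.
{
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  // ... dependency flushing that must not see a safepoint ...
}  // lock released at end of scope
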
1507 
1508 
1509 
1510     // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
1511     // event and it hasn't already been reported for this nmethod then
1512     // report it now. (The event may have been reported earlier if the GC
1513     // marked it for unloading.) JvmtiDeferredEventQueue support means
1514     // we no longer go to a safepoint here.
1515     post_compiled_method_unload();
1516 
1517 #ifdef ASSERT
1518     // It's no longer safe to access the oops section since zombie
1519     // nmethods aren't scanned for GC.
1520     _oops_are_stale = true;
1521 #endif
1522     // The Method may be reclaimed by class unloading now that the
1523     // nmethod is in zombie state.
1524     set_method(NULL);
1525   } else {
1526     assert(state == not_entrant, "other cases may need to be handled differently");
1527   }
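The zombie / not_entrant branch above follows the nmethod lifecycle; a sketch of the states for orientation (values mirror nmethod.hpp; descriptions paraphrased):

// Sketch of the nmethod states referenced in the transition code:
enum {
  in_use      = 0,  // executable nmethod
  not_entrant = 1,  // marked for deoptimization; activations may remain
  zombie      = 2,  // no activations; dependencies flushed, purge-ready
  unloaded    = 3   // unloaded by the GC; must never be entered again
};
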
1528 

