34 #include "compiler/compilerOracle.hpp"
35 #include "compiler/disassembler.hpp"
36 #include "interpreter/bytecode.hpp"
37 #include "oops/methodData.hpp"
38 #include "prims/jvmtiRedefineClassesTrace.hpp"
39 #include "prims/jvmtiImpl.hpp"
40 #include "runtime/atomic.inline.hpp"
41 #include "runtime/orderAccess.inline.hpp"
42 #include "runtime/sharedRuntime.hpp"
43 #include "runtime/sweeper.hpp"
44 #include "utilities/resourceHash.hpp"
45 #include "utilities/dtrace.hpp"
46 #include "utilities/events.hpp"
47 #include "utilities/xmlstream.hpp"
48 #ifdef SHARK
49 #include "shark/sharkCompiler.hpp"
50 #endif
51
52 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
53
54 unsigned char nmethod::_global_unloading_clock = 0;
55
56 #ifdef DTRACE_ENABLED
57
58 // Only bother with this argument setup if dtrace is available
59
60 #define DTRACE_METHOD_UNLOAD_PROBE(method) \
61 { \
62 Method* m = (method); \
63 if (m != NULL) { \
64 Symbol* klass_name = m->klass_name(); \
65 Symbol* name = m->name(); \
66 Symbol* signature = m->signature(); \
67 HOTSPOT_COMPILED_METHOD_UNLOAD( \
68 (char *) klass_name->bytes(), klass_name->utf8_length(), \
69 (char *) name->bytes(), name->utf8_length(), \
70 (char *) signature->bytes(), signature->utf8_length()); \
71 } \
72 }
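//
// Usage sketch for the probe macro above (hypothetical call site, shown only
// to illustrate the guard-then-probe pattern; when DTRACE_ENABLED is not
// defined the macro is expected to expand to nothing):
//
//   void example_unload_hook(Method* m) {
//     DTRACE_METHOD_UNLOAD_PROBE(m);   // fires HOTSPOT_COMPILED_METHOD_UNLOAD
//   }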
73
74 #else // ndef DTRACE_ENABLED
75
431 int nmethod::total_size() const {
432 return
433 consts_size() +
434 insts_size() +
435 stub_size() +
436 scopes_data_size() +
437 scopes_pcs_size() +
438 handler_table_size() +
439 nul_chk_table_size();
440 }
441
442 const char* nmethod::compile_kind() const {
443 if (is_osr_method()) return "osr";
444 if (method() != NULL && is_native_method()) return "c2n";
445 return NULL;
446 }
447
448 // Fill in default values for various flag fields
449 void nmethod::init_defaults() {
450 _state = in_use;
451 _unloading_clock = 0;
452 _marked_for_reclamation = 0;
453 _has_flushed_dependencies = 0;
454 _has_unsafe_access = 0;
455 _has_method_handle_invokes = 0;
456 _lazy_critical_native = 0;
457 _has_wide_vectors = 0;
458 _marked_for_deoptimization = 0;
459 _lock_count = 0;
460 _stack_traversal_mark = 0;
461 _unload_reported = false; // jvmti state
462
463 #ifdef ASSERT
464 _oops_are_stale = false;
465 #endif
466
467 _oops_do_mark_link = NULL;
468 _jmethod_id = NULL;
469 _osr_link = NULL;
470 if (UseG1GC) {
471 _unloading_next = NULL;
472 } else {
473 _scavenge_root_link = NULL;
474 }
475 _scavenge_root_state = 0;
476 _compiler = NULL;
477 #if INCLUDE_RTM_OPT
478 _rtm_state = NoRTM;
479 #endif
480 #ifdef HAVE_DTRACE_H
481 _trap_offset = 0;
482 #endif // def HAVE_DTRACE_H
483 }
484
485 nmethod* nmethod::new_native_nmethod(methodHandle method,
486 int compile_id,
487 CodeBuffer *code_buffer,
488 int vep_offset,
489 int frame_complete,
490 int frame_size,
491 ByteSize basic_lock_owner_sp_offset,
492 ByteSize basic_lock_sp_offset,
493 OopMapSet* oop_maps) {
494 code_buffer->finalize_oop_references(method);
1160 nmethod* nm = (nmethod*)cb;
1161 // Clean inline caches pointing to zombie or not_entrant methods
1162 if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
1163 }
1164 break;
1165 }
1166 case relocInfo::static_call_type: {
1167 CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
1168 CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
1169 if (cb != NULL && cb->is_nmethod()) {
1170 nmethod* nm = (nmethod*)cb;
1171 // Clean inline caches pointing to zombie or not_entrant methods
1172 if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
1173 }
1174 break;
1175 }
1176 }
1177 }
1178 }
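//
// A minimal standalone model (hypothetical types, plain C++) of the staleness
// test used above: a call site is cleaned when its target nmethod is either
// no longer in use, or no longer the code installed for its Method.
//
//   struct NmethodModel;
//   struct MethodModel  { NmethodModel* code; };   // Method::code() analogue
//   struct NmethodModel { bool in_use; MethodModel* method; };
//
//   static bool target_is_stale(const NmethodModel* nm) {
//     return !nm->in_use || nm->method->code != nm;
//   }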
1179
1180 void nmethod::verify_clean_inline_caches() {
1181 assert_locked_or_safepoint(CompiledIC_lock);
1182
1183 // If the method is not entrant or zombie then a JMP is plastered over the
1184 // first few bytes. If an oop in the old code was there, that oop
1185 // should not get GC'd. Skip the first few bytes of oops on
1186 // not-entrant methods.
1187 address low_boundary = verified_entry_point();
1188 if (!is_in_use()) {
1189 low_boundary += NativeJump::instruction_size;
1190 // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
1191 // This means that the low_boundary is going to be a little too high.
1192 // This shouldn't matter, since oops of non-entrant methods are never used.
1193 // In fact, why are we bothering to look at oops in a non-entrant method??
1194 }
1195
1196 // Find all calls in an nmethod and verify that the ones pointing to zombie methods are clean
1197 ResourceMark rm;
1198 RelocIterator iter(this, low_boundary);
1199 while (iter.next()) {
1200 switch (iter.type()) {
1201 case relocInfo::virtual_call_type:
1202 case relocInfo::opt_virtual_call_type: {
1203 CompiledIC *ic = CompiledIC_at(&iter);
1204 // OK to look up references to zombies here
1205 CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
1206 if (cb != NULL && cb->is_nmethod()) {
1207 nmethod* nm = (nmethod*)cb;
1208 // Verify that inline caches pointing to zombie or not_entrant methods are clean
1209 if (!nm->is_in_use() || (nm->method()->code() != nm)) {
1210 assert(ic->is_clean(), "IC should be clean");
1211 }
1212 }
1213 break;
1214 }
1215 case relocInfo::static_call_type: {
1216 CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
1217 CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
1218 if (cb != NULL && cb->is_nmethod()) {
1219 nmethod* nm = (nmethod*)cb;
1220 // Verify that inline caches pointing to zombie or not_entrant methods are clean
1221 if (!nm->is_in_use() || (nm->method()->code() != nm)) {
1222 assert(csc->is_clean(), "IC should be clean");
1223 }
1224 }
1225 break;
1226 }
1227 }
1228 }
1229 }
1230
1231 int nmethod::verify_icholder_relocations() {
1232 int count = 0;
1233
1234 RelocIterator iter(this);
1235 while (iter.next()) {
1236 if (iter.type() == relocInfo::virtual_call_type) {
1237 if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
1238 CompiledIC *ic = CompiledIC_at(&iter);
1239 if (TraceCompiledIC) {
1240 tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
1241 ic->print();
1242 }
1243 assert(ic->cached_icholder() != NULL, "must be non-NULL");
1244 count++;
1245 }
1246 }
1247 }
1248
1249 return count;
1250 }
1251
1252 // This is a private interface with the sweeper.
1253 void nmethod::mark_as_seen_on_stack() {
1254 assert(is_alive(), "Must be an alive method");
1255 // Set the traversal mark to ensure that the sweeper does 2
1256 // cleaning passes before moving to zombie.
1257 set_stack_traversal_mark(NMethodSweeper::traversal_count());
1258 }
1259
1260 // Tell whether a non-entrant method can be converted to a zombie (i.e.,
1261 // there are no activations on the stack, and the method is not in use
1262 // by the VM or the ServiceThread)
1263 bool nmethod::can_not_entrant_be_converted() {
1264 assert(is_not_entrant(), "must be a non-entrant method");
1265
1266 // Since the nmethod sweeper only does partial sweeps, the sweeper's traversal
1267 // count can be greater than the stack traversal count before it hits the
1268 // nmethod for the second time.
1269 return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
1270 !is_locked_by_vm();
1271 }
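//
// A minimal standalone model of the handshake above (hypothetical names,
// plain C++; not the HotSpot sweeper): being seen on the stack in pass N
// blocks conversion until at least two further sweeper passes have started.
//
//   struct SweeperModel {
//     long traversal_count = 1;  // bumped once per sweeper pass
//     long stack_mark      = 0;  // last pass that saw the nmethod on stack
//     void start_pass()            { ++traversal_count; }
//     void seen_on_stack()         { stack_mark = traversal_count; }
//     bool can_convert_to_zombie() { return stack_mark + 1 < traversal_count; }
//   };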
1272
1273 void nmethod::inc_decompile_count() {
1274 if (!is_compiled_by_c2()) return;
1275 // Could be gated by ProfileTraps, but do not bother...
1276 Method* m = method();
1277 if (m == NULL) return;
1278 MethodData* mdo = m->method_data();
1279 if (mdo == NULL) return;
1280 // There is a benign race here. See comments in methodData.hpp.
1281 mdo->inc_decompile_count();
1282 }
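//
// A tiny model of the benign race noted above (hypothetical type): the
// counter is a plain field, so a concurrent increment can occasionally be
// lost, which is acceptable for a value that only steers recompilation
// heuristics, not correctness.
//
//   struct MethodDataModel {
//     int decompile_count = 0;                          // plain, unsynchronized
//     void inc_decompile_count() { ++decompile_count; } // racy by design
//   };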
1283
1284 void nmethod::increase_unloading_clock() {
1285 _global_unloading_clock++;
1286 if (_global_unloading_clock == 0) {
1287 // nmethods are allocated with _unloading_clock == 0,
1288 // so 0 is never used as a clock value.
1289 _global_unloading_clock = 1;
1290 }
1291 }
1292
1293 void nmethod::set_unloading_clock(unsigned char unloading_clock) {
1294 OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
1295 }
1296
1297 unsigned char nmethod::unloading_clock() {
1298 return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
1299 }
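//
// A standalone model of the clock handshake above, with std::atomic standing
// in for OrderAccess and hypothetical names (not HotSpot API). The global
// clock skips 0 so that freshly allocated nmethods (clock == 0) always read
// as "not yet processed in this cycle":
//
//   #include <atomic>
//
//   static std::atomic<unsigned char> g_global_clock{0};
//
//   static void increase_clock() {
//     unsigned char next = (unsigned char)(g_global_clock.load() + 1);
//     if (next == 0) next = 1;          // 0 is reserved for "never processed"
//     g_global_clock.store(next);
//   }
//
//   struct NmethodModel {
//     std::atomic<unsigned char> clock{0};
//     void set_clock(unsigned char c) { clock.store(c, std::memory_order_release); }
//     bool processed_this_cycle() {
//       return clock.load(std::memory_order_acquire) == g_global_clock.load();
//     }
//   };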
1300
1301 void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
1302
1303 post_compiled_method_unload();
1304
1305 // Since this nmethod is being unloaded, make sure that dependencies
1306 // recorded in instanceKlasses get flushed, and pass a non-NULL closure to
1307 // indicate that this work is being done during a GC.
1308 assert(Universe::heap()->is_gc_active(), "should only be called during gc");
1309 assert(is_alive != NULL, "Should be non-NULL");
1310 // A non-NULL is_alive closure indicates that this is being called during GC.
1311 flush_dependencies(is_alive);
1312
1313 // Break cycle between nmethod & method
1314 if (TraceClassUnloading && WizardMode) {
1315 tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT
1316 " unloadable], Method*(" INTPTR_FORMAT
1317 "), cause(" INTPTR_FORMAT ")",
1318 this, (address)_method, (address)cause);
1319 if (!Universe::heap()->is_gc_active())
1320 cause->klass()->print();
1326 // If _method is already NULL the Method* is about to be unloaded,
1327 // so we don't have to break the cycle. Note that it is possible to
1328 // have the Method* live here, in case we unload the nmethod because
1329 // it is pointing to some oop (other than the Method*) being unloaded.
1330 if (_method != NULL) {
1331 // OSR methods point to the Method*, but the Method* does not
1332 // point back!
1333 if (_method->code() == this) {
1334 _method->clear_code(); // Break a cycle
1335 }
1336 _method = NULL; // Clear the method of this dead nmethod
1337 }
1338 // Make the nmethod unloaded - i.e., change state and notify sweeper
1339 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1340 if (is_in_use()) {
1341 // Transitioning directly from live to unloaded -- so
1342 // we need to force a cache clean-up; remember this
1343 // for later on.
1344 CodeCache::set_needs_cache_clean(true);
1345 }
1346
1347 // Unregister must be done before the state change
1348 Universe::heap()->unregister_nmethod(this);
1349
1350 _state = unloaded;
1351
1352 // Log the unloading.
1353 log_state_change();
1354
1355 // The Method* is gone at this point
1356 assert(_method == NULL, "Tautology");
1357
1358 set_osr_link(NULL);
1359 //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
1360 NMethodSweeper::report_state_change(this);
1361 }
1362
1363 void nmethod::invalidate_osr_method() {
1364 assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
1365 // Remove from list of active nmethods
1366 if (method() != NULL)
1367 method()->method_holder()->remove_osr_nmethod(this);
1368 // Set entry as invalid
1369 _entry_bci = InvalidOSREntryBci;
1673 JvmtiDeferredEvent::compiled_method_unload_event(this,
1674 _jmethod_id, insts_begin());
1675 if (SafepointSynchronize::is_at_safepoint()) {
1676 // Don't want to take the queueing lock. Add it as pending and
1677 // it will get enqueued later.
1678 JvmtiDeferredEventQueue::add_pending_event(event);
1679 } else {
1680 MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
1681 JvmtiDeferredEventQueue::enqueue(event);
1682 }
1683 }
1684
1685 // The JVMTI CompiledMethodUnload event can be enabled or disabled at
1686 // any time. As the nmethod is being unloaded now, we mark it as
1687 // having the unload event reported - this will ensure that we don't
1688 // attempt to report the event in the unlikely scenario where the
1689 // event is enabled at the time the nmethod is made a zombie.
1690 set_unload_reported();
1691 }
1692
1693 static void clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
1694 if (ic->is_icholder_call()) {
1695 // The only exception is compiledICHolder metadata, which may
1696 // yet be marked below. (We check this further below.)
1697 CompiledICHolder* cichk_oop = ic->cached_icholder();
1698 if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
1699 cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
1700 return;
1701 }
1702 } else {
1703 Metadata* ic_oop = ic->cached_metadata();
1704 if (ic_oop != NULL) {
1705 if (ic_oop->is_klass()) {
1706 if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
1707 return;
1708 }
1709 } else if (ic_oop->is_method()) {
1710 if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
1711 return;
1712 }
1713 } else {
1714 ShouldNotReachHere();
1715 }
1716 }
1717 }
1718
1719 ic->set_to_clean();
1720 }
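//
// The decision above, flattened into a standalone predicate (hypothetical
// names; the three cases mirror icholder, Klass and Method caching): an
// inline cache survives class unloading only if every class loader backing
// its cached metadata is still alive.
//
//   enum class CachedKind { ICHolder, Klass, Method };
//
//   static bool ic_survives(CachedKind kind,
//                           bool holder_method_loader_alive,
//                           bool holder_klass_loader_alive,
//                           bool metadata_loader_alive) {
//     switch (kind) {
//       case CachedKind::ICHolder:
//         return holder_method_loader_alive && holder_klass_loader_alive;
//       case CachedKind::Klass:
//       case CachedKind::Method:
//         return metadata_loader_alive;
//     }
//     return false;  // not reached for the three kinds above
//   }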
1721
1722 // This is called at the end of the strong tracing/marking phase of a
1723 // GC to unload an nmethod if it contains otherwise unreachable
1724 // oops.
1725
1726 void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
1727 // Make sure the oops are ready to receive visitors
1728 assert(!is_zombie() && !is_unloaded(),
1729 "should not call follow on zombie or unloaded nmethod");
1730
1731 // If the method is not entrant then a JMP is plastered over the
1732 // first few bytes. If an oop in the old code was there, that oop
1733 // should not get GC'd. Skip the first few bytes of oops on
1734 // not-entrant methods.
1735 address low_boundary = verified_entry_point();
1736 if (is_not_entrant()) {
1737 low_boundary += NativeJump::instruction_size;
1738 // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
1739 // (See comment above.)
1740 }
1741
1742 // The RedefineClasses() API can cause the class unloading invariant
1743 // to no longer be true. See jvmtiExport.hpp for details.
1744 // Also, leave a debugging breadcrumb in local flag.
1745 bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
1746 if (a_class_was_redefined) {
1747 // This set of the unloading_occurred flag is done before the
1748 // call to post_compiled_method_unload() so that the unloading
1749 // of this nmethod is reported.
1750 unloading_occurred = true;
1751 }
1752
1753 // Exception cache
1754 clean_exception_cache(is_alive);
1755
1756 // If class unloading occurred we first iterate over all inline caches and
1757 // clear ICs where the cached metadata refers to an unloaded klass or method.
1758 // The remaining live cached oops will be traversed in the relocInfo::oop_type
1759 // iteration below.
1760 if (unloading_occurred) {
1761 RelocIterator iter(this, low_boundary);
1762 while (iter.next()) {
1763 if (iter.type() == relocInfo::virtual_call_type) {
1764 CompiledIC *ic = CompiledIC_at(&iter);
1765 clean_ic_if_metadata_is_dead(ic, is_alive);
1766 }
1767 }
1768 }
1769
1770 // Compiled code
1771 {
1772 RelocIterator iter(this, low_boundary);
1773 while (iter.next()) {
1774 if (iter.type() == relocInfo::oop_type) {
1775 oop_Relocation* r = iter.oop_reloc();
1776 // In this loop, we must only traverse those oops directly embedded in
1777 // the code. Other oops (oop_index>0) are seen as part of scopes_oops.
1778 assert(1 == (r->oop_is_immediate()) +
1779 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1780 "oop must be found in exactly one place");
1781 if (r->oop_is_immediate() && r->oop_value() != NULL) {
1782 if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
1783 return;
1784 }
1785 }
1786 }
1787 }
1788 }
1789
1790
1791 // Scopes
1792 for (oop* p = oops_begin(); p < oops_end(); p++) {
1793 if (*p == Universe::non_oop_word()) continue; // skip non-oops
1794 if (can_unload(is_alive, p, unloading_occurred)) {
1795 return;
1796 }
1797 }
1798
1799 // Ensure that all metadata is still alive
1800 verify_metadata_loaders(low_boundary, is_alive);
1801 }
1802
1803 template <class CompiledICorStaticCall>
1804 static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, nmethod* from) {
1805 // OK to look up references to zombies here
1806 CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
1807 if (cb != NULL && cb->is_nmethod()) {
1808 nmethod* nm = (nmethod*)cb;
1809
1810 if (nm->unloading_clock() != nmethod::global_unloading_clock()) {
1811 // The nmethod has not been processed yet.
1812 return true;
1813 }
1814
1815 // Clean inline caches pointing to zombie or not_entrant methods
1816 if (!nm->is_in_use() || (nm->method()->code() != nm)) {
1817 ic->set_to_clean();
1818 assert(ic->is_clean(), err_msg("nmethod " PTR_FORMAT " not clean %s", from, from->method()->name_and_sig_as_C_string()));
1819 }
1820 }
1821
1822 return false;
1823 }
1824
1825 static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) {
1826 return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
1827 }
1828
1829 static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) {
1830 return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
1831 }
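//
// The two wrappers above funnel CompiledIC and CompiledStaticCall through one
// template by duck typing: both types provide set_to_clean() and is_clean().
// A standalone model of the pattern (hypothetical types):
//
//   template <class CallSite>
//   static void clean_call_site(CallSite* site, bool target_is_stale) {
//     if (target_is_stale) {
//       site->set_to_clean();
//     }
//   }
//
//   struct InlineCacheModel {
//     bool clean = false;
//     void set_to_clean()   { clean = true; }
//     bool is_clean() const { return clean; }
//   };
//   // clean_call_site(&some_ic, true) compiles for any such type.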
1832
1833 bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
1834 ResourceMark rm;
1835
1836 // Make sure the oops are ready to receive visitors
1837 assert(!is_zombie() && !is_unloaded(),
1838 "should not call follow on zombie or unloaded nmethod");
1839
1840 // If the method is not entrant then a JMP is plastered over the
1841 // first few bytes. If an oop in the old code was there, that oop
1842 // should not get GC'd. Skip the first few bytes of oops on
1843 // not-entrant methods.
1844 address low_boundary = verified_entry_point();
1845 if (is_not_entrant()) {
1846 low_boundary += NativeJump::instruction_size;
1847 // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
1848 // (See comment above.)
1849 }
1850
1851 // The RedefineClasses() API can cause the class unloading invariant
1852 // to no longer be true. See jvmtiExport.hpp for details.
1853 // Also, leave a debugging breadcrumb in local flag.
1854 bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
1855 if (a_class_was_redefined) {
1856 // This set of the unloading_occurred flag is done before the
1857 // call to post_compiled_method_unload() so that the unloading
1858 // of this nmethod is reported.
1859 unloading_occurred = true;
1860 }
1861
1862 // Exception cache
1863 clean_exception_cache(is_alive);
1864
1865 bool is_unloaded = false;
1866 bool postponed = false;
1867
1868 RelocIterator iter(this, low_boundary);
1869 while (iter.next()) {
1870
1871 switch (iter.type()) {
1872
1873 case relocInfo::virtual_call_type:
1874 if (unloading_occurred) {
1875 // If class unloading occurred we first iterate over all inline caches and
1876 // clear ICs where the cached metadata refers to an unloaded klass or method.
1877 clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
1878 }
1879
1880 postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1881 break;
1882
1883 case relocInfo::opt_virtual_call_type:
1884 postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1885 break;
1886
1887 case relocInfo::static_call_type:
1888 postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
1889 break;
1890
1891 case relocInfo::oop_type:
1892 if (!is_unloaded) {
1893 // Unload check
1894 oop_Relocation* r = iter.oop_reloc();
1895 // Traverse those oops directly embedded in the code.
1896 // Other oops (oop_index>0) are seen as part of scopes_oops.
1897 assert(1 == (r->oop_is_immediate()) +
1898 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1899 "oop must be found in exactly one place");
1900 if (r->oop_is_immediate() && r->oop_value() != NULL) {
1901 if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
1902 is_unloaded = true;
1903 }
1904 }
1905 }
1906 break;
1907
1908 }
1909 }
1910
1911 if (is_unloaded) {
1912 return postponed;
1913 }
1914
1915 // Scopes
1916 for (oop* p = oops_begin(); p < oops_end(); p++) {
1917 if (*p == Universe::non_oop_word()) continue; // skip non-oops
1918 if (can_unload(is_alive, p, unloading_occurred)) {
1919 is_unloaded = true;
1920 break;
1921 }
1922 }
1923
1924 if (is_unloaded) {
1925 return postponed;
1926 }
1927
1928 // Ensure that all metadata is still alive
1929 verify_metadata_loaders(low_boundary, is_alive);
1930
1931 return postponed;
1932 }
1933
1934 void nmethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
1935 ResourceMark rm;
1936
1937 // Make sure the oops are ready to receive visitors
1938 assert(!is_zombie(),
1939 "should not call follow on zombie nmethod");
1940
1941 // If the method is not entrant then a JMP is plastered over the
1942 // first few bytes. If an oop in the old code was there, that oop
1943 // should not get GC'd. Skip the first few bytes of oops on
1944 // not-entrant methods.
1945 address low_boundary = verified_entry_point();
1946 if (is_not_entrant()) {
1947 low_boundary += NativeJump::instruction_size;
1948 // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
1949 // (See comment above.)
1950 }
1951
1952 RelocIterator iter(this, low_boundary);
1953 while (iter.next()) {
1954
1955 switch (iter.type()) {
1956
1957 case relocInfo::virtual_call_type:
1958 clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1959 break;
1960
1961 case relocInfo::opt_virtual_call_type:
1962 clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1963 break;
1964
1965 case relocInfo::static_call_type:
1966 clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
1967 break;
1968 }
1969 }
1970 }
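//
// A sketch of how a GC might drive the two phases above (hypothetical driver,
// not a HotSpot API; in practice a parallel unloading task plays this role).
// Calls whose target's clock still differs from the global clock are
// postponed, because that target has not been processed yet this cycle:
//
//   #include <vector>
//
//   // Phase 0 (once, single-threaded): nmethod::increase_unloading_clock();
//
//   // Phase 1 (each worker, over the nmethods it has claimed):
//   void unload_phase1(std::vector<nmethod*>& claimed,
//                      std::vector<nmethod*>& postponed,
//                      BoolObjectClosure* is_alive, bool unloading_occurred) {
//     for (nmethod* nm : claimed) {
//       if (nm->do_unloading_parallel(is_alive, unloading_occurred)) {
//         postponed.push_back(nm);   // some callee not yet swept this cycle
//       }
//       nm->set_unloading_clock(nmethod::global_unloading_clock());
//     }
//   }
//
//   // Phase 2 (after all workers complete phase 1):
//   void unload_phase2(std::vector<nmethod*>& postponed,
//                      BoolObjectClosure* is_alive, bool unloading_occurred) {
//     for (nmethod* nm : postponed) {
//       nm->do_unloading_parallel_postponed(is_alive, unloading_occurred);
//     }
//   }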
1971
1972 #ifdef ASSERT
1973
1974 class CheckClass : AllStatic {
1975 static BoolObjectClosure* _is_alive;
1976
1977 // Check class_loader is alive for this bit of metadata.
1978 static void check_class(Metadata* md) {
1979 Klass* klass = NULL;
1980 if (md->is_klass()) {
1981 klass = ((Klass*)md);
1982 } else if (md->is_method()) {
1983 klass = ((Method*)md)->method_holder();
1984 } else if (md->is_methodData()) {
1985 klass = ((MethodData*)md)->method()->method_holder();
1986 } else {
1987 md->print();
1988 ShouldNotReachHere();
1989 }
1990 assert(klass->is_loader_alive(_is_alive), "must be alive");
1991 }
2168 // On fall through, another racing thread marked this nmethod before we did.
2169 return true;
2170 }
2171
2172 void nmethod::oops_do_marking_prologue() {
2173 NOT_PRODUCT(if (TraceScavenge) tty->print_cr("[oops_do_marking_prologue"));
2174 assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
2175 // We use cmpxchg_ptr instead of regular assignment here because the user
2176 // may fork a bunch of threads, and we need them all to see the same state.
2177 void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
2178 guarantee(observed == NULL, "no races in this sequential code");
2179 }
2180
2181 void nmethod::oops_do_marking_epilogue() {
2182 assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
2183 nmethod* cur = _oops_do_mark_nmethods;
2184 while (cur != NMETHOD_SENTINEL) {
2185 assert(cur != NULL, "not NULL-terminated");
2186 nmethod* next = cur->_oops_do_mark_link;
2187 cur->_oops_do_mark_link = NULL;
2188 cur->verify_oop_relocations();
2189 NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark"));
2190 cur = next;
2191 }
2192 void* required = _oops_do_mark_nmethods;
2193 void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
2194 guarantee(observed == required, "no races in this sequential code");
2195 NOT_PRODUCT(if (TraceScavenge) tty->print_cr("oops_do_marking_epilogue]"));
2196 }
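//
// A standalone model of the claim list used above (std::atomic standing in
// for Atomic::cmpxchg_ptr; hypothetical names). The list is terminated by a
// sentinel instead of NULL so that a NULL link can keep meaning "not marked",
// which is what makes the racy claim below safe:
//
//   #include <atomic>
//
//   struct Node { std::atomic<Node*> link{nullptr}; };
//   static Node g_sentinel;                          // NMETHOD_SENTINEL analogue
//   static std::atomic<Node*> g_head{&g_sentinel};   // set up by the prologue
//
//   // Returns true if another thread already claimed the node (the
//   // "fall through" case in the marking code above).
//   static bool test_set_mark(Node* n) {
//     Node* expected = nullptr;
//     if (!n->link.compare_exchange_strong(expected, &g_sentinel)) {
//       return true;                                 // lost the race
//     }
//     Node* head = g_head.load();
//     do {
//       n->link.store(head);                         // point at current head
//     } while (!g_head.compare_exchange_weak(head, n));
//     return false;                                  // claimed and pushed
//   }
//   // The epilogue walks g_head until it reaches &g_sentinel, clearing each
//   // link back to nullptr, then CASes g_head back to nullptr.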
2197
2198 class DetectScavengeRoot: public OopClosure {
2199 bool _detected_scavenge_root;
2200 public:
2201 DetectScavengeRoot() : _detected_scavenge_root(false)
2202 { NOT_PRODUCT(_print_nm = NULL); }
2203 bool detected_scavenge_root() { return _detected_scavenge_root; }
2204 virtual void do_oop(oop* p) {
2205 if ((*p) != NULL && (*p)->is_scavengable()) {
2206 NOT_PRODUCT(maybe_print(p));
2207 _detected_scavenge_root = true;
2208 }
2736 class DebugScavengeRoot: public OopClosure {
2737 nmethod* _nm;
2738 bool _ok;
2739 public:
2740 DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { }
2741 bool ok() { return _ok; }
2742 virtual void do_oop(oop* p) {
2743 if ((*p) == NULL || !(*p)->is_scavengable()) return;
2744 if (_ok) {
2745 _nm->print_nmethod(true);
2746 _ok = false;
2747 }
2748 tty->print_cr("*** scavengable oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
2749 (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
2750 (*p)->print();
2751 }
2752 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2753 };
2754
2755 void nmethod::verify_scavenge_root_oops() {
2756 if (UseG1GC) {
2757 return;
2758 }
2759
2760 if (!on_scavenge_root_list()) {
2761 // Actually look inside, to verify the claim that it's clean.
2762 DebugScavengeRoot debug_scavenge_root(this);
2763 oops_do(&debug_scavenge_root);
2764 if (!debug_scavenge_root.ok())
2765 fatal("found an unadvertised bad scavengable oop in the code cache");
2766 }
2767 assert(scavenge_root_not_marked(), "");
2768 }
2769
2770 #endif // PRODUCT
2771
2772 // Printing operations
2773
2774 void nmethod::print() const {
2775 ResourceMark rm;
2776 ttyLocker ttyl; // keep the following output all in one block
2777
2778 tty->print("Compiled method ");
2779