< prev index next >

src/share/vm/code/nmethod.cpp

Print this page
rev 11777 : [mq]: gcinterface.patch


 608     _nmethod_end_offset      = _nul_chk_table_offset;
 609     _compile_id              = compile_id;
 610     _comp_level              = CompLevel_none;
 611     _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
 612     _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
 613     _osr_entry_point         = NULL;
 614     _exception_cache         = NULL;
 615     _pc_desc_container.reset_to(NULL);
 616     _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 617 
 618     _scopes_data_begin = (address) this + scopes_data_offset;
 619     _deopt_handler_begin = (address) this + deoptimize_offset;
 620     _deopt_mh_handler_begin = (address) this + deoptimize_mh_offset;
 621 
 622     code_buffer->copy_code_and_locs_to(this);
 623     code_buffer->copy_values_to(this);
 624     if (ScavengeRootsInCode) {
 625       if (detect_scavenge_root_oops()) {
 626         CodeCache::add_scavenge_root_nmethod(this);
 627       }
 628       Universe::heap()->register_nmethod(this);
 629     }
 630     debug_only(verify_scavenge_root_oops());
 631     CodeCache::commit(this);
 632   }
 633 
 634   if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
 635     ttyLocker ttyl;  // keep the following output all in one block
 636     // This output goes directly to the tty, not the compiler log.
 637     // To enable tools to match it up with the compilation activity,
 638     // be sure to tag this tty output with the compile ID.
 639     if (xtty != NULL) {
 640       xtty->begin_head("print_native_nmethod");
 641       xtty->method(_method);
 642       xtty->stamp();
 643       xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
 644     }
 645     // print the header part first
 646     print();
 647     // then print the requested information
 648     if (PrintNativeNMethods) {


 762     _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
 763     _nmethod_end_offset      = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);
 764     _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
 765     _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
 766     _osr_entry_point         = code_begin()          + offsets->value(CodeOffsets::OSR_Entry);
 767     _exception_cache         = NULL;
 768 
 769     _scopes_data_begin = (address) this + scopes_data_offset;
 770 
 771     _pc_desc_container.reset_to(scopes_pcs_begin());
 772 
 773     code_buffer->copy_code_and_locs_to(this);
 774     // Copy contents of ScopeDescRecorder to nmethod
 775     code_buffer->copy_values_to(this);
 776     debug_info->copy_to(this);
 777     dependencies->copy_to(this);
 778     if (ScavengeRootsInCode) {
 779       if (detect_scavenge_root_oops()) {
 780         CodeCache::add_scavenge_root_nmethod(this);
 781       }
 782       Universe::heap()->register_nmethod(this);
 783     }
 784     debug_only(verify_scavenge_root_oops());
 785 
 786     CodeCache::commit(this);
 787 
 788     // Copy contents of ExceptionHandlerTable to nmethod
 789     handler_table->copy_to(this);
 790     nul_chk_table->copy_to(this);
 791 
 792     // we use the information of entry points to find out if a method is
 793     // static or non static
 794     assert(compiler->is_c2() || compiler->is_jvmci() ||
 795            _method->is_static() == (entry_point() == _verified_entry_point),
 796            " entry points must be same for static methods and vice versa");
 797   }
 798 }
 799 
 800 // Print a short set of xml attributes to identify this nmethod.  The
 801 // output should be embedded in some other element.
 802 void nmethod::log_identity(xmlStream* log) const {


1038 }
1039 
      // Record one decompilation of this nmethod in the owning Method's
      // MethodData counter, so compilation policy can detect methods that
      // are repeatedly compiled and discarded.  No-op for non-top-tier code
      // or when the Method*/MethodData is unavailable.
 1040 void nmethod::inc_decompile_count() {
      // Only C2- and JVMCI-compiled code participates in decompile accounting.
 1041   if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
 1042   // Could be gated by ProfileTraps, but do not bother...
 1043   Method* m = method();
 1044   if (m == NULL)  return;
      // The method may have no profiling data allocated -- nothing to record.
 1045   MethodData* mdo = m->method_data();
 1046   if (mdo == NULL)  return;
 1047   // There is a benign race here.  See comments in methodData.hpp.
 1048   mdo->inc_decompile_count();
 1049 }
1050 
1051 void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
1052 
1053   post_compiled_method_unload();
1054 
1055   // Since this nmethod is being unloaded, make sure that dependencies
1056   // recorded in instanceKlasses get flushed and pass non-NULL closure to
1057   // indicate that this work is being done during a GC.
1058   assert(Universe::heap()->is_gc_active(), "should only be called during gc");
1059   assert(is_alive != NULL, "Should be non-NULL");
1060   // A non-NULL is_alive closure indicates that this is being called during GC.
1061   flush_dependencies(is_alive);
1062 
1063   // Break cycle between nmethod & method
1064   if (log_is_enabled(Trace, class, unload)) {
1065     outputStream* log = Log(class, unload)::trace_stream();
1066     log->print_cr("making nmethod " INTPTR_FORMAT
1067                   " unloadable, Method*(" INTPTR_FORMAT
1068                   "), cause(" INTPTR_FORMAT ")",
1069                   p2i(this), p2i(_method), p2i(cause));
1070     if (!Universe::heap()->is_gc_active())
1071       cause->klass()->print_on(log);
1072   }
1073   // Unlink the osr method, so we do not look this up again
1074   if (is_osr_method()) {
1075     // Invalidate the osr nmethod only once
1076     if (is_in_use()) {
1077       invalidate_osr_method();
1078     }
1079 #ifdef ASSERT
1080     if (method() != NULL) {
1081       // Make sure osr nmethod is invalidated, i.e. not on the list
1082       bool found = method()->method_holder()->remove_osr_nmethod(this);
1083       assert(!found, "osr nmethod should have been invalidated");
1084     }
1085 #endif
1086   }
1087 
1088   // If _method is already NULL the Method* is about to be unloaded,
1089   // so we don't have to break the cycle. Note that it is possible to
1090   // have the Method* live here, in case we unload the nmethod because
1091   // it is pointing to some oop (other than the Method*) being unloaded.
1092   if (_method != NULL) {
1093     // OSR methods point to the Method*, but the Method* does not
1094     // point back!
1095     if (_method->code() == this) {
1096       _method->clear_code(); // Break a cycle
1097     }
1098     _method = NULL;            // Clear the method of this dead nmethod
1099   }
1100 
1101   // Make the class unloaded - i.e., change state and notify sweeper
1102   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1103   if (is_in_use()) {
1104     // Transitioning directly from live to unloaded -- so
1105     // we need to force a cache clean-up; remember this
1106     // for later on.
1107     CodeCache::set_needs_cache_clean(true);
1108   }
1109 
1110   // Unregister must be done before the state change
1111   Universe::heap()->unregister_nmethod(this);
1112 
1113   _state = unloaded;
1114 
1115   // Log the unloading.
1116   log_state_change();
1117 
1118 #if INCLUDE_JVMCI
1119   // The method can only be unloaded after the pointer to the installed code
1120   // Java wrapper is no longer alive. Here we need to clear out this weak
1121   // reference to the dead object. Nulling out the reference has to happen
1122   // after the method is unregistered since the original value may be still
1123   // tracked by the rset.
1124   maybe_invalidate_installed_code();
1125   // Clear these out after the nmethod has been unregistered and any
1126   // updates to the InstalledCode instance have been performed.
1127   _jvmci_installed_code = NULL;
1128   _speculation_log = NULL;
1129 #endif
1130 
1131   // The Method* is gone at this point


1258 
1259 #ifdef ASSERT
1260   if (is_osr_method() && method() != NULL) {
1261     // Make sure osr nmethod is invalidated, i.e. not on the list
1262     bool found = method()->method_holder()->remove_osr_nmethod(this);
1263     assert(!found, "osr nmethod should have been invalidated");
1264   }
1265 #endif
1266 
1267   // When the nmethod becomes zombie it is no longer alive so the
1268   // dependencies must be flushed.  nmethods in the not_entrant
1269   // state will be flushed later when the transition to zombie
1270   // happens or they get unloaded.
1271   if (state == zombie) {
1272     {
1273       // Flushing dependencies must be done before any possible
1274       // safepoint can sneak in, otherwise the oops used by the
1275       // dependency logic could have become stale.
1276       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1277       if (nmethod_needs_unregister) {
1278         Universe::heap()->unregister_nmethod(this);
1279 #ifdef JVMCI
1280         _jvmci_installed_code = NULL;
1281         _speculation_log = NULL;
1282 #endif
1283       }
1284       flush_dependencies(NULL);
1285     }
1286 
1287     // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
1288     // event and it hasn't already been reported for this nmethod then
1289     // report it now. The event may have been reported earlier if the GC
1290     // marked it for unloading). JvmtiDeferredEventQueue support means
1291     // we no longer go to a safepoint here.
1292     post_compiled_method_unload();
1293 
1294 #ifdef ASSERT
1295     // It's no longer safe to access the oops section since zombie
1296     // nmethods aren't scanned for GC.
1297     _oops_are_stale = true;
1298 #endif


1351   CodeBlob::flush();
1352   CodeCache::free(this);
1353 }
1354 
1355 //
1356 // Notify all classes this nmethod is dependent on that it is no
1357 // longer dependent. This should only be called in two situations.
1358 // First, when a nmethod transitions to a zombie all dependents need
1359 // to be clear.  Since zombification happens at a safepoint there's no
1360 // synchronization issues.  The second place is a little more tricky.
1361 // During phase 1 of mark sweep class unloading may happen and as a
1362 // result some nmethods may get unloaded.  In this case the flushing
1363 // of dependencies must happen during phase 1 since after GC any
1364 // dependencies in the unloaded nmethod won't be updated, so
1365 // traversing the dependency information in unsafe.  In that case this
1366 // function is called with a non-NULL argument and this function only
1367 // notifies instanceKlasses that are reachable
1368 
1369 void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
1370   assert_locked_or_safepoint(CodeCache_lock);
1371   assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
1372   "is_alive is non-NULL if and only if we are called during GC");
1373   if (!has_flushed_dependencies()) {
1374     set_has_flushed_dependencies();
1375     for (Dependencies::DepStream deps(this); deps.next(); ) {
1376       if (deps.type() == Dependencies::call_site_target_value) {
1377         // CallSite dependencies are managed on per-CallSite instance basis.
1378         oop call_site = deps.argument_oop(0);
1379         MethodHandles::remove_dependent_nmethod(call_site, this);
1380       } else {
1381         Klass* klass = deps.context_type();
1382         if (klass == NULL) {
1383           continue;  // ignore things like evol_method
1384         }
1385         // During GC the is_alive closure is non-NULL, and is used to
1386         // determine liveness of dependees that need to be updated.
1387         if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
1388           // The GC defers deletion of this entry, since there might be multiple threads
1389           // iterating over the _dependencies graph. Other call paths are single-threaded
1390           // and may delete it immediately.
1391           bool delete_immediately = is_alive == NULL;


      // Scan the compiled code's oop relocations (above low_boundary) and let
      // unload_if_dead_at() inspect each one; returns true as soon as a dead
      // oop forces this nmethod to be unloaded.  Otherwise falls through to
      // do_unloading_scopes() to process the scope/debug-info oops the same way.
 1525 bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) {
 1526   // Compiled code
 1527   {
 1528   RelocIterator iter(this, low_boundary);
 1529   while (iter.next()) {
      // Only oop-type relocations embed object references in the code stream.
 1530     if (iter.type() == relocInfo::oop_type) {
      // unload_if_dead_at() returning true means the nmethod was unloaded;
      // stop scanning immediately and report it to the caller.
 1531       if (unload_if_dead_at(&iter, is_alive, unloading_occurred)) {
 1532         return true;
 1533       }
 1534     }
 1535   }
 1536   }
 1537 
 1538   return do_unloading_scopes(is_alive, unloading_occurred);
 1539 }
1540 
1541 #if INCLUDE_JVMCI
1542 bool nmethod::do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred) {
1543   bool is_unloaded = false;
1544   // Follow JVMCI method
1545   BarrierSet* bs = Universe::heap()->barrier_set();
1546   if (_jvmci_installed_code != NULL) {
1547     if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) {
1548       if (!is_alive->do_object_b(_jvmci_installed_code)) {
1549         clear_jvmci_installed_code();
1550       }
1551     } else {
1552       if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) {
1553         return true;
1554       }
1555     }
1556   }
1557 
1558   if (_speculation_log != NULL) {
1559     if (!is_alive->do_object_b(_speculation_log)) {
1560       bs->write_ref_nmethod_pre(&_speculation_log, this);
1561       _speculation_log = NULL;
1562       bs->write_ref_nmethod_post(&_speculation_log, this);
1563     }
1564   }
1565   return is_unloaded;


2800 #endif
2801   unknown_java_nmethod_stats.print_nmethod_stats("Unknown");
2802   DebugInformationRecorder::print_statistics();
2803 #ifndef PRODUCT
2804   pc_nmethod_stats.print_pc_stats();
2805 #endif
2806   Dependencies::print_statistics();
2807   if (xtty != NULL)  xtty->tail("statistics");
2808 }
2809 
2810 #endif // !PRODUCT
2811 
2812 #if INCLUDE_JVMCI
      // Sever the reference from this nmethod to its JVMCI InstalledCode
      // wrapper object.  The store is bracketed by the GC barrier-set's
      // write_ref_nmethod_pre/post calls so nmethod remembered sets stay
      // consistent; see the locking precondition asserted below.
 2813 void nmethod::clear_jvmci_installed_code() {
 2814   // write_ref_method_pre/post can only be safely called at a
 2815   // safepoint or while holding the CodeCache_lock
 2816   assert(CodeCache_lock->is_locked() ||
 2817          SafepointSynchronize::is_at_safepoint(), "should be performed under a lock for consistency");
 2818   if (_jvmci_installed_code != NULL) {
 2819     // This must be done carefully to maintain nmethod remembered sets properly
 2820     BarrierSet* bs = Universe::heap()->barrier_set();
 2821     bs->write_ref_nmethod_pre(&_jvmci_installed_code, this);
 2822     _jvmci_installed_code = NULL;
 2823     bs->write_ref_nmethod_post(&_jvmci_installed_code, this);
 2824   }
 2825 }
2826 
2827 void nmethod::maybe_invalidate_installed_code() {
2828   assert(Patching_lock->is_locked() ||
2829          SafepointSynchronize::is_at_safepoint(), "should be performed under a lock for consistency");
2830   oop installed_code = jvmci_installed_code();
2831   if (installed_code != NULL) {
2832     nmethod* nm = (nmethod*)InstalledCode::address(installed_code);
2833     if (nm == NULL || nm != this) {
2834       // The link has been broken or the InstalledCode instance is
2835       // associated with another nmethod so do nothing.
2836       return;
2837     }
2838     if (!is_alive()) {
2839       // Break the link between nmethod and InstalledCode such that the nmethod
2840       // can subsequently be flushed safely.  The link must be maintained while




 608     _nmethod_end_offset      = _nul_chk_table_offset;
 609     _compile_id              = compile_id;
 610     _comp_level              = CompLevel_none;
 611     _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
 612     _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
 613     _osr_entry_point         = NULL;
 614     _exception_cache         = NULL;
 615     _pc_desc_container.reset_to(NULL);
 616     _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 617 
 618     _scopes_data_begin = (address) this + scopes_data_offset;
 619     _deopt_handler_begin = (address) this + deoptimize_offset;
 620     _deopt_mh_handler_begin = (address) this + deoptimize_mh_offset;
 621 
 622     code_buffer->copy_code_and_locs_to(this);
 623     code_buffer->copy_values_to(this);
 624     if (ScavengeRootsInCode) {
 625       if (detect_scavenge_root_oops()) {
 626         CodeCache::add_scavenge_root_nmethod(this);
 627       }
 628       GC::gc()->heap()->register_nmethod(this);
 629     }
 630     debug_only(verify_scavenge_root_oops());
 631     CodeCache::commit(this);
 632   }
 633 
 634   if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
 635     ttyLocker ttyl;  // keep the following output all in one block
 636     // This output goes directly to the tty, not the compiler log.
 637     // To enable tools to match it up with the compilation activity,
 638     // be sure to tag this tty output with the compile ID.
 639     if (xtty != NULL) {
 640       xtty->begin_head("print_native_nmethod");
 641       xtty->method(_method);
 642       xtty->stamp();
 643       xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
 644     }
 645     // print the header part first
 646     print();
 647     // then print the requested information
 648     if (PrintNativeNMethods) {


 762     _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
 763     _nmethod_end_offset      = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);
 764     _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
 765     _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
 766     _osr_entry_point         = code_begin()          + offsets->value(CodeOffsets::OSR_Entry);
 767     _exception_cache         = NULL;
 768 
 769     _scopes_data_begin = (address) this + scopes_data_offset;
 770 
 771     _pc_desc_container.reset_to(scopes_pcs_begin());
 772 
 773     code_buffer->copy_code_and_locs_to(this);
 774     // Copy contents of ScopeDescRecorder to nmethod
 775     code_buffer->copy_values_to(this);
 776     debug_info->copy_to(this);
 777     dependencies->copy_to(this);
 778     if (ScavengeRootsInCode) {
 779       if (detect_scavenge_root_oops()) {
 780         CodeCache::add_scavenge_root_nmethod(this);
 781       }
 782       GC::gc()->heap()->register_nmethod(this);
 783     }
 784     debug_only(verify_scavenge_root_oops());
 785 
 786     CodeCache::commit(this);
 787 
 788     // Copy contents of ExceptionHandlerTable to nmethod
 789     handler_table->copy_to(this);
 790     nul_chk_table->copy_to(this);
 791 
 792     // we use the information of entry points to find out if a method is
 793     // static or non static
 794     assert(compiler->is_c2() || compiler->is_jvmci() ||
 795            _method->is_static() == (entry_point() == _verified_entry_point),
 796            " entry points must be same for static methods and vice versa");
 797   }
 798 }
 799 
 800 // Print a short set of xml attributes to identify this nmethod.  The
 801 // output should be embedded in some other element.
 802 void nmethod::log_identity(xmlStream* log) const {


1038 }
1039 
      // Record one decompilation of this nmethod in the owning Method's
      // MethodData counter, so compilation policy can detect methods that
      // are repeatedly compiled and discarded.  No-op for non-top-tier code
      // or when the Method*/MethodData is unavailable.
 1040 void nmethod::inc_decompile_count() {
      // Only C2- and JVMCI-compiled code participates in decompile accounting.
 1041   if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
 1042   // Could be gated by ProfileTraps, but do not bother...
 1043   Method* m = method();
 1044   if (m == NULL)  return;
      // The method may have no profiling data allocated -- nothing to record.
 1045   MethodData* mdo = m->method_data();
 1046   if (mdo == NULL)  return;
 1047   // There is a benign race here.  See comments in methodData.hpp.
 1048   mdo->inc_decompile_count();
 1049 }
1050 
1051 void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
1052 
1053   post_compiled_method_unload();
1054 
1055   // Since this nmethod is being unloaded, make sure that dependencies
1056   // recorded in instanceKlasses get flushed and pass non-NULL closure to
1057   // indicate that this work is being done during a GC.
1058   assert(GC::gc()->heap()->is_gc_active(), "should only be called during gc");
1059   assert(is_alive != NULL, "Should be non-NULL");
1060   // A non-NULL is_alive closure indicates that this is being called during GC.
1061   flush_dependencies(is_alive);
1062 
1063   // Break cycle between nmethod & method
1064   if (log_is_enabled(Trace, class, unload)) {
1065     outputStream* log = Log(class, unload)::trace_stream();
1066     log->print_cr("making nmethod " INTPTR_FORMAT
1067                   " unloadable, Method*(" INTPTR_FORMAT
1068                   "), cause(" INTPTR_FORMAT ")",
1069                   p2i(this), p2i(_method), p2i(cause));
1070     if (!GC::gc()->heap()->is_gc_active())
1071       cause->klass()->print_on(log);
1072   }
1073   // Unlink the osr method, so we do not look this up again
1074   if (is_osr_method()) {
1075     // Invalidate the osr nmethod only once
1076     if (is_in_use()) {
1077       invalidate_osr_method();
1078     }
1079 #ifdef ASSERT
1080     if (method() != NULL) {
1081       // Make sure osr nmethod is invalidated, i.e. not on the list
1082       bool found = method()->method_holder()->remove_osr_nmethod(this);
1083       assert(!found, "osr nmethod should have been invalidated");
1084     }
1085 #endif
1086   }
1087 
1088   // If _method is already NULL the Method* is about to be unloaded,
1089   // so we don't have to break the cycle. Note that it is possible to
1090   // have the Method* live here, in case we unload the nmethod because
1091   // it is pointing to some oop (other than the Method*) being unloaded.
1092   if (_method != NULL) {
1093     // OSR methods point to the Method*, but the Method* does not
1094     // point back!
1095     if (_method->code() == this) {
1096       _method->clear_code(); // Break a cycle
1097     }
1098     _method = NULL;            // Clear the method of this dead nmethod
1099   }
1100 
1101   // Make the class unloaded - i.e., change state and notify sweeper
1102   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1103   if (is_in_use()) {
1104     // Transitioning directly from live to unloaded -- so
1105     // we need to force a cache clean-up; remember this
1106     // for later on.
1107     CodeCache::set_needs_cache_clean(true);
1108   }
1109 
1110   // Unregister must be done before the state change
1111   GC::gc()->heap()->unregister_nmethod(this);
1112 
1113   _state = unloaded;
1114 
1115   // Log the unloading.
1116   log_state_change();
1117 
1118 #if INCLUDE_JVMCI
1119   // The method can only be unloaded after the pointer to the installed code
1120   // Java wrapper is no longer alive. Here we need to clear out this weak
1121   // reference to the dead object. Nulling out the reference has to happen
1122   // after the method is unregistered since the original value may be still
1123   // tracked by the rset.
1124   maybe_invalidate_installed_code();
1125   // Clear these out after the nmethod has been unregistered and any
1126   // updates to the InstalledCode instance have been performed.
1127   _jvmci_installed_code = NULL;
1128   _speculation_log = NULL;
1129 #endif
1130 
1131   // The Method* is gone at this point


1258 
1259 #ifdef ASSERT
1260   if (is_osr_method() && method() != NULL) {
1261     // Make sure osr nmethod is invalidated, i.e. not on the list
1262     bool found = method()->method_holder()->remove_osr_nmethod(this);
1263     assert(!found, "osr nmethod should have been invalidated");
1264   }
1265 #endif
1266 
1267   // When the nmethod becomes zombie it is no longer alive so the
1268   // dependencies must be flushed.  nmethods in the not_entrant
1269   // state will be flushed later when the transition to zombie
1270   // happens or they get unloaded.
1271   if (state == zombie) {
1272     {
1273       // Flushing dependencies must be done before any possible
1274       // safepoint can sneak in, otherwise the oops used by the
1275       // dependency logic could have become stale.
1276       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1277       if (nmethod_needs_unregister) {
1278         GC::gc()->heap()->unregister_nmethod(this);
1279 #ifdef JVMCI
1280         _jvmci_installed_code = NULL;
1281         _speculation_log = NULL;
1282 #endif
1283       }
1284       flush_dependencies(NULL);
1285     }
1286 
1287     // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
1288     // event and it hasn't already been reported for this nmethod then
1289     // report it now. The event may have been reported earlier if the GC
1290     // marked it for unloading). JvmtiDeferredEventQueue support means
1291     // we no longer go to a safepoint here.
1292     post_compiled_method_unload();
1293 
1294 #ifdef ASSERT
1295     // It's no longer safe to access the oops section since zombie
1296     // nmethods aren't scanned for GC.
1297     _oops_are_stale = true;
1298 #endif


1351   CodeBlob::flush();
1352   CodeCache::free(this);
1353 }
1354 
1355 //
1356 // Notify all classes this nmethod is dependent on that it is no
1357 // longer dependent. This should only be called in two situations.
1358 // First, when a nmethod transitions to a zombie all dependents need
1359 // to be clear.  Since zombification happens at a safepoint there's no
1360 // synchronization issues.  The second place is a little more tricky.
1361 // During phase 1 of mark sweep class unloading may happen and as a
1362 // result some nmethods may get unloaded.  In this case the flushing
1363 // of dependencies must happen during phase 1 since after GC any
1364 // dependencies in the unloaded nmethod won't be updated, so
1365 // traversing the dependency information in unsafe.  In that case this
1366 // function is called with a non-NULL argument and this function only
1367 // notifies instanceKlasses that are reachable
1368 
1369 void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
1370   assert_locked_or_safepoint(CodeCache_lock);
1371   assert(GC::gc()->heap()->is_gc_active() == (is_alive != NULL),
1372   "is_alive is non-NULL if and only if we are called during GC");
1373   if (!has_flushed_dependencies()) {
1374     set_has_flushed_dependencies();
1375     for (Dependencies::DepStream deps(this); deps.next(); ) {
1376       if (deps.type() == Dependencies::call_site_target_value) {
1377         // CallSite dependencies are managed on per-CallSite instance basis.
1378         oop call_site = deps.argument_oop(0);
1379         MethodHandles::remove_dependent_nmethod(call_site, this);
1380       } else {
1381         Klass* klass = deps.context_type();
1382         if (klass == NULL) {
1383           continue;  // ignore things like evol_method
1384         }
1385         // During GC the is_alive closure is non-NULL, and is used to
1386         // determine liveness of dependees that need to be updated.
1387         if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
1388           // The GC defers deletion of this entry, since there might be multiple threads
1389           // iterating over the _dependencies graph. Other call paths are single-threaded
1390           // and may delete it immediately.
1391           bool delete_immediately = is_alive == NULL;


      // Scan the compiled code's oop relocations (above low_boundary) and let
      // unload_if_dead_at() inspect each one; returns true as soon as a dead
      // oop forces this nmethod to be unloaded.  Otherwise falls through to
      // do_unloading_scopes() to process the scope/debug-info oops the same way.
 1525 bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) {
 1526   // Compiled code
 1527   {
 1528   RelocIterator iter(this, low_boundary);
 1529   while (iter.next()) {
      // Only oop-type relocations embed object references in the code stream.
 1530     if (iter.type() == relocInfo::oop_type) {
      // unload_if_dead_at() returning true means the nmethod was unloaded;
      // stop scanning immediately and report it to the caller.
 1531       if (unload_if_dead_at(&iter, is_alive, unloading_occurred)) {
 1532         return true;
 1533       }
 1534     }
 1535   }
 1536   }
 1537 
 1538   return do_unloading_scopes(is_alive, unloading_occurred);
 1539 }
1540 
1541 #if INCLUDE_JVMCI
1542 bool nmethod::do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred) {
1543   bool is_unloaded = false;
1544   // Follow JVMCI method
1545   BarrierSet* bs = GC::gc()->heap()->barrier_set();
1546   if (_jvmci_installed_code != NULL) {
1547     if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) {
1548       if (!is_alive->do_object_b(_jvmci_installed_code)) {
1549         clear_jvmci_installed_code();
1550       }
1551     } else {
1552       if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) {
1553         return true;
1554       }
1555     }
1556   }
1557 
1558   if (_speculation_log != NULL) {
1559     if (!is_alive->do_object_b(_speculation_log)) {
1560       bs->write_ref_nmethod_pre(&_speculation_log, this);
1561       _speculation_log = NULL;
1562       bs->write_ref_nmethod_post(&_speculation_log, this);
1563     }
1564   }
1565   return is_unloaded;


2800 #endif
2801   unknown_java_nmethod_stats.print_nmethod_stats("Unknown");
2802   DebugInformationRecorder::print_statistics();
2803 #ifndef PRODUCT
2804   pc_nmethod_stats.print_pc_stats();
2805 #endif
2806   Dependencies::print_statistics();
2807   if (xtty != NULL)  xtty->tail("statistics");
2808 }
2809 
2810 #endif // !PRODUCT
2811 
2812 #if INCLUDE_JVMCI
      // Sever the reference from this nmethod to its JVMCI InstalledCode
      // wrapper object.  The store is bracketed by the GC barrier-set's
      // write_ref_nmethod_pre/post calls so nmethod remembered sets stay
      // consistent; see the locking precondition asserted below.
      // (Patched: the barrier set is now reached through the GC::gc()->heap()
      // accessor introduced by the gcinterface patch.)
 2813 void nmethod::clear_jvmci_installed_code() {
 2814   // write_ref_method_pre/post can only be safely called at a
 2815   // safepoint or while holding the CodeCache_lock
 2816   assert(CodeCache_lock->is_locked() ||
 2817          SafepointSynchronize::is_at_safepoint(), "should be performed under a lock for consistency");
 2818   if (_jvmci_installed_code != NULL) {
 2819     // This must be done carefully to maintain nmethod remembered sets properly
 2820     BarrierSet* bs = GC::gc()->heap()->barrier_set();
 2821     bs->write_ref_nmethod_pre(&_jvmci_installed_code, this);
 2822     _jvmci_installed_code = NULL;
 2823     bs->write_ref_nmethod_post(&_jvmci_installed_code, this);
 2824   }
 2825 }
2826 
2827 void nmethod::maybe_invalidate_installed_code() {
2828   assert(Patching_lock->is_locked() ||
2829          SafepointSynchronize::is_at_safepoint(), "should be performed under a lock for consistency");
2830   oop installed_code = jvmci_installed_code();
2831   if (installed_code != NULL) {
2832     nmethod* nm = (nmethod*)InstalledCode::address(installed_code);
2833     if (nm == NULL || nm != this) {
2834       // The link has been broken or the InstalledCode instance is
2835       // associated with another nmethod so do nothing.
2836       return;
2837     }
2838     if (!is_alive()) {
2839       // Break the link between nmethod and InstalledCode such that the nmethod
2840       // can subsequently be flushed safely.  The link must be maintained while


< prev index next >