998 break;
999 }
1000 default:
1001 break;
1002 }
1003 }
1004 }
1005
// This is a private interface with the sweeper.
void nmethod::mark_as_seen_on_stack() {
  assert(is_alive(), "Must be an alive method");
  // Set the traversal mark to ensure that the sweeper does 2
  // cleaning passes before moving to zombie.
  // can_convert_to_zombie() later compares this mark against the
  // sweeper's then-current traversal count.
  set_stack_traversal_mark(NMethodSweeper::traversal_count());
}
1013
1014 // Tell if a non-entrant method can be converted to a zombie (i.e.,
1015 // there are no activations on the stack, not in use by the VM,
1016 // and not in use by the ServiceThread)
1017 bool nmethod::can_convert_to_zombie() {
1018 assert(is_not_entrant(), "must be a non-entrant method");
1019
1020 // Since the nmethod sweeper only does partial sweep the sweeper's traversal
1021 // count can be greater than the stack traversal count before it hits the
1022 // nmethod for the second time.
1023 return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
1024 !is_locked_by_vm();
1025 }
1026
1027 void nmethod::inc_decompile_count() {
1028 if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
1029 // Could be gated by ProfileTraps, but do not bother...
1030 Method* m = method();
1031 if (m == NULL) return;
1032 MethodData* mdo = m->method_data();
1033 if (mdo == NULL) return;
1034 // There is a benign race here. See comments in methodData.hpp.
1035 mdo->inc_decompile_count();
1036 }
1037
1038 void nmethod::make_unloaded() {
1039 post_compiled_method_unload();
1040
1041 // This nmethod is being unloaded, make sure that dependencies
1042 // recorded in instanceKlasses get flushed.
1043 // Since this work is being done during a GC, defer deleting dependencies from the
|
998 break;
999 }
1000 default:
1001 break;
1002 }
1003 }
1004 }
1005
// This is a private interface with the sweeper.
void nmethod::mark_as_seen_on_stack() {
  assert(is_alive(), "Must be an alive method");
  // Set the traversal mark to ensure that the sweeper does 2
  // cleaning passes before moving to zombie.
  // can_convert_to_zombie() later compares this mark against the
  // sweeper's then-current traversal count.
  set_stack_traversal_mark(NMethodSweeper::traversal_count());
}
1013
1014 // Tell if a non-entrant method can be converted to a zombie (i.e.,
1015 // there are no activations on the stack, not in use by the VM,
1016 // and not in use by the ServiceThread)
1017 bool nmethod::can_convert_to_zombie() {
1018 assert(is_not_entrant() || is_unloading(), "must be a non-entrant method");
1019
1020 // Since the nmethod sweeper only does partial sweep the sweeper's traversal
1021 // count can be greater than the stack traversal count before it hits the
1022 // nmethod for the second time.
1023 return stack_traversal_mark() + 1 < NMethodSweeper::traversal_count() &&
1024 !is_locked_by_vm() && (!is_unloading() || is_unloaded());
1025 }
1026
1027 void nmethod::inc_decompile_count() {
1028 if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
1029 // Could be gated by ProfileTraps, but do not bother...
1030 Method* m = method();
1031 if (m == NULL) return;
1032 MethodData* mdo = m->method_data();
1033 if (mdo == NULL) return;
1034 // There is a benign race here. See comments in methodData.hpp.
1035 mdo->inc_decompile_count();
1036 }
1037
1038 void nmethod::make_unloaded() {
1039 post_compiled_method_unload();
1040
1041 // This nmethod is being unloaded, make sure that dependencies
1042 // recorded in instanceKlasses get flushed.
1043 // Since this work is being done during a GC, defer deleting dependencies from the
|
1072
1073 // If _method is already NULL the Method* is about to be unloaded,
1074 // so we don't have to break the cycle. Note that it is possible to
1075 // have the Method* live here, in case we unload the nmethod because
1076 // it is pointing to some oop (other than the Method*) being unloaded.
1077 if (_method != NULL) {
1078 // OSR methods point to the Method*, but the Method* does not
1079 // point back!
1080 if (_method->code() == this) {
1081 _method->clear_code(); // Break a cycle
1082 }
1083 _method = NULL; // Clear the method of this dead nmethod
1084 }
1085
1086 // Make the class unloaded - i.e., change state and notify sweeper
1087 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1088
1089 // Unregister must be done before the state change
1090 Universe::heap()->unregister_nmethod(this);
1091
1092 _state = unloaded;
1093
1094 // Log the unloading.
1095 log_state_change();
1096
1097 #if INCLUDE_JVMCI
1098 // The method can only be unloaded after the pointer to the installed code
1099 // Java wrapper is no longer alive. Here we need to clear out this weak
1100 // reference to the dead object.
1101 maybe_invalidate_installed_code();
1102 #endif
1103
1104 // The Method* is gone at this point
1105 assert(_method == NULL, "Tautology");
1106
1107 set_osr_link(NULL);
1108 NMethodSweeper::report_state_change(this);
1109 }
1110
1111 void nmethod::invalidate_osr_method() {
1112 assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
1113 // Remove from list of active nmethods
1114 if (method() != NULL) {
1115 method()->method_holder()->remove_osr_nmethod(this);
1116 }
1117 }
1118
1119 void nmethod::log_state_change() const {
1120 if (LogCompilation) {
1121 if (xtty != NULL) {
1122 ttyLocker ttyl; // keep the following output all in one block
1123 if (_state == unloaded) {
1124 xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
1125 os::current_thread_id());
1126 } else {
1127 xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
|
1072
1073 // If _method is already NULL the Method* is about to be unloaded,
1074 // so we don't have to break the cycle. Note that it is possible to
1075 // have the Method* live here, in case we unload the nmethod because
1076 // it is pointing to some oop (other than the Method*) being unloaded.
1077 if (_method != NULL) {
1078 // OSR methods point to the Method*, but the Method* does not
1079 // point back!
1080 if (_method->code() == this) {
1081 _method->clear_code(); // Break a cycle
1082 }
1083 _method = NULL; // Clear the method of this dead nmethod
1084 }
1085
1086 // Make the class unloaded - i.e., change state and notify sweeper
1087 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1088
1089 // Unregister must be done before the state change
1090 Universe::heap()->unregister_nmethod(this);
1091
1092 // Log the unloading.
1093 log_state_change();
1094
1095 #if INCLUDE_JVMCI
1096 // The method can only be unloaded after the pointer to the installed code
1097 // Java wrapper is no longer alive. Here we need to clear out this weak
1098 // reference to the dead object.
1099 maybe_invalidate_installed_code();
1100 #endif
1101
1102 // The Method* is gone at this point
1103 assert(_method == NULL, "Tautology");
1104
1105 set_osr_link(NULL);
1106 NMethodSweeper::report_state_change(this);
1107
1108 // The release is only needed for compile-time ordering, as accesses
1109 // into the nmethod after the store is not safe, due to the sweeper
1110 // being allowed to free it when the store is observed, during
1111 // concurrent nmethod unloading. Therefore, there is no need for
1112 // acquire on the loader side.
1113 OrderAccess::release_store(&_state, (signed char)unloaded);
1114 }
1115
1116 void nmethod::invalidate_osr_method() {
1117 assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
1118 // Remove from list of active nmethods
1119 if (method() != NULL) {
1120 method()->method_holder()->remove_osr_nmethod(this);
1121 }
1122 }
1123
1124 void nmethod::log_state_change() const {
1125 if (LogCompilation) {
1126 if (xtty != NULL) {
1127 ttyLocker ttyl; // keep the following output all in one block
1128 if (_state == unloaded) {
1129 xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
1130 os::current_thread_id());
1131 } else {
1132 xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
|
1824 assert(NULL == linear_search(search, pc_offset, approximate), "search ok");
1825 return NULL;
1826 }
1827 }
1828
1829
1830 void nmethod::check_all_dependencies(DepChange& changes) {
1831 // Checked dependencies are allocated into this ResourceMark
1832 ResourceMark rm;
1833
1834 // Turn off dependency tracing while actually testing dependencies.
1835 NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
1836
1837 typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
1838 &DependencySignature::equals, 11027> DepTable;
1839
1840 DepTable* table = new DepTable();
1841
1842 // Iterate over live nmethods and check dependencies of all nmethods that are not
1843 // marked for deoptimization. A particular dependency is only checked once.
1844 NMethodIterator iter;
1845 while(iter.next()) {
1846 nmethod* nm = iter.method();
1847 // Only notify for live nmethods
1848 if (nm->is_alive() && !nm->is_marked_for_deoptimization()) {
1849 for (Dependencies::DepStream deps(nm); deps.next(); ) {
1850 // Construct abstraction of a dependency.
1851 DependencySignature* current_sig = new DependencySignature(deps);
1852
1853 // Determine if dependency is already checked. table->put(...) returns
1854 // 'true' if the dependency is added (i.e., was not in the hashtable).
1855 if (table->put(*current_sig, 1)) {
1856 if (deps.check_dependency() != NULL) {
1857 // Dependency checking failed. Print out information about the failed
1858 // dependency and finally fail with an assert. We can fail here, since
1859 // dependency checking is never done in a product build.
1860 tty->print_cr("Failed dependency:");
1861 changes.print();
1862 nm->print();
1863 nm->print_dependencies();
1864 assert(false, "Should have been marked for deoptimization");
1865 }
1866 }
1867 }
|
1829 assert(NULL == linear_search(search, pc_offset, approximate), "search ok");
1830 return NULL;
1831 }
1832 }
1833
1834
1835 void nmethod::check_all_dependencies(DepChange& changes) {
1836 // Checked dependencies are allocated into this ResourceMark
1837 ResourceMark rm;
1838
1839 // Turn off dependency tracing while actually testing dependencies.
1840 NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
1841
1842 typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
1843 &DependencySignature::equals, 11027> DepTable;
1844
1845 DepTable* table = new DepTable();
1846
1847 // Iterate over live nmethods and check dependencies of all nmethods that are not
1848 // marked for deoptimization. A particular dependency is only checked once.
1849 NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
1850 while(iter.next()) {
1851 nmethod* nm = iter.method();
1852 // Only notify for live nmethods
1853 if (!nm->is_marked_for_deoptimization()) {
1854 for (Dependencies::DepStream deps(nm); deps.next(); ) {
1855 // Construct abstraction of a dependency.
1856 DependencySignature* current_sig = new DependencySignature(deps);
1857
1858 // Determine if dependency is already checked. table->put(...) returns
1859 // 'true' if the dependency is added (i.e., was not in the hashtable).
1860 if (table->put(*current_sig, 1)) {
1861 if (deps.check_dependency() != NULL) {
1862 // Dependency checking failed. Print out information about the failed
1863 // dependency and finally fail with an assert. We can fail here, since
1864 // dependency checking is never done in a product build.
1865 tty->print_cr("Failed dependency:");
1866 changes.print();
1867 nm->print();
1868 nm->print_dependencies();
1869 assert(false, "Should have been marked for deoptimization");
1870 }
1871 }
1872 }
|
2823 _jvmci_installed_code = NULL;
2824 }
2825 }
2826
2827 void nmethod::clear_speculation_log() {
2828 assert_locked_or_safepoint(Patching_lock);
2829 if (_speculation_log != NULL) {
2830 JNIHandles::destroy_weak_global(_speculation_log);
2831 _speculation_log = NULL;
2832 }
2833 }
2834
// Keep the JVMCI InstalledCode mirror object consistent with this nmethod's
// state, and release JVMCI resources once the nmethod is no longer alive.
void nmethod::maybe_invalidate_installed_code() {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "should be performed under a lock for consistency");
  // Resolve the weak handle; may be NULL if the mirror has been collected.
  oop installed_code = JNIHandles::resolve(_jvmci_installed_code);
  if (installed_code != NULL) {
    // Update the values in the InstalledCode instance if it still refers to this nmethod
    nmethod* nm = (nmethod*)InstalledCode::address(installed_code);
    if (nm == this) {
      if (!is_alive()) {
        // Break the link between nmethod and InstalledCode such that the nmethod
        // can subsequently be flushed safely. The link must be maintained while
        // the method could have live activations since invalidateInstalledCode
        // might want to invalidate all existing activations.
        InstalledCode::set_address(installed_code, 0);
        InstalledCode::set_entryPoint(installed_code, 0);
      } else if (is_not_entrant()) {
        // Remove the entry point so any invocation will fail but keep
        // the address link around that so that existing activations can
        // be invalidated.
        InstalledCode::set_entryPoint(installed_code, 0);
      }
    }
  }
  if (!is_alive()) {
    // Clear these out after the nmethod has been unregistered and any
    // updates to the InstalledCode instance have been performed.
    clear_jvmci_installed_code();
    clear_speculation_log();
  }
}
2865
// Invalidate the nmethod referenced by the given InstalledCode mirror:
// deoptimize it if it is still alive, then sever the InstalledCode->nmethod
// link under the Patching_lock so exactly one thread clears it.
// Throws NullPointerException if installedCode is NULL.
void nmethod::invalidate_installed_code(Handle installedCode, TRAPS) {
  if (installedCode() == NULL) {
    THROW(vmSymbols::java_lang_NullPointerException());
  }
  jlong nativeMethod = InstalledCode::address(installedCode);
  nmethod* nm = (nmethod*)nativeMethod;
  if (nm == NULL) {
    // Nothing to do
    return;
  }

  // NOTE(review): nmethodLocker presumably pins the nmethod so it cannot be
  // flushed while we operate on it — confirm against nmethodLocker's contract.
  nmethodLocker nml(nm);
#ifdef ASSERT
  {
    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
    // This relationship can only be checked safely under a lock
    assert(!nm->is_alive() || nm->jvmci_installed_code() == installedCode(), "sanity check");
  }
#endif

  if (nm->is_alive()) {
    // Invalidating the InstalledCode means we want the nmethod
    // to be deoptimized.
    nm->mark_for_deoptimization();
    VM_Deoptimize op;
    VMThread::execute(&op);
  }

  // Multiple threads could reach this point so we now need to
  // lock and re-check the link to the nmethod so that only one
  // thread clears it.
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  if (InstalledCode::address(installedCode) == nativeMethod) {
    InstalledCode::set_address(installedCode, 0);
  }
}
|
2828 _jvmci_installed_code = NULL;
2829 }
2830 }
2831
2832 void nmethod::clear_speculation_log() {
2833 assert_locked_or_safepoint(Patching_lock);
2834 if (_speculation_log != NULL) {
2835 JNIHandles::destroy_weak_global(_speculation_log);
2836 _speculation_log = NULL;
2837 }
2838 }
2839
// Keep the JVMCI InstalledCode mirror object consistent with this nmethod's
// state, and release JVMCI resources once the nmethod is dead or unloading.
void nmethod::maybe_invalidate_installed_code() {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "should be performed under a lock for consistency");
  // Resolve the weak handle; may be NULL if the mirror has been collected.
  oop installed_code = JNIHandles::resolve(_jvmci_installed_code);
  if (installed_code != NULL) {
    // Update the values in the InstalledCode instance if it still refers to this nmethod
    nmethod* nm = (nmethod*)InstalledCode::address(installed_code);
    if (nm == this) {
      // Treat a concurrently unloading nmethod the same as a dead one.
      if (!is_alive() || is_unloading()) {
        // Break the link between nmethod and InstalledCode such that the nmethod
        // can subsequently be flushed safely. The link must be maintained while
        // the method could have live activations since invalidateInstalledCode
        // might want to invalidate all existing activations.
        InstalledCode::set_address(installed_code, 0);
        InstalledCode::set_entryPoint(installed_code, 0);
      } else if (is_not_entrant()) {
        // Remove the entry point so any invocation will fail but keep
        // the address link around that so that existing activations can
        // be invalidated.
        InstalledCode::set_entryPoint(installed_code, 0);
      }
    }
  }
  if (!is_alive() || is_unloading()) {
    // Clear these out after the nmethod has been unregistered and any
    // updates to the InstalledCode instance have been performed.
    clear_jvmci_installed_code();
    clear_speculation_log();
  }
}
2870
// Invalidate the nmethod referenced by the given InstalledCode mirror:
// deoptimize it if it is still alive, then sever the InstalledCode->nmethod
// link under the Patching_lock so exactly one thread clears it.
// Throws NullPointerException if installedCode is NULL.
void nmethod::invalidate_installed_code(Handle installedCode, TRAPS) {
  if (installedCode() == NULL) {
    THROW(vmSymbols::java_lang_NullPointerException());
  }
  jlong nativeMethod = InstalledCode::address(installedCode);
  nmethod* nm = (nmethod*)nativeMethod;
  if (nm == NULL) {
    // Nothing to do
    return;
  }

  // NOTE(review): nmethodLocker presumably pins the nmethod so it cannot be
  // flushed while we operate on it — confirm against nmethodLocker's contract.
  nmethodLocker nml(nm);
#ifdef ASSERT
  {
    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
    // This relationship can only be checked safely under a lock.
    // A concurrently unloading nmethod may have already dropped its mirror.
    assert(!nm->is_alive() || nm->is_unloading() || nm->jvmci_installed_code() == installedCode(), "sanity check");
  }
#endif

  if (nm->is_alive()) {
    // Invalidating the InstalledCode means we want the nmethod
    // to be deoptimized.
    nm->mark_for_deoptimization();
    VM_Deoptimize op;
    VMThread::execute(&op);
  }

  // Multiple threads could reach this point so we now need to
  // lock and re-check the link to the nmethod so that only one
  // thread clears it.
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  if (InstalledCode::address(installedCode) == nativeMethod) {
    InstalledCode::set_address(installedCode, 0);
  }
}
|