1703 JvmtiDeferredEvent event = JvmtiDeferredEvent::compiled_method_unload_event(this,
1704 _jmethod_id, insts_begin());
1705 if (SafepointSynchronize::is_at_safepoint()) {
1706 // Don't want to take the queueing lock. Add it as pending and
1707 // it will get enqueued later.
1708 JvmtiDeferredEventQueue::add_pending_event(event);
1709 } else {
1710 MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
1711 JvmtiDeferredEventQueue::enqueue(event);
1712 }
1713 }
1714
1715 // The JVMTI CompiledMethodUnload event can be enabled or disabled at
1716 // any time. As the nmethod is being unloaded now we mark it as
1717 // having the unload event reported - this will ensure that we don't
1718 // attempt to report the event in the unlikely scenario where the
1719 // event is enabled at the time the nmethod is made a zombie.
1720 set_unload_reported();
1721 }
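// Aside: the shape of the hand-off above, reduced to standard C++ with
// hypothetical names (a minimal sketch, not HotSpot API). At a safepoint the
// queueing lock must not be taken, so the event is parked on a pending list
// and enqueued later; otherwise it is enqueued under the lock.
#if 0  // illustrative sketch only
#include <mutex>
#include <vector>

struct Event { int payload; };

static std::mutex         queue_lock;    // stands in for Service_lock
static std::vector<Event> queue;         // the serviced event queue
static std::vector<Event> pending;       // flushed into queue after the safepoint
static bool               at_safepoint;  // stands in for SafepointSynchronize::is_at_safepoint()

static void post_event(const Event& e) {
  if (at_safepoint) {
    pending.push_back(e);                // no queueing lock taken; enqueued later
  } else {
    std::lock_guard<std::mutex> ml(queue_lock);
    queue.push_back(e);
  }
}
#endif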
1722
1723 static void clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
1724 if (ic->is_icholder_call()) {
1725 // The only exception is compiledICHolder oops which may
1726 // yet be marked below. (We check this further below).
1727 CompiledICHolder* cichk_oop = ic->cached_icholder();
1728 if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
1729 cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
1730 return;
1731 }
1732 } else {
1733 Metadata* ic_oop = ic->cached_metadata();
1734 if (ic_oop != NULL) {
1735 if (ic_oop->is_klass()) {
1736 if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
1737 return;
1738 }
1739 } else if (ic_oop->is_method()) {
1740 if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
1741 return;
1742 }
1743 } else {
1744 ShouldNotReachHere();
1745 }
1746 }
1747 }
1748
1749 ic->set_to_clean();
1750 }
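// The rule above, condensed: an inline cache survives only if every piece of
// metadata it caches is reachable from a live class loader; anything else is
// reset so the call site falls back to re-resolution. A self-contained model
// with hypothetical types (not the HotSpot classes of the same names):
#if 0  // illustrative sketch only
struct Metadata {
  bool loader_alive;                     // models is_loader_alive(is_alive)
};

struct InlineCache {
  Metadata* cached = nullptr;            // null once the cache is clean
  void set_to_clean() { cached = nullptr; }
};

static void clean_ic_if_metadata_is_dead_model(InlineCache* ic) {
  if (ic->cached != nullptr && ic->cached->loader_alive) {
    return;                              // still reachable: keep the fast path
  }
  ic->set_to_clean();                    // dead or empty: drop the stale entry
}
#endif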
1751
1752 // This is called at the end of the strong tracing/marking phase of a
1753 // GC to unload an nmethod if it contains otherwise unreachable
1754 // oops.
1775 bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
1776 if (a_class_was_redefined) {
1777 // Setting the unloading_occurred flag here, before the call to
1778 // post_compiled_method_unload(), ensures that the unloading of
1779 // this nmethod is reported.
1780 unloading_occurred = true;
1781 }
1782
1783 // Exception cache
1784 clean_exception_cache(is_alive);
1785
1786 // If class unloading occurred we first iterate over all inline caches and
1787 // clear ICs where the cached oop is referring to an unloaded klass or method.
1788 // The remaining live cached oops will be traversed in the relocInfo::oop_type
1789 // iteration below.
1790 if (unloading_occurred) {
1791 RelocIterator iter(this, low_boundary);
1792 while (iter.next()) {
1793 if (iter.type() == relocInfo::virtual_call_type) {
1794 CompiledIC *ic = CompiledIC_at(&iter);
1795 clean_ic_if_metadata_is_dead(ic, is_alive);
1796 }
1797 }
1798 }
1799
1800 // Compiled code
1801 {
1802 RelocIterator iter(this, low_boundary);
1803 while (iter.next()) {
1804 if (iter.type() == relocInfo::oop_type) {
1805 oop_Relocation* r = iter.oop_reloc();
1806 // In this loop, we must only traverse those oops directly embedded in
1807 // the code. Other oops (oop_index>0) are seen as part of scopes_oops.
1808 assert(1 == (r->oop_is_immediate()) +
1809 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1810 "oop must be found in exactly one place");
1811 if (r->oop_is_immediate() && r->oop_value() != NULL) {
1812 if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
1813 return;
1814 }
1815 }
1843 }
1844
1845 // Clean inline caches pointing to zombie or not_entrant methods
1846 if (!nm->is_in_use() || (nm->method()->code() != nm)) {
1847 ic->set_to_clean();
1848 assert(ic->is_clean(), err_msg("nmethod " PTR_FORMAT " not clean %s", from, from->method()->name_and_sig_as_C_string()));
1849 }
1850 }
1851
1852 return false;
1853 }
1854
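// The two overloads below forward the call destination to an address-taking
// helper whose tail is shown above: the destination is looked up in the code
// cache and, when it resolves to an nmethod that is no longer in use (or has
// been replaced as the method's code), the entry is cleaned so the call site
// re-resolves.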
1855 static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) {
1856 return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
1857 }
1858
1859 static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) {
1860 return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
1861 }
1862
1863 bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
1864 ResourceMark rm;
1865
1866 // Make sure the oops are ready to receive visitors
1867 assert(!is_zombie() && !is_unloaded(),
1868 "should not call follow on zombie or unloaded nmethod");
1869
1870 // If the method is not entrant then a JMP is plastered over the
1871 // first few bytes. If an oop in the old code was there, that oop
1872 // should not get GC'd. Skip the first few bytes of oops on
1873 // not-entrant methods.
1874 address low_boundary = verified_entry_point();
1875 if (is_not_entrant()) {
1876 low_boundary += NativeJump::instruction_size;
1877 // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
1878 // (See comment above.)
1879 }
1880
1881 // The RedefineClasses() API can cause the class unloading invariant
1882 // to no longer be true. See jvmtiExport.hpp for details.
1883 // Also, leave a debugging breadcrumb in local flag.
1884 bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
1885 if (a_class_was_redefined) {
1886 // Setting the unloading_occurred flag here, before the call to
1887 // post_compiled_method_unload(), ensures that the unloading of
1888 // this nmethod is reported.
1889 unloading_occurred = true;
1890 }
1891
1892 // Exception cache
1893 clean_exception_cache(is_alive);
1894
1895 bool is_unloaded = false;
1896 bool postponed = false;
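// is_unloaded records that a dead immediate oop was found, which will cause
// this nmethod to be unloaded. postponed records that an inline cache points
// into an nmethod not yet visited in this unloading cycle (that check sits in
// the portion of clean_if_nmethod_is_unloaded not shown here); the caller
// re-runs cleaning for those ICs in a later, postponed pass.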
1897
1898 RelocIterator iter(this, low_boundary);
1899 while (iter.next()) {
1900
1901 switch (iter.type()) {
1902
1903 case relocInfo::virtual_call_type:
1904 if (unloading_occurred) {
1905 // If class unloading occurred we first iterate over all inline caches and
1906 // clear ICs where the cached oop is referring to an unloaded klass or method.
1907 clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
1908 }
1909
1910 postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1911 break;
1912
1913 case relocInfo::opt_virtual_call_type:
1914 postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1915 break;
1916
1917 case relocInfo::static_call_type:
1918 postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
1919 break;
1920
1921 case relocInfo::oop_type:
1922 if (!is_unloaded) {
1923 // Unload check
1924 oop_Relocation* r = iter.oop_reloc();
1925 // Traverse those oops directly embedded in the code.
1926 // Other oops (oop_index>0) are seen as part of scopes_oops.
1927 assert(1 == (r->oop_is_immediate()) +
1928 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1929 "oop must be found in exactly one place");
1930 if (r->oop_is_immediate() && r->oop_value() != NULL) {
1931 if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
1932 is_unloaded = true;
1933 }
1934 }
1935 }
1936 break;
1937
1938 }
1939 }
1940
1941 if (is_unloaded) {
1942 return postponed;
1943 }
1944
1945 // Scopes
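// Scope oops live in the oops section (oops_begin()..oops_end()) rather than
// in the instruction stream, so they are scanned directly here instead of
// via oop relocations.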
1946 for (oop* p = oops_begin(); p < oops_end(); p++) {
1947 if (*p == Universe::non_oop_word()) continue; // skip non-oops
1948 if (can_unload(is_alive, p, unloading_occurred)) {
1949 is_unloaded = true;
1950 break;
1951 }
1952 }
1953
1954 if (is_unloaded) {
1955 return postponed;
1956 }
1957
1958 // Ensure that all metadata is still alive
1959 verify_metadata_loaders(low_boundary, is_alive);
2068 // Check that the metadata embedded in the nmethod is alive
2069 CheckClass::do_check_class(is_alive, this);
2070 #endif
2071 }
2072
2073
2074 // Iterate over metadata calling this function. Used by RedefineClasses
2075 void nmethod::metadata_do(void f(Metadata*)) {
2076 address low_boundary = verified_entry_point();
2077 if (is_not_entrant()) {
2078 low_boundary += NativeJump::instruction_size;
2079 // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
2080 // (See comment above.)
2081 }
2082 {
2083 // Visit all immediate references that are embedded in the instruction stream.
2084 RelocIterator iter(this, low_boundary);
2085 while (iter.next()) {
2086 if (iter.type() == relocInfo::metadata_type) {
2087 metadata_Relocation* r = iter.metadata_reloc();
2088 // In this loop, we must only follow those metadatas directly embedded in
2089 // the code. Other metadatas (oop_index>0) are seen as part of
2090 // the metadata section below.
2091 assert(1 == (r->metadata_is_immediate()) +
2092 (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
2093 "metadata must be found in exactly one place");
2094 if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
2095 Metadata* md = r->metadata_value();
2096 f(md);
2097 }
2098 } else if (iter.type() == relocInfo::virtual_call_type) {
2099 // Check compiledIC holders associated with this nmethod
2100 CompiledIC *ic = CompiledIC_at(&iter);
2101 if (ic->is_icholder_call()) {
2102 CompiledICHolder* cichk = ic->cached_icholder();
2103 f(cichk->holder_method());
2104 f(cichk->holder_klass());
2105 } else {
2106 Metadata* ic_oop = ic->cached_metadata();
2107 if (ic_oop != NULL) {
2108 f(ic_oop);
2109 }
2110 }
2111 }
2112 }
2113 }
2114
2115 // Visit the metadata section
2116 for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
2117 if (*p == Universe::non_oop_word() || *p == NULL) continue; // skip non-oops
2118 Metadata* md = *p;
2119 f(md);
2120 }
2121
2122 // Visit metadata not embedded in the other places.
2123 if (_method != NULL) f(_method);
2124 }
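// A caller drives metadata_do with a plain function pointer, applied to every
// Metadata* slot found above. A hypothetical caller shape (record_metadata
// and visit_all are illustrative names, not HotSpot API):
#if 0  // illustrative sketch only
static void record_metadata(Metadata* md) {
  // e.g. remember md so redefined-class metadata is not purged while in use
}

static void visit_all(nmethod* nm) {
  nm->metadata_do(record_metadata);      // applied to reloc-embedded metadata,
                                         // icholders, the metadata section,
                                         // and the owning _method
}
#endif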
2125
2126 void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
2127 // Make sure the oops are ready to receive visitors
2128 assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
2129 assert(!is_unloaded(), "should not call follow on unloaded nmethod");
2130
2131 // If the method is not entrant or zombie then a JMP is plastered over the
2132 // first few bytes. If an oop in the old code was there, that oop
2133 // should not get GC'd. Skip the first few bytes of oops on
2134 // not-entrant methods.
2135 address low_boundary = verified_entry_point();
2136 if (is_not_entrant()) {
2137 low_boundary += NativeJump::instruction_size;
2138 // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
2139 // (See comment above.)
2140 }
2141
2142 RelocIterator iter(this, low_boundary);
1703 JvmtiDeferredEvent event = JvmtiDeferredEvent::compiled_method_unload_event(this,
1704 _jmethod_id, insts_begin());
1705 if (SafepointSynchronize::is_at_safepoint()) {
1706 // Don't want to take the queueing lock. Add it as pending and
1707 // it will get enqueued later.
1708 JvmtiDeferredEventQueue::add_pending_event(event);
1709 } else {
1710 MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
1711 JvmtiDeferredEventQueue::enqueue(event);
1712 }
1713 }
1714
1715 // The JVMTI CompiledMethodUnload event can be enabled or disabled at
1716 // any time. As the nmethod is being unloaded now we mark it as
1717 // having the unload event reported - this will ensure that we don't
1718 // attempt to report the event in the unlikely scenario where the
1719 // event is enabled at the time the nmethod is made a zombie.
1720 set_unload_reported();
1721 }
1722
1723 static void clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive, bool mark_on_stack) {
1724 if (ic->is_icholder_call()) {
1725 // The only exception is compiledICHolder oops which may
1726 // yet be marked below. (We check this further below).
1727 CompiledICHolder* cichk_oop = ic->cached_icholder();
1728
1729 if (mark_on_stack) {
1730 Metadata::mark_on_stack(cichk_oop->holder_method());
1731 Metadata::mark_on_stack(cichk_oop->holder_klass());
1732 }
1733
1734 if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
1735 cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
1736 return;
1737 }
1738 } else {
1739 Metadata* ic_oop = ic->cached_metadata();
1740 if (ic_oop != NULL) {
1741 if (mark_on_stack) {
1742 Metadata::mark_on_stack(ic_oop);
1743 }
1744
1745 if (ic_oop->is_klass()) {
1746 if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
1747 return;
1748 }
1749 } else if (ic_oop->is_method()) {
1750 if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
1751 return;
1752 }
1753 } else {
1754 ShouldNotReachHere();
1755 }
1756 }
1757 }
1758
1759 ic->set_to_clean();
1760 }
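// Note the ordering above: when mark_on_stack is set (a class has been
// redefined), the cached metadata is recorded as in-use before the liveness
// test runs, so it is marked even when this inline cache is about to be
// cleaned. See the mark_metadata_on_stack comment in do_unloading_parallel
// below for why that recording is needed.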
1761
1762 // This is called at the end of the strong tracing/marking phase of a
1763 // GC to unload an nmethod if it contains otherwise unreachable
1764 // oops.
1785 bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
1786 if (a_class_was_redefined) {
1787 // Setting the unloading_occurred flag here, before the call to
1788 // post_compiled_method_unload(), ensures that the unloading of
1789 // this nmethod is reported.
1790 unloading_occurred = true;
1791 }
1792
1793 // Exception cache
1794 clean_exception_cache(is_alive);
1795
1796 // If class unloading occurred we first iterate over all inline caches and
1797 // clear ICs where the cached oop is referring to an unloaded klass or method.
1798 // The remaining live cached oops will be traversed in the relocInfo::oop_type
1799 // iteration below.
1800 if (unloading_occurred) {
1801 RelocIterator iter(this, low_boundary);
1802 while (iter.next()) {
1803 if (iter.type() == relocInfo::virtual_call_type) {
1804 CompiledIC *ic = CompiledIC_at(&iter);
1805 clean_ic_if_metadata_is_dead(ic, is_alive, false);
1806 }
1807 }
1808 }
1809
1810 // Compiled code
1811 {
1812 RelocIterator iter(this, low_boundary);
1813 while (iter.next()) {
1814 if (iter.type() == relocInfo::oop_type) {
1815 oop_Relocation* r = iter.oop_reloc();
1816 // In this loop, we must only traverse those oops directly embedded in
1817 // the code. Other oops (oop_index>0) are seen as part of scopes_oops.
1818 assert(1 == (r->oop_is_immediate()) +
1819 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1820 "oop must be found in exactly one place");
1821 if (r->oop_is_immediate() && r->oop_value() != NULL) {
1822 if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
1823 return;
1824 }
1825 }
1853 }
1854
1855 // Clean inline caches pointing to zombie or not_entrant methods
1856 if (!nm->is_in_use() || (nm->method()->code() != nm)) {
1857 ic->set_to_clean();
1858 assert(ic->is_clean(), err_msg("nmethod " PTR_FORMAT " not clean %s", from, from->method()->name_and_sig_as_C_string()));
1859 }
1860 }
1861
1862 return false;
1863 }
1864
1865 static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) {
1866 return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
1867 }
1868
1869 static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) {
1870 return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
1871 }
1872
1873 bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive, bool unloading_occurred) {
1874 assert(iter_at_oop->type() == relocInfo::oop_type, "Wrong relocation type");
1875
1876 oop_Relocation* r = iter_at_oop->oop_reloc();
1877 // Traverse those oops directly embedded in the code.
1878 // Other oops (oop_index>0) are seen as part of scopes_oops.
1879 assert(1 == (r->oop_is_immediate()) +
1880 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1881 "oop must be found in exactly one place");
1882 if (r->oop_is_immediate() && r->oop_value() != NULL) {
1883 // Unload this nmethod if the oop is dead.
1884 if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
1885 return true;
1886 }
1887 }
1888
1889 return false;
1890 }
1891
1892 void nmethod::mark_metadata_on_stack_at(RelocIterator* iter_at_metadata) {
1893 assert(iter_at_metadata->type() == relocInfo::metadata_type, "Wrong relocation type");
1894
1895 metadata_Relocation* r = iter_at_metadata->metadata_reloc();
1896 // In this loop, we must only follow those metadatas directly embedded in
1897 // the code. Other metadatas (oop_index>0) are seen as part of
1898 // the metadata section below.
1899 assert(1 == (r->metadata_is_immediate()) +
1900 (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
1901 "metadata must be found in exactly one place");
1902 if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
1903 Metadata* md = r->metadata_value();
1904 if (md != _method) Metadata::mark_on_stack(md);
1905 }
1906 }
1907
1908 void nmethod::mark_metadata_on_stack_non_relocs() {
1909 // Visit the metadata section
1910 for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
1911 if (*p == Universe::non_oop_word() || *p == NULL) continue; // skip non-oops
1912 Metadata* md = *p;
1913 Metadata::mark_on_stack(md);
1914 }
1915
1916 // Visit metadata not embedded in the other places.
1917 if (_method != NULL) Metadata::mark_on_stack(_method);
1918 }
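// Together with the icholder marking in clean_ic_if_metadata_is_dead above,
// these two helpers (reloc-embedded metadata in mark_metadata_on_stack_at,
// the metadata section plus the owning _method here) cover every Metadata*
// slot the nmethod holds, matching the traversals of metadata_do below.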
1919
1920 bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
1921 ResourceMark rm;
1922
1923 // Make sure the oops are ready to receive visitors
1924 assert(!is_zombie() && !is_unloaded(),
1925 "should not call follow on zombie or unloaded nmethod");
1926
1927 // If the method is not entrant then a JMP is plastered over the
1928 // first few bytes. If an oop in the old code was there, that oop
1929 // should not get GC'd. Skip the first few bytes of oops on
1930 // not-entrant methods.
1931 address low_boundary = verified_entry_point();
1932 if (is_not_entrant()) {
1933 low_boundary += NativeJump::instruction_size;
1934 // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
1935 // (See comment above.)
1936 }
1937
1938 // The RedefineClasses() API can cause the class unloading invariant
1939 // to no longer be true. See jvmtiExport.hpp for details.
1940 // Also, leave a debugging breadcrumb in local flag.
1941 bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
1942 if (a_class_was_redefined) {
1943 // Setting the unloading_occurred flag here, before the call to
1944 // post_compiled_method_unload(), ensures that the unloading of
1945 // this nmethod is reported.
1946 unloading_occurred = true;
1947 }
1948
1949 // When class redefinition is used all metadata in the CodeCache has to be recorded,
1950 // so that unused "previous versions" can be purged. Since walking the CodeCache can
1951 // be expensive, the "mark on stack" is piggy-backed on this parallel unloading code.
1952 bool mark_metadata_on_stack = a_class_was_redefined;
1953
1954 // Exception cache
1955 clean_exception_cache(is_alive);
1956
1957 bool is_unloaded = false;
1958 bool postponed = false;
1959
1960 RelocIterator iter(this, low_boundary);
1961 while (iter.next()) {
1962
1963 switch (iter.type()) {
1964
1965 case relocInfo::virtual_call_type:
1966 if (unloading_occurred) {
1967 // If class unloading occurred we first iterate over all inline caches and
1968 // clear ICs where the cached oop is referring to an unloaded klass or method.
1969 clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive, mark_metadata_on_stack);
1970 }
1971
1972 postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1973 break;
1974
1975 case relocInfo::opt_virtual_call_type:
1976 postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1977 break;
1978
1979 case relocInfo::static_call_type:
1980 postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
1981 break;
1982
1983 case relocInfo::oop_type:
1984 if (!is_unloaded) {
1985 is_unloaded = unload_if_dead_at(&iter, is_alive, unloading_occurred);
1986 }
1987 break;
1988
1989 case relocInfo::metadata_type:
1990 if (mark_metadata_on_stack) {
1991 mark_metadata_on_stack_at(&iter);
1992 }
1993 }
1994 }
1995
1996 if (mark_metadata_on_stack) {
1997 mark_metadata_on_stack_non_relocs();
1998 }
1999
2000 if (is_unloaded) {
2001 return postponed;
2002 }
2003
2004 // Scopes
2005 for (oop* p = oops_begin(); p < oops_end(); p++) {
2006 if (*p == Universe::non_oop_word()) continue; // skip non-oops
2007 if (can_unload(is_alive, p, unloading_occurred)) {
2008 is_unloaded = true;
2009 break;
2010 }
2011 }
2012
2013 if (is_unloaded) {
2014 return postponed;
2015 }
2016
2017 // Ensure that all metadata is still alive
2018 verify_metadata_loaders(low_boundary, is_alive);
2127 // Check that the metadata embedded in the nmethod is alive
2128 CheckClass::do_check_class(is_alive, this);
2129 #endif
2130 }
2131
2132
2133 // Iterate over metadata calling this function. Used by RedefineClasses
2134 void nmethod::metadata_do(void f(Metadata*)) {
2135 address low_boundary = verified_entry_point();
2136 if (is_not_entrant()) {
2137 low_boundary += NativeJump::instruction_size;
2138 // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
2139 // (See comment above.)
2140 }
2141 {
2142 // Visit all immediate references that are embedded in the instruction stream.
2143 RelocIterator iter(this, low_boundary);
2144 while (iter.next()) {
2145 if (iter.type() == relocInfo::metadata_type) {
2146 metadata_Relocation* r = iter.metadata_reloc();
2147 // In this loop, we must only follow those metadatas directly embedded in
2148 // the code. Other metadatas (oop_index>0) are seen as part of
2149 // the metadata section below.
2150 assert(1 == (r->metadata_is_immediate()) +
2151 (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
2152 "metadata must be found in exactly one place");
2153 if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
2154 Metadata* md = r->metadata_value();
2155 f(md);
2156 }
2157 } else if (iter.type() == relocInfo::virtual_call_type) {
2158 // Check compiledIC holders associated with this nmethod
2159 CompiledIC *ic = CompiledIC_at(&iter);
2160 if (ic->is_icholder_call()) {
2161 CompiledICHolder* cichk = ic->cached_icholder();
2162 f(cichk->holder_method());
2163 f(cichk->holder_klass());
2164 } else {
2165 Metadata* ic_oop = ic->cached_metadata();
2166 if (ic_oop != NULL) {
2167 f(ic_oop);
2168 }
2169 }
2170 }
2171 }
2172 }
2173
2174 // Visit the metadata section
2175 for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
2176 if (*p == Universe::non_oop_word() || *p == NULL) continue; // skip non-oops
2177 Metadata* md = *p;
2178 f(md);
2179 }
2180
2181 // Visit metadata not embedded in the other places.
2182 if (_method != NULL) f(_method);
2183 }
2184
2185 void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
2186 // Make sure the oops are ready to receive visitors
2187 assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
2188 assert(!is_unloaded(), "should not call follow on unloaded nmethod");
2189
2190 // If the method is not entrant or zombie then a JMP is plastered over the
2191 // first few bytes. If an oop in the old code was there, that oop
2192 // should not get GC'd. Skip the first few bytes of oops on
2193 // not-entrant methods.
2194 address low_boundary = verified_entry_point();
2195 if (is_not_entrant()) {
2196 low_boundary += NativeJump::instruction_size;
2197 // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
2198 // (See comment above.)
2199 }
2200
2201 RelocIterator iter(this, low_boundary);