src/share/vm/oops/instanceKlass.cpp

1796     *id_p = NULL;
1797   } else {
1798     *id_p = cache[idnum+1];  // fetch jmethodID (if any)
1799   }
1800 }
1801 
1802 
1803 // Look up a jmethodID; returns NULL if not found.  Do no blocking, no allocations, no handles
1804 jmethodID InstanceKlass::jmethod_id_or_null(Method* method) {
1805   size_t idnum = (size_t)method->method_idnum();
1806   jmethodID* jmeths = methods_jmethod_ids_acquire();
1807   size_t length;                                // length assigned as debugging crumb
1808   jmethodID id = NULL;
1809   if (jmeths != NULL &&                         // If there is a cache
1810       (length = (size_t)jmeths[0]) > idnum) {   // and if it is long enough,
1811     id = jmeths[idnum+1];                       // Look up the id (may be NULL)
1812   }
1813   return id;
1814 }
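
The lookup above relies on the cache's layout convention: slot 0 of the jmethodID array stores the usable length, and slots 1..length hold the ids, indexed by method_idnum()+1. A minimal sketch of that layout follows; the helper name and the use of NEW_C_HEAP_ARRAY here are illustrative assumptions, not the actual HotSpot allocation code.

// Illustrative sketch only -- not the allocation code HotSpot uses.
// Slot 0 records the capacity; slots 1..num_methods hold the cached ids.
static jmethodID* allocate_jmethod_id_cache(size_t num_methods) {
  jmethodID* cache = NEW_C_HEAP_ARRAY(jmethodID, num_methods + 1, mtClass);
  memset(cache, 0, (num_methods + 1) * sizeof(jmethodID));
  cache[0] = (jmethodID)num_methods;   // length, checked by jmethod_id_or_null()
  return cache;
}
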
1815 
1816 int nmethodBucket::decrement() {
1817   return Atomic::add(-1, (volatile int *)&_count);
1818 }
1819 
1820 //
1821 // Walk the list of dependent nmethods searching for nmethods which
1822 // are dependent on the changes that were passed in and mark them for
1823 // deoptimization.  Returns the number of nmethods found.
1824 //
1825 int nmethodBucket::mark_dependent_nmethods(nmethodBucket* deps, DepChange& changes) {
1826   assert_locked_or_safepoint(CodeCache_lock);
1827   int found = 0;
1828   for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
1829     nmethod* nm = b->get_nmethod();
1830     // since dependencies aren't removed until an nmethod becomes a zombie,
1831     // the dependency list may contain nmethods which aren't alive.
1832     if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
1833       if (TraceDependencies) {
1834         ResourceMark rm;
1835         tty->print_cr("Marked for deoptimization");
1836         changes.print();
1837         nm->print();
1838         nm->print_dependencies();
1839       }
1840       nm->mark_for_deoptimization();
1841       found++;
1842     }
1843   }
1844   return found;
1845 }
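
Marking is only half of the invalidation; callers are expected to follow a positive return value with a deoptimization pass. Below is a paraphrased sketch of the usual driver, assuming the standard VM_Deoptimize operation; the real call sites live in code-cache/universe code and are not part of this file.

// Hypothetical paraphrase of the caller pattern; not code from this patch.
static void invalidate_dependents(InstanceKlass* ctxk, DepChange& changes) {
  int marked = 0;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    marked = ctxk->mark_dependent_nmethods(changes);
  }
  if (marked > 0) {
    // Make all marked nmethods non-entrant at a safepoint.
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}
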
1846 
1847 //
1848 // Add an nmethodBucket to the list of dependencies for this nmethod.
1849 // It's possible that an nmethod has multiple dependencies on this klass
1850 // so a count is kept for each bucket to guarantee that creation and
1851 // deletion of dependencies is consistent. Returns new head of the list.
1852 //
1853 nmethodBucket* nmethodBucket::add_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
1854   assert_locked_or_safepoint(CodeCache_lock);
1855   for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
1856     if (nm == b->get_nmethod()) {
1857       b->increment();
1858       return deps;
1859     }
1860   }
1861   return new nmethodBucket(nm, deps);
1862 }
1863 
1864 //
1865 // Decrement count of the nmethod in the dependency list and remove
1866 // the bucket completely when the count goes to 0.  This method must
1867 // find a corresponding bucket otherwise there's a bug in the
1868 // recording of dependencies. Returns true if the bucket is ready for reclamation.
1869 //
1870 bool nmethodBucket::remove_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
1871   assert_locked_or_safepoint(CodeCache_lock);
1872 
1873   for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
1874     if (nm == b->get_nmethod()) {
1875       int val = b->decrement();
1876       guarantee(val >= 0, err_msg("Underflow: %d", val));
1877       return (val == 0);
1878     }
1879   }
1880 #ifdef ASSERT
1881   tty->print_raw_cr("### can't find dependent nmethod");
1882   nm->print();
1883 #endif // ASSERT
1884   ShouldNotReachHere();
1885   return false;
1886 }
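
To make the counting contract concrete, consider an nmethod with two dependencies whose context is the same klass. The walk-through below is hypothetical and assumes the nmethodBucket constructor starts the count at 1, which the increment/decrement pairing implies.

// Hypothetical illustration of the refcount contract; assumes the constructor
// initializes _count to 1 and that CodeCache_lock is held by the caller.
static void refcount_example(nmethod* nm) {
  nmethodBucket* deps = NULL;
  deps = nmethodBucket::add_dependent_nmethod(deps, nm);       // new bucket, count == 1
  deps = nmethodBucket::add_dependent_nmethod(deps, nm);       // same bucket, count == 2
  bool r1 = nmethodBucket::remove_dependent_nmethod(deps, nm); // count == 1, r1 == false
  bool r2 = nmethodBucket::remove_dependent_nmethod(deps, nm); // count == 0, r2 == true
  // The zero-count bucket is only physically unlinked later:
  deps = nmethodBucket::clean_dependent_nmethods(deps);        // deps becomes NULL
}
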
1887 
1888 //
1889 // Reclaim all unused buckets. Returns new head of the list.
1890 //
1891 nmethodBucket* nmethodBucket::clean_dependent_nmethods(nmethodBucket* deps) {
1892   nmethodBucket* first = deps;
1893   nmethodBucket* last = NULL;
1894   nmethodBucket* b = first;
1895 
1896   while (b != NULL) {
1897     assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
1898     nmethodBucket* next = b->next();
1899     if (b->count() == 0) {
1900       if (last == NULL) {
1901         first = next;
1902       } else {
1903         last->set_next(next);
1904       }
1905       delete b;
1906       // last stays the same.
1907     } else {
1908       last = b;
1909     }
1910     b = next;
1911   }
1912   return first;
1913 }
1914 
1915 #ifndef PRODUCT
1916 void nmethodBucket::print_dependent_nmethods(nmethodBucket* deps, bool verbose) {
1917   int idx = 0;
1918   for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
1919     nmethod* nm = b->get_nmethod();
1920     tty->print("[%d] count=%d { ", idx++, b->count());
1921     if (!verbose) {
1922       nm->print_on(tty, "nmethod");
1923       tty->print_cr(" } ");
1924     } else {
1925       nm->print();
1926       nm->print_dependencies();
1927       tty->print_cr("--- } ");
1928     }
1929   }
1930 }
1931 
1932 bool nmethodBucket::is_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
1933   for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
1934     if (nm == b->get_nmethod()) {
1935 #ifdef ASSERT
1936       int count = b->count();
1937       assert(count >= 0, err_msg("count shouldn't be negative: %d", count));
1938 #endif
1939       return true;
1940     }
1941   }
1942   return false;
1943 }
1944 #endif //PRODUCT
1945 
1946 int InstanceKlass::mark_dependent_nmethods(DepChange& changes) {
1947   assert_locked_or_safepoint(CodeCache_lock);
1948   return nmethodBucket::mark_dependent_nmethods(_dependencies, changes);
1949 }
1950 
1951 void InstanceKlass::clean_dependent_nmethods() {
1952   assert_locked_or_safepoint(CodeCache_lock);
1953 
1954   if (has_unloaded_dependent()) {
1955     _dependencies = nmethodBucket::clean_dependent_nmethods(_dependencies);
1956     set_has_unloaded_dependent(false);
1957   }
1958 #ifdef ASSERT
1959   else {
1960     // Verification
1961     for (nmethodBucket* b = _dependencies; b != NULL; b = b->next()) {
1962       assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
1963       assert(b->count() != 0, "empty buckets need to be cleaned");
1964     }
1965   }
1966 #endif
1967 }
1968 
1969 void InstanceKlass::add_dependent_nmethod(nmethod* nm) {
1970   assert_locked_or_safepoint(CodeCache_lock);
1971   _dependencies = nmethodBucket::add_dependent_nmethod(_dependencies, nm);
1972 }
1973 
1974 void InstanceKlass::remove_dependent_nmethod(nmethod* nm) {
1975   assert_locked_or_safepoint(CodeCache_lock);
1976 
1977   if (nmethodBucket::remove_dependent_nmethod(_dependencies, nm)) {
1978     set_has_unloaded_dependent(true);
1979   }
1980 }
1981 
1982 #ifndef PRODUCT
1983 void InstanceKlass::print_dependent_nmethods(bool verbose) {
1984   nmethodBucket::print_dependent_nmethods(_dependencies, verbose);


2145   set_jni_ids(NULL);
2146 
2147   jmethodID* jmeths = methods_jmethod_ids_acquire();
2148   if (jmeths != (jmethodID*)NULL) {
2149     release_set_methods_jmethod_ids(NULL);
2150     FreeHeap(jmeths);
2151   }
2152 
2153   // Deallocate MemberNameTable
2154   {
2155     Mutex* lock_or_null = SafepointSynchronize::is_at_safepoint() ? NULL : MemberNameTable_lock;
2156     MutexLockerEx ml(lock_or_null, Mutex::_no_safepoint_check_flag);
2157     MemberNameTable* mnt = member_names();
2158     if (mnt != NULL) {
2159       delete mnt;
2160       set_member_names(NULL);
2161     }
2162   }
2163 
2164   // release dependencies
2165   nmethodBucket* b = _dependencies;
2166   _dependencies = NULL;
2167   while (b != NULL) {
2168     nmethodBucket* next = b->next();
2169     delete b;
2170     b = next;
2171   }
2172 
2173   // Deallocate breakpoint records
2174   if (breakpoints() != 0x0) {
2175     methods_do(clear_all_breakpoints);
2176     assert(breakpoints() == 0x0, "should have cleared breakpoints");
2177   }
2178 
2179   // deallocate the cached class file
2180   if (_cached_class_file != NULL) {
2181     os::free(_cached_class_file);
2182     _cached_class_file = NULL;
2183   }
2184 
2185   // Decrement symbol reference counts associated with the unloaded class.
2186   if (_name != NULL) _name->decrement_refcount();
2187   // unreference array name derived from this class name (arrays of an unloaded
2188   // class can't be referenced anymore).
2189   if (_array_name != NULL)  _array_name->decrement_refcount();
2190   if (_source_debug_extension != NULL) FREE_C_HEAP_ARRAY(char, _source_debug_extension);
2191 

src/share/vm/oops/instanceKlass.cpp (with the proposed changes; the listing above is the unmodified file)

1796     *id_p = NULL;
1797   } else {
1798     *id_p = cache[idnum+1];  // fetch jmethodID (if any)
1799   }
1800 }
1801 
1802 
1803 // Look up a jmethodID; returns NULL if not found.  Do no blocking, no allocations, no handles
1804 jmethodID InstanceKlass::jmethod_id_or_null(Method* method) {
1805   size_t idnum = (size_t)method->method_idnum();
1806   jmethodID* jmeths = methods_jmethod_ids_acquire();
1807   size_t length;                                // length assigned as debugging crumb
1808   jmethodID id = NULL;
1809   if (jmeths != NULL &&                         // If there is a cache
1810       (length = (size_t)jmeths[0]) > idnum) {   // and if it is long enough,
1811     id = jmeths[idnum+1];                       // Look up the id (may be NULL)
1812   }
1813   return id;
1814 }
1815 
1816 int nmethodBucketEntry::decrement() {
1817   return Atomic::add(-1, (volatile int *)&_count);
1818 }
1819 
1820 int nmethodBucketEntry::mark_dependent_nmethods(nmethodBucketEntry* deps, DepChange& changes) {
1821   int found = 0, total = 0;
1822   for (nmethodBucketEntry* b = deps; b != NULL; b = b->next(), total++) {
1823     nmethod* nm = b->get_nmethod();
1824     // since dependencies aren't removed until an nmethod becomes a zombie,
1825     // the dependency list may contain nmethods which aren't alive.
1826     if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
1827       if (TraceDependencies) {
1828         ResourceMark rm;
1829         tty->print_cr("Marked for deoptimization");
1830         changes.print();
1831         nm->print();
1832         nm->print_dependencies();
1833       }
1834       nm->mark_for_deoptimization();
1835       found++;
1836     }
1837   }
1838   Dependencies::_perf_dependencies_checked_count->inc(total);
1839   return found;
1840 }
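
The counter update above (and the PerfTraceTime used by nmethodBucket::mark_dependent_nmethods further below) relies on perf counters hung off Dependencies that are declared elsewhere in the patch. The sketch below shows how such counters are typically created in HotSpot; the helper name, counter namespace, and counter name string are assumptions, not the patch's actual declarations.

// Sketch only: the authoritative declarations/initialization belong to the
// rest of the patch (e.g. code/dependencies.hpp/.cpp), not to this file.
PerfCounter* Dependencies::_perf_dependencies_checked_count = NULL;

void Dependencies::init_perf_counters(TRAPS) {   // hypothetical helper name
  if (UsePerfData) {
    _perf_dependencies_checked_count =
        PerfDataManager::create_counter(SUN_CI, "dependenciesChecked",
                                        PerfData::U_Events, CHECK);
  }
}
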
1841 
1842 nmethodBucketEntry* nmethodBucketEntry::add_dependent_nmethod(nmethodBucketEntry* deps, nmethod* nm) {
1843   assert_locked_or_safepoint(CodeCache_lock);
1844   for (nmethodBucketEntry* b = deps; b != NULL; b = b->next()) {
1845     if (nm == b->get_nmethod()) {
1846       b->increment();
1847       return deps;
1848     }
1849   }
1850   return new nmethodBucketEntry(nm, deps);
1851 }
1852 
1853 bool nmethodBucketEntry::remove_dependent_nmethod(nmethodBucketEntry* deps, nmethod* nm, bool& found) {
1854   assert_locked_or_safepoint(CodeCache_lock);
1855   for (nmethodBucketEntry* b = deps; b != NULL; b = b->next()) {
1856     if (nm == b->get_nmethod()) {
1857       int val = b->decrement();
1858       guarantee(val >= 0, err_msg("Underflow: %d", val));
1859       found = true;
1860       return (val == 0);
1861     }
1862   }
1863   return false;
1864 }
1865 
1866 nmethodBucketEntry* nmethodBucketEntry::clean_dependent_nmethods(nmethodBucketEntry* deps) {
1867   nmethodBucketEntry* first = deps;
1868   nmethodBucketEntry* last = NULL;
1869   nmethodBucketEntry* b = first;
1870 
1871   while (b != NULL) {
1872     assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
1873     nmethodBucketEntry* next = b->next();
1874     if (b->count() == 0) {
1875       if (last == NULL) {
1876         first = next;
1877       } else {
1878         last->set_next(next);
1879       }
1880       delete b;
1881       // last stays the same.
1882     } else {
1883       last = b;
1884     }
1885     b = next;
1886   }
1887   return first;
1888 }
1889 
1890 #ifndef PRODUCT
1891 void nmethodBucketEntry::verify(nmethodBucketEntry* deps) {
1892   for (nmethodBucketEntry* b = deps; b != NULL; b = b->next()) {
1893     assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
1894     assert(b->count() != 0, "empty buckets need to be cleaned");
1895   }
1896 }
1897 
1898 void nmethodBucketEntry::print_dependent_nmethods(nmethodBucketEntry* deps, bool verbose) {
1899   int idx = 0;
1900   for (nmethodBucketEntry* b = deps; b != NULL; b = b->next()) {
1901     nmethod* nm = b->get_nmethod();
1902     tty->print("[%d] count=%d { ", idx++, b->count());
1903     if (!verbose) {
1904       nm->print_on(tty, "nmethod");
1905       tty->print_cr(" } ");
1906     } else {
1907       nm->print();
1908       nm->print_dependencies();
1909       tty->print_cr("--- } ");
1910     }
1911   }
1912 }
1913 
1914 bool nmethodBucketEntry::is_dependent_nmethod(nmethodBucketEntry* deps, nmethod* nm) {
1915   for (nmethodBucketEntry* b = deps; b != NULL; b = b->next()) {
1916     if (nm == b->get_nmethod()) {
1917 #ifdef ASSERT
1918       int count = b->count();
1919       assert(count >= 0, err_msg("count shouldn't be negative: %d", count));
1920 #endif
1921       return true;
1922     }
1923   }
1924   return false;
1925 }
1926 #endif //PRODUCT
1927 
1928 
1929 int nmethodBucket::bucket_index(DepChange& changes) {
1930   if (changes.is_klass_change())               return KlassBucket;
1931   else if (changes.is_call_site_change())      return CallSiteBucket;
1932   else if (changes.is_constant_field_change()) return ConstantFieldBucket;
1933   else {
1934     ShouldNotReachHere();
1935     return -1;
1936   }
1937 }
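
bucket_index() and the loops that follow assume a small enum of bucket kinds plus a per-klass nmethodBucket that keeps one nmethodBucketEntry list per kind, so a given DepChange only has to walk the list it can possibly affect. A hedged sketch of what the header declaration might look like is below; the authoritative version is the instanceKlass.hpp change in this patch and may differ in detail.

// Hedged sketch of the declarations this file assumes; see instanceKlass.hpp
// in the patch for the real ones.
class nmethodBucketEntry;
class DepChange;
class nmethod;

class nmethodBucket : public CHeapObj<mtClass> {
 public:
  enum BucketType {
    FIRST_Bucket = 0,
    KlassBucket = FIRST_Bucket,   // ordinary klass/method dependencies
    CallSiteBucket,               // call_site_target_value dependencies
    ConstantFieldBucket,          // constant_field_value_* dependencies
    Bucket_LIMIT
  };
  nmethodBucketEntry* _buckets[Bucket_LIMIT];

  nmethodBucket() {
    for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) { _buckets[i] = NULL; }
  }

  static int            bucket_index(DepChange& changes);
  static int            mark_dependent_nmethods(nmethodBucket* deps, DepChange& changes);
  static nmethodBucket* add_dependent_nmethod(nmethodBucket* b, nmethod* nm);
  static bool           remove_dependent_nmethod(nmethodBucket* deps, nmethod* nm);
  static nmethodBucket* clean_dependent_nmethods(nmethodBucket* deps);
  static int            release(nmethodBucket* deps);
};
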
1938 
1939 //
1940 // Walk the list of dependent nmethods searching for nmethods which
1941 // are dependent on the changes that were passed in and mark them for
1942 // deoptimization.  Returns the number of nmethods found.
1943 //
1944 int nmethodBucket::mark_dependent_nmethods(nmethodBucket* deps, DepChange& changes) {
1945   assert_locked_or_safepoint(CodeCache_lock);
1946   PerfTraceTime pt(Dependencies::_perf_dependency_checking_time);
1947   int found = 0;
1948   if (deps != NULL) {
1949     int idx = bucket_index(changes);
1950     nmethodBucketEntry* b = deps->_buckets[idx];
1951     found = nmethodBucketEntry::mark_dependent_nmethods(b, changes);
1952     Dependencies::_perf_dependencies_context_traversals->inc(1);
1953     Dependencies::_perf_dependencies_invalidated->inc(found);
1954   }
1955   return found;
1956 }
1957 
1958 //
1959 // Add an nmethodBucket to the list of dependencies for this nmethod.
1960 // It's possible that an nmethod has multiple dependencies on this klass
1961 // so a count is kept for each bucket to guarantee that creation and
1962 // deletion of dependencies is consistent. Returns new head of the list.
1963 //
1964 nmethodBucket* nmethodBucket::add_dependent_nmethod(nmethodBucket* b, nmethod* nm) {
1965   assert_locked_or_safepoint(CodeCache_lock);
1966   if (b == NULL) {
1967     b = new nmethodBucket();
1968   }
1969   bool has_deps[Bucket_LIMIT];
1970   for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) { has_deps[i] = false; }
1971   for (Dependencies::DepStream deps(nm); deps.next(); ) {
1972     switch(deps.type()) {
1973       case Dependencies::call_site_target_value:        has_deps[CallSiteBucket] = true;      break;
1974       case Dependencies::constant_field_value_instance: // fallthru
1975       case Dependencies::constant_field_value_klass:    has_deps[ConstantFieldBucket] = true; break;
1976       default:                                          has_deps[KlassBucket] = true;         break;
1977     }
1978   }
1979   for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) {
1980     if (has_deps[i]) {
1981       b->_buckets[i] = nmethodBucketEntry::add_dependent_nmethod(b->_buckets[i], nm);
1982     }
1983   }
1984   return b;
1985 }
1986 
1987 //
1988 // Decrement count of the nmethod in the dependency list and remove
1989 // the bucket completely when the count goes to 0.  This method must
1990 // find a corresponding bucket otherwise there's a bug in the
1991 // recording of dependencies. Returns true if the bucket is ready for reclamation.
1992 //
1993 bool nmethodBucket::remove_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
1994   if (deps != NULL) {
1995     assert_locked_or_safepoint(CodeCache_lock);
1996     bool found = false, removed = false;
1997     for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) {
1998       bool r = nmethodBucketEntry::remove_dependent_nmethod(deps->_buckets[i], nm, found);
1999       removed = removed || r;
2000     }
2001     if (found) {
2002       return removed;
2003     }
2004   }
2005 #ifdef ASSERT
2006   tty->print_raw_cr("### can't find dependent nmethod");
2007   nm->print();
2008 #endif // ASSERT
2009   ShouldNotReachHere();
2010   return false;
2011 }
2012 
2013 //
2014 // Reclaim all unused buckets. Returns new head of the list.
2015 //
2016 nmethodBucket* nmethodBucket::clean_dependent_nmethods(nmethodBucket* deps) {
2017   if (deps == NULL)  return NULL;
2018   for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) {
2019     deps->_buckets[i] = nmethodBucketEntry::clean_dependent_nmethods(deps->_buckets[i]);
2020   }
2021   return deps;
2022 }
2023 
2024 int nmethodBucket::release(nmethodBucket* deps) {
2025   if (deps == NULL)  return 0;
2026   int marked = 0;
2027   for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) {
2028     nmethodBucketEntry* entry = deps->_buckets[i];
2029     while (entry != NULL) {
2030       nmethod* nm = entry->get_nmethod();
2031       if (entry->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization()) {
2032         nm->mark_for_deoptimization();
2033         marked++;
2034       }
2035       nmethodBucketEntry* next = entry->next();
2036       delete entry;
2037       entry = next;
2038     }
2039   }
2040   delete deps;
2041   return marked;
2042 }
2043 
2044 #ifndef PRODUCT
2045 void nmethodBucket::verify(nmethodBucket* deps) {
2046   if (deps == NULL)  return;
2047   for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) {
2048     nmethodBucketEntry::verify(deps->_buckets[i]);
2049   }
2050 }
2051 
2052 void nmethodBucket::print_dependent_nmethods(nmethodBucket* deps, bool verbose) {
2053   if (deps == NULL)  return;
2054   for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) {
2055     tty->print_cr("Bucket #%d: ", i);
2056     nmethodBucketEntry::print_dependent_nmethods(deps->_buckets[i], verbose);
2057   }
2058 }
2059 
2060 bool nmethodBucket::is_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
2061   if (deps == NULL)  return false;
2062   for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) {
2063     if (nmethodBucketEntry::is_dependent_nmethod(deps->_buckets[i], nm)) {
2064       return true;
2065     }
2066   }
2067   return false;
2068 }
2069 #endif //PRODUCT
2070 
2071 int InstanceKlass::mark_dependent_nmethods(DepChange& changes) {
2072   assert_locked_or_safepoint(CodeCache_lock);
2073   return nmethodBucket::mark_dependent_nmethods(_dependencies, changes);
2074 }
2075 
2076 void InstanceKlass::clean_dependent_nmethods() {
2077   assert_locked_or_safepoint(CodeCache_lock);
2078 
2079   if (has_unloaded_dependent()) {
2080     _dependencies = nmethodBucket::clean_dependent_nmethods(_dependencies);
2081     set_has_unloaded_dependent(false);
2082   }
2083 #ifdef ASSERT
2084   else {
2085     // Verification
2086     nmethodBucket::verify(_dependencies);
2087   }
2088 #endif
2089 }
2090 
2091 void InstanceKlass::add_dependent_nmethod(nmethod* nm) {
2092   assert_locked_or_safepoint(CodeCache_lock);
2093   _dependencies = nmethodBucket::add_dependent_nmethod(_dependencies, nm);
2094 }
2095 
2096 void InstanceKlass::remove_dependent_nmethod(nmethod* nm) {
2097   assert_locked_or_safepoint(CodeCache_lock);
2098 
2099   if (nmethodBucket::remove_dependent_nmethod(_dependencies, nm)) {
2100     set_has_unloaded_dependent(true);
2101   }
2102 }
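
For context, these InstanceKlass entry points are driven from nmethod registration and flushing. A simplified, hypothetical sketch of that call pattern follows; the real call sites are in nmethod/code-cache code and are only paraphrased here.

// Hypothetical paraphrase of the callers; not code from this patch.
static void register_nmethod_dependencies(nmethod* nm) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  for (Dependencies::DepStream deps(nm); deps.next(); ) {
    Klass* ctx = deps.context_type();
    if (ctx == NULL)  continue;  // e.g. call-site deps resolve their context differently; simplified away
    InstanceKlass::cast(ctx)->add_dependent_nmethod(nm);    // bumps the per-nmethod count
  }
}

static void flush_nmethod_dependencies(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  for (Dependencies::DepStream deps(nm); deps.next(); ) {
    Klass* ctx = deps.context_type();
    if (ctx == NULL)  continue;
    InstanceKlass::cast(ctx)->remove_dependent_nmethod(nm); // klass is cleaned later if a count hit zero
  }
}
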
2103 
2104 #ifndef PRODUCT
2105 void InstanceKlass::print_dependent_nmethods(bool verbose) {
2106   nmethodBucket::print_dependent_nmethods(_dependencies, verbose);


2267   set_jni_ids(NULL);
2268 
2269   jmethodID* jmeths = methods_jmethod_ids_acquire();
2270   if (jmeths != (jmethodID*)NULL) {
2271     release_set_methods_jmethod_ids(NULL);
2272     FreeHeap(jmeths);
2273   }
2274 
2275   // Deallocate MemberNameTable
2276   {
2277     Mutex* lock_or_null = SafepointSynchronize::is_at_safepoint() ? NULL : MemberNameTable_lock;
2278     MutexLockerEx ml(lock_or_null, Mutex::_no_safepoint_check_flag);
2279     MemberNameTable* mnt = member_names();
2280     if (mnt != NULL) {
2281       delete mnt;
2282       set_member_names(NULL);
2283     }
2284   }
2285 
2286   // release dependencies
2287   int marked = nmethodBucket::release(_dependencies);
2288   assert(marked == 0, "");
2289   _dependencies = NULL;
2290 
2291   // Deallocate breakpoint records
2292   if (breakpoints() != 0x0) {
2293     methods_do(clear_all_breakpoints);
2294     assert(breakpoints() == 0x0, "should have cleared breakpoints");
2295   }
2296 
2297   // deallocate the cached class file
2298   if (_cached_class_file != NULL) {
2299     os::free(_cached_class_file);
2300     _cached_class_file = NULL;
2301   }
2302 
2303   // Decrement symbol reference counts associated with the unloaded class.
2304   if (_name != NULL) _name->decrement_refcount();
2305   // unreference array name derived from this class name (arrays of an unloaded
2306   // class can't be referenced anymore).
2307   if (_array_name != NULL)  _array_name->decrement_refcount();
2308   if (_source_debug_extension != NULL) FREE_C_HEAP_ARRAY(char, _source_debug_extension);
2309 