src/hotspot/share/oops/instanceKlass.cpp


1092   assert(!is_initialized(), "we cannot initialize twice");
1093   LogTarget(Info, class, init) lt;
1094   if (lt.is_enabled()) {
1095     ResourceMark rm;
1096     LogStream ls(lt);
1097     ls.print("%d Initializing ", call_class_initializer_counter++);
1098     name()->print_value_on(&ls);
1099     ls.print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", p2i(this));
1100   }
1101   if (h_method() != NULL) {
1102     JavaCallArguments args; // No arguments
1103     JavaValue result(T_VOID);
1104     JavaCalls::call(&result, h_method, &args, CHECK); // Static call (no args)
1105   }
1106 }
1107 
1108 
1109 void InstanceKlass::mask_for(const methodHandle& method, int bci,
1110   InterpreterOopMap* entry_for) {
1111   // Lazily create the _oop_map_cache at first request
1112   // Lock-free access requires load_ptr_acquire.
1113   OopMapCache* oop_map_cache =
1114       static_cast<OopMapCache*>(OrderAccess::load_ptr_acquire(&_oop_map_cache));
1115   if (oop_map_cache == NULL) {
1116     MutexLocker x(OopMapCacheAlloc_lock);
1117     // Check if _oop_map_cache was allocated while we were waiting for this lock
1118     if ((oop_map_cache = _oop_map_cache) == NULL) {
1119       oop_map_cache = new OopMapCache();
1120       // Ensure _oop_map_cache is stable, since it is examined without a lock
1121       OrderAccess::release_store_ptr(&_oop_map_cache, oop_map_cache);
1122     }
1123   }
1124   // _oop_map_cache is constant after init; lookup below does its own locking.
1125   oop_map_cache->lookup(method, bci, entry_for);
1126 }
1127 
1128 
1129 bool InstanceKlass::find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
1130   for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
1131     Symbol* f_name = fs.name();
1132     Symbol* f_sig  = fs.signature();
1133     if (f_name == name && f_sig == sig) {
1134       fd->reinitialize(const_cast<InstanceKlass*>(this), fs.index());
1135       return true;
1136     }
1137   }
1138   return false;
1139 }
1140 
1141 


1655       index + enclosing_method_method_index_offset, method_index);
1656   }
1657 }
1658 
1659 // Look up or create a jmethodID.
1660 // This code is called by the VMThread and JavaThreads so the
1661 // locking has to be done very carefully to avoid deadlocks
1662 // and/or other cache consistency problems.
1663 //
1664 jmethodID InstanceKlass::get_jmethod_id(const methodHandle& method_h) {
1665   size_t idnum = (size_t)method_h->method_idnum();
1666   jmethodID* jmeths = methods_jmethod_ids_acquire();
1667   size_t length = 0;
1668   jmethodID id = NULL;
1669 
1670   // We use a double-check locking idiom here because this cache is
1671   // performance sensitive. In the normal system, this cache only
1672   // transitions from NULL to non-NULL which is safe because we use
1673   // release_set_methods_jmethod_ids() to advertise the new cache.
1674   // A partially constructed cache should never be seen by a racing
1675   // thread. We also use release_store_ptr() to save a new jmethodID
1676   // in the cache so a partially constructed jmethodID should never be
1677   // seen either. Cache reads of existing jmethodIDs proceed without a
1678   // lock, but cache writes of a new jmethodID require uniqueness and
1679   // creation of the cache itself requires no leaks so a lock is
1680   // generally acquired in those two cases.
1681   //
1682   // If the RedefineClasses() API has been used, then this cache can
1683   // grow and we'll have transitions from non-NULL to bigger non-NULL.
1684   // Cache creation requires no leaks and we require safety between all
1685   // cache accesses and freeing of the old cache so a lock is generally
1686   // acquired when the RedefineClasses() API has been used.
1687 
1688   if (jmeths != NULL) {
1689     // the cache already exists
1690     if (!idnum_can_increment()) {
1691       // the cache can't grow so we can just get the current values
1692       get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1693     } else {
1694       // cache can grow so we have to be more careful
1695       if (Threads::number_of_threads() == 0 ||


1814         new_jmeths[index+1] = jmeths[index+1];
1815       }
1816       *to_dealloc_jmeths_p = jmeths;  // save old cache for later delete
1817     }
1818     release_set_methods_jmethod_ids(jmeths = new_jmeths);
1819   } else {
1820     // fetch jmethodID (if any) from the existing cache
1821     id = jmeths[idnum+1];
1822     *to_dealloc_jmeths_p = new_jmeths;  // save new cache for later delete
1823   }
1824   if (id == NULL) {
1825     // No matching jmethodID in the existing cache or we have a new
1826     // cache or we just grew the cache. This cache write is done here
1827     // by the first thread to win the foot race because a jmethodID
1828     // needs to be unique once it is generally available.
1829     id = new_id;
1830 
1831     // The jmethodID cache can be read while unlocked so we have to
1832     // make sure the new jmethodID is complete before installing it
1833     // in the cache.
1834     OrderAccess::release_store_ptr(&jmeths[idnum+1], id);
1835   } else {
1836     *to_dealloc_id_p = new_id; // save new id for later delete
1837   }
1838   return id;
1839 }
1840 
1841 
1842 // Common code to get the jmethodID cache length and the jmethodID
1843 // value at index idnum if there is one.
1844 //
1845 void InstanceKlass::get_jmethod_id_length_value(jmethodID* cache,
1846        size_t idnum, size_t *length_p, jmethodID* id_p) {
1847   assert(cache != NULL, "sanity check");
1848   assert(length_p != NULL, "sanity check");
1849   assert(id_p != NULL, "sanity check");
1850 
1851   // cache size is stored in element[0], other elements offset by one
1852   *length_p = (size_t)cache[0];
1853   if (*length_p <= idnum) {  // cache is too short
1854     *id_p = NULL;




1092   assert(!is_initialized(), "we cannot initialize twice");
1093   LogTarget(Info, class, init) lt;
1094   if (lt.is_enabled()) {
1095     ResourceMark rm;
1096     LogStream ls(lt);
1097     ls.print("%d Initializing ", call_class_initializer_counter++);
1098     name()->print_value_on(&ls);
1099     ls.print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", p2i(this));
1100   }
1101   if (h_method() != NULL) {
1102     JavaCallArguments args; // No arguments
1103     JavaValue result(T_VOID);
1104     JavaCalls::call(&result, h_method, &args, CHECK); // Static call (no args)
1105   }
1106 }
1107 
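
The `<clinit>` call above uses HotSpot's general mechanism for entering Java from VM code: a JavaValue for the return value, a JavaCallArguments for the (empty) argument list, and a CHECK macro for pending exceptions. As a minimal sketch, the same pattern for an arbitrary static void method, assuming the JavaCalls::call_static overload declared in this era's javaCalls.hpp (the helper name here is hypothetical):

  // Sketch: invoke a static no-arg Java method from VM code.
  // Assumes JavaCalls::call_static(JavaValue*, Klass*, Symbol*, Symbol*,
  // JavaCallArguments*, TRAPS) as declared in javaCalls.hpp.
  static void call_static_void(Klass* k, Symbol* name, Symbol* sig, TRAPS) {
    JavaValue result(T_VOID);     // static void method: no return value
    JavaCallArguments args;       // no receiver, no arguments
    JavaCalls::call_static(&result, k, name, sig, &args, CHECK);
  }
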
1108 
1109 void InstanceKlass::mask_for(const methodHandle& method, int bci,
1110   InterpreterOopMap* entry_for) {
1111   // Lazily create the _oop_map_cache at first request
1112   // Lock-free access requires load_acquire.
1113   OopMapCache* oop_map_cache = OrderAccess::load_acquire(&_oop_map_cache);

1114   if (oop_map_cache == NULL) {
1115     MutexLocker x(OopMapCacheAlloc_lock);
1116     // Check if _oop_map_cache was allocated while we were waiting for this lock
1117     if ((oop_map_cache = _oop_map_cache) == NULL) {
1118       oop_map_cache = new OopMapCache();
1119       // Ensure _oop_map_cache is stable, since it is examined without a lock
1120       OrderAccess::release_store(&_oop_map_cache, oop_map_cache);
1121     }
1122   }
1123   // _oop_map_cache is constant after init; lookup below does its own locking.
1124   oop_map_cache->lookup(method, bci, entry_for);
1125 }
1126 
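
mask_for() above is the classic double-checked locking idiom: an unlocked acquire-load, a lock, a re-check under the lock, then a release-store to publish the fully constructed cache. A self-contained sketch of the same shape in portable C++11, with std::atomic and std::mutex standing in for HotSpot's OrderAccess and MutexLocker (Cache is a placeholder type, not OopMapCache):

  #include <atomic>
  #include <mutex>

  struct Cache { /* ... */ };

  static std::atomic<Cache*> g_cache{nullptr};
  static std::mutex g_cache_lock;

  Cache* get_cache() {
    // Unlocked fast path; acquire pairs with the release-store below.
    Cache* c = g_cache.load(std::memory_order_acquire);
    if (c == nullptr) {
      std::lock_guard<std::mutex> guard(g_cache_lock);
      // Re-check: another thread may have installed the cache while we waited.
      c = g_cache.load(std::memory_order_relaxed);
      if (c == nullptr) {
        c = new Cache();
        // Release ensures the cache is fully constructed before it is visible.
        g_cache.store(c, std::memory_order_release);
      }
    }
    return c;
  }
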
1127 
1128 bool InstanceKlass::find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
1129   for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
1130     Symbol* f_name = fs.name();
1131     Symbol* f_sig  = fs.signature();
1132     if (f_name == name && f_sig == sig) {
1133       fd->reinitialize(const_cast<InstanceKlass*>(this), fs.index());
1134       return true;
1135     }
1136   }
1137   return false;
1138 }
1139 
1140 
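
find_local_field() compares Symbol* values with == rather than comparing characters; that is sound only because HotSpot interns symbols, so two equal names are the same Symbol instance. A toy illustration of the interning idiom (std::unordered_set here, not HotSpot's SymbolTable):

  #include <cassert>
  #include <string>
  #include <unordered_set>

  // Toy intern table: equal strings map to one stored instance, so pointer
  // identity substitutes for (and is cheaper than) character comparison.
  const std::string* intern(const std::string& s) {
    static std::unordered_set<std::string> table;   // nodes are address-stable
    return &*table.insert(s).first;
  }

  // usage:
  //   assert(intern("value") == intern("value"));   // one instance per string
  //   assert(intern("value") != intern("other"));
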


1654       index + enclosing_method_method_index_offset, method_index);
1655   }
1656 }
1657 
1658 // Look up or create a jmethodID.
1659 // This code is called by the VMThread and JavaThreads so the
1660 // locking has to be done very carefully to avoid deadlocks
1661 // and/or other cache consistency problems.
1662 //
1663 jmethodID InstanceKlass::get_jmethod_id(const methodHandle& method_h) {
1664   size_t idnum = (size_t)method_h->method_idnum();
1665   jmethodID* jmeths = methods_jmethod_ids_acquire();
1666   size_t length = 0;
1667   jmethodID id = NULL;
1668 
1669   // We use a double-check locking idiom here because this cache is
1670   // performance sensitive. In the normal system, this cache only
1671   // transitions from NULL to non-NULL which is safe because we use
1672   // release_set_methods_jmethod_ids() to advertise the new cache.
1673   // A partially constructed cache should never be seen by a racing
1674   // thread. We also use release_store() to save a new jmethodID
1675   // in the cache so a partially constructed jmethodID should never be
1676   // seen either. Cache reads of existing jmethodIDs proceed without a
1677   // lock, but cache writes of a new jmethodID require uniqueness and
1678   // creation of the cache itself requires no leaks so a lock is
1679   // generally acquired in those two cases.
1680   //
1681   // If the RedefineClasses() API has been used, then this cache can
1682   // grow and we'll have transitions from non-NULL to bigger non-NULL.
1683   // Cache creation requires no leaks and we require safety between all
1684   // cache accesses and freeing of the old cache so a lock is generally
1685   // acquired when the RedefineClasses() API has been used.
1686 
1687   if (jmeths != NULL) {
1688     // the cache already exists
1689     if (!idnum_can_increment()) {
1690       // the cache can't grow so we can just get the current values
1691       get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1692     } else {
1693       // cache can grow so we have to be more careful
1694       if (Threads::number_of_threads() == 0 ||


1813         new_jmeths[index+1] = jmeths[index+1];
1814       }
1815       *to_dealloc_jmeths_p = jmeths;  // save old cache for later delete
1816     }
1817     release_set_methods_jmethod_ids(jmeths = new_jmeths);
1818   } else {
1819     // fetch jmethodID (if any) from the existing cache
1820     id = jmeths[idnum+1];
1821     *to_dealloc_jmeths_p = new_jmeths;  // save new cache for later delete
1822   }
1823   if (id == NULL) {
1824     // No matching jmethodID in the existing cache or we have a new
1825     // cache or we just grew the cache. This cache write is done here
1826     // by the first thread to win the foot race because a jmethodID
1827     // needs to be unique once it is generally available.
1828     id = new_id;
1829 
1830     // The jmethodID cache can be read while unlocked so we have to
1831     // make sure the new jmethodID is complete before installing it
1832     // in the cache.
1833     OrderAccess::release_store(&jmeths[idnum+1], id);
1834   } else {
1835     *to_dealloc_id_p = new_id; // save new id for later delete
1836   }
1837   return id;
1838 }
1839 
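
The slot write above is the other half of the idiom described in the big comment: readers scan the cache without a lock, so a new jmethodID must be release-stored, and only the first writer's id may ever become visible in a given slot. A condensed sketch of that publish-or-discard step, assuming (as in get_jmethod_id) that writers already hold the creation lock; the names are hypothetical and std::atomic stands in for OrderAccess:

  #include <atomic>
  #include <cstddef>

  typedef void* FakeID;   // stand-in for jmethodID

  // Caller holds the creation lock, so writers do not race each other;
  // the release-store only orders this write against lock-free readers.
  FakeID publish_or_discard(std::atomic<FakeID>* slots, size_t idnum,
                            FakeID new_id, FakeID* to_dealloc_id_p) {
    FakeID id = slots[idnum + 1].load(std::memory_order_acquire);
    if (id == nullptr) {
      // First writer: make new_id fully visible before the pointer itself.
      slots[idnum + 1].store(new_id, std::memory_order_release);
      id = new_id;
    } else {
      *to_dealloc_id_p = new_id;   // already present: discard the spare id
    }
    return id;
  }
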
1840 
1841 // Common code to get the jmethodID cache length and the jmethodID
1842 // value at index idnum if there is one.
1843 //
1844 void InstanceKlass::get_jmethod_id_length_value(jmethodID* cache,
1845        size_t idnum, size_t *length_p, jmethodID* id_p) {
1846   assert(cache != NULL, "sanity check");
1847   assert(length_p != NULL, "sanity check");
1848   assert(id_p != NULL, "sanity check");
1849 
1850   // cache size is stored in element[0], other elements offset by one
1851   *length_p = (size_t)cache[0];
1852   if (*length_p <= idnum) {  // cache is too short
1853     *id_p = NULL;
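
The helper depends on the cache's length-prefixed layout: element[0] holds the capacity and the id for method idnum lives at element[idnum + 1], so a single bounds check against element[0] guards every lock-free read. A minimal sketch of that layout (function names hypothetical):

  #include <cstdint>
  #include <cstdlib>

  typedef void* FakeID;   // stand-in for jmethodID

  // element[0] = capacity; ids are offset by one, as in the code above.
  FakeID* alloc_cache(size_t capacity) {
    FakeID* cache = (FakeID*)calloc(capacity + 1, sizeof(FakeID));
    cache[0] = (FakeID)(uintptr_t)capacity;
    return cache;
  }

  FakeID cache_get(FakeID* cache, size_t idnum) {
    size_t length = (size_t)(uintptr_t)cache[0];
    return (idnum < length) ? cache[idnum + 1] : nullptr;   // too short: no id
  }
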

