void InstanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS) {
  Handle h_init_lock(THREAD, init_lock());
  if (h_init_lock() != NULL) {
    ObjectLocker ol(h_init_lock, THREAD);
    set_init_thread(NULL); // reset _init_thread before changing _init_state
    set_init_state(state);
    fence_and_clear_init_lock();
    ol.notify_all(CHECK);
  } else {
    assert(h_init_lock() != NULL, "The initialization state should never be set twice");
    set_init_thread(NULL); // reset _init_thread before changing _init_state
    set_init_state(state);
  }
}
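
// A sketch of the waiter side that the notify_all() above releases
// (approximate; the exact loop lives in InstanceKlass::initialize_impl):
// threads that lose the initialization race block on the same init lock,
// roughly like this:
//
//   ObjectLocker ol(h_init_lock, THREAD);
//   while (is_being_initialized() && !is_reentrant_initialization(jt)) {
//     ol.waitUninterruptibly(CHECK);
//   }
//
// fence_and_clear_init_lock() fences the state change and then nulls the
// _init_lock slot, so once initialization is complete, later checks can
// take a lock-free fast path.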

Klass* InstanceKlass::implementor() const {
  Klass* volatile* k = adr_implementor();
  if (k == NULL) {
    return NULL;
  } else {
    // This load races with inserts, and therefore needs acquire.
    Klass* kls = OrderAccess::load_acquire(k);
    if (kls != NULL && !kls->is_loader_alive()) {
      return NULL; // don't return unloaded class
    } else {
      return kls;
    }
  }
}


void InstanceKlass::set_implementor(Klass* k) {
  assert_locked_or_safepoint(Compile_lock);
  assert(is_interface(), "not interface");
  Klass* volatile* addr = adr_implementor();
  assert(addr != NULL, "null addr");
  if (addr != NULL) {
    OrderAccess::release_store(addr, k);
  }
}
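
// The release_store above pairs with the load_acquire in implementor():
// every store that initialized k is made visible before the new implementor
// pointer itself becomes visible to lock-free readers.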

int InstanceKlass::nof_implementors() const {
  Klass* k = implementor();
  if (k == NULL) {
    return 0;
  } else if (k != this) {
    return 1;
  } else {
    return 2;
  }
}

// The embedded _implementor field can only record one implementor.
// When there is more than one implementor, the _implementor field
// is set to the interface Klass* itself. Following are the possible
// values for the _implementor field:
//   NULL               - no implementor
//   implementor Klass* - one implementor
//   self               - more than one implementor
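//
// For example (illustrative): if interface I is first implemented by class A
// and later also by class B, _implementor goes NULL -> A -> I itself, which
// nof_implementors() above decodes as 0, 1, and 2 ("two or more").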

// ... (code elided) ...

// (from the body of InstanceKlass::call_class_initializer)
  LogTarget(Info, class, init) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    ls.print("%d Initializing ", call_class_initializer_counter++);
    name()->print_value_on(&ls);
    ls.print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", p2i(this));
  }
  if (h_method() != NULL) {
    JavaCallArguments args; // No arguments
    JavaValue result(T_VOID);
    JavaCalls::call(&result, h_method, &args, CHECK); // Static call (no args)
  }
}


void InstanceKlass::mask_for(const methodHandle& method, int bci,
                             InterpreterOopMap* entry_for) {
  // Lazily create the _oop_map_cache at first request.
  // Lock-free access requires load_acquire.
  OopMapCache* oop_map_cache = OrderAccess::load_acquire(&_oop_map_cache);
  if (oop_map_cache == NULL) {
    MutexLocker x(OopMapCacheAlloc_lock);
    // Check if _oop_map_cache was allocated while we were waiting for this lock.
    if ((oop_map_cache = _oop_map_cache) == NULL) {
      oop_map_cache = new OopMapCache();
      // Ensure _oop_map_cache is stable, since it is examined without a lock.
      OrderAccess::release_store(&_oop_map_cache, oop_map_cache);
    }
  }
  // _oop_map_cache is constant after init; lookup below does its own locking.
  oop_map_cache->lookup(method, bci, entry_for);
}
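
// The lazy allocation in mask_for() is double-checked locking: the unlocked
// load_acquire fast path pairs with the release_store done under
// OopMapCacheAlloc_lock, so a thread that sees a non-NULL _oop_map_cache
// also sees the fully constructed OopMapCache it points to.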


bool InstanceKlass::find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
    Symbol* f_name = fs.name();
    Symbol* f_sig  = fs.signature();
    if (f_name == name && f_sig == sig) {
      fd->reinitialize(const_cast<InstanceKlass*>(this), fs.index());
      return true;
    }
  }
  return false;
}


// ... (code elided) ...

// (tail of InstanceKlass::get_jmethod_id_fetch_or_update)
        new_jmeths[index+1] = jmeths[index+1];
      }
      *to_dealloc_jmeths_p = jmeths; // save old cache for later delete
    }
    release_set_methods_jmethod_ids(jmeths = new_jmeths);
  } else {
    // fetch jmethodID (if any) from the existing cache
    id = jmeths[idnum+1];
    *to_dealloc_jmeths_p = new_jmeths; // save new cache for later delete
  }
  if (id == NULL) {
    // No matching jmethodID in the existing cache, or we have a new
    // cache, or we just grew the cache. This cache write is done here
    // by the first thread to win the foot race because a jmethodID
    // needs to be unique once it is generally available.
    id = new_id;

    // The jmethodID cache can be read while unlocked, so we have to
    // make sure the new jmethodID is complete before installing it
    // in the cache.
    OrderAccess::release_store(&jmeths[idnum+1], id);
  } else {
    *to_dealloc_id_p = new_id; // save new id for later delete
  }
  return id;
}
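
// Readers of the jmethodID cache run without the lock, so the release_store
// above must be matched by an acquire load when the cache is read; otherwise
// a reader could observe the new jmethodID pointer before its contents.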


// Common code to get the jmethodID cache length and the jmethodID
// value at index idnum if there is one.
//
void InstanceKlass::get_jmethod_id_length_value(jmethodID* cache,
       size_t idnum, size_t *length_p, jmethodID* id_p) {
  assert(cache != NULL, "sanity check");
  assert(length_p != NULL, "sanity check");
  assert(id_p != NULL, "sanity check");

  // cache size is stored in element[0], other elements offset by one
  *length_p = (size_t)cache[0];
  if (*length_p <= idnum) { // cache is too short
    *id_p = NULL;
  } else {
    *id_p = cache[idnum+1]; // fetch jmethodID (if any)
  }
}

// ... (code elided) ...

#ifndef PRODUCT
void InstanceKlass::print_dependent_nmethods(bool verbose) {
  dependencies().print_dependent_nmethods(verbose);
}

bool InstanceKlass::is_dependent_nmethod(nmethod* nm) {
  return dependencies().is_dependent_nmethod(nm);
}
#endif //PRODUCT

void InstanceKlass::clean_weak_instanceklass_links() {
  clean_implementors_list();
  clean_method_data();
}

void InstanceKlass::clean_implementors_list() {
  assert(is_loader_alive(), "this klass should be live");
  if (is_interface()) {
    assert(ClassUnloading, "only called for ClassUnloading");
    for (;;) {
      // Use load_acquire due to competing with inserts.
      Klass* impl = OrderAccess::load_acquire(adr_implementor());
      if (impl != NULL && !impl->is_loader_alive()) {
        // NULL this field; it might hold an unloaded klass or NULL.
        Klass* volatile* klass = adr_implementor();
        if (Atomic::cmpxchg((Klass*)NULL, klass, impl) == impl) {
          // Successfully unlinked implementor.
          if (log_is_enabled(Trace, class, unload)) {
            ResourceMark rm;
            log_trace(class, unload)("unlinking class (implementor): %s", impl->external_name());
          }
          return;
        }
      } else {
        return;
      }
    }
  }
}
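
// The cmpxchg above swings _implementor to NULL only if it still holds the
// dead klass that was just loaded; if a concurrent insert installs a live
// implementor first, the CAS fails and the loop re-reads the field, so a
// live implementor is never clobbered.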

void InstanceKlass::clean_method_data() {
  for (int m = 0; m < methods()->length(); m++) {