1228 while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
1229 callee_method->method_holder() != SystemDictionary::Object_klass()) {
1230 // If there is a pending exception then there is no need to retry
1231 // resolving this method.
1232 // If the method has been redefined, we need to try again.
1233 // Hack: we have no way to update the vtables of arrays, so don't
1234 // require that java.lang.Object has been updated.
1235
1236 // It is very unlikely that a method is redefined more than 100 times
1237 // in the middle of resolve. If this loops more than 100 times, there
1238 // could be a bug here.
1239 guarantee((retry_count++ < 100),
1240 "Could not resolve to latest version of redefined method");
1241 // The method was redefined in the middle of resolve, so retry.
1242 callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
1243 }
1244 }
1245 return callee_method;
1246 }
1247
1248 // Resolves a call. The compilers generate code for calls that go here
1249 // and are patched with the real destination of the call.
1250 methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
1251 bool is_virtual,
1252 bool is_optimized, TRAPS) {
1253
1254 ResourceMark rm(thread);
1255 RegisterMap cbl_map(thread, false);
1256 frame caller_frame = thread->last_frame().sender(&cbl_map);
1257
1258 CodeBlob* caller_cb = caller_frame.cb();
1259 guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
1260 CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
1261
1262 // make sure caller is not getting deoptimized
1263 // and removed before we are done with it.
1264 // CLEANUP - with lazy deopt shouldn't need this lock
1265 nmethodLocker caller_lock(caller_nm);
1266
1267 // determine call info & receiver
1268 // note: a) receiver is NULL for static calls
1269 // b) an exception is thrown if receiver is NULL for non-static calls
1270 CallInfo call_info;
1271 Bytecodes::Code invoke_code = Bytecodes::_illegal;
|
1228 while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
1229 callee_method->method_holder() != SystemDictionary::Object_klass()) {
1230 // If there is a pending exception then there is no need to retry
1231 // resolving this method.
1232 // If the method has been redefined, we need to try again.
1233 // Hack: we have no way to update the vtables of arrays, so don't
1234 // require that java.lang.Object has been updated.
1235
1236 // It is very unlikely that a method is redefined more than 100 times
1237 // in the middle of resolve. If this loops more than 100 times, there
1238 // could be a bug here.
1239 guarantee((retry_count++ < 100),
1240 "Could not resolve to latest version of redefined method");
1241 // The method was redefined in the middle of resolve, so retry.
1242 callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
1243 }
1244 }
1245 return callee_method;
1246 }
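// Note: the retry loop above relies on the TRAPS/CHECK convention: if
// resolve_sub_helper posts a pending exception, the !HAS_PENDING_EXCEPTION
// guard stops further retries and the exception propagates to the caller
// rather than being swallowed here.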
1247
1248 // This fails (returns false) if resolution requires refilling of IC stubs
1249 bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, const frame& caller_frame,
1250 CompiledMethod* caller_nm, bool is_virtual, bool is_optimized,
1251 Handle receiver, CallInfo& call_info, Bytecodes::Code invoke_code, TRAPS) {
1252 StaticCallInfo static_call_info;
1253 CompiledICInfo virtual_call_info;
1254
1255 // Make sure the callee nmethod does not get deoptimized and removed before
1256 // we are done patching the code.
1257 CompiledMethod* callee = callee_method->code();
1258
1259 if (callee != NULL) {
1260 assert(callee->is_compiled(), "must be nmethod for patching");
1261 }
1262
1263 if (callee != NULL && !callee->is_in_use()) {
1264 // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
1265 callee = NULL;
1266 }
1267 nmethodLocker nl_callee(callee);
1268 #ifdef ASSERT
1269 address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
1270 #endif
1271
1272 bool is_nmethod = caller_nm->is_nmethod();
1273
1274 if (is_virtual) {
1275 assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
1276 bool static_bound = call_info.resolved_method()->can_be_statically_bound();
1277 Klass* klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass();
1278 CompiledIC::compute_monomorphic_entry(callee_method, klass,
1279 is_optimized, static_bound, is_nmethod, virtual_call_info,
1280 CHECK_false);
1281 } else {
1282 // static call
1283 CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info);
1284 }
1285
1286 // grab lock, check for deoptimization and potentially patch caller
1287 {
1288 CompiledICLocker ml(caller_nm);
1289
1290 // Acquiring the lock may block for a safepoint, during which both
1291 // nmethods can change state.
1292 // Now that we are ready to patch, if the Method* was redefined then
1293 // don't update the call site and let the caller retry.
1294 // Don't update call site if callee nmethod was unloaded or deoptimized.
1295 // Don't update call site if callee nmethod was replaced by another nmethod,
1296 // which may happen when multiple alive nmethods (tiered compilation)
1297 // are supported.
1298 if (!callee_method->is_old() &&
1299 (callee == NULL || (callee->is_in_use() && callee_method->code() == callee))) {
1300 #ifdef ASSERT
1301 // We must not try to patch to jump to an already unloaded method.
1302 if (dest_entry_point != 0) {
1303 CodeBlob* cb = CodeCache::find_blob(dest_entry_point);
1304 assert((cb != NULL) && cb->is_compiled() && (((CompiledMethod*)cb) == callee),
1305 "should not call unloaded nmethod");
1306 }
1307 #endif
1308 if (is_virtual) {
1309 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1310 if (inline_cache->is_clean()) {
1311 if (!inline_cache->set_to_monomorphic(virtual_call_info)) {
1312 return false;
1313 }
1314 }
1315 } else {
1316 CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
1317 if (ssc->is_clean()) ssc->set(static_call_info);
1318 }
1319 }
1320 } // unlock CompiledICLocker
1321 return true;
1322 }
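// Caller protocol, as a sketch (the real loop is in resolve_sub_helper
// below): a false return means an IC transition stub could not be allocated,
// so the caller must drop all locks, refill the stubs, and retry:
//
//   for (;;) {
//     if (resolve_sub_helper_internal(...)) break;  // patched or skipped
//     InlineCacheBuffer::refill_ic_stubs();         // may safepoint
//   }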
1323
1324 // Resolves a call. The compilers generate code for calls that go here
1325 // and are patched with the real destination of the call.
1326 methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
1327 bool is_virtual,
1328 bool is_optimized, TRAPS) {
1329
1330 ResourceMark rm(thread);
1331 RegisterMap cbl_map(thread, false);
1332 frame caller_frame = thread->last_frame().sender(&cbl_map);
1333
1334 CodeBlob* caller_cb = caller_frame.cb();
1335 guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
1336 CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
1337
1338 // make sure caller is not getting deoptimized
1339 // and removed before we are done with it.
1340 // CLEANUP - with lazy deopt shouldn't need this lock
1341 nmethodLocker caller_lock(caller_nm);
1342
1343 // determine call info & receiver
1344 // note: a) receiver is NULL for static calls
1345 // b) an exception is thrown if receiver is NULL for non-static calls
1346 CallInfo call_info;
1347 Bytecodes::Code invoke_code = Bytecodes::_illegal;
|
1298 p2i(caller_frame.pc()), p2i(callee_method->code()));
1299 }
1300 #endif
1301
1302 // JSR 292 key invariant:
1303 // If the resolved method is a MethodHandle invoke target, the call
1304 // site must be a MethodHandle call site, because the lambda form might tail-call
1305 // leaving the stack in a state unknown to either caller or callee
1306 // TODO detune for now but we might need it again
1307 // assert(!callee_method->is_compiled_lambda_form() ||
1308 // caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
1309
1310 // Compute entry points. This might require generation of C2I converter
1311 // frames, so we cannot be holding any locks here. Furthermore, the
1312 // computation of the entry points is independent of patching the call. We
1313 // always return the entry-point, but we only patch the stub if the call has
1314 // not been deoptimized. Return values: For a virtual call this is a
1315 // (cached_oop, destination address) pair. For a static call/optimized
1316 // virtual this is just a destination address.
1317
1318 bool first_try = true;
1319 for (;;) {
1320 if (!first_try) {
1321 // Patching IC caches may fail if we run out of transition stubs.
1322 // We then refill the IC stubs.
1323 InlineCacheBuffer::refill_ic_stubs();
1324 }
1325 first_try = false;
1326
1327 StaticCallInfo static_call_info;
1328 CompiledICInfo virtual_call_info;
1329
1330 // Make sure the callee nmethod does not get deoptimized and removed before
1331 // we are done patching the code.
1332 CompiledMethod* callee = callee_method->code();
1333
1334 if (callee != NULL) {
1335 assert(callee->is_compiled(), "must be nmethod for patching");
1336 }
1337
1338 if (callee != NULL && !callee->is_in_use()) {
1339 // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
1340 callee = NULL;
1341 }
1342 nmethodLocker nl_callee(callee);
1343 #ifdef ASSERT
1344 address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
1345 #endif
1346
1347 bool is_nmethod = caller_nm->is_nmethod();
1348
1349 if (is_virtual) {
1350 assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
1351 bool static_bound = call_info.resolved_method()->can_be_statically_bound();
1352 Klass* klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass();
1353 CompiledIC::compute_monomorphic_entry(callee_method, klass,
1354 is_optimized, static_bound, is_nmethod, virtual_call_info,
1355 CHECK_(methodHandle()));
1356 } else {
1357 // static call
1358 CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info);
1359 }
1360
1361 // grab lock, check for deoptimization and potentially patch caller
1362 {
1363 CompiledICLocker ml(caller_nm);
1364
1365 // Acquiring the lock may block for a safepoint, during which both
1366 // nmethods can change state.
1367 // Now that we are ready to patch, if the Method* was redefined then
1368 // don't update the call site and let the caller retry.
1369 // Don't update call site if callee nmethod was unloaded or deoptimized.
1370 // Don't update call site if callee nmethod was replaced by another nmethod,
1371 // which may happen when multiple alive nmethods (tiered compilation)
1372 // are supported.
1373 if (!callee_method->is_old() &&
1374 (callee == NULL || (callee->is_in_use() && callee_method->code() == callee))) {
1375 #ifdef ASSERT
1376 // We must not try to patch to jump to an already unloaded method.
1377 if (dest_entry_point != 0) {
1378 CodeBlob* cb = CodeCache::find_blob(dest_entry_point);
1379 assert((cb != NULL) && cb->is_compiled() && (((CompiledMethod*)cb) == callee),
1380 "should not call unloaded nmethod");
1381 }
1382 #endif
1383 if (is_virtual) {
1384 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1385 if (inline_cache->is_clean()) {
1386 if (!inline_cache->set_to_monomorphic(virtual_call_info)) {
1387 continue;
1388 }
1389 }
1390 } else {
1391 CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
1392 if (ssc->is_clean()) ssc->set(static_call_info);
1393 }
1394 }
1395 } // unlock CompiledICLocker
1396 break;
1397 }
1398
1399 return callee_method;
1400 }
1401
1402
1403 // Inline caches exist only in compiled code
1404 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
1405 #ifdef ASSERT
1406 RegisterMap reg_map(thread, false);
1407 frame stub_frame = thread->last_frame();
1408 assert(stub_frame.is_runtime_frame(), "sanity check");
1409 frame caller_frame = stub_frame.sender(&reg_map);
1410 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
1411 #endif /* ASSERT */
1412
1413 methodHandle callee_method;
1414 JRT_BLOCK
1415 callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL);
1416 // Return Method* through TLS
1417 thread->set_vm_result_2(callee_method());
1418 JRT_BLOCK_END
|
1374 p2i(caller_frame.pc()), p2i(callee_method->code()));
1375 }
1376 #endif
1377
1378 // JSR 292 key invariant:
1379 // If the resolved method is a MethodHandle invoke target, the call
1380 // site must be a MethodHandle call site, because the lambda form might tail-call
1381 // leaving the stack in a state unknown to either caller or callee
1382 // TODO detune for now but we might need it again
1383 // assert(!callee_method->is_compiled_lambda_form() ||
1384 // caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
1385
1386 // Compute entry points. This might require generation of C2I converter
1387 // frames, so we cannot be holding any locks here. Furthermore, the
1388 // computation of the entry points is independent of patching the call. We
1389 // always return the entry-point, but we only patch the stub if the call has
1390 // not been deoptimized. Return values: For a virtual call this is a
1391 // (cached_oop, destination address) pair. For a static call/optimized
1392 // virtual this is just a destination address.
1393
1394 // Patching IC caches may fail if we run out of transition stubs.
1395 // We then refill the IC stubs and try again.
1396 for (;;) {
1397 bool successful = resolve_sub_helper_internal(callee_method, caller_frame, caller_nm,
1398 is_virtual, is_optimized, receiver,
1399 call_info, invoke_code, CHECK_(methodHandle()));
1400 if (successful) {
1401 return callee_method;
1402 } else {
1403 InlineCacheBuffer::refill_ic_stubs();
1404 }
1405 }
1406
1407 }
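// The refill is deliberately done out here, after resolve_sub_helper_internal
// has returned and released the CompiledICLocker: refilling the transition
// stubs requires a safepoint, which must not happen under that lock (see the
// comment in handle_ic_miss_helper below).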
1408
1409
1410 // Inline caches exist only in compiled code
1411 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
1412 #ifdef ASSERT
1413 RegisterMap reg_map(thread, false);
1414 frame stub_frame = thread->last_frame();
1415 assert(stub_frame.is_runtime_frame(), "sanity check");
1416 frame caller_frame = stub_frame.sender(&reg_map);
1417 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
1418 #endif /* ASSERT */
1419
1420 methodHandle callee_method;
1421 JRT_BLOCK
1422 callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL);
1423 // Return Method* through TLS
1424 thread->set_vm_result_2(callee_method());
1425 JRT_BLOCK_END
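// "Return Method* through TLS": set_vm_result_2() is the JavaThread slot used
// for metadata results (set_vm_result() is the oop counterpart); it keeps the
// callee Method* available to the calling stub after JRT_BLOCK_END, where
// safepoint checks may occur.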
|
1513 JRT_BLOCK_END
1514 // return compiled code entry point after potential safepoints
1515 assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1516 return callee_method->verified_code_entry();
1517 JRT_END
1518
1519
1520 // Resolve a virtual call that can be statically bound (e.g., always
1521 // monomorphic, so it has no inline cache). Patch code to resolved target.
1522 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread))
1523 methodHandle callee_method;
1524 JRT_BLOCK
1525 callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL);
1526 thread->set_vm_result_2(callee_method());
1527 JRT_BLOCK_END
1528 // return compiled code entry point after potential safepoints
1529 assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1530 return callee_method->verified_code_entry();
1531 JRT_END
1532
1533
1534
1535 methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
1536 ResourceMark rm(thread);
1537 CallInfo call_info;
1538 Bytecodes::Code bc;
1539
1540 // receiver is NULL for static calls. An exception is thrown for NULL
1541 // receivers for non-static calls
1542 Handle receiver = find_callee_info(thread, bc, call_info,
1543 CHECK_(methodHandle()));
1544 // Compiler1 can produce virtual call sites that can actually be statically bound.
1545 // If we fell through to below, we would think that the site was going megamorphic
1546 // when in fact the site can never miss. Worse, because we'd think it was megamorphic,
1547 // we'd try to do a vtable dispatch; however, methods that can be statically bound
1548 // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
1549 // reresolution of the call site (as if we did a handle_wrong_method and not a
1550 // plain ic_miss) and the site will be converted to an optimized virtual call site,
1551 // never to miss again. I don't believe C2 will produce code like this, but if it
1552 // did, this would still be the correct thing to do for it too, hence no ifdef.
|
1520 JRT_BLOCK_END
1521 // return compiled code entry point after potential safepoints
1522 assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1523 return callee_method->verified_code_entry();
1524 JRT_END
1525
1526
1527 // Resolve a virtual call that can be statically bound (e.g., always
1528 // monomorphic, so it has no inline cache). Patch code to resolved target.
1529 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread))
1530 methodHandle callee_method;
1531 JRT_BLOCK
1532 callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL);
1533 thread->set_vm_result_2(callee_method());
1534 JRT_BLOCK_END
1535 // return compiled code entry point after potential safepoints
1536 assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1537 return callee_method->verified_code_entry();
1538 JRT_END
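// Both flags are true here: an optimized virtual call dispatches like a
// static call (is_optimized) while keeping virtual-call semantics
// (is_virtual). The sibling resolve entry points, not shown in this hunk,
// pass the other flag combinations into the same resolve_helper.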
1539
1540 // The handle_ic_miss_helper_internal function returns false if it failed,
1541 // either because it ran out of vtable stubs or because it ran out of IC
1542 // stubs while transitioning ICs to transitional states. The
1543 // needs_ic_stub_refill value is set if the failure was due to running out of
1544 // IC stubs, in which case handle_ic_miss_helper refills the IC stubs and tries again.
1545 bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm,
1546 const frame& caller_frame, methodHandle callee_method,
1547 Bytecodes::Code bc, CallInfo& call_info,
1548 bool& needs_ic_stub_refill, TRAPS) {
1549 CompiledICLocker ml(caller_nm);
1550 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1551 bool should_be_mono = false;
1552 if (inline_cache->is_optimized()) {
1553 if (TraceCallFixup) {
1554 ResourceMark rm(THREAD);
1555 tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
1556 callee_method->print_short_name(tty);
1557 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1558 }
1559 should_be_mono = true;
1560 } else if (inline_cache->is_icholder_call()) {
1561 CompiledICHolder* ic_oop = inline_cache->cached_icholder();
1562 if (ic_oop != NULL) {
1563 if (!ic_oop->is_loader_alive()) {
1564 // Deferred IC cleaning due to concurrent class unloading
1565 if (!inline_cache->set_to_clean()) {
1566 needs_ic_stub_refill = true;
1567 return false;
1568 }
1569 } else if (receiver()->klass() == ic_oop->holder_klass()) {
1570 // This isn't a real miss. We must have seen that compiled code
1571 // is now available and we want the call site converted to a
1572 // monomorphic compiled call site.
1573 // We can't assert for callee_method->code() != NULL because it
1574 // could have been deoptimized in the meantime
1575 if (TraceCallFixup) {
1576 ResourceMark rm(THREAD);
1577 tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
1578 callee_method->print_short_name(tty);
1579 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1580 }
1581 should_be_mono = true;
1582 }
1583 }
1584 }
1585
1586 if (should_be_mono) {
1587 // We have a path that was monomorphic but was going interpreted
1588 // and now we have (or had) a compiled entry. We correct the IC
1589 // by using a new icBuffer.
1590 CompiledICInfo info;
1591 Klass* receiver_klass = receiver()->klass();
1592 inline_cache->compute_monomorphic_entry(callee_method,
1593 receiver_klass,
1594 inline_cache->is_optimized(),
1595 false, caller_nm->is_nmethod(),
1596 info, CHECK_false);
1597 if (!inline_cache->set_to_monomorphic(info)) {
1598 needs_ic_stub_refill = true;
1599 return false;
1600 }
1601 } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1602 // Potential change to megamorphic
1603
1604 bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, CHECK_false);
1605 if (!successful) {
1606 if (!needs_ic_stub_refill) {
1607 return false;
1608 }
1609 if (!inline_cache->set_to_clean()) {
1610 needs_ic_stub_refill = true;
1611 return false;
1612 }
1613 }
1614 } else {
1615 // Either clean or megamorphic
1616 }
1617 return true;
1618 }
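// Taken together, the branches above drive the inline-cache state machine:
//   clean/interpreted -> monomorphic   (the should_be_mono paths)
//   monomorphic       -> megamorphic   (set_to_megamorphic)
//   stale icholder    -> clean         (deferred cleaning after unloading)
// Any of these transitions may need a transition stub; when none can be
// allocated, the helper returns false with needs_ic_stub_refill set so the
// caller can refill outside the CompiledICLocker and retry.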
1619
1620 methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
1621 ResourceMark rm(thread);
1622 CallInfo call_info;
1623 Bytecodes::Code bc;
1624
1625 // receiver is NULL for static calls. An exception is thrown for NULL
1626 // receivers for non-static calls
1627 Handle receiver = find_callee_info(thread, bc, call_info,
1628 CHECK_(methodHandle()));
1629 // Compiler1 can produce virtual call sites that can actually be statically bound.
1630 // If we fell through to below, we would think that the site was going megamorphic
1631 // when in fact the site can never miss. Worse, because we'd think it was megamorphic,
1632 // we'd try to do a vtable dispatch; however, methods that can be statically bound
1633 // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
1634 // reresolution of the call site (as if we did a handle_wrong_method and not a
1635 // plain ic_miss) and the site will be converted to an optimized virtual call site,
1636 // never to miss again. I don't believe C2 will produce code like this, but if it
1637 // did, this would still be the correct thing to do for it too, hence no ifdef.
|
1578 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1579 }
1580
1581 if (ICMissHistogram) {
1582 MutexLocker m(VMStatistic_lock);
1583 RegisterMap reg_map(thread, false);
1584 frame f = thread->last_frame().real_sender(&reg_map); // skip runtime stub
1585 // produce statistics under the lock
1586 trace_ic_miss(f.pc());
1587 }
1588 #endif
1589
1590 // install an event collector so that when a vtable stub is created the
1591 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1592 // event can't be posted when the stub is created as locks are held
1593 // - instead the event will be deferred until the event collector goes
1594 // out of scope.
1595 JvmtiDynamicCodeEventCollector event_collector;
1596
1597 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1598 bool first_try = true;
1599 for (;;) {
1600 if (!first_try) {
1601 // Transitioning IC caches may require transition stubs. If we run out
1602 // of transition stubs, we have to drop locks and perform a safepoint
1603 // that refills them.
1604 InlineCacheBuffer::refill_ic_stubs();
1605 }
1606 first_try = false;
1607 RegisterMap reg_map(thread, false);
1608 frame caller_frame = thread->last_frame().sender(&reg_map);
1609 CodeBlob* cb = caller_frame.cb();
1610 CompiledMethod* caller_nm = cb->as_compiled_method_or_null();
1611 CompiledICLocker ml(caller_nm);
1612
1613 if (!cb->is_compiled()) {
1614 Unimplemented();
1615 }
1616 CompiledIC* inline_cache = CompiledIC_before(((CompiledMethod*)cb), caller_frame.pc());
1617 bool should_be_mono = false;
1618 if (inline_cache->is_optimized()) {
1619 if (TraceCallFixup) {
1620 ResourceMark rm(thread);
1621 tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
1622 callee_method->print_short_name(tty);
1623 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1624 }
1625 should_be_mono = true;
1626 } else if (inline_cache->is_icholder_call()) {
1627 CompiledICHolder* ic_oop = inline_cache->cached_icholder();
1628 if (ic_oop != NULL) {
1629 if (!ic_oop->is_loader_alive()) {
1630 // Deferred IC cleaning due to concurrent class unloading
1631 inline_cache->set_to_clean();
1632 } else if (receiver()->klass() == ic_oop->holder_klass()) {
1633 // This isn't a real miss. We must have seen that compiled code
1634 // is now available and we want the call site converted to a
1635 // monomorphic compiled call site.
1636 // We can't assert for callee_method->code() != NULL because it
1637 // could have been deoptimized in the meantime
1638 if (TraceCallFixup) {
1639 ResourceMark rm(thread);
1640 tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
1641 callee_method->print_short_name(tty);
1642 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1643 }
1644 should_be_mono = true;
1645 }
1646 }
1647 }
1648
1649 if (should_be_mono) {
1650 // We have a path that was monomorphic but was going interpreted
1651 // and now we have (or had) a compiled entry. We correct the IC
1652 // by using a new icBuffer.
1653 CompiledICInfo info;
1654 Klass* receiver_klass = receiver()->klass();
1655 inline_cache->compute_monomorphic_entry(callee_method,
1656 receiver_klass,
1657 inline_cache->is_optimized(),
1658 false, caller_nm->is_nmethod(),
1659 info, CHECK_(methodHandle()));
1660 if (!inline_cache->set_to_monomorphic(info)) {
1661 continue;
1662 }
1663 } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1664 // Potential change to megamorphic
1665 bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
1666 if (!successful) {
1667 if (!inline_cache->set_to_clean()) {
1668 continue;
1669 }
1670 }
1671 } else {
1672 // Either clean or megamorphic
1673 }
1674 break;
1675 } // Release CompiledICLocker
1676
1677 return callee_method;
1678 }
1679
1680 //
1681 // Resets a call-site in compiled code so it will get resolved again.
1682 // This routine handles virtual call sites, optimized virtual call
1683 // sites, and static call sites. Typically used to change a call site's
1684 // destination from compiled to interpreted.
1685 //
1686 methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
1687 ResourceMark rm(thread);
1688 RegisterMap reg_map(thread, false);
1689 frame stub_frame = thread->last_frame();
1690 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1691 frame caller = stub_frame.sender(&reg_map);
1692
1693 // Do nothing if the frame isn't a live compiled frame.
1694 // nmethod could be deoptimized by the time we get here
1695 // so no update to the caller is needed.
1696
|
1663 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1664 }
1665
1666 if (ICMissHistogram) {
1667 MutexLocker m(VMStatistic_lock);
1668 RegisterMap reg_map(thread, false);
1669 frame f = thread->last_frame().real_sender(&reg_map); // skip runtime stub
1670 // produce statistics under the lock
1671 trace_ic_miss(f.pc());
1672 }
1673 #endif
1674
1675 // install an event collector so that when a vtable stub is created the
1676 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1677 // event can't be posted when the stub is created as locks are held
1678 // - instead the event will be deferred until the event collector goes
1679 // out of scope.
1680 JvmtiDynamicCodeEventCollector event_collector;
1681
1682 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1683 // Transitioning IC caches may require transition stubs. If we run out
1684 // of transition stubs, we have to drop locks and perform a safepoint
1685 // that refills them.
1686 RegisterMap reg_map(thread, false);
1687 frame caller_frame = thread->last_frame().sender(&reg_map);
1688 CodeBlob* cb = caller_frame.cb();
1689 CompiledMethod* caller_nm = cb->as_compiled_method();
1690
1691 for (;;) {
1692 bool needs_ic_stub_refill = false;
1693 bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method,
1694 bc, call_info, needs_ic_stub_refill, CHECK_(methodHandle()));
1695 if (successful) {
1696 return callee_method;
1697 } else {
1698 if (needs_ic_stub_refill) {
1699 InlineCacheBuffer::refill_ic_stubs();
1700 }
1701 }
1702 }
1703 }
1704
1705 static bool clear_ic_at_addr(CompiledMethod* caller_nm, address call_addr, bool is_static_call) {
1706 CompiledICLocker ml(caller_nm);
1707 if (is_static_call) {
1708 CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
1709 if (!ssc->is_clean()) {
1710 return ssc->set_to_clean();
1711 }
1712 } else {
1713 // compiled, dispatched call (which used to call an interpreted method)
1714 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1715 if (!inline_cache->is_clean()) {
1716 return inline_cache->set_to_clean();
1717 }
1718 }
1719 return true;
1720 }
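// clear_ic_at_addr() returns false only when set_to_clean() cannot allocate
// a transition stub; reresolve_call_site (below) loops, calling
// InlineCacheBuffer::refill_ic_stubs() between attempts, until the clean
// transition succeeds.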
1721
1722 //
1723 // Resets a call-site in compiled code so it will get resolved again.
1724 // This routine handles virtual call sites, optimized virtual call
1725 // sites, and static call sites. Typically used to change a call site's
1726 // destination from compiled to interpreted.
1727 //
1728 methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
1729 ResourceMark rm(thread);
1730 RegisterMap reg_map(thread, false);
1731 frame stub_frame = thread->last_frame();
1732 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1733 frame caller = stub_frame.sender(&reg_map);
1734
1735 // Do nothing if the frame isn't a live compiled frame.
1736 // nmethod could be deoptimized by the time we get here
1737 // so no update to the caller is needed.
1738
|
1739 if (ret) {
1740 assert(iter.addr() == call_addr, "must find call");
1741 if (iter.type() == relocInfo::static_call_type) {
1742 is_static_call = true;
1743 } else {
1744 assert(iter.type() == relocInfo::virtual_call_type ||
1745 iter.type() == relocInfo::opt_virtual_call_type
1746 , "unexpected relocInfo type");
1747 }
1748 } else {
1749 assert(!UseInlineCaches, "relocation info must exist for this address");
1750 }
1751
1752 // Cleaning the inline cache will force a new resolve. This is more robust
1753 // than directly setting it to the new destination, since resolving of calls
1754 // is always done through the same code path. (Experience shows that it
1755 // leads to very hard to track down bugs if an inline cache gets updated
1756 // to a wrong method.) It should not be performance critical, since the
1757 // resolve is only done once.
1758
1759 CompiledICLocker ml(caller_nm);
1760 if (is_static_call) {
1761 CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
1762 if (!ssc->is_clean()) {
1763 ssc->set_to_clean();
1764 }
1765 } else {
1766 // compiled, dispatched call (which used to call an interpreted method)
1767 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1768 if (!inline_cache->is_clean()) {
1769 inline_cache->set_to_clean();
1770 }
1771 }
1772 }
1773 }
1774
1775 methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));
1776
1777
1778 #ifndef PRODUCT
1779 Atomic::inc(&_wrong_method_ctr);
1780
1781 if (TraceCallFixup) {
1782 ResourceMark rm(thread);
1783 tty->print("handle_wrong_method reresolving call to");
1784 callee_method->print_short_name(tty);
1785 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1786 }
1787 #endif
1788
|
1781 if (ret) {
1782 assert(iter.addr() == call_addr, "must find call");
1783 if (iter.type() == relocInfo::static_call_type) {
1784 is_static_call = true;
1785 } else {
1786 assert(iter.type() == relocInfo::virtual_call_type ||
1787 iter.type() == relocInfo::opt_virtual_call_type
1788 , "unexpected relocInfo type");
1789 }
1790 } else {
1791 assert(!UseInlineCaches, "relocation info must exist for this address");
1792 }
1793
1794 // Cleaning the inline cache will force a new resolve. This is more robust
1795 // than directly setting it to the new destination, since resolving of calls
1796 // is always done through the same code path. (Experience shows that it
1797 // leads to very hard to track down bugs if an inline cache gets updated
1798 // to a wrong method.) It should not be performance critical, since the
1799 // resolve is only done once.
1800
1801 for (;;) {
1802 if (!clear_ic_at_addr(caller_nm, call_addr, is_static_call)) {
1803 InlineCacheBuffer::refill_ic_stubs();
1804 } else {
1805 break;
1806 }
1807 }
1808 }
1809 }
1810
1811 methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));
1812
1813
1814 #ifndef PRODUCT
1815 Atomic::inc(&_wrong_method_ctr);
1816
1817 if (TraceCallFixup) {
1818 ResourceMark rm(thread);
1819 tty->print("handle_wrong_method reresolving call to");
1820 callee_method->print_short_name(tty);
1821 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1822 }
1823 #endif
1824
|