1377
1378 // JSR 292 key invariant:
1379 // If the resolved method is a MethodHandle invoke target, the call
1380 // site must be a MethodHandle call site, because the lambda form might tail-call
1381 // leaving the stack in a state unknown to either caller or callee
1382 // TODO detune for now but we might need it again
1383 // assert(!callee_method->is_compiled_lambda_form() ||
1384 // caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
1385
1386 // Compute entry points. This might require generation of C2I converter
1387 // frames, so we cannot be holding any locks here. Furthermore, the
1388 // computation of the entry points is independent of patching the call. We
1389 // always return the entry-point, but we only patch the stub if the call has
1390 // not been deoptimized. Return values: For a virtual call this is an
1391 // (cached_oop, destination address) pair. For a static call/optimized
1392 // virtual this is just a destination address.
1393
1394   // Patching IC caches may fail if we run out of transition stubs.
1395 // We refill the ic stubs then and try again.
1396 for (;;) {
1397 bool successful = resolve_sub_helper_internal(callee_method, caller_frame, caller_nm,
1398 is_virtual, is_optimized, receiver,
1399 call_info, invoke_code, CHECK_(methodHandle()));
1400 if (successful) {
1401 return callee_method;
1402 } else {
1403 InlineCacheBuffer::refill_ic_stubs();
1404 }
1405 }
1406
1407 }
1408
1409
1410 // Inline caches exist only in compiled code
1411 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
1412 #ifdef ASSERT
1413 RegisterMap reg_map(thread, false);
1414 frame stub_frame = thread->last_frame();
1415 assert(stub_frame.is_runtime_frame(), "sanity check");
|
1377
1378 // JSR 292 key invariant:
1379 // If the resolved method is a MethodHandle invoke target, the call
1380 // site must be a MethodHandle call site, because the lambda form might tail-call
1381 // leaving the stack in a state unknown to either caller or callee
1382 // TODO detune for now but we might need it again
1383 // assert(!callee_method->is_compiled_lambda_form() ||
1384 // caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
1385
1386 // Compute entry points. This might require generation of C2I converter
1387 // frames, so we cannot be holding any locks here. Furthermore, the
1388 // computation of the entry points is independent of patching the call. We
1389 // always return the entry-point, but we only patch the stub if the call has
1390 // not been deoptimized. Return values: For a virtual call this is an
1391 // (cached_oop, destination address) pair. For a static call/optimized
1392 // virtual this is just a destination address.
1393
1394   // Patching IC caches may fail if we run out of transition stubs.
1395 // We refill the ic stubs then and try again.
1396 for (;;) {
1397 DEBUG_ONLY(ICRefillVerifier ic_refill_verifier;)
1398 bool successful = resolve_sub_helper_internal(callee_method, caller_frame, caller_nm,
1399 is_virtual, is_optimized, receiver,
1400 call_info, invoke_code, CHECK_(methodHandle()));
1401 if (successful) {
1402 return callee_method;
1403 } else {
1404 InlineCacheBuffer::refill_ic_stubs();
1405 }
1406 }
1407
1408 }
1409
1410
1411 // Inline caches exist only in compiled code
1412 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
1413 #ifdef ASSERT
1414 RegisterMap reg_map(thread, false);
1415 frame stub_frame = thread->last_frame();
1416 assert(stub_frame.is_runtime_frame(), "sanity check");
|
1585
1586 if (should_be_mono) {
1587 // We have a path that was monomorphic but was going interpreted
1588 // and now we have (or had) a compiled entry. We correct the IC
1589 // by using a new icBuffer.
1590 CompiledICInfo info;
1591 Klass* receiver_klass = receiver()->klass();
1592 inline_cache->compute_monomorphic_entry(callee_method,
1593 receiver_klass,
1594 inline_cache->is_optimized(),
1595 false, caller_nm->is_nmethod(),
1596 info, CHECK_false);
1597 if (!inline_cache->set_to_monomorphic(info)) {
1598 needs_ic_stub_refill = true;
1599 return false;
1600 }
1601 } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1602 // Potential change to megamorphic
1603
1604 bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, CHECK_false);
1605 if (!successful) {
1606 if (!needs_ic_stub_refill) {
1607 return false;
1608 }
1609 if (!inline_cache->set_to_clean()) {
1610 needs_ic_stub_refill = true;
1611 return false;
1612 }
1613 }
1614 } else {
1615 // Either clean or megamorphic
1616 }
1617 return true;
1618 }
1619
1620 methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
1621 ResourceMark rm(thread);
1622 CallInfo call_info;
1623 Bytecodes::Code bc;
1624
1625 // receiver is NULL for static calls. An exception is thrown for NULL
1626 // receivers for non-static calls
1627 Handle receiver = find_callee_info(thread, bc, call_info,
|
1586
1587 if (should_be_mono) {
1588 // We have a path that was monomorphic but was going interpreted
1589 // and now we have (or had) a compiled entry. We correct the IC
1590 // by using a new icBuffer.
1591 CompiledICInfo info;
1592 Klass* receiver_klass = receiver()->klass();
1593 inline_cache->compute_monomorphic_entry(callee_method,
1594 receiver_klass,
1595 inline_cache->is_optimized(),
1596 false, caller_nm->is_nmethod(),
1597 info, CHECK_false);
1598 if (!inline_cache->set_to_monomorphic(info)) {
1599 needs_ic_stub_refill = true;
1600 return false;
1601 }
1602 } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1603 // Potential change to megamorphic
1604
1605 bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, CHECK_false);
1606 if (needs_ic_stub_refill) {
1607 return false;
1608 }
1609 if (!successful) {
1610 if (!inline_cache->set_to_clean()) {
1611 needs_ic_stub_refill = true;
1612 return false;
1613 }
1614 }
1615 } else {
1616 // Either clean or megamorphic
1617 }
1618 return true;
1619 }
1620
1621 methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
1622 ResourceMark rm(thread);
1623 CallInfo call_info;
1624 Bytecodes::Code bc;
1625
1626 // receiver is NULL for static calls. An exception is thrown for NULL
1627 // receivers for non-static calls
1628 Handle receiver = find_callee_info(thread, bc, call_info,
|
1672 }
1673 #endif
1674
1675 // install an event collector so that when a vtable stub is created the
1676 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1677 // event can't be posted when the stub is created as locks are held
1678 // - instead the event will be deferred until the event collector goes
1679 // out of scope.
1680 JvmtiDynamicCodeEventCollector event_collector;
1681
1682 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1683 // Transitioning IC caches may require transition stubs. If we run out
1684 // of transition stubs, we have to drop locks and perform a safepoint
1685 // that refills them.
1686 RegisterMap reg_map(thread, false);
1687   frame caller_frame = thread->last_frame().sender(&reg_map);
1688 CodeBlob* cb = caller_frame.cb();
1689 CompiledMethod* caller_nm = cb->as_compiled_method();
1690
1691 for (;;) {
1692 bool needs_ic_stub_refill = false;
1693 bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method,
1694 bc, call_info, needs_ic_stub_refill, CHECK_(methodHandle()));
1695 if (successful || !needs_ic_stub_refill) {
1696 return callee_method;
1697 } else {
1698 InlineCacheBuffer::refill_ic_stubs();
1699 }
1700 }
1701 }
1702
1703 static bool clear_ic_at_addr(CompiledMethod* caller_nm, address call_addr, bool is_static_call) {
1704 CompiledICLocker ml(caller_nm);
1705 if (is_static_call) {
1706 CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
1707 if (!ssc->is_clean()) {
1708 return ssc->set_to_clean();
1709 }
1710 } else {
|
1673 }
1674 #endif
1675
1676 // install an event collector so that when a vtable stub is created the
1677 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1678 // event can't be posted when the stub is created as locks are held
1679 // - instead the event will be deferred until the event collector goes
1680 // out of scope.
1681 JvmtiDynamicCodeEventCollector event_collector;
1682
1683 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1684 // Transitioning IC caches may require transition stubs. If we run out
1685 // of transition stubs, we have to drop locks and perform a safepoint
1686 // that refills them.
1687 RegisterMap reg_map(thread, false);
1688   frame caller_frame = thread->last_frame().sender(&reg_map);
1689 CodeBlob* cb = caller_frame.cb();
1690 CompiledMethod* caller_nm = cb->as_compiled_method();
1691
1692 for (;;) {
1693 DEBUG_ONLY(ICRefillVerifier ic_refill_verifier;)
1694 bool needs_ic_stub_refill = false;
1695 bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method,
1696 bc, call_info, needs_ic_stub_refill, CHECK_(methodHandle()));
1697 if (successful || !needs_ic_stub_refill) {
1698 return callee_method;
1699 } else {
1700 InlineCacheBuffer::refill_ic_stubs();
1701 }
1702 }
1703 }
1704
1705 static bool clear_ic_at_addr(CompiledMethod* caller_nm, address call_addr, bool is_static_call) {
1706 CompiledICLocker ml(caller_nm);
1707 if (is_static_call) {
1708 CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
1709 if (!ssc->is_clean()) {
1710 return ssc->set_to_clean();
1711 }
1712 } else {
|
1780 assert(iter.addr() == call_addr, "must find call");
1781 if (iter.type() == relocInfo::static_call_type) {
1782 is_static_call = true;
1783 } else {
1784 assert(iter.type() == relocInfo::virtual_call_type ||
1785 iter.type() == relocInfo::opt_virtual_call_type
1786 , "unexpected relocInfo. type");
1787 }
1788 } else {
1789 assert(!UseInlineCaches, "relocation info. must exist for this address");
1790 }
1791
1792 // Cleaning the inline cache will force a new resolve. This is more robust
1793 // than directly setting it to the new destination, since resolving of calls
1794 // is always done through the same code path. (experience shows that it
1795 // leads to very hard to track down bugs, if an inline cache gets updated
1796 // to a wrong method). It should not be performance critical, since the
1797 // resolve is only done once.
1798
1799 for (;;) {
1800 if (!clear_ic_at_addr(caller_nm, call_addr, is_static_call)) {
1801 InlineCacheBuffer::refill_ic_stubs();
1802 } else {
1803 break;
1804 }
1805 }
1806 }
1807 }
1808
1809 methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));
1810
1811
1812 #ifndef PRODUCT
1813 Atomic::inc(&_wrong_method_ctr);
1814
1815 if (TraceCallFixup) {
1816 ResourceMark rm(thread);
1817 tty->print("handle_wrong_method reresolving call to");
1818 callee_method->print_short_name(tty);
|
1782 assert(iter.addr() == call_addr, "must find call");
1783 if (iter.type() == relocInfo::static_call_type) {
1784 is_static_call = true;
1785 } else {
1786 assert(iter.type() == relocInfo::virtual_call_type ||
1787 iter.type() == relocInfo::opt_virtual_call_type
1788 , "unexpected relocInfo. type");
1789 }
1790 } else {
1791 assert(!UseInlineCaches, "relocation info. must exist for this address");
1792 }
1793
1794 // Cleaning the inline cache will force a new resolve. This is more robust
1795 // than directly setting it to the new destination, since resolving of calls
1796 // is always done through the same code path. (experience shows that it
1797 // leads to very hard to track down bugs, if an inline cache gets updated
1798 // to a wrong method). It should not be performance critical, since the
1799 // resolve is only done once.
1800
1801 for (;;) {
1802 DEBUG_ONLY(ICRefillVerifier ic_refill_verifier;)
1803 if (!clear_ic_at_addr(caller_nm, call_addr, is_static_call)) {
1804 InlineCacheBuffer::refill_ic_stubs();
1805 } else {
1806 break;
1807 }
1808 }
1809 }
1810 }
1811
1812 methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));
1813
1814
1815 #ifndef PRODUCT
1816 Atomic::inc(&_wrong_method_ctr);
1817
1818 if (TraceCallFixup) {
1819 ResourceMark rm(thread);
1820 tty->print("handle_wrong_method reresolving call to");
1821 callee_method->print_short_name(tty);
|