src/hotspot/share/runtime/sharedRuntime.cpp

  27 #include "aot/aotLoader.hpp"
  28 #include "classfile/stringTable.hpp"
  29 #include "classfile/systemDictionary.hpp"
  30 #include "classfile/vmSymbols.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/compiledIC.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/compiledMethod.inline.hpp"
  35 #include "code/scopeDesc.hpp"
  36 #include "code/vtableStubs.hpp"
  37 #include "compiler/abstractCompiler.hpp"
  38 #include "compiler/compileBroker.hpp"
  39 #include "compiler/disassembler.hpp"
  40 #include "gc/shared/barrierSet.hpp"
  41 #include "gc/shared/gcLocker.inline.hpp"
  42 #include "interpreter/interpreter.hpp"
  43 #include "interpreter/interpreterRuntime.hpp"
  44 #include "jfr/jfrEvents.hpp"
  45 #include "logging/log.hpp"
  46 #include "memory/metaspaceShared.hpp"

  47 #include "memory/resourceArea.hpp"
  48 #include "memory/universe.hpp"


  49 #include "oops/klass.hpp"
  50 #include "oops/method.inline.hpp"
  51 #include "oops/objArrayKlass.hpp"

  52 #include "oops/oop.inline.hpp"

  53 #include "prims/forte.hpp"
  54 #include "prims/jvmtiExport.hpp"
  55 #include "prims/methodHandles.hpp"
  56 #include "prims/nativeLookup.hpp"
  57 #include "runtime/arguments.hpp"
  58 #include "runtime/atomic.hpp"
  59 #include "runtime/biasedLocking.hpp"
  60 #include "runtime/compilationPolicy.hpp"
  61 #include "runtime/frame.inline.hpp"
  62 #include "runtime/handles.inline.hpp"
  63 #include "runtime/init.hpp"
  64 #include "runtime/interfaceSupport.inline.hpp"
  65 #include "runtime/java.hpp"
  66 #include "runtime/javaCalls.hpp"
  67 #include "runtime/sharedRuntime.hpp"
  68 #include "runtime/stubRoutines.hpp"
  69 #include "runtime/vframe.inline.hpp"
  70 #include "runtime/vframeArray.hpp"
  71 #include "utilities/copy.hpp"
  72 #include "utilities/dtrace.hpp"


1099       switch (bc) {
1100         case Bytecodes::_invokevirtual:
1101           if (attached_method->method_holder()->is_interface()) {
1102             bc = Bytecodes::_invokeinterface;
1103           }
1104           break;
1105         case Bytecodes::_invokeinterface:
1106           if (!attached_method->method_holder()->is_interface()) {
1107             bc = Bytecodes::_invokevirtual;
1108           }
1109           break;
1110         case Bytecodes::_invokehandle:
1111           if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1112             bc = attached_method->is_static() ? Bytecodes::_invokestatic
1113                                               : Bytecodes::_invokevirtual;
1114           }
1115           break;
1116         default:
1117           break;
1118       }
1119     }
1120   }
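// [Editorial note] The attached Method* recorded by the JIT at a call site
// (e.g. for method handle linker calls) is authoritative, but the bytecode
// found at the site may disagree with the target's holder. The switch above
// normalizes bc before resolution: an _invokevirtual whose target is declared
// in an interface becomes _invokeinterface (and vice versa), and an
// _invokehandle whose target is not signature-polymorphic falls back to
// _invokestatic or _invokevirtual, so that LinkResolver below takes the
// matching resolution path.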
1121 
1122   assert(bc != Bytecodes::_illegal, "not initialized");
1123 
1124   bool has_receiver = bc != Bytecodes::_invokestatic &&
1125                       bc != Bytecodes::_invokedynamic &&
1126                       bc != Bytecodes::_invokehandle;
1127 
1128   // Find receiver for non-static call
1129   if (has_receiver) {
1130     // This register map must be updated since we need to find the receiver for
1131     // compiled frames. The receiver might be in a register.
1132     RegisterMap reg_map2(thread);
1133     frame stubFrame   = thread->last_frame();
1134     // Caller-frame is a compiled frame
1135     frame callerFrame = stubFrame.sender(&reg_map2);
1136 
1137     if (attached_method.is_null()) {
1138       methodHandle callee = bytecode.static_target(CHECK_NH);

1139       if (callee.is_null()) {
1140         THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1141       }
1142     }
1143 
1144     // Retrieve from a compiled argument list
1145     receiver = Handle(THREAD, callerFrame.retrieve_receiver(&reg_map2));
1146 
1147     if (receiver.is_null()) {
1148       THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1149     }
1150   }

1151 
1152   // Resolve method
1153   if (attached_method.not_null()) {
1154     // Parameterized by attached method.
1155     LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, CHECK_NH);
1156   } else {
1157     // Parameterized by bytecode.
1158     constantPoolHandle constants(THREAD, caller->constants());
1159     LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1160   }
1161 
1162 #ifdef ASSERT
1163   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1164   if (has_receiver) {
1165     assert(receiver.not_null(), "should have thrown exception");
1166     Klass* receiver_klass = receiver->klass();
1167     Klass* rk = NULL;
1168     if (attached_method.not_null()) {
1169       // In case there's resolved method attached, use its holder during the check.
1170       rk = attached_method->method_holder();


1256   // Make sure the callee nmethod does not get deoptimized and removed before
1257   // we are done patching the code.
1258   CompiledMethod* callee = callee_method->code();
1259 
1260   if (callee != NULL) {
1261     assert(callee->is_compiled(), "must be nmethod for patching");
1262   }
1263 
1264   if (callee != NULL && !callee->is_in_use()) {
1265     // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
1266     callee = NULL;
1267   }
1268   nmethodLocker nl_callee(callee);
1269 #ifdef ASSERT
1270   address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
1271 #endif
1272 
1273   bool is_nmethod = caller_nm->is_nmethod();
1274 
1275   if (is_virtual) {
1276     assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
1277     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
1278     Klass* klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass();
1279     CompiledIC::compute_monomorphic_entry(callee_method, klass,
1280                      is_optimized, static_bound, is_nmethod, virtual_call_info,
1281                      CHECK_false);
1282   } else {
1283     // static call
1284     CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info);
1285   }
1286 
1287   // grab lock, check for deoptimization and potentially patch caller
1288   {
1289     CompiledICLocker ml(caller_nm);
1290 
1291     // Lock blocks for safepoint during which both nmethods can change state.
1292 
1293     // Now that we are ready to patch if the Method* was redefined then
1294     // don't update call site and let the caller retry.
1295     // Don't update call site if callee nmethod was unloaded or deoptimized.
1296     // Don't update call site if callee nmethod was replaced by another nmethod,
1297     // which may happen when multiple alive nmethods (tiered compilation)
1298     // are supported.
1299     if (!callee_method->is_old() &&


1324 
1325 // Resolves a call.  The compilers generate code for calls that go here
1326 // and are patched with the real destination of the call.
1327 methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
1328                                                bool is_virtual,
1329                                                bool is_optimized, TRAPS) {
1330 
1331   ResourceMark rm(thread);
1332   RegisterMap cbl_map(thread, false);
1333   frame caller_frame = thread->last_frame().sender(&cbl_map);
1334 
1335   CodeBlob* caller_cb = caller_frame.cb();
1336   guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
1337   CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
1338 
1339   // make sure caller is not getting deoptimized
1340   // and removed before we are done with it.
1341   // CLEANUP - with lazy deopt shouldn't need this lock
1342   nmethodLocker caller_lock(caller_nm);
1343 
1344   // determine call info & receiver
1345   // note: a) receiver is NULL for static calls
1346   //       b) an exception is thrown if receiver is NULL for non-static calls
1347   CallInfo call_info;
1348   Bytecodes::Code invoke_code = Bytecodes::_illegal;
1349   Handle receiver = find_callee_info(thread, invoke_code,
1350                                      call_info, CHECK_(methodHandle()));
1351   methodHandle callee_method = call_info.selected_method();
1352 
1353   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1354          (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1355          (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1356          (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1357          ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1358 
1359   assert(caller_nm->is_alive() && !caller_nm->is_unloading(), "It should be alive");
1360 
1361 #ifndef PRODUCT
1362   // tracing/debugging/statistics
1363   int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :


1411       return callee_method;
1412     } else {
1413       InlineCacheBuffer::refill_ic_stubs();
1414     }
1415   }
1416 
1417 }
1418 
1419 
1420 // Inline caches exist only in compiled code
1421 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
1422 #ifdef ASSERT
1423   RegisterMap reg_map(thread, false);
1424   frame stub_frame = thread->last_frame();
1425   assert(stub_frame.is_runtime_frame(), "sanity check");
1426   frame caller_frame = stub_frame.sender(&reg_map);
1427   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
1428 #endif /* ASSERT */
1429 
1430   methodHandle callee_method;

1431   JRT_BLOCK
1432     callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL);
1433     // Return Method* through TLS
1434     thread->set_vm_result_2(callee_method());
1435   JRT_BLOCK_END
1436   // return compiled code entry point after potential safepoints
1437   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1438   return callee_method->verified_code_entry();

1439 JRT_END
1440 
1441 
1442 // Handle call site that has been made non-entrant
1443 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
1444   // 6243940 We might end up in here if the callee is deoptimized
1445   // as we race to call it.  We don't want to take a safepoint if
1446   // the caller was interpreted because the caller frame will look
1447   // interpreted to the stack walkers and arguments are now
1448   // "compiled" so it is much better to make this transition
1449   // invisible to the stack walking code. The i2c path will
1450   // place the callee method in the callee_target. It is stashed
1451   // there because if we try to find the callee by normal means a
1452   // safepoint is possible and we'd have trouble gc'ing the compiled args.
1453   RegisterMap reg_map(thread, false);
1454   frame stub_frame = thread->last_frame();
1455   assert(stub_frame.is_runtime_frame(), "sanity check");
1456   frame caller_frame = stub_frame.sender(&reg_map);
1457 
1458   if (caller_frame.is_interpreted_frame() ||
1459       caller_frame.is_entry_frame()) {
1460     Method* callee = thread->callee_target();
1461     guarantee(callee != NULL && callee->is_method(), "bad handshake");
1462     thread->set_vm_result_2(callee);
1463     thread->set_callee_target(NULL);
1464     return callee->get_c2i_entry();
1465   }
1466 
1467   // Must be compiled to compiled path which is safe to stackwalk
1468   methodHandle callee_method;

1469   JRT_BLOCK
1470     // Force resolving of caller (if we called from compiled frame)
1471     callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_NULL);
1472     thread->set_vm_result_2(callee_method());
1473   JRT_BLOCK_END
1474   // return compiled code entry point after potential safepoints
1475   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1476   return callee_method->verified_code_entry();

1477 JRT_END
1478 
1479 // Handle abstract method call
1480 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* thread))
1481   // Verbose error message for AbstractMethodError.
1482   // Get the called method from the invoke bytecode.
1483   vframeStream vfst(thread, true);
1484   assert(!vfst.at_end(), "Java frame must exist");
1485   methodHandle caller(vfst.method());
1486   Bytecode_invoke invoke(caller, vfst.bci());
1487   DEBUG_ONLY( invoke.verify(); )
1488 
1489   // Find the compiled caller frame.
1490   RegisterMap reg_map(thread);
1491   frame stubFrame = thread->last_frame();
1492   assert(stubFrame.is_runtime_frame(), "must be");
1493   frame callerFrame = stubFrame.sender(&reg_map);
1494   assert(callerFrame.is_compiled_frame(), "must be");
1495 
1496   // Install exception and return forward entry.
1497   address res = StubRoutines::throw_AbstractMethodError_entry();
1498   JRT_BLOCK
1499     methodHandle callee = invoke.static_target(thread);
1500     if (!callee.is_null()) {
1501       oop recv = callerFrame.retrieve_receiver(&reg_map);
1502       Klass *recv_klass = (recv != NULL) ? recv->klass() : NULL;
1503       LinkResolver::throw_abstract_method_error(callee, recv_klass, thread);
1504       res = StubRoutines::forward_exception_entry();
1505     }
1506   JRT_BLOCK_END
1507   return res;
1508 JRT_END
1509 
1510 
1511 // resolve a static call and patch code
1512 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread ))
1513   methodHandle callee_method;
1514   JRT_BLOCK
1515     callee_method = SharedRuntime::resolve_helper(thread, false, false, CHECK_NULL);
1516     thread->set_vm_result_2(callee_method());
1517   JRT_BLOCK_END
1518   // return compiled code entry point after potential safepoints
1519   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1520   return callee_method->verified_code_entry();
1521 JRT_END
1522 
1523 
1524 // resolve virtual call and update inline cache to monomorphic
1525 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread ))
1526   methodHandle callee_method;
1527   JRT_BLOCK
1528     callee_method = SharedRuntime::resolve_helper(thread, true, false, CHECK_NULL);
1529     thread->set_vm_result_2(callee_method());
1530   JRT_BLOCK_END
1531   // return compiled code entry point after potential safepoints
1532   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1533   return callee_method->verified_code_entry();
1534 JRT_END
1535 
1536 
1537 // Resolve a virtual call that can be statically bound (e.g., always
1538 // monomorphic, so it has no inline cache).  Patch code to resolved target.
1539 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread))
1540   methodHandle callee_method;
1541   JRT_BLOCK
1542     callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL);
1543     thread->set_vm_result_2(callee_method());
1544   JRT_BLOCK_END
1545   // return compiled code entry point after potential safepoints
1546   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1547   return callee_method->verified_code_entry();
1548 JRT_END
1549 
1550 // The handle_ic_miss_helper_internal function returns false if it failed due
1551 // to running out of vtable stubs or IC stubs while transitioning ICs
1552 // to transitional states. The needs_ic_stub_refill value will be set if
1553 // the failure was due to running out of IC stubs, in which case handle_ic_miss_helper
1554 // refills the IC stubs and tries again.
1555 bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm,
1556                                                    const frame& caller_frame, methodHandle callee_method,
1557                                                    Bytecodes::Code bc, CallInfo& call_info,
1558                                                    bool& needs_ic_stub_refill, TRAPS) {
1559   CompiledICLocker ml(caller_nm);
1560   CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1561   bool should_be_mono = false;
1562   if (inline_cache->is_optimized()) {
1563     if (TraceCallFixup) {
1564       ResourceMark rm(THREAD);
1565       tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
1566       callee_method->print_short_name(tty);
1567       tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1568     }

1569     should_be_mono = true;
1570   } else if (inline_cache->is_icholder_call()) {
1571     CompiledICHolder* ic_oop = inline_cache->cached_icholder();
1572     if (ic_oop != NULL) {
1573       if (!ic_oop->is_loader_alive()) {
1574         // Deferred IC cleaning due to concurrent class unloading
1575         if (!inline_cache->set_to_clean()) {
1576           needs_ic_stub_refill = true;
1577           return false;
1578         }
1579       } else if (receiver()->klass() == ic_oop->holder_klass()) {
1580         // This isn't a real miss. We must have seen that compiled code
1581         // is now available and we want the call site converted to a
1582         // monomorphic compiled call site.
1583         // We can't assert for callee_method->code() != NULL because it
1584         // could have been deoptimized in the meantime
1585         if (TraceCallFixup) {
1586           ResourceMark rm(THREAD);
1587           tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
1588           callee_method->print_short_name(tty);


1610     }
1611   } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1612     // Potential change to megamorphic
1613 
1614     bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, CHECK_false);
1615     if (needs_ic_stub_refill) {
1616       return false;
1617     }
1618     if (!successful) {
1619       if (!inline_cache->set_to_clean()) {
1620         needs_ic_stub_refill = true;
1621         return false;
1622       }
1623     }
1624   } else {
1625     // Either clean or megamorphic
1626   }
1627   return true;
1628 }
1629 
1630 methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
1631   ResourceMark rm(thread);
1632   CallInfo call_info;
1633   Bytecodes::Code bc;
1634 
1635   // receiver is NULL for static calls. An exception is thrown for NULL
1636   // receivers for non-static calls
1637   Handle receiver = find_callee_info(thread, bc, call_info,
1638                                      CHECK_(methodHandle()));
1639   // Compiler1 can produce virtual call sites that can actually be statically bound
1640   // If we fell thru to below we would think that the site was going megamorphic
1641   // when in fact the site can never miss. Worse, because we'd think it was megamorphic
1642   // we'd try to do a vtable dispatch; however, methods that can be statically bound
1643   // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
1644   // reresolution of the call site (as if we did a handle_wrong_method and not a
1645   // plain ic_miss) and the site will be converted to an optimized virtual call site
1646   // never to miss again. I don't believe C2 will produce code like this but if it
1647   // did this would still be the correct thing to do for it too, hence no ifdef.
1648   //
1649   if (call_info.resolved_method()->can_be_statically_bound()) {
1650     methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_(methodHandle()));
1651     if (TraceCallFixup) {
1652       RegisterMap reg_map(thread, false);
1653       frame caller_frame = thread->last_frame().sender(&reg_map);
1654       ResourceMark rm(thread);
1655       tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
1656       callee_method->print_short_name(tty);
1657       tty->print_cr(" from pc: " INTPTR_FORMAT, p2i(caller_frame.pc()));
1658       tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1659     }
1660     return callee_method;
1661   }
1662 
1663   methodHandle callee_method = call_info.selected_method();
1664 
1665 #ifndef PRODUCT
1666   Atomic::inc(&_ic_miss_ctr);
1667 
1668   // Statistics & Tracing
1669   if (TraceCallFixup) {
1670     ResourceMark rm(thread);


1685   // install an event collector so that when a vtable stub is created the
1686   // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1687   // event can't be posted when the stub is created as locks are held
1688   // - instead the event will be deferred until the event collector goes
1689   // out of scope.
1690   JvmtiDynamicCodeEventCollector event_collector;
1691 
1692   // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1693   // Transitioning IC caches may require transition stubs. If we run out
1694   // of transition stubs, we have to drop locks and perform a safepoint
1695   // that refills them.
1696   RegisterMap reg_map(thread, false);
1697   frame caller_frame = thread->last_frame().sender(&reg_map);
1698   CodeBlob* cb = caller_frame.cb();
1699   CompiledMethod* caller_nm = cb->as_compiled_method();
1700 
1701   for (;;) {
1702     ICRefillVerifier ic_refill_verifier;
1703     bool needs_ic_stub_refill = false;
1704     bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method,
1705                                                      bc, call_info, needs_ic_stub_refill, CHECK_(methodHandle()));
1706     if (successful || !needs_ic_stub_refill) {
1707       return callee_method;
1708     } else {
1709       InlineCacheBuffer::refill_ic_stubs();
1710     }
1711   }
1712 }
1713 
1714 static bool clear_ic_at_addr(CompiledMethod* caller_nm, address call_addr, bool is_static_call) {
1715   CompiledICLocker ml(caller_nm);
1716   if (is_static_call) {
1717     CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
1718     if (!ssc->is_clean()) {
1719       return ssc->set_to_clean();
1720     }
1721   } else {
1722     // compiled, dispatched call (which used to call an interpreted method)
1723     CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1724     if (!inline_cache->is_clean()) {
1725       return inline_cache->set_to_clean();
1726     }
1727   }
1728   return true;
1729 }
1730 
1731 //
1732 // Resets a call-site in compiled code so it will get resolved again.
1733 // This routine handles virtual call sites, optimized virtual call
1734 // sites, and static call sites. It is typically used to change a call site's
1735 // destination from compiled to interpreted.
1736 //
1737 methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
1738   ResourceMark rm(thread);
1739   RegisterMap reg_map(thread, false);
1740   frame stub_frame = thread->last_frame();
1741   assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1742   frame caller = stub_frame.sender(&reg_map);
1743 
1744   // Do nothing if the frame isn't a live compiled frame.
1745   // nmethod could be deoptimized by the time we get here
1746   // so no update to the caller is needed.
1747 
1748   if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
1749 
1750     address pc = caller.pc();
1751 
1752     // Check for static or virtual call
1753     bool is_static_call = false;
1754     CompiledMethod* caller_nm = CodeCache::find_compiled(pc);
1755 
1756     // Default call_addr is the location of the "basic" call.
1757     // Determine the address of the call we are reresolving. With


1778       CompiledICLocker ml(caller_nm);
1779       // Location of call instruction
1780       call_addr = caller_nm->call_instruction_address(pc);
1781     }
1782     // Make sure nmethod doesn't get deoptimized and removed until
1783     // this is done with it.
1784     // CLEANUP - with lazy deopt shouldn't need this lock
1785     nmethodLocker nmlock(caller_nm);
1786 
1787     if (call_addr != NULL) {
1788       RelocIterator iter(caller_nm, call_addr, call_addr+1);
1789       int ret = iter.next(); // Get item
1790       if (ret) {
1791         assert(iter.addr() == call_addr, "must find call");
1792         if (iter.type() == relocInfo::static_call_type) {
1793           is_static_call = true;
1794         } else {
1795           assert(iter.type() == relocInfo::virtual_call_type ||
1796                  iter.type() == relocInfo::opt_virtual_call_type
1797                 , "unexpected relocInfo. type");

1798         }
1799       } else {
1800         assert(!UseInlineCaches, "relocation info. must exist for this address");
1801       }
1802 
1803       // Cleaning the inline cache will force a new resolve. This is more robust
1804       // than directly setting it to the new destination, since resolving of calls
1805     // is always done through the same code path. (Experience shows that it
1806     // leads to bugs that are very hard to track down if an inline cache gets updated
1807     // to a wrong method.) It should not be performance critical, since the
1808       // resolve is only done once.
1809 
1810       for (;;) {
1811         ICRefillVerifier ic_refill_verifier;
1812         if (!clear_ic_at_addr(caller_nm, call_addr, is_static_call)) {
1813           InlineCacheBuffer::refill_ic_stubs();
1814         } else {
1815           break;
1816         }
1817       }
1818     }
1819   }
1820 
1821   methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));
1822 
1823 
1824 #ifndef PRODUCT
1825   Atomic::inc(&_wrong_method_ctr);
1826 
1827   if (TraceCallFixup) {
1828     ResourceMark rm(thread);
1829     tty->print("handle_wrong_method reresolving call to");
1830     callee_method->print_short_name(tty);
1831     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1832   }
1833 #endif
1834 
1835   return callee_method;
1836 }
1837 
1838 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1839   // The faulting unsafe accesses should be changed to throw the error
1840   // synchronously instead. Meanwhile the faulting instruction will be
1841   // skipped over (effectively turning it into a no-op) and an
1842   // asynchronous exception will be raised which the thread will
1843   // handle at a later point. If the instruction is a load it will


2299  private:
2300   enum {
2301     _basic_type_bits = 4,
2302     _basic_type_mask = right_n_bits(_basic_type_bits),
2303     _basic_types_per_int = BitsPerInt / _basic_type_bits,
2304     _compact_int_count = 3
2305   };
2306   // TO DO:  Consider integrating this with a more global scheme for compressing signatures.
2307   // For now, 4 bits per component (plus T_VOID gaps after double/long) is not excessive.
2308 
2309   union {
2310     int  _compact[_compact_int_count];
2311     int* _fingerprint;
2312   } _value;
2313   int _length; // A negative length indicates the fingerprint is in the compact form;
2314                // otherwise _value._fingerprint is the array.
2315 
2316   // Remap BasicTypes that are handled equivalently by the adapters.
2317   // These are correct for the current system but someday it might be
2318   // necessary to make this mapping platform dependent.
2319   static int adapter_encoding(BasicType in) {
2320     switch (in) {
2321       case T_BOOLEAN:
2322       case T_BYTE:
2323       case T_SHORT:
2324       case T_CHAR:
2325         // These are all promoted to T_INT in the calling convention
2326         return T_INT;
2327 
2328       case T_OBJECT:
2329       case T_ARRAY:
2330         // In other words, we assume that any register good enough for
2331         // an int or long is good enough for a managed pointer.
2332 #ifdef _LP64
2333         return T_LONG;
2334 #else
2335         return T_INT;
2336 #endif
2337 
2338       case T_INT:
2339       case T_LONG:
2340       case T_FLOAT:
2341       case T_DOUBLE:
2342       case T_VOID:
2343         return in;
2344 
2345       default:
2346         ShouldNotReachHere();
2347         return T_CONFLICT;
2348     }
2349   }
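// [Editorial sketch] The practical effect of adapter_encoding() is that
// signatures differing only in sub-int types share one adapter. For example:
//
//   adapter_encoding(T_BOOLEAN) == adapter_encoding(T_SHORT)
//                               == adapter_encoding(T_CHAR) == T_INT
//
// so methods with signatures (Z)V, (S)V, (C)V and (I)V all hash to the same
// AdapterFingerPrint and reuse the same i2c/c2i adapter pair.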
2350 
2351  public:
2352   AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) {
2353     // The fingerprint is based on the BasicType signature encoded
2354     // into an array of ints with eight entries per int.

2355     int* ptr;
2356     int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
2357     if (len <= _compact_int_count) {
2358       assert(_compact_int_count == 3, "else change next line");
2359       _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
2360       // Storing the signature encoded as signed chars hits about 98%
2361       // of the time.
2362       _length = -len;
2363       ptr = _value._compact;
2364     } else {
2365       _length = len;
2366       _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length, mtCode);
2367       ptr = _value._fingerprint;
2368     }
2369 
2370     // Now pack the BasicTypes with 8 per int
2371     int sig_index = 0;
2372     for (int index = 0; index < len; index++) {
2373       int value = 0;
2374       for (int byte = 0; byte < _basic_types_per_int; byte++) {
2375         int bt = ((sig_index < total_args_passed)
2376                   ? adapter_encoding(sig_bt[sig_index++])
2377                   : 0);
2378         assert((bt & _basic_type_mask) == bt, "must fit in 4 bits");
2379         value = (value << _basic_type_bits) | bt;
2380       }
2381       ptr[index] = value;
2382     }

2383   }
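// [Editorial worked example, assuming the standard HotSpot BasicType values
// T_INT == 10, T_LONG == 11, T_OBJECT == 12, T_VOID == 14 and an LP64 build
// where adapter_encoding() maps T_OBJECT to T_LONG]
//
//   sig_bt = { T_OBJECT, T_INT, T_LONG, T_VOID }  // receiver, int, long
//   len    = (4 + 7) / 8 = 1  ->  compact form, _length == -1
//   nibbles packed MSB-first: B A B E 0 0 0 0
//   _value._compact[0] == 0xBABE0000
//
// Eight 4-bit entries fit per int; slots past the last argument pack as 0.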
2384 
2385   ~AdapterFingerPrint() {
2386     if (_length > 0) {
2387       FREE_C_HEAP_ARRAY(int, _value._fingerprint);
2388     }
2389   }
2390 
2391   int value(int index) {
2392     if (_length < 0) {
2393       return _value._compact[index];
2394     }
2395     return _value._fingerprint[index];
2396   }
2397   int length() {
2398     if (_length < 0) return -_length;
2399     return _length;
2400   }
2401 
2402   bool is_compact() {


2448 
2449  private:
2450 
2451 #ifndef PRODUCT
2452   static int _lookups; // number of calls to lookup
2453   static int _buckets; // number of buckets checked
2454   static int _equals;  // number of buckets checked with matching hash
2455   static int _hits;    // number of successful lookups
2456   static int _compact; // number of equals calls with compact signature
2457 #endif
2458 
2459   AdapterHandlerEntry* bucket(int i) {
2460     return (AdapterHandlerEntry*)BasicHashtable<mtCode>::bucket(i);
2461   }
2462 
2463  public:
2464   AdapterHandlerTable()
2465     : BasicHashtable<mtCode>(293, (DumpSharedSpaces ? sizeof(CDSAdapterHandlerEntry) : sizeof(AdapterHandlerEntry))) { }
2466 
2467   // Create a new entry suitable for insertion in the table
2468   AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_unverified_entry) {
2469     AdapterHandlerEntry* entry = (AdapterHandlerEntry*)BasicHashtable<mtCode>::new_entry(fingerprint->compute_hash());
2470     entry->init(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
2471     if (DumpSharedSpaces) {
2472       ((CDSAdapterHandlerEntry*)entry)->init();
2473     }
2474     return entry;
2475   }
2476 
2477   // Insert an entry into the table
2478   void add(AdapterHandlerEntry* entry) {
2479     int index = hash_to_index(entry->hash());
2480     add_entry(index, entry);
2481   }
2482 
2483   void free_entry(AdapterHandlerEntry* entry) {
2484     entry->deallocate();
2485     BasicHashtable<mtCode>::free_entry(entry);
2486   }
2487 
2488   // Find an entry with the same fingerprint if it exists
2489   AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt) {
2490     NOT_PRODUCT(_lookups++);
2491     AdapterFingerPrint fp(total_args_passed, sig_bt);
2492     unsigned int hash = fp.compute_hash();
2493     int index = hash_to_index(hash);
2494     for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) {
2495       NOT_PRODUCT(_buckets++);
2496       if (e->hash() == hash) {
2497         NOT_PRODUCT(_equals++);
2498         if (fp.equals(e->fingerprint())) {
2499 #ifndef PRODUCT
2500           if (fp.is_compact()) _compact++;
2501           _hits++;
2502 #endif
2503           return e;
2504         }
2505       }
2506     }
2507     return NULL;
2508   }
2509 
2510 #ifndef PRODUCT
2511   void print_statistics() {


2591   // Should be called only when AdapterHandlerLibrary_lock is active.
2592   if (_buffer == NULL) // Initialize lazily
2593       _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2594   return _buffer;
2595 }
2596 
2597 extern "C" void unexpected_adapter_call() {
2598   ShouldNotCallThis();
2599 }
2600 
2601 void AdapterHandlerLibrary::initialize() {
2602   if (_adapters != NULL) return;
2603   _adapters = new AdapterHandlerTable();
2604 
2605   // Create a special handler for abstract methods.  Abstract methods
2606   // are never compiled so an i2c entry is somewhat meaningless, but
2607   // throw AbstractMethodError just in case.
2608   // Pass wrong_method_abstract for the c2i transitions to return
2609   // AbstractMethodError for invalid invocations.
2610   address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2611   _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL),
2612                                                               StubRoutines::throw_AbstractMethodError_entry(),
2613                                                               wrong_method_abstract, wrong_method_abstract);
2614 }
2615 
2616 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
2617                                                       address i2c_entry,
2618                                                       address c2i_entry,
2619                                                       address c2i_unverified_entry) {
2620   return _adapters->new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
2621 }
2622 
2623 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
2624   AdapterHandlerEntry* entry = get_adapter0(method);
2625   if (method->is_shared()) {
2626     // See comments around Method::link_method()
2627     MutexLocker mu(AdapterHandlerLibrary_lock);
2628     if (method->adapter() == NULL) {
2629       method->update_adapter_trampoline(entry);
2630     }
2631     address trampoline = method->from_compiled_entry();
2632     if (*(int*)trampoline == 0) {
2633       CodeBuffer buffer(trampoline, (int)SharedRuntime::trampoline_size());
2634       MacroAssembler _masm(&buffer);
2635       SharedRuntime::generate_trampoline(&_masm, entry->get_c2i_entry());
2636       assert(*(int*)trampoline != 0, "Instruction(s) for trampoline must not be encoded as zeros.");
2637 
2638       if (PrintInterpreter) {
2639         Disassembler::decode(buffer.insts_begin(), buffer.insts_end());
2640       }
2641     }
2642   }
2643 
2644   return entry;
2645 }
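// [Editorial note] For CDS ("shared") methods, from_compiled_entry() points
// at a small trampoline allocated in the shared misc-code space (see
// CDSAdapterHandlerEntry::init() further down), not at the c2i adapter
// itself. The trampoline area is still zero when it has never been patched;
// the *(int*)trampoline == 0 test above detects that state, and
// generate_trampoline() then emits the platform-specific code (essentially
// a jump to the adapter's c2i entry) in place.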
2646 
2647 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter0(const methodHandle& method) {
2648   // Use customized signature handler.  Need to lock around updates to
2649   // the AdapterHandlerTable (it is not safe for concurrent readers
2650   // and a single writer: this could be fixed if it becomes a
2651   // problem).
2652 
2653   ResourceMark rm;
2654 
2655   NOT_PRODUCT(int insts_size);
2656   AdapterBlob* new_adapter = NULL;
2657   AdapterHandlerEntry* entry = NULL;
2658   AdapterFingerPrint* fingerprint = NULL;

2659   {
2660     MutexLocker mu(AdapterHandlerLibrary_lock);
2661     // make sure data structure is initialized
2662     initialize();
2663 
2664     if (method->is_abstract()) {
2665       return _abstract_method_handler;
2666     }
2667 
2668     // Fill in the signature array, for the calling-convention call.
2669     int total_args_passed = method->size_of_parameters(); // All args on stack

2670 
2671     BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2672     VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2673     int i = 0;
2674     if (!method->is_static())  // Pass in receiver first
2675       sig_bt[i++] = T_OBJECT;
2676     for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
2677       sig_bt[i++] = ss.type();  // Collect remaining bits of signature
2678       if (ss.type() == T_LONG || ss.type() == T_DOUBLE)
2679         sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
2680     }
2681     assert(i == total_args_passed, "");
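// [Editorial worked example] For a non-static method  int m(long l, Object o)
// size_of_parameters() counts Java slots: receiver (1) + long (2) + oop (1),
// so total_args_passed == 4 and the loop above fills
//
//   sig_bt = { T_OBJECT, T_LONG, T_VOID, T_OBJECT }
//
// with T_VOID as the placeholder for the long's second slot, making i == 4.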
2682 
2683     // Lookup method signature's fingerprint
2684     entry = _adapters->lookup(total_args_passed, sig_bt);
2685 
2686 #ifdef ASSERT
2687     AdapterHandlerEntry* shared_entry = NULL;
2688     // Start adapter sharing verification only after the VM is booted.
2689     if (VerifyAdapterSharing && (entry != NULL)) {
2690       shared_entry = entry;
2691       entry = NULL;
2692     }
2693 #endif
2694 
2695     if (entry != NULL) {
2696       return entry;
2697     }
2698 
2699     // Get a description of the compiled Java calling convention and the largest used (VMReg) stack slot
2700     int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
2701 
2702     // Make a C heap allocated version of the fingerprint to store in the adapter
2703     fingerprint = new AdapterFingerPrint(total_args_passed, sig_bt);
2704 
2705     // StubRoutines::code2() is initialized after this function can be called. As a result,
2706     // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that was generated
2707     // prior to StubRoutines::code2() being set. The checks in question are generated in an I2C
2708     // stub and ensure that the I2C stub is called from an interpreter frame.
2709     bool contains_all_checks = StubRoutines::code2() != NULL;
2710 
2711     // Create I2C & C2I handlers
2712     BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2713     if (buf != NULL) {
2714       CodeBuffer buffer(buf);
2715       short buffer_locs[20];
2716       buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
2717                                              sizeof(buffer_locs)/sizeof(relocInfo));
2718 
2719       MacroAssembler _masm(&buffer);
2720       entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
2721                                                      total_args_passed,
2722                                                      comp_args_on_stack,
2723                                                      sig_bt,
2724                                                      regs,
2725                                                      fingerprint);
2726 #ifdef ASSERT
2727       if (VerifyAdapterSharing) {
2728         if (shared_entry != NULL) {
2729           assert(shared_entry->compare_code(buf->code_begin(), buffer.insts_size()), "code must match");
2730           // Release the one just created and return the original
2731           _adapters->free_entry(entry);
2732           return shared_entry;
2733         } else  {
2734           entry->save_code(buf->code_begin(), buffer.insts_size());
2735         }
2736       }
2737 #endif
2738 
2739       new_adapter = AdapterBlob::create(&buffer);
2740       NOT_PRODUCT(insts_size = buffer.insts_size());
2741     }
2742     if (new_adapter == NULL) {
2743       // CodeCache is full, disable compilation
2744       // Ought to log this but compile log is only per compile thread
2745       // and we're some nondescript Java thread.
2746       return NULL; // Out of CodeCache space
2747     }
2748     entry->relocate(new_adapter->content_begin());
2749 #ifndef PRODUCT
2750     // debugging support
2751     if (PrintAdapterHandlers || PrintStubCode) {
2752       ttyLocker ttyl;
2753       entry->print_adapter_on(tty);
2754       tty->print_cr("i2c argument handler #%d for: %s %s %s (%d bytes generated)",
2755                     _adapters->number_of_entries(), (method->is_static() ? "static" : "receiver"),
2756                     method->signature()->as_C_string(), fingerprint->as_string(), insts_size);
2757       tty->print_cr("c2i argument handler starts at %p", entry->get_c2i_entry());
2758       if (Verbose || PrintStubCode) {
2759         address first_pc = entry->base_address();


2775     char blob_id[256];
2776     jio_snprintf(blob_id,
2777                  sizeof(blob_id),
2778                  "%s(%s)@" PTR_FORMAT,
2779                  new_adapter->name(),
2780                  fingerprint->as_string(),
2781                  new_adapter->content_begin());
2782     Forte::register_stub(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2783 
2784     if (JvmtiExport::should_post_dynamic_code_generated()) {
2785       JvmtiExport::post_dynamic_code_generated(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2786     }
2787   }
2788   return entry;
2789 }
2790 
2791 address AdapterHandlerEntry::base_address() {
2792   address base = _i2c_entry;
2793   if (base == NULL)  base = _c2i_entry;
2794   assert(base <= _c2i_entry || _c2i_entry == NULL, "");
2795   assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == NULL, "");
2796   return base;
2797 }
2798 
2799 void AdapterHandlerEntry::relocate(address new_base) {
2800   address old_base = base_address();
2801   assert(old_base != NULL, "");
2802   ptrdiff_t delta = new_base - old_base;
2803   if (_i2c_entry != NULL)
2804     _i2c_entry += delta;
2805   if (_c2i_entry != NULL)
2806     _c2i_entry += delta;
2807   if (_c2i_unverified_entry != NULL)
2808     _c2i_unverified_entry += delta;
2809   assert(base_address() == new_base, "");
2810 }
2811 
2812 
2813 void AdapterHandlerEntry::deallocate() {
2814   delete _fingerprint;
2815 #ifdef ASSERT
2816   if (_saved_code) FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
2817 #endif
2818 }
2819 
2820 
2821 #ifdef ASSERT
2822 // Capture the code before relocation so that it can be compared
2823 // against other versions.  If the code is captured after relocation
2824 // then relative instructions won't be equivalent.
2825 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
2826   _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
2827   _saved_code_length = length;
2828   memcpy(_saved_code, buffer, length);
2829 }
2830 
2831 
2832 bool AdapterHandlerEntry::compare_code(unsigned char* buffer, int length) {
2833   if (length != _saved_code_length) {
2834     return false;


2866 
2867 
2868     ResourceMark rm;
2869     BufferBlob*  buf = buffer_blob(); // the temporary code buffer in CodeCache
2870     if (buf != NULL) {
2871       CodeBuffer buffer(buf);
2872       double locs_buf[20];
2873       buffer.insts()->initialize_shared_locs((relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
2874       MacroAssembler _masm(&buffer);
2875 
2876       // Fill in the signature array, for the calling-convention call.
2877       const int total_args_passed = method->size_of_parameters();
2878 
2879       BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2880       VMRegPair*   regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2881       int i=0;
2882       if (!method->is_static())  // Pass in receiver first
2883         sig_bt[i++] = T_OBJECT;
2884       SignatureStream ss(method->signature());
2885       for (; !ss.at_return_type(); ss.next()) {
2886         sig_bt[i++] = ss.type();  // Collect remaining bits of signature

2887         if (ss.type() == T_LONG || ss.type() == T_DOUBLE)
2888           sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
2889       }
2890       assert(i == total_args_passed, "");
2891       BasicType ret_type = ss.type();
2892 
2893       // Now get the compiled-Java layout as input (or output) arguments.
2894       // NOTE: Stubs for compiled entry points of method handle intrinsics
2895       // are just trampolines so the argument registers must be outgoing ones.
2896       const bool is_outgoing = method->is_method_handle_intrinsic();
2897       int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, is_outgoing);
2898 
2899       // Generate the compiled-to-native wrapper code
2900       nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
2901 
2902       if (nm != NULL) {
2903         method->set_code(method, nm);
2904 
2905         DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_simple));
2906         if (directive->PrintAssemblyOption) {


2981   int cnt = 0;
2982   if (has_receiver) {
2983     sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
2984   }
2985 
2986   while (*s != ')') {          // Find closing right paren
2987     switch (*s++) {            // Switch on signature character
2988     case 'B': sig_bt[cnt++] = T_BYTE;    break;
2989     case 'C': sig_bt[cnt++] = T_CHAR;    break;
2990     case 'D': sig_bt[cnt++] = T_DOUBLE;  sig_bt[cnt++] = T_VOID; break;
2991     case 'F': sig_bt[cnt++] = T_FLOAT;   break;
2992     case 'I': sig_bt[cnt++] = T_INT;     break;
2993     case 'J': sig_bt[cnt++] = T_LONG;    sig_bt[cnt++] = T_VOID; break;
2994     case 'S': sig_bt[cnt++] = T_SHORT;   break;
2995     case 'Z': sig_bt[cnt++] = T_BOOLEAN; break;
2996     case 'V': sig_bt[cnt++] = T_VOID;    break;
2997     case 'L':                   // Oop
2998       while (*s++ != ';');   // Skip signature
2999       sig_bt[cnt++] = T_OBJECT;
3000       break;
3001     case '[': {                 // Array
3002       do {                      // Skip optional size
3003         while (*s >= '0' && *s <= '9') s++;
3004       } while (*s++ == '[');   // Nested arrays?
3005       // Skip element type
3006       if (s[-1] == 'L')
3007         while (*s++ != ';'); // Skip signature
3008       sig_bt[cnt++] = T_ARRAY;
3009       break;
3010     }
3011     default : ShouldNotReachHere();
3012     }
3013   }
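// [Editorial trace] For the signature "(BJ[I)V" with has_receiver == true,
// the loop above produces:
//
//   receiver -> sig_bt[0] = T_OBJECT   (argument 0, not in the signature)
//   'B'      -> sig_bt[1] = T_BYTE
//   'J'      -> sig_bt[2] = T_LONG, sig_bt[3] = T_VOID  (second slot)
//   '[','I'  -> sig_bt[4] = T_ARRAY    (element type skipped)
//   ')'      -> loop exits with cnt == 5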
3014 
3015   if (has_appendix) {
3016     sig_bt[cnt++] = T_OBJECT;
3017   }
3018 
3019   assert(cnt < 256, "grow table size");
3020 
3021   int comp_args_on_stack;
3022   comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt, true);
3023 
3024   // the calling convention doesn't count out_preserve_stack_slots so
3025   // we must add that in to get "true" stack offsets.
3026 


3127     AdapterHandlerEntry* a = iter.next();
3128     if (b == CodeCache::find_blob(a->get_i2c_entry())) return true;
3129   }
3130   return false;
3131 }
3132 
3133 void AdapterHandlerLibrary::print_handler_on(outputStream* st, const CodeBlob* b) {
3134   AdapterHandlerTableIterator iter(_adapters);
3135   while (iter.has_next()) {
3136     AdapterHandlerEntry* a = iter.next();
3137     if (b == CodeCache::find_blob(a->get_i2c_entry())) {
3138       st->print("Adapter for signature: ");
3139       a->print_adapter_on(st);
3140       return;
3141     }
3142   }
3143   assert(false, "Should have found handler");
3144 }
3145 
3146 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3147   st->print_cr("AHE@" INTPTR_FORMAT ": %s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT,
3148                p2i(this), fingerprint()->as_string(),
3149                p2i(get_i2c_entry()), p2i(get_c2i_entry()), p2i(get_c2i_unverified_entry()));
3150 
3151 }
3152 
3153 #if INCLUDE_CDS
3154 
3155 void CDSAdapterHandlerEntry::init() {
3156   assert(DumpSharedSpaces, "used during dump time only");
3157   _c2i_entry_trampoline = (address)MetaspaceShared::misc_code_space_alloc(SharedRuntime::trampoline_size());
3158   _adapter_trampoline = (AdapterHandlerEntry**)MetaspaceShared::misc_code_space_alloc(sizeof(AdapterHandlerEntry*));
3159 };
3160 
3161 #endif // INCLUDE_CDS
3162 
3163 
3164 #ifndef PRODUCT
3165 
3166 void AdapterHandlerLibrary::print_statistics() {
3167   _adapters->print_statistics();
3168 }
3169 


3223       break;
3224     } else {
3225       fr = fr.java_sender();
3226     }
3227   }
3228   return activation;
3229 }
3230 
3231 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* thread) {
3232   // After any safepoint, just before going back to compiled code,
3233   // we inform the GC that we will be doing initializing writes to
3234   // this object in the future without emitting card-marks, so
3235   // GC may take any compensating steps.
3236 
3237   oop new_obj = thread->vm_result();
3238   if (new_obj == NULL) return;
3239 
3240   BarrierSet *bs = BarrierSet::barrier_set();
3241   bs->on_slowpath_allocation_exit(thread, new_obj);
3242 }

new/src/hotspot/share/runtime/sharedRuntime.cpp

  27 #include "aot/aotLoader.hpp"
  28 #include "classfile/stringTable.hpp"
  29 #include "classfile/systemDictionary.hpp"
  30 #include "classfile/vmSymbols.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/compiledIC.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/compiledMethod.inline.hpp"
  35 #include "code/scopeDesc.hpp"
  36 #include "code/vtableStubs.hpp"
  37 #include "compiler/abstractCompiler.hpp"
  38 #include "compiler/compileBroker.hpp"
  39 #include "compiler/disassembler.hpp"
  40 #include "gc/shared/barrierSet.hpp"
  41 #include "gc/shared/gcLocker.inline.hpp"
  42 #include "interpreter/interpreter.hpp"
  43 #include "interpreter/interpreterRuntime.hpp"
  44 #include "jfr/jfrEvents.hpp"
  45 #include "logging/log.hpp"
  46 #include "memory/metaspaceShared.hpp"
  47 #include "memory/oopFactory.hpp"
  48 #include "memory/resourceArea.hpp"
  49 #include "memory/universe.hpp"
  50 #include "oops/access.hpp"
  51 #include "oops/fieldStreams.hpp"
  52 #include "oops/klass.hpp"
  53 #include "oops/method.inline.hpp"
  54 #include "oops/objArrayKlass.hpp"
  55 #include "oops/objArrayOop.inline.hpp"
  56 #include "oops/oop.inline.hpp"
  57 #include "oops/valueKlass.hpp"
  58 #include "prims/forte.hpp"
  59 #include "prims/jvmtiExport.hpp"
  60 #include "prims/methodHandles.hpp"
  61 #include "prims/nativeLookup.hpp"
  62 #include "runtime/arguments.hpp"
  63 #include "runtime/atomic.hpp"
  64 #include "runtime/biasedLocking.hpp"
  65 #include "runtime/compilationPolicy.hpp"
  66 #include "runtime/frame.inline.hpp"
  67 #include "runtime/handles.inline.hpp"
  68 #include "runtime/init.hpp"
  69 #include "runtime/interfaceSupport.inline.hpp"
  70 #include "runtime/java.hpp"
  71 #include "runtime/javaCalls.hpp"
  72 #include "runtime/sharedRuntime.hpp"
  73 #include "runtime/stubRoutines.hpp"
  74 #include "runtime/vframe.inline.hpp"
  75 #include "runtime/vframeArray.hpp"
  76 #include "utilities/copy.hpp"
  77 #include "utilities/dtrace.hpp"


1104       switch (bc) {
1105         case Bytecodes::_invokevirtual:
1106           if (attached_method->method_holder()->is_interface()) {
1107             bc = Bytecodes::_invokeinterface;
1108           }
1109           break;
1110         case Bytecodes::_invokeinterface:
1111           if (!attached_method->method_holder()->is_interface()) {
1112             bc = Bytecodes::_invokevirtual;
1113           }
1114           break;
1115         case Bytecodes::_invokehandle:
1116           if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1117             bc = attached_method->is_static() ? Bytecodes::_invokestatic
1118                                               : Bytecodes::_invokevirtual;
1119           }
1120           break;
1121         default:
1122           break;
1123       }
1124     } else {
1125       assert(attached_method->has_scalarized_args(), "invalid use of attached method");
1126       if (!attached_method->method_holder()->is_value()) {
1127             // Ignore the attached method in this case so as not to confuse the code below
1128         attached_method = NULL;
1129       }
1130     }
1131   }
1132 
1133   assert(bc != Bytecodes::_illegal, "not initialized");
1134 
1135   bool has_receiver = bc != Bytecodes::_invokestatic &&
1136                       bc != Bytecodes::_invokedynamic &&
1137                       bc != Bytecodes::_invokehandle;
1138 
1139   // Find receiver for non-static call
1140   if (has_receiver) {
1141     // This register map must be updated since we need to find the receiver for
1142     // compiled frames. The receiver might be in a register.
1143     RegisterMap reg_map2(thread);
1144     frame stubFrame   = thread->last_frame();
1145     // Caller-frame is a compiled frame
1146     frame callerFrame = stubFrame.sender(&reg_map2);
1147 
1148     methodHandle callee = attached_method;
1149     if (callee.is_null()) {
1150       callee = bytecode.static_target(CHECK_NH);
1151       if (callee.is_null()) {
1152         THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1153       }
1154     }
1155     if (callee->has_scalarized_args() && callee->method_holder()->is_value()) {
1156       // If the receiver is a value type that is passed as fields, no oop is available.
1157       // Resolve the call without receiver null checking.
1158       assert(!attached_method.is_null(), "must have attached method");
1159       if (bc == Bytecodes::_invokevirtual) {
1160         LinkInfo link_info(attached_method->method_holder(), attached_method->name(), attached_method->signature());
1161         LinkResolver::resolve_virtual_call(callinfo, receiver, callee->method_holder(), link_info, /*check_null_and_abstract=*/ false, CHECK_NH);
1162       } else {
1163         assert(bc == Bytecodes::_invokeinterface, "anything else?");
1164         LinkInfo link_info(constantPoolHandle(THREAD, caller->constants()), bytecode_index, CHECK_NH);
1165         LinkResolver::resolve_interface_call(callinfo, receiver, callee->method_holder(), link_info, /*check_null_and_abstract=*/ false, CHECK_NH);
1166       }
1167       return receiver; // is null
1168     } else {
1169       // Retrieve from a compiled argument list
1170       receiver = Handle(THREAD, callerFrame.retrieve_receiver(&reg_map2));
1171 
1172       if (receiver.is_null()) {
1173         THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1174       }
1175     }
1176   }
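// [Editorial note] In the scalarized-receiver branch above there is no
// receiver oop in the caller's frame: the value type's fields are passed
// individually. The call is therefore resolved with
// check_null_and_abstract == false and the still-null receiver Handle is
// returned as-is, instead of being fetched via retrieve_receiver().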
1177 
1178   // Resolve method
1179   if (attached_method.not_null()) {
1180     // Parameterized by attached method.
1181     LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, CHECK_NH);
1182   } else {
1183     // Parameterized by bytecode.
1184     constantPoolHandle constants(THREAD, caller->constants());
1185     LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1186   }
1187 
1188 #ifdef ASSERT
1189   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1190   if (has_receiver) {
1191     assert(receiver.not_null(), "should have thrown exception");
1192     Klass* receiver_klass = receiver->klass();
1193     Klass* rk = NULL;
1194     if (attached_method.not_null()) {
1195       // In case there's resolved method attached, use its holder during the check.
1196       rk = attached_method->method_holder();


1282   // Make sure the callee nmethod does not get deoptimized and removed before
1283   // we are done patching the code.
1284   CompiledMethod* callee = callee_method->code();
1285 
1286   if (callee != NULL) {
1287     assert(callee->is_compiled(), "must be nmethod for patching");
1288   }
1289 
1290   if (callee != NULL && !callee->is_in_use()) {
1291     // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
1292     callee = NULL;
1293   }
1294   nmethodLocker nl_callee(callee);
1295 #ifdef ASSERT
1296   address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
1297 #endif
1298 
1299   bool is_nmethod = caller_nm->is_nmethod();
1300 
1301   if (is_virtual) {
1302     Klass* receiver_klass = NULL;
1303     if (ValueTypePassFieldsAsArgs && callee_method->method_holder()->is_value()) {
1304       // If the receiver is a value type that is passed as fields, no oop is available
1305       receiver_klass = callee_method->method_holder();
1306     } else {
1307       assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
1308       receiver_klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass();
1309     }
1310     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
1311     CompiledIC::compute_monomorphic_entry(callee_method, receiver_klass,

1312                      is_optimized, static_bound, is_nmethod, virtual_call_info,
1313                      CHECK_false);
1314   } else {
1315     // static call
1316     CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info);
1317   }
1318 
1319   // grab lock, check for deoptimization and potentially patch caller
1320   {
1321     CompiledICLocker ml(caller_nm);
1322 
1323     // Holding this lock blocks out safepoints, during which both nmethods can change state.
1324 
1325     // Now that we are ready to patch if the Method* was redefined then
1326     // don't update call site and let the caller retry.
1327     // Don't update call site if callee nmethod was unloaded or deoptimized.
1328     // Don't update call site if callee nmethod was replaced by an other nmethod
1329     // which may happen when multiply alive nmethod (tiered compilation)
1330     // will be supported.
1331     if (!callee_method->is_old() &&


1356 
1357 // Resolves a call.  The compilers generate code for calls that go here
1358 // and are patched with the real destination of the call.
1359 methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
1360                                                bool is_virtual,
1361                                                bool is_optimized, TRAPS) {
1362 
1363   ResourceMark rm(thread);
1364   RegisterMap cbl_map(thread, false);
1365   frame caller_frame = thread->last_frame().sender(&cbl_map);
1366 
1367   CodeBlob* caller_cb = caller_frame.cb();
1368   guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
1369   CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
1370 
1371   // make sure caller is not getting deoptimized
1372   // and removed before we are done with it.
1373   // CLEANUP - with lazy deopt shouldn't need this lock
1374   nmethodLocker caller_lock(caller_nm);
1375 
1376   if (!is_virtual && !is_optimized) {
1377     SimpleScopeDesc ssd(caller_nm, caller_frame.pc());
1378     Bytecode bc(ssd.method(), ssd.method()->bcp_from(ssd.bci()));
1379     // Substitutability test implementation piggybacks on static call resolution
1380     if (bc.code() == Bytecodes::_if_acmpeq || bc.code() == Bytecodes::_if_acmpne) {
1381       SystemDictionary::ValueBootstrapMethods_klass()->initialize(CHECK_NULL);
1382       return SystemDictionary::ValueBootstrapMethods_klass()->find_method(vmSymbols::isSubstitutable_name(), vmSymbols::object_object_boolean_signature());
1383     }
1384   }
1385 
1386   // determine call info & receiver
1387   // note: a) receiver is NULL for static calls
1388   //       b) an exception is thrown if receiver is NULL for non-static calls
1389   CallInfo call_info;
1390   Bytecodes::Code invoke_code = Bytecodes::_illegal;
1391   Handle receiver = find_callee_info(thread, invoke_code,
1392                                      call_info, CHECK_(methodHandle()));
1393   methodHandle callee_method = call_info.selected_method();
1394 
1395   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1396          (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1397          (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1398          (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1399          ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1400 
1401   assert(caller_nm->is_alive() && !caller_nm->is_unloading(), "It should be alive");
1402 
1403 #ifndef PRODUCT
1404   // tracing/debugging/statistics
1405   int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :


1453       return callee_method;
1454     } else {
1455       InlineCacheBuffer::refill_ic_stubs();
1456     }
1457   }
1458 
1459 }
1460 
1461 
1462 // Inline caches exist only in compiled code
1463 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
1464 #ifdef ASSERT
1465   RegisterMap reg_map(thread, false);
1466   frame stub_frame = thread->last_frame();
1467   assert(stub_frame.is_runtime_frame(), "sanity check");
1468   frame caller_frame = stub_frame.sender(&reg_map);
1469   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
1470 #endif /* ASSERT */
1471 
1472   methodHandle callee_method;
1473   bool is_optimized = false;
1474   JRT_BLOCK
1475     callee_method = SharedRuntime::handle_ic_miss_helper(thread, is_optimized, CHECK_NULL);
1476     // Return Method* through TLS
1477     thread->set_vm_result_2(callee_method());
1478   JRT_BLOCK_END
1479   // return compiled code entry point after potential safepoints
1480   assert(callee_method->verified_code_entry() != NULL, "Jump to zero!");
1481   assert(callee_method->verified_value_ro_code_entry() != NULL, "Jump to zero!");
1482   return is_optimized ? callee_method->verified_code_entry() : callee_method->verified_value_ro_code_entry();
1483 JRT_END
1484 
1485 
1486 // Handle call site that has been made non-entrant
1487 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
1488   // 6243940 We might end up in here if the callee is deoptimized
1489   // as we race to call it.  We don't want to take a safepoint if
1490   // the caller was interpreted because the caller frame will look
1491   // interpreted to the stack walkers and arguments are now
1492   // "compiled" so it is much better to make this transition
1493   // invisible to the stack walking code. The i2c path will
1494   // place the callee method in the callee_target. It is stashed
1495   // there because if we tried to find the callee by normal means a
1496   // safepoint would be possible and we would have trouble GC'ing the compiled args.
1497   RegisterMap reg_map(thread, false);
1498   frame stub_frame = thread->last_frame();
1499   assert(stub_frame.is_runtime_frame(), "sanity check");
1500   frame caller_frame = stub_frame.sender(&reg_map);
1501 
1502   if (caller_frame.is_interpreted_frame() ||
1503       caller_frame.is_entry_frame()) {
1504     Method* callee = thread->callee_target();
1505     guarantee(callee != NULL && callee->is_method(), "bad handshake");
1506     thread->set_vm_result_2(callee);
1507     thread->set_callee_target(NULL);
1508     return callee->get_c2i_entry();
1509   }
1510 
1511   // Must be compiled to compiled path which is safe to stackwalk
1512   methodHandle callee_method;
1513   bool is_optimized = false;
1514   JRT_BLOCK
1515     // Force resolving of caller (if we called from compiled frame)
1516     callee_method = SharedRuntime::reresolve_call_site(thread, is_optimized, CHECK_NULL);
1517     thread->set_vm_result_2(callee_method());
1518   JRT_BLOCK_END
1519   // return compiled code entry point after potential safepoints
1520   assert(callee_method->verified_code_entry() != NULL, "Jump to zero!");
1521   assert(callee_method->verified_value_ro_code_entry() != NULL, "Jump to zero!");
1522   return is_optimized ? callee_method->verified_code_entry() : callee_method->verified_value_ro_code_entry();
1523 JRT_END
1524 
1525 // Handle abstract method call
1526 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* thread))
1527   // Verbose error message for AbstractMethodError.
1528   // Get the called method from the invoke bytecode.
1529   vframeStream vfst(thread, true);
1530   assert(!vfst.at_end(), "Java frame must exist");
1531   methodHandle caller(vfst.method());
1532   Bytecode_invoke invoke(caller, vfst.bci());
1533   DEBUG_ONLY( invoke.verify(); )
1534 
1535   // Find the compiled caller frame.
1536   RegisterMap reg_map(thread);
1537   frame stubFrame = thread->last_frame();
1538   assert(stubFrame.is_runtime_frame(), "must be");
1539   frame callerFrame = stubFrame.sender(&reg_map);
1540   assert(callerFrame.is_compiled_frame(), "must be");
1541 
1542   // Install exception and return forward entry.


1545     methodHandle callee = invoke.static_target(thread);
1546     if (!callee.is_null()) {
1547       oop recv = callerFrame.retrieve_receiver(&reg_map);
1548       Klass *recv_klass = (recv != NULL) ? recv->klass() : NULL;
1549       LinkResolver::throw_abstract_method_error(callee, recv_klass, thread);
1550       res = StubRoutines::forward_exception_entry();
1551     }
1552   JRT_BLOCK_END
1553   return res;
1554 JRT_END
1555 
1556 
1557 // resolve a static call and patch code
1558 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread ))
1559   methodHandle callee_method;
1560   JRT_BLOCK
1561     callee_method = SharedRuntime::resolve_helper(thread, false, false, CHECK_NULL);
1562     thread->set_vm_result_2(callee_method());
1563   JRT_BLOCK_END
1564   // return compiled code entry point after potential safepoints
1565   assert(callee_method->verified_code_entry() != NULL, "Jump to zero!");
1566   return callee_method->verified_code_entry();
1567 JRT_END
1568 
1569 
1570 // resolve virtual call and update inline cache to monomorphic
1571 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread ))
1572   methodHandle callee_method;
1573   JRT_BLOCK
1574     callee_method = SharedRuntime::resolve_helper(thread, true, false, CHECK_NULL);
1575     thread->set_vm_result_2(callee_method());
1576   JRT_BLOCK_END
1577   // return compiled code entry point after potential safepoints
1578   assert(callee_method->verified_value_ro_code_entry() != NULL, "Jump to zero!");
1579   return callee_method->verified_value_ro_code_entry();
1580 JRT_END
1581 
1582 
1583 // Resolve a virtual call that can be statically bound (e.g., always
1584 // monomorphic, so it has no inline cache).  Patch code to resolved target.
1585 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread))
1586   methodHandle callee_method;
1587   JRT_BLOCK
1588     callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL);
1589     thread->set_vm_result_2(callee_method());
1590   JRT_BLOCK_END
1591   // return compiled code entry point after potential safepoints
1592   assert(callee_method->verified_code_entry() != NULL, "Jump to zero!");
1593   return callee_method->verified_code_entry();
1594 JRT_END
1595 
1596 // The handle_ic_miss_helper_internal function returns false if it failed,
1597 // either because it ran out of vtable stubs or because it ran out of IC stubs
1598 // while transitioning ICs to transitional states. The needs_ic_stub_refill value
1599 // will be set if the failure was due to running out of IC stubs, in which case
1600 // handle_ic_miss_helper refills the IC stubs and tries again.
1601 bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm,
1602                                                    const frame& caller_frame, methodHandle callee_method,
1603                                                    Bytecodes::Code bc, CallInfo& call_info,
1604                                                    bool& needs_ic_stub_refill, bool& is_optimized, TRAPS) {
1605   CompiledICLocker ml(caller_nm);
1606   CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1607   bool should_be_mono = false;
1608   if (inline_cache->is_optimized()) {
1609     if (TraceCallFixup) {
1610       ResourceMark rm(THREAD);
1611       tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
1612       callee_method->print_short_name(tty);
1613       tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1614     }
1615     is_optimized = true;
1616     should_be_mono = true;
1617   } else if (inline_cache->is_icholder_call()) {
1618     CompiledICHolder* ic_oop = inline_cache->cached_icholder();
1619     if (ic_oop != NULL) {
1620       if (!ic_oop->is_loader_alive()) {
1621         // Deferred IC cleaning due to concurrent class unloading
1622         if (!inline_cache->set_to_clean()) {
1623           needs_ic_stub_refill = true;
1624           return false;
1625         }
1626       } else if (receiver()->klass() == ic_oop->holder_klass()) {
1627         // This isn't a real miss. We must have seen that compiled code
1628         // is now available and we want the call site converted to a
1629         // monomorphic compiled call site.
1630         // We can't assert for callee_method->code() != NULL because it
1631         // could have been deoptimized in the meantime
1632         if (TraceCallFixup) {
1633           ResourceMark rm(THREAD);
1634           tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
1635           callee_method->print_short_name(tty);


1657     }
1658   } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1659     // Potential change to megamorphic
1660 
1661     bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, CHECK_false);
1662     if (needs_ic_stub_refill) {
1663       return false;
1664     }
1665     if (!successful) {
1666       if (!inline_cache->set_to_clean()) {
1667         needs_ic_stub_refill = true;
1668         return false;
1669       }
1670     }
1671   } else {
1672     // Either clean or megamorphic
1673   }
1674   return true;
1675 }
1676 
1677 methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, bool& is_optimized, TRAPS) {
1678   ResourceMark rm(thread);
1679   CallInfo call_info;
1680   Bytecodes::Code bc;
1681 
1682   // receiver is NULL for static calls. An exception is thrown for NULL
1683   // receivers for non-static calls
1684   Handle receiver = find_callee_info(thread, bc, call_info,
1685                                      CHECK_(methodHandle()));
1686   // Compiler1 can produce virtual call sites that can actually be statically bound.
1687   // If we fell through to below we would think that the site was going megamorphic
1688   // when in fact the site can never miss. Worse, because we'd think it was megamorphic
1689   // we'd try to do a vtable dispatch; however, methods that can be statically bound
1690   // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
1691   // reresolution of the call site (as if we did a handle_wrong_method and not a
1692   // plain ic_miss) and the site will be converted to an optimized virtual call site
1693   // never to miss again. I don't believe C2 will produce code like this but if it
1694   // did this would still be the correct thing to do for it too, hence no ifdef.
1695   //
1696   if (call_info.resolved_method()->can_be_statically_bound()) {
1697     methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, is_optimized, CHECK_(methodHandle()));
1698     if (TraceCallFixup) {
1699       RegisterMap reg_map(thread, false);
1700       frame caller_frame = thread->last_frame().sender(&reg_map);
1701       ResourceMark rm(thread);
1702       tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
1703       callee_method->print_short_name(tty);
1704       tty->print_cr(" from pc: " INTPTR_FORMAT, p2i(caller_frame.pc()));
1705       tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1706     }
1707     return callee_method;
1708   }
1709 
1710   methodHandle callee_method = call_info.selected_method();
1711 
1712 #ifndef PRODUCT
1713   Atomic::inc(&_ic_miss_ctr);
1714 
1715   // Statistics & Tracing
1716   if (TraceCallFixup) {
1717     ResourceMark rm(thread);


1732   // install an event collector so that when a vtable stub is created the
1733   // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1734   // event can't be posted when the stub is created as locks are held
1735   // - instead the event will be deferred until the event collector goes
1736   // out of scope.
1737   JvmtiDynamicCodeEventCollector event_collector;
1738 
1739   // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1740   // Transitioning IC caches may require transition stubs. If we run out
1741   // of transition stubs, we have to drop locks and perform a safepoint
1742   // that refills them.
1743   RegisterMap reg_map(thread, false);
1744   frame caller_frame = thread->last_frame().sender(&reg_map);
1745   CodeBlob* cb = caller_frame.cb();
1746   CompiledMethod* caller_nm = cb->as_compiled_method();
1747 
1748   for (;;) {
1749     ICRefillVerifier ic_refill_verifier;
1750     bool needs_ic_stub_refill = false;
1751     bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method,
1752                                                      bc, call_info, needs_ic_stub_refill, is_optimized, CHECK_(methodHandle()));
1753     if (successful || !needs_ic_stub_refill) {
1754       return callee_method;
1755     } else {
1756       InlineCacheBuffer::refill_ic_stubs();
1757     }
1758   }
1759 }
1760 
1761 static bool clear_ic_at_addr(CompiledMethod* caller_nm, address call_addr, bool is_static_call) {
1762   CompiledICLocker ml(caller_nm);
1763   if (is_static_call) {
1764     CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
1765     if (!ssc->is_clean()) {
1766       return ssc->set_to_clean();
1767     }
1768   } else {
1769     // compiled, dispatched call (which used to call an interpreted method)
1770     CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1771     if (!inline_cache->is_clean()) {
1772       return inline_cache->set_to_clean();
1773     }
1774   }
1775   return true;
1776 }
1777 
1778 //
1779 // Resets a call-site in compiled code so it will get resolved again.
1780 // This routine handles virtual call sites, optimized virtual call
1781 // sites, and static call sites. Typically used to change a call site's
1782 // destination from compiled to interpreted.
1783 //
1784 methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, bool& is_optimized, TRAPS) {
1785   ResourceMark rm(thread);
1786   RegisterMap reg_map(thread, false);
1787   frame stub_frame = thread->last_frame();
1788   assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1789   frame caller = stub_frame.sender(&reg_map);
1790 
1791   // Do nothing if the frame isn't a live compiled frame.
1792   // nmethod could be deoptimized by the time we get here
1793   // so no update to the caller is needed.
1794 
1795   if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
1796 
1797     address pc = caller.pc();
1798 
1799     // Check for static or virtual call
1800     bool is_static_call = false;
1801     CompiledMethod* caller_nm = CodeCache::find_compiled(pc);
1802 
1803     // Default call_addr is the location of the "basic" call.
1804     // Determine the address of the call we are reresolving. With


1825       CompiledICLocker ml(caller_nm);
1826       // Location of call instruction
1827       call_addr = caller_nm->call_instruction_address(pc);
1828     }
1829     // Make sure nmethod doesn't get deoptimized and removed until
1830     // this is done with it.
1831     // CLEANUP - with lazy deopt shouldn't need this lock
1832     nmethodLocker nmlock(caller_nm);
1833 
1834     if (call_addr != NULL) {
1835       RelocIterator iter(caller_nm, call_addr, call_addr+1);
1836       int ret = iter.next(); // Get item
1837       if (ret) {
1838         assert(iter.addr() == call_addr, "must find call");
1839         if (iter.type() == relocInfo::static_call_type) {
1840           is_static_call = true;
1841         } else {
1842           assert(iter.type() == relocInfo::virtual_call_type ||
1843                  iter.type() == relocInfo::opt_virtual_call_type
1844                 , "unexpected relocInfo. type");
1845           is_optimized = (iter.type() == relocInfo::opt_virtual_call_type);
1846         }
1847       } else {
1848         assert(!UseInlineCaches, "relocation info. must exist for this address");
1849       }
1850 
1851       // Cleaning the inline cache will force a new resolve. This is more robust
1852       // than directly setting it to the new destination, since resolving of calls
1853       // is always done through the same code path. (Experience shows that
1854       // updating an inline cache to a wrong method leads to bugs that are
1855       // very hard to track down.) It should not be performance critical, since the
1856       // resolve is only done once.
1857 
1858       for (;;) {
1859         ICRefillVerifier ic_refill_verifier;
1860         if (!clear_ic_at_addr(caller_nm, call_addr, is_static_call)) {
1861           InlineCacheBuffer::refill_ic_stubs();
1862         } else {
1863           break;
1864         }
1865       }
1866     }
1867   }
1868 
1869   methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));
1870 

1871 #ifndef PRODUCT
1872   Atomic::inc(&_wrong_method_ctr);
1873 
1874   if (TraceCallFixup) {
1875     ResourceMark rm(thread);
1876     tty->print("handle_wrong_method reresolving call to");
1877     callee_method->print_short_name(tty);
1878     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1879   }
1880 #endif
1881 
1882   return callee_method;
1883 }
1884 
1885 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1886   // The faulting unsafe accesses should be changed to throw the error
1887   // synchronously instead. Meanwhile the faulting instruction will be
1888   // skipped over (effectively turning it into a no-op) and an
1889   // asynchronous exception will be raised which the thread will
1890   // handle at a later point. If the instruction is a load it will


2346  private:
2347   enum {
2348     _basic_type_bits = 4,
2349     _basic_type_mask = right_n_bits(_basic_type_bits),
2350     _basic_types_per_int = BitsPerInt / _basic_type_bits,
2351     _compact_int_count = 3
2352   };
2353   // TO DO:  Consider integrating this with a more global scheme for compressing signatures.
2354   // For now, 4 bits per component (plus T_VOID gaps after double/long) is not excessive.
2355 
2356   union {
2357     int  _compact[_compact_int_count];
2358     int* _fingerprint;
2359   } _value;
2360   int _length; // A negative length indicates the fingerprint is in the compact form;
2361                // otherwise _value._fingerprint points to the heap-allocated array.
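  // For illustration (assumed values, not from the original source): a short
  // fingerprint of two packed ints is stored in place as
  //   _length = -2;  _value._compact = { v0, v1, 0 };
  // while a longer one spills to the C heap as
  //   _length = 5;   _value._fingerprint = NEW_C_HEAP_ARRAY(int, 5, mtCode);
  // so the sign of _length selects which union member is live.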
2362 
2363   // Remap BasicTypes that are handled equivalently by the adapters.
2364   // These are correct for the current system but someday it might be
2365   // necessary to make this mapping platform dependent.
2366   static int adapter_encoding(BasicType in, bool is_valuetype) {
2367     switch (in) {
2368       case T_BOOLEAN:
2369       case T_BYTE:
2370       case T_SHORT:
2371       case T_CHAR: {
2372         if (is_valuetype) {
2373           // Do not widen value type field types
2374           assert(ValueTypePassFieldsAsArgs, "must be enabled");
2375           return in;
2376         } else {
2377           // They are all promoted to T_INT in the calling convention
2378           return T_INT;
2379         }
2380       }
2381 
2382       case T_VALUETYPE: {
2383         // If value types are passed as fields, return 'in' to differentiate
2384         // between a T_VALUETYPE and a T_OBJECT in the signature.
2385         return ValueTypePassFieldsAsArgs ? in : adapter_encoding(T_OBJECT, false);
2386       }
2387 
2388       case T_OBJECT:
2389       case T_ARRAY:
2390         // In other words, we assume that any register good enough for
2391         // an int or long is good enough for a managed pointer.
2392 #ifdef _LP64
2393         return T_LONG;
2394 #else
2395         return T_INT;
2396 #endif
2397 
2398       case T_INT:
2399       case T_LONG:
2400       case T_FLOAT:
2401       case T_DOUBLE:
2402       case T_VOID:
2403         return in;
2404 
2405       default:
2406         ShouldNotReachHere();
2407         return T_CONFLICT;
2408     }
2409   }
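  // Illustrative encodings (assumptions, not from the original source), on an
  // _LP64 build with ValueTypePassFieldsAsArgs enabled:
  //   adapter_encoding(T_BYTE,      false) == T_INT       (widened like all sub-int types)
  //   adapter_encoding(T_BYTE,      true)  == T_BYTE      (value type fields keep their width)
  //   adapter_encoding(T_OBJECT,    false) == T_LONG      (a managed pointer fits a long register)
  //   adapter_encoding(T_VALUETYPE, false) == T_VALUETYPE (kept distinct from T_OBJECT)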
2410 
2411  public:
2412   AdapterFingerPrint(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2413     // The fingerprint is based on the BasicType signature encoded
2414     // into an array of ints with eight entries per int.
2415     int total_args_passed = (sig != NULL) ? sig->length() : 0;
2416     int* ptr;
2417     int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
2418     if (len <= _compact_int_count) {
2419       assert(_compact_int_count == 3, "else change next line");
2420       _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
2421       // Storing the signature encoded as signed chars hits about 98%
2422       // of the time.
2423       _length = -len;
2424       ptr = _value._compact;
2425     } else {
2426       _length = len;
2427       _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length, mtCode);
2428       ptr = _value._fingerprint;
2429     }
2430 
2431     // Now pack the BasicTypes with 8 per int
2432     int sig_index = 0;
2433     BasicType prev_sbt = T_ILLEGAL;
2434     int vt_count = 0;
2435     for (int index = 0; index < len; index++) {
2436       int value = 0;
2437       for (int byte = 0; byte < _basic_types_per_int; byte++) {
2438         int bt = 0;
2439         if (sig_index < total_args_passed) {
2440           BasicType sbt = sig->at(sig_index++)._bt;
2441           if (ValueTypePassFieldsAsArgs && sbt == T_VALUETYPE) {
2442             // Found start of value type in signature
2443             vt_count++;
2444             if (sig_index == 1 && has_ro_adapter) {
2445               // With a ro_adapter, replace receiver value type delimiter by T_VOID to prevent matching
2446               // with other adapters that have the same value type as first argument and no receiver.
2447               sbt = T_VOID;
2448             }
2449           } else if (ValueTypePassFieldsAsArgs && sbt == T_VOID &&
2450                      prev_sbt != T_LONG && prev_sbt != T_DOUBLE) {
2451             // Found end of value type in signature
2452             vt_count--;
2453             assert(vt_count >= 0, "invalid vt_count");
2454           }
2455           bt = adapter_encoding(sbt, vt_count > 0);
2456           prev_sbt = sbt;
2457         }
2458         assert((bt & _basic_type_mask) == bt, "must fit in 4 bits");
2459         value = (value << _basic_type_bits) | bt;
2460       }
2461       ptr[index] = value;
2462     }
2463     assert(vt_count == 0, "invalid vt_count");
2464   }
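  // A minimal worked example (assumed signature, not from the original source):
  // with sig = { T_OBJECT (receiver), T_INT, T_LONG, T_VOID (long's upper half) },
  // total_args_passed == 4, so len == (4 + 7) / 8 == 1 and the compact form is
  // used (_length == -1). The single packed int carries the four adapter
  // encodings, 4 bits each, in its high nibbles, with the unused low slots zero.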
2465 
2466   ~AdapterFingerPrint() {
2467     if (_length > 0) {
2468       FREE_C_HEAP_ARRAY(int, _value._fingerprint);
2469     }
2470   }
2471 
2472   int value(int index) {
2473     if (_length < 0) {
2474       return _value._compact[index];
2475     }
2476     return _value._fingerprint[index];
2477   }
2478   int length() {
2479     if (_length < 0) return -_length;
2480     return _length;
2481   }
2482 
2483   bool is_compact() {


2529 
2530  private:
2531 
2532 #ifndef PRODUCT
2533   static int _lookups; // number of calls to lookup
2534   static int _buckets; // number of buckets checked
2535   static int _equals;  // number of buckets checked with matching hash
2536   static int _hits;    // number of successful lookups
2537   static int _compact; // number of equals calls with compact signature
2538 #endif
2539 
2540   AdapterHandlerEntry* bucket(int i) {
2541     return (AdapterHandlerEntry*)BasicHashtable<mtCode>::bucket(i);
2542   }
2543 
2544  public:
2545   AdapterHandlerTable()
2546     : BasicHashtable<mtCode>(293, (DumpSharedSpaces ? sizeof(CDSAdapterHandlerEntry) : sizeof(AdapterHandlerEntry))) { }
2547 
2548   // Create a new entry suitable for insertion in the table
2549   AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_value_entry, address c2i_value_ro_entry, address c2i_unverified_entry) {
2550     AdapterHandlerEntry* entry = (AdapterHandlerEntry*)BasicHashtable<mtCode>::new_entry(fingerprint->compute_hash());
2551     entry->init(fingerprint, i2c_entry, c2i_entry, c2i_value_entry, c2i_value_ro_entry, c2i_unverified_entry);
2552     if (DumpSharedSpaces) {
2553       ((CDSAdapterHandlerEntry*)entry)->init();
2554     }
2555     return entry;
2556   }
2557 
2558   // Insert an entry into the table
2559   void add(AdapterHandlerEntry* entry) {
2560     int index = hash_to_index(entry->hash());
2561     add_entry(index, entry);
2562   }
2563 
2564   void free_entry(AdapterHandlerEntry* entry) {
2565     entry->deallocate();
2566     BasicHashtable<mtCode>::free_entry(entry);
2567   }
2568 
2569   // Find an entry with the same fingerprint if it exists
2570   AdapterHandlerEntry* lookup(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2571     NOT_PRODUCT(_lookups++);
2572     AdapterFingerPrint fp(sig, has_ro_adapter);
2573     unsigned int hash = fp.compute_hash();
2574     int index = hash_to_index(hash);
2575     for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) {
2576       NOT_PRODUCT(_buckets++);
2577       if (e->hash() == hash) {
2578         NOT_PRODUCT(_equals++);
2579         if (fp.equals(e->fingerprint())) {
2580 #ifndef PRODUCT
2581           if (fp.is_compact()) _compact++;
2582           _hits++;
2583 #endif
2584           return e;
2585         }
2586       }
2587     }
2588     return NULL;
2589   }
2590 
2591 #ifndef PRODUCT
2592   void print_statistics() {


2672   // Should be called only when AdapterHandlerLibrary_lock is active.
2673   if (_buffer == NULL) // Initialize lazily
2674       _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2675   return _buffer;
2676 }
2677 
2678 extern "C" void unexpected_adapter_call() {
2679   ShouldNotCallThis();
2680 }
2681 
2682 void AdapterHandlerLibrary::initialize() {
2683   if (_adapters != NULL) return;
2684   _adapters = new AdapterHandlerTable();
2685 
2686   // Create a special handler for abstract methods.  Abstract methods
2687   // are never compiled so an i2c entry is somewhat meaningless, but
2688   // throw AbstractMethodError just in case.
2689   // Pass wrong_method_abstract for the c2i transitions to return
2690   // AbstractMethodError for invalid invocations.
2691   address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2692   _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(NULL),
2693                                                               StubRoutines::throw_AbstractMethodError_entry(),
2694                                                               wrong_method_abstract, wrong_method_abstract, wrong_method_abstract, wrong_method_abstract);
2695 }
2696 
2697 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
2698                                                       address i2c_entry,
2699                                                       address c2i_entry,
2700                                                       address c2i_value_entry,
2701                                                       address c2i_value_ro_entry,
2702                                                       address c2i_unverified_entry) {
2703   return _adapters->new_entry(fingerprint, i2c_entry, c2i_entry, c2i_value_entry, c2i_value_ro_entry, c2i_unverified_entry);
2704 }
2705 
2706 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
2707   AdapterHandlerEntry* entry = get_adapter0(method);
2708   if (method->is_shared()) {
2709     // See comments around Method::link_method()
2710     MutexLocker mu(AdapterHandlerLibrary_lock);
2711     if (method->adapter() == NULL) {
2712       method->update_adapter_trampoline(entry);
2713     }
2714     address trampoline = method->from_compiled_entry();
2715     if (*(int*)trampoline == 0) {
2716       CodeBuffer buffer(trampoline, (int)SharedRuntime::trampoline_size());
2717       MacroAssembler _masm(&buffer);
2718       SharedRuntime::generate_trampoline(&_masm, entry->get_c2i_entry());
2719       assert(*(int*)trampoline != 0, "Instruction(s) for trampoline must not be encoded as zeros.");
2720 
2721       if (PrintInterpreter) {
2722         Disassembler::decode(buffer.insts_begin(), buffer.insts_end());
2723       }
2724     }
2725   }
2726 
2727   return entry;
2728 }
2729 
2730 static int compute_scalarized_cc(const methodHandle& method, GrowableArray<SigEntry>& sig_cc, VMRegPair*& regs_cc, bool scalar_receiver) {
2731   InstanceKlass* holder = method->method_holder();
2732   sig_cc = GrowableArray<SigEntry>(method->size_of_parameters());
2733   if (!method->is_static()) {
2734     if (holder->is_value() && scalar_receiver) {
2735       sig_cc.appendAll(ValueKlass::cast(holder)->extended_sig());
2736     } else {
2737       SigEntry::add_entry(&sig_cc, T_OBJECT);
2738     }
2739   }
2740   Thread* THREAD = Thread::current();
2741   for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
2742     if (ss.type() == T_VALUETYPE) {
2743       Klass* k = ss.as_klass(Handle(THREAD, holder->class_loader()),
2744                              Handle(THREAD, holder->protection_domain()),
2745                              SignatureStream::ReturnNull, THREAD);
2746       assert(k != NULL && !HAS_PENDING_EXCEPTION, "value klass should have been pre-loaded");
2747       sig_cc.appendAll(ValueKlass::cast(k)->extended_sig());
2748     } else {
2749       SigEntry::add_entry(&sig_cc, ss.type());
2750     }
2751   }
2752   regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, sig_cc.length() + 2);
2753   return SharedRuntime::java_calling_convention(&sig_cc, regs_cc);
2754 }
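// An illustrative expansion (hypothetical value class, not from the original
// source): for a value class Point with two int fields x and y, a method
// void m(Point p, long l) on a non-value holder yields a scalarized sig_cc of
// roughly { T_OBJECT (receiver), T_VALUETYPE (start of Point), T_INT (x),
// T_INT (y), T_VOID (end of Point), T_LONG, T_VOID }, the exact delimiter
// layout being determined by ValueKlass::extended_sig().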
2755 
2756 static int insert_reserved_entry(GrowableArray<SigEntry>& sig_cc, VMRegPair*& regs_cc, int ret_off) {
2757   // Find index in signature that belongs to return address slot
2758   BasicType bt = T_ILLEGAL;
2759   int i = 0;
2760   for (uint off = 0; i < sig_cc.length(); ++i) {
2761     if (SigEntry::skip_value_delimiters(&sig_cc, i)) {
2762       VMReg first = regs_cc[off++].first();
2763       if (first->is_valid() && first->is_stack()) {
2764         // Select a type for the reserved entry that will end up on the stack
2765         bt = sig_cc.at(i)._bt;
2766         if (((int)first->reg2stack() + VMRegImpl::slots_per_word) == ret_off) {
2767           break; // Index of the return address found
2768         }
2769       }
2770     }
2771   }
2772   // Insert reserved entry and re-compute calling convention
2773   SigEntry::insert_reserved_entry(&sig_cc, i, bt);
2774   return SharedRuntime::java_calling_convention(&sig_cc, regs_cc);
2775 }
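// For illustration (assumed slot counts, not from the original source): the
// loop above walks the scalarized args until it finds one whose first stack
// slot satisfies reg2stack() + slots_per_word == ret_off; a reserved entry of
// the same BasicType is inserted at that signature index, so re-running
// java_calling_convention leaves the return address slot unoccupied.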
2776 
2777 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter0(const methodHandle& method) {
2778   // Use customized signature handler.  Need to lock around updates to
2779   // the AdapterHandlerTable (it is not safe for concurrent readers
2780   // and a single writer: this could be fixed if it becomes a
2781   // problem).
2782 
2783   ResourceMark rm;
2784 
2785   NOT_PRODUCT(int insts_size = 0);
2786   AdapterBlob* new_adapter = NULL;
2787   AdapterHandlerEntry* entry = NULL;
2788   AdapterFingerPrint* fingerprint = NULL;
2789 
2790   {
2791     MutexLocker mu(AdapterHandlerLibrary_lock);
2792     // make sure data structure is initialized
2793     initialize();
2794 
2795     bool has_value_arg = false;
2796     bool has_value_recv = false;
2797     GrowableArray<SigEntry> sig(method->size_of_parameters());
2798     if (!method->is_static()) {
2799       has_value_recv = method->method_holder()->is_value();
2800       has_value_arg = has_value_recv;
2801       SigEntry::add_entry(&sig, T_OBJECT);
2802     }
2803     for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
2804       BasicType bt = ss.type();
2805       if (bt == T_VALUETYPE) {
2806         has_value_arg = true;
2807         bt = T_OBJECT;
2808       }
2809       SigEntry::add_entry(&sig, bt);
2810     }
2811 
2812     // Process abstract method if it has value type args to set has_scalarized_args accordingly
2813     if (method->is_abstract() && !(ValueTypePassFieldsAsArgs && has_value_arg)) {
2814       return _abstract_method_handler;
2815     }
2816 
2817     // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
2818     VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sig.length());
2819     int args_on_stack = SharedRuntime::java_calling_convention(&sig, regs);
2820 
2821     // Now compute the scalarized calling convention if there are value types in the signature
2822     GrowableArray<SigEntry> sig_cc = sig;
2823     GrowableArray<SigEntry> sig_cc_ro = sig;
2824     VMRegPair* regs_cc = regs;
2825     VMRegPair* regs_cc_ro = regs;
2826     int args_on_stack_cc = args_on_stack;
2827     int args_on_stack_cc_ro = args_on_stack;
2828 
2829     if (ValueTypePassFieldsAsArgs && has_value_arg && !method->is_native()) {
2830       MutexUnlocker mul(AdapterHandlerLibrary_lock);
2831       args_on_stack_cc = compute_scalarized_cc(method, sig_cc, regs_cc, /* scalar_receiver = */ true);
2832 
2833       sig_cc_ro = sig_cc;
2834       regs_cc_ro = regs_cc;
2835       args_on_stack_cc_ro = args_on_stack_cc;
2836       if (has_value_recv || args_on_stack_cc > args_on_stack) {
2837         // For interface calls, we need another entry point / adapter to unpack the receiver
2838         args_on_stack_cc_ro = compute_scalarized_cc(method, sig_cc_ro, regs_cc_ro, /* scalar_receiver = */ false);
2839       }
2840 
2841       // Compute the stack extension that is required to convert between the calling conventions.
2842       // The stack slots at these offsets are occupied by the return address with the unscalarized
2843       // calling convention. Don't use them for arguments with the scalarized calling convention.
2844       int ret_off    = args_on_stack_cc - args_on_stack;
2845       int ret_off_ro = args_on_stack_cc - args_on_stack_cc_ro;
2846       assert(ret_off_ro <= 0 || ret_off > 0, "receiver unpacking requires more stack space than expected");
2847 
2848       if (ret_off > 0) {
2849         // Make sure the stack of the scalarized calling convention with the reserved
2850         // entries (2 slots each) remains 16-byte (4 slots) aligned after stack extension.
2851         int alignment = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
2852         if (ret_off_ro != ret_off && ret_off_ro >= 0) {
2853           ret_off    += 4; // Account for two reserved entries (4 slots)
2854           ret_off_ro += 4;
2855           ret_off     = align_up(ret_off, alignment);
2856           ret_off_ro  = align_up(ret_off_ro, alignment);
2857           // TODO can we avoid wasting a stack slot here?
2858           //assert(ret_off != ret_off_ro, "fail");
2859           if (ret_off > ret_off_ro) {
2860             swap(ret_off, ret_off_ro); // Sort by offset
2861           }
2862           args_on_stack_cc = insert_reserved_entry(sig_cc, regs_cc, ret_off);
2863           args_on_stack_cc = insert_reserved_entry(sig_cc, regs_cc, ret_off_ro);
2864         } else {
2865           ret_off += 2; // Account for one reserved entry (2 slots)
2866           ret_off = align_up(ret_off, alignment);
2867           args_on_stack_cc = insert_reserved_entry(sig_cc, regs_cc, ret_off);
2868         }
2869       }
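      // A numeric sketch (assumed values, not from the original source): with
      // args_on_stack == 2, args_on_stack_cc == 8 and args_on_stack_cc_ro == 6,
      // ret_off == 6 and ret_off_ro == 2. Both offsets are bumped by 4 slots
      // for the two reserved entries, aligned up to the 4-slot (16-byte) stack
      // alignment, sorted, and a reserved entry is inserted at each offset so
      // neither scalarized convention overwrites a return address slot.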
2870 
2871       // Upper bound on stack arguments to avoid hitting the argument limit and
2872       // bailing out of compilation ("unsupported incoming calling sequence").
2873       // TODO we need a reasonable limit (flag?) here
2874       if (args_on_stack_cc > 50) {
2875         // Don't scalarize value type arguments
2876         sig_cc = sig;
2877         sig_cc_ro = sig;
2878         regs_cc = regs;
2879         regs_cc_ro = regs;
2880         args_on_stack_cc = args_on_stack;
2881       } else {
2882         method->set_has_scalarized_args(true);
2883         method->set_needs_stack_repair(args_on_stack_cc > args_on_stack);
2884       }
2885     }
2886 
2887     if (method->is_abstract()) {
2888       // Save a C heap allocated version of the signature for abstract methods with scalarized value type arguments
2889       assert(ValueTypePassFieldsAsArgs && has_value_arg, "must have scalarized value type args");
2890       address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2891       entry = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(NULL),
2892                                                StubRoutines::throw_AbstractMethodError_entry(),
2893                                                wrong_method_abstract, wrong_method_abstract, wrong_method_abstract, wrong_method_abstract);
2894       GrowableArray<SigEntry>* heap_sig = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<SigEntry>(sig_cc_ro.length(), true);
2895       heap_sig->appendAll(&sig_cc_ro);
2896       entry->set_sig_cc(heap_sig);
2897       return entry;
2898     }

2899 
2900     // Lookup method signature's fingerprint
2901     entry = _adapters->lookup(&sig_cc, regs_cc != regs_cc_ro);
2902 
2903 #ifdef ASSERT
2904     AdapterHandlerEntry* shared_entry = NULL;
2905     // Start adapter sharing verification only after the VM is booted.
2906     if (VerifyAdapterSharing && (entry != NULL)) {
2907       shared_entry = entry;
2908       entry = NULL;
2909     }
2910 #endif
2911 
2912     if (entry != NULL) {
2913       return entry;
2914     }
2915 



2916     // Make a C heap allocated version of the fingerprint to store in the adapter
2917     fingerprint = new AdapterFingerPrint(&sig_cc, regs_cc != regs_cc_ro);
2918 
2919     // StubRoutines::code2() is initialized after this function can be called. As a result,
2920     // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that generated
2921     // prior to StubRoutines::code2() being set. Checks refer to checks generated in an I2C
2922     // stub that ensure that an I2C stub is called from an interpreter frame.
2923     bool contains_all_checks = StubRoutines::code2() != NULL;
2924 
2925     // Create I2C & C2I handlers
2926     BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2927     if (buf != NULL) {
2928       CodeBuffer buffer(buf);
2929       short buffer_locs[20];
2930       buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
2931                                              sizeof(buffer_locs)/sizeof(relocInfo));
2932 
2933       MacroAssembler _masm(&buffer);
2934       entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
2935                                                      args_on_stack,
2936                                                      args_on_stack_cc,
2937                                                      &sig,
2938                                                      regs,
2939                                                      &sig_cc,
2940                                                      regs_cc,
2941                                                      &sig_cc_ro,
2942                                                      regs_cc_ro,
2943                                                      fingerprint,
2944                                                      new_adapter);
2945 
2946       if (regs != regs_cc) {
2947         // Save a C heap allocated version of the scalarized signature and store it in the adapter
2948         GrowableArray<SigEntry>* heap_sig = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<SigEntry>(sig_cc.length(), true);
2949         heap_sig->appendAll(&sig_cc);
2950         entry->set_sig_cc(heap_sig);
2951       }
2952 
2953 #ifdef ASSERT
2954       if (VerifyAdapterSharing) {
2955         if (shared_entry != NULL) {
2956           if (!shared_entry->compare_code(buf->code_begin(), buffer.insts_size())) {
2957             method->print();
2958           }
2959           assert(shared_entry->compare_code(buf->code_begin(), buffer.insts_size()), "code must match");
2960           // Release the one just created and return the original
2961           _adapters->free_entry(entry);
2962           return shared_entry;
2963         } else  {
2964           entry->save_code(buf->code_begin(), buffer.insts_size());
2965         }
2966       }
2967 #endif
2968 

2969       NOT_PRODUCT(insts_size = buffer.insts_size());
2970     }
2971     if (new_adapter == NULL) {
2972       // CodeCache is full, disable compilation
2973       // Ought to log this but compile log is only per compile thread
2974       // and we're some nondescript Java thread.
2975       return NULL; // Out of CodeCache space
2976     }
2977     entry->relocate(new_adapter->content_begin());
2978 #ifndef PRODUCT
2979     // debugging support
2980     if (PrintAdapterHandlers || PrintStubCode) {
2981       ttyLocker ttyl;
2982       entry->print_adapter_on(tty);
2983       tty->print_cr("i2c argument handler #%d for: %s %s %s (%d bytes generated)",
2984                     _adapters->number_of_entries(), (method->is_static() ? "static" : "receiver"),
2985                     method->signature()->as_C_string(), fingerprint->as_string(), insts_size);
2986       tty->print_cr("c2i argument handler starts at %p", entry->get_c2i_entry());
2987       if (Verbose || PrintStubCode) {
2988         address first_pc = entry->base_address();


3004     char blob_id[256];
3005     jio_snprintf(blob_id,
3006                  sizeof(blob_id),
3007                  "%s(%s)@" PTR_FORMAT,
3008                  new_adapter->name(),
3009                  fingerprint->as_string(),
3010                  new_adapter->content_begin());
3011     Forte::register_stub(blob_id, new_adapter->content_begin(), new_adapter->content_end());
3012 
3013     if (JvmtiExport::should_post_dynamic_code_generated()) {
3014       JvmtiExport::post_dynamic_code_generated(blob_id, new_adapter->content_begin(), new_adapter->content_end());
3015     }
3016   }
3017   return entry;
3018 }
3019 
3020 address AdapterHandlerEntry::base_address() {
3021   address base = _i2c_entry;
3022   if (base == NULL)  base = _c2i_entry;
3023   assert(base <= _c2i_entry || _c2i_entry == NULL, "");
3024   assert(base <= _c2i_value_entry || _c2i_value_entry == NULL, "");
3025   assert(base <= _c2i_value_ro_entry || _c2i_value_ro_entry == NULL, "");
3026   assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == NULL, "");
3027   return base;
3028 }
3029 
3030 void AdapterHandlerEntry::relocate(address new_base) {
3031   address old_base = base_address();
3032   assert(old_base != NULL, "");
3033   ptrdiff_t delta = new_base - old_base;
3034   if (_i2c_entry != NULL)
3035     _i2c_entry += delta;
3036   if (_c2i_entry != NULL)
3037     _c2i_entry += delta;
3038   if (_c2i_value_entry != NULL)
3039     _c2i_value_entry += delta;
3040   if (_c2i_value_ro_entry != NULL)
3041     _c2i_value_ro_entry += delta;
3042   if (_c2i_unverified_entry != NULL)
3043     _c2i_unverified_entry += delta;
3044   assert(base_address() == new_base, "");
3045 }
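// A minimal worked example (assumed addresses, not from the original source):
// if base_address() was 0x7f0000001000 and the entry is relocated into a new
// AdapterBlob whose content begins at 0x7f0000002000, delta == 0x1000 and
// every non-NULL entry point shifts by that amount, preserving the relative
// layout of the i2c/c2i entries inside the blob.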
3046 
3047 
3048 void AdapterHandlerEntry::deallocate() {
3049   delete _fingerprint;
3050   if (_sig_cc != NULL) {
3051     delete _sig_cc;
3052   }
3053 #ifdef ASSERT
3054   if (_saved_code) FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3055 #endif
3056 }
3057 
3058 
3059 #ifdef ASSERT
3060 // Capture the code before relocation so that it can be compared
3061 // against other versions.  If the code is captured after relocation
3062 // then relative instructions won't be equivalent.
3063 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3064   _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3065   _saved_code_length = length;
3066   memcpy(_saved_code, buffer, length);
3067 }
3068 
3069 
3070 bool AdapterHandlerEntry::compare_code(unsigned char* buffer, int length) {
3071   if (length != _saved_code_length) {
3072     return false;


3104 
3105 
3106     ResourceMark rm;
3107     BufferBlob*  buf = buffer_blob(); // the temporary code buffer in CodeCache
3108     if (buf != NULL) {
3109       CodeBuffer buffer(buf);
3110       double locs_buf[20];
3111       buffer.insts()->initialize_shared_locs((relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3112       MacroAssembler _masm(&buffer);
3113 
3114       // Fill in the signature array, for the calling-convention call.
3115       const int total_args_passed = method->size_of_parameters();
3116 
3117       BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
3118       VMRegPair*   regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3119       int i=0;
3120       if (!method->is_static())  // Pass in receiver first
3121         sig_bt[i++] = T_OBJECT;
3122       SignatureStream ss(method->signature());
3123       for (; !ss.at_return_type(); ss.next()) {
3124         BasicType bt = ss.type();
3125         sig_bt[i++] = bt;  // Collect remaining bits of signature
3126         if (ss.type() == T_LONG || ss.type() == T_DOUBLE)
3127           sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
3128       }
3129       assert(i == total_args_passed, "");
3130       BasicType ret_type = ss.type();
3131 
3132       // Now get the compiled-Java layout as input (or output) arguments.
3133       // NOTE: Stubs for compiled entry points of method handle intrinsics
3134       // are just trampolines so the argument registers must be outgoing ones.
3135       const bool is_outgoing = method->is_method_handle_intrinsic();
3136       int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, is_outgoing);
3137 
3138       // Generate the compiled-to-native wrapper code
3139       nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3140 
3141       if (nm != NULL) {
3142         method->set_code(method, nm);
3143 
3144         DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_simple));
3145         if (directive->PrintAssemblyOption) {


3220   int cnt = 0;
3221   if (has_receiver) {
3222     sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
3223   }
3224 
3225   while (*s != ')') {          // Find closing right paren
3226     switch (*s++) {            // Switch on signature character
3227     case 'B': sig_bt[cnt++] = T_BYTE;    break;
3228     case 'C': sig_bt[cnt++] = T_CHAR;    break;
3229     case 'D': sig_bt[cnt++] = T_DOUBLE;  sig_bt[cnt++] = T_VOID; break;
3230     case 'F': sig_bt[cnt++] = T_FLOAT;   break;
3231     case 'I': sig_bt[cnt++] = T_INT;     break;
3232     case 'J': sig_bt[cnt++] = T_LONG;    sig_bt[cnt++] = T_VOID; break;
3233     case 'S': sig_bt[cnt++] = T_SHORT;   break;
3234     case 'Z': sig_bt[cnt++] = T_BOOLEAN; break;
3235     case 'V': sig_bt[cnt++] = T_VOID;    break;
3236     case 'L':                   // Oop
3237       while (*s++ != ';');   // Skip signature
3238       sig_bt[cnt++] = T_OBJECT;
3239       break;
3240     case 'Q':                // Value type
3241       while (*s++ != ';');   // Skip signature
3242       sig_bt[cnt++] = T_VALUETYPE;
3243       break;
3244     case '[': {                 // Array
3245       do {                      // Skip optional size
3246         while (*s >= '0' && *s <= '9') s++;
3247       } while (*s++ == '[');   // Nested arrays?
3248       // Skip element type
3249       if (s[-1] == 'L' || s[-1] == 'Q')
3250         while (*s++ != ';'); // Skip signature
3251       sig_bt[cnt++] = T_ARRAY;
3252       break;
3253     }
3254     default : ShouldNotReachHere();
3255     }
3256   }
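  // For example (hypothetical signature, not from the original source), parsing
  // "(I[JLjava/lang/String;QPoint;)V" appends T_INT, T_ARRAY, T_OBJECT and
  // T_VALUETYPE to sig_bt (after a leading T_OBJECT when has_receiver is true);
  // note that '[J' consumes its element type, so only T_ARRAY is recorded.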
3257 
3258   if (has_appendix) {
3259     sig_bt[cnt++] = T_OBJECT;
3260   }
3261 
3262   assert(cnt < 256, "grow table size");
3263 
3264   int comp_args_on_stack;
3265   comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt, true);
3266 
3267   // the calling convention doesn't count out_preserve_stack_slots so
3268   // we must add that in to get "true" stack offsets.
3269 


3370     AdapterHandlerEntry* a = iter.next();
3371     if (b == CodeCache::find_blob(a->get_i2c_entry())) return true;
3372   }
3373   return false;
3374 }
3375 
3376 void AdapterHandlerLibrary::print_handler_on(outputStream* st, const CodeBlob* b) {
3377   AdapterHandlerTableIterator iter(_adapters);
3378   while (iter.has_next()) {
3379     AdapterHandlerEntry* a = iter.next();
3380     if (b == CodeCache::find_blob(a->get_i2c_entry())) {
3381       st->print("Adapter for signature: ");
3382       a->print_adapter_on(st);
3383       return;
3384     }
3385   }
3386   assert(false, "Should have found handler");
3387 }
3388 
3389 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3390   st->print_cr("AHE@" INTPTR_FORMAT ": %s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iVE: " INTPTR_FORMAT " c2iVROE: " INTPTR_FORMAT " c2iUE: " INTPTR_FORMAT,
3391                p2i(this), fingerprint()->as_string(),
3392                p2i(get_i2c_entry()), p2i(get_c2i_entry()), p2i(get_c2i_value_entry()), p2i(get_c2i_value_ro_entry()), p2i(get_c2i_unverified_entry()));
3393 
3394 }
3395 
3396 #if INCLUDE_CDS
3397 
3398 void CDSAdapterHandlerEntry::init() {
3399   assert(DumpSharedSpaces, "used during dump time only");
3400   _c2i_entry_trampoline = (address)MetaspaceShared::misc_code_space_alloc(SharedRuntime::trampoline_size());
3401   _adapter_trampoline = (AdapterHandlerEntry**)MetaspaceShared::misc_code_space_alloc(sizeof(AdapterHandlerEntry*));
3402 }
3403 
3404 #endif // INCLUDE_CDS
3405 
3406 
3407 #ifndef PRODUCT
3408 
3409 void AdapterHandlerLibrary::print_statistics() {
3410   _adapters->print_statistics();
3411 }
3412 


3466       break;
3467     } else {
3468       fr = fr.java_sender();
3469     }
3470   }
3471   return activation;
3472 }
3473 
3474 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* thread) {
3475   // After any safepoint, just before going back to compiled code,
3476   // we inform the GC that we will be doing initializing writes to
3477   // this object in the future without emitting card-marks, so
3478   // GC may take any compensating steps.
3479 
3480   oop new_obj = thread->vm_result();
3481   if (new_obj == NULL) return;
3482 
3483   BarrierSet *bs = BarrierSet::barrier_set();
3484   bs->on_slowpath_allocation_exit(thread, new_obj);
3485 }
3486 
3487 // We are at a compiled-code-to-interpreter call. We need backing
3488 // buffers for all value type arguments. Allocate an object array to
3489 // hold them (convenient because once we're done with it we don't have
3490 // to worry about freeing it).
3491 JRT_ENTRY(void, SharedRuntime::allocate_value_types(JavaThread* thread, Method* callee_method, bool allocate_receiver))
3492 {
3493   assert(ValueTypePassFieldsAsArgs, "no reason to call this");
3494   ResourceMark rm;
3495   JavaThread* THREAD = thread;
3496   methodHandle callee(callee_method);
3497 
3498   int nb_slots = 0;
3499   InstanceKlass* holder = callee->method_holder();
3500   allocate_receiver &= !callee->is_static() && holder->is_value();
3501   if (allocate_receiver) {
3502     nb_slots++;
3503   }
3504   Handle class_loader(THREAD, holder->class_loader());
3505   Handle protection_domain(THREAD, holder->protection_domain());
3506   for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
3507     if (ss.type() == T_VALUETYPE) {
3508       nb_slots++;
3509     }
3510   }
3511   objArrayOop array_oop = oopFactory::new_objectArray(nb_slots, CHECK);
3512   objArrayHandle array(THREAD, array_oop);
3513   int i = 0;
3514   if (allocate_receiver) {
3515     ValueKlass* vk = ValueKlass::cast(holder);
3516     oop res = vk->allocate_instance(CHECK);
3517     array->obj_at_put(i, res);
3518     i++;
3519   }
3520   for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
3521     if (ss.type() == T_VALUETYPE) {
3522       Klass* k = ss.as_klass(class_loader, protection_domain, SignatureStream::ReturnNull, THREAD);
3523       assert(k != NULL && !HAS_PENDING_EXCEPTION, "can't resolve klass");
3524       ValueKlass* vk = ValueKlass::cast(k);
3525       oop res = vk->allocate_instance(CHECK);
3526       array->obj_at_put(i, res);
3527       i++;
3528     }
3529   }
3530   thread->set_vm_result(array());
3531   thread->set_vm_result_2(callee()); // TODO: required to keep callee live?
3532 }
3533 JRT_END
3534 
3535 // Iterate over the array of heap-allocated value types and apply the GC post barrier to all reference fields.
3536 // This is called from the C2I adapter after value type arguments are heap allocated and initialized.
3537 JRT_LEAF(void, SharedRuntime::apply_post_barriers(JavaThread* thread, objArrayOopDesc* array))
3538 {
3539   assert(ValueTypePassFieldsAsArgs, "no reason to call this");
3540   assert(oopDesc::is_oop(array), "should be oop");
3541   for (int i = 0; i < array->length(); ++i) {
3542     instanceOop valueOop = (instanceOop)array->obj_at(i);
3543     ValueKlass* vk = ValueKlass::cast(valueOop->klass());
3544     if (vk->contains_oops()) {
3545       const address dst_oop_addr = ((address) (void*) valueOop);
3546       OopMapBlock* map = vk->start_of_nonstatic_oop_maps();
3547       OopMapBlock* const end = map + vk->nonstatic_oop_map_count();
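           // Apply the post barrier to each contiguous block of oop fields described by the oop maps.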
3548       while (map != end) {
3549         address doop_address = dst_oop_addr + map->offset();
3550         barrier_set_cast<ModRefBarrierSet>(BarrierSet::barrier_set())->
3551           write_ref_array((HeapWord*) doop_address, map->count());
3552         map++;
3553       }
3554     }
3555   }
3556 }
3557 JRT_END
3558 
3559 // We're returning from an interpreted method: load each field into a
3560 // register, following the calling convention.
3561 JRT_LEAF(void, SharedRuntime::load_value_type_fields_in_regs(JavaThread* thread, oopDesc* res))
3562 {
3563   assert(res->klass()->is_value(), "only value types here");
3564   ResourceMark rm;
3565   RegisterMap reg_map(thread);
3566   frame stubFrame = thread->last_frame();
3567   frame callerFrame = stubFrame.sender(&reg_map);
3568   assert(callerFrame.is_interpreted_frame(), "should be coming from interpreter");
3569 
3570   ValueKlass* vk = ValueKlass::cast(res->klass());
3571 
3572   const Array<SigEntry>* sig_vk = vk->extended_sig();
3573   const Array<VMRegPair>* regs = vk->return_regs();
3574 
3575   if (regs == NULL) {
3576     // The fields of the value klass don't fit in registers; bail out.
3577     return;
3578   }
3579 
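       // regs->at(0) holds the oop of the value itself (verified in the ASSERT block below);
       // the field values start at index 1.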
3580   int j = 1;
3581   for (int i = 0; i < sig_vk->length(); i++) {
3582     BasicType bt = sig_vk->at(i)._bt;
3583     if (bt == T_VALUETYPE) {
3584       continue;
3585     }
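         // A T_VOID entry marks the upper half of the preceding long/double and
         // consumes its own register slot.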
3586     if (bt == T_VOID) {
3587       if (sig_vk->at(i-1)._bt == T_LONG ||
3588           sig_vk->at(i-1)._bt == T_DOUBLE) {
3589         j++;
3590       }
3591       continue;
3592     }
3593     int off = sig_vk->at(i)._offset;
3594     assert(off > 0, "offset in object should be positive");
3595     VMRegPair pair = regs->at(j);
3596     address loc = reg_map.location(pair.first());
3597     switch (bt) {
3598     case T_BOOLEAN:
3599       *(jboolean*)loc = res->bool_field(off);
3600       break;
3601     case T_CHAR:
3602       *(jchar*)loc = res->char_field(off);
3603       break;
3604     case T_BYTE:
3605       *(jbyte*)loc = res->byte_field(off);
3606       break;
3607     case T_SHORT:
3608       *(jshort*)loc = res->short_field(off);
3609       break;
3610     case T_INT: {
3611       *(jint*)loc = res->int_field(off);
3612       break;
3613     }
3614     case T_LONG:
3615 #ifdef _LP64
3616       *(intptr_t*)loc = res->long_field(off);
3617 #else
3618       Unimplemented();
3619 #endif
3620       break;
3621     case T_OBJECT:
3622     case T_ARRAY: {
3623       *(oop*)loc = res->obj_field(off);
3624       break;
3625     }
3626     case T_FLOAT:
3627       *(jfloat*)loc = res->float_field(off);
3628       break;
3629     case T_DOUBLE:
3630       *(jdouble*)loc = res->double_field(off);
3631       break;
3632     default:
3633       ShouldNotReachHere();
3634     }
3635     j++;
3636   }
3637   assert(j == regs->length(), "missed a field?");
3638 
3639 #ifdef ASSERT
3640   VMRegPair pair = regs->at(0);
3641   address loc = reg_map.location(pair.first());
3642   assert(*(oopDesc**)loc == res, "overwritten object");
3643 #endif
3644 
3645   thread->set_vm_result(res);
3646 }
3647 JRT_END
3648 
3649 // We've returned to an interpreted method; the interpreter needs a
3650 // reference to a value type instance. Allocate it and initialize it
3651 // from the field values in the registers.
3652 JRT_BLOCK_ENTRY(void, SharedRuntime::store_value_type_fields_to_buf(JavaThread* thread, intptr_t res))
3653 {
3654   ResourceMark rm;
3655   RegisterMap reg_map(thread);
3656   frame stubFrame = thread->last_frame();
3657   frame callerFrame = stubFrame.sender(&reg_map);
3658 
3659 #ifdef ASSERT
3660   ValueKlass* verif_vk = ValueKlass::returned_value_klass(reg_map);
3661 #endif
3662 
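       // 'res' is either an oop (fields were not returned in registers) or a
       // ValueKlass* tagged by setting bit 0 (fields were returned in registers).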
3663   if (!is_set_nth_bit(res, 0)) {
3664     // We're not returning with value type fields in registers (the
3665     // calling convention didn't allow it for this value klass)
3666     assert(!Metaspace::contains((void*)res), "should be oop or pointer in buffer area");
3667     thread->set_vm_result((oopDesc*)res);
3668     assert(verif_vk == NULL, "broken calling convention");
3669     return;
3670   }
3671 
3672   clear_nth_bit(res, 0);
3673   ValueKlass* vk = (ValueKlass*)res;
3674   assert(verif_vk == vk, "broken calling convention");
3675   assert(Metaspace::contains((void*)res), "should be klass");
3676 
3677   // Allocate handles for every oop field so they are safe in case of
3678   // a safepoint when allocating
3679   GrowableArray<Handle> handles;
3680   vk->save_oop_fields(reg_map, handles);
3681 
3682   // It's unsafe to safepoint until this point; the oop fields are now protected by handles.
3683   JRT_BLOCK;
3684   {
3685     Thread* THREAD = thread;
3686     oop vt = vk->realloc_result(reg_map, handles, CHECK);
3687     thread->set_vm_result(vt);
3688   }
3689   JRT_BLOCK_END;
3690 }
3691 JRT_END
3692 