src/share/vm/runtime/sharedRuntime.cpp




  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "code/compiledIC.hpp"
  31 #include "code/codeCacheExtensions.hpp"
  32 #include "code/scopeDesc.hpp"
  33 #include "code/vtableStubs.hpp"
  34 #include "compiler/abstractCompiler.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/disassembler.hpp"
  37 #include "gc/shared/gcLocker.inline.hpp"
  38 #include "interpreter/interpreter.hpp"
  39 #include "interpreter/interpreterRuntime.hpp"
  40 #include "logging/log.hpp"
  41 #include "memory/metaspaceShared.hpp"
  42 #include "memory/resourceArea.hpp"
  43 #include "memory/universe.inline.hpp"
  44 #include "oops/klass.hpp"
  45 #include "oops/objArrayKlass.hpp"
  46 #include "oops/oop.inline.hpp"

  47 #include "prims/forte.hpp"
  48 #include "prims/jvmtiExport.hpp"
  49 #include "prims/methodHandles.hpp"
  50 #include "prims/nativeLookup.hpp"
  51 #include "runtime/arguments.hpp"
  52 #include "runtime/atomic.hpp"
  53 #include "runtime/biasedLocking.hpp"
  54 #include "runtime/compilationPolicy.hpp"
  55 #include "runtime/handles.inline.hpp"
  56 #include "runtime/init.hpp"
  57 #include "runtime/interfaceSupport.hpp"
  58 #include "runtime/javaCalls.hpp"
  59 #include "runtime/sharedRuntime.hpp"
  60 #include "runtime/stubRoutines.hpp"
  61 #include "runtime/vframe.hpp"
  62 #include "runtime/vframeArray.hpp"
  63 #include "trace/tracing.hpp"
  64 #include "utilities/copy.hpp"
  65 #include "utilities/dtrace.hpp"
  66 #include "utilities/events.hpp"
  67 #include "utilities/hashtable.inline.hpp"
  68 #include "utilities/macros.hpp"
  69 #include "utilities/xmlstream.hpp"
  70 #ifdef COMPILER1
  71 #include "c1/c1_Runtime1.hpp"
  72 #endif
  73 
  74 // Shared stub locations
  75 RuntimeStub*        SharedRuntime::_wrong_method_blob;
  76 RuntimeStub*        SharedRuntime::_wrong_method_abstract_blob;
  77 RuntimeStub*        SharedRuntime::_ic_miss_blob;
  78 RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
  79 RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
  80 RuntimeStub*        SharedRuntime::_resolve_static_call_blob;

  81 
  82 DeoptimizationBlob* SharedRuntime::_deopt_blob;
  83 SafepointBlob*      SharedRuntime::_polling_page_vectors_safepoint_handler_blob;
  84 SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
  85 SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;
  86 
  87 #ifdef COMPILER2
  88 UncommonTrapBlob*   SharedRuntime::_uncommon_trap_blob;
  89 #endif // COMPILER2
  90 
  91 
  92 //----------------------------generate_stubs-----------------------------------
  93 void SharedRuntime::generate_stubs() {
  94   _wrong_method_blob                   = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),          "wrong_method_stub");
  95   _wrong_method_abstract_blob          = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract), "wrong_method_abstract_stub");
  96   _ic_miss_blob                        = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),  "ic_miss_stub");
  97   _resolve_opt_virtual_call_blob       = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),   "resolve_opt_virtual_call");
  98   _resolve_virtual_call_blob           = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),       "resolve_virtual_call");
  99   _resolve_static_call_blob            = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),        "resolve_static_call");

 100 
 101 #if defined(COMPILER2) || INCLUDE_JVMCI
 102   // Vectors are generated only by C2 and JVMCI.
 103   bool support_wide = is_wide_vector(MaxVectorSize);
 104   if (support_wide) {
 105     _polling_page_vectors_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_VECTOR_LOOP);
 106   }
 107 #endif // COMPILER2 || INCLUDE_JVMCI
 108   _polling_page_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_LOOP);
 109   _polling_page_return_handler_blob    = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_RETURN);
 110 
 111   generate_deopt_blob();
 112 
 113 #ifdef COMPILER2
 114   generate_uncommon_trap_blob();
 115 #endif // COMPILER2
 116 }
 117 
 118 #include <math.h>
 119 


 458   return (jdouble)x;
 459 JRT_END
 460 
 461 // Exception handling across interpreter/compiler boundaries
 462 //
 463 // exception_handler_for_return_address(...) returns the continuation address.
 464 // The continuation address is the entry point of the exception handler of the
 465 // previous frame depending on the return address.
 466 
 467 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) {
 468   assert(frame::verify_return_pc(return_address), "must be a return address: " INTPTR_FORMAT, p2i(return_address));
 469   assert(thread->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");
 470 
 471   // Reset method handle flag.
 472   thread->set_is_method_handle_return(false);
 473 
 474 #if INCLUDE_JVMCI
 475   // JVMCI's ExceptionHandlerStub expects the thread local exception PC to be clear
 476   // and other exception handler continuations do not read it
 477   thread->set_exception_pc(NULL);
 478 #endif
 479 
 480   // The fastest case first
 481   CodeBlob* blob = CodeCache::find_blob(return_address);
 482   nmethod* nm = (blob != NULL) ? blob->as_nmethod_or_null() : NULL;
 483   if (nm != NULL) {
 484     // Set flag if return address is a method handle call site.
 485     thread->set_is_method_handle_return(nm->is_method_handle_return(return_address));
 486     // native nmethods don't have exception handlers
 487     assert(!nm->is_native_method(), "no exception handler");
 488     assert(nm->header_begin() != nm->exception_begin(), "no exception handler");
 489     if (nm->is_deopt_pc(return_address)) {
 490       // If we come here because of a stack overflow, the stack may be
  491       // unguarded. Reguard the stack; otherwise, if we return to the
  492       // deopt blob and the stack bang causes a stack overflow, we
 493       // crash.
 494       bool guard_pages_enabled = thread->stack_guards_enabled();
 495       if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
 496       if (thread->reserved_stack_activation() != thread->stack_base()) {
 497         thread->set_reserved_stack_activation(thread->stack_base());
 498       }
 499       assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");
 500       return SharedRuntime::deopt_blob()->unpack_with_exception();
 501     } else {
 502       return nm->exception_begin();
 503     }
 504   }
 505 
 506   // Entry code
 507   if (StubRoutines::returns_to_call_stub(return_address)) {
 508     return StubRoutines::catch_exception_entry();
 509   }
 510   // Interpreted code
 511   if (Interpreter::contains(return_address)) {
 512     return Interpreter::rethrow_exception_entry();
 513   }
 514 










 515   guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub");
 516   guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
 517 
 518 #ifndef PRODUCT
 519   { ResourceMark rm;
 520     tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", p2i(return_address));
  521     tty->print_cr("a) exception happened in (new?) code stubs/buffers that are not handled here");
 522     tty->print_cr("b) other problem");
 523   }
 524 #endif // PRODUCT
 525 
 526   ShouldNotReachHere();
 527   return NULL;
 528 }
 529 
 530 
 531 JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* thread, address return_address))
 532   return raw_exception_handler_for_return_address(thread, return_address);
 533 JRT_END
 534 
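The routine above encodes a fixed classification order for the return address: a deoptimization PC, then a regular nmethod handler, then the call stub, then interpreted code. A minimal, self-contained C++ sketch of that priority order (illustrative enum and names only, not HotSpot code):

#include <cstdio>

// Kinds of return address the routine above distinguishes (illustrative enum).
enum class ReturnSite { DeoptPC, CompiledMethod, CallStub, Interpreter, Unknown };

// Mirrors the order of checks in raw_exception_handler_for_return_address:
// compiled code is classified first, then the call stub, then the interpreter.
static const char* continuation_for(ReturnSite site) {
  switch (site) {
    case ReturnSite::DeoptPC:        return "deopt_blob->unpack_with_exception()";
    case ReturnSite::CompiledMethod: return "nm->exception_begin()";
    case ReturnSite::CallStub:       return "StubRoutines::catch_exception_entry()";
    case ReturnSite::Interpreter:    return "Interpreter::rethrow_exception_entry()";
    default:                         return "ShouldNotReachHere()";
  }
}

int main() {
  const ReturnSite sites[] = { ReturnSite::DeoptPC, ReturnSite::CompiledMethod,
                               ReturnSite::CallStub, ReturnSite::Interpreter,
                               ReturnSite::Unknown };
  for (ReturnSite s : sites) {
    std::printf("%s\n", continuation_for(s));
  }
  return 0;
}
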


 971  * \note
 972  * This method actually never gets called!  The reason is because
 973  * the interpreter's native entries call NativeLookup::lookup() which
 974  * throws the exception when the lookup fails.  The exception is then
 975  * caught and forwarded on the return from NativeLookup::lookup() call
 976  * before the call to the native function.  This might change in the future.
 977  */
 978 JNI_ENTRY(void*, throw_unsatisfied_link_error(JNIEnv* env, ...))
 979 {
 980   // We return a bad value here to make sure that the exception is
 981   // forwarded before we look at the return value.
 982   THROW_(vmSymbols::java_lang_UnsatisfiedLinkError(), (void*)badJNIHandle);
 983 }
 984 JNI_END
 985 
 986 address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
 987   return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
 988 }
 989 
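As the note above explains, this entry is effectively unreachable: NativeLookup::lookup() raises the UnsatisfiedLinkError itself, and the pending exception is forwarded before the placeholder entry would ever run. A stand-alone sketch of that shape (all names illustrative, not HotSpot code):

#include <cstdio>

typedef void* (*native_entry_t)();

// Placeholder entry that would raise UnsatisfiedLinkError if it were ever
// reached; the bad return value is never inspected because callers check
// for a pending exception first.
static void* unsatisfied_link_error_entry() {
  std::puts("UnsatisfiedLinkError (placeholder entry reached)");
  return nullptr;
}

struct LookupResult {
  native_entry_t entry;
  bool           pending_exception;
};

// Models the lookup step: on failure it raises the error itself and hands back
// the placeholder entry, which the caller never invokes because the pending
// exception is forwarded first.
static LookupResult lookup_native_entry(bool found, native_entry_t real_entry) {
  if (!found) {
    std::puts("lookup failed: UnsatisfiedLinkError raised during lookup");
    return { &unsatisfied_link_error_entry, true };
  }
  return { real_entry, false };
}

static void* real_native_impl() { std::puts("real native code"); return nullptr; }

int main() {
  LookupResult r = lookup_native_entry(false, &real_native_impl);
  if (r.pending_exception) {
    std::puts("pending exception forwarded before any native call");
  } else {
    r.entry();
  }
  return 0;
}
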
 990 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
 991   assert(obj->is_oop(), "must be a valid oop");
 992 #if INCLUDE_JVMCI
 993   // This removes the requirement for JVMCI compilers to emit code
 994   // performing a dynamic check that obj has a finalizer before
 995   // calling this routine. There should be no performance impact
 996   // for C1 since it emits a dynamic check. C2 and the interpreter
  997  * use other runtime routines for registering finalizers.
 998   if (!obj->klass()->has_finalizer()) {
 999     return;
1000   }
1001 #endif // INCLUDE_JVMCI

1002   assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
1003   InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
1004 JRT_END
1005 
1006 
1007 jlong SharedRuntime::get_java_tid(Thread* thread) {
1008   if (thread != NULL) {
1009     if (thread->is_Java_thread()) {
1010       oop obj = ((JavaThread*)thread)->threadObj();
1011       return (obj == NULL) ? 0 : java_lang_Thread::thread_id(obj);
1012     }
1013   }
1014   return 0;
1015 }
1016 
1017 /**
1018  * This function ought to be a void function, but cannot be because
1019  * it gets turned into a tail-call on sparc, which runs into dtrace bug
1020  * 6254741.  Once that is fixed we can remove the dummy return value.
1021  */


1208 }
1209 
1210 methodHandle SharedRuntime::find_callee_method(JavaThread* thread, TRAPS) {
1211   ResourceMark rm(THREAD);
1212   // We need first to check if any Java activations (compiled, interpreted)
1213   // exist on the stack since last JavaCall.  If not, we need
1214   // to get the target method from the JavaCall wrapper.
1215   vframeStream vfst(thread, true);  // Do not skip any javaCalls
1216   methodHandle callee_method;
1217   if (vfst.at_end()) {
1218     // No Java frames were found on stack since we did the JavaCall.
1219     // Hence the stack can only contain an entry_frame.  We need to
1220     // find the target method from the stub frame.
1221     RegisterMap reg_map(thread, false);
1222     frame fr = thread->last_frame();
1223     assert(fr.is_runtime_frame(), "must be a runtimeStub");
1224     fr = fr.sender(&reg_map);
1225     assert(fr.is_entry_frame(), "must be");
1226     // fr is now pointing to the entry frame.
1227     callee_method = methodHandle(THREAD, fr.entry_frame_call_wrapper()->callee_method());
1228     assert(fr.entry_frame_call_wrapper()->receiver() == NULL || !callee_method->is_static(), "non-null receiver for static call??");
1229   } else {
1230     Bytecodes::Code bc;
1231     CallInfo callinfo;
1232     find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(methodHandle()));
1233     callee_method = callinfo.selected_method();
1234   }
1235   assert(callee_method()->is_method(), "must be");
1236   return callee_method;
1237 }
1238 
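The two branches above can be summarized as: take the callee from the entry frame's call wrapper when no Java activation exists above the runtime stub, otherwise resolve it from the topmost Java frame's call site. A self-contained sketch of that selection logic, using toy types in place of the real frame and vframeStream machinery (illustrative only):

#include <cstdio>
#include <string>
#include <vector>

// One activation on a toy stack, newest first. For the bottom entry frame,
// 'callee' stands in for entry_frame_call_wrapper()->callee_method().
struct Activation {
  bool        is_java;
  std::string callee;
};

// Mirrors the two branches of find_callee_method: prefer the first Java
// activation's resolved call site, else fall back to the entry frame.
static std::string find_callee(const std::vector<Activation>& stack) {
  for (const Activation& a : stack) {
    if (a.is_java) return a.callee;
  }
  return stack.back().callee;   // no Java frames since the JavaCall
}

int main() {
  std::vector<Activation> only_stub_and_entry = {
    { false, "" },                 // runtime stub frame
    { false, "Wrapper::callee" }   // entry frame (JavaCall wrapper)
  };
  std::vector<Activation> with_java_frame = {
    { false, "" }, { true, "Foo::bar" }, { false, "Wrapper::callee" }
  };
  std::printf("%s\n", find_callee(only_stub_and_entry).c_str());
  std::printf("%s\n", find_callee(with_java_frame).c_str());
  return 0;
}
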
1239 // Resolves a call.
1240 methodHandle SharedRuntime::resolve_helper(JavaThread *thread,
1241                                            bool is_virtual,
1242                                            bool is_optimized, TRAPS) {
1243   methodHandle callee_method;
1244   callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
1245   if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
1246     int retry_count = 0;
1247     while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
1248            callee_method->method_holder() != SystemDictionary::Object_klass()) {


1337   StaticCallInfo static_call_info;
1338   CompiledICInfo virtual_call_info;
1339 
1340   // Make sure the callee nmethod does not get deoptimized and removed before
1341   // we are done patching the code.
1342   CompiledMethod* callee = callee_method->code();
1343 
1344   if (callee != NULL) {
1345     assert(callee->is_compiled(), "must be nmethod for patching");
1346   }
1347 
1348   if (callee != NULL && !callee->is_in_use()) {
1349     // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
1350     callee = NULL;
1351   }
1352   nmethodLocker nl_callee(callee);
1353 #ifdef ASSERT
1354   address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
1355 #endif
1356 


1357   if (is_virtual) {
1358     assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
1359     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
1360     KlassHandle h_klass(THREAD, invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass());
1361     CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
1362                      is_optimized, static_bound, virtual_call_info,
1363                      CHECK_(methodHandle()));
1364   } else {
1365     // static call
1366     CompiledStaticCall::compute_entry(callee_method, static_call_info);
1367   }
1368 
1369   // grab lock, check for deoptimization and potentially patch caller
1370   {
1371     MutexLocker ml_patch(CompiledIC_lock);
1372 
1373     // Lock blocks for safepoint during which both nmethods can change state.
1374 
1375     // Now that we are ready to patch if the Method* was redefined then
1376     // don't update call site and let the caller retry.
1377     // Don't update call site if callee nmethod was unloaded or deoptimized.
 1378     // Don't update call site if callee nmethod was replaced by another nmethod,
 1379     // which may happen once multiple alive nmethods per method (tiered compilation)
 1380     // are supported.
1381     if (!callee_method->is_old() &&
1382         (callee == NULL || callee->is_in_use() && (callee_method->code() == callee))) {
1383 #ifdef ASSERT
1384       // We must not try to patch to jump to an already unloaded method.
1385       if (dest_entry_point != 0) {
1386         CodeBlob* cb = CodeCache::find_blob(dest_entry_point);
1387         assert((cb != NULL) && cb->is_compiled() && (((CompiledMethod*)cb) == callee),
1388                "should not call unloaded nmethod");
1389       }
1390 #endif
1391       if (is_virtual) {
1392         CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1393         if (inline_cache->is_clean()) {
1394           inline_cache->set_to_monomorphic(virtual_call_info);
1395         }
1396       } else {
1397         CompiledStaticCall* ssc = compiledStaticCall_before(caller_frame.pc());
1398         if (ssc->is_clean()) ssc->set(static_call_info);
1399       }
1400     }
1401 
1402   } // unlock CompiledIC_lock
1403 
1404   return callee_method;
1405 }
1406 
1407 
1408 // Inline caches exist only in compiled code
1409 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
1410 #ifdef ASSERT
1411   RegisterMap reg_map(thread, false);
1412   frame stub_frame = thread->last_frame();
1413   assert(stub_frame.is_runtime_frame(), "sanity check");
1414   frame caller_frame = stub_frame.sender(&reg_map);
1415   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
1416 #endif /* ASSERT */
1417 


1493   // return compiled code entry point after potential safepoints
1494   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1495   return callee_method->verified_code_entry();
1496 JRT_END
1497 
1498 
1499 // Resolve a virtual call that can be statically bound (e.g., always
1500 // monomorphic, so it has no inline cache).  Patch code to resolved target.
1501 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread))
1502   methodHandle callee_method;
1503   JRT_BLOCK
1504     callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL);
1505     thread->set_vm_result_2(callee_method());
1506   JRT_BLOCK_END
1507   // return compiled code entry point after potential safepoints
1508   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1509   return callee_method->verified_code_entry();
1510 JRT_END
1511 
1512 

1513 methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
1514   ResourceMark rm(thread);
1515   CallInfo call_info;
1516   Bytecodes::Code bc;
1517 
1518   // receiver is NULL for static calls. An exception is thrown for NULL
1519   // receivers for non-static calls
1520   Handle receiver = find_callee_info(thread, bc, call_info,
1521                                      CHECK_(methodHandle()));
 1522   // Compiler1 can produce virtual call sites that can actually be statically bound.
 1523   // If we fell thru to below we would think that the site was going megamorphic
 1524   // when in fact the site can never miss. Worse, because we'd think it was megamorphic
 1525   // we'd try to do a vtable dispatch; however, methods that can be statically bound
 1526   // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
 1527   // reresolution of the call site (as if we did a handle_wrong_method and not a
 1528   // plain ic_miss) and the site will be converted to an optimized virtual call site,
 1529   // never to miss again. I don't believe C2 will produce code like this but if it
 1530   // did this would still be the correct thing to do for it too, hence no ifdef.
1531   //
1532   if (call_info.resolved_method()->can_be_statically_bound()) {


1605               ResourceMark rm(thread);
1606               tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
1607               callee_method->print_short_name(tty);
1608               tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1609             }
1610             should_be_mono = true;
1611           }
1612         }
1613       }
1614 
1615       if (should_be_mono) {
1616 
1617         // We have a path that was monomorphic but was going interpreted
1618         // and now we have (or had) a compiled entry. We correct the IC
1619         // by using a new icBuffer.
1620         CompiledICInfo info;
1621         KlassHandle receiver_klass(THREAD, receiver()->klass());
1622         inline_cache->compute_monomorphic_entry(callee_method,
1623                                                 receiver_klass,
1624                                                 inline_cache->is_optimized(),
1625                                                 false,
1626                                                 info, CHECK_(methodHandle()));
1627         inline_cache->set_to_monomorphic(info);
1628       } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1629         // Potential change to megamorphic
1630         bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
1631         if (!successful) {
1632           inline_cache->set_to_clean();
1633         }
1634       } else {
1635         // Either clean or megamorphic
1636       }
1637     } else {
1638       fatal("Unimplemented");
1639     }
1640   } // Release CompiledIC_lock
1641 
1642   return callee_method;
1643 }
1644 
1645 //


1674     // recognizable call. We will always find a call for static
1675     // calls and for optimized virtual calls. For vanilla virtual
1676     // calls it depends on the state of the UseInlineCaches switch.
1677     //
1678     // With Inline Caches disabled we can get here for a virtual call
1679     // for two reasons:
1680     //   1 - calling an abstract method. The vtable for abstract methods
1681     //       will run us thru handle_wrong_method and we will eventually
1682     //       end up in the interpreter to throw the ame.
1683     //   2 - a racing deoptimization. We could be doing a vanilla vtable
1684     //       call and between the time we fetch the entry address and
1685     //       we jump to it the target gets deoptimized. Similar to 1
 1686     //       we will wind up in the interpreter (thru a c2i with c2).
1687     //
1688     address call_addr = NULL;
1689     {
1690       // Get call instruction under lock because another thread may be
1691       // busy patching it.
1692       MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
1693       // Location of call instruction
1694       if (NativeCall::is_call_before(pc)) {
1695         NativeCall *ncall = nativeCall_before(pc);
1696         call_addr = ncall->instruction_address();
1697       }
1698     }
1699     // Make sure nmethod doesn't get deoptimized and removed until
1700     // this is done with it.
1701     // CLEANUP - with lazy deopt shouldn't need this lock
1702     nmethodLocker nmlock(caller_nm);
1703 
1704     if (call_addr != NULL) {
1705       RelocIterator iter(caller_nm, call_addr, call_addr+1);
1706       int ret = iter.next(); // Get item
1707       if (ret) {
1708         assert(iter.addr() == call_addr, "must find call");
1709         if (iter.type() == relocInfo::static_call_type) {
1710           is_static_call = true;
1711         } else {
1712           assert(iter.type() == relocInfo::virtual_call_type ||
1713                  iter.type() == relocInfo::opt_virtual_call_type
1714                 , "unexpected relocInfo. type");
1715         }
1716       } else {
1717         assert(!UseInlineCaches, "relocation info. must exist for this address");
1718       }
1719 
1720       // Cleaning the inline cache will force a new resolve. This is more robust
1721       // than directly setting it to the new destination, since resolving of calls
1722       // is always done through the same code path. (experience shows that it
1723       // leads to very hard to track down bugs, if an inline cache gets updated
1724       // to a wrong method). It should not be performance critical, since the
1725       // resolve is only done once.
1726 

1727       MutexLocker ml(CompiledIC_lock);
1728       if (is_static_call) {
1729         CompiledStaticCall* ssc= compiledStaticCall_at(call_addr);
1730         ssc->set_to_clean();
1731       } else {
1732         // compiled, dispatched call (which used to call an interpreted method)
1733         CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1734         inline_cache->set_to_clean();
1735       }
1736     }
1737   }
1738 
1739   methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));
1740 
1741 
1742 #ifndef PRODUCT
1743   Atomic::inc(&_wrong_method_ctr);
1744 
1745   if (TraceCallFixup) {
1746     ResourceMark rm(thread);
1747     tty->print("handle_wrong_method reresolving call to");
1748     callee_method->print_short_name(tty);
1749     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));


1776   const int total_args_passed = method->size_of_parameters();
1777   const VMRegPair*    regs_with_member_name = regs;
1778         VMRegPair* regs_without_member_name = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed - 1);
1779 
1780   const int member_arg_pos = total_args_passed - 1;
1781   assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
1782   assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
1783 
1784   const bool is_outgoing = method->is_method_handle_intrinsic();
1785   int comp_args_on_stack = java_calling_convention(sig_bt, regs_without_member_name, total_args_passed - 1, is_outgoing);
1786 
1787   for (int i = 0; i < member_arg_pos; i++) {
1788     VMReg a =    regs_with_member_name[i].first();
1789     VMReg b = regs_without_member_name[i].first();
1790     assert(a->value() == b->value(), "register allocation mismatch: a=" INTX_FORMAT ", b=" INTX_FORMAT, a->value(), b->value());
1791   }
1792   assert(regs_with_member_name[member_arg_pos].first()->is_valid(), "bad member arg");
1793 }
1794 #endif
1795 































1796 // ---------------------------------------------------------------------------
1797 // We are calling the interpreter via a c2i. Normally this would mean that
1798 // we were called by a compiled method. However we could have lost a race
1799 // where we went int -> i2c -> c2i and so the caller could in fact be
1800 // interpreted. If the caller is compiled we attempt to patch the caller
 1801 // so it no longer calls into the interpreter.
1802 IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address caller_pc))
1803   Method* moop(method);
1804 
1805   address entry_point = moop->from_compiled_entry_no_trampoline();
1806 
1807   // It's possible that deoptimization can occur at a call site which hasn't
1808   // been resolved yet, in which case this function will be called from
1809   // an nmethod that has been patched for deopt and we can ignore the
1810   // request for a fixup.
1811   // Also it is possible that we lost a race in that from_compiled_entry
1812   // is now back to the i2c in that case we don't need to patch and if
1813   // we did we'd leap into space because the callsite needs to use
1814   // "to interpreter" stub in order to load up the Method*. Don't
1815   // ask me how I know this...


1825 
1826   // Get the return PC for the passed caller PC.
1827   address return_pc = caller_pc + frame::pc_return_offset;
1828 
1829   // There is a benign race here. We could be attempting to patch to a compiled
1830   // entry point at the same time the callee is being deoptimized. If that is
1831   // the case then entry_point may in fact point to a c2i and we'd patch the
1832   // call site with the same old data. clear_code will set code() to NULL
1833   // at the end of it. If we happen to see that NULL then we can skip trying
1834   // to patch. If we hit the window where the callee has a c2i in the
1835   // from_compiled_entry and the NULL isn't present yet then we lose the race
1836   // and patch the code with the same old data. Asi es la vida.
1837 
1838   if (moop->code() == NULL) return;
1839 
1840   if (nm->is_in_use()) {
1841 
1842     // Expect to find a native call there (unless it was no-inline cache vtable dispatch)
1843     MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
1844     if (NativeCall::is_call_before(return_pc)) {
1845       NativeCall *call = nativeCall_before(return_pc);

1846       //
1847       // bug 6281185. We might get here after resolving a call site to a vanilla
1848       // virtual call. Because the resolvee uses the verified entry it may then
1849       // see compiled code and attempt to patch the site by calling us. This would
 1850       // then incorrectly convert the call site to optimized and it's downhill from
1851       // there. If you're lucky you'll get the assert in the bugid, if not you've
1852       // just made a call site that could be megamorphic into a monomorphic site
1853       // for the rest of its life! Just another racing bug in the life of
1854       // fixup_callers_callsite ...
1855       //
1856       RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
1857       iter.next();
1858       assert(iter.has_current(), "must have a reloc at java call site");
1859       relocInfo::relocType typ = iter.reloc()->type();
1860       if (typ != relocInfo::static_call_type &&
1861            typ != relocInfo::opt_virtual_call_type &&
1862            typ != relocInfo::static_stub_type) {
1863         return;
1864       }
1865       address destination = call->destination();
1866       if (destination != entry_point) {
1867         CodeBlob* callee = CodeCache::find_blob(destination);
1868         // callee == cb seems weird. It means calling interpreter thru stub.
1869         if (callee == cb || callee->is_adapter_blob()) {
1870           // static call or optimized virtual
1871           if (TraceCallFixup) {
1872             tty->print("fixup callsite           at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
1873             moop->print_short_name(tty);
1874             tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
1875           }
1876           call->set_destination_mt_safe(entry_point);
1877         } else {
1878           if (TraceCallFixup) {
1879             tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
1880             moop->print_short_name(tty);
1881             tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
1882           }
1883           // assert is too strong could also be resolve destinations.
1884           // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be");
1885         }
1886       } else {
1887           if (TraceCallFixup) {
1888             tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
1889             moop->print_short_name(tty);
1890             tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
1891           }
1892       }
1893     }
1894   }
1895 IRT_END
1896 
1897 
1898 // same as JVM_Arraycopy, but called directly from compiled code
1899 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
1900                                                 oopDesc* dest, jint dest_pos,
1901                                                 jint length,
1902                                                 JavaThread* thread)) {
1903 #ifndef PRODUCT
1904   _slow_array_copy_ctr++;
1905 #endif
1906   // Check if we have null pointers
1907   if (src == NULL || dest == NULL) {
1908     THROW(vmSymbols::java_lang_NullPointerException());
1909   }
1910   // Do the copy.  The casts to arrayOop are necessary to the copy_array API,
1911   // even though the copy_array API also performs dynamic checks to ensure




  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "code/compiledIC.hpp"
  31 #include "code/codeCacheExtensions.hpp"
  32 #include "code/scopeDesc.hpp"
  33 #include "code/vtableStubs.hpp"
  34 #include "compiler/abstractCompiler.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/disassembler.hpp"
  37 #include "gc/shared/gcLocker.inline.hpp"
  38 #include "interpreter/interpreter.hpp"
  39 #include "interpreter/interpreterRuntime.hpp"
  40 #include "logging/log.hpp"
  41 #include "memory/metaspaceShared.hpp"
  42 #include "memory/resourceArea.hpp"
  43 #include "memory/universe.inline.hpp"
  44 #include "oops/klass.hpp"
  45 #include "oops/objArrayKlass.hpp"
  46 #include "oops/oop.inline.hpp"
  47 #include "aot/aotLoader.hpp"
  48 #include "prims/forte.hpp"
  49 #include "prims/jvmtiExport.hpp"
  50 #include "prims/methodHandles.hpp"
  51 #include "prims/nativeLookup.hpp"
  52 #include "runtime/arguments.hpp"
  53 #include "runtime/atomic.hpp"
  54 #include "runtime/biasedLocking.hpp"
  55 #include "runtime/compilationPolicy.hpp"
  56 #include "runtime/handles.inline.hpp"
  57 #include "runtime/init.hpp"
  58 #include "runtime/interfaceSupport.hpp"
  59 #include "runtime/javaCalls.hpp"
  60 #include "runtime/sharedRuntime.hpp"
  61 #include "runtime/stubRoutines.hpp"
  62 #include "runtime/vframe.hpp"
  63 #include "runtime/vframeArray.hpp"
  64 #include "trace/tracing.hpp"
  65 #include "utilities/copy.hpp"
  66 #include "utilities/dtrace.hpp"
  67 #include "utilities/events.hpp"
  68 #include "utilities/hashtable.inline.hpp"
  69 #include "utilities/macros.hpp"
  70 #include "utilities/xmlstream.hpp"
  71 #ifdef COMPILER1
  72 #include "c1/c1_Runtime1.hpp"
  73 #endif
  74 
  75 // Shared stub locations
  76 RuntimeStub*        SharedRuntime::_wrong_method_blob;
  77 RuntimeStub*        SharedRuntime::_wrong_method_abstract_blob;
  78 RuntimeStub*        SharedRuntime::_ic_miss_blob;
  79 RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
  80 RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
  81 RuntimeStub*        SharedRuntime::_resolve_static_call_blob;
  82 address             SharedRuntime::_resolve_static_call_entry;
  83 
  84 DeoptimizationBlob* SharedRuntime::_deopt_blob;
  85 SafepointBlob*      SharedRuntime::_polling_page_vectors_safepoint_handler_blob;
  86 SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
  87 SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;
  88 
  89 #ifdef COMPILER2
  90 UncommonTrapBlob*   SharedRuntime::_uncommon_trap_blob;
  91 #endif // COMPILER2
  92 
  93 
  94 //----------------------------generate_stubs-----------------------------------
  95 void SharedRuntime::generate_stubs() {
  96   _wrong_method_blob                   = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),          "wrong_method_stub");
  97   _wrong_method_abstract_blob          = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract), "wrong_method_abstract_stub");
  98   _ic_miss_blob                        = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),  "ic_miss_stub");
  99   _resolve_opt_virtual_call_blob       = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),   "resolve_opt_virtual_call");
 100   _resolve_virtual_call_blob           = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),       "resolve_virtual_call");
 101   _resolve_static_call_blob            = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),        "resolve_static_call");
 102   _resolve_static_call_entry           = _resolve_static_call_blob->entry_point();
 103 
 104 #if defined(COMPILER2) || INCLUDE_JVMCI
 105   // Vectors are generated only by C2 and JVMCI.
 106   bool support_wide = is_wide_vector(MaxVectorSize);
 107   if (support_wide) {
 108     _polling_page_vectors_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_VECTOR_LOOP);
 109   }
 110 #endif // COMPILER2 || INCLUDE_JVMCI
 111   _polling_page_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_LOOP);
 112   _polling_page_return_handler_blob    = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_RETURN);
 113 
 114   generate_deopt_blob();
 115 
 116 #ifdef COMPILER2
 117   generate_uncommon_trap_blob();
 118 #endif // COMPILER2
 119 }
 120 
 121 #include <math.h>
 122 


 461   return (jdouble)x;
 462 JRT_END
 463 
 464 // Exception handling across interpreter/compiler boundaries
 465 //
 466 // exception_handler_for_return_address(...) returns the continuation address.
 467 // The continuation address is the entry point of the exception handler of the
 468 // previous frame depending on the return address.
 469 
 470 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) {
 471   assert(frame::verify_return_pc(return_address), "must be a return address: " INTPTR_FORMAT, p2i(return_address));
 472   assert(thread->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");
 473 
 474   // Reset method handle flag.
 475   thread->set_is_method_handle_return(false);
 476 
 477 #if INCLUDE_JVMCI
 478   // JVMCI's ExceptionHandlerStub expects the thread local exception PC to be clear
 479   // and other exception handler continuations do not read it
 480   thread->set_exception_pc(NULL);
 481 #endif // INCLUDE_JVMCI
 482 
 483   // The fastest case first
 484   CodeBlob* blob = CodeCache::find_blob(return_address);
 485   nmethod* nm = (blob != NULL) ? blob->as_nmethod_or_null() : NULL;
 486   if (nm != NULL) {
 487     // Set flag if return address is a method handle call site.
 488     thread->set_is_method_handle_return(nm->is_method_handle_return(return_address));
 489     // native nmethods don't have exception handlers
 490     assert(!nm->is_native_method(), "no exception handler");
 491     assert(nm->header_begin() != nm->exception_begin(), "no exception handler");
 492     if (nm->is_deopt_pc(return_address)) {
 493       // If we come here because of a stack overflow, the stack may be
  494       // unguarded. Reguard the stack; otherwise, if we return to the
  495       // deopt blob and the stack bang causes a stack overflow, we
 496       // crash.
 497       bool guard_pages_enabled = thread->stack_guards_enabled();
 498       if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
 499       if (thread->reserved_stack_activation() != thread->stack_base()) {
 500         thread->set_reserved_stack_activation(thread->stack_base());
 501       }
 502       assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");
 503       return SharedRuntime::deopt_blob()->unpack_with_exception();
 504     } else {
 505       return nm->exception_begin();
 506     }
 507   }
 508 
 509   // Entry code
 510   if (StubRoutines::returns_to_call_stub(return_address)) {
 511     return StubRoutines::catch_exception_entry();
 512   }
 513   // Interpreted code
 514   if (Interpreter::contains(return_address)) {
 515     return Interpreter::rethrow_exception_entry();
 516   }
 517 
 518 #if INCLUDE_AOT
 519   // AOT Compiled code
 520   if (UseAOT && AOTLoader::contains(return_address)) {
 521       AOTCompiledMethod* aotm = AOTLoader::find_aot((address) return_address);
 522       // Set flag if return address is a method handle call site.
 523       thread->set_is_method_handle_return(aotm->is_method_handle_return(return_address));
 524       return aotm->exception_begin();
 525   }
 526 #endif
 527 
 528   guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub");
 529   guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
 530 
 531 #ifndef PRODUCT
 532   { ResourceMark rm;
 533     tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", p2i(return_address));
  534     tty->print_cr("a) exception happened in (new?) code stubs/buffers that are not handled here");
 535     tty->print_cr("b) other problem");
 536   }
 537 #endif // PRODUCT
 538 
 539   ShouldNotReachHere();
 540   return NULL;
 541 }
 542 
 543 
 544 JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* thread, address return_address))
 545   return raw_exception_handler_for_return_address(thread, return_address);
 546 JRT_END
 547 


 984  * \note
 985  * This method actually never gets called!  The reason is because
 986  * the interpreter's native entries call NativeLookup::lookup() which
 987  * throws the exception when the lookup fails.  The exception is then
 988  * caught and forwarded on the return from NativeLookup::lookup() call
 989  * before the call to the native function.  This might change in the future.
 990  */
 991 JNI_ENTRY(void*, throw_unsatisfied_link_error(JNIEnv* env, ...))
 992 {
 993   // We return a bad value here to make sure that the exception is
 994   // forwarded before we look at the return value.
 995   THROW_(vmSymbols::java_lang_UnsatisfiedLinkError(), (void*)badJNIHandle);
 996 }
 997 JNI_END
 998 
 999 address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
1000   return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
1001 }
1002 
1003 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))

1004 #if INCLUDE_JVMCI





1005   if (!obj->klass()->has_finalizer()) {
1006     return;
1007   }
1008 #endif // INCLUDE_JVMCI
1009   assert(obj->is_oop(), "must be a valid oop");
1010   assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
1011   InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
1012 JRT_END
1013 
1014 
1015 jlong SharedRuntime::get_java_tid(Thread* thread) {
1016   if (thread != NULL) {
1017     if (thread->is_Java_thread()) {
1018       oop obj = ((JavaThread*)thread)->threadObj();
1019       return (obj == NULL) ? 0 : java_lang_Thread::thread_id(obj);
1020     }
1021   }
1022   return 0;
1023 }
1024 
1025 /**
1026  * This function ought to be a void function, but cannot be because
1027  * it gets turned into a tail-call on sparc, which runs into dtrace bug
1028  * 6254741.  Once that is fixed we can remove the dummy return value.
1029  */


1216 }
1217 
1218 methodHandle SharedRuntime::find_callee_method(JavaThread* thread, TRAPS) {
1219   ResourceMark rm(THREAD);
1220   // We need first to check if any Java activations (compiled, interpreted)
1221   // exist on the stack since last JavaCall.  If not, we need
1222   // to get the target method from the JavaCall wrapper.
1223   vframeStream vfst(thread, true);  // Do not skip any javaCalls
1224   methodHandle callee_method;
1225   if (vfst.at_end()) {
1226     // No Java frames were found on stack since we did the JavaCall.
1227     // Hence the stack can only contain an entry_frame.  We need to
1228     // find the target method from the stub frame.
1229     RegisterMap reg_map(thread, false);
1230     frame fr = thread->last_frame();
1231     assert(fr.is_runtime_frame(), "must be a runtimeStub");
1232     fr = fr.sender(&reg_map);
1233     assert(fr.is_entry_frame(), "must be");
1234     // fr is now pointing to the entry frame.
1235     callee_method = methodHandle(THREAD, fr.entry_frame_call_wrapper()->callee_method());

1236   } else {
1237     Bytecodes::Code bc;
1238     CallInfo callinfo;
1239     find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(methodHandle()));
1240     callee_method = callinfo.selected_method();
1241   }
1242   assert(callee_method()->is_method(), "must be");
1243   return callee_method;
1244 }
1245 
1246 // Resolves a call.
1247 methodHandle SharedRuntime::resolve_helper(JavaThread *thread,
1248                                            bool is_virtual,
1249                                            bool is_optimized, TRAPS) {
1250   methodHandle callee_method;
1251   callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
1252   if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
1253     int retry_count = 0;
1254     while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
1255            callee_method->method_holder() != SystemDictionary::Object_klass()) {


1344   StaticCallInfo static_call_info;
1345   CompiledICInfo virtual_call_info;
1346 
1347   // Make sure the callee nmethod does not get deoptimized and removed before
1348   // we are done patching the code.
1349   CompiledMethod* callee = callee_method->code();
1350 
1351   if (callee != NULL) {
1352     assert(callee->is_compiled(), "must be nmethod for patching");
1353   }
1354 
1355   if (callee != NULL && !callee->is_in_use()) {
1356     // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
1357     callee = NULL;
1358   }
1359   nmethodLocker nl_callee(callee);
1360 #ifdef ASSERT
1361   address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
1362 #endif
1363 
1364   bool is_nmethod = caller_nm->is_nmethod();
1365 
1366   if (is_virtual) {
1367     assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
1368     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
1369     KlassHandle h_klass(THREAD, invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass());
1370     CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
1371                      is_optimized, static_bound, is_nmethod, virtual_call_info,
1372                      CHECK_(methodHandle()));
1373   } else {
1374     // static call
1375     CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info);
1376   }
1377 
1378   // grab lock, check for deoptimization and potentially patch caller
1379   {
1380     MutexLocker ml_patch(CompiledIC_lock);
1381 
1382     // Lock blocks for safepoint during which both nmethods can change state.
1383 
1384     // Now that we are ready to patch if the Method* was redefined then
1385     // don't update call site and let the caller retry.
1386     // Don't update call site if callee nmethod was unloaded or deoptimized.
 1387     // Don't update call site if callee nmethod was replaced by another nmethod,
 1388     // which may happen once multiple alive nmethods per method (tiered compilation)
 1389     // are supported.
1390     if (!callee_method->is_old() &&
1391         (callee == NULL || callee->is_in_use() && (callee_method->code() == callee))) {
1392 #ifdef ASSERT
1393       // We must not try to patch to jump to an already unloaded method.
1394       if (dest_entry_point != 0) {
1395         CodeBlob* cb = CodeCache::find_blob(dest_entry_point);
1396         assert((cb != NULL) && cb->is_compiled() && (((CompiledMethod*)cb) == callee),
1397                "should not call unloaded nmethod");
1398       }
1399 #endif
1400       if (is_virtual) {
1401         CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1402         if (inline_cache->is_clean()) {
1403           inline_cache->set_to_monomorphic(virtual_call_info);
1404         }
1405       } else {
1406         CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
1407         if (ssc->is_clean()) ssc->set(static_call_info);
1408       }
1409     }
1410 
1411   } // unlock CompiledIC_lock
1412 
1413   return callee_method;
1414 }
1415 
1416 
1417 // Inline caches exist only in compiled code
1418 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
1419 #ifdef ASSERT
1420   RegisterMap reg_map(thread, false);
1421   frame stub_frame = thread->last_frame();
1422   assert(stub_frame.is_runtime_frame(), "sanity check");
1423   frame caller_frame = stub_frame.sender(&reg_map);
1424   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
1425 #endif /* ASSERT */
1426 


1502   // return compiled code entry point after potential safepoints
1503   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1504   return callee_method->verified_code_entry();
1505 JRT_END
1506 
1507 
1508 // Resolve a virtual call that can be statically bound (e.g., always
1509 // monomorphic, so it has no inline cache).  Patch code to resolved target.
1510 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread))
1511   methodHandle callee_method;
1512   JRT_BLOCK
1513     callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL);
1514     thread->set_vm_result_2(callee_method());
1515   JRT_BLOCK_END
1516   // return compiled code entry point after potential safepoints
1517   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1518   return callee_method->verified_code_entry();
1519 JRT_END
1520 
1521 
1522 
1523 methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
1524   ResourceMark rm(thread);
1525   CallInfo call_info;
1526   Bytecodes::Code bc;
1527 
1528   // receiver is NULL for static calls. An exception is thrown for NULL
1529   // receivers for non-static calls
1530   Handle receiver = find_callee_info(thread, bc, call_info,
1531                                      CHECK_(methodHandle()));
 1532   // Compiler1 can produce virtual call sites that can actually be statically bound.
 1533   // If we fell thru to below we would think that the site was going megamorphic
 1534   // when in fact the site can never miss. Worse, because we'd think it was megamorphic
 1535   // we'd try to do a vtable dispatch; however, methods that can be statically bound
 1536   // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
 1537   // reresolution of the call site (as if we did a handle_wrong_method and not a
 1538   // plain ic_miss) and the site will be converted to an optimized virtual call site,
 1539   // never to miss again. I don't believe C2 will produce code like this but if it
 1540   // did this would still be the correct thing to do for it too, hence no ifdef.
1541   //
1542   if (call_info.resolved_method()->can_be_statically_bound()) {


1615               ResourceMark rm(thread);
1616               tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
1617               callee_method->print_short_name(tty);
1618               tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1619             }
1620             should_be_mono = true;
1621           }
1622         }
1623       }
1624 
1625       if (should_be_mono) {
1626 
1627         // We have a path that was monomorphic but was going interpreted
1628         // and now we have (or had) a compiled entry. We correct the IC
1629         // by using a new icBuffer.
1630         CompiledICInfo info;
1631         KlassHandle receiver_klass(THREAD, receiver()->klass());
1632         inline_cache->compute_monomorphic_entry(callee_method,
1633                                                 receiver_klass,
1634                                                 inline_cache->is_optimized(),
1635                                                 false, caller_nm->is_nmethod(),
1636                                                 info, CHECK_(methodHandle()));
1637         inline_cache->set_to_monomorphic(info);
1638       } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1639         // Potential change to megamorphic
1640         bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
1641         if (!successful) {
1642           inline_cache->set_to_clean();
1643         }
1644       } else {
1645         // Either clean or megamorphic
1646       }
1647     } else {
1648       fatal("Unimplemented");
1649     }
1650   } // Release CompiledIC_lock
1651 
1652   return callee_method;
1653 }
1654 
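The cases handled above amount to a small state machine: a site that should be monomorphic is (re)pointed, a previously monomorphic site that misses again goes megamorphic, and a failed megamorphic transition drops the site back to clean so the next call re-resolves. A stand-alone sketch of that policy (toy types, not the real CompiledIC):

#include <cstdio>

// Toy inline-cache states mirroring the cases handled above.
enum class ICState { Clean, Monomorphic, Megamorphic };

struct ToyInlineCache {
  ICState state = ICState::Clean;

  // If the miss should resolve to a single target, the site is (re)pointed
  // monomorphically; otherwise a non-clean, non-megamorphic site goes
  // megamorphic, and a failed transition drops back to clean so the next
  // call re-resolves. Clean and megamorphic sites are left alone.
  void on_miss(bool should_be_mono, bool megamorphic_ok) {
    if (should_be_mono) {
      state = ICState::Monomorphic;
    } else if (state != ICState::Megamorphic && state != ICState::Clean) {
      state = megamorphic_ok ? ICState::Megamorphic : ICState::Clean;
    }
  }
};

static const char* name(ICState s) {
  switch (s) {
    case ICState::Clean:       return "clean";
    case ICState::Monomorphic: return "monomorphic";
    default:                   return "megamorphic";
  }
}

int main() {
  ToyInlineCache ic;
  ic.on_miss(true,  true);  std::printf("%s\n", name(ic.state));  // monomorphic
  ic.on_miss(false, true);  std::printf("%s\n", name(ic.state));  // megamorphic
  ic.on_miss(false, false); std::printf("%s\n", name(ic.state));  // stays megamorphic
  return 0;
}
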
1655 //


1684     // recognizable call. We will always find a call for static
1685     // calls and for optimized virtual calls. For vanilla virtual
1686     // calls it depends on the state of the UseInlineCaches switch.
1687     //
1688     // With Inline Caches disabled we can get here for a virtual call
1689     // for two reasons:
1690     //   1 - calling an abstract method. The vtable for abstract methods
1691     //       will run us thru handle_wrong_method and we will eventually
1692     //       end up in the interpreter to throw the ame.
1693     //   2 - a racing deoptimization. We could be doing a vanilla vtable
1694     //       call and between the time we fetch the entry address and
1695     //       we jump to it the target gets deoptimized. Similar to 1
 1696     //       we will wind up in the interpreter (thru a c2i with c2).
1697     //
1698     address call_addr = NULL;
1699     {
1700       // Get call instruction under lock because another thread may be
1701       // busy patching it.
1702       MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
1703       // Location of call instruction
1704       call_addr = caller_nm->call_instruction_address(pc);



1705     }
1706     // Make sure nmethod doesn't get deoptimized and removed until
1707     // this is done with it.
1708     // CLEANUP - with lazy deopt shouldn't need this lock
1709     nmethodLocker nmlock(caller_nm);
1710 
1711     if (call_addr != NULL) {
1712       RelocIterator iter(caller_nm, call_addr, call_addr+1);
1713       int ret = iter.next(); // Get item
1714       if (ret) {
1715         assert(iter.addr() == call_addr, "must find call");
1716         if (iter.type() == relocInfo::static_call_type) {
1717           is_static_call = true;
1718         } else {
1719           assert(iter.type() == relocInfo::virtual_call_type ||
1720                  iter.type() == relocInfo::opt_virtual_call_type
1721                 , "unexpected relocInfo. type");
1722         }
1723       } else {
1724         assert(!UseInlineCaches, "relocation info. must exist for this address");
1725       }
1726 
1727       // Cleaning the inline cache will force a new resolve. This is more robust
1728       // than directly setting it to the new destination, since resolving of calls
1729       // is always done through the same code path. (experience shows that it
1730       // leads to very hard to track down bugs, if an inline cache gets updated
1731       // to a wrong method). It should not be performance critical, since the
1732       // resolve is only done once.
1733 
1734       bool is_nmethod = caller_nm->is_nmethod();
1735       MutexLocker ml(CompiledIC_lock);
1736       if (is_static_call) {
1737         CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
1738         ssc->set_to_clean();
1739       } else {
1740         // compiled, dispatched call (which used to call an interpreted method)
1741         CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1742         inline_cache->set_to_clean();
1743       }
1744     }
1745   }
1746 
1747   methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));
1748 
1749 
1750 #ifndef PRODUCT
1751   Atomic::inc(&_wrong_method_ctr);
1752 
1753   if (TraceCallFixup) {
1754     ResourceMark rm(thread);
1755     tty->print("handle_wrong_method reresolving call to");
1756     callee_method->print_short_name(tty);
1757     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));


1784   const int total_args_passed = method->size_of_parameters();
1785   const VMRegPair*    regs_with_member_name = regs;
1786         VMRegPair* regs_without_member_name = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed - 1);
1787 
1788   const int member_arg_pos = total_args_passed - 1;
1789   assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
1790   assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
1791 
1792   const bool is_outgoing = method->is_method_handle_intrinsic();
1793   int comp_args_on_stack = java_calling_convention(sig_bt, regs_without_member_name, total_args_passed - 1, is_outgoing);
1794 
1795   for (int i = 0; i < member_arg_pos; i++) {
1796     VMReg a =    regs_with_member_name[i].first();
1797     VMReg b = regs_without_member_name[i].first();
1798     assert(a->value() == b->value(), "register allocation mismatch: a=" INTX_FORMAT ", b=" INTX_FORMAT, a->value(), b->value());
1799   }
1800   assert(regs_with_member_name[member_arg_pos].first()->is_valid(), "bad member arg");
1801 }
1802 #endif
1803 
1804 bool SharedRuntime::should_fixup_call_destination(address destination, address entry_point, address caller_pc, Method* moop, CodeBlob* cb) {
1805   if (destination != entry_point) {
1806     CodeBlob* callee = CodeCache::find_blob(destination);
1807     // callee == cb seems weird. It means calling interpreter thru stub.
1808     if (callee == cb || callee->is_adapter_blob()) {
1809       // static call or optimized virtual
1810       if (TraceCallFixup) {
1811         tty->print("fixup callsite           at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
1812         moop->print_short_name(tty);
1813         tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
1814       }
1815       return true;
1816     } else {
1817       if (TraceCallFixup) {
1818         tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
1819         moop->print_short_name(tty);
1820         tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
1821       }
1822       // assert is too strong could also be resolve destinations.
1823       // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be");
1824     }
1825   } else {
1826     if (TraceCallFixup) {
1827       tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
1828       moop->print_short_name(tty);
1829       tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
1830     }
1831   }
1832   return false;
1833 }
1834 
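This new helper keeps the decision (and its TraceCallFixup output) in one place, so the caller only has to perform the MT-safe patch when it returns true. A toy sketch of the same decision reduced to its two conditions — the destination must differ from the new entry point, and the current target must be a stub the call is allowed to bypass (names illustrative, not HotSpot code):

#include <cstdio>

// Toy classification of what the current call destination points at.
enum class TargetKind { SameBlobStub, AdapterBlob, Other };

// Patch only when the destination differs from the new entry point and the
// current target is a c2i-style stub (same blob or an adapter blob).
static bool should_fixup(const void* destination, const void* entry_point, TargetKind kind) {
  if (destination == entry_point) return false;          // already patched
  return kind == TargetKind::SameBlobStub || kind == TargetKind::AdapterBlob;
}

int main() {
  int a, b;
  std::printf("%d\n", should_fixup(&a, &a, TargetKind::AdapterBlob));  // 0: already patched
  std::printf("%d\n", should_fixup(&a, &b, TargetKind::AdapterBlob));  // 1: patch to compiled entry
  std::printf("%d\n", should_fixup(&a, &b, TargetKind::Other));        // 0: e.g. a resolve destination, skip
  return 0;
}
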
1835 // ---------------------------------------------------------------------------
1836 // We are calling the interpreter via a c2i. Normally this would mean that
1837 // we were called by a compiled method. However we could have lost a race
1838 // where we went int -> i2c -> c2i and so the caller could in fact be
1839 // interpreted. If the caller is compiled we attempt to patch the caller
 1840 // so it no longer calls into the interpreter.
1841 IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address caller_pc))
1842   Method* moop(method);
1843 
1844   address entry_point = moop->from_compiled_entry_no_trampoline();
1845 
1846   // It's possible that deoptimization can occur at a call site which hasn't
1847   // been resolved yet, in which case this function will be called from
1848   // an nmethod that has been patched for deopt and we can ignore the
1849   // request for a fixup.
1850   // Also it is possible that we lost a race in that from_compiled_entry
1851   // is now back to the i2c in that case we don't need to patch and if
1852   // we did we'd leap into space because the callsite needs to use
1853   // "to interpreter" stub in order to load up the Method*. Don't
1854   // ask me how I know this...


1864 
1865   // Get the return PC for the passed caller PC.
1866   address return_pc = caller_pc + frame::pc_return_offset;
1867 
1868   // There is a benign race here. We could be attempting to patch to a compiled
1869   // entry point at the same time the callee is being deoptimized. If that is
1870   // the case then entry_point may in fact point to a c2i and we'd patch the
1871   // call site with the same old data. clear_code will set code() to NULL
1872   // at the end of it. If we happen to see that NULL then we can skip trying
1873   // to patch. If we hit the window where the callee has a c2i in the
1874   // from_compiled_entry and the NULL isn't present yet then we lose the race
1875   // and patch the code with the same old data. Asi es la vida.
1876 
1877   if (moop->code() == NULL) return;
1878 
1879   if (nm->is_in_use()) {
1880 
1881     // Expect to find a native call there (unless it was no-inline cache vtable dispatch)
1882     MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
1883     if (NativeCall::is_call_before(return_pc)) {
1884       ResourceMark mark;
1885       NativeCallWrapper* call = nm->call_wrapper_before(return_pc);
1886       //
1887       // bug 6281185. We might get here after resolving a call site to a vanilla
1888       // virtual call. Because the resolvee uses the verified entry it may then
1889       // see compiled code and attempt to patch the site by calling us. This would
 1890       // then incorrectly convert the call site to optimized and it's downhill from
1891       // there. If you're lucky you'll get the assert in the bugid, if not you've
1892       // just made a call site that could be megamorphic into a monomorphic site
1893       // for the rest of its life! Just another racing bug in the life of
1894       // fixup_callers_callsite ...
1895       //
1896       RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
1897       iter.next();
1898       assert(iter.has_current(), "must have a reloc at java call site");
1899       relocInfo::relocType typ = iter.reloc()->type();
1900       if (typ != relocInfo::static_call_type &&
1901            typ != relocInfo::opt_virtual_call_type &&
1902            typ != relocInfo::static_stub_type) {
1903         return;
1904       }
1905       address destination = call->destination();
1906       if (should_fixup_call_destination(destination, entry_point, caller_pc, moop, cb)) {









1907         call->set_destination_mt_safe(entry_point);















1908       }
1909     }
1910   }
1911 IRT_END
1912 
1913 
1914 // same as JVM_Arraycopy, but called directly from compiled code
1915 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
1916                                                 oopDesc* dest, jint dest_pos,
1917                                                 jint length,
1918                                                 JavaThread* thread)) {
1919 #ifndef PRODUCT
1920   _slow_array_copy_ctr++;
1921 #endif
1922   // Check if we have null pointers
1923   if (src == NULL || dest == NULL) {
1924     THROW(vmSymbols::java_lang_NullPointerException());
1925   }
1926   // Do the copy.  The casts to arrayOop are necessary to the copy_array API,
1927   // even though the copy_array API also performs dynamic checks to ensure

