src/share/vm/runtime/sharedRuntime.cpp

*** 42,51 ****
--- 42,52 ----
  #include "memory/resourceArea.hpp"
  #include "memory/universe.inline.hpp"
  #include "oops/klass.hpp"
  #include "oops/objArrayKlass.hpp"
  #include "oops/oop.inline.hpp"
+ #include "aot/aotLoader.hpp"
  #include "prims/forte.hpp"
  #include "prims/jvmtiExport.hpp"
  #include "prims/methodHandles.hpp"
  #include "prims/nativeLookup.hpp"
  #include "runtime/arguments.hpp"
*** 76,85 ****
--- 77,87 ----
  RuntimeStub*        SharedRuntime::_wrong_method_abstract_blob;
  RuntimeStub*        SharedRuntime::_ic_miss_blob;
  RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
  RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
  RuntimeStub*        SharedRuntime::_resolve_static_call_blob;
+ address             SharedRuntime::_resolve_static_call_entry;
  
  DeoptimizationBlob* SharedRuntime::_deopt_blob;
  SafepointBlob*      SharedRuntime::_polling_page_vectors_safepoint_handler_blob;
  SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
  SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;
*** 95,104 ****
--- 97,107 ----
    _wrong_method_abstract_blob        = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract), "wrong_method_abstract_stub");
    _ic_miss_blob                      = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),  "ic_miss_stub");
    _resolve_opt_virtual_call_blob     = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),   "resolve_opt_virtual_call");
    _resolve_virtual_call_blob         = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),       "resolve_virtual_call");
    _resolve_static_call_blob          = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),        "resolve_static_call");
+   _resolve_static_call_entry         = _resolve_static_call_blob->entry_point();
  
  #if defined(COMPILER2) || INCLUDE_JVMCI
    // Vectors are generated only by C2 and JVMCI.
    bool support_wide = is_wide_vector(MaxVectorSize);
    if (support_wide) {
*** 473,483 ****
  #if INCLUDE_JVMCI
    // JVMCI's ExceptionHandlerStub expects the thread local exception PC to be clear
    // and other exception handler continuations do not read it
    thread->set_exception_pc(NULL);
! #endif
  
    // The fastest case first
    CodeBlob* blob = CodeCache::find_blob(return_address);
    nmethod* nm = (blob != NULL) ? blob->as_nmethod_or_null() : NULL;
    if (nm != NULL) {
--- 476,486 ----
  #if INCLUDE_JVMCI
    // JVMCI's ExceptionHandlerStub expects the thread local exception PC to be clear
    // and other exception handler continuations do not read it
    thread->set_exception_pc(NULL);
! #endif // INCLUDE_JVMCI
  
    // The fastest case first
    CodeBlob* blob = CodeCache::find_blob(return_address);
    nmethod* nm = (blob != NULL) ? blob->as_nmethod_or_null() : NULL;
    if (nm != NULL) {
*** 510,519 ****
--- 513,532 ----
    // Interpreted code
    if (Interpreter::contains(return_address)) {
      return Interpreter::rethrow_exception_entry();
    }
  
+ #if INCLUDE_AOT
+   // AOT Compiled code
+   if (UseAOT && AOTLoader::contains(return_address)) {
+     AOTCompiledMethod* aotm = AOTLoader::find_aot((address) return_address);
+     // Set flag if return address is a method handle call site.
+     thread->set_is_method_handle_return(aotm->is_method_handle_return(return_address));
+     return aotm->exception_begin();
+   }
+ #endif
+ 
    guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub");
    guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
  
  #ifndef PRODUCT
    { ResourceMark rm;
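The hunk above inserts a second containment check between the interpreter test and the code-cache lookup: if the return address lies in the AOT code heap, the exception continuation comes from the owning AOTCompiledMethod rather than from an nmethod. A minimal self-contained sketch of that first-match dispatch order, with made-up regions and handler names (none of this is HotSpot API):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical code regions, for illustration only.
    struct Region { uintptr_t lo, hi; const char* handler; };

    static const Region regions[] = {
      { 0x1000, 0x2000, "interpreter rethrow entry" },  // checked first
      { 0x2000, 0x3000, "AOT exception_begin"       },  // then the AOT heap
      { 0x3000, 0x4000, "nmethod exception handler" },  // then the code cache
    };

    // The first containment test that matches picks the handler, mirroring
    // the Interpreter -> AOTLoader -> CodeCache order in the hunk above.
    static const char* handler_for(uintptr_t return_address) {
      for (const Region& r : regions) {
        if (return_address >= r.lo && return_address < r.hi) return r.handler;
      }
      return "no handler";
    }

    int main() {
      printf("%s\n", handler_for(0x2400)); // prints: AOT exception_begin
      return 0;
    }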
*** 986,1006 ****
  address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
    return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
  }
  
  JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
-   assert(obj->is_oop(), "must be a valid oop");
  #if INCLUDE_JVMCI
-   // This removes the requirement for JVMCI compilers to emit code
-   // performing a dynamic check that obj has a finalizer before
-   // calling this routine. There should be no performance impact
-   // for C1 since it emits a dynamic check. C2 and the interpreter
-   // uses other runtime routines for registering finalizers.
    if (!obj->klass()->has_finalizer()) {
      return;
    }
  #endif // INCLUDE_JVMCI
  
    assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
    InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
  JRT_END
--- 999,1014 ----
  address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
    return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
  }
  
  JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
  #if INCLUDE_JVMCI
    if (!obj->klass()->has_finalizer()) {
      return;
    }
  #endif // INCLUDE_JVMCI
+   assert(obj->is_oop(), "must be a valid oop");
  
    assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
    InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
  JRT_END
*** 1223,1233 ****
      assert(fr.is_runtime_frame(), "must be a runtimeStub");
      fr = fr.sender(&reg_map);
      assert(fr.is_entry_frame(), "must be");
      // fr is now pointing to the entry frame.
      callee_method = methodHandle(THREAD, fr.entry_frame_call_wrapper()->callee_method());
-     assert(fr.entry_frame_call_wrapper()->receiver() == NULL || !callee_method->is_static(), "non-null receiver for static call??");
    } else {
      Bytecodes::Code bc;
      CallInfo callinfo;
      find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(methodHandle()));
      callee_method = callinfo.selected_method();
--- 1231,1240 ----
*** 1352,1371 ****
    nmethodLocker nl_callee(callee);
  #ifdef ASSERT
    address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
  #endif
  
    if (is_virtual) {
      assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
      bool static_bound = call_info.resolved_method()->can_be_statically_bound();
      KlassHandle h_klass(THREAD, invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass());
      CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
!                      is_optimized, static_bound, virtual_call_info,
                       CHECK_(methodHandle()));
    } else {
      // static call
!     CompiledStaticCall::compute_entry(callee_method, static_call_info);
    }
  
    // grab lock, check for deoptimization and potentially patch caller
    {
      MutexLocker ml_patch(CompiledIC_lock);
--- 1359,1380 ----
    nmethodLocker nl_callee(callee);
  #ifdef ASSERT
    address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
  #endif
  
+   bool is_nmethod = caller_nm->is_nmethod();
+ 
    if (is_virtual) {
      assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
      bool static_bound = call_info.resolved_method()->can_be_statically_bound();
      KlassHandle h_klass(THREAD, invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass());
      CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
!                      is_optimized, static_bound, is_nmethod, virtual_call_info,
                       CHECK_(methodHandle()));
    } else {
      // static call
!     CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info);
    }
  
    // grab lock, check for deoptimization and potentially patch caller
    {
      MutexLocker ml_patch(CompiledIC_lock);
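Here the caller's code kind is computed once, via caller_nm->is_nmethod(), and threaded into CompiledIC::compute_monomorphic_entry and CompiledStaticCall::compute_entry; the same flag reappears in later hunks. A hedged toy of that shape, with stand-in types that are not HotSpot's:

    #include <cstdio>

    // Stand-in type for illustration only; the real StaticCallInfo carries
    // much more state. The point is the shape of the change: the caller
    // classifies itself once and passes the result down, so the callee
    // never has to re-derive the caller's code kind.
    struct StaticCallInfo { const char* wiring; };

    static void compute_entry(bool is_nmethod, StaticCallInfo& info) {
      // Pick the call-site wiring appropriate to the caller's code kind.
      info.wiring = is_nmethod ? "nmethod static-call stub" : "AOT static-call stub";
    }

    int main() {
      StaticCallInfo info;
      bool is_nmethod = true;          // in the hunk: caller_nm->is_nmethod()
      compute_entry(is_nmethod, info);
      printf("%s\n", info.wiring);
      return 0;
    }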
*** 1392,1402 ****
          CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
          if (inline_cache->is_clean()) {
            inline_cache->set_to_monomorphic(virtual_call_info);
          }
        } else {
!         CompiledStaticCall* ssc = compiledStaticCall_before(caller_frame.pc());
          if (ssc->is_clean()) ssc->set(static_call_info);
        }
      }
  
    } // unlock CompiledIC_lock
--- 1401,1411 ----
          CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
          if (inline_cache->is_clean()) {
            inline_cache->set_to_monomorphic(virtual_call_info);
          }
        } else {
!         CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
          if (ssc->is_clean()) ssc->set(static_call_info);
        }
      }
  
    } // unlock CompiledIC_lock
*** 1508,1517 ****
--- 1517,1527 ----
    assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
    return callee_method->verified_code_entry();
  JRT_END
  
+ 
  methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
    ResourceMark rm(thread);
    CallInfo call_info;
    Bytecodes::Code bc;
*** 1620,1630 ****
        CompiledICInfo info;
        KlassHandle receiver_klass(THREAD, receiver()->klass());
        inline_cache->compute_monomorphic_entry(callee_method,
                                                receiver_klass,
                                                inline_cache->is_optimized(),
!                                               false,
                                                info, CHECK_(methodHandle()));
        inline_cache->set_to_monomorphic(info);
      } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
        // Potential change to megamorphic
        bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
--- 1630,1640 ----
        CompiledICInfo info;
        KlassHandle receiver_klass(THREAD, receiver()->klass());
        inline_cache->compute_monomorphic_entry(callee_method,
                                                receiver_klass,
                                                inline_cache->is_optimized(),
!                                               false, caller_nm->is_nmethod(),
                                                info, CHECK_(methodHandle()));
        inline_cache->set_to_monomorphic(info);
      } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
        // Potential change to megamorphic
        bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
*** 1689,1702 ****
    {
      // Get call instruction under lock because another thread may be
      // busy patching it.
      MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
      // Location of call instruction
!     if (NativeCall::is_call_before(pc)) {
!       NativeCall *ncall = nativeCall_before(pc);
!       call_addr = ncall->instruction_address();
!     }
    }
    // Make sure nmethod doesn't get deoptimized and removed until
    // this is done with it.
    // CLEANUP - with lazy deopt shouldn't need this lock
    nmethodLocker nmlock(caller_nm);
--- 1699,1709 ----
    {
      // Get call instruction under lock because another thread may be
      // busy patching it.
      MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
      // Location of call instruction
!     call_addr = caller_nm->call_instruction_address(pc);
    }
    // Make sure nmethod doesn't get deoptimized and removed until
    // this is done with it.
    // CLEANUP - with lazy deopt shouldn't need this lock
    nmethodLocker nmlock(caller_nm);
*** 1722,1734 ****
    // is always done through the same code path. (experience shows that it
    // leads to very hard to track down bugs, if an inline cache gets updated
    // to a wrong method). It should not be performance critical, since the
    // resolve is only done once.
  
    MutexLocker ml(CompiledIC_lock);
    if (is_static_call) {
!     CompiledStaticCall* ssc= compiledStaticCall_at(call_addr);
      ssc->set_to_clean();
    } else {
      // compiled, dispatched call (which used to call an interpreted method)
      CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
      inline_cache->set_to_clean();
--- 1729,1742 ----
    // is always done through the same code path. (experience shows that it
    // leads to very hard to track down bugs, if an inline cache gets updated
    // to a wrong method). It should not be performance critical, since the
    // resolve is only done once.
  
+   bool is_nmethod = caller_nm->is_nmethod();
    MutexLocker ml(CompiledIC_lock);
    if (is_static_call) {
!     CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
      ssc->set_to_clean();
    } else {
      // compiled, dispatched call (which used to call an interpreted method)
      CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
      inline_cache->set_to_clean();
*** 1791,1800 ****
--- 1799,1839 ----
      }
      assert(regs_with_member_name[member_arg_pos].first()->is_valid(), "bad member arg");
    }
  #endif
  
+ bool SharedRuntime::should_fixup_call_destination(address destination, address entry_point, address caller_pc, Method* moop, CodeBlob* cb) {
+   if (destination != entry_point) {
+     CodeBlob* callee = CodeCache::find_blob(destination);
+     // callee == cb seems weird. It means calling interpreter thru stub.
+     if (callee == cb || callee->is_adapter_blob()) {
+       // static call or optimized virtual
+       if (TraceCallFixup) {
+         tty->print("fixup callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
+         moop->print_short_name(tty);
+         tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
+       }
+       return true;
+     } else {
+       if (TraceCallFixup) {
+         tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
+         moop->print_short_name(tty);
+         tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
+       }
+       // assert is too strong could also be resolve destinations.
+       // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be");
+     }
+   } else {
+     if (TraceCallFixup) {
+       tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
+       moop->print_short_name(tty);
+       tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
+     }
+   }
+   return false;
+ }
+ 
  // ---------------------------------------------------------------------------
  // We are calling the interpreter via a c2i. Normally this would mean that
  // we were called by a compiled method. However we could have lost a race
  // where we went int -> i2c -> c2i and so the caller could in fact be
  // interpreted. If the caller is compiled we attempt to patch the caller
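The new should_fixup_call_destination helper pulls the three-way classification (patchable, not patchable, already patched) and all of its TraceCallFixup logging out of the call-site fixup path, so the caller in the final hunk below keeps only a single guarded mutation. A compact, hypothetical illustration of that predicate-extraction pattern (names invented for this sketch):

    #include <cstdio>

    // The predicate owns the classification and the tracing; the caller
    // keeps the one side effect, so the patching line appears exactly once.
    static bool should_patch(long destination, long entry_point) {
      if (destination == entry_point) {
        printf("already patched\n");
        return false;                  // nothing to do
      }
      // a real implementation would classify the destination here
      printf("fixup ok\n");
      return true;
    }

    static void fixup_call_site(long& destination, long entry_point) {
      if (should_patch(destination, entry_point)) {
        destination = entry_point;     // the single mutation, kept in the caller
      }
    }

    int main() {
      long dest = 0x1000;
      fixup_call_site(dest, 0x2000);
      printf("dest = 0x%lx\n", dest);
      return 0;
    }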
*** 1840,1850 ****
    if (nm->is_in_use()) {
  
      // Expect to find a native call there (unless it was no-inline cache vtable dispatch)
      MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
      if (NativeCall::is_call_before(return_pc)) {
!       NativeCall *call = nativeCall_before(return_pc);
        //
        // bug 6281185. We might get here after resolving a call site to a vanilla
        // virtual call. Because the resolvee uses the verified entry it may then
        // see compiled code and attempt to patch the site by calling us. This would
        // then incorrectly convert the call site to optimized and its downhill from
--- 1879,1890 ----
    if (nm->is_in_use()) {
  
      // Expect to find a native call there (unless it was no-inline cache vtable dispatch)
      MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
      if (NativeCall::is_call_before(return_pc)) {
!       ResourceMark mark;
!       NativeCallWrapper* call = nm->call_wrapper_before(return_pc);
        //
        // bug 6281185. We might get here after resolving a call site to a vanilla
        // virtual call. Because the resolvee uses the verified entry it may then
        // see compiled code and attempt to patch the site by calling us. This would
        // then incorrectly convert the call site to optimized and its downhill from
*** 1861,1896 ****
          typ != relocInfo::opt_virtual_call_type &&
          typ != relocInfo::static_stub_type) {
        return;
      }
      address destination = call->destination();
!     if (destination != entry_point) {
!       CodeBlob* callee = CodeCache::find_blob(destination);
!       // callee == cb seems weird. It means calling interpreter thru stub.
!       if (callee == cb || callee->is_adapter_blob()) {
!         // static call or optimized virtual
!         if (TraceCallFixup) {
!           tty->print("fixup callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
!           moop->print_short_name(tty);
!           tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
!         }
          call->set_destination_mt_safe(entry_point);
-       } else {
-         if (TraceCallFixup) {
-           tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
-           moop->print_short_name(tty);
-           tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
-         }
-         // assert is too strong could also be resolve destinations.
-         // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be");
-       }
-     } else {
-       if (TraceCallFixup) {
-         tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
-         moop->print_short_name(tty);
-         tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
-       }
      }
    }
  }
  
  IRT_END
--- 1901,1912 ----
          typ != relocInfo::opt_virtual_call_type &&
          typ != relocInfo::static_stub_type) {
        return;
      }
      address destination = call->destination();
!     if (should_fixup_call_destination(destination, entry_point, caller_pc, moop, cb)) {
        call->set_destination_mt_safe(entry_point);
      }
    }
  }
  
  IRT_END
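Together with the NativeCallWrapper change in the hunk at 1840, the fixup logic now runs against an abstract call-site view handed out by the owning method (nm->call_wrapper_before), so nmethods and AOT methods can share it; the added ResourceMark suggests the wrapper is resource-allocated, though that is an inference from this diff alone. A hypothetical mini-version of that interface-behind-a-factory idea (all names invented, not HotSpot's):

    #include <cstdio>
    #include <memory>

    // Invented mini-interface; the real NativeCallWrapper has a different shape.
    struct CallSiteView {
      virtual ~CallSiteView() = default;
      virtual unsigned long destination() const = 0;
      virtual void set_destination_mt_safe(unsigned long d) = 0;
    };

    // One concrete view per code kind; a JIT-style call site shown here.
    struct JitCallSite : CallSiteView {
      unsigned long dest = 0;
      unsigned long destination() const override { return dest; }
      void set_destination_mt_safe(unsigned long d) override { dest = d; }
    };

    // Plays the role of the owning method's factory: it hands out the view
    // appropriate to its own code kind, so callers stay kind-agnostic.
    static std::unique_ptr<CallSiteView> call_site_before(/* address pc */) {
      return std::make_unique<JitCallSite>();
    }

    int main() {
      std::unique_ptr<CallSiteView> call = call_site_before();
      call->set_destination_mt_safe(0x42);
      printf("destination = 0x%lx\n", call->destination());
      return 0;
    }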