
src/hotspot/share/runtime/sharedRuntime.cpp

*** 1245,1257 ****
  }
  
  // Resolves a call.
  methodHandle SharedRuntime::resolve_helper(JavaThread *thread,
                                             bool is_virtual,
!                                            bool is_optimized, TRAPS) {
    methodHandle callee_method;
!   callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
    if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
      int retry_count = 0;
      while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
             callee_method->method_holder() != SystemDictionary::Object_klass()) {
        // If has a pending exception then there is no need to re-try to
--- 1245,1258 ----
  }
  
  // Resolves a call.
  methodHandle SharedRuntime::resolve_helper(JavaThread *thread,
                                             bool is_virtual,
!                                            bool is_optimized,
!                                            bool* caller_is_c1, TRAPS) {
    methodHandle callee_method;
!   callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, caller_is_c1, THREAD);
    if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
      int retry_count = 0;
      while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
             callee_method->method_holder() != SystemDictionary::Object_klass()) {
        // If has a pending exception then there is no need to re-try to
*** 1264,1274 ****
        // in the middle of resolve. If it is looping here more than 100 times
        // means then there could be a bug here.
        guarantee((retry_count++ < 100),
                  "Could not resolve to latest version of redefined method");
        // method is redefined in the middle of resolve so re-try.
!       callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
      }
    }
    return callee_method;
  }
--- 1265,1275 ----
        // in the middle of resolve. If it is looping here more than 100 times
        // means then there could be a bug here.
        guarantee((retry_count++ < 100),
                  "Could not resolve to latest version of redefined method");
        // method is redefined in the middle of resolve so re-try.
!       callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, caller_is_c1, THREAD);
      }
    }
    return callee_method;
  }
*** 1295,1321 ****
  #ifdef ASSERT
    address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
  #endif
  
    bool is_nmethod = caller_nm->is_nmethod();
  
    if (is_virtual) {
      Klass* receiver_klass = NULL;
!     if (ValueTypePassFieldsAsArgs && callee_method->method_holder()->is_value()) {
        // If the receiver is a value type that is passed as fields, no oop is available
        receiver_klass = callee_method->method_holder();
      } else {
        assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
        receiver_klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass();
      }
      bool static_bound = call_info.resolved_method()->can_be_statically_bound();
      CompiledIC::compute_monomorphic_entry(callee_method, receiver_klass,
!                      is_optimized, static_bound, is_nmethod, virtual_call_info,
                       CHECK_false);
    } else {
      // static call
!     CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info);
    }
  
    // grab lock, check for deoptimization and potentially patch caller
    {
      CompiledICLocker ml(caller_nm);
--- 1296,1323 ----
  #ifdef ASSERT
    address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
  #endif
  
    bool is_nmethod = caller_nm->is_nmethod();
+   bool caller_is_c1 = caller_nm->is_c1();
  
    if (is_virtual) {
      Klass* receiver_klass = NULL;
!     if (ValueTypePassFieldsAsArgs && !caller_is_c1 && callee_method->method_holder()->is_value()) {
        // If the receiver is a value type that is passed as fields, no oop is available
        receiver_klass = callee_method->method_holder();
      } else {
        assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
        receiver_klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass();
      }
      bool static_bound = call_info.resolved_method()->can_be_statically_bound();
      CompiledIC::compute_monomorphic_entry(callee_method, receiver_klass,
!                      is_optimized, static_bound, is_nmethod, caller_is_c1, virtual_call_info,
                       CHECK_false);
    } else {
      // static call
!     CompiledStaticCall::compute_entry(callee_method, caller_nm, static_call_info);
    }
  
    // grab lock, check for deoptimization and potentially patch caller
    {
      CompiledICLocker ml(caller_nm);
*** 1356,1374 ****
  // Resolves a call. The compilers generate code for calls that go here
  // and are patched with the real destination of the call.
  methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
                                                 bool is_virtual,
!                                                bool is_optimized, TRAPS) {
  
    ResourceMark rm(thread);
    RegisterMap cbl_map(thread, false);
    frame caller_frame = thread->last_frame().sender(&cbl_map);
  
    CodeBlob* caller_cb = caller_frame.cb();
    guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
    CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
  
    // make sure caller is not getting deoptimized
    // and removed before we are done with it.
    // CLEANUP - with lazy deopt shouldn't need this lock
    nmethodLocker caller_lock(caller_nm);
--- 1358,1378 ----
  // Resolves a call. The compilers generate code for calls that go here
  // and are patched with the real destination of the call.
  methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
                                                 bool is_virtual,
!                                                bool is_optimized,
!                                                bool* caller_is_c1, TRAPS) {
  
    ResourceMark rm(thread);
    RegisterMap cbl_map(thread, false);
    frame caller_frame = thread->last_frame().sender(&cbl_map);
  
    CodeBlob* caller_cb = caller_frame.cb();
    guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
    CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
+   *caller_is_c1 = caller_nm->is_c1();
  
    // make sure caller is not getting deoptimized
    // and removed before we are done with it.
    // CLEANUP - with lazy deopt shouldn't need this lock
    nmethodLocker caller_lock(caller_nm);
*** 1475,1493 ****
    assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
  #endif /* ASSERT */
  
    methodHandle callee_method;
    bool is_optimized = false;
    JRT_BLOCK
!     callee_method = SharedRuntime::handle_ic_miss_helper(thread, is_optimized, CHECK_NULL);
      // Return Method* through TLS
      thread->set_vm_result_2(callee_method());
    JRT_BLOCK_END
  
    // return compiled code entry point after potential safepoints
!   assert(callee_method->verified_code_entry() != NULL, "Jump to zero!");
!   assert(callee_method->verified_value_ro_code_entry() != NULL, "Jump to zero!");
!   return is_optimized ? callee_method->verified_code_entry() : callee_method->verified_value_ro_code_entry();
  JRT_END
  
  // Handle call site that has been made non-entrant
  JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
--- 1479,1496 ----
    assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
  #endif /* ASSERT */
  
    methodHandle callee_method;
    bool is_optimized = false;
+   bool caller_is_c1 = false;
    JRT_BLOCK
!     callee_method = SharedRuntime::handle_ic_miss_helper(thread, is_optimized, caller_is_c1, CHECK_NULL);
      // Return Method* through TLS
      thread->set_vm_result_2(callee_method());
    JRT_BLOCK_END
  
    // return compiled code entry point after potential safepoints
!   return entry_for_handle_wrong_method(callee_method, is_optimized, caller_is_c1);
  JRT_END
  
  // Handle call site that has been made non-entrant
  JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
*** 1515,1533 ****
    }
  
    // Must be compiled to compiled path which is safe to stackwalk
    methodHandle callee_method;
    bool is_optimized = false;
    JRT_BLOCK
      // Force resolving of caller (if we called from compiled frame)
!     callee_method = SharedRuntime::reresolve_call_site(thread, is_optimized, CHECK_NULL);
      thread->set_vm_result_2(callee_method());
    JRT_BLOCK_END
  
    // return compiled code entry point after potential safepoints
!   assert(callee_method->verified_code_entry() != NULL, "Jump to zero!");
!   assert(callee_method->verified_value_ro_code_entry() != NULL, "Jump to zero!");
!   return is_optimized ? callee_method->verified_code_entry() : callee_method->verified_value_ro_code_entry();
  JRT_END
  
  // Handle abstract method call
  JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* thread))
    // Verbose error message for AbstractMethodError.
--- 1518,1535 ----
    }
  
    // Must be compiled to compiled path which is safe to stackwalk
    methodHandle callee_method;
    bool is_optimized = false;
+   bool caller_is_c1 = false;
    JRT_BLOCK
      // Force resolving of caller (if we called from compiled frame)
!     callee_method = SharedRuntime::reresolve_call_site(thread, is_optimized, caller_is_c1, CHECK_NULL);
      thread->set_vm_result_2(callee_method());
    JRT_BLOCK_END
  
    // return compiled code entry point after potential safepoints
!   return entry_for_handle_wrong_method(callee_method, is_optimized, caller_is_c1);
  JRT_END
  
  // Handle abstract method call
  JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* thread))
    // Verbose error message for AbstractMethodError.
*** 1561,1615 ****
  // resolve a static call and patch code
  JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread ))
    methodHandle callee_method;
    JRT_BLOCK
!     callee_method = SharedRuntime::resolve_helper(thread, false, false, CHECK_NULL);
      thread->set_vm_result_2(callee_method());
    JRT_BLOCK_END
  
    // return compiled code entry point after potential safepoints
!   assert(callee_method->verified_code_entry() != NULL, "Jump to zero!");
!   return callee_method->verified_code_entry();
  JRT_END
  
  // resolve virtual call and update inline cache to monomorphic
  JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread ))
    methodHandle callee_method;
    JRT_BLOCK
!     callee_method = SharedRuntime::resolve_helper(thread, true, false, CHECK_NULL);
      thread->set_vm_result_2(callee_method());
    JRT_BLOCK_END
  
    // return compiled code entry point after potential safepoints
!   assert(callee_method->verified_value_ro_code_entry() != NULL, "Jump to zero!");
!   return callee_method->verified_value_ro_code_entry();
  JRT_END
  
  // Resolve a virtual call that can be statically bound (e.g., always
  // monomorphic, so it has no inline cache). Patch code to resolved target.
  JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread))
    methodHandle callee_method;
    JRT_BLOCK
!     callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL);
      thread->set_vm_result_2(callee_method());
    JRT_BLOCK_END
  
    // return compiled code entry point after potential safepoints
!   assert(callee_method->verified_code_entry() != NULL, "Jump to zero!");
!   return callee_method->verified_code_entry();
  JRT_END
  
  // The handle_ic_miss_helper_internal function returns false if it failed due
  // to either running out of vtable stubs or ic stubs due to IC transitions
  // to transitional states. The needs_ic_stub_refill value will be set if
  // the failure was due to running out of IC stubs, in which case handle_ic_miss_helper
  // refills the IC stubs and tries again.
  bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm,
                                                     const frame& caller_frame, methodHandle callee_method,
                                                     Bytecodes::Code bc, CallInfo& call_info,
!                                                    bool& needs_ic_stub_refill, bool& is_optimized, TRAPS) {
    CompiledICLocker ml(caller_nm);
    CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
    bool should_be_mono = false;
    if (inline_cache->is_optimized()) {
      if (TraceCallFixup) {
--- 1563,1626 ----
  // resolve a static call and patch code
  JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread ))
    methodHandle callee_method;
+   bool caller_is_c1;
    JRT_BLOCK
!     callee_method = SharedRuntime::resolve_helper(thread, false, false, &caller_is_c1, CHECK_NULL);
      thread->set_vm_result_2(callee_method());
    JRT_BLOCK_END
  
    // return compiled code entry point after potential safepoints
!   address entry = caller_is_c1 ?
!     callee_method->verified_value_code_entry() : callee_method->verified_code_entry();
!   assert(entry != NULL, "Jump to zero!");
!   return entry;
  JRT_END
  
  // resolve virtual call and update inline cache to monomorphic
  JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread ))
    methodHandle callee_method;
+   bool caller_is_c1;
    JRT_BLOCK
!     callee_method = SharedRuntime::resolve_helper(thread, true, false, &caller_is_c1, CHECK_NULL);
      thread->set_vm_result_2(callee_method());
    JRT_BLOCK_END
  
    // return compiled code entry point after potential safepoints
!   address entry = caller_is_c1 ?
!     callee_method->verified_value_code_entry() : callee_method->verified_value_ro_code_entry();
!   assert(entry != NULL, "Jump to zero!");
!   return entry;
  JRT_END
  
  // Resolve a virtual call that can be statically bound (e.g., always
  // monomorphic, so it has no inline cache). Patch code to resolved target.
  JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread))
    methodHandle callee_method;
+   bool caller_is_c1;
    JRT_BLOCK
!     callee_method = SharedRuntime::resolve_helper(thread, true, true, &caller_is_c1, CHECK_NULL);
      thread->set_vm_result_2(callee_method());
    JRT_BLOCK_END
  
    // return compiled code entry point after potential safepoints
!   address entry = caller_is_c1 ?
!     callee_method->verified_value_code_entry() : callee_method->verified_code_entry();
!   assert(entry != NULL, "Jump to zero!");
!   return entry;
  JRT_END
  
  // The handle_ic_miss_helper_internal function returns false if it failed due
  // to either running out of vtable stubs or ic stubs due to IC transitions
  // to transitional states. The needs_ic_stub_refill value will be set if
  // the failure was due to running out of IC stubs, in which case handle_ic_miss_helper
  // refills the IC stubs and tries again.
  bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm,
                                                     const frame& caller_frame, methodHandle callee_method,
                                                     Bytecodes::Code bc, CallInfo& call_info,
!                                                    bool& needs_ic_stub_refill, bool& is_optimized, bool caller_is_c1, TRAPS) {
    CompiledICLocker ml(caller_nm);
    CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
    bool should_be_mono = false;
    if (inline_cache->is_optimized()) {
      if (TraceCallFixup) {
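
Editor's note: the two handle_wrong_method stubs earlier on this page now return through entry_for_handle_wrong_method(), whose definition is not shown here. Below is a minimal sketch of what such a helper could look like, assuming it merely centralizes the caller_is_c1/is_optimized entry-point selection that the resolve_*_call_C stubs above perform inline; the helper's actual definition in the patch may differ.

  // Hypothetical sketch, not the patch's actual code: pick the callee entry point
  // that matches the caller's calling convention. A C1 caller passes value types
  // as oops, so it jumps to the verified value entry; a C2 caller uses the
  // scalarized convention: the regular verified entry for statically bound
  // (optimized) calls, and the receiver-as-oop ("ro") value entry for
  // inline-cached virtual calls.
  static address entry_for_handle_wrong_method(methodHandle callee_method,
                                               bool is_optimized, bool caller_is_c1) {
    address entry;
    if (caller_is_c1) {
      entry = callee_method->verified_value_code_entry();
    } else if (is_optimized) {
      entry = callee_method->verified_code_entry();
    } else {
      entry = callee_method->verified_value_ro_code_entry();
    }
    assert(entry != NULL, "Jump to zero!");
    return entry;
  }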
*** 1654,1672 ****
        Klass* receiver_klass = receiver()->klass();
        inline_cache->compute_monomorphic_entry(callee_method,
                                                receiver_klass,
                                                inline_cache->is_optimized(),
                                                false, caller_nm->is_nmethod(),
                                                info, CHECK_false);
        if (!inline_cache->set_to_monomorphic(info)) {
          needs_ic_stub_refill = true;
          return false;
        }
      }
    } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
      // Potential change to megamorphic
!     bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, CHECK_false);
      if (needs_ic_stub_refill) {
        return false;
      }
      if (!successful) {
        if (!inline_cache->set_to_clean()) {
--- 1665,1684 ----
        Klass* receiver_klass = receiver()->klass();
        inline_cache->compute_monomorphic_entry(callee_method,
                                                receiver_klass,
                                                inline_cache->is_optimized(),
                                                false, caller_nm->is_nmethod(),
+                                               caller_nm->is_c1(),
                                                info, CHECK_false);
        if (!inline_cache->set_to_monomorphic(info)) {
          needs_ic_stub_refill = true;
          return false;
        }
      }
    } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
      // Potential change to megamorphic
!     bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, caller_is_c1, CHECK_false);
      if (needs_ic_stub_refill) {
        return false;
      }
      if (!successful) {
        if (!inline_cache->set_to_clean()) {
*** 1678,1688 ****
      // Either clean or megamorphic
    }
    return true;
  }
  
! methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, bool& is_optimized, TRAPS) {
    ResourceMark rm(thread);
    CallInfo call_info;
    Bytecodes::Code bc;
  
    // receiver is NULL for static calls. An exception is thrown for NULL
--- 1690,1700 ----
      // Either clean or megamorphic
    }
    return true;
  }
  
! methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, bool& is_optimized, bool& caller_is_c1, TRAPS) {
    ResourceMark rm(thread);
    CallInfo call_info;
    Bytecodes::Code bc;
  
    // receiver is NULL for static calls. An exception is thrown for NULL
*** 1698,1708 ****
    // plain ic_miss) and the site will be converted to an optimized virtual call site
    // never to miss again. I don't believe C2 will produce code like this but if it
    // did this would still be the correct thing to do for it too, hence no ifdef.
    //
    if (call_info.resolved_method()->can_be_statically_bound()) {
!     methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, is_optimized, CHECK_(methodHandle()));
      if (TraceCallFixup) {
        RegisterMap reg_map(thread, false);
        frame caller_frame = thread->last_frame().sender(&reg_map);
        ResourceMark rm(thread);
        tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
--- 1710,1720 ----
    // plain ic_miss) and the site will be converted to an optimized virtual call site
    // never to miss again. I don't believe C2 will produce code like this but if it
    // did this would still be the correct thing to do for it too, hence no ifdef.
    //
    if (call_info.resolved_method()->can_be_statically_bound()) {
!     methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, is_optimized, caller_is_c1, CHECK_(methodHandle()));
      if (TraceCallFixup) {
        RegisterMap reg_map(thread, false);
        frame caller_frame = thread->last_frame().sender(&reg_map);
        ResourceMark rm(thread);
        tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
*** 1748,1763 ****
    // that refills them.
    RegisterMap reg_map(thread, false);
    frame caller_frame = thread->last_frame().sender(&reg_map);
    CodeBlob* cb = caller_frame.cb();
    CompiledMethod* caller_nm = cb->as_compiled_method();
  
    for (;;) {
      ICRefillVerifier ic_refill_verifier;
      bool needs_ic_stub_refill = false;
      bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method,
!                                                      bc, call_info, needs_ic_stub_refill, is_optimized, CHECK_(methodHandle()));
      if (successful || !needs_ic_stub_refill) {
        return callee_method;
      } else {
        InlineCacheBuffer::refill_ic_stubs();
      }
--- 1760,1776 ----
    // that refills them.
    RegisterMap reg_map(thread, false);
    frame caller_frame = thread->last_frame().sender(&reg_map);
    CodeBlob* cb = caller_frame.cb();
    CompiledMethod* caller_nm = cb->as_compiled_method();
+   caller_is_c1 = caller_nm->is_c1();
  
    for (;;) {
      ICRefillVerifier ic_refill_verifier;
      bool needs_ic_stub_refill = false;
      bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method,
!                                                      bc, call_info, needs_ic_stub_refill, is_optimized, caller_is_c1, CHECK_(methodHandle()));
      if (successful || !needs_ic_stub_refill) {
        return callee_method;
      } else {
        InlineCacheBuffer::refill_ic_stubs();
      }
*** 1785,1795 ****
  // Resets a call-site in compiled code so it will get resolved again.
  // This routines handles both virtual call sites, optimized virtual call
  // sites, and static call sites. Typically used to change a call sites
  // destination from compiled to interpreted.
  //
! methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, bool& is_optimized, TRAPS) {
    ResourceMark rm(thread);
    RegisterMap reg_map(thread, false);
    frame stub_frame = thread->last_frame();
    assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
    frame caller = stub_frame.sender(&reg_map);
--- 1798,1808 ----
  // Resets a call-site in compiled code so it will get resolved again.
  // This routines handles both virtual call sites, optimized virtual call
  // sites, and static call sites. Typically used to change a call sites
  // destination from compiled to interpreted.
  //
! methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, bool& is_optimized, bool& caller_is_c1, TRAPS) {
    ResourceMark rm(thread);
    RegisterMap reg_map(thread, false);
    frame stub_frame = thread->last_frame();
    assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
    frame caller = stub_frame.sender(&reg_map);
*** 1803,1812 ****
--- 1816,1826 ----
    address pc = caller.pc();
  
    // Check for static or virtual call
    bool is_static_call = false;
    CompiledMethod* caller_nm = CodeCache::find_compiled(pc);
+   caller_is_c1 = caller_nm->is_c1();
  
    // Default call_addr is the location of the "basic" call.
    // Determine the address of the call we a reresolving. With
    // Inline Caches we will always find a recognizable call.
    // With Inline Caches disabled we may or may not find a