
src/hotspot/share/runtime/sharedRuntime.cpp


*** 1244,1258 ****
      }
    }
    return callee_method;
  }
  
  // Resolves a call. The compilers generate code for calls that go here
  // and are patched with the real destination of the call.
  methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
!                                                bool is_virtual,
!                                                bool is_optimized, TRAPS) {
  
    ResourceMark rm(thread);
    RegisterMap cbl_map(thread, false);
    frame caller_frame = thread->last_frame().sender(&cbl_map);
--- 1244,1334 ----
      }
    }
    return callee_method;
  }
  
+ // This fails if resolution required refilling of IC stubs
+ bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, const frame& caller_frame,
+                                                 CompiledMethod* caller_nm, bool is_virtual, bool is_optimized,
+                                                 Handle receiver, CallInfo& call_info, Bytecodes::Code invoke_code, TRAPS) {
+   StaticCallInfo static_call_info;
+   CompiledICInfo virtual_call_info;
+ 
+   // Make sure the callee nmethod does not get deoptimized and removed before
+   // we are done patching the code.
+   CompiledMethod* callee = callee_method->code();
+ 
+   if (callee != NULL) {
+     assert(callee->is_compiled(), "must be nmethod for patching");
+   }
+ 
+   if (callee != NULL && !callee->is_in_use()) {
+     // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
+     callee = NULL;
+   }
+   nmethodLocker nl_callee(callee);
+ #ifdef ASSERT
+   address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
+ #endif
+ 
+   bool is_nmethod = caller_nm->is_nmethod();
+ 
+   if (is_virtual) {
+     assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
+     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
+     Klass* klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass();
+     CompiledIC::compute_monomorphic_entry(callee_method, klass,
+                                           is_optimized, static_bound, is_nmethod, virtual_call_info,
+                                           CHECK_false);
+   } else {
+     // static call
+     CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info);
+   }
+ 
+   // grab lock, check for deoptimization and potentially patch caller
+   {
+     CompiledICLocker ml(caller_nm);
+ 
+     // Lock blocks for safepoint during which both nmethods can change state.
+ 
+     // Now that we are ready to patch, if the Method* was redefined then
+     // don't update the call site and let the caller retry.
+     // Don't update the call site if the callee nmethod was unloaded or deoptimized.
+     // Don't update the call site if the callee nmethod was replaced by another
+     // nmethod, which may happen when multiple alive nmethods per method
+     // (tiered compilation) are supported.
+     if (!callee_method->is_old() &&
+         (callee == NULL || (callee->is_in_use() && callee_method->code() == callee))) {
+ #ifdef ASSERT
+       // We must not try to patch to jump to an already unloaded method.
+       if (dest_entry_point != 0) {
+         CodeBlob* cb = CodeCache::find_blob(dest_entry_point);
+         assert((cb != NULL) && cb->is_compiled() && (((CompiledMethod*)cb) == callee),
+                "should not call unloaded nmethod");
+       }
+ #endif
+       if (is_virtual) {
+         CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
+         if (inline_cache->is_clean()) {
+           if (!inline_cache->set_to_monomorphic(virtual_call_info)) {
+             return false;
+           }
+         }
+       } else {
+         CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
+         if (ssc->is_clean()) ssc->set(static_call_info);
+       }
+     }
+   } // unlock CompiledICLocker
+   return true;
+ }
+ 
  // Resolves a call. The compilers generate code for calls that go here
  // and are patched with the real destination of the call.
  methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
!                                                bool is_virtual,
!                                                bool is_optimized, TRAPS) {
  
    ResourceMark rm(thread);
    RegisterMap cbl_map(thread, false);
    frame caller_frame = thread->last_frame().sender(&cbl_map);
***************
*** 1314,1405 ****
  // always return the entry-point, but we only patch the stub if the call has
  // not been deoptimized. Return values: For a virtual call this is an
  // (cached_oop, destination address) pair. For a static call/optimized
  // virtual this is just a destination address.
  
!   bool first_try = true;
    for (;;) {
!     if (!first_try) {
!       // Patching IC caches may fail if we run out of transition stubs.
!       // We refill the ic stubs then.
!       InlineCacheBuffer::refill_ic_stubs();
!     }
!     first_try = false;
! 
!     StaticCallInfo static_call_info;
!     CompiledICInfo virtual_call_info;
! 
!     // Make sure the callee nmethod does not get deoptimized and removed before
!     // we are done patching the code.
!     CompiledMethod* callee = callee_method->code();
! 
!     if (callee != NULL) {
!       assert(callee->is_compiled(), "must be nmethod for patching");
!     }
! 
!     if (callee != NULL && !callee->is_in_use()) {
!       // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
!       callee = NULL;
!     }
!     nmethodLocker nl_callee(callee);
! #ifdef ASSERT
!     address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
! #endif
! 
!     bool is_nmethod = caller_nm->is_nmethod();
! 
!     if (is_virtual) {
!       assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
!       bool static_bound = call_info.resolved_method()->can_be_statically_bound();
!       Klass* klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass();
!       CompiledIC::compute_monomorphic_entry(callee_method, klass,
!                                             is_optimized, static_bound, is_nmethod, virtual_call_info,
!                                             CHECK_(methodHandle()));
    } else {
!       // static call
!       CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info);
    }
- 
-     // grab lock, check for deoptimization and potentially patch caller
-     {
-       CompiledICLocker ml(caller_nm);
- 
-       // Lock blocks for safepoint during which both nmethods can change state.
- 
-       // Now that we are ready to patch, if the Method* was redefined then
-       // don't update the call site and let the caller retry.
-       // Don't update the call site if the callee nmethod was unloaded or deoptimized.
-       // Don't update the call site if the callee nmethod was replaced by another
-       // nmethod, which may happen when multiple alive nmethods per method
-       // (tiered compilation) are supported.
-       if (!callee_method->is_old() &&
-           (callee == NULL || (callee->is_in_use() && callee_method->code() == callee))) {
- #ifdef ASSERT
-         // We must not try to patch to jump to an already unloaded method.
-         if (dest_entry_point != 0) {
-           CodeBlob* cb = CodeCache::find_blob(dest_entry_point);
-           assert((cb != NULL) && cb->is_compiled() && (((CompiledMethod*)cb) == callee),
-                  "should not call unloaded nmethod");
-         }
- #endif
-         if (is_virtual) {
-           CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
-           if (inline_cache->is_clean()) {
-             if (!inline_cache->set_to_monomorphic(virtual_call_info)) {
-               continue;
-             }
-           }
-         } else {
-           CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
-           if (ssc->is_clean()) ssc->set(static_call_info);
-         }
-       }
-     } // unlock CompiledICLocker
-     break;
  }
- 
-   return callee_method;
  }
--- 1390,1412 ----
  // always return the entry-point, but we only patch the stub if the call has
  // not been deoptimized. Return values: For a virtual call this is an
  // (cached_oop, destination address) pair. For a static call/optimized
  // virtual this is just a destination address.
  
!   // Patching IC caches may fail if we run out of transition stubs.
!   // We refill the ic stubs then and try again.
    for (;;) {
!     bool successful = resolve_sub_helper_internal(callee_method, caller_frame, caller_nm,
!                                                   is_virtual, is_optimized, receiver,
!                                                   call_info, invoke_code, CHECK_(methodHandle()));
!     if (successful) {
!       return callee_method;
    } else {
!       InlineCacheBuffer::refill_ic_stubs();
    }
  }
  }
  
  // Inline caches exist only in compiled code
  JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
***************
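The rewritten resolve_sub_helper above establishes the shape used throughout this change: the fallible patching work moves into an _internal helper, and the caller retries in a loop that refills the transition-stub buffer between attempts. A minimal standalone sketch of that refill-and-retry shape, assuming hypothetical stand-ins try_patch_call_site and refill_transition_stubs for resolve_sub_helper_internal and InlineCacheBuffer::refill_ic_stubs():

#include <cstdio>

// Illustrative stand-in state: how many transition stubs are free.
static int stubs_available = 0;

// Stand-in for resolve_sub_helper_internal: the attempt fails when an
// IC state transition would need a stub and none is available.
static bool try_patch_call_site() {
  if (stubs_available == 0) {
    return false;           // would have needed a transition stub
  }
  --stubs_available;        // consume a stub while patching
  return true;
}

// Stand-in for InlineCacheBuffer::refill_ic_stubs(); in HotSpot this may
// safepoint, which is why it happens outside the patch attempt.
static void refill_transition_stubs() {
  stubs_available = 4;
}

static void resolve_call_site() {
  // Retry until the attempt succeeds; every failure is followed by a
  // refill, so the next attempt cannot fail for lack of stubs.
  for (;;) {
    if (try_patch_call_site()) {
      return;
    }
    refill_transition_stubs();
  }
}

int main() {
  resolve_call_site();
  std::puts("call site resolved");
  return 0;
}

The point of the restructuring is that the helper can simply return false from deep inside the locked region, instead of threading a first_try flag and continue/break pairs through one large loop body.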
*** 1529,1539 ****
--- 1536,1624 ----
    // return compiled code entry point after potential safepoints
    assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
    return callee_method->verified_code_entry();
  JRT_END
  
+ // The handle_ic_miss_helper_internal function returns false if it failed, either
+ // because it ran out of vtable stubs, or because it ran out of IC stubs for IC
+ // transitions to transitional states. The needs_ic_stub_refill value is set when
+ // the failure was due to running out of IC stubs, in which case the caller
+ // (handle_ic_miss_helper) refills the IC stubs and tries again.
+ bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm,
+                                                    const frame& caller_frame, methodHandle callee_method,
+                                                    Bytecodes::Code bc, CallInfo& call_info,
+                                                    bool& needs_ic_stub_refill, TRAPS) {
+   CompiledICLocker ml(caller_nm);
+   CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
+   bool should_be_mono = false;
+   if (inline_cache->is_optimized()) {
+     if (TraceCallFixup) {
+       ResourceMark rm(THREAD);
+       tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
+       callee_method->print_short_name(tty);
+       tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
+     }
+     should_be_mono = true;
+   } else if (inline_cache->is_icholder_call()) {
+     CompiledICHolder* ic_oop = inline_cache->cached_icholder();
+     if (ic_oop != NULL) {
+       if (!ic_oop->is_loader_alive()) {
+         // Deferred IC cleaning due to concurrent class unloading
+         if (!inline_cache->set_to_clean()) {
+           needs_ic_stub_refill = true;
+           return false;
+         }
+       } else if (receiver()->klass() == ic_oop->holder_klass()) {
+         // This isn't a real miss. We must have seen that compiled code
+         // is now available and we want the call site converted to a
+         // monomorphic compiled call site.
+         // We can't assert for callee_method->code() != NULL because it
+         // could have been deoptimized in the meantime
+         if (TraceCallFixup) {
+           ResourceMark rm(THREAD);
+           tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
+           callee_method->print_short_name(tty);
+           tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
+         }
+         should_be_mono = true;
+       }
+     }
+   }
+ 
+   if (should_be_mono) {
+     // We have a path that was monomorphic but was going interpreted
+     // and now we have (or had) a compiled entry. We correct the IC
+     // by using a new icBuffer.
+     CompiledICInfo info;
+     Klass* receiver_klass = receiver()->klass();
+     inline_cache->compute_monomorphic_entry(callee_method,
+                                             receiver_klass,
+                                             inline_cache->is_optimized(),
+                                             false, caller_nm->is_nmethod(),
+                                             info, CHECK_false);
+     if (!inline_cache->set_to_monomorphic(info)) {
+       needs_ic_stub_refill = true;
+       return false;
+     }
+   } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
+     // Potential change to megamorphic
+ 
+     bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, CHECK_false);
+     if (!successful) {
+       if (!needs_ic_stub_refill) {
+         return false;
+       }
+       if (!inline_cache->set_to_clean()) {
+         needs_ic_stub_refill = true;
+         return false;
+       }
+     }
+   } else {
+     // Either clean or megamorphic
+   }
+   return true;
+ }
+ 
  methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
    ResourceMark rm(thread);
    CallInfo call_info;
    Bytecodes::Code bc;
***************
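handle_ic_miss_helper_internal refines that shape with a failure-reason out-parameter: a refill is only worthwhile when stub exhaustion actually caused the failure, since the helper can also fail for reasons a refill will not cure (the comment above names running out of vtable stubs). A sketch of that contract, again with hypothetical stand-ins (try_transition_ic and its globals are not HotSpot code):

#include <cstdio>

static int  ic_stubs = 0;              // stand-in stub pool
static bool transient_failure = true;  // stand-in non-stub failure cause

// Stand-in for handle_ic_miss_helper_internal: returns false on failure
// and sets needs_refill only when the cause was an empty stub pool.
static bool try_transition_ic(bool& needs_refill) {
  if (ic_stubs == 0) {
    needs_refill = true;    // failed because no IC stubs were left
    return false;
  }
  if (transient_failure) {
    transient_failure = false;
    return false;           // failed for another, retryable reason
  }
  --ic_stubs;
  return true;
}

static void handle_ic_miss() {
  for (;;) {
    bool needs_refill = false;
    if (try_transition_ic(needs_refill)) {
      return;
    }
    if (needs_refill) {
      ic_stubs = 4;         // stand-in for InlineCacheBuffer::refill_ic_stubs()
    }
  }
}

int main() {
  handle_ic_miss();
  std::puts("inline cache transitioned");
  return 0;
}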
*** 1594,1683 ****
  // - instead the event will be deferred until the event collector goes
  // out of scope.
  JvmtiDynamicCodeEventCollector event_collector;
  
  // Update inline cache to megamorphic. Skip update if we are called from interpreted.
!   bool first_try = true;
!   for (;;) {
!     if (!first_try) {
!       // Transitioning IC caches may require transition stubs. If we run out
!       // of transition stubs, we have to drop locks and perform a safepoint
!       // that refills them.
!       InlineCacheBuffer::refill_ic_stubs();
!     }
!     first_try = false;
!     RegisterMap reg_map(thread, false);
!     frame caller_frame = thread->last_frame().sender(&reg_map);
!     CodeBlob* cb = caller_frame.cb();
!     CompiledMethod* caller_nm = cb->as_compiled_method_or_null();
!     CompiledICLocker ml(caller_nm);
!     if (!cb->is_compiled()) {
!       Unimplemented();
!     }
!     CompiledIC* inline_cache = CompiledIC_before(((CompiledMethod*)cb), caller_frame.pc());
!     bool should_be_mono = false;
!     if (inline_cache->is_optimized()) {
!       if (TraceCallFixup) {
!         ResourceMark rm(thread);
!         tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
!         callee_method->print_short_name(tty);
!         tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
!       }
!       should_be_mono = true;
!     } else if (inline_cache->is_icholder_call()) {
!       CompiledICHolder* ic_oop = inline_cache->cached_icholder();
!       if (ic_oop != NULL) {
!         if (!ic_oop->is_loader_alive()) {
!           // Deferred IC cleaning due to concurrent class unloading
!           inline_cache->set_to_clean();
!         } else if (receiver()->klass() == ic_oop->holder_klass()) {
!           // This isn't a real miss. We must have seen that compiled code
!           // is now available and we want the call site converted to a
!           // monomorphic compiled call site.
!           // We can't assert for callee_method->code() != NULL because it
!           // could have been deoptimized in the meantime
!           if (TraceCallFixup) {
!             ResourceMark rm(thread);
!             tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
!             callee_method->print_short_name(tty);
!             tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
!           }
!           should_be_mono = true;
!         }
      }
    }
!     if (should_be_mono) {
!       // We have a path that was monomorphic but was going interpreted
!       // and now we have (or had) a compiled entry. We correct the IC
!       // by using a new icBuffer.
!       CompiledICInfo info;
!       Klass* receiver_klass = receiver()->klass();
!       inline_cache->compute_monomorphic_entry(callee_method,
!                                               receiver_klass,
!                                               inline_cache->is_optimized(),
!                                               false, caller_nm->is_nmethod(),
!                                               info, CHECK_(methodHandle()));
!       if (!inline_cache->set_to_monomorphic(info)) {
!         continue;
!       }
!     } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
!       // Potential change to megamorphic
!       bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
!       if (!successful) {
!         if (!inline_cache->set_to_clean()) {
!           continue;
!         }
!       }
!     } else {
!       // Either clean or megamorphic
    }
!     break;
!   } // Release CompiledICLocker
! 
!   return callee_method;
  }
--- 1679,1725 ----
  // - instead the event will be deferred until the event collector goes
  // out of scope.
  JvmtiDynamicCodeEventCollector event_collector;
  
  // Update inline cache to megamorphic. Skip update if we are called from interpreted.
!   // Transitioning IC caches may require transition stubs. If we run out
!   // of transition stubs, we have to drop locks and perform a safepoint
!   // that refills them.
!   RegisterMap reg_map(thread, false);
!   frame caller_frame = thread->last_frame().sender(&reg_map);
!   CodeBlob* cb = caller_frame.cb();
!   CompiledMethod* caller_nm = cb->as_compiled_method();
!   for (;;) {
!     bool needs_ic_stub_refill = false;
!     bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method,
!                                                      bc, call_info, needs_ic_stub_refill, CHECK_(methodHandle()));
!     if (successful) {
!       return callee_method;
!     } else {
!       if (needs_ic_stub_refill) {
!         InlineCacheBuffer::refill_ic_stubs();
      }
    }
+   }
+ }
  
! static bool clear_ic_at_addr(CompiledMethod* caller_nm, address call_addr, bool is_static_call) {
!   CompiledICLocker ml(caller_nm);
!   if (is_static_call) {
!     CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
!     if (!ssc->is_clean()) {
!       return ssc->set_to_clean();
!     }
!   } else {
!     // compiled, dispatched call (which used to call an interpreted method)
!     CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
!     if (!inline_cache->is_clean()) {
!       return inline_cache->set_to_clean();
      }
!   }
!   return true;
  }
  
  //
  // Resets a call-site in compiled code so it will get resolved again.
  // This routine handles both virtual call sites, optimized virtual call
***************
*** 1755,1775 ****
  // is always done through the same code path. (experience shows that it
  // leads to very hard to track down bugs if an inline cache gets updated
  // to a wrong method). It should not be performance critical, since the
  // resolve is only done once.
  
!       CompiledICLocker ml(caller_nm);
!       if (is_static_call) {
!         CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
!         if (!ssc->is_clean()) {
!           ssc->set_to_clean();
!         }
!       } else {
!         // compiled, dispatched call (which used to call an interpreted method)
!         CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
!         if (!inline_cache->is_clean()) {
!           inline_cache->set_to_clean();
        }
      }
    }
  }
--- 1797,1811 ----
  // is always done through the same code path. (experience shows that it
  // leads to very hard to track down bugs if an inline cache gets updated
  // to a wrong method). It should not be performance critical, since the
  // resolve is only done once.
  
!       for (;;) {
!         if (!clear_ic_at_addr(caller_nm, call_addr, is_static_call)) {
!           InlineCacheBuffer::refill_ic_stubs();
!         } else {
!           break;
        }
      }
    }
  }
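The retry loop stays outside clear_ic_at_addr for the reason given in the earlier comments: refilling IC stubs may perform a safepoint, which must not happen while the CompiledICLocker is held. Each attempt therefore takes and drops the lock, and the refill runs in between. A sketch of that lock discipline, using std::mutex as an illustrative stand-in for CompiledICLocker:

#include <cstdio>
#include <mutex>

static std::mutex ic_lock;  // stand-in for CompiledICLocker
static int stubs = 0;       // stand-in stub pool

// Stand-in for clear_ic_at_addr: the lock is scoped to one attempt, so
// it is released before the caller refills.
static bool try_clean_call_site() {
  std::lock_guard<std::mutex> guard(ic_lock);
  if (stubs == 0) {
    return false;           // cleaning would have needed a stub
  }
  --stubs;
  return true;
}

int main() {
  for (;;) {
    if (try_clean_call_site()) {
      break;                // call site is clean again
    }
    // Refill with the lock dropped; in HotSpot this may safepoint.
    stubs = 4;              // stand-in for InlineCacheBuffer::refill_ic_stubs()
  }
  std::puts("call site reset");
  return 0;
}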