diff --git a/src/hotspot/share/code/compiledIC.cpp b/src/hotspot/share/code/compiledIC.cpp
index 794e21a..ea5ed87 100644
--- a/src/hotspot/share/code/compiledIC.cpp
+++ b/src/hotspot/share/code/compiledIC.cpp
@@ -237,7 +237,13 @@ CompiledIC::CompiledIC(RelocIterator* iter)
   initialize_from_iter(iter);
 }

-bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
+// This function may fail for two reasons: either due to running out of vtable
+// stubs, or due to running out of IC stubs in an attempted transition to a
+// transitional state. The needs_ic_stub_refill value will be set if the failure
+// was due to running out of IC stubs, in which case the caller will refill IC
+// stubs and retry.
+bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode,
+                                    bool& needs_ic_stub_refill, TRAPS) {
   assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
   assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
   assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
@@ -261,6 +267,7 @@ bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecod
     holder->claim();
     if (!InlineCacheBuffer::create_transition_stub(this, holder, entry)) {
       delete holder;
+      needs_ic_stub_refill = true;
       return false;
     }
   } else {
@@ -273,6 +280,7 @@ bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecod
       return false;
     }
     if (!InlineCacheBuffer::create_transition_stub(this, NULL, entry)) {
+      needs_ic_stub_refill = true;
       return false;
     }
   }
diff --git a/src/hotspot/share/code/compiledIC.hpp b/src/hotspot/share/code/compiledIC.hpp
index 08b2a6e..803b6b1 100644
--- a/src/hotspot/share/code/compiledIC.hpp
+++ b/src/hotspot/share/code/compiledIC.hpp
@@ -28,6 +28,7 @@
 #include "code/nativeInst.hpp"
 #include "interpreter/linkResolver.hpp"
 #include "oops/compiledICHolder.hpp"
+#include "runtime/safepointVerifiers.hpp"

 //-----------------------------------------------------------------------------
 // The CompiledIC represents a compiled inline cache.
@@ -67,6 +68,7 @@ class CompiledICLocker: public StackObj {
   CompiledMethod* _method;
   CompiledICProtectionBehaviour* _behaviour;
   bool _locked;
+  NoSafepointVerifier _nsv;

 public:
   CompiledICLocker(CompiledMethod* method);
@@ -277,8 +279,8 @@ class CompiledIC: public ResourceObj {
   void clear_ic_stub();

   // Returns true if successful and false otherwise. The call can fail if memory
-  // allocation in the code cache fails.
-  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
+  // allocation in the code cache fails, or ic stub refill is required.
+  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, bool& needs_ic_stub_refill, TRAPS);

   static void compute_monomorphic_entry(const methodHandle& method, Klass* receiver_klass,
                                         bool is_optimized, bool static_bound, bool caller_is_nmethod,
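A note on the CompiledICLocker change above: embedding a NoSafepointVerifier member means that, in debug builds, every locker scope is automatically checked against reaching a safepoint while ICs are being patched, which is exactly what InlineCacheBuffer::refill_ic_stubs() would do. A minimal standalone sketch of the same RAII pattern, using invented stand-in types (FakeVerifier, FakeICLocker) rather than HotSpot's classes:

    // All types are illustrative stand-ins, not HotSpot's classes.
    #include <cassert>

    struct FakeVerifier {
      static bool safepoint_forbidden;               // models the debug-only state
      FakeVerifier()  { safepoint_forbidden = true;  }
      ~FakeVerifier() { safepoint_forbidden = false; }
    };
    bool FakeVerifier::safepoint_forbidden = false;

    struct FakeICLocker {
      FakeVerifier _nsv;    // as a member, the verifier covers the whole lock scope
      FakeICLocker()  { /* take the IC patching lock */ }
      ~FakeICLocker() { /* release it */ }
    };

    void reach_safepoint() { assert(!FakeVerifier::safepoint_forbidden); }

    int main() {
      {
        FakeICLocker ml;  // calling reach_safepoint() here would assert
      }
      reach_safepoint();  // fine once the locker scope has exited
      return 0;
    }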
diff --git a/src/hotspot/share/code/compiledMethod.cpp b/src/hotspot/share/code/compiledMethod.cpp
index 9aed89f..2932e4a 100644
--- a/src/hotspot/share/code/compiledMethod.cpp
+++ b/src/hotspot/share/code/compiledMethod.cpp
@@ -432,6 +432,9 @@ static void check_class(Metadata* md) {

 bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
+  if (ic->is_clean()) {
+    return true;
+  }
   if (ic->is_icholder_call()) {
     // The only exception is compiledICHolder metdata which may
     // yet be marked below. (We check this further below).
@@ -459,9 +462,6 @@ bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
     }
   }

-  if (ic->is_clean()) {
-    return true;
-  }
   return ic->set_to_clean();
 }

diff --git a/src/hotspot/share/code/icBuffer.cpp b/src/hotspot/share/code/icBuffer.cpp
index 341ca85..8e0fee4 100644
--- a/src/hotspot/share/code/icBuffer.cpp
+++ b/src/hotspot/share/code/icBuffer.cpp
@@ -45,6 +45,7 @@ StubQueue* InlineCacheBuffer::_buffer = NULL;

 CompiledICHolder* InlineCacheBuffer::_pending_released = NULL;
 int InlineCacheBuffer::_pending_count = 0;
+DEBUG_ONLY(volatile int InlineCacheBuffer::_needs_refill = 0;)

 void ICStub::finalize() {
   if (!is_empty()) {
@@ -116,6 +117,7 @@ ICStub* InlineCacheBuffer::new_ic_stub() {

 void InlineCacheBuffer::refill_ic_stubs() {
+  DEBUG_ONLY(Atomic::store(0, &_needs_refill));
   // we ran out of inline cache buffer space; must enter safepoint.
   // We do this by forcing a safepoint
   EXCEPTION_MARK;
@@ -133,6 +135,8 @@ void InlineCacheBuffer::refill_ic_stubs() {

 void InlineCacheBuffer::update_inline_caches() {
+  assert(_needs_refill == 0,
+         "Forgot to handle a failed IC transition requiring IC stubs");
   if (buffer()->number_of_stubs() > 0) {
     if (TraceICBuffer) {
       tty->print_cr("[updating inline caches with %d stubs]", buffer()->number_of_stubs());
@@ -169,6 +173,7 @@ bool InlineCacheBuffer::create_transition_stub(CompiledIC *ic, void* cached_valu
   // allocate and initialize new "out-of-line" inline-cache
   ICStub* ic_stub = new_ic_stub();
   if (ic_stub == NULL) {
+    DEBUG_ONLY(Atomic::inc(&_needs_refill));
     return false;
   }
diff --git a/src/hotspot/share/code/icBuffer.hpp b/src/hotspot/share/code/icBuffer.hpp
index 4821e30..b6322bd 100644
--- a/src/hotspot/share/code/icBuffer.hpp
+++ b/src/hotspot/share/code/icBuffer.hpp
@@ -30,6 +30,7 @@
 #include "interpreter/bytecodes.hpp"
 #include "memory/allocation.hpp"
 #include "utilities/align.hpp"
+#include "utilities/macros.hpp"

 //
 // For CompiledIC's:
@@ -104,6 +105,8 @@ class InlineCacheBuffer: public AllStatic {
   static CompiledICHolder* _pending_released;
   static int _pending_count;

+  DEBUG_ONLY(static volatile int _needs_refill;)
+
   static StubQueue* buffer()                         { return _buffer; }

   static ICStub* new_ic_stub();
diff --git a/src/hotspot/share/code/relocInfo.cpp b/src/hotspot/share/code/relocInfo.cpp
index 08afb8f..875ef90 100644
--- a/src/hotspot/share/code/relocInfo.cpp
+++ b/src/hotspot/share/code/relocInfo.cpp
@@ -672,17 +672,20 @@ Method* opt_virtual_call_Relocation::method_value() {
   return (Method*)m;
 }

+template<typename CompiledICorStaticCall>
+static bool set_to_clean_no_ic_refill(CompiledICorStaticCall* ic) {
+  guarantee(ic->set_to_clean(), "Should not need transition stubs");
+  return true;
+}
+
 bool opt_virtual_call_Relocation::clear_inline_cache() {
   // No stubs for ICs
   // Clean IC
   ResourceMark rm;
   CompiledIC* icache = CompiledIC_at(this);
-  guarantee(icache->set_to_clean(),
-            "Should not need transition stubs");
-  return true;
+  return set_to_clean_no_ic_refill(icache);
 }

-
 address opt_virtual_call_Relocation::static_stub(bool is_aot) {
   // search for the static stub who points back to this static call
   address static_call_addr = addr();
@@ -720,9 +723,7 @@ void static_call_Relocation::unpack_data() {
 bool static_call_Relocation::clear_inline_cache() {
   // Safe call site info
   CompiledStaticCall* handler = this->code()->compiledStaticCall_at(this);
-  guarantee(handler->set_to_clean(),
-            "Should not need transition stubs");
-  return true;
+  return set_to_clean_no_ic_refill(handler);
 }
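The set_to_clean_no_ic_refill helper above is duck-typed: CompiledIC and CompiledStaticCall each declare their own set_to_clean() without sharing a base class, so a template parameter stands in for either. A self-contained sketch of the idiom; the Fake* types are invented for illustration and assert stands in for HotSpot's guarantee:

    #include <cassert>

    struct FakeCompiledIC         { bool set_to_clean() { return true; } };
    struct FakeCompiledStaticCall { bool set_to_clean() { return true; } };

    // One template body serves both call types, as long as each exposes a
    // compatible set_to_clean().
    template <typename CompiledICorStaticCall>
    static bool set_to_clean_no_ic_refill(CompiledICorStaticCall* ic) {
      bool cleaned = ic->set_to_clean();
      assert(cleaned);    // "Should not need transition stubs"
      return cleaned;
    }

    int main() {
      FakeCompiledIC ic;
      FakeCompiledStaticCall sc;
      return set_to_clean_no_ic_refill(&ic) && set_to_clean_no_ic_refill(&sc) ? 0 : 1;
    }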
diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp
index 279b02e..c0dd95f 100644
--- a/src/hotspot/share/runtime/sharedRuntime.cpp
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp
@@ -1246,11 +1246,87 @@ methodHandle SharedRuntime::resolve_helper(JavaThread *thread,
   return callee_method;
 }

+// This fails if resolution required refilling of IC stubs
+bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, const frame& caller_frame,
+                                                CompiledMethod* caller_nm, bool is_virtual, bool is_optimized,
+                                                Handle receiver, CallInfo& call_info, Bytecodes::Code invoke_code, TRAPS) {
+  StaticCallInfo static_call_info;
+  CompiledICInfo virtual_call_info;
+
+  // Make sure the callee nmethod does not get deoptimized and removed before
+  // we are done patching the code.
+  CompiledMethod* callee = callee_method->code();
+
+  if (callee != NULL) {
+    assert(callee->is_compiled(), "must be nmethod for patching");
+  }
+
+  if (callee != NULL && !callee->is_in_use()) {
+    // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
+    callee = NULL;
+  }
+  nmethodLocker nl_callee(callee);
+#ifdef ASSERT
+  address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
+#endif
+
+  bool is_nmethod = caller_nm->is_nmethod();
+
+  if (is_virtual) {
+    assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
+    bool static_bound = call_info.resolved_method()->can_be_statically_bound();
+    Klass* klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass();
+    CompiledIC::compute_monomorphic_entry(callee_method, klass,
+                     is_optimized, static_bound, is_nmethod, virtual_call_info,
+                     CHECK_false);
+  } else {
+    // static call
+    CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info);
+  }
+
+  // grab lock, check for deoptimization and potentially patch caller
+  {
+    CompiledICLocker ml(caller_nm);
+
+    // Lock blocks for safepoint during which both nmethods can change state.
+
+    // Now that we are ready to patch if the Method* was redefined then
+    // don't update call site and let the caller retry.
+    // Don't update call site if callee nmethod was unloaded or deoptimized.
+    // Don't update call site if callee nmethod was replaced by an other nmethod
+    // which may happen when multiply alive nmethod (tiered compilation)
+    // will be supported.
+    if (!callee_method->is_old() &&
+        (callee == NULL || (callee->is_in_use() && callee_method->code() == callee))) {
+#ifdef ASSERT
+      // We must not try to patch to jump to an already unloaded method.
+      if (dest_entry_point != 0) {
+        CodeBlob* cb = CodeCache::find_blob(dest_entry_point);
+        assert((cb != NULL) && cb->is_compiled() && (((CompiledMethod*)cb) == callee),
+               "should not call unloaded nmethod");
+      }
+#endif
+      if (is_virtual) {
+        CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
+        if (inline_cache->is_clean()) {
+          if (!inline_cache->set_to_monomorphic(virtual_call_info)) {
+            return false;
+          }
+        }
+      } else {
+        CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
+        if (ssc->is_clean()) ssc->set(static_call_info);
+      }
+    }
+  } // unlock CompiledICLocker
+  return true;
+}
+
 // Resolves a call. The compilers generate code for calls that go here
 // and are patched with the real destination of the call.
 methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
-                                           bool is_virtual,
-                                           bool is_optimized, TRAPS) {
+                                               bool is_virtual,
+                                               bool is_optimized, TRAPS) {

   ResourceMark rm(thread);
   RegisterMap cbl_map(thread, false);
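The reason the patching body was split out into resolve_sub_helper_internal pairs with the compiledIC.hpp change above: CompiledICLocker now forbids safepoints for its whole scope, while InlineCacheBuffer::refill_ic_stubs() forces one, so a refill may only happen after the locker has been destroyed. A standalone sketch of that shape, with stand-in types and a plain counter in place of the real stub buffer:

    // Stand-in for the real stub pool; refilling it "requires a safepoint".
    static int free_stubs = 0;

    struct FakeICLocker {
      // In HotSpot this would take the lock and, in debug builds, forbid
      // safepoints until the end of the scope.
      FakeICLocker()  {}
      ~FakeICLocker() {}
    };

    // Mirrors resolve_sub_helper_internal: do all patching under the lock and
    // report failure instead of refilling in place.
    static bool try_patch_under_lock() {
      FakeICLocker ml;               // no safepoint allowed from here on
      if (free_stubs == 0) {
        return false;                // would need an IC stub: bail out
      }
      --free_stubs;                  // the "patch" succeeded
      return true;
    }                                // lock (and safepoint ban) released here

    static void refill_ic_stubs_sketch() { free_stubs = 4; }

    // Mirrors the loop in resolve_sub_helper: refill strictly outside the lock.
    static void resolve_with_retry() {
      while (!try_patch_under_lock()) {
        refill_ic_stubs_sketch();    // legal: the locker scope has exited
      }
    }

    int main() { resolve_with_retry(); return 0; }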
@@ -1316,88 +1392,19 @@ methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
   // (cached_oop, destination address) pair. For a static call/optimized
   // virtual this is just a destination address.

-  bool first_try = true;
+  // Patching IC caches may fail if we run out of transition stubs.
+  // We refill the ic stubs then and try again.
   for (;;) {
-    if (!first_try) {
-      // Patching IC caches may fail if we run out if transition stubs.
-      // We refill the ic stubs then.
-      InlineCacheBuffer::refill_ic_stubs();
-    }
-    first_try = false;
-
-    StaticCallInfo static_call_info;
-    CompiledICInfo virtual_call_info;
-
-    // Make sure the callee nmethod does not get deoptimized and removed before
-    // we are done patching the code.
-    CompiledMethod* callee = callee_method->code();
-
-    if (callee != NULL) {
-      assert(callee->is_compiled(), "must be nmethod for patching");
-    }
-
-    if (callee != NULL && !callee->is_in_use()) {
-      // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
-      callee = NULL;
-    }
-    nmethodLocker nl_callee(callee);
-#ifdef ASSERT
-    address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
-#endif
-
-    bool is_nmethod = caller_nm->is_nmethod();
-
-    if (is_virtual) {
-      assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
-      bool static_bound = call_info.resolved_method()->can_be_statically_bound();
-      Klass* klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass();
-      CompiledIC::compute_monomorphic_entry(callee_method, klass,
-                       is_optimized, static_bound, is_nmethod, virtual_call_info,
-                       CHECK_(methodHandle()));
+    bool successful = resolve_sub_helper_internal(callee_method, caller_frame, caller_nm,
+                                                  is_virtual, is_optimized, receiver,
+                                                  call_info, invoke_code, CHECK_(methodHandle()));
+    if (successful) {
+      return callee_method;
     } else {
-      // static call
-      CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info);
+      InlineCacheBuffer::refill_ic_stubs();
     }
-
-    // grab lock, check for deoptimization and potentially patch caller
-    {
-      CompiledICLocker ml(caller_nm);
-
-      // Lock blocks for safepoint during which both nmethods can change state.
-
-      // Now that we are ready to patch if the Method* was redefined then
-      // don't update call site and let the caller retry.
-      // Don't update call site if callee nmethod was unloaded or deoptimized.
-      // Don't update call site if callee nmethod was replaced by an other nmethod
-      // which may happen when multiply alive nmethod (tiered compilation)
-      // will be supported.
-      if (!callee_method->is_old() &&
-          (callee == NULL || (callee->is_in_use() && callee_method->code() == callee))) {
-#ifdef ASSERT
-        // We must not try to patch to jump to an already unloaded method.
-        if (dest_entry_point != 0) {
-          CodeBlob* cb = CodeCache::find_blob(dest_entry_point);
-          assert((cb != NULL) && cb->is_compiled() && (((CompiledMethod*)cb) == callee),
-                 "should not call unloaded nmethod");
-        }
-#endif
-        if (is_virtual) {
-          CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
-          if (inline_cache->is_clean()) {
-            if (!inline_cache->set_to_monomorphic(virtual_call_info)) {
-              continue;
-            }
-          }
-        } else {
-          CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
-          if (ssc->is_clean()) ssc->set(static_call_info);
-        }
-      }
-    } // unlock CompiledICLocker
-    break;
   }
-  return callee_method;
 }
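The rewritten loop above keeps an older subtlety that is easy to miss when reading resolve_sub_helper_internal: the call info is computed outside the CompiledICLocker, so the callee's state must be re-checked under the lock before patching. A standalone sketch of that compute-outside, revalidate-under-lock shape; FakeNmethod and std::mutex are illustrative stand-ins for the real types and lock:

    #include <mutex>

    struct FakeNmethod { bool in_use = true; };

    static std::mutex ic_lock;

    static bool patch_call_site(FakeNmethod* callee) {
      // 1. Compute patch data outside the lock (entry points, call info, ...).
      bool have_patch_data = (callee != nullptr);

      // 2. Revalidate under the lock: the callee may have been deoptimized
      //    or unloaded since step 1.
      std::lock_guard<std::mutex> guard(ic_lock);
      if (!have_patch_data || !callee->in_use) {
        return false;    // state changed: do not patch, let the caller retry
      }
      // 3. Apply the patch while the state is known to be stable.
      return true;
    }

    int main() {
      FakeNmethod nm;
      return patch_call_site(&nm) ? 0 : 1;
    }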
@@ -1531,7 +1538,85 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *t
   return callee_method->verified_code_entry();
 JRT_END

+// The handle_ic_miss_helper_internal function returns false if it failed due
+// to either running out of vtable stubs or ic stubs due to IC transitions
+// to transitional states. The needs_ic_stub_refill value will be set if
+// the failure was due to running out of IC stubs, in which case handle_ic_miss_helper
+// refills the IC stubs and tries again.
+bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm,
+                                                   const frame& caller_frame, methodHandle callee_method,
+                                                   Bytecodes::Code bc, CallInfo& call_info,
+                                                   bool& needs_ic_stub_refill, TRAPS) {
+  CompiledICLocker ml(caller_nm);
+  CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
+  bool should_be_mono = false;
+  if (inline_cache->is_optimized()) {
+    if (TraceCallFixup) {
+      ResourceMark rm(THREAD);
+      tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
+      callee_method->print_short_name(tty);
+      tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
+    }
+    should_be_mono = true;
+  } else if (inline_cache->is_icholder_call()) {
+    CompiledICHolder* ic_oop = inline_cache->cached_icholder();
+    if (ic_oop != NULL) {
+      if (!ic_oop->is_loader_alive()) {
+        // Deferred IC cleaning due to concurrent class unloading
+        if (!inline_cache->set_to_clean()) {
+          needs_ic_stub_refill = true;
+          return false;
+        }
+      } else if (receiver()->klass() == ic_oop->holder_klass()) {
+        // This isn't a real miss. We must have seen that compiled code
+        // is now available and we want the call site converted to a
+        // monomorphic compiled call site.
+        // We can't assert for callee_method->code() != NULL because it
+        // could have been deoptimized in the meantime
+        if (TraceCallFixup) {
+          ResourceMark rm(THREAD);
+          tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
+          callee_method->print_short_name(tty);
+          tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
+        }
+        should_be_mono = true;
+      }
+    }
+  }
+
+  if (should_be_mono) {
+    // We have a path that was monomorphic but was going interpreted
+    // and now we have (or had) a compiled entry. We correct the IC
+    // by using a new icBuffer.
+    CompiledICInfo info;
+    Klass* receiver_klass = receiver()->klass();
+    inline_cache->compute_monomorphic_entry(callee_method,
+                                            receiver_klass,
+                                            inline_cache->is_optimized(),
+                                            false, caller_nm->is_nmethod(),
+                                            info, CHECK_false);
+    if (!inline_cache->set_to_monomorphic(info)) {
+      needs_ic_stub_refill = true;
+      return false;
+    }
+  } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
+    // Potential change to megamorphic
+
+    bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, CHECK_false);
+    if (!successful) {
+      if (needs_ic_stub_refill) {
+        // Ran out of IC stubs during the transition: let the caller refill and retry.
+        return false;
+      }
+      if (!inline_cache->set_to_clean()) {
+        needs_ic_stub_refill = true;
+        return false;
+      }
+    }
+  } else {
+    // Either clean or megamorphic
+  }
+  return true;
+}
+
 methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
   ResourceMark rm(thread);
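handle_ic_miss_helper_internal above walks an inline cache through its states (clean, monomorphic, megamorphic), and any transition that needs a transition stub can fail while the stub buffer is empty. An illustrative model of just that protocol follows; the state names mirror the IC states, but every type and number here is invented:

    #include <cstdio>

    enum class ICState { Clean, Monomorphic, Megamorphic };

    struct StubBuffer {
      int free_stubs;
      bool allocate() { if (free_stubs == 0) return false; --free_stubs; return true; }
      void refill()   { free_stubs = 4; }    // stands in for the safepoint refill
    };

    // Mirrors the contract of set_to_megamorphic and friends: returns success,
    // and sets needs_refill when failure was caused by stub exhaustion.
    static bool transition(ICState& st, ICState target, StubBuffer& buf, bool& needs_refill) {
      if (!buf.allocate()) {
        needs_refill = true;
        return false;
      }
      st = target;
      return true;
    }

    int main() {
      StubBuffer buf = { 0 };                // start exhausted on purpose
      ICState st = ICState::Clean;
      for (;;) {
        bool needs_refill = false;
        if (transition(st, ICState::Megamorphic, buf, needs_refill)) break;
        if (needs_refill) buf.refill();      // in HotSpot this safepoints
      }
      std::printf("state=%d free_stubs=%d\n", static_cast<int>(st), buf.free_stubs);
      return 0;
    }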
@@ -1596,86 +1681,43 @@ methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
   JvmtiDynamicCodeEventCollector event_collector;

   // Update inline cache to megamorphic. Skip update if we are called from interpreted.
-  bool first_try = true;
-  for (;;) {
-    if (!first_try) {
-      // Transitioning IC caches may require transition stubs. If we run out
-      // of transition stubs, we have to drop locks and perform a safepoint
-      // that refills them.
-      InlineCacheBuffer::refill_ic_stubs();
-    }
-    first_try = false;
-    RegisterMap reg_map(thread, false);
-    frame caller_frame = thread->last_frame().sender(&reg_map);
-    CodeBlob* cb = caller_frame.cb();
-    CompiledMethod* caller_nm = cb->as_compiled_method_or_null();
-    CompiledICLocker ml(caller_nm);
+  // Transitioning IC caches may require transition stubs. If we run out
+  // of transition stubs, we have to drop locks and perform a safepoint
+  // that refills them.
+  RegisterMap reg_map(thread, false);
+  frame caller_frame = thread->last_frame().sender(&reg_map);
+  CodeBlob* cb = caller_frame.cb();
+  CompiledMethod* caller_nm = cb->as_compiled_method();

-    if (!cb->is_compiled()) {
-      Unimplemented();
-    }
-    CompiledIC* inline_cache = CompiledIC_before(((CompiledMethod*)cb), caller_frame.pc());
-    bool should_be_mono = false;
-    if (inline_cache->is_optimized()) {
-      if (TraceCallFixup) {
-        ResourceMark rm(thread);
-        tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
-        callee_method->print_short_name(tty);
-        tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
-      }
-      should_be_mono = true;
-    } else if (inline_cache->is_icholder_call()) {
-      CompiledICHolder* ic_oop = inline_cache->cached_icholder();
-      if (ic_oop != NULL) {
-        if (!ic_oop->is_loader_alive()) {
-          // Deferred IC cleaning due to concurrent class unloading
-          inline_cache->set_to_clean();
-        } else if (receiver()->klass() == ic_oop->holder_klass()) {
-          // This isn't a real miss. We must have seen that compiled code
-          // is now available and we want the call site converted to a
-          // monomorphic compiled call site.
-          // We can't assert for callee_method->code() != NULL because it
-          // could have been deoptimized in the meantime
-          if (TraceCallFixup) {
-            ResourceMark rm(thread);
-            tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
-            callee_method->print_short_name(tty);
-            tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
-          }
-          should_be_mono = true;
-        }
-      }
-    }
+  for (;;) {
+    bool needs_ic_stub_refill = false;
+    bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method,
+                                                     bc, call_info, needs_ic_stub_refill, CHECK_(methodHandle()));
+    if (successful) {
+      return callee_method;
+    } else {
+      if (needs_ic_stub_refill) {
+        InlineCacheBuffer::refill_ic_stubs();
+      }
+    }
+  }
+}

-    if (should_be_mono) {
-      // We have a path that was monomorphic but was going interpreted
-      // and now we have (or had) a compiled entry. We correct the IC
-      // by using a new icBuffer.
-      CompiledICInfo info;
-      Klass* receiver_klass = receiver()->klass();
-      inline_cache->compute_monomorphic_entry(callee_method,
-                                              receiver_klass,
-                                              inline_cache->is_optimized(),
-                                              false, caller_nm->is_nmethod(),
-                                              info, CHECK_(methodHandle()));
-      if (!inline_cache->set_to_monomorphic(info)) {
-        continue;
-      }
-    } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
-      // Potential change to megamorphic
-      bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
-      if (!successful) {
-        if (!inline_cache->set_to_clean()) {
-          continue;
-        }
-      }
-    } else {
-      // Either clean or megamorphic
+static bool clear_ic_at_addr(CompiledMethod* caller_nm, address call_addr, bool is_static_call) {
+  CompiledICLocker ml(caller_nm);
+  if (is_static_call) {
+    CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
+    if (!ssc->is_clean()) {
+      return ssc->set_to_clean();
     }
-    break;
-  } // Release CompiledICLocker
-
-  return callee_method;
+  } else {
+    // compiled, dispatched call (which used to call an interpreted method)
+    CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
+    if (!inline_cache->is_clean()) {
+      return inline_cache->set_to_clean();
+    }
+  }
+  return true;
 }

 //
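clear_ic_at_addr, added above, folds both call kinds behind one boolean result so that the retry loop in reresolve_call_site (next hunk) can stay call-kind agnostic. Two properties carry the design: an already-clean entry counts as success, and false only ever means a set_to_clean() transition failed and a stub refill might help. A self-contained sketch of that contract with an invented FakeIC type:

    struct FakeIC {
      bool clean = false;
      bool stub_available = false;   // start exhausted on purpose
      bool set_to_clean() {          // can fail when no transition stub is free
        if (!stub_available) return false;
        clean = true;
        return true;
      }
    };

    // Mirrors clear_ic_at_addr: idempotent, and false only means "retry after
    // a refill", never "already clean".
    static bool clear_ic_sketch(FakeIC& ic) {
      if (!ic.clean) {
        return ic.set_to_clean();
      }
      return true;
    }

    int main() {
      FakeIC ic;
      while (!clear_ic_sketch(ic)) {
        ic.stub_available = true;    // stands in for refill_ic_stubs()
      }
      return ic.clean ? 0 : 1;
    }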
@@ -1757,17 +1799,11 @@ methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
       // to a wrong method). It should not be performance critical, since the
       // resolve is only done once.

-      CompiledICLocker ml(caller_nm);
-      if (is_static_call) {
-        CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
-        if (!ssc->is_clean()) {
-          ssc->set_to_clean();
-        }
-      } else {
-        // compiled, dispatched call (which used to call an interpreted method)
-        CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
-        if (!inline_cache->is_clean()) {
-          inline_cache->set_to_clean();
+      for (;;) {
+        if (!clear_ic_at_addr(caller_nm, call_addr, is_static_call)) {
+          InlineCacheBuffer::refill_ic_stubs();
+        } else {
+          break;
+        }
       }
     }
   }
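The debug-only _needs_refill counter from icBuffer.cpp is what keeps all of these retry loops honest: each failed stub allocation increments it, refill_ic_stubs() clears it, and update_inline_caches() asserts that it is zero, so any caller that swallows a failed transition without refilling trips the assert at the next safepoint. A standalone model of that bookkeeping, using std::atomic in place of HotSpot's Atomic and DEBUG_ONLY machinery:

    #include <atomic>
    #include <cassert>

    static std::atomic<int> needs_refill{0};

    // Mirrors InlineCacheBuffer::create_transition_stub's failure path.
    static bool create_transition_stub(bool have_space) {
      if (!have_space) {
        needs_refill.fetch_add(1);   // record the unmet demand
        return false;                // caller must refill and retry
      }
      return true;
    }

    // Mirrors refill_ic_stubs(): demand is about to be satisfied.
    static void refill_ic_stubs() { needs_refill.store(0); }

    // Mirrors update_inline_caches(): runs at a safepoint and checks that no
    // failed transition was left unhandled.
    static void update_inline_caches() {
      assert(needs_refill.load() == 0 &&
             "Forgot to handle a failed IC transition requiring IC stubs");
    }

    int main() {
      if (!create_transition_stub(false)) {
        refill_ic_stubs();           // skipping this would trip the assert
      }
      update_inline_caches();
      return 0;
    }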
diff --git a/src/hotspot/share/runtime/sharedRuntime.hpp b/src/hotspot/share/runtime/sharedRuntime.hpp
index d2b946b..15e51fa 100644
--- a/src/hotspot/share/runtime/sharedRuntime.hpp
+++ b/src/hotspot/share/runtime/sharedRuntime.hpp
@@ -48,6 +48,9 @@ class SharedRuntime: AllStatic {
   friend class VMStructs;

 private:
+  static bool resolve_sub_helper_internal(methodHandle callee_method, const frame& caller_frame,
+                                          CompiledMethod* caller_nm, bool is_virtual, bool is_optimized,
+                                          Handle receiver, CallInfo& call_info, Bytecodes::Code invoke_code, TRAPS);
   static methodHandle resolve_sub_helper(JavaThread *thread,
                                          bool is_virtual,
                                          bool is_optimized, TRAPS);
@@ -324,6 +327,10 @@ class SharedRuntime: AllStatic {
   // deopt blob
   static void generate_deopt_blob(void);

+  static bool handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm, const frame& caller_frame,
+                                             methodHandle callee_method, Bytecodes::Code bc, CallInfo& call_info,
+                                             bool& needs_ic_stub_refill, TRAPS);
+
 public:
   static DeoptimizationBlob* deopt_blob(void)      { return _deopt_blob; }
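A closing note on the TRAPS, CHECK_false and CHECK_(methodHandle()) tokens threaded through the new helper signatures: these are HotSpot's exception-propagation macros from utilities/exceptions.hpp. The sketch below is a from-memory simplification intended only to make the calls above readable; consult the real header for the exact definitions:

    // Simplified shape of the macros (see hotspot's utilities/exceptions.hpp):
    //   #define TRAPS        Thread* THREAD
    //   #define CHECK_(res)  THREAD); if (HAS_PENDING_EXCEPTION) return res; (void)(0
    //   #define CHECK_false  CHECK_(false)
    //
    // So a call written as
    //   foo(a, b, CHECK_false);
    // expands to approximately
    //   foo(a, b, THREAD); if (HAS_PENDING_EXCEPTION) return false; (void)(0);
    // which is why every function that can "fail with refill" still returns a
    // plain bool: Java exceptions propagate via the macro, not the return value.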