
src/hotspot/share/runtime/sharedRuntime.cpp


*** 23,38 ****
   */
  
  #include "precompiled.hpp"
  #include "jvm.h"
  #include "aot/aotLoader.hpp"
- #include "code/compiledMethod.inline.hpp"
  #include "classfile/stringTable.hpp"
  #include "classfile/systemDictionary.hpp"
  #include "classfile/vmSymbols.hpp"
  #include "code/codeCache.hpp"
  #include "code/compiledIC.hpp"
  #include "code/scopeDesc.hpp"
  #include "code/vtableStubs.hpp"
  #include "compiler/abstractCompiler.hpp"
  #include "compiler/compileBroker.hpp"
  #include "compiler/disassembler.hpp"
--- 23,39 ----
   */
  
  #include "precompiled.hpp"
  #include "jvm.h"
  #include "aot/aotLoader.hpp"
  #include "classfile/stringTable.hpp"
  #include "classfile/systemDictionary.hpp"
  #include "classfile/vmSymbols.hpp"
  #include "code/codeCache.hpp"
  #include "code/compiledIC.hpp"
+ #include "code/icBuffer.hpp"
+ #include "code/compiledMethod.inline.hpp"
  #include "code/scopeDesc.hpp"
  #include "code/vtableStubs.hpp"
  #include "compiler/abstractCompiler.hpp"
  #include "compiler/compileBroker.hpp"
  #include "compiler/disassembler.hpp"
***************
*** 1313,1390 ****
    // always return the entry-point, but we only patch the stub if the call has
    // not been deoptimized. Return values: For a virtual call this is an
    // (cached_oop, destination address) pair. For a static call/optimized
    // virtual this is just a destination address.
  
!   StaticCallInfo static_call_info;
!   CompiledICInfo virtual_call_info;
!   // Make sure the callee nmethod does not get deoptimized and removed before
!   // we are done patching the code.
!   CompiledMethod* callee = callee_method->code();
!   if (callee != NULL) {
!     assert(callee->is_compiled(), "must be nmethod for patching");
!   }
!   if (callee != NULL && !callee->is_in_use()) {
!     // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
!     callee = NULL;
!   }
!   nmethodLocker nl_callee(callee);
  #ifdef ASSERT
!   address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
  #endif
!   bool is_nmethod = caller_nm->is_nmethod();
!   if (is_virtual) {
!     assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
!     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
!     Klass* klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass();
!     CompiledIC::compute_monomorphic_entry(callee_method, klass,
!                                           is_optimized, static_bound, is_nmethod, virtual_call_info,
!                                           CHECK_(methodHandle()));
!   } else {
!     // static call
!     CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info);
!   }
!   // grab lock, check for deoptimization and potentially patch caller
!   {
!     CompiledICLocker ml(caller_nm);
!     // Lock blocks for safepoint during which both nmethods can change state.
!     // Now that we are ready to patch, if the Method* was redefined then
!     // don't update the call site and let the caller retry.
!     // Don't update the call site if the callee nmethod was unloaded or deoptimized.
!     // Don't update the call site if the callee nmethod was replaced by another
!     // nmethod, which may happen when multiple alive nmethods (tiered
!     // compilation) are supported.
!     if (!callee_method->is_old() &&
!         (callee == NULL || (callee->is_in_use() && callee_method->code() == callee))) {
  #ifdef ASSERT
!       // We must not try to patch to jump to an already unloaded method.
!       if (dest_entry_point != 0) {
!         CodeBlob* cb = CodeCache::find_blob(dest_entry_point);
!         assert((cb != NULL) && cb->is_compiled() && (((CompiledMethod*)cb) == callee),
!                "should not call unloaded nmethod");
!       }
  #endif
!       if (is_virtual) {
!         CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
!         if (inline_cache->is_clean()) {
!           inline_cache->set_to_monomorphic(virtual_call_info);
          }
-       } else {
-         CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
-         if (ssc->is_clean()) ssc->set(static_call_info);
        }
!     }
! 
!   } // unlock CompiledICLocker
  
    return callee_method;
  }
--- 1314,1403 ----
    // always return the entry-point, but we only patch the stub if the call has
    // not been deoptimized. Return values: For a virtual call this is an
    // (cached_oop, destination address) pair. For a static call/optimized
    // virtual this is just a destination address.
  
!   bool first_try = true;
!   for (;;) {
!     if (!first_try) {
!       // Patching IC caches may fail if we run out of transition stubs.
!       // We refill the IC stubs then.
!       InlineCacheBuffer::refill_ic_stubs();
!     }
!     first_try = false;
!     StaticCallInfo static_call_info;
!     CompiledICInfo virtual_call_info;
!     // Make sure the callee nmethod does not get deoptimized and removed before
!     // we are done patching the code.
!     CompiledMethod* callee = callee_method->code();
!     if (callee != NULL) {
!       assert(callee->is_compiled(), "must be nmethod for patching");
!     }
! 
!     if (callee != NULL && !callee->is_in_use()) {
!       // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
!       callee = NULL;
!     }
!     nmethodLocker nl_callee(callee);
  #ifdef ASSERT
!     address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
  #endif
!     bool is_nmethod = caller_nm->is_nmethod();
!     if (is_virtual) {
!       assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
!       bool static_bound = call_info.resolved_method()->can_be_statically_bound();
!       Klass* klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass();
!       CompiledIC::compute_monomorphic_entry(callee_method, klass,
!                                             is_optimized, static_bound, is_nmethod, virtual_call_info,
!                                             CHECK_(methodHandle()));
!     } else {
!       // static call
!       CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info);
!     }
!     // grab lock, check for deoptimization and potentially patch caller
!     {
!       CompiledICLocker ml(caller_nm);
!       // Lock blocks for safepoint during which both nmethods can change state.
!       // Now that we are ready to patch, if the Method* was redefined then
!       // don't update the call site and let the caller retry.
!       // Don't update the call site if the callee nmethod was unloaded or deoptimized.
!       // Don't update the call site if the callee nmethod was replaced by another
!       // nmethod, which may happen when multiple alive nmethods (tiered
!       // compilation) are supported.
!       if (!callee_method->is_old() &&
!           (callee == NULL || (callee->is_in_use() && callee_method->code() == callee))) {
  #ifdef ASSERT
!         // We must not try to patch to jump to an already unloaded method.
!         if (dest_entry_point != 0) {
!           CodeBlob* cb = CodeCache::find_blob(dest_entry_point);
!           assert((cb != NULL) && cb->is_compiled() && (((CompiledMethod*)cb) == callee),
!                  "should not call unloaded nmethod");
!         }
  #endif
!         if (is_virtual) {
!           CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
!           if (inline_cache->is_clean()) {
!             if (!inline_cache->set_to_monomorphic(virtual_call_info)) {
!               continue;
!             }
!           }
!         } else {
!           CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
!           if (ssc->is_clean()) ssc->set(static_call_info);
          }
        }
!     } // unlock CompiledICLocker
!     break;
!   }
  
    return callee_method;
  }
***************
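The retry loop above is the heart of this hunk: inline-cache transitions can now fail when the InlineCacheBuffer has no transition stubs left, and resolve_sub_helper() responds by refilling the stubs and redoing the whole resolution. A minimal standalone sketch of that idiom, using hypothetical stand-ins (ic_stubs_available, try_patch_call_site(), refill_ic_stubs(), resolve_with_retry()) rather than the real HotSpot types:

#include <cstdio>

// Hypothetical stand-ins for the HotSpot machinery; only the shape of the
// retry loop mirrors the patch above.
static int ic_stubs_available = 0;

// Models a fallible transition such as CompiledIC::set_to_monomorphic():
// it fails when no IC transition stub can be allocated.
static bool try_patch_call_site() {
  if (ic_stubs_available == 0) {
    return false;
  }
  ic_stubs_available--;
  return true;
}

// Models InlineCacheBuffer::refill_ic_stubs(): in HotSpot this may trigger
// a safepoint, so every iteration must recompute its state from scratch.
static void refill_ic_stubs() {
  ic_stubs_available = 4;
}

static void resolve_with_retry() {
  bool first_try = true;
  for (;;) {
    if (!first_try) {
      refill_ic_stubs();  // may safepoint; previously computed info is stale
    }
    first_try = false;
    // ...recompute StaticCallInfo/CompiledICInfo and re-take locks here...
    if (!try_patch_call_site()) {
      continue;           // out of transition stubs: refill and retry
    }
    break;                // transition succeeded
  }
}

int main() {
  resolve_with_retry();
  std::puts("call site patched");
  return 0;
}

Restarting from the top of the loop matters because the refill may safepoint, invalidating the call info and any state that was read under the lock.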
*** 1553,1564 ****
      return callee_method;
    }
  
    methodHandle callee_method = call_info.selected_method();
  
-   bool should_be_mono = false;
- 
  #ifndef PRODUCT
    Atomic::inc(&_ic_miss_ctr);
  
    // Statistics & Tracing
    if (TraceCallFixup) {
--- 1566,1575 ----
***************
*** 1583,1658 ****
    // - instead the event will be deferred until the event collector goes
    // out of scope.
    JvmtiDynamicCodeEventCollector event_collector;
  
    // Update inline cache to megamorphic. Skip update if we are called from interpreted.
!   {
      RegisterMap reg_map(thread, false);
      frame caller_frame = thread->last_frame().sender(&reg_map);
      CodeBlob* cb = caller_frame.cb();
      CompiledMethod* caller_nm = cb->as_compiled_method_or_null();
      CompiledICLocker ml(caller_nm);
!     if (cb->is_compiled()) {
!       CompiledIC* inline_cache = CompiledIC_before(((CompiledMethod*)cb), caller_frame.pc());
!       bool should_be_mono = false;
!       if (inline_cache->is_optimized()) {
!         if (TraceCallFixup) {
!           ResourceMark rm(thread);
!           tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
!           callee_method->print_short_name(tty);
!           tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
!         }
!         should_be_mono = true;
!       } else if (inline_cache->is_icholder_call()) {
!         CompiledICHolder* ic_oop = inline_cache->cached_icholder();
!         if (ic_oop != NULL) {
!           if (!ic_oop->is_loader_alive()) {
!             // Deferred IC cleaning due to concurrent class unloading
!             inline_cache->set_to_clean();
!           } else if (receiver()->klass() == ic_oop->holder_klass()) {
!             // This isn't a real miss. We must have seen that compiled code
!             // is now available and we want the call site converted to a
!             // monomorphic compiled call site.
!             // We can't assert for callee_method->code() != NULL because it
!             // could have been deoptimized in the meantime.
!             if (TraceCallFixup) {
!               ResourceMark rm(thread);
!               tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
!               callee_method->print_short_name(tty);
!               tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
!             }
!             should_be_mono = true;
            }
          }
        }
! 
!       if (should_be_mono) {
! 
!         // We have a path that was monomorphic but was going interpreted
!         // and now we have (or had) a compiled entry. We correct the IC
!         // by using a new icBuffer.
!         CompiledICInfo info;
!         Klass* receiver_klass = receiver()->klass();
!         inline_cache->compute_monomorphic_entry(callee_method,
!                                                 receiver_klass,
!                                                 inline_cache->is_optimized(),
!                                                 false, caller_nm->is_nmethod(),
!                                                 info, CHECK_(methodHandle()));
!         inline_cache->set_to_monomorphic(info);
!       } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
!         // Potential change to megamorphic
!         bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
!         if (!successful) {
!           inline_cache->set_to_clean();
          }
-       } else {
-         // Either clean or megamorphic
        }
      } else {
!       fatal("Unimplemented");
      }
    } // Release CompiledICLocker
  
    return callee_method;
  }
--- 1594,1680 ----
    // - instead the event will be deferred until the event collector goes
    // out of scope.
    JvmtiDynamicCodeEventCollector event_collector;
  
    // Update inline cache to megamorphic. Skip update if we are called from interpreted.
!   bool first_try = true;
!   for (;;) {
!     if (!first_try) {
!       // Transitioning IC caches may require transition stubs. If we run out
!       // of transition stubs, we have to drop locks and perform a safepoint
!       // that refills them.
!       InlineCacheBuffer::refill_ic_stubs();
!     }
!     first_try = false;
      RegisterMap reg_map(thread, false);
      frame caller_frame = thread->last_frame().sender(&reg_map);
      CodeBlob* cb = caller_frame.cb();
      CompiledMethod* caller_nm = cb->as_compiled_method_or_null();
      CompiledICLocker ml(caller_nm);
!     if (!cb->is_compiled()) {
!       Unimplemented();
!     }
!     CompiledIC* inline_cache = CompiledIC_before(((CompiledMethod*)cb), caller_frame.pc());
!     bool should_be_mono = false;
!     if (inline_cache->is_optimized()) {
!       if (TraceCallFixup) {
!         ResourceMark rm(thread);
!         tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
!         callee_method->print_short_name(tty);
!         tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
!       }
!       should_be_mono = true;
!     } else if (inline_cache->is_icholder_call()) {
!       CompiledICHolder* ic_oop = inline_cache->cached_icholder();
!       if (ic_oop != NULL) {
!         if (!ic_oop->is_loader_alive()) {
!           // Deferred IC cleaning due to concurrent class unloading
!           inline_cache->set_to_clean();
!         } else if (receiver()->klass() == ic_oop->holder_klass()) {
!           // This isn't a real miss. We must have seen that compiled code
!           // is now available and we want the call site converted to a
!           // monomorphic compiled call site.
!           // We can't assert for callee_method->code() != NULL because it
!           // could have been deoptimized in the meantime.
!           if (TraceCallFixup) {
!             ResourceMark rm(thread);
!             tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
!             callee_method->print_short_name(tty);
!             tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
          }
+           should_be_mono = true;
        }
      }
+   }
!     if (should_be_mono) {
!       // We have a path that was monomorphic but was going interpreted
!       // and now we have (or had) a compiled entry. We correct the IC
!       // by using a new icBuffer.
!       CompiledICInfo info;
!       Klass* receiver_klass = receiver()->klass();
!       inline_cache->compute_monomorphic_entry(callee_method,
!                                               receiver_klass,
!                                               inline_cache->is_optimized(),
!                                               false, caller_nm->is_nmethod(),
!                                               info, CHECK_(methodHandle()));
!       if (!inline_cache->set_to_monomorphic(info)) {
!         continue;
!       }
!     } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
!       // Potential change to megamorphic
!       bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
!       if (!successful) {
!         if (!inline_cache->set_to_clean()) {
!           continue;
          }
        }
      } else {
!       // Either clean or megamorphic
      }
+     break;
    } // Release CompiledICLocker
  
    return callee_method;
  }
***************
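With this hunk the miss handler treats every inline-cache transition as fallible: set_to_monomorphic() and set_to_clean() now report failure (no transition stub available) via their bool return, and the enclosing loop restarts after a refill. A compact model of that decision tree, assuming a simplified three-state cache; all names below (ICState, InlineCache, handle_ic_miss) are illustrative, not HotSpot's:

enum class ICState { Clean, Monomorphic, Megamorphic };

struct InlineCache {
  ICState state = ICState::Clean;
  int stubs_left = 0;  // stand-in for the shared transition-stub budget

  // Every transition can fail (no stub available) and reports that via
  // bool, matching the patched HotSpot signatures.
  bool transition(ICState next) {
    if (stubs_left == 0) {
      return false;
    }
    stubs_left--;
    state = next;
    return true;
  }
};

// Returns false when the caller should refill stubs and retry, mirroring
// the `continue` paths in the hunk above.
bool handle_ic_miss(InlineCache& ic, bool should_be_mono) {
  if (should_be_mono) {
    // Convert the interpreted-bound site to a monomorphic compiled call.
    return ic.transition(ICState::Monomorphic);
  }
  if (ic.state != ICState::Megamorphic && ic.state != ICState::Clean) {
    // Potential change to megamorphic; fall back to clean if that fails.
    if (!ic.transition(ICState::Megamorphic)) {
      return ic.transition(ICState::Clean);
    }
  }
  return true;  // already clean or megamorphic: nothing to patch
}

int main() {
  InlineCache ic;
  ic.stubs_left = 1;
  return handle_ic_miss(ic, /*should_be_mono=*/true) ? 0 : 1;
}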
*** 1736,1750 ****
        // resolve is only done once.
  
        CompiledICLocker ml(caller_nm);
        if (is_static_call) {
          CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
!         ssc->set_to_clean();
        } else {
          // compiled, dispatched call (which used to call an interpreted method)
          CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
!         inline_cache->set_to_clean();
        }
      }
    }
  
    methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));
--- 1758,1776 ----
        // resolve is only done once.
  
        CompiledICLocker ml(caller_nm);
        if (is_static_call) {
          CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
!         if (!ssc->is_clean()) {
!           ssc->set_to_clean();
!         }
        } else {
          // compiled, dispatched call (which used to call an interpreted method)
          CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
!         if (!inline_cache->is_clean()) {
!           inline_cache->set_to_clean();
!         }
        }
      }
    }
  
    methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));
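The re-resolution path gains is_clean() guards, presumably because set_to_clean() is now a real transition that may consume a transition stub; skipping it when the site is already clean avoids needless transitions on a path that has no refill-and-retry loop. A minimal sketch of the guard idiom (CallSite and its members are hypothetical stand-ins for CompiledIC/CompiledStaticCall):

#include <cassert>

// Hypothetical stand-in for a compiled call site.
struct CallSite {
  bool clean = false;
  bool is_clean() const { return clean; }
  void set_to_clean() { clean = true; }  // may allocate a transition stub in HotSpot
};

void reresolve_call_site(CallSite& site) {
  // Only request the (potentially stub-consuming) transition when needed.
  if (!site.is_clean()) {
    site.set_to_clean();
  }
}

int main() {
  CallSite site;
  reresolve_call_site(site);
  assert(site.is_clean());
  return 0;
}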