
src/hotspot/share/opto/callGenerator.cpp

*** 631,640 **** --- 631,662 ----
  CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
    return new LateInlineBoxingCallGenerator(method, inline_cg);
  }
+ 
+ class LateInlineObjectEqualsCallGenerator : public LateInlineCallGenerator {
+ 
+  public:
+   LateInlineObjectEqualsCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
+     LateInlineCallGenerator(method, inline_cg) {}
+ 
+   virtual JVMState* generate(JVMState* jvms) {
+     Compile* C = Compile::current();
+ 
+     C->log_inline_id(this);
+ 
+     C->add_object_equals_late_inline(this);
+ 
+     JVMState* new_jvms = DirectCallGenerator::generate(jvms);
+     return new_jvms;
+   }
+ };
+ 
+ CallGenerator* CallGenerator::for_object_equals_late_inline(ciMethod* method, CallGenerator* inline_cg) {
+   return new LateInlineObjectEqualsCallGenerator(method, inline_cg);
+ }
+ 
  //---------------------------WarmCallGenerator--------------------------------
  // Internal class which handles initial deferral of inlining decisions.
  class WarmCallGenerator : public CallGenerator {
    WarmCallInfo*   _call_info;
    CallGenerator*  _if_cold;
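For context, the new generator defers inlining of Object.equals-shaped call sites: generate() emits a plain direct call and records the site via add_object_equals_late_inline() so the compiler can revisit it later. A minimal sketch of how a caller might select the new generator; the should_delay_object_equals_inlining() predicate is hypothetical and named here only for illustration, as this hunk adds no such wiring:

// Sketch only: routing a call site through the new late-inline generator.
// should_delay_object_equals_inlining() is a hypothetical predicate,
// not part of this patch.
static CallGenerator* select_equals_cg(Compile* C, ciMethod* callee,
                                       JVMState* jvms, float expected_uses) {
  CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);
  if (cg != NULL && C->should_delay_object_equals_inlining(callee, jvms)) {
    // Emit a direct call now and record the site so it can be
    // re-examined for inlining after parsing finishes.
    cg = CallGenerator::for_object_equals_late_inline(callee, cg);
  }
  return cg;
}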
*** 1307,1316 **** --- 1329,1545 ----
      kit.uncommon_trap(_reason, _action);
    }
    return kit.transfer_exceptions_into_jvms();
  }
+ 
+ class LateInlineVirtualCallGenerator : public InlineCallGenerator {
+  private:
+   int       _vtable_index;
+   bool      _call_does_dispatch;
+   bool      _allow_inline;
+   float     _prof_factor;
+   ciKlass*  _speculative_receiver_type;
+   bool      _delayed_forbidden;
+ 
+   CallGenerator* call_generator(JVMState* jvms) {
+     Compile* C = Compile::current();
+     if (_allow_inline) {
+       ciMethod* caller = jvms->method();
+       int bci = jvms->bci();
+       ciCallProfile profile = caller->call_profile_at_bci(bci);
+       int site_count = profile.count();
+       int receiver_count = -1;
+       if (_call_does_dispatch && UseTypeProfile && profile.has_receiver(0)) {
+         // Receivers in the profile structure are ordered by call counts
+         // so that the most called (major) receiver is profile.receiver(0).
+         receiver_count = profile.receiver_count(0);
+       }
+ 
+       // The profile data is only partly attributable to this caller,
+       // scale back the call site information.
+       float past_uses = jvms->method()->scale_count(site_count, _prof_factor);
+       // This is the number of times we expect the call code to be used.
+       float expected_uses = past_uses;
+ 
+       // Try inlining a bytecoded method:
+       if (!_call_does_dispatch) {
+         InlineTree* ilt = InlineTree::find_subtree_from_root(C->ilt(), jvms->caller(), jvms->method());
+         WarmCallInfo scratch_ci;
+         bool should_delay = false;
+         WarmCallInfo* ci = ilt->ok_to_inline(method(), jvms, profile, &scratch_ci, should_delay);
+         assert(ci != &scratch_ci, "do not let this pointer escape");
+         bool allow_inline   = (ci != NULL && !ci->is_cold());
+         bool require_inline = (allow_inline && ci->is_hot());
+ 
+         if (allow_inline) {
+           CallGenerator* cg = CallGenerator::for_inline(method(), expected_uses);
+ 
+           if (require_inline && cg != NULL) {
+             // Delay the inlining of this method to give us the
+             // opportunity to perform some high level optimizations
+             // first.
+             if (C->should_delay_string_inlining(method(), jvms)) {
+               assert(!_delayed_forbidden, "strange");
+               return CallGenerator::for_string_late_inline(method(), cg);
+             } else if (C->should_delay_boxing_inlining(method(), jvms)) {
+               assert(!_delayed_forbidden, "strange");
+               return CallGenerator::for_boxing_late_inline(method(), cg);
+             } else if ((should_delay || AlwaysIncrementalInline) && !_delayed_forbidden) {
+               return CallGenerator::for_late_inline(method(), cg);
+             }
+           }
+           if (cg == NULL || should_delay) {
+             // Fall through.
+           } else if (require_inline || !InlineWarmCalls) {
+             return cg;
+           } else {
+             CallGenerator* cold_cg = C->call_generator(method(), _vtable_index, _call_does_dispatch, jvms, false, _prof_factor);
+             return CallGenerator::for_warm_call(ci, cold_cg, cg);
+           }
+         }
+       }
+ 
+       // Try using the type profile.
+       if (_call_does_dispatch && site_count > 0 && receiver_count > 0) {
+         // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
+         bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
+         ciMethod* receiver_method = NULL;
+ 
+         int morphism = profile.morphism();
+         if (_speculative_receiver_type != NULL) {
+           if (!C->too_many_traps(caller, bci, Deoptimization::Reason_speculate_class_check)) {
+             // We have a speculative type, we should be able to resolve
+             // the call. We do that before looking at the profiling at
+             // this invoke because it may lead to bimorphic inlining which
+             // a speculative type should help us avoid.
+             receiver_method = method()->resolve_invoke(jvms->method()->holder(),
+                                                        _speculative_receiver_type);
+             if (receiver_method == NULL) {
+               _speculative_receiver_type = NULL;
+             } else {
+               morphism = 1;
+             }
+           } else {
+             // speculation failed before. Use profiling at the call
+             // (could allow bimorphic inlining for instance).
+             _speculative_receiver_type = NULL;
+           }
+         }
+         if (receiver_method == NULL &&
+             (have_major_receiver || morphism == 1 ||
+              (morphism == 2 && UseBimorphicInlining))) {
+           // receiver_method = profile.method();
+           // Profiles do not suggest methods now. Look it up in the major receiver.
+           receiver_method = method()->resolve_invoke(jvms->method()->holder(),
+                                                      profile.receiver(0));
+         }
+         if (receiver_method != NULL) {
+           // The single majority receiver sufficiently outweighs the minority.
+           CallGenerator* hit_cg = C->call_generator(receiver_method,
+               _vtable_index, !_call_does_dispatch, jvms, _allow_inline, _prof_factor);
+           if (hit_cg != NULL) {
+             // Look up second receiver.
+             CallGenerator* next_hit_cg = NULL;
+             ciMethod* next_receiver_method = NULL;
+             if (morphism == 2 && UseBimorphicInlining) {
+               next_receiver_method = method()->resolve_invoke(jvms->method()->holder(),
+                                                               profile.receiver(1));
+               if (next_receiver_method != NULL) {
+                 next_hit_cg = C->call_generator(next_receiver_method,
+                     _vtable_index, !_call_does_dispatch, jvms,
+                     _allow_inline, _prof_factor);
+                 if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
+                     have_major_receiver && UseOnlyInlinedBimorphic) {
+                   // Skip if we can't inline second receiver's method
+                   next_hit_cg = NULL;
+                 }
+               }
+             }
+             CallGenerator* miss_cg;
+             Deoptimization::DeoptReason reason = morphism == 2 ?
+                 Deoptimization::Reason_bimorphic : Deoptimization::reason_class_check(_speculative_receiver_type != NULL);
+             if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) &&
+                 !C->too_many_traps(caller, bci, reason)) {
+               // Generate uncommon trap for class check failure path
+               // in case of monomorphic or bimorphic virtual call site.
+               miss_cg = CallGenerator::for_uncommon_trap(method(), reason,
+                                                          Deoptimization::Action_maybe_recompile);
+             } else {
+               // Generate virtual call for class check failure path
+               // in case of polymorphic virtual call site.
+               miss_cg = CallGenerator::for_virtual_call(method(), _vtable_index);
+             }
+             if (miss_cg != NULL) {
+               if (next_hit_cg != NULL) {
+                 assert(_speculative_receiver_type == NULL, "shouldn't end up here if we used speculation");
+                 C->trace_type_profile(jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
+                 // We don't need to record dependency on a receiver here and below.
+                 // Whenever we inline, the dependency is added by Parse::Parse().
+                 miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
+               }
+               if (miss_cg != NULL) {
+                 C->trace_type_profile(jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count);
+                 ciKlass* k = _speculative_receiver_type != NULL ? _speculative_receiver_type : profile.receiver(0);
+                 float hit_prob = _speculative_receiver_type != NULL ? 1.0 : profile.receiver_prob(0);
+                 CallGenerator* cg = CallGenerator::for_predicted_call(k, miss_cg, hit_cg, hit_prob);
+                 if (cg != NULL)  return cg;
+               }
+             }
+           }
+         }
+       }
+     }
+ 
+     // There was no special inlining tactic, or it bailed out.
+     // Use a more generic tactic, like a simple call.
+     if (_call_does_dispatch) {
+       const char* msg = "virtual call";
+       if (PrintInlining)  C->print_inlining(method(), jvms->depth() - 1, jvms->bci(), msg);
+       C->log_inline_failure(msg);
+       return CallGenerator::for_virtual_call(method(), _vtable_index);
+     } else {
+       // Class Hierarchy Analysis or Type Profile reveals a unique target,
+       // or it is a static or special call.
+       return CallGenerator::for_direct_call(method(), C->should_delay_inlining(method(), jvms));
+     }
+   }
+ 
+  public:
+   LateInlineVirtualCallGenerator(ciMethod* method,
+                                  int vtable_index,
+                                  bool call_does_dispatch,
+                                  bool allow_inline,
+                                  float prof_factor,
+                                  ciKlass* speculative_receiver_type,
+                                  bool delayed_forbidden)
+     : InlineCallGenerator(method),
+       _vtable_index(vtable_index),
+       _call_does_dispatch(call_does_dispatch),
+       _allow_inline(allow_inline),
+       _prof_factor(prof_factor),
+       _speculative_receiver_type(speculative_receiver_type),
+       _delayed_forbidden(delayed_forbidden)
+   {
+   }
+ 
+   virtual JVMState* generate(JVMState* jvms) {
+     CallGenerator* cg = call_generator(jvms);
+     return cg->generate(jvms);
+   }
+ };
+ 
+ CallGenerator* CallGenerator::for_late_inline_Virtual_call(ciMethod* method,
+                                                            int vtable_index,
+                                                            bool call_does_dispatch,
+                                                            bool allow_inline,
+                                                            float prof_factor,
+                                                            ciKlass* speculative_receiver_type,
+                                                            bool delayed_forbidden) {
+   return new LateInlineVirtualCallGenerator(method, vtable_index, call_does_dispatch, allow_inline,
+                                             prof_factor, speculative_receiver_type, delayed_forbidden);
+ }
+ 
  // (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)
  // (Note:  Merged hook_up_exits into ParseGenerator::generate.)
  
  #define NODES_OVERHEAD_PER_METHOD (30.0)
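The factory mirrors the inputs of Compile::call_generator(), so the inlining tactic (inline, predicted, virtual, warm, or plain direct call) is chosen only when generate() runs, after earlier phases may have sharpened the receiver type. A sketch of how a call site could be wired up, under the assumption that the surrounding variables are computed by the caller as in doCall.cpp; no such wiring is part of this hunk:

// Sketch only: deferring the devirtualization decision to generate() time.
// callee, vtable_index, call_does_dispatch, prof_factor,
// speculative_receiver_type and jvms are assumed caller-provided.
CallGenerator* cg = CallGenerator::for_late_inline_Virtual_call(
    callee, vtable_index, call_does_dispatch, /*allow_inline=*/ true,
    prof_factor, speculative_receiver_type, /*delayed_forbidden=*/ false);
// The tactic is picked inside generate(), not at construction.
JVMState* new_jvms = cg->generate(jvms);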