src/hotspot/share/opto/callGenerator.cpp

*** 37,46 ****
--- 37,47 ----
  #include "opto/cfgnode.hpp"
  #include "opto/parse.hpp"
  #include "opto/rootnode.hpp"
  #include "opto/runtime.hpp"
  #include "opto/subnode.hpp"
+ #include "opto/valuetypenode.hpp"
  #include "runtime/sharedRuntime.hpp"
  
  // Utility function.
  const TypeFunc* CallGenerator::tf() const {
    return TypeFunc::make(method());
*** 121,147 ****
  // Internal class which handles all out-of-line calls w/o receiver type checks.
  class DirectCallGenerator : public CallGenerator {
   private:
    CallStaticJavaNode* _call_node;
    // Force separate memory and I/O projections for the exceptional
!   // paths to facilitate late inlinig.
    bool                _separate_io_proj;
  
   public:
    DirectCallGenerator(ciMethod* method, bool separate_io_proj)
      : CallGenerator(method),
        _separate_io_proj(separate_io_proj)
    {
    }
    virtual JVMState* generate(JVMState* jvms);
  
    CallStaticJavaNode* call_node() const { return _call_node; }
  };
  
  JVMState* DirectCallGenerator::generate(JVMState* jvms) {
    GraphKit kit(jvms);
    kit.C->print_inlining_update(this);
    bool is_static = method()->is_static();
    address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                               : SharedRuntime::get_resolve_opt_virtual_call_stub();
  
    if (kit.C->log() != NULL) {
--- 122,158 ----
  // Internal class which handles all out-of-line calls w/o receiver type checks.
  class DirectCallGenerator : public CallGenerator {
   private:
    CallStaticJavaNode* _call_node;
    // Force separate memory and I/O projections for the exceptional
!   // paths to facilitate late inlining.
    bool                _separate_io_proj;
  
   public:
    DirectCallGenerator(ciMethod* method, bool separate_io_proj)
      : CallGenerator(method),
+       _call_node(NULL),
        _separate_io_proj(separate_io_proj)
    {
+     if (ValueTypeReturnedAsFields && method->is_method_handle_intrinsic()) {
+       // If that call has not been optimized by the time optimizations are over,
+       // we'll need to add a call to create a value type instance from the klass
+       // returned by the call (see PhaseMacroExpand::expand_mh_intrinsic_return).
+       // Separating memory and I/O projections for exceptions is required to
+       // perform that graph transformation.
+       _separate_io_proj = true;
+     }
    }
    virtual JVMState* generate(JVMState* jvms);
  
    CallStaticJavaNode* call_node() const { return _call_node; }
  };
  
  JVMState* DirectCallGenerator::generate(JVMState* jvms) {
    GraphKit kit(jvms);
    kit.C->print_inlining_update(this);
+   PhaseGVN& gvn = kit.gvn();
    bool is_static = method()->is_static();
    address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                               : SharedRuntime::get_resolve_opt_virtual_call_stub();
  
    if (kit.C->log() != NULL) {
*** 170,180 ****
      if (method()->is_method_handle_intrinsic() ||
          method()->is_compiled_lambda_form()) {
        call->set_method_handle_invoke(true);
      }
    }
!   kit.set_arguments_for_java_call(call);
    kit.set_edges_for_java_call(call, false, _separate_io_proj);
    Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
    kit.push_node(method()->return_type()->basic_type(), ret);
    return kit.transfer_exceptions_into_jvms();
  }
--- 181,194 ----
      if (method()->is_method_handle_intrinsic() ||
          method()->is_compiled_lambda_form()) {
        call->set_method_handle_invoke(true);
      }
    }
!   kit.set_arguments_for_java_call(call, is_late_inline());
!   if (kit.stopped()) {
!     return kit.transfer_exceptions_into_jvms();
!   }
    kit.set_edges_for_java_call(call, false, _separate_io_proj);
    Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
    kit.push_node(method()->return_type()->basic_type(), ret);
    return kit.transfer_exceptions_into_jvms();
  }
*** 196,206 ****
  };
  
  JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
    GraphKit kit(jvms);
    Node* receiver = kit.argument(0);
- 
    kit.C->print_inlining_update(this);
  
    if (kit.C->log() != NULL) {
      kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
    }
--- 210,219 ----
*** 208,218 ****
    // If the receiver is a constant null, do not torture the system
    // by attempting to call through it. The compile will proceed
    // correctly, but may bail out in final_graph_reshaping, because
    // the call instruction will have a seemingly deficient out-count.
    // (The bailout says something misleading about an "infinite loop".)
!   if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
      assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
      ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
      int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
      kit.inc_sp(arg_size);  // restore arguments
      kit.uncommon_trap(Deoptimization::Reason_null_check,
--- 221,231 ----
    // If the receiver is a constant null, do not torture the system
    // by attempting to call through it. The compile will proceed
    // correctly, but may bail out in final_graph_reshaping, because
    // the call instruction will have a seemingly deficient out-count.
    // (The bailout says something misleading about an "infinite loop".)
!   if (!receiver->is_ValueType() && kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
      assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
      ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
      int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
      kit.inc_sp(arg_size);  // restore arguments
      kit.uncommon_trap(Deoptimization::Reason_null_check,
*** 254,263 ****
--- 267,279 ----
      // about the method being invoked should be attached to the call site to
      // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
      call->set_override_symbolic_info(true);
    }
    kit.set_arguments_for_java_call(call);
+   if (kit.stopped()) {
+     return kit.transfer_exceptions_into_jvms();
+   }
    kit.set_edges_for_java_call(call);
    Node* ret = kit.set_results_for_java_call(call);
    kit.push_node(method()->return_type()->basic_type(), ret);
  
    // Represent the effect of an implicit receiver null_check
*** 354,364 ****
    if (call == NULL || call->outcnt() == 0 ||
        call->in(0) == NULL || call->in(0)->is_top()) {
      return;
    }
  
!   const TypeTuple *r = call->tf()->domain();
    for (int i1 = 0; i1 < method()->arg_size(); i1++) {
      if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
        assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
        return;
      }
--- 370,380 ----
    if (call == NULL || call->outcnt() == 0 ||
        call->in(0) == NULL || call->in(0)->is_top()) {
      return;
    }
  
!   const TypeTuple *r = call->tf()->domain_cc();
    for (int i1 = 0; i1 < method()->arg_size(); i1++) {
      if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
        assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
        return;
      }
*** 368,397 ****
        assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
        return;
      }
  
    // check for unreachable loop
!   CallProjections callprojs;
!   call->extract_projections(&callprojs, true);
!   if (callprojs.fallthrough_catchproj == call->in(0) ||
!       callprojs.catchall_catchproj == call->in(0) ||
!       callprojs.fallthrough_memproj == call->in(TypeFunc::Memory) ||
!       callprojs.catchall_memproj == call->in(TypeFunc::Memory) ||
!       callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O) ||
!       callprojs.catchall_ioproj == call->in(TypeFunc::I_O) ||
!       (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
!       (callprojs.exobj != NULL && call->find_edge(callprojs.exobj) != -1)) {
      return;
    }
  
    Compile* C = Compile::current();
    // Remove inlined methods from Compiler's lists.
    if (call->is_macro()) {
      C->remove_macro_node(call);
    }
  
-   bool result_not_used = (callprojs.resproj == NULL || callprojs.resproj->outcnt() == 0);
    if (_is_pure_call && result_not_used) {
      // The call is marked as pure (no important side effects), but result isn't used.
      // It's safe to remove the call.
      GraphKit kit(call->jvms());
      kit.replace_call(call, C->top(), true);
--- 384,421 ----
        assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
        return;
      }
  
    // check for unreachable loop
!   CallProjections* callprojs = call->extract_projections(true);
!   if (callprojs->fallthrough_catchproj == call->in(0) ||
!       callprojs->catchall_catchproj == call->in(0) ||
!       callprojs->fallthrough_memproj == call->in(TypeFunc::Memory) ||
!       callprojs->catchall_memproj == call->in(TypeFunc::Memory) ||
!       callprojs->fallthrough_ioproj == call->in(TypeFunc::I_O) ||
!       callprojs->catchall_ioproj == call->in(TypeFunc::I_O) ||
!       (callprojs->exobj != NULL && call->find_edge(callprojs->exobj) != -1)) {
      return;
    }
  
+   bool result_not_used = true;
+   for (uint i = 0; i < callprojs->nb_resproj; i++) {
+     if (callprojs->resproj[i] != NULL) {
+       if (callprojs->resproj[i]->outcnt() != 0) {
+         result_not_used = false;
+       }
+       if (call->find_edge(callprojs->resproj[i]) != -1) {
+         return;
+       }
+     }
+   }
    Compile* C = Compile::current();
    // Remove inlined methods from Compiler's lists.
    if (call->is_macro()) {
      C->remove_macro_node(call);
    }
  
    if (_is_pure_call && result_not_used) {
      // The call is marked as pure (no important side effects), but result isn't used.
      // It's safe to remove the call.
      GraphKit kit(call->jvms());
      kit.replace_call(call, C->top(), true);
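A note on the result_not_used rework above: once a value type can be returned as multiple fields, a call may carry an array of result projections rather than a single resproj, so the old one-line test becomes a scan. The standalone sketch below (plain C++, not HotSpot code; MockProj is invented, and only the resproj/nb_resproj names mirror the hunk) shows the shape of that scan:

    #include <cstdio>

    // Minimal stand-ins for the result projections of a call.
    struct MockProj { unsigned outcnt; };  // outcnt = number of users in the graph

    // Mirrors the result_not_used computation above: the result is unused only
    // if every non-NULL result projection has no users.
    static bool result_not_used(MockProj* const* resproj, unsigned nb_resproj) {
      for (unsigned i = 0; i < nb_resproj; i++) {
        if (resproj[i] != nullptr && resproj[i]->outcnt != 0) {
          return false;
        }
      }
      return true;
    }

    int main() {
      MockProj oop_res   = {0};  // projection for the returned oop: no users
      MockProj field_res = {2};  // projection for one scalarized field: two users
      MockProj* projs[]  = { &oop_res, &field_res, nullptr };
      std::printf("result_not_used = %d\n", result_not_used(projs, 3));  // prints 0
      return 0;
    }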
*** 403,432 ****
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }
  
    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
!     C->initial_gvn()->set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }
  
-   uint nargs = method()->arg_size();
    // blow away old call arguments
    Node* top = C->top();
!   for (uint i1 = 0; i1 < nargs; i1++) {
!     map->set_req(TypeFunc::Parms + i1, top);
    }
    jvms->set_map(map);
  
    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
    for (uint i1 = 0; i1 < nargs; i1++) {
!     map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
    }
  
    C->print_inlining_assert_ready();
  
    C->print_inlining_move_to(this);
--- 427,477 ----
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }
  
+   PhaseGVN& gvn = *C->initial_gvn();
    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
!     gvn.set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }
  
    // blow away old call arguments
    Node* top = C->top();
!   for (uint i1 = TypeFunc::Parms; i1 < call->_tf->domain_cc()->cnt(); i1++) {
!     map->set_req(i1, top);
    }
    jvms->set_map(map);
  
    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
+   const TypeTuple *domain_sig = call->_tf->domain_sig();
+   ExtendedSignature sig_cc = ExtendedSignature(method()->get_sig_cc(), SigEntryFilter());
+   uint nargs = method()->arg_size();
+   assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");
+ 
+   uint j = TypeFunc::Parms;
    for (uint i1 = 0; i1 < nargs; i1++) {
!     const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
!     if (method()->has_scalarized_args() && t->is_valuetypeptr() && !t->maybe_null()) {
!       // Value type arguments are not passed by reference: we get an argument per
!       // field of the value type. Build ValueTypeNodes from the value type arguments.
!       GraphKit arg_kit(jvms, &gvn);
!       arg_kit.set_control(map->control());
!       ValueTypeNode* vt = ValueTypeNode::make_from_multi(&arg_kit, call, sig_cc, t->value_klass(), j, true);
!       map->set_control(arg_kit.control());
!       map->set_argument(jvms, i1, vt);
!     } else {
!       map->set_argument(jvms, i1, call->in(j++));
!       BasicType bt = t->basic_type();
!       while (SigEntry::next_is_reserved(sig_cc, bt, true)) {
!         j += type2size[bt]; // Skip reserved arguments
!       }
!     }
    }
  
    C->print_inlining_assert_ready();
  
    C->print_inlining_move_to(this);
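The rebuilt argument loop above is easier to follow with a concrete layout in mind: under the scalarized calling convention a non-nullable value-type parameter contributes one call input per field instead of a single oop, so j tracks the raw input index while i1 tracks declared arguments. A minimal sketch (plain C++; the Point example and MockSigEntry are illustrative stand-ins, not the real SigEntry/ExtendedSignature machinery):

    #include <cstdio>
    #include <vector>

    // One entry of a mock scalarized signature: the declared argument it
    // belongs to and the name of the value it carries.
    struct MockSigEntry { int arg; const char* name; };

    int main() {
      // foo(Point p, int z) with Point = { int x; int y; }: argument 0 is
      // scalarized into p.x and p.y, so the call has three inputs, not two.
      std::vector<MockSigEntry> sig_cc = { {0, "p.x"}, {0, "p.y"}, {1, "z"} };

      // Walk the call inputs the way the loop above advances j: consecutive
      // entries of the same argument fold back into one declared argument.
      size_t j = 0;  // call input index (the real code offsets by TypeFunc::Parms)
      while (j < sig_cc.size()) {
        int arg = sig_cc[j].arg;
        std::printf("argument %d rebuilt from inputs:", arg);
        while (j < sig_cc.size() && sig_cc[j].arg == arg) {
          std::printf(" in(%zu)=%s", j, sig_cc[j].name);
          j++;
        }
        std::printf("\n");
      }
      return 0;
    }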
*** 465,474 ****
--- 510,544 ----
    C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
    C->env()->notice_inlined_method(_inline_cg->method());
  
    C->set_inlining_progress(true);
    C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
+ 
+   // Handle value type returns
+   bool returned_as_fields = call->tf()->returns_value_type_as_fields();
+   if (result->is_ValueType()) {
+     ValueTypeNode* vt = result->as_ValueType();
+     if (returned_as_fields) {
+       // Return of multiple values (the fields of a value type)
+       vt->replace_call_results(&kit, call, C);
+       if (vt->is_allocated(&gvn) && !StressValueTypeReturnedAsFields) {
+         result = vt->get_oop();
+       } else {
+         result = vt->tagged_klass(gvn);
+       }
+     } else {
+       result = ValueTypePtrNode::make_from_value_type(&kit, vt);
+     }
+   } else if (gvn.type(result)->is_valuetypeptr() && returned_as_fields) {
+     const Type* vt_t = call->_tf->range_sig()->field_at(TypeFunc::Parms);
+     Node* cast = new CheckCastPPNode(NULL, result, vt_t);
+     gvn.record_for_igvn(cast);
+     ValueTypePtrNode* vtptr = ValueTypePtrNode::make_from_oop(&kit, gvn.transform(cast));
+     vtptr->replace_call_results(&kit, call, C);
+     result = cast;
+   }
+ 
    kit.replace_call(call, result, true);
  }
  
  }
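The value-type return handling above distinguishes three shapes the inlined result can take. As a compact summary (an illustrative C++ decision table, not HotSpot code; the enum and function names are invented, and the returned strings name the nodes the real code builds):

    #include <cstdio>

    // What the call's result is replaced with, depending on the shape of the
    // inlined result and on whether the caller expects fields in registers.
    enum class ResultShape { ValueTypeNode, OopPointer };

    static const char* replacement(ResultShape shape, bool returned_as_fields, bool allocated) {
      if (shape == ResultShape::ValueTypeNode) {
        if (returned_as_fields) {
          // Caller unpacks multiple values: pass the existing buffer oop if
          // there is one, otherwise a tagged klass pointer marking an
          // unbuffered value.
          return allocated ? "vt->get_oop()" : "vt->tagged_klass(gvn)";
        }
        return "ValueTypePtrNode::make_from_value_type(...)";  // buffer the value
      }
      // Result is already an oop; if fields are expected, split them back out.
      return returned_as_fields ? "CheckCastPP + replace_call_results(...)" : "result unchanged";
    }

    int main() {
      std::printf("%s\n", replacement(ResultShape::ValueTypeNode, true, false));
      return 0;
    }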
*** 504,522 ****
    }
  };
  
  bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {
  
!   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);
  
    Compile::current()->print_inlining_update_delayed(this);
  
    if (!_input_not_const) {
      _attempt++;
    }
  
!   if (cg != NULL && cg->is_inline()) {
      assert(!cg->is_late_inline(), "we're doing late inlining");
      _inline_cg = cg;
      Compile::current()->dec_number_of_mh_late_inlines();
      return true;
    }
--- 574,592 ----
    }
  };
  
  bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {
  
!   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const, AlwaysIncrementalInline);
  
    Compile::current()->print_inlining_update_delayed(this);
  
    if (!_input_not_const) {
      _attempt++;
    }
  
!   if (cg != NULL && (cg->is_inline() || cg->is_inlined_method_handle_intrinsic(jvms, cg->method()))) {
      assert(!cg->is_late_inline(), "we're doing late inlining");
      _inline_cg = cg;
      Compile::current()->dec_number_of_mh_late_inlines();
      return true;
    }
*** 782,791 ****
--- 852,883 ----
      // Inlined method threw an exception, so it's just the slow path after all.
      kit.set_jvms(slow_jvms);
      return kit.transfer_exceptions_into_jvms();
    }
  
+   // Allocate value types if they are merged with objects (similar to Parse::merge_common())
+   uint tos = kit.jvms()->stkoff() + kit.sp();
+   uint limit = slow_map->req();
+   for (uint i = TypeFunc::Parms; i < limit; i++) {
+     Node* m = kit.map()->in(i);
+     Node* n = slow_map->in(i);
+     const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
+     if (m->is_ValueType() && !t->isa_valuetype()) {
+       // Allocate value type in fast path
+       m = ValueTypePtrNode::make_from_value_type(&kit, m->as_ValueType());
+       kit.map()->set_req(i, m);
+     }
+     if (n->is_ValueType() && !t->isa_valuetype()) {
+       // Allocate value type in slow path
+       PreserveJVMState pjvms(&kit);
+       kit.set_map(slow_map);
+       n = ValueTypePtrNode::make_from_value_type(&kit, n->as_ValueType());
+       kit.map()->set_req(i, n);
+       slow_map = kit.stop();
+     }
+   }
+ 
    // There are 2 branches and the replaced nodes are only valid on
    // one: restore the replaced nodes to what they were before the
    // branch.
    kit.map()->set_replaced_nodes(replaced_nodes);
  
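The allocation loop above follows the same rule as Parse::merge_common(): a Phi cannot merge a scalarized value with an oop, so the branch that still holds a scalarized node buffers it whenever the meet of the two branch types is no longer a value type. A toy version of that rule (plain C++; the two-point Repr lattice is an illustration, not the real Type lattice):

    #include <cstdio>

    // Two representations a value can have at a merge point.
    enum Repr { SCALARIZED, OOP };

    // Toy meet: the merged representation is scalarized only if both inputs are.
    static Repr meet(Repr a, Repr b) {
      return (a == SCALARIZED && b == SCALARIZED) ? SCALARIZED : OOP;
    }

    int main() {
      Repr fast = SCALARIZED;  // fast path kept the value in registers
      Repr slow = OOP;         // slow path produced a heap reference
      if (meet(fast, slow) != SCALARIZED) {
        if (fast == SCALARIZED) std::printf("buffer fast-path value before the Phi\n");
        if (slow == SCALARIZED) std::printf("buffer slow-path value before the Phi\n");
      }
      return 0;
    }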
*** 805,816 ****
      Node* phi = mms.memory();
      if (phi->is_Phi() && phi->in(0) == region) {
        mms.set_memory(gvn.transform(phi));
      }
    }
-   uint tos = kit.jvms()->stkoff() + kit.sp();
-   uint limit = slow_map->req();
    for (uint i = TypeFunc::Parms; i < limit; i++) {
      // Skip unused stack slots; fast forward to monoff();
      if (i == tos) {
        i = kit.jvms()->monoff();
        if( i >= limit ) break;
--- 897,906 ----
*** 829,839 ****
  CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
    assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
    bool input_not_const;
!   CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
    Compile* C = Compile::current();
    if (cg != NULL) {
      if (!delayed_forbidden && AlwaysIncrementalInline) {
        return CallGenerator::for_late_inline(callee, cg);
      } else {
--- 919,929 ----
  CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
    assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
    bool input_not_const;
!   CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const, false);
    Compile* C = Compile::current();
    if (cg != NULL) {
      if (!delayed_forbidden && AlwaysIncrementalInline) {
        return CallGenerator::for_late_inline(callee, cg);
      } else {
*** 842,861 ****
    }
    int bci = jvms->bci();
    ciCallProfile profile = caller->call_profile_at_bci(bci);
    int call_site_count = caller->scale_count(profile.count());
  
!   if (IncrementalInline && call_site_count > 0 &&
!       (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
      return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
    } else {
      // Out-of-line call.
      return CallGenerator::for_direct_call(callee);
    }
  }
  
! CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
    GraphKit kit(jvms);
    PhaseGVN& gvn = kit.gvn();
    Compile* C = kit.C;
    vmIntrinsics::ID iid = callee->intrinsic_id();
    input_not_const = true;
--- 932,968 ----
    }
    int bci = jvms->bci();
    ciCallProfile profile = caller->call_profile_at_bci(bci);
    int call_site_count = caller->scale_count(profile.count());
  
!   if (IncrementalInline && (AlwaysIncrementalInline ||
!       (call_site_count > 0 && (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
      return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
    } else {
      // Out-of-line call.
      return CallGenerator::for_direct_call(callee);
    }
  }
  
! static void cast_argument(int nargs, int arg_nb, ciType* t, GraphKit& kit) {
!   PhaseGVN& gvn = kit.gvn();
!   Node* arg = kit.argument(arg_nb);
!   const Type* arg_type = arg->bottom_type();
!   const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass());
!   if (arg_type->isa_oopptr() && !arg_type->higher_equal(sig_type)) {
!     const Type* narrowed_arg_type = arg_type->join_speculative(sig_type); // keep speculative part
!     arg = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
!     kit.set_argument(arg_nb, arg);
!   }
!   if (sig_type->is_valuetypeptr() && !arg->is_ValueType() &&
!       !kit.gvn().type(arg)->maybe_null() && t->as_value_klass()->is_scalarizable()) {
!     arg = ValueTypeNode::make_from_oop(&kit, arg, t->as_value_klass());
!     kit.set_argument(arg_nb, arg);
!   }
! }
! 
! CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const, bool delayed_forbidden) {
    GraphKit kit(jvms);
    PhaseGVN& gvn = kit.gvn();
    Compile* C = kit.C;
    vmIntrinsics::ID iid = callee->intrinsic_id();
    input_not_const = true;
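The new cast_argument helper factors out the per-argument narrowing that the later hunks previously spelled out twice, and adds the value-type case: a non-nullable, scalarizable value-type argument is additionally replaced by a scalarized node after the cast. The narrowing itself is the usual join-and-cast rule; a toy model (plain C++; the three-level class hierarchy is invented for illustration):

    #include <cstdio>
    #include <cstring>

    // Toy three-level lattice: Object >: MyAbstract >: MyValue.
    static int depth(const char* t) {
      if (std::strcmp(t, "Object") == 0)     return 0;
      if (std::strcmp(t, "MyAbstract") == 0) return 1;
      return 2;  // "MyValue"
    }

    // Mirrors the narrowing rule above: keep the argument type if it is
    // already at least as precise as the signature type, otherwise cast to
    // the join (here simply the deeper of the two types).
    static const char* narrow(const char* arg_type, const char* sig_type) {
      return depth(arg_type) >= depth(sig_type) ? arg_type : sig_type;
    }

    int main() {
      std::printf("%s\n", narrow("Object", "MyValue"));  // casts down to MyValue
      std::printf("%s\n", narrow("MyValue", "Object"));  // already precise: kept
      return 0;
    }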
*** 878,888 ****
        CallGenerator* cg = C->call_generator(target, vtable_index,
                                              false /* call_does_dispatch */,
                                              jvms,
                                              true /* allow_inline */,
!                                             PROB_ALWAYS);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
  
--- 985,998 ----
        CallGenerator* cg = C->call_generator(target, vtable_index,
                                              false /* call_does_dispatch */,
                                              jvms,
                                              true /* allow_inline */,
!                                             PROB_ALWAYS,
!                                             NULL,
!                                             true,
!                                             delayed_forbidden);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
  
*** 892,903 ****
    case vmIntrinsics::_linkToVirtual:
    case vmIntrinsics::_linkToStatic:
    case vmIntrinsics::_linkToSpecial:
    case vmIntrinsics::_linkToInterface:
      {
        // Get MemberName argument:
!       Node* member_name = kit.argument(callee->arg_size() - 1);
        if (member_name->Opcode() == Op_ConP) {
          input_not_const = false;
          const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
          ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
  
--- 1002,1014 ----
    case vmIntrinsics::_linkToVirtual:
    case vmIntrinsics::_linkToStatic:
    case vmIntrinsics::_linkToSpecial:
    case vmIntrinsics::_linkToInterface:
      {
+       int nargs = callee->arg_size();
        // Get MemberName argument:
!       Node* member_name = kit.argument(nargs - 1);
        if (member_name->Opcode() == Op_ConP) {
          input_not_const = false;
          const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
          ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
  
*** 913,943 ****
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
!         Node* arg = kit.argument(0);
!         const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
!         const Type* sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
!         if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
!           const Type* recv_type = arg_type->join_speculative(sig_type); // keep speculative part
!           Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, recv_type));
!           kit.set_argument(0, cast_obj);
!         }
        }
        // Cast reference arguments to its type.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
!           Node* arg = kit.argument(receiver_skip + j);
!           const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
!           const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass());
!           if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
!             const Type* narrowed_arg_type = arg_type->join_speculative(sig_type); // keep speculative part
!             Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
!             kit.set_argument(receiver_skip + j, cast_obj);
!           }
          }
          j += t->size();  // long and double take two slots
        }
  
        // Try to get the most accurate receiver type
--- 1024,1040 ----
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
!         cast_argument(nargs, 0, signature->accessing_klass(), kit);
        }
        // Cast reference arguments to its type.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
!           cast_argument(nargs, receiver_skip + j, t, kit);
          }
          j += t->size();  // long and double take two slots
        }
  
        // Try to get the most accurate receiver type
*** 964,974 ****
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              !StressMethodHandleLinkerInlining /* allow_inline */,
                                              PROB_ALWAYS,
!                                             speculative_receiver_type);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }
--- 1061,1073 ----
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              !StressMethodHandleLinkerInlining /* allow_inline */,
                                              PROB_ALWAYS,
!                                             speculative_receiver_type,
!                                             true,
!                                             delayed_forbidden);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }