src/share/vm/opto/callGenerator.cpp
old/src/share/vm/opto/callGenerator.cpp:




 120       _separate_io_proj(separate_io_proj)
 121   {
 122   }
 123   virtual JVMState* generate(JVMState* jvms);
 124 
 125   CallStaticJavaNode* call_node() const { return _call_node; }
 126 };
 127 
 128 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
 129   GraphKit kit(jvms);
 130   kit.C->print_inlining_update(this);
 131   bool is_static = method()->is_static();
 132   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
 133                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
 134 
 135   if (kit.C->log() != NULL) {
 136     kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
 137   }
 138 
 139   CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
 140   _call_node = call;  // Save the call node in case we need it later
 141   if (!is_static) {
 142     // Make an explicit receiver null_check as part of this call.
 143     // Since we share a map with the caller, his JVMS gets adjusted.
 144     kit.null_check_receiver_before_call(method());
 145     if (kit.stopped()) {
 146       // And dump it back to the caller, decorated with any exceptions:
 147       return kit.transfer_exceptions_into_jvms();
 148     }
 149     // Mark the call node as virtual, sort of:
 150     call->set_optimized_virtual(true);
 151     if (method()->is_method_handle_intrinsic() ||
 152         method()->is_compiled_lambda_form()) {
 153       call->set_method_handle_invoke(true);
 154     }
 155   }
 156   kit.set_arguments_for_java_call(call);
 157   kit.set_edges_for_java_call(call, false, _separate_io_proj);
 158   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 159   kit.push_node(method()->return_type()->basic_type(), ret);


 175   virtual bool      is_virtual() const          { return true; }
 176   virtual JVMState* generate(JVMState* jvms);
 177 };
 178 
 179 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
 180   GraphKit kit(jvms);
 181   Node* receiver = kit.argument(0);
 182 
 183   kit.C->print_inlining_update(this);
 184 
 185   if (kit.C->log() != NULL) {
 186     kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
 187   }
 188 
 189   // If the receiver is a constant null, do not torture the system
 190   // by attempting to call through it.  The compile will proceed
 191   // correctly, but may bail out in final_graph_reshaping, because
 192   // the call instruction will have a seemingly deficient out-count.
 193   // (The bailout says something misleading about an "infinite loop".)
 194   if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
 195     kit.inc_sp(method()->arg_size());  // restore arguments
 196     kit.uncommon_trap(Deoptimization::Reason_null_check,
 197                       Deoptimization::Action_none,
 198                       NULL, "null receiver");
 199     return kit.transfer_exceptions_into_jvms();
 200   }
 201 
 202   // Ideally we would unconditionally do a null check here and let it
 203   // be converted to an implicit check based on profile information.
 204   // However currently the conversion to implicit null checks in
 205   // Block::implicit_null_check() only looks for loads and stores, not calls.
 206   ciMethod *caller = kit.method();
 207   ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
 208   if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
 209        ((ImplicitNullCheckThreshold > 0) && caller_md &&
 210        (caller_md->trap_count(Deoptimization::Reason_null_check)
 211        >= (uint)ImplicitNullCheckThreshold))) {
 212     // Make an explicit receiver null_check as part of this call.
 213     // Since we share a map with the caller, his JVMS gets adjusted.
 214     receiver = kit.null_check_receiver_before_call(method());
 215     if (kit.stopped()) {
 216       // And dump it back to the caller, decorated with any exceptions:
 217       return kit.transfer_exceptions_into_jvms();
 218     }
 219   }
 220 
 221   assert(!method()->is_static(), "virtual call must not be to static");
 222   assert(!method()->is_final(), "virtual call should not be to final");
 223   assert(!method()->is_private(), "virtual call should not be to private");
 224   assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
 225          "no vtable calls if +UseInlineCaches ");
 226   address target = SharedRuntime::get_resolve_virtual_call_stub();
 227   // Normal inline cache used for call
 228   CallDynamicJavaNode *call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
 229   kit.set_arguments_for_java_call(call);
 230   kit.set_edges_for_java_call(call);
 231   Node* ret = kit.set_results_for_java_call(call);
 232   kit.push_node(method()->return_type()->basic_type(), ret);
 233 
 234   // Represent the effect of an implicit receiver null_check
 235   // as part of this call.  Since we share a map with the caller,
 236   // his JVMS gets adjusted.
 237   kit.cast_not_null(receiver);
 238   return kit.transfer_exceptions_into_jvms();
 239 }
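
For reference, here is a minimal standalone sketch of the explicit-null-check decision above. It is illustrative only: CallerProfile, the flag parameters, and needs_explicit_null_check are hypothetical stand-ins for the VM's globals and ciMethodData, not HotSpot code.

    #include <cstdio>

    // Hypothetical stand-in for the caller's ciMethodData profile.
    struct CallerProfile {
      unsigned null_check_trap_count;  // traps recorded for Reason_null_check
    };

    // Mirrors the condition in VirtualCallGenerator::generate(): emit an
    // explicit receiver null check when implicit (page-fault based) checks
    // are unavailable, or when this caller has already deoptimized too
    // often on null receivers.
    static bool needs_explicit_null_check(bool use_inline_caches,
                                          bool implicit_null_checks,
                                          bool zero_page_read_protected,
                                          const CallerProfile* md,
                                          unsigned threshold) {
      return !use_inline_caches || !implicit_null_checks ||
             !zero_page_read_protected ||
             (threshold > 0 && md != NULL &&
              md->null_check_trap_count >= threshold);
    }

    int main() {
      CallerProfile hot = { 5 };
      printf("%d\n", needs_explicit_null_check(true, true, true, &hot, 3));   // 1: threshold reached
      printf("%d\n", needs_explicit_null_check(true, true, true, &hot, 100)); // 0: implicit check still viable
      return 0;
    }
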
 240 
 241 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
 242   if (InlineTree::check_can_parse(m) != NULL)  return NULL;
 243   return new ParseGenerator(m, expected_uses);
 244 }
 245 
 246 // As a special case, the JVMS passed to this CallGenerator is
 247 // for the method execution already in progress, not just the JVMS
 248 // of the caller.  Thus, this CallGenerator cannot be mixed with others!


 446     if (_input_not_const) {
 447       // Inlining won't be possible, so there is no need to enqueue right now.
 448       call_node()->set_generator(this);
 449     } else {
 450       C->add_late_inline(this);
 451     }
 452     return new_jvms;
 453   }
 454 };
 455 
 456 bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {
 457 
 458   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);
 459 
 460   Compile::current()->print_inlining_update_delayed(this);
 461 
 462   if (!_input_not_const) {
 463     _attempt++;
 464   }
 465 
 466   if (cg != NULL) {
 467     assert(!cg->is_late_inline() && cg->is_inline(), "we're doing late inlining");
 468     _inline_cg = cg;
 469     Compile::current()->dec_number_of_mh_late_inlines();
 470     return true;
 471   }
 472 
 473   call_node()->set_generator(this);
 474   return false;
 475 }
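
A simplified model of the retry protocol implemented by do_late_inline_check() above, using illustrative stand-in types (LateInlineSite, InlineCG, resolver) rather than HotSpot's classes: each incremental-inlining pass asks the generator to re-check, and it either captures a concrete inline generator and reports success, or stays registered on the call node for a later pass.

    #include <cstddef>

    struct InlineCG;  // stands in for an inlinable CallGenerator

    struct LateInlineSite {
      InlineCG* _inline_cg;
      int       _attempt;

      LateInlineSite() : _inline_cg(NULL), _attempt(0) {}

      // resolver stands in for for_method_handle_inline(); it may fail in
      // this pass (e.g. the MethodHandle argument is not yet constant).
      bool do_late_inline_check(InlineCG* (*resolver)()) {
        _attempt++;
        InlineCG* cg = resolver();
        if (cg != NULL) {
          _inline_cg = cg;  // success: inline expansion can proceed
          return true;
        }
        return false;       // stay parked on the call node; retry later
      }
    };

    static InlineCG* failing_resolver() { return NULL; }

    int main() {
      LateInlineSite site;
      return site.do_late_inline_check(failing_resolver) ? 1 : 0;  // 0: parked
    }
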
 476 
 477 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
 478   Compile::current()->inc_number_of_mh_late_inlines();
 479   CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
 480   return cg;
 481 }
 482 
 483 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
 484 
 485  public:
 486   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 487     LateInlineCallGenerator(method, inline_cg) {}


 790 
 791 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
 792   GraphKit kit(jvms);
 793   PhaseGVN& gvn = kit.gvn();
 794   Compile* C = kit.C;
 795   vmIntrinsics::ID iid = callee->intrinsic_id();
 796   input_not_const = true;
 797   switch (iid) {
 798   case vmIntrinsics::_invokeBasic:
 799     {
 800       // Get MethodHandle receiver:
 801       Node* receiver = kit.argument(0);
 802       if (receiver->Opcode() == Op_ConP) {
 803         input_not_const = false;
 804         const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
 805         ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
 806         guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
 807         const int vtable_index = Method::invalid_vtable_index;
 808         CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
 809         assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
 810         if (cg != NULL && cg->is_inline())
 811           return cg;
 812       } else {
 813         const char* msg = "receiver not constant";
 814         if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
 815         C->log_inline_failure(msg);
 816       }
 817     }
 818     break;
 819 
 820   case vmIntrinsics::_linkToVirtual:
 821   case vmIntrinsics::_linkToStatic:
 822   case vmIntrinsics::_linkToSpecial:
 823   case vmIntrinsics::_linkToInterface:
 824     {
 825       // Get MemberName argument:
 826       Node* member_name = kit.argument(callee->arg_size() - 1);
 827       if (member_name->Opcode() == Op_ConP) {
 828         input_not_const = false;
 829         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
 830         ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
 831 
 832         // In lambda forms we erase signature types to avoid resolution issues
 833         // involving class loaders.  When we optimize a method handle invoke
 834         // to a direct call we must cast the receiver and arguments to their
 835         // actual types.
 836         ciSignature* signature = target->signature();
 837         const int receiver_skip = target->is_static() ? 0 : 1;
 838         // Cast receiver to its type.
 839         if (!target->is_static()) {
 840           Node* arg = kit.argument(0);
 841           const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
 842           const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
 843           if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
 844             Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
 845             kit.set_argument(0, cast_obj);
 846           }
 847         }
 848         // Cast reference arguments to their types.
 849         for (int i = 0; i < signature->count(); i++) {
 850           ciType* t = signature->type_at(i);
 851           if (t->is_klass()) {
 852             Node* arg = kit.argument(receiver_skip + i);


 865         int  vtable_index       = Method::invalid_vtable_index;
 866         bool call_does_dispatch = false;
 867 
 868         ciKlass* speculative_receiver_type = NULL;
 869         if (is_virtual_or_interface) {
 870           ciInstanceKlass* klass = target->holder();
 871           Node*             receiver_node = kit.argument(0);
 872           const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
 873           // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
 874           // optimize_virtual_call() takes 2 different holder
 875           // arguments for a corner case that doesn't apply here (see
 876           // Parse::do_call())
 877           target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
 878                                             target, receiver_type, is_virtual,
 879                                             call_does_dispatch, vtable_index, // out-parameters
 880                                             /*check_access=*/false);
 881           // We lack profiling at this call but type speculation may
 882           // provide us with a type
 883           speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
 884         }
 885         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
 886         assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
 887         if (cg != NULL && cg->is_inline())
 888           return cg;
 889       } else {
 890         const char* msg = "member_name not constant";
 891         if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
 892         C->log_inline_failure(msg);
 893       }
 894     }
 895     break;
 896 
 897   default:
 898     fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
 899     break;
 900   }
 901   return NULL;
 902 }
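
The casting step in the _linkTo* case above can be illustrated with a small self-contained sketch. The types here (Value, higher_equal, cast_arguments) are simplified stand-ins for Node, TypeOopPtr's lattice test, and the CheckCastPPNode transform; this is not VM code.

    #include <cstdio>
    #include <string>
    #include <vector>

    struct Value { std::string static_type; };

    // Placeholder for the type-lattice subtype test the VM performs on
    // TypeOopPtr values.
    static bool higher_equal(const std::string& a, const std::string& b) {
      return a == b;
    }

    // Lambda forms erase reference argument types to Object; once the
    // concrete target is known, each argument whose observed type does not
    // already satisfy the declared one gets a checked cast (modeling
    // gvn.transform(new CheckCastPPNode(...))).
    static void cast_arguments(std::vector<Value>& args,
                               const std::vector<std::string>& declared,
                               int receiver_skip) {
      for (size_t i = 0; i < declared.size(); i++) {
        Value& v = args[receiver_skip + i];
        if (!higher_equal(v.static_type, declared[i])) {
          v.static_type = declared[i];
        }
      }
    }

    int main() {
      std::vector<Value> args(1);
      args[0].static_type = "java/lang/Object";     // erased by the lambda form
      std::vector<std::string> declared(1, "java/lang/String");
      cast_arguments(args, declared, /*receiver_skip=*/0);
      printf("%s\n", args[0].static_type.c_str());  // java/lang/String
      return 0;
    }
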
 903 
 904 
 905 //------------------------PredicatedIntrinsicGenerator------------------------------
 906 // Internal class which handles all predicated Intrinsic calls.
 907 class PredicatedIntrinsicGenerator : public CallGenerator {
 908   CallGenerator* _intrinsic;

new/src/share/vm/opto/callGenerator.cpp:

 120       _separate_io_proj(separate_io_proj)
 121   {
 122   }
 123   virtual JVMState* generate(JVMState* jvms);
 124 
 125   CallStaticJavaNode* call_node() const { return _call_node; }
 126 };
 127 
 128 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
 129   GraphKit kit(jvms);
 130   kit.C->print_inlining_update(this);
 131   bool is_static = method()->is_static();
 132   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
 133                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
 134 
 135   if (kit.C->log() != NULL) {
 136     kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
 137   }
 138 
 139   CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
 140   call->set_override_symbolic_info(override_symbolic_info());
 141   _call_node = call;  // Save the call node in case we need it later
 142   if (!is_static) {
 143     // Make an explicit receiver null_check as part of this call.
 144     // Since we share a map with the caller, his JVMS gets adjusted.
 145     kit.null_check_receiver_before_call(method());
 146     if (kit.stopped()) {
 147       // And dump it back to the caller, decorated with any exceptions:
 148       return kit.transfer_exceptions_into_jvms();
 149     }
 150     // Mark the call node as virtual, sort of:
 151     call->set_optimized_virtual(true);
 152     if (method()->is_method_handle_intrinsic() ||
 153         method()->is_compiled_lambda_form()) {
 154       call->set_method_handle_invoke(true);
 155     }
 156   }
 157   kit.set_arguments_for_java_call(call);
 158   kit.set_edges_for_java_call(call, false, _separate_io_proj);
 159   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 160   kit.push_node(method()->return_type()->basic_type(), ret);


 176   virtual bool      is_virtual() const          { return true; }
 177   virtual JVMState* generate(JVMState* jvms);
 178 };
 179 
 180 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
 181   GraphKit kit(jvms);
 182   Node* receiver = kit.argument(0);
 183 
 184   kit.C->print_inlining_update(this);
 185 
 186   if (kit.C->log() != NULL) {
 187     kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
 188   }
 189 
 190   // If the receiver is a constant null, do not torture the system
 191   // by attempting to call through it.  The compile will proceed
 192   // correctly, but may bail out in final_graph_reshaping, because
 193   // the call instruction will have a seemingly deficient out-count.
 194   // (The bailout says something misleading about an "infinite loop".)
 195   if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
 196     assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
 197     ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
 198     int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
 199     kit.inc_sp(arg_size);  // restore arguments
 200     kit.uncommon_trap(Deoptimization::Reason_null_check,
 201                       Deoptimization::Action_none,
 202                       NULL, "null receiver");
 203     return kit.transfer_exceptions_into_jvms();
 204   }
 205 
 206   // Ideally we would unconditionally do a null check here and let it
 207   // be converted to an implicit check based on profile information.
 208   // However currently the conversion to implicit null checks in
 209   // Block::implicit_null_check() only looks for loads and stores, not calls.
 210   ciMethod *caller = kit.method();
 211   ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
 212   if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
 213        ((ImplicitNullCheckThreshold > 0) && caller_md &&
 214        (caller_md->trap_count(Deoptimization::Reason_null_check)
 215        >= (uint)ImplicitNullCheckThreshold))) {
 216     // Make an explicit receiver null_check as part of this call.
 217     // Since we share a map with the caller, his JVMS gets adjusted.
 218     receiver = kit.null_check_receiver_before_call(method());
 219     if (kit.stopped()) {
 220       // And dump it back to the caller, decorated with any exceptions:
 221       return kit.transfer_exceptions_into_jvms();
 222     }
 223   }
 224 
 225   assert(!method()->is_static(), "virtual call must not be to static");
 226   assert(!method()->is_final(), "virtual call should not be to final");
 227   assert(!method()->is_private(), "virtual call should not be to private");
 228   assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
 229          "no vtable calls if +UseInlineCaches ");
 230   address target = SharedRuntime::get_resolve_virtual_call_stub();
 231   // Normal inline cache used for call
 232   CallDynamicJavaNode *call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
 233   call->set_override_symbolic_info(override_symbolic_info());
 234   kit.set_arguments_for_java_call(call);
 235   kit.set_edges_for_java_call(call);
 236   Node* ret = kit.set_results_for_java_call(call);
 237   kit.push_node(method()->return_type()->basic_type(), ret);
 238 
 239   // Represent the effect of an implicit receiver null_check
 240   // as part of this call.  Since we share a map with the caller,
 241   // his JVMS gets adjusted.
 242   kit.cast_not_null(receiver);
 243   return kit.transfer_exceptions_into_jvms();
 244 }
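
Note that the new lines 196-199 restore the expression stack using the arg size of the method declared at the call bytecode, not method()->arg_size(). A plausible reading (this rationale and the slot counts below are assumptions, not stated in the source): once a MethodHandle linker call has been lowered, the generator's method() can differ from the method the bytecode names, so the two slot counts need not agree, and only the declared count rewinds the JVMS correctly.

    #include <cassert>

    // Hypothetical slot counts for one call site: the bytecode's declared
    // signature pushed 3 slots, while the generator's method() (a linker
    // adapter with a trailing MemberName) would report 4.
    static const int declared_arg_slots  = 3;
    static const int generator_arg_slots = 4;

    static int restore_sp(int sp) {
      // Rewind by what the bytecode actually pushed; using
      // generator_arg_slots would misalign the deoptimization state.
      return sp + declared_arg_slots;
    }

    int main() {
      assert(restore_sp(0) == 3);
      (void)generator_arg_slots;  // the wrong count, shown for contrast
      return 0;
    }
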
 245 
 246 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
 247   if (InlineTree::check_can_parse(m) != NULL)  return NULL;
 248   return new ParseGenerator(m, expected_uses);
 249 }
 250 
 251 // As a special case, the JVMS passed to this CallGenerator is
 252 // for the method execution already in progress, not just the JVMS
 253 // of the caller.  Thus, this CallGenerator cannot be mixed with others!


 451     if (_input_not_const) {
 452       // Inlining won't be possible, so there is no need to enqueue right now.
 453       call_node()->set_generator(this);
 454     } else {
 455       C->add_late_inline(this);
 456     }
 457     return new_jvms;
 458   }
 459 };
 460 
 461 bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {
 462 
 463   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);
 464 
 465   Compile::current()->print_inlining_update_delayed(this);
 466 
 467   if (!_input_not_const) {
 468     _attempt++;
 469   }
 470 
 471   if (cg != NULL && cg->is_inline()) {
 472     assert(!cg->is_late_inline(), "we're doing late inlining");
 473     _inline_cg = cg;
 474     Compile::current()->dec_number_of_mh_late_inlines();
 475     return true;
 476   }
 477 
 478   call_node()->set_generator(this);
 479   return false;
 480 }
 481 
 482 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
 483   Compile::current()->inc_number_of_mh_late_inlines();
 484   CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
 485   return cg;
 486 }
 487 
 488 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
 489 
 490  public:
 491   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 492     LateInlineCallGenerator(method, inline_cg) {}


 795 
 796 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
 797   GraphKit kit(jvms);
 798   PhaseGVN& gvn = kit.gvn();
 799   Compile* C = kit.C;
 800   vmIntrinsics::ID iid = callee->intrinsic_id();
 801   input_not_const = true;
 802   switch (iid) {
 803   case vmIntrinsics::_invokeBasic:
 804     {
 805       // Get MethodHandle receiver:
 806       Node* receiver = kit.argument(0);
 807       if (receiver->Opcode() == Op_ConP) {
 808         input_not_const = false;
 809         const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
 810         ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
 811         guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
 812         const int vtable_index = Method::invalid_vtable_index;
 813         CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
 814         assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
 815         if (cg != NULL) {
 816           if (!cg->is_inline()) {
 817             // To be able to issue a static call (and skip a call to the MH.invokeBasic adapter),
 818             // additional information about the method being invoked should be attached
 819             // to the call site to make resolution logic work (see SharedRuntime::resolve_static_call_C).
 820             cg->set_override_symbolic_info(true);
 821           }
 822           return cg;
 823         }
 824       } else {
 825         const char* msg = "receiver not constant";
 826         if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
 827         C->log_inline_failure(msg);
 828       }
 829     }
 830     break;
 831 
 832   case vmIntrinsics::_linkToVirtual:
 833   case vmIntrinsics::_linkToStatic:
 834   case vmIntrinsics::_linkToSpecial:
 835   case vmIntrinsics::_linkToInterface:
 836     {
 837       // Get MemberName argument:
 838       Node* member_name = kit.argument(callee->arg_size() - 1);
 839       if (member_name->Opcode() == Op_ConP) {
 840         input_not_const = false;
 841         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
 842         ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
 843 
 844         // In lambda forms we erase signature types to avoid resolution issues
 845         // involving class loaders.  When we optimize a method handle invoke
 846         // to a direct call we must cast the receiver and arguments to their
 847         // actual types.
 848         ciSignature* signature = target->signature();
 849         const int receiver_skip = target->is_static() ? 0 : 1;
 850         // Cast receiver to its type.
 851         if (!target->is_static()) {
 852           Node* arg = kit.argument(0);
 853           const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
 854           const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
 855           if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
 856             Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
 857             kit.set_argument(0, cast_obj);
 858           }
 859         }
 860         // Cast reference arguments to their types.
 861         for (int i = 0; i < signature->count(); i++) {
 862           ciType* t = signature->type_at(i);
 863           if (t->is_klass()) {
 864             Node* arg = kit.argument(receiver_skip + i);


 877         int  vtable_index       = Method::invalid_vtable_index;
 878         bool call_does_dispatch = false;
 879 
 880         ciKlass* speculative_receiver_type = NULL;
 881         if (is_virtual_or_interface) {
 882           ciInstanceKlass* klass = target->holder();
 883           Node*             receiver_node = kit.argument(0);
 884           const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
 885           // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
 886           // optimize_virtual_call() takes 2 different holder
 887           // arguments for a corner case that doesn't apply here (see
 888           // Parse::do_call())
 889           target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
 890                                             target, receiver_type, is_virtual,
 891                                             call_does_dispatch, vtable_index, // out-parameters
 892                                             /*check_access=*/false);
 893           // We lack profiling at this call but type speculation may
 894           // provide us with a type
 895           speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
 896         }
 897         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, /*allow_inline=*/true, PROB_ALWAYS, speculative_receiver_type, true, true);
 898         assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
 899         if (cg != NULL) {
 900           if (!cg->is_inline()) {
 901             // To be able to issue a direct call (static, optimized virtual, or virtual)
 902             // and skip a call to the MH.linkTo* adapter, additional information about the method
 903             // being invoked should be attached to the call site to make resolution logic work
 904             // (see SharedRuntime::resolve_{static,virtual,opt_virtual}_call_C).
 905             cg->set_override_symbolic_info(true);
 906           }
 907           return cg;
 908         }
 909       } else {
 910         const char* msg = "member_name not constant";
 911         if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
 912         C->log_inline_failure(msg);
 913       }
 914     }
 915     break;
 916 
 917   default:
 918     fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
 919     break;
 920   }
 921   return NULL;
 922 }
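
The set_override_symbolic_info(true) calls added in this version can be summarized with a toy model. Everything below (CallSite, resolve_callee, the field names) is an illustrative sketch of the idea, not SharedRuntime's real API: the call site carries the already-resolved callee, and the resolution stubs consult it instead of the symbolic reference at the bci, which would name the MH.linkTo*/invokeBasic adapter rather than the real target.

    #include <cstdio>

    struct Method { const char* name; };

    struct CallSite {
      Method* symbolic_ref;     // what the bytecode names (the MH adapter)
      Method* attached_method;  // resolved target attached by the compiler
      bool    override_symbolic_info;
    };

    // Models the choice made in SharedRuntime::resolve_*_call_C: prefer the
    // attached method when the compiler marked the site as overridden.
    static Method* resolve_callee(const CallSite& cs) {
      return (cs.override_symbolic_info && cs.attached_method != NULL)
                 ? cs.attached_method
                 : cs.symbolic_ref;
    }

    int main() {
      Method adapter = { "MethodHandle::linkToStatic" };
      Method target  = { "MyClass::doWork" };
      CallSite cs = { &adapter, &target, true };
      printf("resolved: %s\n", resolve_callee(cs)->name);  // MyClass::doWork
      return 0;
    }
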
 923 
 924 
 925 //------------------------PredicatedIntrinsicGenerator------------------------------
 926 // Internal class which handles all predicated Intrinsic calls.
 927 class PredicatedIntrinsicGenerator : public CallGenerator {
 928   CallGenerator* _intrinsic;

