src/share/vm/opto/callGenerator.cpp
--- old/src/share/vm/opto/callGenerator.cpp

  29 #include "ci/ciMemberName.hpp"
  30 #include "ci/ciMethodHandle.hpp"
  31 #include "classfile/javaClasses.hpp"
  32 #include "compiler/compileLog.hpp"
  33 #include "opto/addnode.hpp"
  34 #include "opto/callGenerator.hpp"
  35 #include "opto/callnode.hpp"
  36 #include "opto/castnode.hpp"
  37 #include "opto/cfgnode.hpp"
  38 #include "opto/parse.hpp"
  39 #include "opto/rootnode.hpp"
  40 #include "opto/runtime.hpp"
  41 #include "opto/subnode.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 
  44 // Utility function.
  45 const TypeFunc* CallGenerator::tf() const {
  46   return TypeFunc::make(method());
  47 }
  48 





  49 //-----------------------------ParseGenerator---------------------------------
  50 // Internal class which handles all direct bytecode traversal.
  51 class ParseGenerator : public InlineCallGenerator {
  52 private:
  53   bool  _is_osr;
  54   float _expected_uses;
  55 
  56 public:
  57   ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
  58     : InlineCallGenerator(method)
  59   {
  60     _is_osr        = is_osr;
  61     _expected_uses = expected_uses;
  62     assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  63   }
  64 
  65   virtual bool      is_parse() const           { return true; }
  66   virtual JVMState* generate(JVMState* jvms);
  67   int is_osr() { return _is_osr; }
  68 


 120       _separate_io_proj(separate_io_proj)
 121   {
 122   }
 123   virtual JVMState* generate(JVMState* jvms);
 124 
 125   CallStaticJavaNode* call_node() const { return _call_node; }
 126 };
 127 
 128 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
 129   GraphKit kit(jvms);
 130   kit.C->print_inlining_update(this);
 131   bool is_static = method()->is_static();
 132   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
 133                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
 134 
 135   if (kit.C->log() != NULL) {
 136     kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
 137   }
 138 
 139   CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());







 140   _call_node = call;  // Save the call node in case we need it later
 141   if (!is_static) {
 142     // Make an explicit receiver null_check as part of this call.
 143     // Since we share a map with the caller, his JVMS gets adjusted.
 144     kit.null_check_receiver_before_call(method());
 145     if (kit.stopped()) {
 146       // And dump it back to the caller, decorated with any exceptions:
 147       return kit.transfer_exceptions_into_jvms();
 148     }
 149     // Mark the call node as virtual, sort of:
 150     call->set_optimized_virtual(true);
 151     if (method()->is_method_handle_intrinsic() ||
 152         method()->is_compiled_lambda_form()) {
 153       call->set_method_handle_invoke(true);
 154     }
 155   }
 156   kit.set_arguments_for_java_call(call);
 157   kit.set_edges_for_java_call(call, false, _separate_io_proj);
 158   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 159   kit.push_node(method()->return_type()->basic_type(), ret);


 175   virtual bool      is_virtual() const          { return true; }
 176   virtual JVMState* generate(JVMState* jvms);
 177 };
 178 
 179 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
 180   GraphKit kit(jvms);
 181   Node* receiver = kit.argument(0);
 182 
 183   kit.C->print_inlining_update(this);
 184 
 185   if (kit.C->log() != NULL) {
 186     kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
 187   }
 188 
 189   // If the receiver is a constant null, do not torture the system
 190   // by attempting to call through it.  The compile will proceed
 191   // correctly, but may bail out in final_graph_reshaping, because
 192   // the call instruction will have a seemingly deficient out-count.
 193   // (The bailout says something misleading about an "infinite loop".)
 194   if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
 195     kit.inc_sp(method()->arg_size());  // restore arguments



 196     kit.uncommon_trap(Deoptimization::Reason_null_check,
 197                       Deoptimization::Action_none,
 198                       NULL, "null receiver");
 199     return kit.transfer_exceptions_into_jvms();
 200   }
 201 
 202   // Ideally we would unconditionally do a null check here and let it
 203   // be converted to an implicit check based on profile information.
 204   // However currently the conversion to implicit null checks in
 205   // Block::implicit_null_check() only looks for loads and stores, not calls.
 206   ciMethod *caller = kit.method();
 207   ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
 208   if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
 209        ((ImplicitNullCheckThreshold > 0) && caller_md &&
 210        (caller_md->trap_count(Deoptimization::Reason_null_check)
 211        >= (uint)ImplicitNullCheckThreshold))) {
 212     // Make an explicit receiver null_check as part of this call.
 213     // Since we share a map with the caller, his JVMS gets adjusted.
 214     receiver = kit.null_check_receiver_before_call(method());
 215     if (kit.stopped()) {
 216       // And dump it back to the caller, decorated with any exceptions:
 217       return kit.transfer_exceptions_into_jvms();
 218     }
 219   }
 220 
 221   assert(!method()->is_static(), "virtual call must not be to static");
 222   assert(!method()->is_final(), "virtual call should not be to final");
 223   assert(!method()->is_private(), "virtual call should not be to private");
 224   assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
 225          "no vtable calls if +UseInlineCaches ");
 226   address target = SharedRuntime::get_resolve_virtual_call_stub();
 227   // Normal inline cache used for call
 228   CallDynamicJavaNode *call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());







 229   kit.set_arguments_for_java_call(call);
 230   kit.set_edges_for_java_call(call);
 231   Node* ret = kit.set_results_for_java_call(call);
 232   kit.push_node(method()->return_type()->basic_type(), ret);
 233 
 234   // Represent the effect of an implicit receiver null_check
 235   // as part of this call.  Since we share a map with the caller,
 236   // his JVMS gets adjusted.
 237   kit.cast_not_null(receiver);
 238   return kit.transfer_exceptions_into_jvms();
 239 }
 240 
 241 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
 242   if (InlineTree::check_can_parse(m) != NULL)  return NULL;
 243   return new ParseGenerator(m, expected_uses);
 244 }
 245 
 246 // As a special case, the JVMS passed to this CallGenerator is
 247 // for the method execution already in progress, not just the JVMS
 248 // of the caller.  Thus, this CallGenerator cannot be mixed with others!


 446     if (_input_not_const) {
 447       // inlining won't be possible so no need to enqueue right now.
 448       call_node()->set_generator(this);
 449     } else {
 450       C->add_late_inline(this);
 451     }
 452     return new_jvms;
 453   }
 454 };
 455 
 456 bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {
 457 
 458   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);
 459 
 460   Compile::current()->print_inlining_update_delayed(this);
 461 
 462   if (!_input_not_const) {
 463     _attempt++;
 464   }
 465 
 466   if (cg != NULL) {
 467     assert(!cg->is_late_inline() && cg->is_inline(), "we're doing late inlining");
 468     _inline_cg = cg;
 469     Compile::current()->dec_number_of_mh_late_inlines();
 470     return true;
 471   }
 472 
 473   call_node()->set_generator(this);
 474   return false;
 475 }
 476 
 477 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
 478   Compile::current()->inc_number_of_mh_late_inlines();
 479   CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
 480   return cg;
 481 }
 482 
 483 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
 484 
 485  public:
 486   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 487     LateInlineCallGenerator(method, inline_cg) {}


 790 
 791 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
 792   GraphKit kit(jvms);
 793   PhaseGVN& gvn = kit.gvn();
 794   Compile* C = kit.C;
 795   vmIntrinsics::ID iid = callee->intrinsic_id();
 796   input_not_const = true;
 797   switch (iid) {
 798   case vmIntrinsics::_invokeBasic:
 799     {
 800       // Get MethodHandle receiver:
 801       Node* receiver = kit.argument(0);
 802       if (receiver->Opcode() == Op_ConP) {
 803         input_not_const = false;
 804         const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
 805         ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
 806         guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
 807         const int vtable_index = Method::invalid_vtable_index;
 808         CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
 809         assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
 810         if (cg != NULL && cg->is_inline())
 811           return cg;
 812       } else {
 813         const char* msg = "receiver not constant";
 814         if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
 815         C->log_inline_failure(msg);
 816       }
 817     }
 818     break;
 819 
 820   case vmIntrinsics::_linkToVirtual:
 821   case vmIntrinsics::_linkToStatic:
 822   case vmIntrinsics::_linkToSpecial:
 823   case vmIntrinsics::_linkToInterface:
 824     {
 825       // Get MemberName argument:
 826       Node* member_name = kit.argument(callee->arg_size() - 1);
 827       if (member_name->Opcode() == Op_ConP) {
 828         input_not_const = false;
 829         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
 830         ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
 831 
 832         // In lamda forms we erase signature types to avoid resolving issues
 833         // involving class loaders.  When we optimize a method handle invoke
 834         // to a direct call we must cast the receiver and arguments to their
 835         // actual types.
 836         ciSignature* signature = target->signature();
 837         const int receiver_skip = target->is_static() ? 0 : 1;
 838         // Cast receiver to its type.
 839         if (!target->is_static()) {
 840           Node* arg = kit.argument(0);
 841           const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
 842           const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
 843           if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
 844             Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
 845             kit.set_argument(0, cast_obj);
 846           }
 847         }
 848         // Cast reference arguments to their types.
 849         for (int i = 0; i < signature->count(); i++) {
 850           ciType* t = signature->type_at(i);
 851           if (t->is_klass()) {
 852             Node* arg = kit.argument(receiver_skip + i);


 865         int  vtable_index       = Method::invalid_vtable_index;
 866         bool call_does_dispatch = false;
 867 
 868         ciKlass* speculative_receiver_type = NULL;
 869         if (is_virtual_or_interface) {
 870           ciInstanceKlass* klass = target->holder();
 871           Node*             receiver_node = kit.argument(0);
 872           const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
 873           // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
 874           // optimize_virtual_call() takes 2 different holder
 875           // arguments for a corner case that doesn't apply here (see
 876           // Parse::do_call())
 877           target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
 878                                             target, receiver_type, is_virtual,
 879                                             call_does_dispatch, vtable_index, // out-parameters
 880                                             /*check_access=*/false);
 881           // We lack profiling at this call but type speculation may
 882           // provide us with a type
 883           speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
 884         }
 885         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
 886         assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
 887         if (cg != NULL && cg->is_inline())
 888           return cg;
 889       } else {
 890         const char* msg = "member_name not constant";
 891         if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
 892         C->log_inline_failure(msg);
 893       }
 894     }
 895     break;
 896 
 897   default:
 898     fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
 899     break;
 900   }
 901   return NULL;
 902 }
 903 
 904 
 905 //------------------------PredicatedIntrinsicGenerator------------------------------
 906 // Internal class which handles all predicated Intrinsic calls.
 907 class PredicatedIntrinsicGenerator : public CallGenerator {




+++ new/src/share/vm/opto/callGenerator.cpp

  29 #include "ci/ciMemberName.hpp"
  30 #include "ci/ciMethodHandle.hpp"
  31 #include "classfile/javaClasses.hpp"
  32 #include "compiler/compileLog.hpp"
  33 #include "opto/addnode.hpp"
  34 #include "opto/callGenerator.hpp"
  35 #include "opto/callnode.hpp"
  36 #include "opto/castnode.hpp"
  37 #include "opto/cfgnode.hpp"
  38 #include "opto/parse.hpp"
  39 #include "opto/rootnode.hpp"
  40 #include "opto/runtime.hpp"
  41 #include "opto/subnode.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 
  44 // Utility function.
  45 const TypeFunc* CallGenerator::tf() const {
  46   return TypeFunc::make(method());
  47 }
  48 
  49 bool CallGenerator::is_inlined_mh_linker(JVMState* jvms, ciMethod* callee) {
  50   ciMethod* symbolic_info = jvms->method()->get_method_at_bci(jvms->bci());
  51   return symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic();
  52 }
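
The new predicate reads exactly two flags: the call site's symbolic (bytecode-level) target must be a method handle intrinsic while the resolved callee must not be. A minimal standalone model with a stub type in place of ciMethod (an illustration of the invariant, not VM code):

#include <cassert>

// Stand-in for ciMethod: only the flag the predicate consults.
struct StubMethod {
  bool is_mh_intrinsic;  // models ciMethod::is_method_handle_intrinsic()
};

// Models CallGenerator::is_inlined_mh_linker(jvms, callee): true exactly when
// the bytecode-level (symbolic) target is an MH linker/invoker intrinsic but
// the resolved callee is an ordinary method, i.e. the linker was inlined away.
static bool is_inlined_mh_linker_model(const StubMethod& symbolic_info,
                                       const StubMethod& callee) {
  return symbolic_info.is_mh_intrinsic && !callee.is_mh_intrinsic;
}

int main() {
  StubMethod linker{true};    // e.g. MethodHandle.linkToStatic
  StubMethod concrete{false}; // the vmtarget extracted from the MemberName
  assert(is_inlined_mh_linker_model(linker, concrete));    // devirtualized linker
  assert(!is_inlined_mh_linker_model(concrete, concrete)); // plain direct call
  assert(!is_inlined_mh_linker_model(linker, linker));     // intrinsic kept as-is
  return 0;
}

Both generate() methods below consult this predicate before tagging the call node.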
  53 
  54 //-----------------------------ParseGenerator---------------------------------
  55 // Internal class which handles all direct bytecode traversal.
  56 class ParseGenerator : public InlineCallGenerator {
  57 private:
  58   bool  _is_osr;
  59   float _expected_uses;
  60 
  61 public:
  62   ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
  63     : InlineCallGenerator(method)
  64   {
  65     _is_osr        = is_osr;
  66     _expected_uses = expected_uses;
  67     assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  68   }
  69 
  70   virtual bool      is_parse() const           { return true; }
  71   virtual JVMState* generate(JVMState* jvms);
  72   int is_osr() { return _is_osr; }
  73 


 125       _separate_io_proj(separate_io_proj)
 126   {
 127   }
 128   virtual JVMState* generate(JVMState* jvms);
 129 
 130   CallStaticJavaNode* call_node() const { return _call_node; }
 131 };
 132 
 133 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
 134   GraphKit kit(jvms);
 135   kit.C->print_inlining_update(this);
 136   bool is_static = method()->is_static();
 137   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
 138                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
 139 
 140   if (kit.C->log() != NULL) {
 141     kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
 142   }
 143 
 144   CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
 145   if (is_inlined_mh_linker(jvms, method())) {
 146     // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
 147     // additional information about the method being invoked should be attached
 148     // to the call site to make resolution logic work
 149     // (see SharedRuntime::resolve_static_call_C).
 150     call->set_override_symbolic_info(true);
 151   }
 152   _call_node = call;  // Save the call node in case we need it later
 153   if (!is_static) {
 154     // Make an explicit receiver null_check as part of this call.
 155     // Since we share a map with the caller, his JVMS gets adjusted.
 156     kit.null_check_receiver_before_call(method());
 157     if (kit.stopped()) {
 158       // And dump it back to the caller, decorated with any exceptions:
 159       return kit.transfer_exceptions_into_jvms();
 160     }
 161     // Mark the call node as virtual, sort of:
 162     call->set_optimized_virtual(true);
 163     if (method()->is_method_handle_intrinsic() ||
 164         method()->is_compiled_lambda_form()) {
 165       call->set_method_handle_invoke(true);
 166     }
 167   }
 168   kit.set_arguments_for_java_call(call);
 169   kit.set_edges_for_java_call(call, false, _separate_io_proj);
 170   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 171   kit.push_node(method()->return_type()->basic_type(), ret);
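
For context on the new block above: the resolution stubs normally re-resolve the constant-pool reference at the call's bci, which for an inlined linker names the MH.linkTo*/invokeBasic adapter rather than the concrete target; the override flag tells resolution to trust the method attached to the call site instead. The toy model below (CallSite and resolve are invented names; see SharedRuntime::resolve_static_call_C for the real logic) sketches that effect:

#include <cassert>
#include <string>

// Toy model of what set_override_symbolic_info(true) buys the resolution stub.
struct CallSite {
  std::string symbolic_ref;  // what the constant pool names at this bci
  std::string attached;      // the concrete target the compiler selected
  bool override_symbolic;    // models CallJavaNode::set_override_symbolic_info
};

static std::string resolve(const CallSite& cs) {
  // With the flag set, resolution uses the attached method instead of
  // re-resolving the symbolic reference (which would name the adapter).
  return cs.override_symbolic ? cs.attached : cs.symbolic_ref;
}

int main() {
  CallSite cs{"MethodHandle::linkToStatic", "Foo::bar", true};
  assert(resolve(cs) == "Foo::bar");                      // direct call, adapter skipped
  cs.override_symbolic = false;
  assert(resolve(cs) == "MethodHandle::linkToStatic");    // old behavior
  return 0;
}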


 187   virtual bool      is_virtual() const          { return true; }
 188   virtual JVMState* generate(JVMState* jvms);
 189 };
 190 
 191 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
 192   GraphKit kit(jvms);
 193   Node* receiver = kit.argument(0);
 194 
 195   kit.C->print_inlining_update(this);
 196 
 197   if (kit.C->log() != NULL) {
 198     kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
 199   }
 200 
 201   // If the receiver is a constant null, do not torture the system
 202   // by attempting to call through it.  The compile will proceed
 203   // correctly, but may bail out in final_graph_reshaping, because
 204   // the call instruction will have a seemingly deficient out-count.
 205   // (The bailout says something misleading about an "infinite loop".)
 206   if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
 207     assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
 208     ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
 209     int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
 210     kit.inc_sp(arg_size);  // restore arguments
 211     kit.uncommon_trap(Deoptimization::Reason_null_check,
 212                       Deoptimization::Action_none,
 213                       NULL, "null receiver");
 214     return kit.transfer_exceptions_into_jvms();
 215   }
 216 
 217   // Ideally we would unconditionally do a null check here and let it
 218   // be converted to an implicit check based on profile information.
 219   // However currently the conversion to implicit null checks in
 220   // Block::implicit_null_check() only looks for loads and stores, not calls.
 221   ciMethod *caller = kit.method();
 222   ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
 223   if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
 224        ((ImplicitNullCheckThreshold > 0) && caller_md &&
 225        (caller_md->trap_count(Deoptimization::Reason_null_check)
 226        >= (uint)ImplicitNullCheckThreshold))) {
 227     // Make an explicit receiver null_check as part of this call.
 228     // Since we share a map with the caller, his JVMS gets adjusted.
 229     receiver = kit.null_check_receiver_before_call(method());
 230     if (kit.stopped()) {
 231       // And dump it back to the caller, decorated with any exceptions:
 232       return kit.transfer_exceptions_into_jvms();
 233     }
 234   }
 235 
 236   assert(!method()->is_static(), "virtual call must not be to static");
 237   assert(!method()->is_final(), "virtual call should not be to final");
 238   assert(!method()->is_private(), "virtual call should not be to private");
 239   assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
 240          "no vtable calls if +UseInlineCaches ");
 241   address target = SharedRuntime::get_resolve_virtual_call_stub();
 242   // Normal inline cache used for call
 243   CallDynamicJavaNode *call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
 244   if (is_inlined_mh_linker(jvms, method())) {
 245     // To be able to issue a direct call (optimized virtual or virtual)
 246     // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
 247     // about the method being invoked should be attached to the call site to
 248     // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
 249     call->set_override_symbolic_info(true);
 250   }
 251   kit.set_arguments_for_java_call(call);
 252   kit.set_edges_for_java_call(call);
 253   Node* ret = kit.set_results_for_java_call(call);
 254   kit.push_node(method()->return_type()->basic_type(), ret);
 255 
 256   // Represent the effect of an implicit receiver null_check
 257   // as part of this call.  Since we share a map with the caller,
 258   // his JVMS gets adjusted.
 259   kit.cast_not_null(receiver);
 260   return kit.transfer_exceptions_into_jvms();
 261 }
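
A note on the reworked null-receiver path in this function: the old code restored the caller's stack with method()->arg_size(), but once symbolic info can be overridden the resolved callee (the vmtarget) and the linker intrinsic named by the bytecode may push different numbers of argument slots, so the new code asks the signature declared at the call site via arg_size_for_bc(). A toy calculation under simplified assumptions (long/double slot widths ignored; names are illustrative):

#include <cassert>

// Simplified model of ciSignature::arg_size_for_bc(): slots popped by an
// invoke bytecode = declared argument count, plus one receiver slot for the
// non-static invoke forms.
enum Bytecode { kInvokeStatic, kInvokeVirtual };

static int arg_size_for_bc(int declared_args, Bytecode bc) {
  return declared_args + (bc == kInvokeStatic ? 0 : 1);
}

int main() {
  // At the call site: invokestatic MethodHandle.linkToVirtual(recv, MemberName)
  // pops 2 slots. The resolved vmtarget, say a no-arg instance method, would
  // report arg_size() == 1 (receiver only) -- the wrong amount to restore.
  int site_slots   = arg_size_for_bc(/*declared_args=*/2, kInvokeStatic);   // 2
  int target_slots = arg_size_for_bc(/*declared_args=*/0, kInvokeVirtual);  // 1
  assert(site_slots != target_slots);  // hence declared_method, not method()
  return 0;
}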
 262 
 263 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
 264   if (InlineTree::check_can_parse(m) != NULL)  return NULL;
 265   return new ParseGenerator(m, expected_uses);
 266 }
 267 
 268 // As a special case, the JVMS passed to this CallGenerator is
 269 // for the method execution already in progress, not just the JVMS
 270 // of the caller.  Thus, this CallGenerator cannot be mixed with others!


 468     if (_input_not_const) {
 469       // inlining won't be possible so no need to enqueue right now.
 470       call_node()->set_generator(this);
 471     } else {
 472       C->add_late_inline(this);
 473     }
 474     return new_jvms;
 475   }
 476 };
 477 
 478 bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {
 479 
 480   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);
 481 
 482   Compile::current()->print_inlining_update_delayed(this);
 483 
 484   if (!_input_not_const) {
 485     _attempt++;
 486   }
 487 
 488   if (cg != NULL && cg->is_inline()) {
 489     assert(!cg->is_late_inline(), "we're doing late inlining");
 490     _inline_cg = cg;
 491     Compile::current()->dec_number_of_mh_late_inlines();
 492     return true;
 493   }
 494 
 495   call_node()->set_generator(this);
 496   return false;
 497 }
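
Relative to the old version, the is_inline() test has moved out of the assert and into the branch condition, so a non-NULL but non-inlineable generator (now possible since for_method_handle_inline returns cg unconditionally, see below) falls through to re-registration instead of firing the assert. A schematic restatement with stub types (illustrative only):

#include <cassert>

struct StubCG { bool inline_ok; };  // stand-in for CallGenerator::is_inline()

// Models the revised do_late_inline_check(): commit only to an inlineable
// generator; otherwise keep this late-inline generator parked on the call
// node so the decision can be revisited (or abandoned) later.
static bool late_inline_check_model(const StubCG* cg) {
  if (cg != nullptr && cg->inline_ok) {
    return true;   // corresponds to: _inline_cg = cg; dec_number_of_mh_late_inlines()
  }
  return false;    // corresponds to: call_node()->set_generator(this)
}

int main() {
  StubCG inlineable{true}, direct_call{false};
  assert(late_inline_check_model(&inlineable));
  assert(!late_inline_check_model(&direct_call));  // old code asserted here
  assert(!late_inline_check_model(nullptr));
  return 0;
}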
 498 
 499 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
 500   Compile::current()->inc_number_of_mh_late_inlines();
 501   CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
 502   return cg;
 503 }
 504 
 505 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
 506 
 507  public:
 508   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 509     LateInlineCallGenerator(method, inline_cg) {}


 812 
 813 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
 814   GraphKit kit(jvms);
 815   PhaseGVN& gvn = kit.gvn();
 816   Compile* C = kit.C;
 817   vmIntrinsics::ID iid = callee->intrinsic_id();
 818   input_not_const = true;
 819   switch (iid) {
 820   case vmIntrinsics::_invokeBasic:
 821     {
 822       // Get MethodHandle receiver:
 823       Node* receiver = kit.argument(0);
 824       if (receiver->Opcode() == Op_ConP) {
 825         input_not_const = false;
 826         const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
 827         ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
 828         guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
 829         const int vtable_index = Method::invalid_vtable_index;
 830         CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
 831         assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");

 832         return cg;
 833       } else {
 834         const char* msg = "receiver not constant";
 835         if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
 836         C->log_inline_failure(msg);
 837       }
 838     }
 839     break;
 840 
 841   case vmIntrinsics::_linkToVirtual:
 842   case vmIntrinsics::_linkToStatic:
 843   case vmIntrinsics::_linkToSpecial:
 844   case vmIntrinsics::_linkToInterface:
 845     {
 846       // Get MemberName argument:
 847       Node* member_name = kit.argument(callee->arg_size() - 1);
 848       if (member_name->Opcode() == Op_ConP) {
 849         input_not_const = false;
 850         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
 851         ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
 852 
 853         // In lambda forms we erase signature types to avoid resolving issues
 854         // involving class loaders.  When we optimize a method handle invoke
 855         // to a direct call we must cast the receiver and arguments to their
 856         // actual types.
 857         ciSignature* signature = target->signature();
 858         const int receiver_skip = target->is_static() ? 0 : 1;
 859         // Cast receiver to its type.
 860         if (!target->is_static()) {
 861           Node* arg = kit.argument(0);
 862           const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
 863           const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
 864           if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
 865             Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
 866             kit.set_argument(0, cast_obj);
 867           }
 868         }
 869         // Cast reference arguments to their types.
 870         for (int i = 0; i < signature->count(); i++) {
 871           ciType* t = signature->type_at(i);
 872           if (t->is_klass()) {
 873             Node* arg = kit.argument(receiver_skip + i);
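
The guard around each CheckCastPPNode above hinges on higher_equal(): a cast is inserted only when the argument's known type is not already at least as precise as the type the erased lambda-form signature demands. A toy lattice check under an assumed three-class hierarchy (an illustration, not the C2 type system):

#include <cassert>
#include <map>
#include <string>

// Hypothetical mini-hierarchy: child -> parent ("" marks the root).
static const std::map<std::string, std::string> kParent = {
  {"Object", ""}, {"Number", "Object"}, {"Integer", "Number"}};

// Models Type::higher_equal() for this use: does `a` carry at least as much
// type information as `b`, i.e. is a a subtype of (or equal to) b?
static bool higher_equal(std::string a, const std::string& b) {
  for (; !a.empty(); a = kParent.at(a))
    if (a == b) return true;
  return false;
}

// Models the guard around CheckCastPPNode: cast only when needed.
static bool needs_cast(const std::string& arg_type, const std::string& sig_type) {
  return !higher_equal(arg_type, sig_type);
}

int main() {
  assert(!needs_cast("Integer", "Number"));  // already precise enough, no cast
  assert(needs_cast("Object", "Number"));    // erased slot: insert CheckCastPP
  return 0;
}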


 886         int  vtable_index       = Method::invalid_vtable_index;
 887         bool call_does_dispatch = false;
 888 
 889         ciKlass* speculative_receiver_type = NULL;
 890         if (is_virtual_or_interface) {
 891           ciInstanceKlass* klass = target->holder();
 892           Node*             receiver_node = kit.argument(0);
 893           const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
 894           // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
 895           // optimize_virtual_call() takes 2 different holder
 896           // arguments for a corner case that doesn't apply here (see
 897           // Parse::do_call())
 898           target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
 899                                             target, receiver_type, is_virtual,
 900                                             call_does_dispatch, vtable_index, // out-parameters
 901                                             /*check_access=*/false);
 902           // We lack profiling at this call but type speculation may
 903           // provide us with a type
 904           speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
 905         }
 906         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, /*allow_inline=*/true, PROB_ALWAYS, speculative_receiver_type, true, true);
 907         assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");

 908         return cg;
 909       } else {
 910         const char* msg = "member_name not constant";
 911         if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
 912         C->log_inline_failure(msg);
 913       }
 914     }
 915     break;
 916 
 917   default:
 918     fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
 919     break;
 920   }
 921   return NULL;
 922 }
 923 
 924 
 925 //------------------------PredicatedIntrinsicGenerator------------------------------
 926 // Internal class which handles all predicated Intrinsic calls.
 927 class PredicatedIntrinsicGenerator : public CallGenerator {

