< prev index next >

src/share/vm/opto/callGenerator.cpp

Print this page




 100   GraphKit& exits = parser.exits();
 101 
 102   if (C->failing()) {
 103     while (exits.pop_exception_state() != NULL) ;
 104     return NULL;
 105   }
 106 
 107   assert(exits.jvms()->same_calls_as(jvms), "sanity");
 108 
 109   // Simply return the exit state of the parser,
 110   // augmented by any exceptional states.
 111   return exits.transfer_exceptions_into_jvms();
 112 }
 113 
 114 //---------------------------DirectCallGenerator------------------------------
 115 // Internal class which handles all out-of-line calls w/o receiver type checks.
 116 class DirectCallGenerator : public CallGenerator {
 117  private:
       // Call node built by generate(); late inlining uses it to locate and
       // replace this call.
 118   CallStaticJavaNode* _call_node;
 119   // Force separate memory and I/O projections for the exceptional
 120   // paths to facilitate late inlining.
 121   bool                _separate_io_proj;
 122 
 123  public:
 124   DirectCallGenerator(ciMethod* method, bool separate_io_proj)
 125     : CallGenerator(method),
 126       _separate_io_proj(separate_io_proj)
 127   {









 128   }
 129   virtual JVMState* generate(JVMState* jvms);
 130 
       // NOTE(review): _call_node is not initialized by the constructor;
       // presumably it is only valid after generate() has run — confirm.
 131   CallStaticJavaNode* call_node() const { return _call_node; }
 132 };
 133 
 134 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
 135   GraphKit kit(jvms);
 136   kit.C->print_inlining_update(this);
 137   PhaseGVN& gvn = kit.gvn();
 138   bool is_static = method()->is_static();
       // Static calls and optimized-virtual calls resolve through different
       // runtime stubs.
 139   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
 140                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
 141 
 142   if (kit.C->log() != NULL) {
 143     kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
 144   }
 145 
 146   CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
       // A direct call that stands in for an inlined MethodHandle intrinsic
       // still needs virtual-call decoration (receiver null check, flags).
 147   if (is_inlined_method_handle_intrinsic(jvms, method())) {


       // A ValueType node can never be null, so no explicit check is needed.
 156     if (!kit.argument(0)->is_ValueType()) {
 157       // Make an explicit receiver null_check as part of this call.
 158       // Since we share a map with the caller, his JVMS gets adjusted.
 159       kit.null_check_receiver_before_call(method());
 160     }
 161     if (kit.stopped()) {
 162       // And dump it back to the caller, decorated with any exceptions:
 163       return kit.transfer_exceptions_into_jvms();
 164     }
 165     // Mark the call node as virtual, sort of:
 166     call->set_optimized_virtual(true);
 167     if (method()->is_method_handle_intrinsic() ||
 168         method()->is_compiled_lambda_form()) {
 169       call->set_method_handle_invoke(true);
 170     }
 171   }
 172   kit.set_arguments_for_java_call(call);
 173   kit.set_edges_for_java_call(call, false, _separate_io_proj);
 174   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 175   // Check if return value is a value type pointer
 176   if (gvn.type(ret)->isa_valuetypeptr()) {


 177     // Create ValueTypeNode from the oop and replace the return value
 178     Node* vt = ValueTypeNode::make(gvn, kit.merged_memory(), ret);
 179     kit.push_node(T_VALUETYPE, vt);
 180   } else {



       // Ordinary (non value type) return value: push as its basic type.
 181     kit.push_node(method()->return_type()->basic_type(), ret);
 182   }
       // Hand the resulting state, plus any exception states, back to the caller.
 183   return kit.transfer_exceptions_into_jvms();
 184 }
 185 
 186 //--------------------------VirtualCallGenerator------------------------------
 187 // Internal class which handles all out-of-line calls checking receiver type.
 188 class VirtualCallGenerator : public CallGenerator {
 189 private:
       // Vtable slot to dispatch through, or Method::invalid_vtable_index
       // when the receiver type requires full (itable/IC) dispatch.
 190   int _vtable_index;
 191 public:
 192   VirtualCallGenerator(ciMethod* method, int vtable_index)
 193     : CallGenerator(method), _vtable_index(vtable_index)
 194   {
 195     assert(vtable_index == Method::invalid_vtable_index ||
 196            vtable_index >= 0, "either invalid or usable");
 197   }
 198   virtual bool      is_virtual() const          { return true; }
 199   virtual JVMState* generate(JVMState* jvms);
 200 };


 412 
 413   // Make enough space in the expression stack to transfer
 414   // the incoming arguments and return value.
 415   map->ensure_stack(jvms, jvms->method()->max_stack());
 416   const TypeTuple *domain_sig = call->_tf->domain_sig();
 417   uint nargs = method()->arg_size();
 418   assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");
 419 
 420   uint j = TypeFunc::Parms;
 421   for (uint i1 = 0; i1 < nargs; i1++) {
 422     const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
 423     if (!ValueTypePassFieldsAsArgs) {
 424       Node* arg = call->in(TypeFunc::Parms + i1);
 425       if (t->isa_valuetypeptr()) {
 426         arg = ValueTypeNode::make(gvn, map->memory(), arg);
 427       }
 428       map->set_argument(jvms, i1, arg);
 429     } else {
 430       if (t->isa_valuetypeptr() && t->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
 431         ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
 432         Node* vt = C->create_vt_node(call, vk, vk, 0, j);
 433         map->set_argument(jvms, i1, gvn.transform(vt));
 434         j += vk->value_arg_slots();
 435       } else {
 436         map->set_argument(jvms, i1, call->in(j));
 437         j++;
 438       }
 439     }
 440   }
 441 
 442   C->print_inlining_assert_ready();
 443 
 444   C->print_inlining_move_to(this);
 445 
 446   C->log_late_inline(this);
 447 
 448   // This check is done here because for_method_handle_inline() method
 449   // needs jvms for inlined state.
 450   if (!do_late_inline_check(jvms)) {
 451     map->disconnect_inputs(NULL, C);
 452     return;
 453   }
 454 
 455   // Setup default node notes to be picked up by the inlining
 456   Node_Notes* old_nn = C->node_notes_at(call->_idx);
 457   if (old_nn != NULL) {
 458     Node_Notes* entry_nn = old_nn->clone(C);
 459     entry_nn->set_jvms(jvms);
 460     C->set_default_node_notes(entry_nn);
 461   }
 462 
 463   // Now perform the inlining using the synthesized JVMState
 464   JVMState* new_jvms = _inline_cg->generate(jvms);
 465   if (new_jvms == NULL)  return;  // no change
 466   if (C->failing())      return;
 467 
 468   // Capture any exceptional control flow
 469   GraphKit kit(new_jvms);
 470 
 471   // Find the result object
 472   Node* result = C->top();
 473   int   result_size = method()->return_type()->size();

 474   if (result_size != 0 && !kit.stopped()) {
 475     result = (result_size == 1) ? kit.pop() : kit.pop_pair();
 476   }
 477 
 478   C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
 479   C->env()->notice_inlined_method(_inline_cg->method());
 480   C->set_inlining_progress(true);
 481 

 482   if (result->is_ValueType()) {

 483     result = result->as_ValueType()->store_to_memory(&kit);


















 484   }
 485 
 486   kit.replace_call(call, result, true);
 487 }
 488 
 489 
 490 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
       // Wrap inline_cg so the actual inlining of 'method' is deferred until
       // after parsing, when more of the graph is known.
 491   return new LateInlineCallGenerator(method, inline_cg);
 492 }
 493 
     // Late-inline generator for MethodHandle call sites: retries
     // for_method_handle_inline() once more information is available.
 494 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
 495   ciMethod* _caller;          // method containing the MH call site
 496   int _attempt;               // number of late-inline attempts so far
 497   bool _input_not_const;      // MH receiver/MemberName was not a constant
 498 
 499   virtual bool do_late_inline_check(JVMState* jvms);
 500   virtual bool already_attempted() const { return _attempt > 0; }
 501 
 502  public:
 503   LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
 504     LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}
 505 
 506   virtual bool is_mh_late_inline() const { return true; }
 507 
 508   virtual JVMState* generate(JVMState* jvms) {
       // Emit the out-of-line call now; register this generator so the call
       // can be revisited for inlining later.
 509     JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
 510 
 511     Compile* C = Compile::current();
 512     if (_input_not_const) {
 513       // inlining won't be possible so no need to enqueue right now.
 514       call_node()->set_generator(this);
 515     } else {
       // Constant input: queue for the incremental-inlining pass.
 516       C->add_late_inline(this);
 517     }
 518     return new_jvms;
 519   }
 520 };
 521 
     // Re-attempt MethodHandle inlining for this call site.  Returns true
     // (and installs _inline_cg) if an inline generator was found.
 522 bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {
 523 
 524   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);
 525 
 526   Compile::current()->print_inlining_update_delayed(this);
 527 
       // Only count attempts when the input was constant; non-constant
       // inputs can never succeed, so attempts are not charged.
 528   if (!_input_not_const) {
 529     _attempt++;
 530   }
 531 
 532   if (cg != NULL && cg->is_inline()) {
 533     assert(!cg->is_late_inline(), "we're doing late inlining");
 534     _inline_cg = cg;
 535     Compile::current()->dec_number_of_mh_late_inlines();
 536     return true;
 537   }
 538 
       // Keep the generator attached so a later pass may try again.
 539   call_node()->set_generator(this);
 540   return false;
 541 }
 542 
 543 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
       // Track outstanding MH late inlines; decremented when inlining succeeds
       // (see do_late_inline_check).
 544   Compile::current()->inc_number_of_mh_late_inlines();
 545   CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
 546   return cg;
 547 }
 548 
 549 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
 550 
 551  public:
 552   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :


 814     if (i == tos) {
 815       i = kit.jvms()->monoff();
 816       if( i >= limit ) break;
 817     }
 818     Node* m = kit.map()->in(i);
 819     Node* n = slow_map->in(i);
 820     if (m != n) {
 821       const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
 822       Node* phi = PhiNode::make(region, m, t);
 823       phi->set_req(2, n);
 824       kit.map()->set_req(i, gvn.transform(phi));
 825     }
 826   }
 827   return kit.transfer_exceptions_into_jvms();
 828 }
 829 
 830 
     // Choose a generator for a MethodHandle intrinsic call site: try to
     // inline immediately, else fall back to late inlining or a direct call.
 831 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
 832   assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
 833   bool input_not_const;
 834   CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
 835   Compile* C = Compile::current();
 836   if (cg != NULL) {
       // Inlining is possible now; optionally defer it when incremental
       // inlining is forced and the caller allows delaying.
 837     if (!delayed_forbidden && AlwaysIncrementalInline) {
 838       return CallGenerator::for_late_inline(callee, cg);
 839     } else {
 840       return cg;
 841     }
 842   }
       // No inline generator yet: decide between a late retry and an
       // out-of-line call based on profile count and inlining budget.
 843   int bci = jvms->bci();
 844   ciCallProfile profile = caller->call_profile_at_bci(bci);
 845   int call_site_count = caller->scale_count(profile.count());
 846 
 847   if (IncrementalInline && call_site_count > 0 &&
 848       (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
 849     return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
 850   } else {
 851     // Out-of-line call.
 852     return CallGenerator::for_direct_call(callee);
 853   }
 854 }
 855 
 856 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
 857   GraphKit kit(jvms);
 858   PhaseGVN& gvn = kit.gvn();
 859   Compile* C = kit.C;
 860   vmIntrinsics::ID iid = callee->intrinsic_id();
 861   input_not_const = true;
 862   switch (iid) {
 863   case vmIntrinsics::_invokeBasic:
 864     {
 865       // Get MethodHandle receiver:
 866       Node* receiver = kit.argument(0);
 867       if (receiver->Opcode() == Op_ConP) {
 868         input_not_const = false;
 869         const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
 870         ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
 871         const int vtable_index = Method::invalid_vtable_index;
 872 
 873         if (!ciMethod::is_consistent_info(callee, target)) {
 874           print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
 875                                  "signatures mismatch");
 876           return NULL;
 877         }
 878 
 879         CallGenerator* cg = C->call_generator(target, vtable_index,
 880                                               false /* call_does_dispatch */,
 881                                               jvms,
 882                                               true /* allow_inline */,
 883                                               PROB_ALWAYS);



 884         return cg;
 885       } else {
 886         print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
 887                                "receiver not constant");
 888       }
 889     }
 890     break;
 891 
 892   case vmIntrinsics::_linkToVirtual:
 893   case vmIntrinsics::_linkToStatic:
 894   case vmIntrinsics::_linkToSpecial:
 895   case vmIntrinsics::_linkToInterface:
 896     {
 897       // Get MemberName argument:
 898       Node* member_name = kit.argument(callee->arg_size() - 1);
 899       if (member_name->Opcode() == Op_ConP) {
 900         input_not_const = false;
 901         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
 902         ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
 903 


 947         ciKlass* speculative_receiver_type = NULL;
 948         if (is_virtual_or_interface) {
 949           ciInstanceKlass* klass = target->holder();
 950           Node*             receiver_node = kit.argument(0);
 951           const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
 952           // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
 953           // optimize_virtual_call() takes 2 different holder
 954           // arguments for a corner case that doesn't apply here (see
 955           // Parse::do_call())
 956           target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
 957                                             target, receiver_type, is_virtual,
 958                                             call_does_dispatch, vtable_index, // out-parameters
 959                                             false /* check_access */);
 960           // We lack profiling at this call but type speculation may
 961           // provide us with a type
 962           speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
 963         }
 964         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
 965                                               true /* allow_inline */,
 966                                               PROB_ALWAYS,
 967                                               speculative_receiver_type);


 968         return cg;
 969       } else {
 970         print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
 971                                "member_name not constant");
 972       }
 973     }
 974     break;
 975 
 976   default:
 977     fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
 978     break;
 979   }
 980   return NULL;
 981 }
 982 
 983 
 984 //------------------------PredicatedIntrinsicGenerator------------------------------
 985 // Internal class which handles all predicated Intrinsic calls.
 986 class PredicatedIntrinsicGenerator : public CallGenerator {
 987   CallGenerator* _intrinsic;




 100   GraphKit& exits = parser.exits();
 101 
 102   if (C->failing()) {
 103     while (exits.pop_exception_state() != NULL) ;
 104     return NULL;
 105   }
 106 
 107   assert(exits.jvms()->same_calls_as(jvms), "sanity");
 108 
 109   // Simply return the exit state of the parser,
 110   // augmented by any exceptional states.
 111   return exits.transfer_exceptions_into_jvms();
 112 }
 113 
 114 //---------------------------DirectCallGenerator------------------------------
 115 // Internal class which handles all out-of-line calls w/o receiver type checks.
 116 class DirectCallGenerator : public CallGenerator {
 117  private:
       // Call node built by generate(); late inlining uses it to locate and
       // replace this call.
 118   CallStaticJavaNode* _call_node;
 119   // Force separate memory and I/O projections for the exceptional
 120   // paths to facilitate late inlining.
 121   bool                _separate_io_proj;
 122 
 123  public:
 124   DirectCallGenerator(ciMethod* method, bool separate_io_proj)
 125     : CallGenerator(method),
 126       _separate_io_proj(separate_io_proj)
 127   {
 128     if (method->is_method_handle_intrinsic() &&
 129         method->signature()->return_type() == ciEnv::current()->___Value_klass()) {
 130       // If that call has not been optimized by the time optimizations
 131       // are over, we'll need to add a call to create a value type
 132       // instance from the klass returned by the call. Separating
 133       // memory and I/O projections for exceptions is required to
 134       // perform that graph transformation.
 135       _separate_io_proj = true;
 136     }
 137   }
 138   virtual JVMState* generate(JVMState* jvms);
 139 
       // NOTE(review): _call_node is not initialized by the constructor;
       // presumably it is only valid after generate() has run — confirm.
 140   CallStaticJavaNode* call_node() const { return _call_node; }
 141 };
 142 
 143 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
 144   GraphKit kit(jvms);
 145   kit.C->print_inlining_update(this);
 146   PhaseGVN& gvn = kit.gvn();
 147   bool is_static = method()->is_static();
       // Static calls and optimized-virtual calls resolve through different
       // runtime stubs.
 148   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
 149                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
 150 
 151   if (kit.C->log() != NULL) {
 152     kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
 153   }
 154 
 155   CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
       // A direct call that stands in for an inlined MethodHandle intrinsic
       // still needs virtual-call decoration (receiver null check, flags).
 156   if (is_inlined_method_handle_intrinsic(jvms, method())) {


       // A ValueType node can never be null, so no explicit check is needed.
 165     if (!kit.argument(0)->is_ValueType()) {
 166       // Make an explicit receiver null_check as part of this call.
 167       // Since we share a map with the caller, his JVMS gets adjusted.
 168       kit.null_check_receiver_before_call(method());
 169     }
 170     if (kit.stopped()) {
 171       // And dump it back to the caller, decorated with any exceptions:
 172       return kit.transfer_exceptions_into_jvms();
 173     }
 174     // Mark the call node as virtual, sort of:
 175     call->set_optimized_virtual(true);
 176     if (method()->is_method_handle_intrinsic() ||
 177         method()->is_compiled_lambda_form()) {
 178       call->set_method_handle_invoke(true);
 179     }
 180   }
 181   kit.set_arguments_for_java_call(call);
 182   kit.set_edges_for_java_call(call, false, _separate_io_proj);
 183   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 184   // Check if return value is a value type pointer
 185   const TypeValueTypePtr* vtptr = gvn.type(ret)->isa_valuetypeptr();
 186   if (vtptr != NULL) {
       // __Value (the root value type) stays an oop; concrete value types
       // are re-materialized as a ValueTypeNode.
 187     if (vtptr->klass() != kit.C->env()->___Value_klass()) {
 188       // Create ValueTypeNode from the oop and replace the return value
 189       Node* vt = ValueTypeNode::make(gvn, kit.merged_memory(), ret);
 190       kit.push_node(T_VALUETYPE, vt);
 191     } else {
 192       kit.push_node(T_VALUETYPE, ret);
 193     }
 194   } else {
       // Ordinary (non value type) return value: push as its basic type.
 195     kit.push_node(method()->return_type()->basic_type(), ret);
 196   }
       // Hand the resulting state, plus any exception states, back to the caller.
 197   return kit.transfer_exceptions_into_jvms();
 198 }
 199 
 200 //--------------------------VirtualCallGenerator------------------------------
 201 // Internal class which handles all out-of-line calls checking receiver type.
 202 class VirtualCallGenerator : public CallGenerator {
 203 private:
       // Vtable slot to dispatch through, or Method::invalid_vtable_index
       // when the receiver type requires full (itable/IC) dispatch.
 204   int _vtable_index;
 205 public:
 206   VirtualCallGenerator(ciMethod* method, int vtable_index)
 207     : CallGenerator(method), _vtable_index(vtable_index)
 208   {
 209     assert(vtable_index == Method::invalid_vtable_index ||
 210            vtable_index >= 0, "either invalid or usable");
 211   }
 212   virtual bool      is_virtual() const          { return true; }
 213   virtual JVMState* generate(JVMState* jvms);
 214 };


 426 
 427   // Make enough space in the expression stack to transfer
 428   // the incoming arguments and return value.
 429   map->ensure_stack(jvms, jvms->method()->max_stack());
 430   const TypeTuple *domain_sig = call->_tf->domain_sig();
 431   uint nargs = method()->arg_size();
 432   assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");
 433 
 434   uint j = TypeFunc::Parms;
 435   for (uint i1 = 0; i1 < nargs; i1++) {
 436     const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
 437     if (!ValueTypePassFieldsAsArgs) {
 438       Node* arg = call->in(TypeFunc::Parms + i1);
 439       if (t->isa_valuetypeptr()) {
 440         arg = ValueTypeNode::make(gvn, map->memory(), arg);
 441       }
 442       map->set_argument(jvms, i1, arg);
 443     } else {
 444       if (t->isa_valuetypeptr() && t->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
 445         ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
 446         Node* vt = C->create_vt_node(call, vk, vk, 0, j, true);
 447         map->set_argument(jvms, i1, gvn.transform(vt));
 448         j += vk->value_arg_slots();
 449       } else {
 450         map->set_argument(jvms, i1, call->in(j));
 451         j++;
 452       }
 453     }
 454   }
 455 
 456   C->print_inlining_assert_ready();
 457 
 458   C->print_inlining_move_to(this);
 459 
 460   C->log_late_inline(this);
 461 
 462   // This check is done here because for_method_handle_inline() method
 463   // needs jvms for inlined state.
 464   if (!do_late_inline_check(jvms)) {
 465     map->disconnect_inputs(NULL, C);
 466     return;
 467   }
 468 
 469   // Setup default node notes to be picked up by the inlining
 470   Node_Notes* old_nn = C->node_notes_at(call->_idx);
 471   if (old_nn != NULL) {
 472     Node_Notes* entry_nn = old_nn->clone(C);
 473     entry_nn->set_jvms(jvms);
 474     C->set_default_node_notes(entry_nn);
 475   }
 476 
 477   // Now perform the inlining using the synthesized JVMState
 478   JVMState* new_jvms = _inline_cg->generate(jvms);
 479   if (new_jvms == NULL)  return;  // no change
 480   if (C->failing())      return;
 481 
 482   // Capture any exceptional control flow
 483   GraphKit kit(new_jvms);
 484 
 485   // Find the result object
 486   Node* result = C->top();
 487   ciType* return_type = _inline_cg->method()->return_type();
 488   int result_size = return_type->size();
 489   if (result_size != 0 && !kit.stopped()) {
 490     result = (result_size == 1) ? kit.pop() : kit.pop_pair();
 491   }
 492 
 493   C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
 494   C->env()->notice_inlined_method(_inline_cg->method());
 495   C->set_inlining_progress(true);
 496   
 497   if (return_type->is_valuetype() && return_type != C->env()->___Value_klass()) {
 498     if (result->is_ValueType()) {
 499       if (!call->tf()->returns_value_type_as_fields()) {
 500         result = result->as_ValueType()->store_to_memory(&kit);
 501       } else {
 502         // Return of multiple values (the fields of a value type)
 503         ValueTypeNode* vt = result->as_ValueType();
 504         vt->replace_call_results(call, C);
 505       }
 506     } else {
 507       assert(result->is_top(), "what else?");
 508       for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
 509         ProjNode *pn = call->fast_out(i)->as_Proj();
 510         uint con = pn->_con;
 511         if (con >= TypeFunc::Parms) {
 512           // C->gvn_replace_by(pn, C->top());
 513           C->initial_gvn()->hash_delete(pn);
 514           pn->set_req(0, C->top());
 515           --i; --imax;
 516         }
 517       }
 518     }
 519   }
 520 
 521   kit.replace_call(call, result, true);
 522 }
 523 
 524 
 525 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
       // Wrap inline_cg so the actual inlining of 'method' is deferred until
       // after parsing, when more of the graph is known.
 526   return new LateInlineCallGenerator(method, inline_cg);
 527 }
 528 
     // Late-inline generator for MethodHandle call sites: retries
     // for_method_handle_inline() once more information is available.
 529 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
 530   ciMethod* _caller;          // method containing the MH call site
 531   int _attempt;               // number of late-inline attempts so far
 532   bool _input_not_const;      // MH receiver/MemberName was not a constant
 533 
 534   virtual bool do_late_inline_check(JVMState* jvms);
 535   virtual bool already_attempted() const { return _attempt > 0; }
 536 
 537  public:
 538   LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
 539     LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}
 540 
 541   virtual bool is_mh_late_inline() const { return true; }
 542 
 543   virtual JVMState* generate(JVMState* jvms) {
       // Emit the out-of-line call now; register this generator so the call
       // can be revisited for inlining later.
 544     JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
 545 
 546     Compile* C = Compile::current();
 547     if (_input_not_const) {
 548       // inlining won't be possible so no need to enqueue right now.
 549       call_node()->set_generator(this);
 550     } else {
       // Constant input: queue for the incremental-inlining pass.
 551       C->add_late_inline(this);
 552     }
 553     return new_jvms;
 554   }
 555 };
 556 
     // Re-attempt MethodHandle inlining for this call site.  Returns true
     // (and installs _inline_cg) if a suitable generator was found.
 557 bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {
 558 
 559   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const, AlwaysIncrementalInline);
 560 
 561   Compile::current()->print_inlining_update_delayed(this);
 562 
       // Only count attempts when the input was constant; non-constant
       // inputs can never succeed, so attempts are not charged.
 563   if (!_input_not_const) {
 564     _attempt++;
 565   }
 566 
       // Accept an inline generator, or a direct call that stands in for an
       // inlined MethodHandle intrinsic (counted as inlined for bookkeeping).
 567   if (cg != NULL && (cg->is_inline() || cg->is_inlined_method_handle_intrinsic(jvms, cg->method()))) {
 568     assert(!cg->is_late_inline(), "we're doing late inlining");
 569     _inline_cg = cg;
 570     Compile::current()->dec_number_of_mh_late_inlines();
 571     return true;
 572   }
 573 
       // Keep the generator attached so a later pass may try again.
 574   call_node()->set_generator(this);
 575   return false;
 576 }
 577 
 578 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
       // Track outstanding MH late inlines; decremented when inlining succeeds
       // (see do_late_inline_check).
 579   Compile::current()->inc_number_of_mh_late_inlines();
 580   CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
 581   return cg;
 582 }
 583 
 584 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
 585 
 586  public:
 587   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :


 849     if (i == tos) {
 850       i = kit.jvms()->monoff();
 851       if( i >= limit ) break;
 852     }
 853     Node* m = kit.map()->in(i);
 854     Node* n = slow_map->in(i);
 855     if (m != n) {
 856       const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
 857       Node* phi = PhiNode::make(region, m, t);
 858       phi->set_req(2, n);
 859       kit.map()->set_req(i, gvn.transform(phi));
 860     }
 861   }
 862   return kit.transfer_exceptions_into_jvms();
 863 }
 864 
 865 
     // Choose a generator for a MethodHandle intrinsic call site: try to
     // inline immediately, else fall back to late inlining or a direct call.
 866 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
 867   assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
 868   bool input_not_const;
 869   CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const, false);
 870   Compile* C = Compile::current();
 871   if (cg != NULL) {
       // Inlining is possible now; optionally defer it when incremental
       // inlining is forced and the caller allows delaying.
 872     if (!delayed_forbidden && AlwaysIncrementalInline) {
 873       return CallGenerator::for_late_inline(callee, cg);
 874     } else {
 875       return cg;
 876     }
 877   }
       // No inline generator yet: decide between a late retry and an
       // out-of-line call based on profile count and inlining budget.
 878   int bci = jvms->bci();
 879   ciCallProfile profile = caller->call_profile_at_bci(bci);
 880   int call_site_count = caller->scale_count(profile.count());
 881 
 882   if (IncrementalInline && (AlwaysIncrementalInline ||
 883                             (call_site_count > 0 && (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
 884     return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
 885   } else {
 886     // Out-of-line call.
 887     return CallGenerator::for_direct_call(callee);
 888   }
 889 }
 890 
 891 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const, bool delayed_forbidden) {
 892   GraphKit kit(jvms);
 893   PhaseGVN& gvn = kit.gvn();
 894   Compile* C = kit.C;
 895   vmIntrinsics::ID iid = callee->intrinsic_id();
 896   input_not_const = true;
 897   switch (iid) {
 898   case vmIntrinsics::_invokeBasic:
 899     {
 900       // Get MethodHandle receiver:
 901       Node* receiver = kit.argument(0);
 902       if (receiver->Opcode() == Op_ConP) {
 903         input_not_const = false;
 904         const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
 905         ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
 906         const int vtable_index = Method::invalid_vtable_index;
 907 
 908         if (!ciMethod::is_consistent_info(callee, target)) {
 909           print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
 910                                  "signatures mismatch");
 911           return NULL;
 912         }
 913 
 914         CallGenerator* cg = C->call_generator(target, vtable_index,
 915                                               false /* call_does_dispatch */,
 916                                               jvms,
 917                                               true /* allow_inline */,
 918                                               PROB_ALWAYS,
 919                                               NULL,
 920                                               true,
 921                                               delayed_forbidden);
 922         return cg;
 923       } else {
 924         print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
 925                                "receiver not constant");
 926       }
 927     }
 928     break;
 929 
 930   case vmIntrinsics::_linkToVirtual:
 931   case vmIntrinsics::_linkToStatic:
 932   case vmIntrinsics::_linkToSpecial:
 933   case vmIntrinsics::_linkToInterface:
 934     {
 935       // Get MemberName argument:
 936       Node* member_name = kit.argument(callee->arg_size() - 1);
 937       if (member_name->Opcode() == Op_ConP) {
 938         input_not_const = false;
 939         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
 940         ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
 941 


 985         ciKlass* speculative_receiver_type = NULL;
 986         if (is_virtual_or_interface) {
 987           ciInstanceKlass* klass = target->holder();
 988           Node*             receiver_node = kit.argument(0);
 989           const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
 990           // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
 991           // optimize_virtual_call() takes 2 different holder
 992           // arguments for a corner case that doesn't apply here (see
 993           // Parse::do_call())
 994           target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
 995                                             target, receiver_type, is_virtual,
 996                                             call_does_dispatch, vtable_index, // out-parameters
 997                                             false /* check_access */);
 998           // We lack profiling at this call but type speculation may
 999           // provide us with a type
1000           speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
1001         }
1002         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
1003                                               true /* allow_inline */,
1004                                               PROB_ALWAYS,
1005                                               speculative_receiver_type,
1006                                               true,
1007                                               delayed_forbidden);
1008         return cg;
1009       } else {
1010         print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1011                                "member_name not constant");
1012       }
1013     }
1014     break;
1015 
1016   default:
1017     fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
1018     break;
1019   }
1020   return NULL;
1021 }
1022 
1023 
1024 //------------------------PredicatedIntrinsicGenerator------------------------------
1025 // Internal class which handles all predicated Intrinsic calls.
1026 class PredicatedIntrinsicGenerator : public CallGenerator {
1027   CallGenerator* _intrinsic;


< prev index next >