src/hotspot/share/opto/callGenerator.cpp

--- old/src/hotspot/share/opto/callGenerator.cpp

 106 
 107   assert(exits.jvms()->same_calls_as(jvms), "sanity");
 108 
 109   // Simply return the exit state of the parser,
 110   // augmented by any exceptional states.
 111   return exits.transfer_exceptions_into_jvms();
 112 }
 113 
 114 //---------------------------DirectCallGenerator------------------------------
 115 // Internal class which handles all out-of-line calls w/o receiver type checks.
 116 class DirectCallGenerator : public CallGenerator {
 117  private:
 118   CallStaticJavaNode* _call_node;
 119   // Force separate memory and I/O projections for the exceptional
 120   // paths to facilitate late inlining.
 121   bool                _separate_io_proj;
 122 
 123  public:
 124   DirectCallGenerator(ciMethod* method, bool separate_io_proj)
 125     : CallGenerator(method),
 126       _separate_io_proj(separate_io_proj)
 127   {
 128     // TODO fix this with the calling convention changes
 129     if (false /*method->signature()->return_type()->is__Value()*/) {
 130       // If that call has not been optimized by the time optimizations
 131       // are over, we'll need to add a call to create a value type
 132       // instance from the klass returned by the call. Separating
 133       // memory and I/O projections for exceptions is required to
 134       // perform that graph transformation.
 135       _separate_io_proj = true;
 136     }
 137   }
 138   virtual JVMState* generate(JVMState* jvms);
 139 
 140   CallStaticJavaNode* call_node() const { return _call_node; }
 141 };
 142 
 143 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
 144   GraphKit kit(jvms);
 145   kit.C->print_inlining_update(this);
 146   PhaseGVN& gvn = kit.gvn();
 147   bool is_static = method()->is_static();
 148   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
 149                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
 150 
 151   if (kit.C->log() != NULL) {
 152     kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
 153   }


 419     map->set_req(TypeFunc::Memory, mem);
 420   }
 421 
 422   // blow away old call arguments
 423   Node* top = C->top();
 424   for (uint i1 = TypeFunc::Parms; i1 < call->_tf->domain_cc()->cnt(); i1++) {
 425     map->set_req(i1, top);
 426   }
 427   jvms->set_map(map);
 428 
 429   // Make enough space in the expression stack to transfer
 430   // the incoming arguments and return value.
 431   map->ensure_stack(jvms, jvms->method()->max_stack());
 432   const TypeTuple *domain_sig = call->_tf->domain_sig();
 433   uint nargs = method()->arg_size();
 434   assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");
 435 
 436   uint j = TypeFunc::Parms;
 437   for (uint i1 = 0; i1 < nargs; i1++) {
 438     const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
 439     if (!ValueTypePassFieldsAsArgs) {
 440       Node* arg = call->in(TypeFunc::Parms + i1);
 441       map->set_argument(jvms, i1, arg);
 442     } else {
 443       assert(false, "FIXME");
 444       // TODO move this into Parse::Parse because we might need to deopt
 445       /*
 446       GraphKit arg_kit(jvms, &gvn);
 447       if (t->is_valuetypeptr()) {
 448         ciValueKlass* vk = t->value_klass();
 449         ValueTypeNode* vt = ValueTypeNode::make_from_multi(&arg_kit, call, vk, j, true);
 450         arg_kit.set_argument(i1, vt);
 451         j += vk->value_arg_slots();
 452       } else {
 453         arg_kit.set_argument(i1, call->in(j));
 454         j++;
 455       }
 456       */
 457     }
 458   }
 459 
 460   C->print_inlining_assert_ready();
 461 
 462   C->print_inlining_move_to(this);
 463 
 464   C->log_late_inline(this);
 465 
 466   // This check is done here because for_method_handle_inline() method
 467   // needs jvms for inlined state.
 468   if (!do_late_inline_check(jvms)) {
 469     map->disconnect_inputs(NULL, C);
 470     return;
 471   }
 472 
 473   // Setup default node notes to be picked up by the inlining
 474   Node_Notes* old_nn = C->node_notes_at(call->_idx);
 475   if (old_nn != NULL) {
 476     Node_Notes* entry_nn = old_nn->clone(C);


 485 
 486   // Capture any exceptional control flow
 487   GraphKit kit(new_jvms);
 488 
 489   // Find the result object
 490   Node* result = C->top();
 491   ciType* return_type = _inline_cg->method()->return_type();
 492   int result_size = return_type->size();
 493   if (result_size != 0 && !kit.stopped()) {
 494     result = (result_size == 1) ? kit.pop() : kit.pop_pair();
 495   }
 496 
 497   C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
 498   C->env()->notice_inlined_method(_inline_cg->method());
 499   C->set_inlining_progress(true);
 500 
 501   // Handle value type returns
 502   bool returned_as_fields = call->tf()->returns_value_type_as_fields();
 503   if (result->is_ValueType()) {
 504     ValueTypeNode* vt = result->as_ValueType();
 505     if (!returned_as_fields) {
 506       result = ValueTypePtrNode::make_from_value_type(&kit, vt);
 507     } else {
 508       assert(false, "FIXME");
 509       // Return of multiple values (the fields of a value type)
 510       vt->replace_call_results(&kit, call, C);
 511       if (gvn.type(vt->get_oop()) == TypePtr::NULL_PTR) {
 512         result = vt->tagged_klass(gvn);
 513       } else {
 514         result = vt->get_oop();
 515       }
 516     }
 517   } else if (gvn.type(result)->is_valuetypeptr() && returned_as_fields) {
 518     assert(false, "FIXME");
 519     const Type* vt_t = call->_tf->range_sig()->field_at(TypeFunc::Parms);
 520     Node* cast = new CheckCastPPNode(NULL, result, vt_t);
 521     gvn.record_for_igvn(cast);
 522     ValueTypePtrNode* vtptr = ValueTypePtrNode::make_from_oop(&kit, gvn.transform(cast));
 523     vtptr->replace_call_results(&kit, call, C);
 524     result = cast;
 525   }
 526 
 527   kit.replace_call(call, result, true);
 528 }
 529 
 530 
 531 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 532   return new LateInlineCallGenerator(method, inline_cg);
 533 }
 534 
 535 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
 536   ciMethod* _caller;
 537   int _attempt;
 538   bool _input_not_const

+++ new/src/hotspot/share/opto/callGenerator.cpp

 106 
 107   assert(exits.jvms()->same_calls_as(jvms), "sanity");
 108 
 109   // Simply return the exit state of the parser,
 110   // augmented by any exceptional states.
 111   return exits.transfer_exceptions_into_jvms();
 112 }
 113 
 114 //---------------------------DirectCallGenerator------------------------------
 115 // Internal class which handles all out-of-line calls w/o receiver type checks.
 116 class DirectCallGenerator : public CallGenerator {
 117  private:
 118   CallStaticJavaNode* _call_node;
 119   // Force separate memory and I/O projections for the exceptional
 120   // paths to facilitate late inlining.
 121   bool                _separate_io_proj;
 122 
 123  public:
 124   DirectCallGenerator(ciMethod* method, bool separate_io_proj)
 125     : CallGenerator(method),
 126       _call_node(NULL),
 127       _separate_io_proj(separate_io_proj)
 128   {
 129     if (ValueTypeReturnedAsFields && method->is_method_handle_intrinsic()) {
 130       // If that call has not been optimized by the time optimizations are over,
 131       // we'll need to add a call to create a value type instance from the klass
 132       // returned by the call (see PhaseMacroExpand::expand_mh_intrinsic_return).
 133       // Separating memory and I/O projections for exceptions is required to
 134       // perform that graph transformation.
 135       _separate_io_proj = true;
 136     }
 137   }
 138   virtual JVMState* generate(JVMState* jvms);
 139 
 140   CallStaticJavaNode* call_node() const { return _call_node; }
 141 };
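The class above is easiest to read as one strategy in C2's call-generator hierarchy: generate() consumes the abstract JVM state at the call site and returns the state after the call. Below is a minimal standalone sketch of that contract; the *Model names are hypothetical and this is an illustration of the pattern, not HotSpot code.

#include <memory>

// Stand-in for JVMState: the abstract interpreter state at a call site.
struct JVMStateModel {
  int bci = 0;  // bytecode index of the call
};

// Stand-in for CallGenerator: each subclass knows one way to emit a call.
class CallGeneratorModel {
 public:
  virtual ~CallGeneratorModel() = default;
  // Consumes the caller's state and returns the state after the call,
  // or nullptr if code generation bails out.
  virtual std::unique_ptr<JVMStateModel> generate(const JVMStateModel& jvms) = 0;
};

// Stand-in for DirectCallGenerator: an out-of-line call without a
// receiver type check. Keeping I/O and memory projections separate on
// exception paths leaves room for a late-inlining pass to rewire them.
class DirectCallModel : public CallGeneratorModel {
  bool _separate_io_proj;
 public:
  explicit DirectCallModel(bool separate_io_proj)
      : _separate_io_proj(separate_io_proj) {}
  std::unique_ptr<JVMStateModel> generate(const JVMStateModel& jvms) override {
    // A real generator appends call nodes here; the model just passes
    // the state through.
    return std::make_unique<JVMStateModel>(jvms);
  }
};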
 142 
 143 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
 144   GraphKit kit(jvms);
 145   kit.C->print_inlining_update(this);
 146   PhaseGVN& gvn = kit.gvn();
 147   bool is_static = method()->is_static();
 148   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
 149                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
 150 
 151   if (kit.C->log() != NULL) {
 152     kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
 153   }


 419     map->set_req(TypeFunc::Memory, mem);
 420   }
 421 
 422   // blow away old call arguments
 423   Node* top = C->top();
 424   for (uint i1 = TypeFunc::Parms; i1 < call->_tf->domain_cc()->cnt(); i1++) {
 425     map->set_req(i1, top);
 426   }
 427   jvms->set_map(map);
 428 
 429   // Make enough space in the expression stack to transfer
 430   // the incoming arguments and return value.
 431   map->ensure_stack(jvms, jvms->method()->max_stack());
 432   const TypeTuple *domain_sig = call->_tf->domain_sig();
 433   uint nargs = method()->arg_size();
 434   assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");
 435 
 436   uint j = TypeFunc::Parms;
 437   for (uint i1 = 0; i1 < nargs; i1++) {
 438     const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
 439     if (method()->get_Method()->has_scalarized_args()) {
 440       GraphKit arg_kit(jvms, &gvn);
 441       // TODO for now, don't scalarize value type receivers because of interface calls
 442       if (t->is_valuetypeptr() && (method()->is_static() || i1 != 0)) {
 443         arg_kit.set_control(map->control());
 444         ValueTypeNode* vt = ValueTypeNode::make_from_multi(&arg_kit, call, t->value_klass(), j, true);
 445         map->set_control(arg_kit.control());
 446         map->set_argument(jvms, i1, vt);
 447       } else {
 448         int index = j;
 449         SigEntry res_entry = method()->get_Method()->get_res_entry();
 450         if (res_entry._offset != -1 && (index - TypeFunc::Parms) >= res_entry._offset) {
 451           // Skip reserved entry
 452           index += type2size[res_entry._bt];
 453         }
 454         map->set_argument(jvms, i1, call->in(index));
 455         j++;
 456       }
 457     } else {
 458       Node* arg = call->in(TypeFunc::Parms + i1);
 459       map->set_argument(jvms, i1, arg);
 460     }
 461   }
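A note on the index bookkeeping in the else branch above: once the scalarized calling convention reserves an entry at res_entry._offset, every incoming edge at or past that offset lives type2size[res_entry._bt] slots further along. The standalone sketch below models just that mapping; source_index, res_offset, and res_size are hypothetical stand-ins, not HotSpot names.

#include <cstdio>

// Maps the logical input position j to the actual call input, skipping
// a reserved entry of res_size slots at res_offset (res_offset == -1
// means no reserved entry). parms_base models TypeFunc::Parms.
int source_index(int j, int parms_base, int res_offset, int res_size) {
  int index = j;
  if (res_offset != -1 && (index - parms_base) >= res_offset) {
    index += res_size;  // skip the reserved entry
  }
  return index;
}

int main() {
  const int parms_base = 4;
  // With a one-slot reserved entry at offset 2, inputs 4 and 5 map
  // straight through while 6 and 7 shift to 7 and 8.
  for (int j = parms_base; j < parms_base + 4; j++) {
    std::printf("j=%d -> in(%d)\n", j, source_index(j, parms_base, 2, 1));
  }
  return 0;
}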
 462 
 463   C->print_inlining_assert_ready();
 464 
 465   C->print_inlining_move_to(this);
 466 
 467   C->log_late_inline(this);
 468 
 469   // This check is done here because for_method_handle_inline() method
 470   // needs jvms for inlined state.
 471   if (!do_late_inline_check(jvms)) {
 472     map->disconnect_inputs(NULL, C);
 473     return;
 474   }
 475 
 476   // Setup default node notes to be picked up by the inlining
 477   Node_Notes* old_nn = C->node_notes_at(call->_idx);
 478   if (old_nn != NULL) {
 479     Node_Notes* entry_nn = old_nn->clone(C);


 488 
 489   // Capture any exceptional control flow
 490   GraphKit kit(new_jvms);
 491 
 492   // Find the result object
 493   Node* result = C->top();
 494   ciType* return_type = _inline_cg->method()->return_type();
 495   int result_size = return_type->size();
 496   if (result_size != 0 && !kit.stopped()) {
 497     result = (result_size == 1) ? kit.pop() : kit.pop_pair();
 498   }
 499 
 500   C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
 501   C->env()->notice_inlined_method(_inline_cg->method());
 502   C->set_inlining_progress(true);
 503 
 504   // Handle value type returns
 505   bool returned_as_fields = call->tf()->returns_value_type_as_fields();
 506   if (result->is_ValueType()) {
 507     ValueTypeNode* vt = result->as_ValueType();
 508     if (returned_as_fields) {
 509       // Return of multiple values (the fields of a value type)
 510       vt->replace_call_results(&kit, call, C);
 511       if (vt->is_allocated(&gvn) && !StressValueTypeReturnedAsFields) {
 512         result = vt->get_oop();
 513       } else {
 514         result = vt->tagged_klass(gvn);
 515       }
 516     } else {
 517       result = ValueTypePtrNode::make_from_value_type(&kit, vt);
 518     }
 519   } else if (gvn.type(result)->is_valuetypeptr() && returned_as_fields) {
 520     const Type* vt_t = call->_tf->range_sig()->field_at(TypeFunc::Parms);
 521     Node* cast = new CheckCastPPNode(NULL, result, vt_t);
 522     gvn.record_for_igvn(cast);
 523     ValueTypePtrNode* vtptr = ValueTypePtrNode::make_from_oop(&kit, gvn.transform(cast));
 524     vtptr->replace_call_results(&kit, call, C);
 525     result = cast;
 526   }
 527 
 528   kit.replace_call(call, result, true);
 529 }
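The returned_as_fields path above depends on the caller being able to tell, from a single return register, whether it received a buffered oop or a tagged klass pointer announcing that the fields came back in registers. The sketch below models that discrimination with a low-bit tag; the helper names are hypothetical, and the exact encoding used by tagged_klass() is an assumption here, not taken from the source.

#include <cassert>
#include <cstdint>

// Klass pointers are word-aligned, so the low bit is free to mean
// "this is a klass pointer, not an oop" (assumed encoding).
inline std::uintptr_t tag_klass(std::uintptr_t klass)  { return klass | 1u; }
inline bool is_tagged_klass(std::uintptr_t word)       { return (word & 1u) != 0; }
inline std::uintptr_t untag_klass(std::uintptr_t word) { return word & ~std::uintptr_t(1); }

int main() {
  std::uintptr_t klass = 0x1000;     // pretend aligned klass pointer
  std::uintptr_t ret   = tag_klass(klass);
  assert(is_tagged_klass(ret));      // caller: fields are in registers
  assert(untag_klass(ret) == klass); // and the klass is recoverable
  return 0;
}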
 530 
 531 
 532 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 533   return new LateInlineCallGenerator(method, inline_cg);
 534 }
 535 
 536 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
 537   ciMethod* _caller;
 538   int _attempt;
 539   bool _input_not_const;

