// NOTE(review): This is NOT a compilable translation unit. It is a side-by-side
// diff dump (OLD revision | NEW revision) of a fragment of HotSpot C2's
// callGenerator.cpp from the Valhalla value-type work. The embedded integers
// (483, 484, ... on the left; 483 ... 866 on the right) are the original
// file's line numbers, not code. The two columns are separated by a single
// " | " token in the middle of the fourth physical line below. Every function
// shown is truncated at one or both ends of this view, so no span here can be
// safely rewritten; the comments below only map the structure.
//
// --- OLD column, original lines 483-533: tail of
// LateInlineCallGenerator::do_late_inline. Re-runs the inline CallGenerator on
// a synthesized JVMState, pops the result, records inlining bookkeeping
// (has_loops / notice_inlined_method / inlining_progress), then handles value
// type returns: when NOT returned as fields it explicitly allocates
// (vt->allocate(&kit)) and wraps via ValueTypePtrNode::make_from_value_type(gvn, vt);
// when returned as fields it rewires the call's result projections
// (replace_call_results) and picks tagged_klass vs. get_oop depending on
// whether the oop is NULL_PTR; the final else (top result) starts a loop that
// detaches the call's result ProjNodes.
483 // Now perform the inlining using the synthesized JVMState 484 JVMState* new_jvms = _inline_cg->generate(jvms); 485 if (new_jvms == NULL) return; // no change 486 if (C->failing()) return; 487 488 // Capture any exceptional control flow 489 GraphKit kit(new_jvms); 490 491 // Find the result object 492 Node* result = C->top(); 493 ciType* return_type = _inline_cg->method()->return_type(); 494 int result_size = return_type->size(); 495 if (result_size != 0 && !kit.stopped()) { 496 result = (result_size == 1) ? kit.pop() : kit.pop_pair(); 497 } 498 499 C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops()); 500 C->env()->notice_inlined_method(_inline_cg->method()); 501 C->set_inlining_progress(true); 502 503 if (return_type->is_valuetype()) { 504 const Type* vt_t = call->_tf->range_sig()->field_at(TypeFunc::Parms); 505 bool returned_as_fields = call->tf()->returns_value_type_as_fields(); 506 if (result->is_ValueType()) { 507 ValueTypeNode* vt = result->as_ValueType(); 508 if (!returned_as_fields) { 509 vt = vt->allocate(&kit)->as_ValueType(); 510 result = ValueTypePtrNode::make_from_value_type(gvn, vt); 511 } else { 512 // Return of multiple values (the fields of a value type) 513 vt->replace_call_results(&kit, call, C); 514 if (gvn.type(vt->get_oop()) == TypePtr::NULL_PTR) { 515 result = vt->tagged_klass(gvn); 516 } else { 517 result = vt->get_oop(); 518 } 519 } 520 } else if (gvn.type(result)->is_valuetypeptr() && returned_as_fields) { 521 Node* cast = new CheckCastPPNode(NULL, result, vt_t); 522 gvn.record_for_igvn(cast); 523 ValueTypePtrNode* vtptr = ValueTypePtrNode::make_from_oop(&kit, gvn.transform(cast)); 524 vtptr->replace_call_results(&kit, call, C); 525 result = cast; 526 } else { 527 assert(result->is_top(), "what else?"); 528 for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) { 529 ProjNode *pn = call->fast_out(i)->as_Proj(); 530 uint con = pn->_con; 531 if (con >= TypeFunc::Parms) { 532 gvn.hash_delete(pn); 533 
// --- OLD column, original lines 533-559 then a jump to 832-838: finishes the
// ProjNode-disconnect loop (set_req(0, C->top()) detaches each result
// projection), rewires the call via kit.replace_call(call, result, true);
// then the for_late_inline factory (returns a new LateInlineCallGenerator);
// then the head of class LateInlineMHCallGenerator (its constructor's
// initializer list is cut off at original line 559 — incomplete here). The
// text then skips to the fast/slow-path merge code (original 832+): if there
// is no slow_map the fast path alone is returned; if the fast path stopped
// (threw), the slow path becomes the only path.
pn->set_req(0, C->top()); 534 --i; --imax; 535 } 536 } 537 } 538 } else if (result->is_ValueType()) { 539 result = result->isa_ValueType()->allocate(&kit)->get_oop(); 540 } 541 542 kit.replace_call(call, result, true); 543 } 544 545 546 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) { 547 return new LateInlineCallGenerator(method, inline_cg); 548 } 549 550 class LateInlineMHCallGenerator : public LateInlineCallGenerator { 551 ciMethod* _caller; 552 int _attempt; 553 bool _input_not_const; 554 555 virtual bool do_late_inline_check(JVMState* jvms); 556 virtual bool already_attempted() const { return _attempt > 0; } 557 558 public: 559 LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) : 832 if (slow_map == NULL) { 833 // The fast path is the only path remaining. 834 return kit.transfer_exceptions_into_jvms(); 835 } 836 837 if (kit.stopped()) { 838 // Inlined method threw an exception, so it's just the slow path after all. 
// --- OLD column, original lines 839-872: for each JVMState slot, if one side
// of the merge holds an unallocated ValueTypeNode while the meet of the two
// types is not a value type, the value type is allocated on that side
// (explicit ->allocate(&kit) followed by
// ValueTypePtrNode::make_from_value_type(kit.gvn(), ...)); the slow-path case
// does this under PreserveJVMState with the slow_map installed. Afterwards the
// replaced-nodes list is restored (only valid on one branch) and the
// fast/slow diamond starts being finished.
839 kit.set_jvms(slow_jvms); 840 return kit.transfer_exceptions_into_jvms(); 841 } 842 843 // Allocate value types if they are merged with objects (similar to Parse::merge_common()) 844 uint tos = kit.jvms()->stkoff() + kit.sp(); 845 uint limit = slow_map->req(); 846 for (uint i = TypeFunc::Parms; i < limit; i++) { 847 Node* m = kit.map()->in(i); 848 Node* n = slow_map->in(i); 849 const Type* t = gvn.type(m)->meet_speculative(gvn.type(n)); 850 if (m->is_ValueType() && !t->isa_valuetype()) { 851 // Allocate value type in fast path 852 ValueTypeBaseNode* vt = m->as_ValueType()->allocate(&kit); 853 m = ValueTypePtrNode::make_from_value_type(kit.gvn(), vt->as_ValueType()); 854 kit.map()->set_req(i, m); 855 } 856 if (n->is_ValueType() && !t->isa_valuetype()) { 857 // Allocate value type in slow path 858 PreserveJVMState pjvms(&kit); 859 kit.set_map(slow_map); 860 ValueTypeBaseNode* vt = n->as_ValueType()->allocate(&kit); 861 n = ValueTypePtrNode::make_from_value_type(kit.gvn(), vt->as_ValueType()); 862 kit.map()->set_req(i, n); 863 slow_map = kit.stop(); 864 } 865 } 866 867 // There are 2 branches and the replaced nodes are only valid on 868 // one: restore the replaced nodes to what they were before the 869 // branch. 870 kit.map()->set_replaced_nodes(replaced_nodes); 871 872 // Finish the diamond. 
// --- OLD column ends (original lines 873-881: a 3-input RegionNode merges
// fast and slow control, a PhiNode merges i_o; memory merge is cut off).
// The " | " mid-line below is the diff column separator; after it the NEW
// column restarts at its original line 483 with the same do_late_inline tail.
// Observable differences in the NEW revision vs. the OLD column above:
//   1. ValueTypePtrNode::make_from_value_type now takes (&kit, vt) — no
//      explicit vt->allocate(&kit) call precedes it.
//   2. Both returned_as_fields paths now begin with assert(false, "FIXME"),
//      i.e. those paths are marked not-yet-supported in the new revision.
//   3. vt_t is computed inside the returned_as_fields branch instead of up
//      front, and the return_type->is_valuetype() guard with the top-result
//      ProjNode-disconnect loop is gone from the new tail.
873 kit.C->set_has_split_ifs(true); // Has chance for split-if optimization 874 RegionNode* region = new RegionNode(3); 875 region->init_req(1, kit.control()); 876 region->init_req(2, slow_map->control()); 877 kit.set_control(gvn.transform(region)); 878 Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO); 879 iophi->set_req(2, slow_map->i_o()); 880 kit.set_i_o(gvn.transform(iophi)); 881 // Merge memory | 483 // Now perform the inlining using the synthesized JVMState 484 JVMState* new_jvms = _inline_cg->generate(jvms); 485 if (new_jvms == NULL) return; // no change 486 if (C->failing()) return; 487 488 // Capture any exceptional control flow 489 GraphKit kit(new_jvms); 490 491 // Find the result object 492 Node* result = C->top(); 493 ciType* return_type = _inline_cg->method()->return_type(); 494 int result_size = return_type->size(); 495 if (result_size != 0 && !kit.stopped()) { 496 result = (result_size == 1) ? kit.pop() : kit.pop_pair(); 497 } 498 499 C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops()); 500 C->env()->notice_inlined_method(_inline_cg->method()); 501 C->set_inlining_progress(true); 502 503 // Handle value type returns 504 bool returned_as_fields = call->tf()->returns_value_type_as_fields(); 505 if (result->is_ValueType()) { 506 ValueTypeNode* vt = result->as_ValueType(); 507 if (!returned_as_fields) { 508 result = ValueTypePtrNode::make_from_value_type(&kit, vt); 509 } else { 510 assert(false, "FIXME"); 511 // Return of multiple values (the fields of a value type) 512 vt->replace_call_results(&kit, call, C); 513 if (gvn.type(vt->get_oop()) == TypePtr::NULL_PTR) { 514 result = vt->tagged_klass(gvn); 515 } else { 516 result = vt->get_oop(); 517 } 518 } 519 } else if (gvn.type(result)->is_valuetypeptr() && returned_as_fields) { 520 assert(false, "FIXME"); 521 const Type* vt_t = call->_tf->range_sig()->field_at(TypeFunc::Parms); 522 Node* cast = new CheckCastPPNode(NULL, result, vt_t); 523 gvn.record_for_igvn(cast); 524 
// --- NEW column, original lines 524-546 then a jump to 819-852: finishes the
// valuetypeptr/returned_as_fields branch and replace_call; then the same
// for_late_inline factory and LateInlineMHCallGenerator class head as the OLD
// column (byte-identical apart from original line numbering; the constructor
// is again cut off). The merge loop that follows differs from the OLD column
// only in that both the fast- and slow-path cases call
// ValueTypePtrNode::make_from_value_type(&kit, ...->as_ValueType()) directly,
// with no separate ->allocate(&kit) step.
ValueTypePtrNode* vtptr = ValueTypePtrNode::make_from_oop(&kit, gvn.transform(cast)); 525 vtptr->replace_call_results(&kit, call, C); 526 result = cast; 527 } 528 529 kit.replace_call(call, result, true); 530 } 531 532 533 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) { 534 return new LateInlineCallGenerator(method, inline_cg); 535 } 536 537 class LateInlineMHCallGenerator : public LateInlineCallGenerator { 538 ciMethod* _caller; 539 int _attempt; 540 bool _input_not_const; 541 542 virtual bool do_late_inline_check(JVMState* jvms); 543 virtual bool already_attempted() const { return _attempt > 0; } 544 545 public: 546 LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) : 819 if (slow_map == NULL) { 820 // The fast path is the only path remaining. 821 return kit.transfer_exceptions_into_jvms(); 822 } 823 824 if (kit.stopped()) { 825 // Inlined method threw an exception, so it's just the slow path after all. 826 kit.set_jvms(slow_jvms); 827 return kit.transfer_exceptions_into_jvms(); 828 } 829 830 // Allocate value types if they are merged with objects (similar to Parse::merge_common()) 831 uint tos = kit.jvms()->stkoff() + kit.sp(); 832 uint limit = slow_map->req(); 833 for (uint i = TypeFunc::Parms; i < limit; i++) { 834 Node* m = kit.map()->in(i); 835 Node* n = slow_map->in(i); 836 const Type* t = gvn.type(m)->meet_speculative(gvn.type(n)); 837 if (m->is_ValueType() && !t->isa_valuetype()) { 838 // Allocate value type in fast path 839 m = ValueTypePtrNode::make_from_value_type(&kit, m->as_ValueType()); 840 kit.map()->set_req(i, m); 841 } 842 if (n->is_ValueType() && !t->isa_valuetype()) { 843 // Allocate value type in slow path 844 PreserveJVMState pjvms(&kit); 845 kit.set_map(slow_map); 846 n = ValueTypePtrNode::make_from_value_type(&kit, n->as_ValueType()); 847 kit.map()->set_req(i, n); 848 slow_map = kit.stop(); 849 } 850 } 851 852 // There are 2 branches and the replaced nodes are only 
// --- NEW column ends (original lines 852-866): restores replaced nodes and
// finishes the control/i_o diamond identically to the OLD column; the text is
// cut off at "// Merge memory", so the memory/frameptr/returnadr merge and
// everything after it is outside this view.
valid on 853 // one: restore the replaced nodes to what they were before the 854 // branch. 855 kit.map()->set_replaced_nodes(replaced_nodes); 856 857 // Finish the diamond. 858 kit.C->set_has_split_ifs(true); // Has chance for split-if optimization 859 RegionNode* region = new RegionNode(3); 860 region->init_req(1, kit.control()); 861 region->init_req(2, slow_map->control()); 862 kit.set_control(gvn.transform(region)); 863 Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO); 864 iophi->set_req(2, slow_map->i_o()); 865 kit.set_i_o(gvn.transform(iophi)); 866 // Merge memory |