// NOTE(review): This is not a compilable translation unit. It is a
// side-by-side before/after dump (the two columns are separated by " | ")
// of one excerpt from HotSpot C2's Parse::do_call() return-value handling
// (OpenJDK Project Valhalla value-type support). The numbers 636..709
// embedded in the text are the original source line numbers, not code,
// and the enclosing function begins before and ends after this excerpt.
//
// Observable difference between the two columns: both calls to
// ValueTypeNode::make_from_oop() (orig lines 656 and 689) in the
// right-hand (after) column drop the leading "buffer_check = false"
// argument that the left-hand (before) column passes; every other
// visible token is identical between the columns.
// TODO(review): confirm against the current ValueTypeNode::make_from_oop
// declaration that the right-hand column matches the intended signature.
636 if (!stopped()) { 637 // This was some sort of virtual call, which did a null check for us. 638 // Now we can assert receiver-not-null, on the normal return path. 639 if (receiver != NULL && cg->is_virtual()) { 640 Node* cast = cast_not_null(receiver); 641 // %%% assert(receiver == cast, "should already have cast the receiver"); 642 } 643 644 // Round double result after a call from strict to non-strict code 645 round_double_result(cg->method()); 646 647 ciType* rtype = cg->method()->return_type(); 648 ciType* ctype = declared_signature->return_type(); 649 650 Node* retnode = peek(); 651 if (rtype->basic_type() == T_VALUETYPE && !retnode->is_ValueType()) { 652 pop(); 653 assert(!cg->is_inline(), "should have ValueTypeNode result when inlining"); 654 ciValueKlass* vk = _gvn.type(retnode)->value_klass(); 655 // We will deoptimize if the return value is null and then need to continue execution after the call 656 ValueTypeNode* vt = ValueTypeNode::make_from_oop(this, retnode, vk, /* buffer_check */ false, /* null2default */ false, iter().next_bci()); 657 push_node(T_VALUETYPE, vt); 658 } 659 660 if (Bytecodes::has_optional_appendix(iter().cur_bc_raw()) || is_signature_polymorphic) { 661 // Be careful here with return types. 662 if (ctype != rtype) { 663 BasicType rt = rtype->basic_type(); 664 BasicType ct = ctype->basic_type(); 665 if (ct == T_VOID) { 666 // It's OK for a method to return a value that is discarded. 667 // The discarding does not require any special action from the caller. 668 // The Java code knows this, at VerifyType.isNullConversion. 669 pop_node(rt); // whatever it was, pop it 670 } else if (rt == T_INT || is_subword_type(rt)) { 671 // Nothing. These cases are handled in lambda form bytecode. 
672 assert(ct == T_INT || is_subword_type(ct), "must match: rt=%s, ct=%s", type2name(rt), type2name(ct)); 673 } else if (rt == T_OBJECT || rt == T_ARRAY || rt == T_VALUETYPE) { 674 assert(ct == T_OBJECT || ct == T_ARRAY || ct == T_VALUETYPE, "rt=%s, ct=%s", type2name(rt), type2name(ct)); 675 if (ctype->is_loaded()) { 676 const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass()); 677 const Type* sig_type = TypeOopPtr::make_from_klass(ctype->as_klass()); 678 if (ct == T_VALUETYPE && cg->method()->get_Method()->is_returning_vt()) { 679 // A NULL ValueType cannot be returned to compiled code. The 'areturn' bytecode 680 // handler will deoptimize its caller if it is about to return a NULL ValueType. 681 // (See comments inside TypeTuple::make_range). 682 sig_type = sig_type->join_speculative(TypePtr::NOTNULL); 683 } 684 if (arg_type != NULL && !arg_type->higher_equal(sig_type) && !retnode->is_ValueType()) { 685 pop(); 686 Node* cast_obj = _gvn.transform(new CheckCastPPNode(control(), retnode, sig_type)); 687 if (ct == T_VALUETYPE) { 688 // We will deoptimize if the return value is null and then need to continue execution after the call 689 cast_obj = ValueTypeNode::make_from_oop(this, cast_obj, ctype->as_value_klass(), /* buffer_check */ false, /* null2default */ false, iter().next_bci()); 690 } 691 push(cast_obj); 692 } 693 } 694 } else { 695 assert(rt == ct, "unexpected mismatch: rt=%s, ct=%s", type2name(rt), type2name(ct)); 696 // push a zero; it's better than getting an oop/int mismatch 697 pop_node(rt); 698 Node* retnode = zerocon(ct); 699 push_node(ct, retnode); 700 } 701 // Now that the value is well-behaved, continue with the call-site type. 702 rtype = ctype; 703 } 704 } else { 705 // Symbolic resolution enforces the types to be the same. 
706 // NOTE: We must relax the assert for unloaded types because two 707 // different ciType instances of the same unloaded class type 708 // can appear to be "loaded" by different loaders (depending on 709 // the accessing class). | 636 if (!stopped()) { 637 // This was some sort of virtual call, which did a null check for us. 638 // Now we can assert receiver-not-null, on the normal return path. 639 if (receiver != NULL && cg->is_virtual()) { 640 Node* cast = cast_not_null(receiver); 641 // %%% assert(receiver == cast, "should already have cast the receiver"); 642 } 643 644 // Round double result after a call from strict to non-strict code 645 round_double_result(cg->method()); 646 647 ciType* rtype = cg->method()->return_type(); 648 ciType* ctype = declared_signature->return_type(); 649 650 Node* retnode = peek(); 651 if (rtype->basic_type() == T_VALUETYPE && !retnode->is_ValueType()) { 652 pop(); 653 assert(!cg->is_inline(), "should have ValueTypeNode result when inlining"); 654 ciValueKlass* vk = _gvn.type(retnode)->value_klass(); 655 // We will deoptimize if the return value is null and then need to continue execution after the call 656 ValueTypeNode* vt = ValueTypeNode::make_from_oop(this, retnode, vk, /* null2default */ false, iter().next_bci()); 657 push_node(T_VALUETYPE, vt); 658 } 659 660 if (Bytecodes::has_optional_appendix(iter().cur_bc_raw()) || is_signature_polymorphic) { 661 // Be careful here with return types. 662 if (ctype != rtype) { 663 BasicType rt = rtype->basic_type(); 664 BasicType ct = ctype->basic_type(); 665 if (ct == T_VOID) { 666 // It's OK for a method to return a value that is discarded. 667 // The discarding does not require any special action from the caller. 668 // The Java code knows this, at VerifyType.isNullConversion. 669 pop_node(rt); // whatever it was, pop it 670 } else if (rt == T_INT || is_subword_type(rt)) { 671 // Nothing. These cases are handled in lambda form bytecode. 
672 assert(ct == T_INT || is_subword_type(ct), "must match: rt=%s, ct=%s", type2name(rt), type2name(ct)); 673 } else if (rt == T_OBJECT || rt == T_ARRAY || rt == T_VALUETYPE) { 674 assert(ct == T_OBJECT || ct == T_ARRAY || ct == T_VALUETYPE, "rt=%s, ct=%s", type2name(rt), type2name(ct)); 675 if (ctype->is_loaded()) { 676 const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass()); 677 const Type* sig_type = TypeOopPtr::make_from_klass(ctype->as_klass()); 678 if (ct == T_VALUETYPE && cg->method()->get_Method()->is_returning_vt()) { 679 // A NULL ValueType cannot be returned to compiled code. The 'areturn' bytecode 680 // handler will deoptimize its caller if it is about to return a NULL ValueType. 681 // (See comments inside TypeTuple::make_range). 682 sig_type = sig_type->join_speculative(TypePtr::NOTNULL); 683 } 684 if (arg_type != NULL && !arg_type->higher_equal(sig_type) && !retnode->is_ValueType()) { 685 pop(); 686 Node* cast_obj = _gvn.transform(new CheckCastPPNode(control(), retnode, sig_type)); 687 if (ct == T_VALUETYPE) { 688 // We will deoptimize if the return value is null and then need to continue execution after the call 689 cast_obj = ValueTypeNode::make_from_oop(this, cast_obj, ctype->as_value_klass(), /* null2default */ false, iter().next_bci()); 690 } 691 push(cast_obj); 692 } 693 } 694 } else { 695 assert(rt == ct, "unexpected mismatch: rt=%s, ct=%s", type2name(rt), type2name(ct)); 696 // push a zero; it's better than getting an oop/int mismatch 697 pop_node(rt); 698 Node* retnode = zerocon(ct); 699 push_node(ct, retnode); 700 } 701 // Now that the value is well-behaved, continue with the call-site type. 702 rtype = ctype; 703 } 704 } else { 705 // Symbolic resolution enforces the types to be the same. 706 // NOTE: We must relax the assert for unloaded types because two 707 // different ciType instances of the same unloaded class type 708 // can appear to be "loaded" by different loaders (depending on 709 // the accessing class). |