< prev index next >

src/hotspot/share/opto/subnode.cpp

Print this page




 698 
 699 //------------------------------Idealize---------------------------------------
 700 Node *CmpINode::Ideal( PhaseGVN *phase, bool can_reshape ) {
 701   if (phase->type(in(2))->higher_equal(TypeInt::ZERO)) { // only when in(2) is known to be the constant zero (or TOP)
 702     switch (in(1)->Opcode()) {
 703     case Op_CmpL3:              // Collapse a CmpL3/CmpI into a CmpL
 704       return new CmpLNode(in(1)->in(1),in(1)->in(2)); // compare the CmpL3 operands directly; the -1/0/+1 result vs 0 has the same condition codes
 705     case Op_CmpF3:              // Collapse a CmpF3/CmpI into a CmpF
 706       return new CmpFNode(in(1)->in(1),in(1)->in(2));
 707     case Op_CmpD3:              // Collapse a CmpD3/CmpI into a CmpD
 708       return new CmpDNode(in(1)->in(1),in(1)->in(2));
 709     //case Op_SubI:
 710       // If (x - y) cannot overflow, then ((x - y) <?> 0)
 711       // can be turned into (x <?> y).
 712       // This is handled (with more general cases) by Ideal_sub_algebra.
 713     }
 714   }
 715   return NULL;                  // No change
 716 }
 717 

















 718 
 719 //=============================================================================
 720 // Simplify a CmpL (compare 2 longs ) node, based on local information.
 721 // If both inputs are constants, compare them.
 722 const Type *CmpLNode::sub( const Type *t1, const Type *t2 ) const {
 723   const TypeLong *r0 = t1->is_long(); // Handy access
 724   const TypeLong *r1 = t2->is_long();
 725 
 726   if( r0->_hi < r1->_lo )       // Range is always low?
 727     return TypeInt::CC_LT;
 728   else if( r0->_lo > r1->_hi )  // Range is always high?
 729     return TypeInt::CC_GT;
 730 
 731   else if( r0->is_con() && r1->is_con() ) { // comparing constants?
 732     assert(r0->get_con() == r1->get_con(), "must be equal");
 733     return TypeInt::CC_EQ;      // Equal results.
 734   } else if( r0->_hi == r1->_lo ) // Range is never high?
 735     return TypeInt::CC_LE;
 736   else if( r0->_lo == r1->_hi ) // Range is never low?
 737     return TypeInt::CC_GE;


 915 
 916   // x.getClass() == int.class can never be true (for all primitive types)
 917   // Return a ConP(NULL) node for this case.
 918   if (mirror_type->is_classless()) {
 919     return phase->makecon(TypePtr::NULL_PTR);
 920   }
 921 
 922   // return the ConP(Foo.klass)
 923   assert(mirror_type->is_klass(), "mirror_type should represent a Klass*");
 924   return phase->makecon(TypeKlassPtr::make(mirror_type->as_klass()));
 925 }
 926 
 927 //------------------------------Ideal------------------------------------------
 928 // Normalize comparisons between Java mirror loads to compare the klass instead.
 929 //
 930 // Also check for the case of comparing an unknown klass loaded from the primary
 931 // super-type array vs a known klass with no subtypes.  This amounts to
 932 // checking to see if an unknown klass subtypes a known klass with no subtypes;
 933 // this only happens on an exact match.  We can shorten this test by 1 load.
 934 Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) {




























 935   // Normalize comparisons between Java mirrors into comparisons of the low-
 936   // level klass, where a dependent load could be shortened.
 937   //
 938   // The new pattern has a nice effect of matching the same pattern used in the
 939   // fast path of instanceof/checkcast/Class.isInstance(), which allows
 940   // redundant exact type check to be optimized away by GVN.
 941   // For example, in
 942   //   if (x.getClass() == Foo.class) {
 943   //     Foo foo = (Foo) x;
 944   //     // ... use foo ...
 945   //   }
 946   // a CmpPNode could be shared between if_acmpne and checkcast
 947   {
 948     Node* k1 = isa_java_mirror_load(phase, in(1));
 949     Node* k2 = isa_java_mirror_load(phase, in(2));
 950     Node* conk2 = isa_const_java_mirror(phase, in(2));
 951 
 952     if (k1 && (k2 || conk2)) {
 953       Node* lhs = k1;
 954       Node* rhs = (k2 != NULL) ? k2 : conk2;


1013   // %%% Do this after we fix TypeOopPtr:  Deps are expressive enough now.
1014 
1015   // Object arrays must have their base element have no subtypes
1016   while (superklass->is_obj_array_klass()) {
1017     ciType* elem = superklass->as_obj_array_klass()->element_type();
1018     superklass = elem->as_klass();
1019   }
1020   if (superklass->is_instance_klass()) {
1021     ciInstanceKlass* ik = superklass->as_instance_klass();
1022     if (ik->has_subklass() || ik->is_interface())  return NULL;
1023     // Add a dependency if there is a chance that a subclass will be added later.
1024     if (!ik->is_final()) {
1025       phase->C->dependencies()->assert_leaf_type(ik);
1026     }
1027   }
1028 
1029   // Bypass the dependent load, and compare directly
1030   this->set_req(1,ldk2);
1031 
1032   return this;
















1033 }
1034 
1035 //=============================================================================
1036 //------------------------------sub--------------------------------------------
1037 // Simplify a CmpN (compare 2 pointers) node, based on local information.
1038 // If both inputs are constants, compare them.
1039 const Type *CmpNNode::sub( const Type *t1, const Type *t2 ) const {
1040   const TypePtr *r0 = t1->make_ptr(); // Handy access
1041   const TypePtr *r1 = t2->make_ptr();
1042 
1043   // Undefined inputs make for an undefined result
1044   if ((r0 == NULL) || (r1 == NULL) ||
1045       TypePtr::above_centerline(r0->_ptr) ||
1046       TypePtr::above_centerline(r1->_ptr)) {
1047     return Type::TOP;
1048   }
1049   if (r0 == r1 && r0->singleton()) {
1050     // Equal pointer constants (klasses, nulls, etc.)
1051     return TypeInt::CC_EQ;
1052   }




 698 
 699 //------------------------------Idealize---------------------------------------
 700 Node *CmpINode::Ideal( PhaseGVN *phase, bool can_reshape ) {
 701   if (phase->type(in(2))->higher_equal(TypeInt::ZERO)) { // transform applies only when comparing against a known zero (or TOP)
 702     switch (in(1)->Opcode()) {
 703     case Op_CmpL3:              // Collapse a CmpL3/CmpI into a CmpL
 704       return new CmpLNode(in(1)->in(1),in(1)->in(2)); // a three-way result tested against 0 is just the underlying compare
 705     case Op_CmpF3:              // Collapse a CmpF3/CmpI into a CmpF
 706       return new CmpFNode(in(1)->in(1),in(1)->in(2));
 707     case Op_CmpD3:              // Collapse a CmpD3/CmpI into a CmpD
 708       return new CmpDNode(in(1)->in(1),in(1)->in(2));
 709     //case Op_SubI:
 710       // If (x - y) cannot overflow, then ((x - y) <?> 0)
 711       // can be turned into (x <?> y).
 712       // This is handled (with more general cases) by Ideal_sub_algebra.
 713     }
 714   }
 715   return NULL;                  // No change
 716 }
 717 
 718 //------------------------------Ideal------------------------------------------
 719 Node* CmpLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
 720   if (in(1)->Opcode() == Op_OrL && in(1)->in(1)->Opcode() == Op_CastP2X && in(1)->in(2)->Opcode() == Op_CastP2X) { // matches (CastP2X(a) | CastP2X(b)), a combined null check of two oops; NOTE(review): in(2) is assumed to be the zero constant of that check -- confirm against the emitter
 721     Node* a = in(1)->in(1)->in(1); // original oop under the first cast
 722     Node* b = in(1)->in(2)->in(1); // original oop under the second cast
 723     const Type* ta = phase->type(a);
 724     const Type* tb = phase->type(b);
 725     if (ta->is_zero_type() || tb->is_zero_type()) { // one side is the NULL constant, so the OR is zero iff the other side is also NULL -- equivalent to a pointer compare
 726       return new CmpPNode(a, b);
 727     } else if (!TypePtr::NULL_PTR->higher_equal(ta) || !TypePtr::NULL_PTR->higher_equal(tb)) {
 728       // One operand is never NULL, emit constant false
 729       return new CmpLNode(phase->longcon(0), phase->longcon(1)); // 0 vs 1 can never compare equal
 730     }
 731   }
 732   return NULL;
 733 }
 734 
 735 
 736 //=============================================================================
 737 // Simplify a CmpL (compare 2 longs ) node, based on local information.
 738 // If both inputs are constants, compare them.
 739 const Type *CmpLNode::sub( const Type *t1, const Type *t2 ) const {
 740   const TypeLong *r0 = t1->is_long(); // Handy access
 741   const TypeLong *r1 = t2->is_long();
 742 
 743   if( r0->_hi < r1->_lo )       // Range is always low?
 744     return TypeInt::CC_LT;
 745   else if( r0->_lo > r1->_hi )  // Range is always high?
 746     return TypeInt::CC_GT;
 747 
 748   else if( r0->is_con() && r1->is_con() ) { // comparing constants?
 749     assert(r0->get_con() == r1->get_con(), "must be equal");
 750     return TypeInt::CC_EQ;      // Equal results.
 751   } else if( r0->_hi == r1->_lo ) // Range is never high?
 752     return TypeInt::CC_LE;
 753   else if( r0->_lo == r1->_hi ) // Range is never low?
 754     return TypeInt::CC_GE;


 932 
 933   // x.getClass() == int.class can never be true (for all primitive types)
 934   // Return a ConP(NULL) node for this case.
 935   if (mirror_type->is_classless()) {
 936     return phase->makecon(TypePtr::NULL_PTR);
 937   }
 938 
 939   // return the ConP(Foo.klass)
 940   assert(mirror_type->is_klass(), "mirror_type should represent a Klass*");
 941   return phase->makecon(TypeKlassPtr::make(mirror_type->as_klass()));
 942 }
 943 
 944 //------------------------------Ideal------------------------------------------
 945 // Normalize comparisons between Java mirror loads to compare the klass instead.
 946 //
 947 // Also check for the case of comparing an unknown klass loaded from the primary
 948 // super-type array vs a known klass with no subtypes.  This amounts to
 949 // checking to see if an unknown klass subtypes a known klass with no subtypes;
 950 // this only happens on an exact match.  We can shorten this test by 1 load.
 951 Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
 952   Node* pert = has_perturbed_operand();
 953   if (pert != NULL) {
 954     // Optimize new acmp
 955     Node* a = pert->in(AddPNode::Base); // unperturbed a
 956     Node* b = in(2);
 957     Node* cmp = phase->C->optimize_acmp(phase, a, b);
 958     if (cmp != NULL) {
 959       return cmp;
 960     }
 961     if ( TypePtr::NULL_PTR->higher_equal(phase->type(a)) &&
 962         !TypePtr::NULL_PTR->higher_equal(phase->type(b))) {
 963       // Operand 'b' is never null, swap operands to avoid null check
 964       Node* is_value = phase->C->load_is_value_bit(phase, b);
 965       set_req(1, phase->transform(new AddPNode(b, b, is_value)));
 966       set_req(2, a);
 967       return this;
 968     }
 969   } else {
 970     // Optimize old acmp with value type operands
 971     const TypeInstPtr* ta = phase->type(in(1))->isa_instptr();
 972     const TypeInstPtr* tb = phase->type(in(2))->isa_instptr();
 973     if (((ta != NULL && ta->is_loaded() && ta->is_value_based()) || (tb != NULL && tb->is_loaded() && tb->is_value_based())) &&
 974         (!TypePtr::NULL_PTR->higher_equal(phase->type(in(1))) || !TypePtr::NULL_PTR->higher_equal(phase->type(in(2))))) {
 975       // One operand is a value type and one operand is never null, fold to constant false
 976       return new CmpINode(phase->intcon(0), phase->intcon(1));
 977     }
 978   }
 979 
 980   // Normalize comparisons between Java mirrors into comparisons of the low-
 981   // level klass, where a dependent load could be shortened.
 982   //
 983   // The new pattern has a nice effect of matching the same pattern used in the
 984   // fast path of instanceof/checkcast/Class.isInstance(), which allows
 985   // redundant exact type check to be optimized away by GVN.
 986   // For example, in
 987   //   if (x.getClass() == Foo.class) {
 988   //     Foo foo = (Foo) x;
 989   //     // ... use foo ...
 990   //   }
 991   // a CmpPNode could be shared between if_acmpne and checkcast
 992   {
 993     Node* k1 = isa_java_mirror_load(phase, in(1));
 994     Node* k2 = isa_java_mirror_load(phase, in(2));
 995     Node* conk2 = isa_const_java_mirror(phase, in(2));
 996 
 997     if (k1 && (k2 || conk2)) {
 998       Node* lhs = k1;
 999       Node* rhs = (k2 != NULL) ? k2 : conk2;


1058   // %%% Do this after we fix TypeOopPtr:  Deps are expressive enough now.
1059 
1060   // Object arrays must have their base element have no subtypes
1061   while (superklass->is_obj_array_klass()) {
1062     ciType* elem = superklass->as_obj_array_klass()->element_type();
1063     superklass = elem->as_klass();
1064   }
1065   if (superklass->is_instance_klass()) {
1066     ciInstanceKlass* ik = superklass->as_instance_klass();
1067     if (ik->has_subklass() || ik->is_interface())  return NULL;
1068     // Add a dependency if there is a chance that a subclass will be added later.
1069     if (!ik->is_final()) {
1070       phase->C->dependencies()->assert_leaf_type(ik);
1071     }
1072   }
1073 
1074   // Bypass the dependent load, and compare directly
1075   this->set_req(1,ldk2);
1076 
1077   return this;
1078 }
1079 
1080 // Checks if one operand is perturbed and returns it
1081 Node* CmpPNode::has_perturbed_operand() const {
1082   // We always perturb the first operand
1083   AddPNode* addP = in(1)->isa_AddP(); // a perturbed oop appears as AddP(oop, oop, is_value_bit) -- see CmpPNode::Ideal
1084   if (addP != NULL) {
1085     Node* base = addP->in(AddPNode::Base);
1086     if (base->is_top()) {
1087       // AddP with a TOP base is a raw-pointer address computation, not a perturbed oop
1088       return NULL;
1089     }
1090     assert(UseNewAcmp, "unexpected perturbed oop");
1091     return in(1);
1092   }
1093   return NULL;
1094 }
1095 
1096 //=============================================================================
1097 //------------------------------sub--------------------------------------------
1098 // Simplify a CmpN (compare 2 pointers) node, based on local information.
1099 // If both inputs are constants, compare them.
1100 const Type *CmpNNode::sub( const Type *t1, const Type *t2 ) const {
1101   const TypePtr *r0 = t1->make_ptr(); // Handy access
1102   const TypePtr *r1 = t2->make_ptr();
1103 
1104   // Undefined inputs make for an undefined result
1105   if ((r0 == NULL) || (r1 == NULL) ||
1106       TypePtr::above_centerline(r0->_ptr) ||
1107       TypePtr::above_centerline(r1->_ptr)) {
1108     return Type::TOP;
1109   }
1110   if (r0 == r1 && r0->singleton()) {
1111     // Equal pointer constants (klasses, nulls, etc.)
1112     return TypeInt::CC_EQ;
1113   }


< prev index next >