src/share/vm/opto/ifnode.cpp

 348   region_s->init_req(2, iff_x_t);
 349   igvn->register_new_node_with_optimizer( region_s );
 350 
 351   // Merge the FALSE paths
 352   Node *region_f = new RegionNode(3);
 353   igvn->_worklist.push(region_f);
 354   region_f->init_req(1, iff_c_f);
 355   region_f->init_req(2, iff_x_f);
 356   igvn->register_new_node_with_optimizer( region_f );
 357 
 358   igvn->hash_delete(cmp);// Remove soon-to-be-dead node from hash table.
 359   cmp->set_req(1,NULL);  // Whack the inputs to cmp because it will be dead
 360   cmp->set_req(2,NULL);
 361   // Check for all uses of the Phi and give them a new home.
 362   // The 'cmp' got cloned, but CastPP/IIs need to be moved.
 363   Node *phi_s = NULL;     // do not construct unless needed
 364   Node *phi_f = NULL;     // do not construct unless needed
 365   for (DUIterator_Last i2min, i2 = phi->last_outs(i2min); i2 >= i2min; --i2) {
 366     Node* v = phi->last_out(i2);// User of the phi
 367     igvn->rehash_node_delayed(v); // Have to fixup other Phi users
 368     uint vop = v->Opcode();
 369     Node *proj = NULL;
 370     if( vop == Op_Phi ) {       // Remote merge point
 371       Node *r = v->in(0);
 372       for (uint i3 = 1; i3 < r->req(); i3++)
 373         if (r->in(i3) && r->in(i3)->in(0) == iff) {
 374           proj = r->in(i3);
 375           break;
 376         }
 377     } else if( v->is_ConstraintCast() ) {
 378       proj = v->in(0);          // Controlling projection
 379     } else {
 380       assert( 0, "do not know how to handle this guy" );
 381     }
 382 
 383     Node *proj_path_data, *proj_path_ctrl;
 384     if( proj->Opcode() == Op_IfTrue ) {
 385       if( phi_s == NULL ) {
 386         // Only construct phi_s if needed; otherwise it provides an
 387         // interfering use.
 388         phi_s = PhiNode::make_blank(region_s,phi);
 389         phi_s->init_req( 1, phi_c );
 390         phi_s->init_req( 2, phi_x );
 391         hook->add_req(phi_s);
 392         phi_s = phase->transform(phi_s);
 393       }
 394       proj_path_data = phi_s;
 395       proj_path_ctrl = region_s;
 396     } else {
 397       if( phi_f == NULL ) {
 398         // Only construct phi_f if needed; otherwise it provides an
 399         // interfering use.
 400         phi_f = PhiNode::make_blank(region_f,phi);
 401         phi_f->init_req( 1, phi_c );
 402         phi_f->init_req( 2, phi_x );
 403         hook->add_req(phi_f);
 404         phi_f = phase->transform(phi_f);
 405       }
 406       proj_path_data = phi_f;
 407       proj_path_ctrl = region_f;
 408     }
 409 
 410     // Fixup 'v' for the split
 411     if( vop == Op_Phi ) {       // Remote merge point
 412       uint i;
 413       for( i = 1; i < v->req(); i++ )
 414         if( v->in(i) == phi )
 415           break;
 416       v->set_req(i, proj_path_data );
 417     } else if( v->is_ConstraintCast() ) {
 418       v->set_req(0, proj_path_ctrl );
 419       v->set_req(1, proj_path_data );
 420     } else
 421       ShouldNotReachHere();
 422   }
 423 
 424   // Now replace the original iff's True/False with region_s/region_f.
 425   // This makes the original iff go dead.
 426   for (DUIterator_Last i3min, i3 = iff->last_outs(i3min); i3 >= i3min; --i3) {
 427     Node* p = iff->last_out(i3);
 428     assert( p->Opcode() == Op_IfTrue || p->Opcode() == Op_IfFalse, "" );
 429     Node *u = (p->Opcode() == Op_IfTrue) ? region_s : region_f;
 430     // Replace p with u
 431     igvn->add_users_to_worklist(p);
 432     for (DUIterator_Last lmin, l = p->last_outs(lmin); l >= lmin;) {
 433       Node* x = p->last_out(l);
 434       igvn->hash_delete(x);
 435       uint uses_found = 0;
 436       for( uint j = 0; j < x->req(); j++ ) {
 437         if( x->in(j) == p ) {
 438           x->set_req(j, u);
 439           uses_found++;
 440         }
 441       }
 442       l -= uses_found;    // we deleted 1 or more copies of this edge
 443     }
 444     igvn->remove_dead_node(p);
 445   }
 446 
 447   // Force the original merge dead
 448   igvn->hash_delete(r);
 449   // First, remove region's dead users.


 458     l -= 1;
 459   }
 460   igvn->remove_dead_node(r);
 461 
 462   // Now remove the bogus extra edges used to keep things alive
 463   igvn->remove_dead_node( hook );
 464 
 465   // Must return either the original node (now dead) or a new node
 466   // (Do not return a top here, since that would break the uniqueness of top.)
 467   return new ConINode(TypeInt::ZERO);
 468 }
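
The code above completes a split of the test through a value merge: instead of testing the merged (Phi) value once, the comparison is cloned onto each incoming path and the TRUE/FALSE outcomes are gathered in region_s and region_f. A minimal source-level sketch of that effect (illustration only; test_merged and test_split are made-up names, not HotSpot code):

#include <cassert>

// Illustration only: the shape of a test before and after splitting it
// through a merge point. test_merged tests the merged value once; test_split
// clones the test onto each incoming path, which the regions above then merge.
static int test_merged(bool p, int a, int b) {
  int v = p ? a : b;           // the merge (Phi)
  return (v > 0) ? 1 : 0;      // one test of the merged value
}

static int test_split(bool p, int a, int b) {
  if (p) return (a > 0) ? 1 : 0;   // test cloned onto the first path
  return (b > 0) ? 1 : 0;          // and onto the second path
}

int main() {
  for (int a = -2; a <= 2; a++) {
    for (int b = -2; b <= 2; b++) {
      assert(test_merged(true,  a, b) == test_split(true,  a, b));
      assert(test_merged(false, a, b) == test_split(false, a, b));
    }
  }
  return 0;
}
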
 469 
 470 // If this IfNode follows a range check pattern, return the projection
 471 // for the failed path
 472 ProjNode* IfNode::range_check_trap_proj(int& flip_test, Node*& l, Node*& r) {
 473   Node* b = in(1);
 474   if (b == NULL || !b->is_Bool())  return NULL;
 475   BoolNode* bn = b->as_Bool();
 476   Node* cmp = bn->in(1);
 477   if (cmp == NULL)  return NULL;
 478   if (cmp->Opcode() != Op_CmpU)  return NULL;
 479 
 480   l = cmp->in(1);
 481   r = cmp->in(2);
 482   flip_test = 1;
 483   if (bn->_test._test == BoolTest::le) {
 484     l = cmp->in(2);
 485     r = cmp->in(1);
 486     flip_test = 2;
 487   } else if (bn->_test._test != BoolTest::lt) {
 488     return NULL;
 489   }
 490   if (l->is_top())  return NULL;   // Top input means dead test
 491   if (r->Opcode() != Op_LoadRange && !is_RangeCheck())  return NULL;
 492 
 493   // We have recognized one of these forms:
 494   //  Flip 1:  If (Bool[<] CmpU(l, LoadRange)) ...
 495   //  Flip 2:  If (Bool[<=] CmpU(LoadRange, l)) ...
 496 
 497   ProjNode* iftrap = proj_out(flip_test == 2 ? true : false);
 498   return iftrap;
 499 }
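
Both recognized forms reduce the original two-sided bounds test to one unsigned comparison against the array length. A minimal sketch of that equivalence in plain C++ (illustration only; in_bounds is a made-up name, not HotSpot code):

#include <cassert>

// A negative index wraps to a huge unsigned value, so it fails the unsigned
// '<' against the length exactly like an index that is too large.
static bool in_bounds(int i, int len) {
  return (unsigned)i < (unsigned)len;
}

int main() {
  assert( in_bounds(3, 10));
  assert(!in_bounds(-1, 10));   // negative index rejected
  assert(!in_bounds(10, 10));   // index == length rejected
  return 0;
}
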
 500 
 501 
 502 //------------------------------is_range_check---------------------------------
 503 // Return 0 if not a range check.  Return 1 if a range check and set index and
 504 // offset.  Return 2 if we had to negate the test.  Index is NULL if the check
 505 // is versus a constant.
 506 int RangeCheckNode::is_range_check(Node* &range, Node* &index, jint &offset) {
 507   int flip_test = 0;
 508   Node* l = NULL;
 509   Node* r = NULL;
 510   ProjNode* iftrap = range_check_trap_proj(flip_test, l, r);
 511 
 512   if (iftrap == NULL) {
 513     return 0;
 514   }
 515 
 516   // Make sure it's a real range check by requiring an uncommon trap
 517   // along the OOB path.  Otherwise, it's possible that the user wrote
 518   // something which optimized to look like a range check but behaves
 519   // in some other way.
 520   if (iftrap->is_uncommon_trap_proj(Deoptimization::Reason_range_check) == NULL) {
 521     return 0;
 522   }
 523 
 524   // Look for index+offset form
 525   Node* ind = l;
 526   jint  off = 0;
 527   if (l->is_top()) {
 528     return 0;
 529   } else if (l->Opcode() == Op_AddI) {
 530     if ((off = l->in(1)->find_int_con(0)) != 0) {
 531       ind = l->in(2)->uncast();
 532     } else if ((off = l->in(2)->find_int_con(0)) != 0) {
 533       ind = l->in(1)->uncast();
 534     }
 535   } else if ((off = l->find_int_con(-1)) >= 0) {
 536     // constant offset with no variable index
 537     ind = NULL;
 538   } else {
 539     // variable index with no constant offset (or dead negative index)
 540     off = 0;
 541   }
 542 
 543   // Return all the values:
 544   index  = ind;
 545   offset = off;
 546   range  = r;
 547   return flip_test;
 548 }
 549 


 610       (din4 = din2->in(0)) ) {  // Right path up one
 611     if( din3->is_Call() &&      // Handle a slow-path call on either arm
 612         (din3 = din3->in(0)) )
 613       din3 = din3->in(0);
 614     if( din4->is_Call() &&      // Handle a slow-path call on either arm
 615         (din4 = din4->in(0)) )
 616       din4 = din4->in(0);
 617     if( din3 == din4 && din3->is_If() )
 618       return din3;              // Skip around diamonds
 619   }
 620 
 621   // Give up the search at true merges
 622   return NULL;                  // Dead loop?  Or hit root?
 623 }
 624 
 625 
 626 //------------------------------filtered_int_type--------------------------------
 627 // Return a possibly more restrictive type for val based on condition control flow for an if
 628 const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node *val, Node* if_proj) {
 629   assert(if_proj &&
 630          (if_proj->Opcode() == Op_IfTrue || if_proj->Opcode() == Op_IfFalse), "expecting an if projection");
 631   if (if_proj->in(0) && if_proj->in(0)->is_If()) {
 632     IfNode* iff = if_proj->in(0)->as_If();
 633     if (iff->in(1) && iff->in(1)->is_Bool()) {
 634       BoolNode* bol = iff->in(1)->as_Bool();
 635       if (bol->in(1) && bol->in(1)->is_Cmp()) {
 636         const CmpNode* cmp  = bol->in(1)->as_Cmp();
 637         if (cmp->in(1) == val) {
 638           const TypeInt* cmp2_t = gvn->type(cmp->in(2))->isa_int();
 639           if (cmp2_t != NULL) {
 640             jint lo = cmp2_t->_lo;
 641             jint hi = cmp2_t->_hi;
 642             BoolTest::mask msk = if_proj->Opcode() == Op_IfTrue ? bol->_test._test : bol->_test.negate();
 643             switch (msk) {
 644             case BoolTest::ne:
 645               // Can't refine type
 646               return NULL;
 647             case BoolTest::eq:
 648               return cmp2_t;
 649             case BoolTest::lt:
 650               lo = TypeInt::INT->_lo;
 651               if (hi - 1 < hi) {
 652                 hi = hi - 1;
 653               }
 654               break;
 655             case BoolTest::le:
 656               lo = TypeInt::INT->_lo;
 657               break;
 658             case BoolTest::gt:
 659               if (lo + 1 > lo) {
 660                 lo = lo + 1;
 661               }
 662               hi = TypeInt::INT->_hi;


 702 // an explicit range check:
 703 // if (index < 0 || index >= array.length) {
 704 // which may need a null check to guard the LoadRange
 705 //
 706 //                   If
 707 //                  / \
 708 //                 /   \
 709 //                /     \
 710 //              If      unc
 711 //              /\
 712 //             /  \
 713 //            /    \
 714 //           /      unc
 715 //
 716 
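
The explicit form in the diagram is what the folding below targets: two signed comparisons of the same value against constants collapse into a single unsigned comparison of a shifted value. A hedged sketch of the underlying identity, assuming lo <= hi (illustration only; outside is a made-up name, not the node-building code):

#include <cassert>

// (i < lo || i > hi)  <==>  (unsigned)(i - lo) > (unsigned)(hi - lo)
// The subtraction is done in unsigned arithmetic so wrap-around is defined.
static bool outside(int i, int lo, int hi) {
  return ((unsigned)i - (unsigned)lo) > ((unsigned)hi - (unsigned)lo);
}

int main() {
  assert( outside(-1, 0, 9));   // below the interval
  assert( outside(10, 0, 9));   // above the interval
  assert(!outside( 0, 0, 9));   // the bounds themselves are inside
  assert(!outside( 9, 0, 9));
  return 0;
}
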
 717 // Is the comparison for this If suitable for folding?
 718 bool IfNode::cmpi_folds(PhaseIterGVN* igvn) {
 719   return in(1) != NULL &&
 720     in(1)->is_Bool() &&
 721     in(1)->in(1) != NULL &&
 722     in(1)->in(1)->Opcode() == Op_CmpI &&
 723     in(1)->in(1)->in(2) != NULL &&
 724     in(1)->in(1)->in(2) != igvn->C->top() &&
 725     (in(1)->as_Bool()->_test.is_less() ||
 726      in(1)->as_Bool()->_test.is_greater());
 727 }
 728 
 729 // Is a dominating control suitable for folding with this if?
 730 bool IfNode::is_ctrl_folds(Node* ctrl, PhaseIterGVN* igvn) {
 731   return ctrl != NULL &&
 732     ctrl->is_Proj() &&
 733     ctrl->in(0) != NULL &&
 734     ctrl->in(0)->Opcode() == Op_If &&
 735     ctrl->in(0)->outcnt() == 2 &&
 736     ctrl->in(0)->as_If()->cmpi_folds(igvn) &&
 737     // Must compare same value
 738     ctrl->in(0)->in(1)->in(1)->in(1) != NULL &&
 739     ctrl->in(0)->in(1)->in(1)->in(1) == in(1)->in(1)->in(1);
 740 }
 741 
 742 // Do this If and the dominating If share a region?
 743 bool IfNode::has_shared_region(ProjNode* proj, ProjNode*& success, ProjNode*& fail) {
 744   ProjNode* otherproj = proj->other_if_proj();
 745   Node* otherproj_ctrl_use = otherproj->unique_ctrl_out();
 746   RegionNode* region = (otherproj_ctrl_use != NULL && otherproj_ctrl_use->is_Region()) ? otherproj_ctrl_use->as_Region() : NULL;
 747   success = NULL;
 748   fail = NULL;
 749 
 750   if (otherproj->outcnt() == 1 && region != NULL && !region->has_phi()) {
 751     for (int i = 0; i < 2; i++) {
 752       ProjNode* proj = proj_out(i);
 753       if (success == NULL && proj->outcnt() == 1 && proj->unique_out() == region) {
 754         success = proj;


1060     // so use a special trap reason to mark this pair of CmpI nodes as
 1061     // a bad candidate for folding. On recompilation we won't fold them
 1062     // and we may trap again but this time we'll know which branch
 1063     // traps.
1064     trap_request = Deoptimization::make_trap_request(Deoptimization::Reason_unstable_fused_if, action);
1065   }
1066   igvn->replace_input_of(dom_unc, TypeFunc::Parms, igvn->intcon(trap_request));
1067   return res;
1068 }
1069 
1070 // If we are turning 2 CmpI nodes into a CmpU that follows the pattern
 1071 // of a range check on index i, on 64-bit the compares may be followed
 1072 // by memory accesses using i as an index. In that case, the CmpU tells
1073 // us something about the values taken by i that can help the compiler
1074 // (see Compile::conv_I2X_index())
1075 void IfNode::improve_address_types(Node* l, Node* r, ProjNode* fail, PhaseIterGVN* igvn) {
1076 #ifdef _LP64
1077   ResourceMark rm;
1078   Node_Stack stack(2);
1079 
1080   assert(r->Opcode() == Op_LoadRange, "unexpected range check");
1081   const TypeInt* array_size = igvn->type(r)->is_int();
1082 
1083   stack.push(l, 0);
1084 
1085   while(stack.size() > 0) {
1086     Node* n = stack.node();
1087     uint start = stack.index();
1088 
1089     uint i = start;
1090     for (; i < n->outcnt(); i++) {
1091       Node* use = n->raw_out(i);
1092       if (stack.size() == 1) {
1093         if (use->Opcode() == Op_ConvI2L) {
1094           const TypeLong* bounds = use->as_Type()->type()->is_long();
1095           if (bounds->_lo <= array_size->_lo && bounds->_hi >= array_size->_hi &&
1096               (bounds->_lo != array_size->_lo || bounds->_hi != array_size->_hi)) {
1097             stack.set_index(i+1);
1098             stack.push(use, 0);
1099             break;
1100           }
1101         }
1102       } else if (use->is_Mem()) {
1103         Node* ctrl = use->in(0);
1104         for (int i = 0; i < 10 && ctrl != NULL && ctrl != fail; i++) {
1105           ctrl = up_one_dom(ctrl);
1106         }
1107         if (ctrl == fail) {
1108           Node* init_n = stack.node_at(1);
1109           assert(init_n->Opcode() == Op_ConvI2L, "unexpected first node");
1110           // Create a new narrow ConvI2L node that is dependent on the range check
1111           Node* new_n = igvn->C->conv_I2X_index(igvn, l, array_size, fail);
1112 
 1113           // The type of the ConvI2L may be widened and so the new
1114           // ConvI2L may not be better than an existing ConvI2L
1115           if (new_n != init_n) {
1116             for (uint j = 2; j < stack.size(); j++) {
1117               Node* n = stack.node_at(j);
1118               Node* clone = n->clone();
1119               int rep = clone->replace_edge(init_n, new_n);
1120               assert(rep > 0, "can't find expected node?");
1121               clone = igvn->transform(clone);
1122               init_n = n;
1123               new_n = clone;
1124             }
1125             igvn->hash_delete(use);
1126             int rep = use->replace_edge(init_n, new_n);
1127             assert(rep > 0, "can't find expected node?");
1128             igvn->transform(use);
1129             if (init_n->outcnt() == 0) {


1133         }
1134       } else if (use->in(0) == NULL && (igvn->type(use)->isa_long() ||
1135                                         igvn->type(use)->isa_ptr())) {
1136         stack.set_index(i+1);
1137         stack.push(use, 0);
1138         break;
1139       }
1140     }
1141     if (i == n->outcnt()) {
1142       stack.pop();
1143     }
1144   }
1145 #endif
1146 }
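
The payoff of the narrowed ConvI2L above is that a range-checked int index cannot overflow when small constants are added, so widening to 64 bits commutes with the add and the address math can be combined (see Compile::conv_I2X_index()). A small sketch of that property in plain C++ (illustration only; widening_commutes is a made-up name):

#include <cassert>

// Once 0 <= i and i + 1 < len are known, the int add cannot overflow, so
// widening before or after the add yields the same 64-bit value.
static bool widening_commutes(int i, int len) {
  if (!((unsigned)i < (unsigned)(len - 1))) return true;  // outside the guarded range
  long long widen_then_add = (long long)i + 1;
  long long add_then_widen = (long long)(i + 1);
  return widen_then_add == add_then_widen;
}

int main() {
  for (int i = -5; i < 105; i++) {
    assert(widening_commutes(i, 100));
  }
  return 0;
}
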
1147 
1148 bool IfNode::is_cmp_with_loadrange(ProjNode* proj) {
1149   if (in(1) != NULL &&
1150       in(1)->in(1) != NULL &&
1151       in(1)->in(1)->in(2) != NULL) {
1152     Node* other = in(1)->in(1)->in(2);
1153     if (other->Opcode() == Op_LoadRange &&
1154         ((other->in(0) != NULL && other->in(0) == proj) ||
1155          (other->in(0) == NULL &&
1156           other->in(2) != NULL &&
1157           other->in(2)->is_AddP() &&
1158           other->in(2)->in(1) != NULL &&
1159           other->in(2)->in(1)->Opcode() == Op_CastPP &&
1160           other->in(2)->in(1)->in(0) == proj))) {
1161       return true;
1162     }
1163   }
1164   return false;
1165 }
1166 
1167 bool IfNode::is_null_check(ProjNode* proj, PhaseIterGVN* igvn) {
1168   Node* other = in(1)->in(1)->in(2);
1169   if (other->in(MemNode::Address) != NULL &&
1170       proj->in(0)->in(1) != NULL &&
1171       proj->in(0)->in(1)->is_Bool() &&
1172       proj->in(0)->in(1)->in(1) != NULL &&
1173       proj->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1174       proj->in(0)->in(1)->in(1)->in(2) != NULL &&
1175       proj->in(0)->in(1)->in(1)->in(1) == other->in(MemNode::Address)->in(AddPNode::Address)->uncast() &&
1176       igvn->type(proj->in(0)->in(1)->in(1)->in(2)) == TypePtr::NULL_PTR) {
1177     return true;
1178   }
1179   return false;
1180 }
1181 
1182 // Check that the If that is in between the 2 integer comparisons has
1183 // no side effect
1184 bool IfNode::is_side_effect_free_test(ProjNode* proj, PhaseIterGVN* igvn) {
1185   if (proj != NULL &&
1186       proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1187       proj->outcnt() <= 2) {
1188     if (proj->outcnt() == 1 ||
1189         // Allow simple null check from LoadRange
1190         (is_cmp_with_loadrange(proj) && is_null_check(proj, igvn))) {
1191       CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
1192       CallStaticJavaNode* dom_unc = proj->in(0)->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
1193 


1223 
1224   Node* new_unc = dom_unc->clone();
1225   call_proj = call_proj->clone();
1226   halt = halt->clone();
1227   Node* c = otherproj->clone();
1228 
1229   c = igvn->transform(c);
1230   new_unc->set_req(TypeFunc::Parms, unc->in(TypeFunc::Parms));
1231   new_unc->set_req(0, c);
1232   new_unc = igvn->transform(new_unc);
1233   call_proj->set_req(0, new_unc);
1234   call_proj = igvn->transform(call_proj);
1235   halt->set_req(0, call_proj);
1236   halt = igvn->transform(halt);
1237 
1238   igvn->replace_node(otherproj, igvn->C->top());
1239   igvn->C->root()->add_req(halt);
1240 }
1241 
1242 Node* IfNode::fold_compares(PhaseIterGVN* igvn) {
1243   if (Opcode() != Op_If) return NULL;
1244 
1245   if (cmpi_folds(igvn)) {
1246     Node* ctrl = in(0);
1247     if (is_ctrl_folds(ctrl, igvn) &&
1248         ctrl->outcnt() == 1) {
 1249       // An integer comparison immediately dominated by another integer
1250       // comparison
1251       ProjNode* success = NULL;
1252       ProjNode* fail = NULL;
1253       ProjNode* dom_cmp = ctrl->as_Proj();
1254       if (has_shared_region(dom_cmp, success, fail) &&
1255           // Next call modifies graph so must be last
1256           fold_compares_helper(dom_cmp, success, fail, igvn)) {
1257         return this;
1258       }
1259       if (has_only_uncommon_traps(dom_cmp, success, fail, igvn) &&
1260           // Next call modifies graph so must be last
1261           fold_compares_helper(dom_cmp, success, fail, igvn)) {
1262         return merge_uncommon_traps(dom_cmp, success, fail, igvn);
1263       }


1278           // Next call modifies graph so must be last
1279           fold_compares_helper(dom_cmp, success, fail, igvn)) {
1280         reroute_side_effect_free_unc(other_cmp, dom_cmp, igvn);
1281         return merge_uncommon_traps(dom_cmp, success, fail, igvn);
1282       }
1283     }
1284   }
1285   return NULL;
1286 }
1287 
1288 //------------------------------remove_useless_bool----------------------------
1289 // Check for people making a useless boolean: things like
1290 // if( (x < y ? true : false) ) { ... }
1291 // Replace with if( x < y ) { ... }
1292 static Node *remove_useless_bool(IfNode *iff, PhaseGVN *phase) {
1293   Node *i1 = iff->in(1);
1294   if( !i1->is_Bool() ) return NULL;
1295   BoolNode *bol = i1->as_Bool();
1296 
1297   Node *cmp = bol->in(1);
1298   if( cmp->Opcode() != Op_CmpI ) return NULL;
1299 
1300   // Must be comparing against a bool
1301   const Type *cmp2_t = phase->type( cmp->in(2) );
1302   if( cmp2_t != TypeInt::ZERO &&
1303       cmp2_t != TypeInt::ONE )
1304     return NULL;
1305 
1306   // Find a prior merge point merging the boolean
1307   i1 = cmp->in(1);
1308   if( !i1->is_Phi() ) return NULL;
1309   PhiNode *phi = i1->as_Phi();
1310   if( phase->type( phi ) != TypeInt::BOOL )
1311     return NULL;
1312 
1313   // Check for diamond pattern
1314   int true_path = phi->is_diamond_phi();
1315   if( true_path == 0 ) return NULL;
1316 
1317   // Make sure that iff and the control of the phi are different. This
1318   // should really only happen for dead control flow since it requires


1394     return res;
1395   }
1396 
1397   // Check for people making a useless boolean: things like
1398   // if( (x < y ? true : false) ) { ... }
1399   // Replace with if( x < y ) { ... }
1400   Node *bol2 = remove_useless_bool(this, phase);
1401   if( bol2 ) return bol2;
1402 
1403   if (in(0) == NULL) return NULL;     // Dead loop?
1404 
1405   PhaseIterGVN *igvn = phase->is_IterGVN();
1406   Node* result = fold_compares(igvn);
1407   if (result != NULL) {
1408     return result;
1409   }
1410 
1411   // Scan for an equivalent test
1412   Node *cmp;
1413   int dist = 0;               // Cutoff limit for search
1414   int op = Opcode();
1415   if( op == Op_If &&
1416       (cmp=in(1)->in(1))->Opcode() == Op_CmpP ) {
1417     if( cmp->in(2) != NULL && // make sure cmp is not already dead
1418         cmp->in(2)->bottom_type() == TypePtr::NULL_PTR ) {
1419       dist = 64;              // Limit for null-pointer scans
1420     } else {
1421       dist = 4;               // Do not bother for random pointer tests
1422     }
1423   } else {
1424     dist = 4;                 // Limit for random junky scans
1425   }
1426 
1427   Node* prev_dom = search_identical(dist);
1428 
1429   if (prev_dom == NULL) {
1430     return NULL;
1431   }
1432 
1433   // Replace dominated IfNode
1434   return dominated_by(prev_dom, igvn);
1435 }
1436 
1437 //------------------------------dominated_by-----------------------------------
1438 Node* IfNode::dominated_by(Node* prev_dom, PhaseIterGVN *igvn) {
1439 #ifndef PRODUCT
1440   if (TraceIterativeGVN) {
1441     tty->print("   Removing IfNode: "); this->dump();
1442   }
1443   if (VerifyOpto && !igvn->allow_progress()) {
1444     // Found an equivalent dominating test,
 1445     // we cannot guarantee reaching a fixpoint for these during iterative GVN
1446     // since intervening nodes may not change.
1447     return NULL;
1448   }
1449 #endif
1450 
1451   igvn->hash_delete(this);      // Remove self to prevent spurious V-N
1452   Node *idom = in(0);
1453   // Need opcode to decide which way 'this' test goes
1454   int prev_op = prev_dom->Opcode();
1455   Node *top = igvn->C->top(); // Shortcut to top
1456 
 1457   // Loop predicates may have dependent checks which should not
 1458   // be skipped. For example, a range check predicate has two checks,
 1459   // for the lower and upper bounds.
1460   ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj();
1461   if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != NULL)
1462    prev_dom = idom;
1463 
1464   // Now walk the current IfNode's projections.
1465   // Loop ends when 'this' has no more uses.
1466   for (DUIterator_Last imin, i = last_outs(imin); i >= imin; --i) {
1467     Node *ifp = last_out(i);     // Get IfTrue/IfFalse
1468     igvn->add_users_to_worklist(ifp);
1469     // Check which projection it is and set target.
1470     // Data-target is either the dominating projection of the same type
1471     // or TOP if the dominating projection is of opposite type.
1472     // Data-target will be used as the new control edge for the non-CFG
1473     // nodes like Casts and Loads.
1474     Node *data_target = (ifp->Opcode() == prev_op) ? prev_dom : top;


1488       } else {                      // Else, for control producers,
1489         igvn->replace_input_of(s, 0, data_target); // Move child to data-target
1490       }
1491     } // End for each child of a projection
1492 
1493     igvn->remove_dead_node(ifp);
1494   } // End for each IfTrue/IfFalse child of If
1495 
1496   // Kill the IfNode
1497   igvn->remove_dead_node(this);
1498 
1499   // Must return either the original node (now dead) or a new node
1500   // (Do not return a top here, since that would break the uniqueness of top.)
1501   return new ConINode(TypeInt::ZERO);
1502 }
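
At the source level this corresponds to removing a test whose outcome is already decided by an identical dominating test: data users of the dominated projections are re-homed on the dominating projection of the same type, and the opposite arm becomes dead. A hedged illustration of the effect only, not of the graph surgery (clamp_to_zero is a made-up name):

#include <cassert>

// The second "x < 0" test is dominated by an identical test, so where it
// executes its outcome is already known and the branch is dead code.
static int clamp_to_zero(int x) {
  if (x < 0) return 0;     // dominating test
  if (x < 0) return -1;    // dominated test: never taken on this path
  return x;
}

int main() {
  assert(clamp_to_zero(-3) == 0);
  assert(clamp_to_zero( 7) == 7);
  return 0;
}
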
1503 
1504 Node* IfNode::search_identical(int dist) {
1505   // Setup to scan up the CFG looking for a dominating test
1506   Node* dom = in(0);
1507   Node* prev_dom = this;
1508   int op = Opcode();
1509   // Search up the dominator tree for an If with an identical test
1510   while (dom->Opcode() != op    ||  // Not same opcode?
1511          dom->in(1)    != in(1) ||  // Not same input 1?
1512          (req() == 3 && dom->in(2) != in(2)) || // Not same input 2?
1513          prev_dom->in(0) != dom) {  // One path of test does not dominate?
1514     if (dist < 0) return NULL;
1515 
1516     dist--;
1517     prev_dom = dom;
1518     dom = up_one_dom(dom);
1519     if (!dom) return NULL;
1520   }
1521 
1522   // Check that we did not follow a loop back to ourselves
1523   if (this == dom) {
1524     return NULL;
1525   }
1526 
1527 #ifndef PRODUCT
1528   if (dist > 2) { // Add to count of NULL checks elided


 1673     // Try to remove extra range checks.  Also, 'up_one_dom' gives up at merges
1674     // so all checks we inspect post-dominate the top-most check we find.
1675     // If we are going to fail the current check and we reach the top check
1676     // then we are guaranteed to fail, so just start interpreting there.
1677     // We 'expand' the top 3 range checks to include all post-dominating
1678     // checks.
1679 
1680     // The top 3 range checks seen
 1681     const int NRC = 3;
1682     RangeCheck prev_checks[NRC];
1683     int nb_checks = 0;
1684 
1685     // Low and high offsets seen so far
1686     jint off_lo = offset1;
1687     jint off_hi = offset1;
1688 
1689     bool found_immediate_dominator = false;
1690 
1691     // Scan for the top checks and collect range of offsets
1692     for (int dist = 0; dist < 999; dist++) { // Range-Check scan limit
 1693       if (dom->Opcode() == Op_RangeCheck &&  // Is a range check?
1694           prev_dom->in(0) == dom) { // One path of test does dominate?
1695         if (dom == this) return NULL; // dead loop
1696         // See if this is a range check
1697         Node* index2;
1698         Node* range2;
1699         jint offset2;
1700         int flip2 = dom->as_RangeCheck()->is_range_check(range2, index2, offset2);
1701         // See if this is a _matching_ range check, checking against
1702         // the same array bounds.
1703         if (flip2 == flip1 && range2 == range1 && index2 == index1 &&
1704             dom->outcnt() == 2) {
1705           if (nb_checks == 0 && dom->in(1) == in(1)) {
1706             // Found an immediately dominating test at the same offset.
1707             // This kind of back-to-back test can be eliminated locally,
1708             // and there is no need to search further for dominating tests.
1709             assert(offset2 == offset1, "Same test but different offsets");
1710             found_immediate_dominator = true;
1711             break;
1712           }
1713           // Gather expanded bounds




 348   region_s->init_req(2, iff_x_t);
 349   igvn->register_new_node_with_optimizer( region_s );
 350 
 351   // Merge the FALSE paths
 352   Node *region_f = new RegionNode(3);
 353   igvn->_worklist.push(region_f);
 354   region_f->init_req(1, iff_c_f);
 355   region_f->init_req(2, iff_x_f);
 356   igvn->register_new_node_with_optimizer( region_f );
 357 
 358   igvn->hash_delete(cmp);// Remove soon-to-be-dead node from hash table.
 359   cmp->set_req(1,NULL);  // Whack the inputs to cmp because it will be dead
 360   cmp->set_req(2,NULL);
 361   // Check for all uses of the Phi and give them a new home.
 362   // The 'cmp' got cloned, but CastPP/IIs need to be moved.
 363   Node *phi_s = NULL;     // do not construct unless needed
 364   Node *phi_f = NULL;     // do not construct unless needed
 365   for (DUIterator_Last i2min, i2 = phi->last_outs(i2min); i2 >= i2min; --i2) {
 366     Node* v = phi->last_out(i2);// User of the phi
 367     igvn->rehash_node_delayed(v); // Have to fixup other Phi users
 368     Opcodes vop = v->Opcode();
 369     Node *proj = NULL;
 370     if( vop == Opcodes::Op_Phi ) {       // Remote merge point
 371       Node *r = v->in(0);
 372       for (uint i3 = 1; i3 < r->req(); i3++)
 373         if (r->in(i3) && r->in(i3)->in(0) == iff) {
 374           proj = r->in(i3);
 375           break;
 376         }
 377     } else if( v->is_ConstraintCast() ) {
 378       proj = v->in(0);          // Controlling projection
 379     } else {
 380       assert( 0, "do not know how to handle this guy" );
 381     }
 382 
 383     Node *proj_path_data, *proj_path_ctrl;
 384     if( proj->Opcode() == Opcodes::Op_IfTrue ) {
 385       if( phi_s == NULL ) {
 386         // Only construct phi_s if needed; otherwise it provides an
 387         // interfering use.
 388         phi_s = PhiNode::make_blank(region_s,phi);
 389         phi_s->init_req( 1, phi_c );
 390         phi_s->init_req( 2, phi_x );
 391         hook->add_req(phi_s);
 392         phi_s = phase->transform(phi_s);
 393       }
 394       proj_path_data = phi_s;
 395       proj_path_ctrl = region_s;
 396     } else {
 397       if( phi_f == NULL ) {
 398         // Only construct phi_f if needed; otherwise it provides an
 399         // interfering use.
 400         phi_f = PhiNode::make_blank(region_f,phi);
 401         phi_f->init_req( 1, phi_c );
 402         phi_f->init_req( 2, phi_x );
 403         hook->add_req(phi_f);
 404         phi_f = phase->transform(phi_f);
 405       }
 406       proj_path_data = phi_f;
 407       proj_path_ctrl = region_f;
 408     }
 409 
 410     // Fixup 'v' for the split
 411     if( vop == Opcodes::Op_Phi ) {       // Remote merge point
 412       uint i;
 413       for( i = 1; i < v->req(); i++ )
 414         if( v->in(i) == phi )
 415           break;
 416       v->set_req(i, proj_path_data );
 417     } else if( v->is_ConstraintCast() ) {
 418       v->set_req(0, proj_path_ctrl );
 419       v->set_req(1, proj_path_data );
 420     } else
 421       ShouldNotReachHere();
 422   }
 423 
 424   // Now replace the original iff's True/False with region_s/region_f.
 425   // This makes the original iff go dead.
 426   for (DUIterator_Last i3min, i3 = iff->last_outs(i3min); i3 >= i3min; --i3) {
 427     Node* p = iff->last_out(i3);
 428     assert( p->Opcode() == Opcodes::Op_IfTrue || p->Opcode() == Opcodes::Op_IfFalse, "" );
 429     Node *u = (p->Opcode() == Opcodes::Op_IfTrue) ? region_s : region_f;
 430     // Replace p with u
 431     igvn->add_users_to_worklist(p);
 432     for (DUIterator_Last lmin, l = p->last_outs(lmin); l >= lmin;) {
 433       Node* x = p->last_out(l);
 434       igvn->hash_delete(x);
 435       uint uses_found = 0;
 436       for( uint j = 0; j < x->req(); j++ ) {
 437         if( x->in(j) == p ) {
 438           x->set_req(j, u);
 439           uses_found++;
 440         }
 441       }
 442       l -= uses_found;    // we deleted 1 or more copies of this edge
 443     }
 444     igvn->remove_dead_node(p);
 445   }
 446 
 447   // Force the original merge dead
 448   igvn->hash_delete(r);
 449   // First, remove region's dead users.


 458     l -= 1;
 459   }
 460   igvn->remove_dead_node(r);
 461 
 462   // Now remove the bogus extra edges used to keep things alive
 463   igvn->remove_dead_node( hook );
 464 
 465   // Must return either the original node (now dead) or a new node
 466   // (Do not return a top here, since that would break the uniqueness of top.)
 467   return new ConINode(TypeInt::ZERO);
 468 }
 469 
 470 // If this IfNode follows a range check pattern, return the projection
 471 // for the failed path
 472 ProjNode* IfNode::range_check_trap_proj(int& flip_test, Node*& l, Node*& r) {
 473   Node* b = in(1);
 474   if (b == NULL || !b->is_Bool())  return NULL;
 475   BoolNode* bn = b->as_Bool();
 476   Node* cmp = bn->in(1);
 477   if (cmp == NULL)  return NULL;
 478   if (cmp->Opcode() != Opcodes::Op_CmpU)  return NULL;
 479 
 480   l = cmp->in(1);
 481   r = cmp->in(2);
 482   flip_test = 1;
 483   if (bn->_test._test == BoolTest::le) {
 484     l = cmp->in(2);
 485     r = cmp->in(1);
 486     flip_test = 2;
 487   } else if (bn->_test._test != BoolTest::lt) {
 488     return NULL;
 489   }
 490   if (l->is_top())  return NULL;   // Top input means dead test
 491   if (r->Opcode() != Opcodes::Op_LoadRange && !is_RangeCheck())  return NULL;
 492 
 493   // We have recognized one of these forms:
 494   //  Flip 1:  If (Bool[<] CmpU(l, LoadRange)) ...
 495   //  Flip 2:  If (Bool[<=] CmpU(LoadRange, l)) ...
 496 
 497   ProjNode* iftrap = proj_out(flip_test == 2 ? true : false);
 498   return iftrap;
 499 }
 500 
 501 
 502 //------------------------------is_range_check---------------------------------
 503 // Return 0 if not a range check.  Return 1 if a range check and set index and
 504 // offset.  Return 2 if we had to negate the test.  Index is NULL if the check
 505 // is versus a constant.
 506 int RangeCheckNode::is_range_check(Node* &range, Node* &index, jint &offset) {
 507   int flip_test = 0;
 508   Node* l = NULL;
 509   Node* r = NULL;
 510   ProjNode* iftrap = range_check_trap_proj(flip_test, l, r);
 511 
 512   if (iftrap == NULL) {
 513     return 0;
 514   }
 515 
 516   // Make sure it's a real range check by requiring an uncommon trap
 517   // along the OOB path.  Otherwise, it's possible that the user wrote
 518   // something which optimized to look like a range check but behaves
 519   // in some other way.
 520   if (iftrap->is_uncommon_trap_proj(Deoptimization::Reason_range_check) == NULL) {
 521     return 0;
 522   }
 523 
 524   // Look for index+offset form
 525   Node* ind = l;
 526   jint  off = 0;
 527   if (l->is_top()) {
 528     return 0;
 529   } else if (l->Opcode() == Opcodes::Op_AddI) {
 530     if ((off = l->in(1)->find_int_con(0)) != 0) {
 531       ind = l->in(2)->uncast();
 532     } else if ((off = l->in(2)->find_int_con(0)) != 0) {
 533       ind = l->in(1)->uncast();
 534     }
 535   } else if ((off = l->find_int_con(-1)) >= 0) {
 536     // constant offset with no variable index
 537     ind = NULL;
 538   } else {
 539     // variable index with no constant offset (or dead negative index)
 540     off = 0;
 541   }
 542 
 543   // Return all the values:
 544   index  = ind;
 545   offset = off;
 546   range  = r;
 547   return flip_test;
 548 }
 549 


 610       (din4 = din2->in(0)) ) {  // Right path up one
 611     if( din3->is_Call() &&      // Handle a slow-path call on either arm
 612         (din3 = din3->in(0)) )
 613       din3 = din3->in(0);
 614     if( din4->is_Call() &&      // Handle a slow-path call on either arm
 615         (din4 = din4->in(0)) )
 616       din4 = din4->in(0);
 617     if( din3 == din4 && din3->is_If() )
 618       return din3;              // Skip around diamonds
 619   }
 620 
 621   // Give up the search at true merges
 622   return NULL;                  // Dead loop?  Or hit root?
 623 }
 624 
 625 
 626 //------------------------------filtered_int_type--------------------------------
 627 // Return a possibly more restrictive type for val based on condition control flow for an if
 628 const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node *val, Node* if_proj) {
 629   assert(if_proj &&
 630          (if_proj->Opcode() == Opcodes::Op_IfTrue || if_proj->Opcode() == Opcodes::Op_IfFalse), "expecting an if projection");
 631   if (if_proj->in(0) && if_proj->in(0)->is_If()) {
 632     IfNode* iff = if_proj->in(0)->as_If();
 633     if (iff->in(1) && iff->in(1)->is_Bool()) {
 634       BoolNode* bol = iff->in(1)->as_Bool();
 635       if (bol->in(1) && bol->in(1)->is_Cmp()) {
 636         const CmpNode* cmp  = bol->in(1)->as_Cmp();
 637         if (cmp->in(1) == val) {
 638           const TypeInt* cmp2_t = gvn->type(cmp->in(2))->isa_int();
 639           if (cmp2_t != NULL) {
 640             jint lo = cmp2_t->_lo;
 641             jint hi = cmp2_t->_hi;
 642             BoolTest::mask msk = if_proj->Opcode() == Opcodes::Op_IfTrue ? bol->_test._test : bol->_test.negate();
 643             switch (msk) {
 644             case BoolTest::ne:
 645               // Can't refine type
 646               return NULL;
 647             case BoolTest::eq:
 648               return cmp2_t;
 649             case BoolTest::lt:
 650               lo = TypeInt::INT->_lo;
 651               if (hi - 1 < hi) {
 652                 hi = hi - 1;
 653               }
 654               break;
 655             case BoolTest::le:
 656               lo = TypeInt::INT->_lo;
 657               break;
 658             case BoolTest::gt:
 659               if (lo + 1 > lo) {
 660                 lo = lo + 1;
 661               }
 662               hi = TypeInt::INT->_hi;


 702 // an explicit range check:
 703 // if (index < 0 || index >= array.length) {
 704 // which may need a null check to guard the LoadRange
 705 //
 706 //                   If
 707 //                  / \
 708 //                 /   \
 709 //                /     \
 710 //              If      unc
 711 //              /\
 712 //             /  \
 713 //            /    \
 714 //           /      unc
 715 //
 716 
 717 // Is the comparison for this If suitable for folding?
 718 bool IfNode::cmpi_folds(PhaseIterGVN* igvn) {
 719   return in(1) != NULL &&
 720     in(1)->is_Bool() &&
 721     in(1)->in(1) != NULL &&
 722     in(1)->in(1)->Opcode() == Opcodes::Op_CmpI &&
 723     in(1)->in(1)->in(2) != NULL &&
 724     in(1)->in(1)->in(2) != igvn->C->top() &&
 725     (in(1)->as_Bool()->_test.is_less() ||
 726      in(1)->as_Bool()->_test.is_greater());
 727 }
 728 
 729 // Is a dominating control suitable for folding with this if?
 730 bool IfNode::is_ctrl_folds(Node* ctrl, PhaseIterGVN* igvn) {
 731   return ctrl != NULL &&
 732     ctrl->is_Proj() &&
 733     ctrl->in(0) != NULL &&
 734     ctrl->in(0)->Opcode() == Opcodes::Op_If &&
 735     ctrl->in(0)->outcnt() == 2 &&
 736     ctrl->in(0)->as_If()->cmpi_folds(igvn) &&
 737     // Must compare same value
 738     ctrl->in(0)->in(1)->in(1)->in(1) != NULL &&
 739     ctrl->in(0)->in(1)->in(1)->in(1) == in(1)->in(1)->in(1);
 740 }
 741 
 742 // Do this If and the dominating If share a region?
 743 bool IfNode::has_shared_region(ProjNode* proj, ProjNode*& success, ProjNode*& fail) {
 744   ProjNode* otherproj = proj->other_if_proj();
 745   Node* otherproj_ctrl_use = otherproj->unique_ctrl_out();
 746   RegionNode* region = (otherproj_ctrl_use != NULL && otherproj_ctrl_use->is_Region()) ? otherproj_ctrl_use->as_Region() : NULL;
 747   success = NULL;
 748   fail = NULL;
 749 
 750   if (otherproj->outcnt() == 1 && region != NULL && !region->has_phi()) {
 751     for (int i = 0; i < 2; i++) {
 752       ProjNode* proj = proj_out(i);
 753       if (success == NULL && proj->outcnt() == 1 && proj->unique_out() == region) {
 754         success = proj;


1060     // so use a special trap reason to mark this pair of CmpI nodes as
 1061     // a bad candidate for folding. On recompilation we won't fold them
 1062     // and we may trap again but this time we'll know which branch
 1063     // traps.
1064     trap_request = Deoptimization::make_trap_request(Deoptimization::Reason_unstable_fused_if, action);
1065   }
1066   igvn->replace_input_of(dom_unc, TypeFunc::Parms, igvn->intcon(trap_request));
1067   return res;
1068 }
1069 
1070 // If we are turning 2 CmpI nodes into a CmpU that follows the pattern
 1071 // of a range check on index i, on 64-bit the compares may be followed
 1072 // by memory accesses using i as an index. In that case, the CmpU tells
1073 // us something about the values taken by i that can help the compiler
1074 // (see Compile::conv_I2X_index())
1075 void IfNode::improve_address_types(Node* l, Node* r, ProjNode* fail, PhaseIterGVN* igvn) {
1076 #ifdef _LP64
1077   ResourceMark rm;
1078   Node_Stack stack(2);
1079 
1080   assert(r->Opcode() == Opcodes::Op_LoadRange, "unexpected range check");
1081   const TypeInt* array_size = igvn->type(r)->is_int();
1082 
1083   stack.push(l, 0);
1084 
1085   while(stack.size() > 0) {
1086     Node* n = stack.node();
1087     uint start = stack.index();
1088 
1089     uint i = start;
1090     for (; i < n->outcnt(); i++) {
1091       Node* use = n->raw_out(i);
1092       if (stack.size() == 1) {
1093         if (use->Opcode() == Opcodes::Op_ConvI2L) {
1094           const TypeLong* bounds = use->as_Type()->type()->is_long();
1095           if (bounds->_lo <= array_size->_lo && bounds->_hi >= array_size->_hi &&
1096               (bounds->_lo != array_size->_lo || bounds->_hi != array_size->_hi)) {
1097             stack.set_index(i+1);
1098             stack.push(use, 0);
1099             break;
1100           }
1101         }
1102       } else if (use->is_Mem()) {
1103         Node* ctrl = use->in(0);
1104         for (int i = 0; i < 10 && ctrl != NULL && ctrl != fail; i++) {
1105           ctrl = up_one_dom(ctrl);
1106         }
1107         if (ctrl == fail) {
1108           Node* init_n = stack.node_at(1);
1109           assert(init_n->Opcode() == Opcodes::Op_ConvI2L, "unexpected first node");
1110           // Create a new narrow ConvI2L node that is dependent on the range check
1111           Node* new_n = igvn->C->conv_I2X_index(igvn, l, array_size, fail);
1112 
 1113           // The type of the ConvI2L may be widened and so the new
1114           // ConvI2L may not be better than an existing ConvI2L
1115           if (new_n != init_n) {
1116             for (uint j = 2; j < stack.size(); j++) {
1117               Node* n = stack.node_at(j);
1118               Node* clone = n->clone();
1119               int rep = clone->replace_edge(init_n, new_n);
1120               assert(rep > 0, "can't find expected node?");
1121               clone = igvn->transform(clone);
1122               init_n = n;
1123               new_n = clone;
1124             }
1125             igvn->hash_delete(use);
1126             int rep = use->replace_edge(init_n, new_n);
1127             assert(rep > 0, "can't find expected node?");
1128             igvn->transform(use);
1129             if (init_n->outcnt() == 0) {


1133         }
1134       } else if (use->in(0) == NULL && (igvn->type(use)->isa_long() ||
1135                                         igvn->type(use)->isa_ptr())) {
1136         stack.set_index(i+1);
1137         stack.push(use, 0);
1138         break;
1139       }
1140     }
1141     if (i == n->outcnt()) {
1142       stack.pop();
1143     }
1144   }
1145 #endif
1146 }
1147 
1148 bool IfNode::is_cmp_with_loadrange(ProjNode* proj) {
1149   if (in(1) != NULL &&
1150       in(1)->in(1) != NULL &&
1151       in(1)->in(1)->in(2) != NULL) {
1152     Node* other = in(1)->in(1)->in(2);
1153     if (other->Opcode() == Opcodes::Op_LoadRange &&
1154         ((other->in(0) != NULL && other->in(0) == proj) ||
1155          (other->in(0) == NULL &&
1156           other->in(2) != NULL &&
1157           other->in(2)->is_AddP() &&
1158           other->in(2)->in(1) != NULL &&
1159           other->in(2)->in(1)->Opcode() == Opcodes::Op_CastPP &&
1160           other->in(2)->in(1)->in(0) == proj))) {
1161       return true;
1162     }
1163   }
1164   return false;
1165 }
1166 
1167 bool IfNode::is_null_check(ProjNode* proj, PhaseIterGVN* igvn) {
1168   Node* other = in(1)->in(1)->in(2);
1169   if (other->in(MemNode::Address) != NULL &&
1170       proj->in(0)->in(1) != NULL &&
1171       proj->in(0)->in(1)->is_Bool() &&
1172       proj->in(0)->in(1)->in(1) != NULL &&
1173       proj->in(0)->in(1)->in(1)->Opcode() == Opcodes::Op_CmpP &&
1174       proj->in(0)->in(1)->in(1)->in(2) != NULL &&
1175       proj->in(0)->in(1)->in(1)->in(1) == other->in(MemNode::Address)->in(AddPNode::Address)->uncast() &&
1176       igvn->type(proj->in(0)->in(1)->in(1)->in(2)) == TypePtr::NULL_PTR) {
1177     return true;
1178   }
1179   return false;
1180 }
1181 
1182 // Check that the If that is in between the 2 integer comparisons has
1183 // no side effect
1184 bool IfNode::is_side_effect_free_test(ProjNode* proj, PhaseIterGVN* igvn) {
1185   if (proj != NULL &&
1186       proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1187       proj->outcnt() <= 2) {
1188     if (proj->outcnt() == 1 ||
1189         // Allow simple null check from LoadRange
1190         (is_cmp_with_loadrange(proj) && is_null_check(proj, igvn))) {
1191       CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
1192       CallStaticJavaNode* dom_unc = proj->in(0)->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
1193 


1223 
1224   Node* new_unc = dom_unc->clone();
1225   call_proj = call_proj->clone();
1226   halt = halt->clone();
1227   Node* c = otherproj->clone();
1228 
1229   c = igvn->transform(c);
1230   new_unc->set_req(TypeFunc::Parms, unc->in(TypeFunc::Parms));
1231   new_unc->set_req(0, c);
1232   new_unc = igvn->transform(new_unc);
1233   call_proj->set_req(0, new_unc);
1234   call_proj = igvn->transform(call_proj);
1235   halt->set_req(0, call_proj);
1236   halt = igvn->transform(halt);
1237 
1238   igvn->replace_node(otherproj, igvn->C->top());
1239   igvn->C->root()->add_req(halt);
1240 }
1241 
1242 Node* IfNode::fold_compares(PhaseIterGVN* igvn) {
1243   if (Opcode() != Opcodes::Op_If) return NULL;
1244 
1245   if (cmpi_folds(igvn)) {
1246     Node* ctrl = in(0);
1247     if (is_ctrl_folds(ctrl, igvn) &&
1248         ctrl->outcnt() == 1) {
 1249       // An integer comparison immediately dominated by another integer
1250       // comparison
1251       ProjNode* success = NULL;
1252       ProjNode* fail = NULL;
1253       ProjNode* dom_cmp = ctrl->as_Proj();
1254       if (has_shared_region(dom_cmp, success, fail) &&
1255           // Next call modifies graph so must be last
1256           fold_compares_helper(dom_cmp, success, fail, igvn)) {
1257         return this;
1258       }
1259       if (has_only_uncommon_traps(dom_cmp, success, fail, igvn) &&
1260           // Next call modifies graph so must be last
1261           fold_compares_helper(dom_cmp, success, fail, igvn)) {
1262         return merge_uncommon_traps(dom_cmp, success, fail, igvn);
1263       }


1278           // Next call modifies graph so must be last
1279           fold_compares_helper(dom_cmp, success, fail, igvn)) {
1280         reroute_side_effect_free_unc(other_cmp, dom_cmp, igvn);
1281         return merge_uncommon_traps(dom_cmp, success, fail, igvn);
1282       }
1283     }
1284   }
1285   return NULL;
1286 }
1287 
1288 //------------------------------remove_useless_bool----------------------------
1289 // Check for people making a useless boolean: things like
1290 // if( (x < y ? true : false) ) { ... }
1291 // Replace with if( x < y ) { ... }
1292 static Node *remove_useless_bool(IfNode *iff, PhaseGVN *phase) {
1293   Node *i1 = iff->in(1);
1294   if( !i1->is_Bool() ) return NULL;
1295   BoolNode *bol = i1->as_Bool();
1296 
1297   Node *cmp = bol->in(1);
1298   if( cmp->Opcode() != Opcodes::Op_CmpI ) return NULL;
1299 
1300   // Must be comparing against a bool
1301   const Type *cmp2_t = phase->type( cmp->in(2) );
1302   if( cmp2_t != TypeInt::ZERO &&
1303       cmp2_t != TypeInt::ONE )
1304     return NULL;
1305 
1306   // Find a prior merge point merging the boolean
1307   i1 = cmp->in(1);
1308   if( !i1->is_Phi() ) return NULL;
1309   PhiNode *phi = i1->as_Phi();
1310   if( phase->type( phi ) != TypeInt::BOOL )
1311     return NULL;
1312 
1313   // Check for diamond pattern
1314   int true_path = phi->is_diamond_phi();
1315   if( true_path == 0 ) return NULL;
1316 
1317   // Make sure that iff and the control of the phi are different. This
1318   // should really only happen for dead control flow since it requires


1394     return res;
1395   }
1396 
1397   // Check for people making a useless boolean: things like
1398   // if( (x < y ? true : false) ) { ... }
1399   // Replace with if( x < y ) { ... }
1400   Node *bol2 = remove_useless_bool(this, phase);
1401   if( bol2 ) return bol2;
1402 
1403   if (in(0) == NULL) return NULL;     // Dead loop?
1404 
1405   PhaseIterGVN *igvn = phase->is_IterGVN();
1406   Node* result = fold_compares(igvn);
1407   if (result != NULL) {
1408     return result;
1409   }
1410 
1411   // Scan for an equivalent test
1412   Node *cmp;
1413   int dist = 0;               // Cutoff limit for search
1414   Opcodes op = Opcode();
1415   if( op == Opcodes::Op_If &&
1416       (cmp=in(1)->in(1))->Opcode() == Opcodes::Op_CmpP ) {
1417     if( cmp->in(2) != NULL && // make sure cmp is not already dead
1418         cmp->in(2)->bottom_type() == TypePtr::NULL_PTR ) {
1419       dist = 64;              // Limit for null-pointer scans
1420     } else {
1421       dist = 4;               // Do not bother for random pointer tests
1422     }
1423   } else {
1424     dist = 4;                 // Limit for random junky scans
1425   }
1426 
1427   Node* prev_dom = search_identical(dist);
1428 
1429   if (prev_dom == NULL) {
1430     return NULL;
1431   }
1432 
1433   // Replace dominated IfNode
1434   return dominated_by(prev_dom, igvn);
1435 }
1436 
1437 //------------------------------dominated_by-----------------------------------
1438 Node* IfNode::dominated_by(Node* prev_dom, PhaseIterGVN *igvn) {
1439 #ifndef PRODUCT
1440   if (TraceIterativeGVN) {
1441     tty->print("   Removing IfNode: "); this->dump();
1442   }
1443   if (VerifyOpto && !igvn->allow_progress()) {
1444     // Found an equivalent dominating test,
 1445     // we cannot guarantee reaching a fixpoint for these during iterative GVN
1446     // since intervening nodes may not change.
1447     return NULL;
1448   }
1449 #endif
1450 
1451   igvn->hash_delete(this);      // Remove self to prevent spurious V-N
1452   Node *idom = in(0);
1453   // Need opcode to decide which way 'this' test goes
1454   Opcodes prev_op = prev_dom->Opcode();
1455   Node *top = igvn->C->top(); // Shortcut to top
1456 
 1457   // Loop predicates may have dependent checks which should not
 1458   // be skipped. For example, a range check predicate has two checks,
 1459   // for the lower and upper bounds.
1460   ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj();
1461   if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != NULL)
1462    prev_dom = idom;
1463 
1464   // Now walk the current IfNode's projections.
1465   // Loop ends when 'this' has no more uses.
1466   for (DUIterator_Last imin, i = last_outs(imin); i >= imin; --i) {
1467     Node *ifp = last_out(i);     // Get IfTrue/IfFalse
1468     igvn->add_users_to_worklist(ifp);
1469     // Check which projection it is and set target.
1470     // Data-target is either the dominating projection of the same type
1471     // or TOP if the dominating projection is of opposite type.
1472     // Data-target will be used as the new control edge for the non-CFG
1473     // nodes like Casts and Loads.
1474     Node *data_target = (ifp->Opcode() == prev_op) ? prev_dom : top;


1488       } else {                      // Else, for control producers,
1489         igvn->replace_input_of(s, 0, data_target); // Move child to data-target
1490       }
1491     } // End for each child of a projection
1492 
1493     igvn->remove_dead_node(ifp);
1494   } // End for each IfTrue/IfFalse child of If
1495 
1496   // Kill the IfNode
1497   igvn->remove_dead_node(this);
1498 
1499   // Must return either the original node (now dead) or a new node
1500   // (Do not return a top here, since that would break the uniqueness of top.)
1501   return new ConINode(TypeInt::ZERO);
1502 }
1503 
1504 Node* IfNode::search_identical(int dist) {
1505   // Setup to scan up the CFG looking for a dominating test
1506   Node* dom = in(0);
1507   Node* prev_dom = this;
1508   Opcodes op = Opcode();
1509   // Search up the dominator tree for an If with an identical test
1510   while (dom->Opcode() != op    ||  // Not same opcode?
1511          dom->in(1)    != in(1) ||  // Not same input 1?
1512          (req() == 3 && dom->in(2) != in(2)) || // Not same input 2?
1513          prev_dom->in(0) != dom) {  // One path of test does not dominate?
1514     if (dist < 0) return NULL;
1515 
1516     dist--;
1517     prev_dom = dom;
1518     dom = up_one_dom(dom);
1519     if (!dom) return NULL;
1520   }
1521 
1522   // Check that we did not follow a loop back to ourselves
1523   if (this == dom) {
1524     return NULL;
1525   }
1526 
1527 #ifndef PRODUCT
1528   if (dist > 2) { // Add to count of NULL checks elided


 1673     // Try to remove extra range checks.  Also, 'up_one_dom' gives up at merges
1674     // so all checks we inspect post-dominate the top-most check we find.
1675     // If we are going to fail the current check and we reach the top check
1676     // then we are guaranteed to fail, so just start interpreting there.
1677     // We 'expand' the top 3 range checks to include all post-dominating
1678     // checks.
1679 
1680     // The top 3 range checks seen
 1681     const int NRC = 3;
1682     RangeCheck prev_checks[NRC];
1683     int nb_checks = 0;
1684 
1685     // Low and high offsets seen so far
1686     jint off_lo = offset1;
1687     jint off_hi = offset1;
1688 
1689     bool found_immediate_dominator = false;
1690 
1691     // Scan for the top checks and collect range of offsets
1692     for (int dist = 0; dist < 999; dist++) { // Range-Check scan limit
 1693       if (dom->Opcode() == Opcodes::Op_RangeCheck &&  // Is a range check?
1694           prev_dom->in(0) == dom) { // One path of test does dominate?
1695         if (dom == this) return NULL; // dead loop
1696         // See if this is a range check
1697         Node* index2;
1698         Node* range2;
1699         jint offset2;
1700         int flip2 = dom->as_RangeCheck()->is_range_check(range2, index2, offset2);
1701         // See if this is a _matching_ range check, checking against
1702         // the same array bounds.
1703         if (flip2 == flip1 && range2 == range1 && index2 == index1 &&
1704             dom->outcnt() == 2) {
1705           if (nb_checks == 0 && dom->in(1) == in(1)) {
1706             // Found an immediately dominating test at the same offset.
1707             // This kind of back-to-back test can be eliminated locally,
1708             // and there is no need to search further for dominating tests.
1709             assert(offset2 == offset1, "Same test but different offsets");
1710             found_immediate_dominator = true;
1711             break;
1712           }
1713           // Gather expanded bounds

