1 /*
   2  * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/callnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/connode.hpp"
  35 #include "opto/divnode.hpp"
  36 #include "opto/loopnode.hpp"
  37 #include "opto/matcher.hpp"
  38 #include "opto/mulnode.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/opaquenode.hpp"
  41 #include "opto/rootnode.hpp"
  42 #include "opto/subnode.hpp"
  43 #include "utilities/macros.hpp"
  44 #if INCLUDE_ZGC
  45 #include "gc/z/c2/zBarrierSetC2.hpp"
  46 #endif
  47 
  48 //=============================================================================
  49 //------------------------------split_thru_phi---------------------------------
  50 // Split Node 'n' through merge point if there is enough win.
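// For example (illustrative): AddI(Phi(region, a, b), c) becomes
// Phi(region, AddI(a, c), AddI(b, c)), giving each per-path clone a chance
// to constant-fold or common up with an existing node (a "win").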
  51 Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
  52   if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) {
    // ConvI2L may have type information on it which is unsafe to push up,
    // so disable this for now.
  55     return NULL;
  56   }
  57 
  // Splitting range check CastIIs through a loop induction Phi can
  // cause new Phis to be created that are unrelated to the loop
  // induction Phi, preventing optimizations (vectorization).
  61   if (n->Opcode() == Op_CastII && n->as_CastII()->has_range_check() &&
  62       region->is_CountedLoop() && n->in(1) == region->as_CountedLoop()->phi()) {
  63     return NULL;
  64   }
  65 
  66   int wins = 0;
  67   assert(!n->is_CFG(), "");
  68   assert(region->is_Region(), "");
  69 
  70   const Type* type = n->bottom_type();
  71   const TypeOopPtr *t_oop = _igvn.type(n)->isa_oopptr();
  72   Node *phi;
  73   if (t_oop != NULL && t_oop->is_known_instance_field()) {
  74     int iid    = t_oop->instance_id();
  75     int index  = C->get_alias_index(t_oop);
  76     int offset = t_oop->offset();
  77     phi = new PhiNode(region, type, NULL, iid, index, offset);
  78   } else {
  79     phi = PhiNode::make_blank(region, n);
  80   }
  81   uint old_unique = C->unique();
  82   for (uint i = 1; i < region->req(); i++) {
  83     Node *x;
  84     Node* the_clone = NULL;
  85     if (region->in(i) == C->top()) {
  86       x = C->top();             // Dead path?  Use a dead data op
  87     } else {
  88       x = n->clone();           // Else clone up the data op
  89       the_clone = x;            // Remember for possible deletion.
  90       // Alter data node to use pre-phi inputs
  91       if (n->in(0) == region)
  92         x->set_req( 0, region->in(i) );
  93       for (uint j = 1; j < n->req(); j++) {
  94         Node *in = n->in(j);
  95         if (in->is_Phi() && in->in(0) == region)
  96           x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone
  97       }
  98     }
  99     // Check for a 'win' on some paths
 100     const Type *t = x->Value(&_igvn);
 101 
 102     bool singleton = t->singleton();
 103 
 104     // A TOP singleton indicates that there are no possible values incoming
 105     // along a particular edge. In most cases, this is OK, and the Phi will
 106     // be eliminated later in an Ideal call. However, we can't allow this to
 107     // happen if the singleton occurs on loop entry, as the elimination of
 108     // the PhiNode may cause the resulting node to migrate back to a previous
 109     // loop iteration.
 110     if (singleton && t == Type::TOP) {
      // is_Loop() == false does not confirm the absence of a loop (e.g., an
      // irreducible loop may not be indicated by an affirmative is_Loop());
      // therefore, the only TOP we can split thru a phi is on a backedge of
      // a loop.
 115       singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
 116     }
 117 
 118     if (singleton) {
 119       wins++;
 120       x = ((PhaseGVN&)_igvn).makecon(t);
 121     } else {
 122       // We now call Identity to try to simplify the cloned node.
 123       // Note that some Identity methods call phase->type(this).
 124       // Make sure that the type array is big enough for
 125       // our new node, even though we may throw the node away.
 126       // (Note: This tweaking with igvn only works because x is a new node.)
 127       _igvn.set_type(x, t);
      // If x is a TypeNode, capture any more-precise type permanently into the
      // Node; otherwise it will not be updated during igvn->transform since
      // igvn->type(x) is set to x->Value() already.
 131       x->raise_bottom_type(t);
 132       Node *y = _igvn.apply_identity(x);
 133       if (y != x) {
 134         wins++;
 135         x = y;
 136       } else {
 137         y = _igvn.hash_find(x);
 138         if (y) {
 139           wins++;
 140           x = y;
 141         } else {
 142           // Else x is a new node we are keeping
 143           // We do not need register_new_node_with_optimizer
 144           // because set_type has already been called.
 145           _igvn._worklist.push(x);
 146         }
 147       }
 148     }
 149     if (x != the_clone && the_clone != NULL)
 150       _igvn.remove_dead_node(the_clone);
 151     phi->set_req( i, x );
 152   }
 153   // Too few wins?
 154   if (wins <= policy) {
 155     _igvn.remove_dead_node(phi);
 156     return NULL;
 157   }
 158 
 159   // Record Phi
 160   register_new_node( phi, region );
 161 
 162   for (uint i2 = 1; i2 < phi->req(); i2++) {
 163     Node *x = phi->in(i2);
 164     // If we commoned up the cloned 'x' with another existing Node,
 165     // the existing Node picks up a new use.  We need to make the
 166     // existing Node occur higher up so it dominates its uses.
 167     Node *old_ctrl;
 168     IdealLoopTree *old_loop;
 169 
 170     if (x->is_Con()) {
 171       // Constant's control is always root.
 172       set_ctrl(x, C->root());
 173       continue;
 174     }
 175     // The occasional new node
 176     if (x->_idx >= old_unique) {     // Found a new, unplaced node?
 177       old_ctrl = NULL;
 178       old_loop = NULL;               // Not in any prior loop
 179     } else {
 180       old_ctrl = get_ctrl(x);
 181       old_loop = get_loop(old_ctrl); // Get prior loop
 182     }
 183     // New late point must dominate new use
 184     Node *new_ctrl = dom_lca(old_ctrl, region->in(i2));
 185     if (new_ctrl == old_ctrl) // Nothing is changed
 186       continue;
 187 
 188     IdealLoopTree *new_loop = get_loop(new_ctrl);
 189 
 190     // Don't move x into a loop if its uses are
 191     // outside of loop. Otherwise x will be cloned
 192     // for each use outside of this loop.
 193     IdealLoopTree *use_loop = get_loop(region);
 194     if (!new_loop->is_member(use_loop) &&
 195         (old_loop == NULL || !new_loop->is_member(old_loop))) {
 196       // Take early control, later control will be recalculated
 197       // during next iteration of loop optimizations.
 198       new_ctrl = get_early_ctrl(x);
 199       new_loop = get_loop(new_ctrl);
 200     }
 201     // Set new location
 202     set_ctrl(x, new_ctrl);
 203     // If changing loop bodies, see if we need to collect into new body
 204     if (old_loop != new_loop) {
 205       if (old_loop && !old_loop->_child)
 206         old_loop->_body.yank(x);
 207       if (!new_loop->_child)
 208         new_loop->_body.push(x);  // Collect body info
 209     }
 210   }
 211 
 212   return phi;
 213 }
 214 
 215 //------------------------------dominated_by------------------------------------
 216 // Replace the dominated test with an obvious true or false.  Place it on the
 217 // IGVN worklist for later cleanup.  Move control-dependent data Nodes on the
 218 // live path up to the dominating control.
 219 void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff, bool flip, bool exclude_loop_predicate ) {
 220   if (VerifyLoopOptimizations && PrintOpto) { tty->print_cr("dominating test"); }
 221 
 222   // prevdom is the dominating projection of the dominating test.
 223   assert( iff->is_If(), "" );
 224   assert(iff->Opcode() == Op_If || iff->Opcode() == Op_CountedLoopEnd || iff->Opcode() == Op_RangeCheck, "Check this code when new subtype is added");
 225   int pop = prevdom->Opcode();
 226   assert( pop == Op_IfFalse || pop == Op_IfTrue, "" );
 227   if (flip) {
 228     if (pop == Op_IfTrue)
 229       pop = Op_IfFalse;
 230     else
 231       pop = Op_IfTrue;
 232   }
 233   // 'con' is set to true or false to kill the dominated test.
 234   Node *con = _igvn.makecon(pop == Op_IfTrue ? TypeInt::ONE : TypeInt::ZERO);
 235   set_ctrl(con, C->root()); // Constant gets a new use
 236   // Hack the dominated test
 237   _igvn.replace_input_of(iff, 1, con);
 238 
  // If I don't have a reachable TRUE and FALSE path following the IfNode then
 240   // I can assume this path reaches an infinite loop.  In this case it's not
 241   // important to optimize the data Nodes - either the whole compilation will
 242   // be tossed or this path (and all data Nodes) will go dead.
 243   if (iff->outcnt() != 2) return;
 244 
 245   // Make control-dependent data Nodes on the live path (path that will remain
 246   // once the dominated IF is removed) become control-dependent on the
 247   // dominating projection.
 248   Node* dp = iff->as_If()->proj_out_or_null(pop == Op_IfTrue);
 249 
  // Loop predicates may have dependent checks which should not
  // be skipped. For example, a range check predicate has two checks,
  // for the lower and upper bounds.
 253   if (dp == NULL)
 254     return;
 255 
 256   ProjNode* dp_proj  = dp->as_Proj();
 257   ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj();
 258   if (exclude_loop_predicate &&
 259       (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != NULL ||
 260        unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_profile_predicate) != NULL ||
 261        unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_range_check) != NULL)) {
 262     // If this is a range check (IfNode::is_range_check), do not
 263     // reorder because Compile::allow_range_check_smearing might have
 264     // changed the check.
 265     return; // Let IGVN transformation change control dependence.
 266   }
 267 
 268   IdealLoopTree *old_loop = get_loop(dp);
 269 
 270   for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
 271     Node* cd = dp->fast_out(i); // Control-dependent node
 272     if (cd->depends_only_on_test()) {
 273       assert(cd->in(0) == dp, "");
 274       _igvn.replace_input_of(cd, 0, prevdom);
 275       set_early_ctrl(cd);
 276       IdealLoopTree *new_loop = get_loop(get_ctrl(cd));
 277       if (old_loop != new_loop) {
 278         if (!old_loop->_child) old_loop->_body.yank(cd);
 279         if (!new_loop->_child) new_loop->_body.push(cd);
 280       }
 281       --i;
 282       --imax;
 283     }
 284   }
 285 }
 286 
 287 //------------------------------has_local_phi_input----------------------------
// Return n's control block if 'n' has Phi inputs from it and no other
// block-local inputs (all non-local-phi inputs come from earlier blocks);
// otherwise return NULL.
 290 Node *PhaseIdealLoop::has_local_phi_input( Node *n ) {
 291   Node *n_ctrl = get_ctrl(n);
 292   // See if some inputs come from a Phi in this block, or from before
 293   // this block.
 294   uint i;
 295   for( i = 1; i < n->req(); i++ ) {
 296     Node *phi = n->in(i);
 297     if( phi->is_Phi() && phi->in(0) == n_ctrl )
 298       break;
 299   }
 300   if( i >= n->req() )
 301     return NULL;                // No Phi inputs; nowhere to clone thru
 302 
  // Check for inputs created between 'n' and the Phi input.  These
  // must split as well; they have already been given the chance
  // (courtesy of a post-order visit) and since they did not, we must
  // recover the 'cost' of splitting them by being very profitable
  // when splitting 'n'.  Since this is unlikely, we simply give up.
 308   for( i = 1; i < n->req(); i++ ) {
 309     Node *m = n->in(i);
 310     if( get_ctrl(m) == n_ctrl && !m->is_Phi() ) {
 311       // We allow the special case of AddP's with no local inputs.
 312       // This allows us to split-up address expressions.
 313       if (m->is_AddP() &&
 314           get_ctrl(m->in(2)) != n_ctrl &&
 315           get_ctrl(m->in(3)) != n_ctrl) {
 316         // Move the AddP up to dominating point
 317         Node* c = find_non_split_ctrl(idom(n_ctrl));
 318         if (c->is_OuterStripMinedLoop()) {
 319           c->as_Loop()->verify_strip_mined(1);
 320           c = c->in(LoopNode::EntryControl);
 321         }
 322         set_ctrl_and_loop(m, c);
 323         continue;
 324       }
 325       return NULL;
 326     }
 327     assert(m->is_Phi() || is_dominator(get_ctrl(m), n_ctrl), "m has strange control");
 328   }
 329 
 330   return n_ctrl;
 331 }
 332 
 333 //------------------------------remix_address_expressions----------------------
 334 // Rework addressing expressions to get the most loop-invariant stuff
 335 // moved out.  We'd like to do all associative operators, but it's especially
 336 // important (common) to do address expressions.
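// For example (illustrative): for an address expression like
//   base + ((inv + i) << 2)
// the loop-invariant part is re-associated so that (inv << 2) can be hoisted
// out of the loop and only (i << 2) remains loop-varying.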
 337 Node *PhaseIdealLoop::remix_address_expressions( Node *n ) {
 338   if (!has_ctrl(n))  return NULL;
 339   Node *n_ctrl = get_ctrl(n);
 340   IdealLoopTree *n_loop = get_loop(n_ctrl);
 341 
 342   // See if 'n' mixes loop-varying and loop-invariant inputs and
 343   // itself is loop-varying.
 344 
 345   // Only interested in binary ops (and AddP)
 346   if( n->req() < 3 || n->req() > 4 ) return NULL;
 347 
 348   Node *n1_ctrl = get_ctrl(n->in(                    1));
 349   Node *n2_ctrl = get_ctrl(n->in(                    2));
 350   Node *n3_ctrl = get_ctrl(n->in(n->req() == 3 ? 2 : 3));
 351   IdealLoopTree *n1_loop = get_loop( n1_ctrl );
 352   IdealLoopTree *n2_loop = get_loop( n2_ctrl );
 353   IdealLoopTree *n3_loop = get_loop( n3_ctrl );
 354 
 355   // Does one of my inputs spin in a tighter loop than self?
 356   if( (n_loop->is_member( n1_loop ) && n_loop != n1_loop) ||
 357       (n_loop->is_member( n2_loop ) && n_loop != n2_loop) ||
 358       (n_loop->is_member( n3_loop ) && n_loop != n3_loop) )
 359     return NULL;                // Leave well enough alone
 360 
 361   // Is at least one of my inputs loop-invariant?
 362   if( n1_loop == n_loop &&
 363       n2_loop == n_loop &&
 364       n3_loop == n_loop )
 365     return NULL;                // No loop-invariant inputs
 366 
 367 
 368   int n_op = n->Opcode();
 369 
 370   // Replace expressions like ((V+I) << 2) with (V<<2 + I<<2).
 371   if( n_op == Op_LShiftI ) {
 372     // Scale is loop invariant
 373     Node *scale = n->in(2);
 374     Node *scale_ctrl = get_ctrl(scale);
 375     IdealLoopTree *scale_loop = get_loop(scale_ctrl );
 376     if( n_loop == scale_loop || !scale_loop->is_member( n_loop ) )
 377       return NULL;
 378     const TypeInt *scale_t = scale->bottom_type()->isa_int();
 379     if( scale_t && scale_t->is_con() && scale_t->get_con() >= 16 )
      return NULL;              // Don't bother with byte/short masking
 381     // Add must vary with loop (else shift would be loop-invariant)
 382     Node *add = n->in(1);
 383     Node *add_ctrl = get_ctrl(add);
 384     IdealLoopTree *add_loop = get_loop(add_ctrl);
 385     //assert( n_loop == add_loop, "" );
 386     if( n_loop != add_loop ) return NULL;  // happens w/ evil ZKM loops
 387 
 388     // Convert I-V into I+ (0-V); same for V-I
 389     if( add->Opcode() == Op_SubI &&
 390         _igvn.type( add->in(1) ) != TypeInt::ZERO ) {
 391       Node *zero = _igvn.intcon(0);
 392       set_ctrl(zero, C->root());
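      // Note: _igvn.intcon(0) returns the cached ConI node, so the SubI below
      // reuses 'zero', whose control was just set to the root.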
 393       Node *neg = new SubINode( _igvn.intcon(0), add->in(2) );
 394       register_new_node( neg, get_ctrl(add->in(2) ) );
 395       add = new AddINode( add->in(1), neg );
 396       register_new_node( add, add_ctrl );
 397     }
 398     if( add->Opcode() != Op_AddI ) return NULL;
 399     // See if one add input is loop invariant
 400     Node *add_var = add->in(1);
 401     Node *add_var_ctrl = get_ctrl(add_var);
 402     IdealLoopTree *add_var_loop = get_loop(add_var_ctrl );
 403     Node *add_invar = add->in(2);
 404     Node *add_invar_ctrl = get_ctrl(add_invar);
 405     IdealLoopTree *add_invar_loop = get_loop(add_invar_ctrl );
 406     if( add_var_loop == n_loop ) {
 407     } else if( add_invar_loop == n_loop ) {
 408       // Swap to find the invariant part
 409       add_invar = add_var;
 410       add_invar_ctrl = add_var_ctrl;
 411       add_invar_loop = add_var_loop;
 412       add_var = add->in(2);
      add_var_ctrl = get_ctrl(add_var);
      add_var_loop = get_loop(add_var_ctrl);
 415     } else                      // Else neither input is loop invariant
 416       return NULL;
 417     if( n_loop == add_invar_loop || !add_invar_loop->is_member( n_loop ) )
 418       return NULL;              // No invariant part of the add?
 419 
 420     // Yes!  Reshape address expression!
 421     Node *inv_scale = new LShiftINode( add_invar, scale );
 422     Node *inv_scale_ctrl =
 423       dom_depth(add_invar_ctrl) > dom_depth(scale_ctrl) ?
 424       add_invar_ctrl : scale_ctrl;
 425     register_new_node( inv_scale, inv_scale_ctrl );
 426     Node *var_scale = new LShiftINode( add_var, scale );
 427     register_new_node( var_scale, n_ctrl );
 428     Node *var_add = new AddINode( var_scale, inv_scale );
 429     register_new_node( var_add, n_ctrl );
 430     _igvn.replace_node( n, var_add );
 431     return var_add;
 432   }
 433 
 434   // Replace (I+V) with (V+I)
 435   if( n_op == Op_AddI ||
 436       n_op == Op_AddL ||
 437       n_op == Op_AddF ||
 438       n_op == Op_AddD ||
 439       n_op == Op_MulI ||
 440       n_op == Op_MulL ||
 441       n_op == Op_MulF ||
 442       n_op == Op_MulD ) {
 443     if( n2_loop == n_loop ) {
 444       assert( n1_loop != n_loop, "" );
 445       n->swap_edges(1, 2);
 446     }
 447   }
 448 
 449   // Replace ((I1 +p V) +p I2) with ((I1 +p I2) +p V),
 450   // but not if I2 is a constant.
 451   if( n_op == Op_AddP ) {
 452     if( n2_loop == n_loop && n3_loop != n_loop ) {
 453       if( n->in(2)->Opcode() == Op_AddP && !n->in(3)->is_Con() ) {
 454         Node *n22_ctrl = get_ctrl(n->in(2)->in(2));
 455         Node *n23_ctrl = get_ctrl(n->in(2)->in(3));
 456         IdealLoopTree *n22loop = get_loop( n22_ctrl );
 457         IdealLoopTree *n23_loop = get_loop( n23_ctrl );
 458         if( n22loop != n_loop && n22loop->is_member(n_loop) &&
 459             n23_loop == n_loop ) {
 460           Node *add1 = new AddPNode( n->in(1), n->in(2)->in(2), n->in(3) );
 461           // Stuff new AddP in the loop preheader
 462           register_new_node( add1, n_loop->_head->in(LoopNode::EntryControl) );
 463           Node *add2 = new AddPNode( n->in(1), add1, n->in(2)->in(3) );
 464           register_new_node( add2, n_ctrl );
 465           _igvn.replace_node( n, add2 );
 466           return add2;
 467         }
 468       }
 469     }
 470 
 471     // Replace (I1 +p (I2 + V)) with ((I1 +p I2) +p V)
 472     if (n2_loop != n_loop && n3_loop == n_loop) {
 473       if (n->in(3)->Opcode() == Op_AddX) {
 474         Node *V = n->in(3)->in(1);
 475         Node *I = n->in(3)->in(2);
 476         if (is_member(n_loop,get_ctrl(V))) {
 477         } else {
 478           Node *tmp = V; V = I; I = tmp;
 479         }
 480         if (!is_member(n_loop,get_ctrl(I))) {
 481           Node *add1 = new AddPNode(n->in(1), n->in(2), I);
 482           // Stuff new AddP in the loop preheader
 483           register_new_node(add1, n_loop->_head->in(LoopNode::EntryControl));
 484           Node *add2 = new AddPNode(n->in(1), add1, V);
 485           register_new_node(add2, n_ctrl);
 486           _igvn.replace_node(n, add2);
 487           return add2;
 488         }
 489       }
 490     }
 491   }
 492 
 493   return NULL;
 494 }
 495 
 496 // Optimize ((in1[2*i] * in2[2*i]) + (in1[2*i+1] * in2[2*i+1]))
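// into a single MulAddS2INode.  For example (illustrative), a reduction like
//   s += a[2*i] * b[2*i] + a[2*i+1] * b[2*i+1]
// over short arrays has its two LoadS/MulI pairs and the AddI collapsed into
// one MulAddS2I, which vectorization may later turn into MulAddVS2VI (hence
// the match_rule_supported checks below).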
 497 Node *PhaseIdealLoop::convert_add_to_muladd(Node* n) {
 498   assert(n->Opcode() == Op_AddI, "sanity");
 499   Node * nn = NULL;
 500   Node * in1 = n->in(1);
 501   Node * in2 = n->in(2);
 502   if (in1->Opcode() == Op_MulI && in2->Opcode() == Op_MulI) {
 503     IdealLoopTree* loop_n = get_loop(get_ctrl(n));
 504     if (loop_n->_head->as_Loop()->is_valid_counted_loop() &&
 505         Matcher::match_rule_supported(Op_MulAddS2I) &&
 506         Matcher::match_rule_supported(Op_MulAddVS2VI)) {
 507       Node* mul_in1 = in1->in(1);
 508       Node* mul_in2 = in1->in(2);
 509       Node* mul_in3 = in2->in(1);
 510       Node* mul_in4 = in2->in(2);
 511       if (mul_in1->Opcode() == Op_LoadS &&
 512           mul_in2->Opcode() == Op_LoadS &&
 513           mul_in3->Opcode() == Op_LoadS &&
 514           mul_in4->Opcode() == Op_LoadS) {
 515         IdealLoopTree* loop1 = get_loop(get_ctrl(mul_in1));
 516         IdealLoopTree* loop2 = get_loop(get_ctrl(mul_in2));
 517         IdealLoopTree* loop3 = get_loop(get_ctrl(mul_in3));
 518         IdealLoopTree* loop4 = get_loop(get_ctrl(mul_in4));
 519         IdealLoopTree* loop5 = get_loop(get_ctrl(in1));
 520         IdealLoopTree* loop6 = get_loop(get_ctrl(in2));
 521         // All nodes should be in the same counted loop.
 522         if (loop_n == loop1 && loop_n == loop2 && loop_n == loop3 &&
 523             loop_n == loop4 && loop_n == loop5 && loop_n == loop6) {
 524           Node* adr1 = mul_in1->in(MemNode::Address);
 525           Node* adr2 = mul_in2->in(MemNode::Address);
 526           Node* adr3 = mul_in3->in(MemNode::Address);
 527           Node* adr4 = mul_in4->in(MemNode::Address);
 528           if (adr1->is_AddP() && adr2->is_AddP() && adr3->is_AddP() && adr4->is_AddP()) {
 529             if ((adr1->in(AddPNode::Base) == adr3->in(AddPNode::Base)) &&
 530                 (adr2->in(AddPNode::Base) == adr4->in(AddPNode::Base))) {
 531               nn = new MulAddS2INode(mul_in1, mul_in2, mul_in3, mul_in4);
 532               register_new_node(nn, get_ctrl(n));
 533               _igvn.replace_node(n, nn);
 534               return nn;
 535             } else if ((adr1->in(AddPNode::Base) == adr4->in(AddPNode::Base)) &&
 536                        (adr2->in(AddPNode::Base) == adr3->in(AddPNode::Base))) {
 537               nn = new MulAddS2INode(mul_in1, mul_in2, mul_in4, mul_in3);
 538               register_new_node(nn, get_ctrl(n));
 539               _igvn.replace_node(n, nn);
 540               return nn;
 541             }
 542           }
 543         }
 544       }
 545     }
 546   }
 547   return nn;
 548 }
 549 
 550 //------------------------------conditional_move-------------------------------
 551 // Attempt to replace a Phi with a conditional move.  We have some pretty
 552 // strict profitability requirements.  All Phis at the merge point must
 553 // be converted, so we can remove the control flow.  We need to limit the
 554 // number of c-moves to a small handful.  All code that was in the side-arms
 555 // of the CFG diamond is now speculatively executed.  This code has to be
 556 // "cheap enough".  We are pretty much limited to CFG diamonds that merge
 557 // 1 or 2 items with a total of 1 or 2 ops executed speculatively.
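// For example (illustrative): a diamond computing
//   x = (a < b) ? p : q;
// has the Phi merging 'x' replaced by a CMoveI driven by the Bool of the
// CmpI, after which the If/Region diamond folds away.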
 558 Node *PhaseIdealLoop::conditional_move( Node *region ) {
 559 
 560   assert(region->is_Region(), "sanity check");
 561   if (region->req() != 3) return NULL;
 562 
 563   // Check for CFG diamond
 564   Node *lp = region->in(1);
 565   Node *rp = region->in(2);
 566   if (!lp || !rp) return NULL;
 567   Node *lp_c = lp->in(0);
 568   if (lp_c == NULL || lp_c != rp->in(0) || !lp_c->is_If()) return NULL;
 569   IfNode *iff = lp_c->as_If();
 570 
 571   // Check for ops pinned in an arm of the diamond.
 572   // Can't remove the control flow in this case
 573   if (lp->outcnt() > 1) return NULL;
 574   if (rp->outcnt() > 1) return NULL;
 575 
 576   IdealLoopTree* r_loop = get_loop(region);
 577   assert(r_loop == get_loop(iff), "sanity");
 578   // Always convert to CMOVE if all results are used only outside this loop.
 579   bool used_inside_loop = (r_loop == _ltree_root);
 580 
 581   // Check profitability
 582   int cost = 0;
 583   int phis = 0;
 584   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
 585     Node *out = region->fast_out(i);
 586     if (!out->is_Phi()) continue; // Ignore other control edges, etc
 587     phis++;
 588     PhiNode* phi = out->as_Phi();
 589     BasicType bt = phi->type()->basic_type();
 590     switch (bt) {
 591     case T_DOUBLE:
 592     case T_FLOAT:
 593       if (C->use_cmove()) {
 594         continue; //TODO: maybe we want to add some cost
 595       }
 596       cost += Matcher::float_cmove_cost(); // Could be very expensive
 597       break;
 598     case T_LONG: {
      cost += Matcher::long_cmove_cost(); // May encode as 2 CMOVs
 600     }
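    // Fall through: a long CMOV also pays the base cost added below.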
 601     case T_INT:                 // These all CMOV fine
 602     case T_ADDRESS: {           // (RawPtr)
 603       cost++;
 604       break;
 605     }
 606     case T_NARROWOOP: // Fall through
 607     case T_OBJECT: {            // Base oops are OK, but not derived oops
 608       const TypeOopPtr *tp = phi->type()->make_ptr()->isa_oopptr();
 609       // Derived pointers are Bad (tm): what's the Base (for GC purposes) of a
 610       // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
 611       // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
 612       // have a Phi for the base here that we convert to a CMOVE all is well
 613       // and good.  But if the base is dead, we'll not make a CMOVE.  Later
 614       // the allocator will have to produce a base by creating a CMOVE of the
 615       // relevant bases.  This puts the allocator in the business of
 616       // manufacturing expensive instructions, generally a bad plan.
 617       // Just Say No to Conditionally-Moved Derived Pointers.
 618       if (tp && tp->offset() != 0)
 619         return NULL;
 620       cost++;
 621       break;
 622     }
 623     default:
 624       return NULL;              // In particular, can't do memory or I/O
 625     }
 626     // Add in cost any speculative ops
 627     for (uint j = 1; j < region->req(); j++) {
 628       Node *proj = region->in(j);
 629       Node *inp = phi->in(j);
 630       if (get_ctrl(inp) == proj) { // Found local op
 631         cost++;
 632         // Check for a chain of dependent ops; these will all become
 633         // speculative in a CMOV.
 634         for (uint k = 1; k < inp->req(); k++)
 635           if (get_ctrl(inp->in(k)) == proj)
 636             cost += ConditionalMoveLimit; // Too much speculative goo
 637       }
 638     }
 639     // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
 640     // This will likely Split-If, a higher-payoff operation.
 641     for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
 642       Node* use = phi->fast_out(k);
 643       if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
 644         cost += ConditionalMoveLimit;
 645       // Is there a use inside the loop?
 646       // Note: check only basic types since CMoveP is pinned.
 647       if (!used_inside_loop && is_java_primitive(bt)) {
 648         IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
 649         if (r_loop == u_loop || r_loop->is_member(u_loop)) {
 650           used_inside_loop = true;
 651         }
 652       }
 653     }
 654   }//for
 655   Node* bol = iff->in(1);
 656   assert(bol->Opcode() == Op_Bool, "");
 657   int cmp_op = bol->in(1)->Opcode();
 658   // It is expensive to generate flags from a float compare.
 659   // Avoid duplicated float compare.
 660   if (phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return NULL;
 661 
 662   float infrequent_prob = PROB_UNLIKELY_MAG(3);
  // Ignore cost and block frequency if the CMOVE can be moved outside the loop.
 664   if (used_inside_loop) {
 665     if (cost >= ConditionalMoveLimit) return NULL; // Too much goo
 666 
    // The BlockLayoutByFrequency optimization moves the infrequent branch
    // off the hot path. There is no point in CMOV'ing in such a case (110 is
    // used instead of 100 to account for the inexactness of the float value).
 670     if (BlockLayoutByFrequency) {
 671       infrequent_prob = MAX2(infrequent_prob, (float)BlockLayoutMinDiamondPercentage/110.0f);
 672     }
 673   }
 674   // Check for highly predictable branch.  No point in CMOV'ing if
 675   // we are going to predict accurately all the time.
 676   if (C->use_cmove() && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) {
 677     //keep going
 678   } else if (iff->_prob < infrequent_prob ||
 679       iff->_prob > (1.0f - infrequent_prob))
 680     return NULL;
 681 
 682   // --------------
 683   // Now replace all Phis with CMOV's
 684   Node *cmov_ctrl = iff->in(0);
 685   uint flip = (lp->Opcode() == Op_IfTrue);
 686   Node_List wq;
 687   while (1) {
 688     PhiNode* phi = NULL;
 689     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
 690       Node *out = region->fast_out(i);
 691       if (out->is_Phi()) {
 692         phi = out->as_Phi();
 693         break;
 694       }
 695     }
 696     if (phi == NULL)  break;
 697     if (PrintOpto && VerifyLoopOptimizations) { tty->print_cr("CMOV"); }
 698     // Move speculative ops
 699     wq.push(phi);
 700     while (wq.size() > 0) {
 701       Node *n = wq.pop();
 702       for (uint j = 1; j < n->req(); j++) {
 703         Node* m = n->in(j);
 704         if (m != NULL && !is_dominator(get_ctrl(m), cmov_ctrl)) {
 705 #ifndef PRODUCT
 706           if (PrintOpto && VerifyLoopOptimizations) {
 707             tty->print("  speculate: ");
 708             m->dump();
 709           }
 710 #endif
 711           set_ctrl(m, cmov_ctrl);
 712           wq.push(m);
 713         }
 714       }
 715     }
 716     Node *cmov = CMoveNode::make(cmov_ctrl, iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi));
 717     register_new_node( cmov, cmov_ctrl );
 718     _igvn.replace_node( phi, cmov );
 719 #ifndef PRODUCT
 720     if (TraceLoopOpts) {
 721       tty->print("CMOV  ");
 722       r_loop->dump_head();
 723       if (Verbose) {
 724         bol->in(1)->dump(1);
 725         cmov->dump(1);
 726       }
 727     }
 728     if (VerifyLoopOptimizations) verify();
 729 #endif
 730   }
 731 
 732   // The useless CFG diamond will fold up later; see the optimization in
 733   // RegionNode::Ideal.
 734   _igvn._worklist.push(region);
 735 
 736   return iff->in(1);
 737 }
 738 
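// Push the CFG uses of 'm' onto the worklist, stepping through NeverBranch
// nodes (used for infinite loops) to their control projection.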
 739 static void enqueue_cfg_uses(Node* m, Unique_Node_List& wq) {
 740   for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
 741     Node* u = m->fast_out(i);
 742     if (u->is_CFG()) {
 743       if (u->Opcode() == Op_NeverBranch) {
 744         u = ((NeverBranchNode*)u)->proj_out(0);
 745         enqueue_cfg_uses(u, wq);
 746       } else {
 747         wq.push(u);
 748       }
 749     }
 750   }
 751 }
 752 
 753 // Try moving a store out of a loop, right before the loop
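// For example (illustrative), in
//   for (...) { a[inv] = t; ... }
// where the address and value are loop invariant and nothing in the loop
// reads that memory, the store can be hoisted to just before the loop.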
 754 Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
 755   // Store has to be first in the loop body
 756   IdealLoopTree *n_loop = get_loop(n_ctrl);
 757   if (n->is_Store() && n_loop != _ltree_root &&
 758       n_loop->is_loop() && n_loop->_head->is_Loop() &&
 759       n->in(0) != NULL) {
 760     Node* address = n->in(MemNode::Address);
 761     Node* value = n->in(MemNode::ValueIn);
 762     Node* mem = n->in(MemNode::Memory);
 763     IdealLoopTree* address_loop = get_loop(get_ctrl(address));
 764     IdealLoopTree* value_loop = get_loop(get_ctrl(value));
 765 
 766     // - address and value must be loop invariant
 767     // - memory must be a memory Phi for the loop
 768     // - Store must be the only store on this memory slice in the
    // loop: if there's another store following this one then the value
    // written at iteration i by the second store could be overwritten
    // at iteration i+n by the first store: it's not safe to move the
    // first store out of the loop
    // - nothing must observe the memory Phi: this guarantees no read
    // before the store, and we are also guaranteed that the store
    // post-dominates the loop head (ignoring a possible early
    // exit); otherwise there would be an extra Phi involved between the
    // loop's Phi and the store.
    // - there must be no early exit from the loop before the Store
    // (most of the time such an exit would show up as an extra use of the
    // memory Phi, but sometimes it is a bottom memory Phi that takes the
    // store as input).
 782 
 783     if (!n_loop->is_member(address_loop) &&
 784         !n_loop->is_member(value_loop) &&
 785         mem->is_Phi() && mem->in(0) == n_loop->_head &&
 786         mem->outcnt() == 1 &&
 787         mem->in(LoopNode::LoopBackControl) == n) {
 788 
 789       assert(n_loop->_tail != NULL, "need a tail");
 790       assert(is_dominator(n_ctrl, n_loop->_tail), "store control must not be in a branch in the loop");
 791 
 792       // Verify that there's no early exit of the loop before the store.
 793       bool ctrl_ok = false;
 794       {
        // Follow control from the loop head until we reach n, exit the
        // loop, or reach the tail.
 797         ResourceMark rm;
 798         Unique_Node_List wq;
 799         wq.push(n_loop->_head);
 800 
 801         for (uint next = 0; next < wq.size(); ++next) {
 802           Node *m = wq.at(next);
 803           if (m == n->in(0)) {
 804             ctrl_ok = true;
 805             continue;
 806           }
 807           assert(!has_ctrl(m), "should be CFG");
 808           if (!n_loop->is_member(get_loop(m)) || m == n_loop->_tail) {
 809             ctrl_ok = false;
 810             break;
 811           }
 812           enqueue_cfg_uses(m, wq);
 813           if (wq.size() > 10) {
 814             ctrl_ok = false;
 815             break;
 816           }
 817         }
 818       }
 819       if (ctrl_ok) {
 820         // move the Store
 821         _igvn.replace_input_of(mem, LoopNode::LoopBackControl, mem);
 822         _igvn.replace_input_of(n, 0, n_loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl));
 823         _igvn.replace_input_of(n, MemNode::Memory, mem->in(LoopNode::EntryControl));
 824         // Disconnect the phi now. An empty phi can confuse other
 825         // optimizations in this pass of loop opts.
 826         _igvn.replace_node(mem, mem->in(LoopNode::EntryControl));
 827         n_loop->_body.yank(mem);
 828 
 829         set_ctrl_and_loop(n, n->in(0));
 830 
 831         return n;
 832       }
 833     }
 834   }
 835   return NULL;
 836 }
 837 
 838 // Try moving a store out of a loop, right after the loop
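// For example (illustrative), in
//   for (...) { ...; a[inv] = t; }
// where the address is loop invariant and nothing in the loop reads that
// memory, only the value stored by the final iteration is observable, so the
// store can be sunk to just after the loop.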
 839 void PhaseIdealLoop::try_move_store_after_loop(Node* n) {
 840   if (n->is_Store() && n->in(0) != NULL) {
 841     Node *n_ctrl = get_ctrl(n);
 842     IdealLoopTree *n_loop = get_loop(n_ctrl);
 843     // Store must be in a loop
 844     if (n_loop != _ltree_root && !n_loop->_irreducible) {
 845       Node* address = n->in(MemNode::Address);
 846       Node* value = n->in(MemNode::ValueIn);
 847       IdealLoopTree* address_loop = get_loop(get_ctrl(address));
 848       // address must be loop invariant
 849       if (!n_loop->is_member(address_loop)) {
 850         // Store must be last on this memory slice in the loop and
 851         // nothing in the loop must observe it
 852         Node* phi = NULL;
 853         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 854           Node* u = n->fast_out(i);
 855           if (has_ctrl(u)) { // control use?
 856             IdealLoopTree *u_loop = get_loop(get_ctrl(u));
 857             if (!n_loop->is_member(u_loop)) {
 858               continue;
 859             }
 860             if (u->is_Phi() && u->in(0) == n_loop->_head) {
 861               assert(_igvn.type(u) == Type::MEMORY, "bad phi");
 862               // multiple phis on the same slice are possible
 863               if (phi != NULL) {
 864                 return;
 865               }
 866               phi = u;
 867               continue;
 868             }
 869           }
 870           return;
 871         }
 872         if (phi != NULL) {
 873           // Nothing in the loop before the store (next iteration)
 874           // must observe the stored value
 875           bool mem_ok = true;
 876           {
 877             ResourceMark rm;
 878             Unique_Node_List wq;
 879             wq.push(phi);
 880             for (uint next = 0; next < wq.size() && mem_ok; ++next) {
 881               Node *m = wq.at(next);
 882               for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax && mem_ok; i++) {
 883                 Node* u = m->fast_out(i);
 884                 if (u->is_Store() || u->is_Phi()) {
 885                   if (u != n) {
 886                     wq.push(u);
 887                     mem_ok = (wq.size() <= 10);
 888                   }
 889                 } else {
 890                   mem_ok = false;
 891                   break;
 892                 }
 893               }
 894             }
 895           }
 896           if (mem_ok) {
 897             // Move the store out of the loop if the LCA of all
 898             // users (except for the phi) is outside the loop.
 899             Node* hook = new Node(1);
 900             _igvn.rehash_node_delayed(phi);
 901             int count = phi->replace_edge(n, hook);
 902             assert(count > 0, "inconsistent phi");
 903 
 904             // Compute latest point this store can go
 905             Node* lca = get_late_ctrl(n, get_ctrl(n));
 906             if (n_loop->is_member(get_loop(lca))) {
 907               // LCA is in the loop - bail out
 908               _igvn.replace_node(hook, n);
 909               return;
 910             }
 911 #ifdef ASSERT
 912             if (n_loop->_head->is_Loop() && n_loop->_head->as_Loop()->is_strip_mined()) {
              assert(n_loop->_head->Opcode() == Op_CountedLoop, "strip mined loop head must be the inner CountedLoop");
 914               n_loop->_head->as_Loop()->verify_strip_mined(1);
 915               Node* outer = n_loop->_head->as_CountedLoop()->outer_loop();
 916               IdealLoopTree* outer_loop = get_loop(outer);
 917               assert(n_loop->_parent == outer_loop, "broken loop tree");
              assert(get_loop(lca) == outer_loop, "safepoint in outer loop consumes all memory state");
 919             }
 920 #endif
 921 
 922             // Move store out of the loop
 923             _igvn.replace_node(hook, n->in(MemNode::Memory));
 924             _igvn.replace_input_of(n, 0, lca);
 925             set_ctrl_and_loop(n, lca);
 926 
 927             // Disconnect the phi now. An empty phi can confuse other
            // optimizations in this pass of loop opts.
 929             if (phi->in(LoopNode::LoopBackControl) == phi) {
 930               _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
 931               n_loop->_body.yank(phi);
 932             }
 933           }
 934         }
 935       }
 936     }
 937   }
 938 }
 939 
 940 //------------------------------split_if_with_blocks_pre-----------------------
 941 // Do the real work in a non-recursive function.  Data nodes want to be
 942 // cloned in the pre-order so they can feed each other nicely.
 943 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
 944   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 945   Node* bs_res = bs->split_if_pre(this, n);
 946   if (bs_res != NULL) {
 947     return bs_res;
 948   }
 949   // Cloning these guys is unlikely to win
 950   int n_op = n->Opcode();
 951   if( n_op == Op_MergeMem ) return n;
 952   if( n->is_Proj() ) return n;
 953   // Do not clone-up CmpFXXX variations, as these are always
 954   // followed by a CmpI
 955   if( n->is_Cmp() ) return n;
 956   // Attempt to use a conditional move instead of a phi/branch
 957   if( ConditionalMoveLimit > 0 && n_op == Op_Region ) {
 958     Node *cmov = conditional_move( n );
 959     if( cmov ) return cmov;
 960   }
 961   if( n->is_CFG() || n->is_LoadStore() )
 962     return n;
 963   if( n_op == Op_Opaque1 ||     // Opaque nodes cannot be mod'd
 964       n_op == Op_Opaque2 ) {
 965     if( !C->major_progress() )   // If chance of no more loop opts...
 966       _igvn._worklist.push(n);  // maybe we'll remove them
 967     return n;
 968   }
 969 
 970   if( n->is_Con() ) return n;   // No cloning for Con nodes
 971 
 972   Node *n_ctrl = get_ctrl(n);
 973   if( !n_ctrl ) return n;       // Dead node
 974 
 975   Node* res = try_move_store_before_loop(n, n_ctrl);
 976   if (res != NULL) {
 977     return n;
 978   }
 979 
 980   // Attempt to remix address expressions for loop invariants
 981   Node *m = remix_address_expressions( n );
 982   if( m ) return m;
 983 
 984   if (n_op == Op_AddI) {
 985     Node *nn = convert_add_to_muladd( n );
 986     if ( nn ) return nn;
 987   }
 988 
 989   if (n->is_ConstraintCast()) {
 990     Node* dom_cast = n->as_ConstraintCast()->dominating_cast(&_igvn, this);
    // ConstraintCastNode::dominating_cast() uses the node's control input to determine domination.
    // Node control inputs don't necessarily agree with loop control info (due to
    // transformations that happened in between), thus an additional dominance check is needed
    // to keep loop info valid.
 995     if (dom_cast != NULL && is_dominator(get_ctrl(dom_cast), get_ctrl(n))) {
 996       _igvn.replace_node(n, dom_cast);
 997       return dom_cast;
 998     }
 999   }
1000 
1001   // Determine if the Node has inputs from some local Phi.
1002   // Returns the block to clone thru.
1003   Node *n_blk = has_local_phi_input( n );
1004   if( !n_blk ) return n;
1005 
1006   // Do not clone the trip counter through on a CountedLoop
1007   // (messes up the canonical shape).
1008   if( n_blk->is_CountedLoop() && n->Opcode() == Op_AddI ) return n;
1009 
1010   // Check for having no control input; not pinned.  Allow
1011   // dominating control.
1012   if (n->in(0)) {
1013     Node *dom = idom(n_blk);
1014     if (dom_lca(n->in(0), dom) != n->in(0)) {
1015       return n;
1016     }
1017   }
1018   // Policy: when is it profitable.  You must get more wins than
1019   // policy before it is considered profitable.  Policy is usually 0,
1020   // so 1 win is considered profitable.  Big merges will require big
1021   // cloning, so get a larger policy.
1022   int policy = n_blk->req() >> 2;
1023 
1024   // If the loop is a candidate for range check elimination,
  // delay splitting through its phi until a later loop optimization
1026   if (n_blk->is_CountedLoop()) {
1027     IdealLoopTree *lp = get_loop(n_blk);
1028     if (lp && lp->_rce_candidate) {
1029       return n;
1030     }
1031   }
1032 
1033   if (must_throttle_split_if()) return n;
1034 
1035   // Split 'n' through the merge point if it is profitable
1036   Node *phi = split_thru_phi( n, n_blk, policy );
1037   if (!phi) return n;
1038 
1039   // Found a Phi to split thru!
1040   // Replace 'n' with the new phi
1041   _igvn.replace_node( n, phi );
1042   // Moved a load around the loop, 'en-registering' something.
1043   if (n_blk->is_Loop() && n->is_Load() &&
1044       !phi->in(LoopNode::LoopBackControl)->is_Load())
1045     C->set_major_progress();
1046 
1047   return phi;
1048 }
1049 
1050 static bool merge_point_too_heavy(Compile* C, Node* region) {
1051   // Bail out if the region and its phis have too many users.
1052   int weight = 0;
1053   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1054     weight += region->fast_out(i)->outcnt();
1055   }
1056   int nodes_left = C->max_node_limit() - C->live_nodes();
1057   if (weight * 8 > nodes_left) {
1058     if (PrintOpto) {
1059       tty->print_cr("*** Split-if bails out:  %d nodes, region weight %d", C->unique(), weight);
1060     }
1061     return true;
1062   } else {
1063     return false;
1064   }
1065 }
1066 
1067 static bool merge_point_safe(Node* region) {
1068   // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode
1069   // having a PhiNode input. This sidesteps the dangerous case where the split
1070   // ConvI2LNode may become TOP if the input Value() does not
1071   // overlap the ConvI2L range, leaving a node which may not dominate its
1072   // uses.
1073   // A better fix for this problem can be found in the BugTraq entry, but
1074   // expediency for Mantis demands this hack.
1075   // 6855164: If the merge point has a FastLockNode with a PhiNode input, we stop
1076   // split_if_with_blocks from splitting a block because we could not move around
1077   // the FastLockNode.
1078   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1079     Node* n = region->fast_out(i);
1080     if (n->is_Phi()) {
1081       for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
1082         Node* m = n->fast_out(j);
1083         if (m->is_FastLock())
1084           return false;
1085 #ifdef _LP64
1086         if (m->Opcode() == Op_ConvI2L)
1087           return false;
1088         if (m->is_CastII() && m->isa_CastII()->has_range_check()) {
1089           return false;
1090         }
1091 #endif
1092       }
1093     }
1094   }
1095   return true;
1096 }
1097 
1098 
1099 //------------------------------place_near_use---------------------------------
// Place some computation next to its use, but not inside inner loops.
// For inner loop uses, move it to the preheader area.
1102 Node *PhaseIdealLoop::place_near_use(Node *useblock) const {
1103   IdealLoopTree *u_loop = get_loop( useblock );
1104   if (u_loop->_irreducible) {
1105     return useblock;
1106   }
1107   if (u_loop->_child) {
1108     if (useblock == u_loop->_head && u_loop->_head->is_OuterStripMinedLoop()) {
1109       return u_loop->_head->in(LoopNode::EntryControl);
1110     }
1111     return useblock;
1112   }
1113   return u_loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl);
1114 }
1115 
1116 
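// Return true if 'n' is an If (not a CountedLoopEnd) whose condition is
// identical to that of the If dominating its Region input, and every path into
// that Region comes from one of the dominating If's projections; such
// back-to-back Ifs can be merged.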
1117 bool PhaseIdealLoop::identical_backtoback_ifs(Node *n) {
1118   if (!n->is_If() || n->is_CountedLoopEnd()) {
1119     return false;
1120   }
1121   if (!n->in(0)->is_Region()) {
1122     return false;
1123   }
1124   Node* region = n->in(0);
1125   Node* dom = idom(region);
1126   if (!dom->is_If() || dom->in(1) != n->in(1)) {
1127     return false;
1128   }
1129   IfNode* dom_if = dom->as_If();
1130   Node* proj_true = dom_if->proj_out(1);
1131   Node* proj_false = dom_if->proj_out(0);
1132 
1133   for (uint i = 1; i < region->req(); i++) {
1134     if (is_dominator(proj_true, region->in(i))) {
1135       continue;
1136     }
1137     if (is_dominator(proj_false, region->in(i))) {
1138       continue;
1139     }
1140     return false;
1141   }
1142 
1143   return true;
1144 }
1145 
1146 
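// Return true if it is safe and profitable to apply the split-if transform at
// the merge point 'n_ctrl': no throttling, no irreducible loops, a merge point
// that is not too heavy, no dead paths, all inputs in the same loop, and a
// merge point that is safe to split.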
1147 bool PhaseIdealLoop::can_split_if(Node* n_ctrl) {
1148   if (must_throttle_split_if()) {
1149     return false;
1150   }
1151 
1152   // Do not do 'split-if' if irreducible loops are present.
1153   if (_has_irreducible_loops) {
1154     return false;
1155   }
1156 
1157   if (merge_point_too_heavy(C, n_ctrl)) {
1158     return false;
1159   }
1160 
1161   // Do not do 'split-if' if some paths are dead.  First do dead code
  // elimination and then see if it's still profitable.
1163   for (uint i = 1; i < n_ctrl->req(); i++) {
1164     if (n_ctrl->in(i) == C->top()) {
1165       return false;
1166     }
1167   }
1168 
1169   // If trying to do a 'Split-If' at the loop head, it is only
1170   // profitable if the cmp folds up on BOTH paths.  Otherwise we
1171   // risk peeling a loop forever.
1172 
1173   // CNC - Disabled for now.  Requires careful handling of loop
1174   // body selection for the cloned code.  Also, make sure we check
1175   // for any input path not being in the same loop as n_ctrl.  For
1176   // irreducible loops we cannot check for 'n_ctrl->is_Loop()'
1177   // because the alternative loop entry points won't be converted
1178   // into LoopNodes.
1179   IdealLoopTree *n_loop = get_loop(n_ctrl);
1180   for (uint j = 1; j < n_ctrl->req(); j++) {
1181     if (get_loop(n_ctrl->in(j)) != n_loop) {
1182       return false;
1183     }
1184   }
1185 
1186   // Check for safety of the merge point.
1187   if (!merge_point_safe(n_ctrl)) {
1188     return false;
1189   }
1190 
1191   return true;
1192 }
1193 
1194 //------------------------------split_if_with_blocks_post----------------------
1195 // Do the real work in a non-recursive function.  CFG hackery wants to be
1196 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1197 // info.
1198 void PhaseIdealLoop::split_if_with_blocks_post(Node *n, bool last_round) {
1199 
1200   // Cloning Cmp through Phi's involves the split-if transform.
1201   // FastLock is not used by an If
1202   if (n->is_Cmp() && !n->is_FastLock() && !last_round) {
1203     Node *n_ctrl = get_ctrl(n);
1204     // Determine if the Node has inputs from some local Phi.
1205     // Returns the block to clone thru.
1206     Node *n_blk = has_local_phi_input(n);
1207     if (n_blk != n_ctrl) {
1208       return;
1209     }
1210 
1211     if (!can_split_if(n_ctrl)) {
1212       return;
1213     }
1214 
1215     if (n->outcnt() != 1) {
1216       return; // Multiple bool's from 1 compare?
1217     }
1218     Node *bol = n->unique_out();
1219     assert(bol->is_Bool(), "expect a bool here");
1220     if (bol->outcnt() != 1) {
      return; // Multiple branches from 1 compare?
1222     }
1223     Node *iff = bol->unique_out();
1224 
1225     // Check some safety conditions
1226     if (iff->is_If()) {        // Classic split-if?
1227       if (iff->in(0) != n_ctrl) {
1228         return; // Compare must be in same blk as if
1229       }
1230     } else if (iff->is_CMove()) { // Trying to split-up a CMOVE
1231       // Can't split CMove with different control edge.
1232       if (iff->in(0) != NULL && iff->in(0) != n_ctrl ) {
1233         return;
1234       }
1235       if (get_ctrl(iff->in(2)) == n_ctrl ||
1236           get_ctrl(iff->in(3)) == n_ctrl) {
1237         return;                 // Inputs not yet split-up
1238       }
1239       if (get_loop(n_ctrl) != get_loop(get_ctrl(iff))) {
1240         return;                 // Loop-invar test gates loop-varying CMOVE
1241       }
1242     } else {
1243       return;  // some other kind of node, such as an Allocate
1244     }
1245 
    // When is split-if profitable?  Every 'win' means some control flow
    // goes dead, so it's almost always a win.
1248     int policy = 0;
1249     // Split compare 'n' through the merge point if it is profitable
1250     Node *phi = split_thru_phi( n, n_ctrl, policy);
1251     if (!phi) {
1252       return;
1253     }
1254 
1255     // Found a Phi to split thru!
1256     // Replace 'n' with the new phi
1257     _igvn.replace_node(n, phi);
1258 
1259     // Now split the bool up thru the phi
1260     Node *bolphi = split_thru_phi(bol, n_ctrl, -1);
1261     guarantee(bolphi != NULL, "null boolean phi node");
1262 
1263     _igvn.replace_node(bol, bolphi);
1264     assert(iff->in(1) == bolphi, "");
1265 
1266     if (bolphi->Value(&_igvn)->singleton()) {
1267       return;
1268     }
1269 
1270     // Conditional-move?  Must split up now
1271     if (!iff->is_If()) {
1272       Node *cmovphi = split_thru_phi(iff, n_ctrl, -1);
1273       _igvn.replace_node(iff, cmovphi);
1274       return;
1275     }
1276 
1277     // Now split the IF
1278     do_split_if(iff);
1279     return;
1280   }
1281 
1282   // Two identical ifs back to back can be merged
1283   if (identical_backtoback_ifs(n) && can_split_if(n->in(0))) {
1284     Node *n_ctrl = n->in(0);
1285     PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
1286     IfNode* dom_if = idom(n_ctrl)->as_If();
1287     Node* proj_true = dom_if->proj_out(1);
1288     Node* proj_false = dom_if->proj_out(0);
1289     Node* con_true = _igvn.makecon(TypeInt::ONE);
1290     Node* con_false = _igvn.makecon(TypeInt::ZERO);
1291 
1292     for (uint i = 1; i < n_ctrl->req(); i++) {
1293       if (is_dominator(proj_true, n_ctrl->in(i))) {
1294         bolphi->init_req(i, con_true);
1295       } else {
1296         assert(is_dominator(proj_false, n_ctrl->in(i)), "bad if");
1297         bolphi->init_req(i, con_false);
1298       }
1299     }
1300     register_new_node(bolphi, n_ctrl);
1301     _igvn.replace_input_of(n, 1, bolphi);
1302 
1303     // Now split the IF
1304     do_split_if(n);
1305     return;
1306   }
1307 
1308   // Check for an IF ready to split; one that has its
1309   // condition codes input coming from a Phi at the block start.
1310   int n_op = n->Opcode();
1311 
  // Check for an IF being dominated by another IF with the same test
1313   if (n_op == Op_If ||
1314       n_op == Op_RangeCheck) {
1315     Node *bol = n->in(1);
1316     uint max = bol->outcnt();
1317     // Check for same test used more than once?
1318     if (max > 1 && bol->is_Bool()) {
1319       // Search up IDOMs to see if this IF is dominated.
1320       Node *cutoff = get_ctrl(bol);
1321 
1322       // Now search up IDOMs till cutoff, looking for a dominating test
1323       Node *prevdom = n;
1324       Node *dom = idom(prevdom);
1325       while (dom != cutoff) {
1326         if (dom->req() > 1 && dom->in(1) == bol && prevdom->in(0) == dom) {
1327           // Replace the dominated test with an obvious true or false.
1328           // Place it on the IGVN worklist for later cleanup.
1329           C->set_major_progress();
1330           dominated_by(prevdom, n, false, true);
1331 #ifndef PRODUCT
1332           if( VerifyLoopOptimizations ) verify();
1333 #endif
1334           return;
1335         }
1336         prevdom = dom;
1337         dom = idom(prevdom);
1338       }
1339     }
1340   }
1341 
1342   // See if a shared loop-varying computation has no loop-varying uses.
1343   // Happens if something is only used for JVM state in uncommon trap exits,
1344   // like various versions of induction variable+offset.  Clone the
1345   // computation per usage to allow it to sink out of the loop.
1346   if (has_ctrl(n) && !n->in(0)) {// n not dead and has no control edge (can float about)
1347     Node *n_ctrl = get_ctrl(n);
1348     IdealLoopTree *n_loop = get_loop(n_ctrl);
1349     if( n_loop != _ltree_root ) {
1350       DUIterator_Fast imax, i = n->fast_outs(imax);
1351       for (; i < imax; i++) {
1352         Node* u = n->fast_out(i);
1353         if( !has_ctrl(u) )     break; // Found control user
1354         IdealLoopTree *u_loop = get_loop(get_ctrl(u));
1355         if( u_loop == n_loop ) break; // Found loop-varying use
1356         if( n_loop->is_member( u_loop ) ) break; // Found use in inner loop
1357         if( u->Opcode() == Op_Opaque1 ) break; // Found loop limit, bugfix for 4677003
1358       }
1359       bool did_break = (i < imax);  // Did we break out of the previous loop?
1360       if (!did_break && n->outcnt() > 1) { // All uses in outer loops!
1361         Node *late_load_ctrl = NULL;
1362         if (n->is_Load()) {
1363           // If n is a load, get and save the result from get_late_ctrl(),
1364           // to be later used in calculating the control for n's clones.
1365           clear_dom_lca_tags();
1366           late_load_ctrl = get_late_ctrl(n, n_ctrl);
1367         }
1368         // If n is a load, and the late control is the same as the current
1369         // control, then the cloning of n is a pointless exercise, because
1370         // GVN will ensure that we end up where we started.
1371         if (!n->is_Load() || late_load_ctrl != n_ctrl) {
1372           BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1373           for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin; ) {
1374             Node *u = n->last_out(j); // Clone private computation per use
1375             _igvn.rehash_node_delayed(u);
1376             Node *x = n->clone(); // Clone computation
1377             Node *x_ctrl = NULL;
1378             if( u->is_Phi() ) {
1379               // Replace all uses of normal nodes.  Replace Phi uses
1380               // individually, so the separate Nodes can sink down
1381               // different paths.
1382               uint k = 1;
1383               while( u->in(k) != n ) k++;
1384               u->set_req( k, x );
1385               // x goes next to Phi input path
1386               x_ctrl = u->in(0)->in(k);
1387               --j;
1388             } else {              // Normal use
1389               // Replace all uses
1390               for( uint k = 0; k < u->req(); k++ ) {
1391                 if( u->in(k) == n ) {
1392                   u->set_req( k, x );
1393                   --j;
1394                 }
1395               }
1396               x_ctrl = get_ctrl(u);
1397             }
1398 
1399             // Find control for 'x' next to use but not inside inner loops.
1400             // For inner loop uses get the preheader area.
1401             x_ctrl = place_near_use(x_ctrl);
1402 
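                 // Let the barrier set sink nodes it is responsible for; if it
                 // handles this clone, skip the generic code below.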
1403             if (bs->sink_node(this, n, x, x_ctrl, n_ctrl)) {
1404               continue;
1405             }
1406 
1407             if (n->is_Load()) {
1408               // For loads, add a control edge to a CFG node outside of the loop
1409               // to force them to not combine and return back inside the loop
1410               // during GVN optimization (4641526).
1411               //
1412               // Because we are setting the actual control input, factor in
1413               // the result from get_late_ctrl() so we respect any
1414               // anti-dependences. (6233005).
1415               x_ctrl = dom_lca(late_load_ctrl, x_ctrl);
1416 
1417               // Don't allow the control input to be a CFG splitting node.
1418               // Such nodes should only have ProjNodes as outs, e.g. IfNode
1419               // should only have IfTrueNode and IfFalseNode (4985384).
1420               x_ctrl = find_non_split_ctrl(x_ctrl);
1421               assert(dom_depth(n_ctrl) <= dom_depth(x_ctrl), "n is later than its clone");
1422 
1423               x->set_req(0, x_ctrl);
1424             }
1425             register_new_node(x, x_ctrl);
1426 
1427             // Some institutional knowledge is needed here: 'x' is
1428             // yanked because if the optimizer runs GVN on it all the
1429             // cloned x's will common up and undo this optimization and
1430             // be forced back in the loop.  This is annoying because it
1431             // makes +VerifyOpto report false-positives on progress.  I
1432             // tried setting control edges on the x's to force them to
1433             // not combine, but the matching gets worried when it tries
1434             // to fold a StoreP and an AddP together (as part of an
1435             // address expression) and the AddP and StoreP have
1436             // different controls.
1437             if (!x->is_Load() && !x->is_DecodeNarrowPtr()) _igvn._worklist.yank(x);
1438           }
1439           _igvn.remove_dead_node(n);
1440         }
1441       }
1442     }
1443   }
1444 
1445   try_move_store_after_loop(n);
1446 
1447   // Check for Opaque2's whose loop has disappeared - whose input is in the
1448   // same loop nest as their output.  Remove 'em, they are no longer useful.
1449   if( n_op == Op_Opaque2 &&
1450       n->in(1) != NULL &&
1451       get_loop(get_ctrl(n)) == get_loop(get_ctrl(n->in(1))) ) {
1452     _igvn.replace_node( n, n->in(1) );
1453   }
1454 
1455 #if INCLUDE_ZGC
1456   if (UseZGC) {
1457     ZBarrierSetC2::loop_optimize_gc_barrier(this, n, last_round);
1458   }
1459 #endif
1460 }
1461 
1462 //------------------------------split_if_with_blocks---------------------------
1463 // Check for aggressive application of 'split-if' optimization,
1464 // using basic block level info.
1465 void PhaseIdealLoop::split_if_with_blocks(VectorSet &visited, Node_Stack &nstack, bool last_round) {
1466   Node* root = C->root();
1467   visited.set(root->_idx); // first, mark root as visited
1468   // Do pre-visit work for root
1469   Node* n   = split_if_with_blocks_pre(root);
1470   uint  cnt = n->outcnt();
1471   uint  i   = 0;
1472 
1473   while (true) {
1474     // Visit all children
1475     if (i < cnt) {
1476       Node* use = n->raw_out(i);
1477       ++i;
1478       if (use->outcnt() != 0 && !visited.test_set(use->_idx)) {
1479         // Now do pre-visit work for this use
1480         use = split_if_with_blocks_pre(use);
1481         nstack.push(n, i); // Save parent and next use's index.
1482         n   = use;         // Process all children of current use.
1483         cnt = use->outcnt();
1484         i   = 0;
1485       }
1486     }
1487     else {
1488       // All of n's children have been processed, complete post-processing.
1489       if (cnt != 0 && !n->is_Con()) {
1490         assert(has_node(n), "no dead nodes");
1491         split_if_with_blocks_post(n, last_round);
1492       }
1493       if (must_throttle_split_if()) {
1494         nstack.clear();
1495       }
1496       if (nstack.is_empty()) {
1497         // Finished all nodes on stack.
1498         break;
1499       }
1500       // Get saved parent node and next use's index. Visit the rest of uses.
1501       n   = nstack.node();
1502       cnt = n->outcnt();
1503       i   = nstack.index();
1504       nstack.pop();
1505     }
1506   }
1507 }
1508 
1509 
1510 //=============================================================================
1511 //
1512 //                   C L O N E   A   L O O P   B O D Y
1513 //
1514 
1515 //------------------------------clone_iff--------------------------------------
1516 // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
1517 // "Nearly" because all Nodes have been cloned from the original in the loop,
1518 // but the fall-in edges to the Cmp are different.  Clone bool/Cmp pairs
1519 // through the Phi recursively, and return a Bool.
1520 Node* PhaseIdealLoop::clone_iff(PhiNode *phi, IdealLoopTree *loop) {
1521 
1522   // Convert this Phi into a Phi merging Bools
1523   uint i;
1524   for (i = 1; i < phi->req(); i++) {
1525     Node *b = phi->in(i);
1526     if (b->is_Phi()) {
1527       _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi(), loop));
1528     } else {
1529       assert(b->is_Bool() || b->Opcode() == Op_Opaque4, "");
1530     }
1531   }
1532 
1533   Node* n = phi->in(1);
1534   Node* sample_opaque = NULL;
1535   Node *sample_bool = NULL;
1536   if (n->Opcode() == Op_Opaque4) {
1537     sample_opaque = n;
1538     sample_bool = n->in(1);
1539     assert(sample_bool->is_Bool(), "wrong type");
1540   } else {
1541     sample_bool = n;
1542   }
1543   Node *sample_cmp = sample_bool->in(1);
1544 
1545   // Make Phis to merge the Cmp's inputs.
1546   PhiNode *phi1 = new PhiNode(phi->in(0), Type::TOP);
1547   PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
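       // For each merged path, step through the Bool (and the Opaque4 wrapper,
       // when present) to reach the Cmp, and route its two inputs into phi1 and
       // phi2 respectively.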
1548   for (i = 1; i < phi->req(); i++) {
1549     Node *n1 = sample_opaque == NULL ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
1550     Node *n2 = sample_opaque == NULL ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
1551     phi1->set_req(i, n1);
1552     phi2->set_req(i, n2);
1553     phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
1554     phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
1555   }
1556   // See if these Phis have been made before.
1557   // Register with optimizer
1558   Node *hit1 = _igvn.hash_find_insert(phi1);
1559   if (hit1) {                   // Hit, toss just made Phi
1560     _igvn.remove_dead_node(phi1); // Remove new phi
1561     assert(hit1->is_Phi(), "" );
1562     phi1 = (PhiNode*)hit1;      // Use existing phi
1563   } else {                      // Miss
1564     _igvn.register_new_node_with_optimizer(phi1);
1565   }
1566   Node *hit2 = _igvn.hash_find_insert(phi2);
1567   if (hit2) {                   // Hit, toss just made Phi
1568     _igvn.remove_dead_node(phi2); // Remove new phi
1569     assert(hit2->is_Phi(), "" );
1570     phi2 = (PhiNode*)hit2;      // Use existing phi
1571   } else {                      // Miss
1572     _igvn.register_new_node_with_optimizer(phi2);
1573   }
1574   // Register Phis with loop/block info
1575   set_ctrl(phi1, phi->in(0));
1576   set_ctrl(phi2, phi->in(0));
1577   // Make a new Cmp
1578   Node *cmp = sample_cmp->clone();
1579   cmp->set_req(1, phi1);
1580   cmp->set_req(2, phi2);
1581   _igvn.register_new_node_with_optimizer(cmp);
1582   set_ctrl(cmp, phi->in(0));
1583 
1584   // Make a new Bool
1585   Node *b = sample_bool->clone();
1586   b->set_req(1,cmp);
1587   _igvn.register_new_node_with_optimizer(b);
1588   set_ctrl(b, phi->in(0));
1589 
1590   if (sample_opaque != NULL) {
1591     Node* opaque = sample_opaque->clone();
1592     opaque->set_req(1, b);
1593     _igvn.register_new_node_with_optimizer(opaque);
1594     set_ctrl(opaque, phi->in(0));
1595     return opaque;
1596   }
1597 
1598   assert(b->is_Bool(), "");
1599   return b;
1600 }
1601 
1602 //------------------------------clone_bool-------------------------------------
1603 // Passed in a Phi merging (recursively) some nearly equivalent Cmps.
1604 // "Nearly" because all Nodes have been cloned from the original in the loop,
1605 // but the fall-in edges to the Cmp are different.  Clone Cmp nodes
1606 // through the Phi recursively, and return a Cmp.
1607 CmpNode *PhaseIdealLoop::clone_bool( PhiNode *phi, IdealLoopTree *loop ) {
1608   uint i;
1609   // Convert this Phi into a Phi merging Bools
1610   for( i = 1; i < phi->req(); i++ ) {
1611     Node *b = phi->in(i);
1612     if( b->is_Phi() ) {
1613       _igvn.replace_input_of(phi, i, clone_bool( b->as_Phi(), loop ));
1614     } else {
1615       assert( b->is_Cmp() || b->is_top(), "inputs are all Cmp or TOP" );
1616     }
1617   }
1618 
1619   Node *sample_cmp = phi->in(1);
1620 
1621   // Make Phis to merge the Cmp's inputs.
1622   PhiNode *phi1 = new PhiNode( phi->in(0), Type::TOP );
1623   PhiNode *phi2 = new PhiNode( phi->in(0), Type::TOP );
1624   for( uint j = 1; j < phi->req(); j++ ) {
1625     Node *cmp_top = phi->in(j); // Inputs are all Cmp or TOP
1626     Node *n1, *n2;
1627     if( cmp_top->is_Cmp() ) {
1628       n1 = cmp_top->in(1);
1629       n2 = cmp_top->in(2);
1630     } else {
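           // Dead path (TOP): feed TOP into both operand Phis.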
1631       n1 = n2 = cmp_top;
1632     }
1633     phi1->set_req( j, n1 );
1634     phi2->set_req( j, n2 );
1635     phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
1636     phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
1637   }
1638 
1639   // See if these Phis have been made before.
1640   // Register with optimizer
1641   Node *hit1 = _igvn.hash_find_insert(phi1);
1642   if( hit1 ) {                  // Hit, toss just made Phi
1643     _igvn.remove_dead_node(phi1); // Remove new phi
1644     assert( hit1->is_Phi(), "" );
1645     phi1 = (PhiNode*)hit1;      // Use existing phi
1646   } else {                      // Miss
1647     _igvn.register_new_node_with_optimizer(phi1);
1648   }
1649   Node *hit2 = _igvn.hash_find_insert(phi2);
1650   if( hit2 ) {                  // Hit, toss just made Phi
1651     _igvn.remove_dead_node(phi2); // Remove new phi
1652     assert( hit2->is_Phi(), "" );
1653     phi2 = (PhiNode*)hit2;      // Use existing phi
1654   } else {                      // Miss
1655     _igvn.register_new_node_with_optimizer(phi2);
1656   }
1657   // Register Phis with loop/block info
1658   set_ctrl(phi1, phi->in(0));
1659   set_ctrl(phi2, phi->in(0));
1660   // Make a new Cmp
1661   Node *cmp = sample_cmp->clone();
1662   cmp->set_req( 1, phi1 );
1663   cmp->set_req( 2, phi2 );
1664   _igvn.register_new_node_with_optimizer(cmp);
1665   set_ctrl(cmp, phi->in(0));
1666 
1667   assert( cmp->is_Cmp(), "" );
1668   return (CmpNode*)cmp;
1669 }
1670 
1671 //------------------------------sink_use---------------------------------------
1672 // If 'use' was in the loop-exit block, it now needs to be sunk
1673 // below the post-loop merge point.
1674 void PhaseIdealLoop::sink_use( Node *use, Node *post_loop ) {
1675   if (!use->is_CFG() && get_ctrl(use) == post_loop->in(2)) {
1676     set_ctrl(use, post_loop);
1677     for (DUIterator j = use->outs(); use->has_out(j); j++)
1678       sink_use(use->out(j), post_loop);
1679   }
1680 }
1681 
1682 void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new,
1683                                                  IdealLoopTree* loop, IdealLoopTree* outer_loop,
1684                                                  Node_List*& split_if_set, Node_List*& split_bool_set,
1685                                                  Node_List*& split_cex_set, Node_List& worklist,
1686                                                  uint new_counter, CloneLoopMode mode) {
1687   Node* nnn = old_new[old->_idx];
1688   // Copy uses to a worklist, so I can munge the def-use info
1689   // with impunity.
1690   for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
1691     worklist.push(old->fast_out(j));
1692 
1693   while( worklist.size() ) {
1694     Node *use = worklist.pop();
1695     if (!has_node(use))  continue; // Ignore dead nodes
1696     if (use->in(0) == C->top())  continue;
1697     IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
1698     // Check for data-use outside of loop - at least one of OLD or USE
1699     // must not be a CFG node.
1700 #ifdef ASSERT
1701     if (loop->_head->as_Loop()->is_strip_mined() && outer_loop->is_member(use_loop) && !loop->is_member(use_loop) && old_new[use->_idx] == NULL) {
1702       Node* sfpt = loop->_head->as_CountedLoop()->outer_safepoint();
1703       assert(mode == ControlAroundStripMined && use == sfpt, "missed a node");
1704     }
1705 #endif
1706     if (!loop->is_member(use_loop) && !outer_loop->is_member(use_loop) && (!old->is_CFG() || !use->is_CFG())) {
1707 
1708       // If the Data use is an IF, that means we have an IF outside of the
1709       // loop that is switching on a condition that is set inside of the
1710       // loop.  Happens if people set a loop-exit flag; then test the flag
1711       // in the loop to break the loop, then test it again outside of the
1712       // loop to determine which way the loop exited.
1713       // Loop predicate If node connects to Bool node through Opaque1 node.
1714       if (use->is_If() || use->is_CMove() || C->is_predicate_opaq(use) || use->Opcode() == Op_Opaque4) {
1715         // Since this case is highly unlikely, we lazily build the worklist
1716         // of such Nodes to go split.
1717         if (!split_if_set) {
1718           ResourceArea *area = Thread::current()->resource_area();
1719           split_if_set = new Node_List(area);
1720         }
1721         split_if_set->push(use);
1722       }
1723       if (use->is_Bool()) {
1724         if (!split_bool_set) {
1725           ResourceArea *area = Thread::current()->resource_area();
1726           split_bool_set = new Node_List(area);
1727         }
1728         split_bool_set->push(use);
1729       }
1730       if (use->Opcode() == Op_CreateEx) {
1731         if (!split_cex_set) {
1732           ResourceArea *area = Thread::current()->resource_area();
1733           split_cex_set = new Node_List(area);
1734         }
1735         split_cex_set->push(use);
1736       }
1737 
1738 
1739       // Get "block" use is in
1740       uint idx = 0;
1741       while( use->in(idx) != old ) idx++;
1742       Node *prev = use->is_CFG() ? use : get_ctrl(use);
1743       assert(!loop->is_member(get_loop(prev)) && !outer_loop->is_member(get_loop(prev)), "" );
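           // A merge Region freshly created by clone_loop (idx >= new_counter)
           // has the original-loop exit on input 2; otherwise walk up the
           // dominator tree.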
1744       Node *cfg = prev->_idx >= new_counter
1745         ? prev->in(2)
1746         : idom(prev);
1747       if( use->is_Phi() )     // Phi use is in prior block
1748         cfg = prev->in(idx);  // NOT in block of Phi itself
1749       if (cfg->is_top()) {    // Use is dead?
1750         _igvn.replace_input_of(use, idx, C->top());
1751         continue;
1752       }
1753 
1754       // If use is referenced through control edge... (idx == 0)
1755       if (mode == IgnoreStripMined && idx == 0) {
1756         LoopNode *head = loop->_head->as_Loop();
1757         if (head->is_strip_mined() && is_dominator(head->outer_loop_exit(), prev)) {
1758           // That node is outside the inner loop, leave it outside the
1759           // outer loop as well to not confuse verification code.
1760           assert(!loop->_parent->is_member(use_loop), "should be out of the outer loop");
1761           _igvn.replace_input_of(use, 0, head->outer_loop_exit());
1762           continue;
1763         }
1764       }
1765 
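           // Back up through the CFG until 'cfg' is inside the (outer) loop
           // again; 'prev' then names the first block at or past the loop exit
           // where a merge Phi may be needed.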
1766       while(!outer_loop->is_member(get_loop(cfg))) {
1767         prev = cfg;
1768         cfg = cfg->_idx >= new_counter ? cfg->in(2) : idom(cfg);
1769       }
1770       // If the use occurs after merging several exits from the loop, then
1771       // the old value must have dominated all those exits.  Since the same old
1772       // value was used on all those exits we did not need a Phi at this
1773       // merge point.  NOW we do need a Phi here.  Each loop exit value
1774       // is now merged with the peeled body exit; each exit gets its own
1775       // private Phi and those Phis need to be merged here.
1776       Node *phi;
1777       if( prev->is_Region() ) {
1778         if( idx == 0 ) {      // Updating control edge?
1779           phi = prev;         // Just use existing control
1780         } else {              // Else need a new Phi
1781           phi = PhiNode::make( prev, old );
1782           // Now recursively fix up the new uses of old!
1783           for( uint i = 1; i < prev->req(); i++ ) {
1784             worklist.push(phi); // Onto worklist once for each 'old' input
1785           }
1786         }
1787       } else {
1788         // Get new RegionNode merging old and new loop exits
1789         prev = old_new[prev->_idx];
1790         assert( prev, "just made this in step 7" );
1791         if( idx == 0) {      // Updating control edge?
1792           phi = prev;         // Just use existing control
1793         } else {              // Else need a new Phi
1794           // Make a new Phi merging data values properly
1795           phi = PhiNode::make( prev, old );
1796           phi->set_req( 1, nnn );
1797         }
1798       }
1799       // If inserting a new Phi, check for prior hits
1800       if( idx != 0 ) {
1801         Node *hit = _igvn.hash_find_insert(phi);
1802         if( hit == NULL ) {
1803           _igvn.register_new_node_with_optimizer(phi); // Register new phi
1804         } else {                                      // or
1805           // Remove the new phi from the graph and use the hit
1806           _igvn.remove_dead_node(phi);
1807           phi = hit;                                  // Use existing phi
1808         }
1809         set_ctrl(phi, prev);
1810       }
1811       // Make 'use' use the Phi instead of the old loop body exit value
1812       _igvn.replace_input_of(use, idx, phi);
1813       if( use->_idx >= new_counter ) { // If updating new phis
1814         // Not needed for correctness, but prevents a weak assert
1815         // in AddPNode from tripping (when we end up with different
1816         // base & derived Phis that will become the same after
1817         // IGVN does CSE).
1818         Node *hit = _igvn.hash_find_insert(use);
1819         if( hit )             // Go ahead and re-hash for hits.
1820           _igvn.replace_node( use, hit );
1821       }
1822 
1823       // If 'use' was in the loop-exit block, it now needs to be sunk
1824       // below the post-loop merge point.
1825       sink_use( use, prev );
1826     }
1827   }
1828 }
1829 
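     // Collect into 'wq' the non-CFG uses of 'n' whose control is in the outer
     // strip-mined loop; when check_old_new is set, uses that already have a
     // clone are skipped.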
1830 static void clone_outer_loop_helper(Node* n, const IdealLoopTree *loop, const IdealLoopTree* outer_loop,
1831                                     const Node_List &old_new, Unique_Node_List& wq, PhaseIdealLoop* phase,
1832                                     bool check_old_new) {
1833   for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
1834     Node* u = n->fast_out(j);
1835     assert(check_old_new || old_new[u->_idx] == NULL, "shouldn't have been cloned");
1836     if (!u->is_CFG() && (!check_old_new || old_new[u->_idx] == NULL)) {
1837       Node* c = phase->get_ctrl(u);
1838       IdealLoopTree* u_loop = phase->get_loop(c);
1839       assert(!loop->is_member(u_loop), "can be in outer loop or out of both loops only");
1840       if (outer_loop->is_member(u_loop)) {
1841         wq.push(u);
1842       }
1843     }
1844   }
1845 }
1846 
1847 void PhaseIdealLoop::clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealLoopTree *loop,
1848                                       IdealLoopTree* outer_loop, int dd, Node_List &old_new,
1849                                       Node_List& extra_data_nodes) {
1850   if (head->is_strip_mined() && mode != IgnoreStripMined) {
1851     CountedLoopNode* cl = head->as_CountedLoop();
1852     Node* l = cl->outer_loop();
1853     Node* tail = cl->outer_loop_tail();
1854     IfNode* le = cl->outer_loop_end();
1855     Node* sfpt = cl->outer_safepoint();
1856     CountedLoopEndNode* cle = cl->loopexit();
1857     CountedLoopNode* new_cl = old_new[cl->_idx]->as_CountedLoop();
1858     CountedLoopEndNode* new_cle = new_cl->as_CountedLoop()->loopexit_or_null();
1859     Node* cle_out = cle->proj_out(false);
1860 
1861     Node* new_sfpt = NULL;
1862     Node* new_cle_out = cle_out->clone();
1863     old_new.map(cle_out->_idx, new_cle_out);
1864     if (mode == CloneIncludesStripMined) {
1865       // clone outer loop body
1866       Node* new_l = l->clone();
1867       Node* new_tail = tail->clone();
1868       IfNode* new_le = le->clone()->as_If();
1869       new_sfpt = sfpt->clone();
1870 
1871       set_loop(new_l, outer_loop->_parent);
1872       set_idom(new_l, new_l->in(LoopNode::EntryControl), dd);
1873       set_loop(new_cle_out, outer_loop->_parent);
1874       set_idom(new_cle_out, new_cle, dd);
1875       set_loop(new_sfpt, outer_loop->_parent);
1876       set_idom(new_sfpt, new_cle_out, dd);
1877       set_loop(new_le, outer_loop->_parent);
1878       set_idom(new_le, new_sfpt, dd);
1879       set_loop(new_tail, outer_loop->_parent);
1880       set_idom(new_tail, new_le, dd);
1881       set_idom(new_cl, new_l, dd);
1882 
1883       old_new.map(l->_idx, new_l);
1884       old_new.map(tail->_idx, new_tail);
1885       old_new.map(le->_idx, new_le);
1886       old_new.map(sfpt->_idx, new_sfpt);
1887 
1888       new_l->set_req(LoopNode::LoopBackControl, new_tail);
1889       new_l->set_req(0, new_l);
1890       new_tail->set_req(0, new_le);
1891       new_le->set_req(0, new_sfpt);
1892       new_sfpt->set_req(0, new_cle_out);
1893       new_cle_out->set_req(0, new_cle);
1894       new_cl->set_req(LoopNode::EntryControl, new_l);
1895 
1896       _igvn.register_new_node_with_optimizer(new_l);
1897       _igvn.register_new_node_with_optimizer(new_tail);
1898       _igvn.register_new_node_with_optimizer(new_le);
1899     } else {
1900       Node *newhead = old_new[loop->_head->_idx];
1901       newhead->as_Loop()->clear_strip_mined();
1902       _igvn.replace_input_of(newhead, LoopNode::EntryControl, newhead->in(LoopNode::EntryControl)->in(LoopNode::EntryControl));
1903       set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
1904     }
1905     // Look at data nodes that were assigned a control in the outer
1906     // loop: they are kept in the outer loop by the safepoint so start
1907     // from the safepoint node's inputs.
1908     IdealLoopTree* outer_loop = get_loop(l);
1909     Node_Stack stack(2);
1910     stack.push(sfpt, 1);
1911     uint new_counter = C->unique();
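         // Depth-first walk over the safepoint's inputs, cloning every data
         // node that is controlled inside the outer strip-mined loop and has
         // not been cloned yet.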
1912     while (stack.size() > 0) {
1913       Node* n = stack.node();
1914       uint i = stack.index();
1915       while (i < n->req() &&
1916              (n->in(i) == NULL ||
1917               !has_ctrl(n->in(i)) ||
1918               get_loop(get_ctrl(n->in(i))) != outer_loop ||
1919               (old_new[n->in(i)->_idx] != NULL && old_new[n->in(i)->_idx]->_idx >= new_counter))) {
1920         i++;
1921       }
1922       if (i < n->req()) {
1923         stack.set_index(i+1);
1924         stack.push(n->in(i), 0);
1925       } else {
1926         assert(old_new[n->_idx] == NULL || n == sfpt || old_new[n->_idx]->_idx < new_counter, "no clone yet");
1927         Node* m = n == sfpt ? new_sfpt : n->clone();
1928         if (m != NULL) {
1929           for (uint i = 0; i < n->req(); i++) {
1930             if (m->in(i) != NULL && old_new[m->in(i)->_idx] != NULL) {
1931               m->set_req(i, old_new[m->in(i)->_idx]);
1932             }
1933           }
1934         } else {
1935           assert(n == sfpt && mode != CloneIncludesStripMined, "where's the safepoint clone?");
1936         }
1937         if (n != sfpt) {
1938           extra_data_nodes.push(n);
1939           _igvn.register_new_node_with_optimizer(m);
1940           assert(get_ctrl(n) == cle_out, "what other control?");
1941           set_ctrl(m, new_cle_out);
1942           old_new.map(n->_idx, m);
1943         }
1944         stack.pop();
1945       }
1946     }
1947     if (mode == CloneIncludesStripMined) {
1948       _igvn.register_new_node_with_optimizer(new_sfpt);
1949       _igvn.register_new_node_with_optimizer(new_cle_out);
1950     }
1951     // Some other transformation may have pessimistically assigned some
1952     // data nodes to the outer loop. Set their control so they are out
1953     // of the outer loop.
1954     ResourceMark rm;
1955     Unique_Node_List wq;
1956     for (uint i = 0; i < extra_data_nodes.size(); i++) {
1957       Node* old = extra_data_nodes.at(i);
1958       clone_outer_loop_helper(old, loop, outer_loop, old_new, wq, this, true);
1959     }
1960     Node* new_ctrl = cl->outer_loop_exit();
1961     assert(get_loop(new_ctrl) != outer_loop, "must be out of the loop nest");
1962     for (uint i = 0; i < wq.size(); i++) {
1963       Node* n = wq.at(i);
1964       set_ctrl(n, new_ctrl);
1965       clone_outer_loop_helper(n, loop, outer_loop, old_new, wq, this, false);
1966     }
1967   } else {
1968     Node *newhead = old_new[loop->_head->_idx];
1969     set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
1970   }
1971 }
1972 
1973 //------------------------------clone_loop-------------------------------------
1974 //
1975 //                   C L O N E   A   L O O P   B O D Y
1976 //
1977 // This is the basic building block of the loop optimizations.  It clones an
1978 // entire loop body.  It makes an old_new loop body mapping; with this mapping
1979 // you can find the new-loop equivalent to an old-loop node.  All new-loop
1980 // nodes are exactly equal to their old-loop counterparts, all edges are the
1981 // same.  All exits from the old-loop now have a RegionNode that merges the
1982 // equivalent new-loop path.  This is true even for the normal "loop-exit"
1983 // condition.  All uses of loop-invariant old-loop values now come from (one
1984 // or more) Phis that merge their new-loop equivalents.
1985 //
1986 // This operation leaves the graph in an illegal state: there are two valid
1987 // control edges coming from the loop pre-header to both loop bodies.  I'll
1988 // definitely have to hack the graph after running this transform.
1989 //
1990 // From this building block I will further edit edges to perform loop peeling
1991 // or loop unrolling or iteration splitting (Range-Check-Elimination), etc.
1992 //
1993 // Parameter side_by_side_idom:
1994 //   When side_by_side_idom is NULL, the dominator tree is constructed for
1995 //      the clone loop to dominate the original.  Used in construction of
1996 //      pre-main-post loop sequence.
1997 //   When nonnull, the clone and original are side-by-side, both are
1998 //      dominated by the side_by_side_idom node.  Used in construction of
1999 //      unswitched loops.
2000 void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd,
2001                                 CloneLoopMode mode, Node* side_by_side_idom) {
2002 
2003   LoopNode* head = loop->_head->as_Loop();
2004   head->verify_strip_mined(1);
2005 
2006   if (C->do_vector_loop() && PrintOpto) {
2007     const char* mname = C->method()->name()->as_quoted_ascii();
2008     if (mname != NULL) {
2009       tty->print("PhaseIdealLoop::clone_loop: for vectorize method %s\n", mname);
2010     }
2011   }
2012 
2013   CloneMap& cm = C->clone_map();
2014   Dict* dict = cm.dict();
2015   if (C->do_vector_loop()) {
2016     cm.set_clone_idx(cm.max_gen()+1);
2017 #ifndef PRODUCT
2018     if (PrintOpto) {
2019       tty->print_cr("PhaseIdealLoop::clone_loop: _clone_idx %d", cm.clone_idx());
2020       loop->dump_head();
2021     }
2022 #endif
2023   }
2024 
2025   // Step 1: Clone the loop body.  Make the old->new mapping.
2026   uint i;
2027   for( i = 0; i < loop->_body.size(); i++ ) {
2028     Node *old = loop->_body.at(i);
2029     Node *nnn = old->clone();
2030     old_new.map( old->_idx, nnn );
2031     if (C->do_vector_loop()) {
2032       cm.verify_insert_and_clone(old, nnn, cm.clone_idx());
2033     }
2034     _igvn.register_new_node_with_optimizer(nnn);
2035   }
2036 
2037   IdealLoopTree* outer_loop = (head->is_strip_mined() && mode != IgnoreStripMined) ? get_loop(head->as_CountedLoop()->outer_loop()) : loop;
2038 
2039   // Step 2: Fix the edges in the new body.  If the old input is outside the
2040   // loop, use it.  If the old input is INside the loop, use the corresponding
2041   // new node instead.
2042   for( i = 0; i < loop->_body.size(); i++ ) {
2043     Node *old = loop->_body.at(i);
2044     Node *nnn = old_new[old->_idx];
2045     // Fix CFG/Loop controlling the new node
2046     if (has_ctrl(old)) {
2047       set_ctrl(nnn, old_new[get_ctrl(old)->_idx]);
2048     } else {
2049       set_loop(nnn, outer_loop->_parent);
2050       if (old->outcnt() > 0) {
2051         set_idom( nnn, old_new[idom(old)->_idx], dd );
2052       }
2053     }
2054     // Correct edges to the new node
2055     for( uint j = 0; j < nnn->req(); j++ ) {
2056         Node *n = nnn->in(j);
2057         if( n ) {
2058           IdealLoopTree *old_in_loop = get_loop( has_ctrl(n) ? get_ctrl(n) : n );
2059           if( loop->is_member( old_in_loop ) )
2060             nnn->set_req(j, old_new[n->_idx]);
2061         }
2062     }
2063     _igvn.hash_find_insert(nnn);
2064   }
2065 
2066   ResourceArea *area = Thread::current()->resource_area();
2067   Node_List extra_data_nodes(area); // data nodes in the outer strip mined loop
2068   clone_outer_loop(head, mode, loop, outer_loop, dd, old_new, extra_data_nodes);
2069 
2070   // Step 3: Now fix control uses.  Loop varying control uses have already
2071   // been fixed up (as part of all input edges in Step 2).  Loop invariant
2072   // control uses must be either an IfFalse or an IfTrue.  Make a merge
2073   // point to merge the old and new IfFalse/IfTrue nodes; make the use
2074   // refer to this.
2075   Node_List worklist(area);
2076   uint new_counter = C->unique();
2077   for( i = 0; i < loop->_body.size(); i++ ) {
2078     Node* old = loop->_body.at(i);
2079     if( !old->is_CFG() ) continue;
2080 
2081     // Copy uses to a worklist, so I can munge the def-use info
2082     // with impunity.
2083     for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
2084       worklist.push(old->fast_out(j));
2085 
2086     while( worklist.size() ) {  // Visit all uses
2087       Node *use = worklist.pop();
2088       if (!has_node(use))  continue; // Ignore dead nodes
2089       IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
2090       if( !loop->is_member( use_loop ) && use->is_CFG() ) {
2091         // Both OLD and USE are CFG nodes here.
2092         assert( use->is_Proj(), "" );
2093         Node* nnn = old_new[old->_idx];
2094 
2095         Node* newuse = NULL;
2096         if (head->is_strip_mined() && mode != IgnoreStripMined) {
2097           CountedLoopNode* cl = head->as_CountedLoop();
2098           CountedLoopEndNode* cle = cl->loopexit();
2099           Node* cle_out = cle->proj_out_or_null(false);
2100           if (use == cle_out) {
2101             IfNode* le = cl->outer_loop_end();
2102             use = le->proj_out(false);
2103             use_loop = get_loop(use);
2104             if (mode == CloneIncludesStripMined) {
2105               nnn = old_new[le->_idx];
2106             } else {
2107               newuse = old_new[cle_out->_idx];
2108             }
2109           }
2110         }
2111         if (newuse == NULL) {
2112           newuse = use->clone();
2113         }
2114 
2115         // Clone the loop exit control projection
2116         if (C->do_vector_loop()) {
2117           cm.verify_insert_and_clone(use, newuse, cm.clone_idx());
2118         }
2119         newuse->set_req(0,nnn);
2120         _igvn.register_new_node_with_optimizer(newuse);
2121         set_loop(newuse, use_loop);
2122         set_idom(newuse, nnn, dom_depth(nnn) + 1 );
2123 
2124         // We need a Region to merge the exit from the peeled body and the
2125         // exit from the old loop body.
2126         RegionNode *r = new RegionNode(3);
2127         // Map the old use to the new merge point
2128         old_new.map( use->_idx, r );
2129         uint dd_r = MIN2(dom_depth(newuse),dom_depth(use));
2130         assert( dd_r >= dom_depth(dom_lca(newuse,use)), "" );
2131 
2132         // The original user of 'use' uses 'r' instead.
2133         for (DUIterator_Last lmin, l = use->last_outs(lmin); l >= lmin;) {
2134           Node* useuse = use->last_out(l);
2135           _igvn.rehash_node_delayed(useuse);
2136           uint uses_found = 0;
2137           if( useuse->in(0) == use ) {
2138             useuse->set_req(0, r);
2139             uses_found++;
2140             if( useuse->is_CFG() ) {
2141               assert( dom_depth(useuse) > dd_r, "" );
2142               set_idom(useuse, r, dom_depth(useuse));
2143             }
2144           }
2145           for( uint k = 1; k < useuse->req(); k++ ) {
2146             if( useuse->in(k) == use ) {
2147               useuse->set_req(k, r);
2148               uses_found++;
2149               if (useuse->is_Loop() && k == LoopNode::EntryControl) {
2150                 assert(dom_depth(useuse) > dd_r , "");
2151                 set_idom(useuse, r, dom_depth(useuse));
2152               }
2153             }
2154           }
2155           l -= uses_found;    // we deleted 1 or more copies of this edge
2156         }
2157 
2158         // Now finish up 'r'
2159         r->set_req( 1, newuse );
2160         r->set_req( 2,    use );
2161         _igvn.register_new_node_with_optimizer(r);
2162         set_loop(r, use_loop);
2163         set_idom(r, !side_by_side_idom ? newuse->in(0) : side_by_side_idom, dd_r);
2164       } // End of if a loop-exit test
2165     }
2166   }
2167 
2168   // Step 4: If loop-invariant use is not control, it must be dominated by a
2169   // loop exit IfFalse/IfTrue.  Find "proper" loop exit.  Make a Region
2170   // there if needed.  Make a Phi there merging old and new used values.
2171   Node_List *split_if_set = NULL;
2172   Node_List *split_bool_set = NULL;
2173   Node_List *split_cex_set = NULL;
2174   for( i = 0; i < loop->_body.size(); i++ ) {
2175     Node* old = loop->_body.at(i);
2176     clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
2177                                 split_bool_set, split_cex_set, worklist, new_counter,
2178                                 mode);
2179   }
2180 
2181   for (i = 0; i < extra_data_nodes.size(); i++) {
2182     Node* old = extra_data_nodes.at(i);
2183     clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
2184                                 split_bool_set, split_cex_set, worklist, new_counter,
2185                                 mode);
2186   }
2187 
2188   // Check for IFs that need splitting/cloning.  Happens if an IF outside of
2189   // the loop uses a condition set in the loop.  The original IF probably
2190   // takes control from one or more OLD Regions (which in turn get it from NEW
2191   // Regions).  In any case, there will be a set of Phis for each merge point
2192   // from the IF up to where the original BOOL def exits the loop.
2193   if (split_if_set) {
2194     while (split_if_set->size()) {
2195       Node *iff = split_if_set->pop();
2196       if (iff->in(1)->is_Phi()) {
2197         Node *b = clone_iff(iff->in(1)->as_Phi(), loop);
2198         _igvn.replace_input_of(iff, 1, b);
2199       }
2200     }
2201   }
2202   if (split_bool_set) {
2203     while (split_bool_set->size()) {
2204       Node *b = split_bool_set->pop();
2205       Node *phi = b->in(1);
2206       assert(phi->is_Phi(), "");
2207       CmpNode *cmp = clone_bool((PhiNode*)phi, loop);
2208       _igvn.replace_input_of(b, 1, cmp);
2209     }
2210   }
2211   if (split_cex_set) {
2212     while (split_cex_set->size()) {
2213       Node *b = split_cex_set->pop();
2214       assert(b->in(0)->is_Region(), "");
2215       assert(b->in(1)->is_Phi(), "");
2216       assert(b->in(0)->in(0) == b->in(1)->in(0), "");
2217       split_up(b, b->in(0), NULL);
2218     }
2219   }
2220 
2221 }
2222 
2223 
2224 //---------------------- stride_of_possible_iv -------------------------------------
2225 // Looks for an iff/bool/comp with one operand of the compare
2226 // being a cycle involving an add and a phi,
2227 // with an optional truncation (left-shift followed by a right-shift)
2228 // of the add. Returns zero if not an iv.
2229 int PhaseIdealLoop::stride_of_possible_iv(Node* iff) {
2230   Node* trunc1 = NULL;
2231   Node* trunc2 = NULL;
2232   const TypeInt* ttype = NULL;
2233   if (!iff->is_If() || iff->in(1) == NULL || !iff->in(1)->is_Bool()) {
2234     return 0;
2235   }
2236   BoolNode* bl = iff->in(1)->as_Bool();
2237   Node* cmp = bl->in(1);
2238   if (!cmp || (cmp->Opcode() != Op_CmpI && cmp->Opcode() != Op_CmpU)) {
2239     return 0;
2240   }
2241   // Must have an invariant operand
2242   if (is_member(get_loop(iff), get_ctrl(cmp->in(2)))) {
2243     return 0;
2244   }
2245   Node* add2 = NULL;
2246   Node* cmp1 = cmp->in(1);
2247   if (cmp1->is_Phi()) {
2248     // (If (Bool (CmpX phi:(Phi ...(Optional-trunc(AddI phi add2))) )))
2249     Node* phi = cmp1;
2250     for (uint i = 1; i < phi->req(); i++) {
2251       Node* in = phi->in(i);
2252       Node* add = CountedLoopNode::match_incr_with_optional_truncation(in,
2253                                 &trunc1, &trunc2, &ttype);
2254       if (add && add->in(1) == phi) {
2255         add2 = add->in(2);
2256         break;
2257       }
2258     }
2259   } else {
2260     // (If (Bool (CmpX addtrunc:(Optional-trunc((AddI (Phi ...addtrunc...) add2)) )))
2261     Node* addtrunc = cmp1;
2262     Node* add = CountedLoopNode::match_incr_with_optional_truncation(addtrunc,
2263                                 &trunc1, &trunc2, &ttype);
2264     if (add && add->in(1)->is_Phi()) {
2265       Node* phi = add->in(1);
2266       for (uint i = 1; i < phi->req(); i++) {
2267         if (phi->in(i) == addtrunc) {
2268           add2 = add->in(2);
2269           break;
2270         }
2271       }
2272     }
2273   }
2274   if (add2 != NULL) {
2275     const TypeInt* add2t = _igvn.type(add2)->is_int();
2276     if (add2t->is_con()) {
2277       return add2t->get_con();
2278     }
2279   }
2280   return 0;
2281 }
2282 
2283 
2284 //---------------------- stay_in_loop -------------------------------------
2285 // Return the (unique) control output node that's in the loop (if it exists).
2286 Node* PhaseIdealLoop::stay_in_loop( Node* n, IdealLoopTree *loop) {
2287   Node* unique = NULL;
2288   if (!n) return NULL;
2289   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2290     Node* use = n->fast_out(i);
2291     if (!has_ctrl(use) && loop->is_member(get_loop(use))) {
2292       if (unique != NULL) {
2293         return NULL;
2294       }
2295       unique = use;
2296     }
2297   }
2298   return unique;
2299 }
2300 
2301 //------------------------------ register_node -------------------------------------
2302 // Utility to register node "n" with PhaseIdealLoop
2303 void PhaseIdealLoop::register_node(Node* n, IdealLoopTree *loop, Node* pred, int ddepth) {
2304   _igvn.register_new_node_with_optimizer(n);
2305   loop->_body.push(n);
2306   if (n->is_CFG()) {
2307     set_loop(n, loop);
2308     set_idom(n, pred, ddepth);
2309   } else {
2310     set_ctrl(n, pred);
2311   }
2312 }
2313 
2314 //------------------------------ proj_clone -------------------------------------
2315 // Utility to create an if-projection
2316 ProjNode* PhaseIdealLoop::proj_clone(ProjNode* p, IfNode* iff) {
2317   ProjNode* c = p->clone()->as_Proj();
2318   c->set_req(0, iff);
2319   return c;
2320 }
2321 
2322 //------------------------------ short_circuit_if -------------------------------------
2323 // Force the iff control output to be the live_proj
2324 Node* PhaseIdealLoop::short_circuit_if(IfNode* iff, ProjNode* live_proj) {
2325   guarantee(live_proj != NULL, "null projection");
2326   int proj_con = live_proj->_con;
2327   assert(proj_con == 0 || proj_con == 1, "false or true projection");
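       // Feeding the If a constant equal to the live projection's _con lets
       // IGVN fold the test so that only 'live_proj' stays reachable.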
2328   Node *con = _igvn.intcon(proj_con);
2329   set_ctrl(con, C->root());
2330   if (iff) {
2331     iff->set_req(1, con);
2332   }
2333   return con;
2334 }
2335 
2336 //------------------------------ insert_if_before_proj -------------------------------------
2337 // Insert a new if before an if projection (* - new node)
2338 //
2339 // before
2340 //           if(test)
2341 //           /     \
2342 //          v       v
2343 //    other-proj   proj (arg)
2344 //
2345 // after
2346 //           if(test)
2347 //           /     \
2348 //          /       v
2349 //         |      * proj-clone
2350 //         v          |
2351 //    other-proj      v
2352 //                * new_if(relop(cmp[IU](left,right)))
2353 //                  /  \
2354 //                 v    v
2355 //         * new-proj  proj
2356 //         (returned)
2357 //
2358 ProjNode* PhaseIdealLoop::insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj) {
2359   IfNode* iff = proj->in(0)->as_If();
2360   IdealLoopTree *loop = get_loop(proj);
2361   ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
2362   int ddepth = dom_depth(proj);
2363 
2364   _igvn.rehash_node_delayed(iff);
2365   _igvn.rehash_node_delayed(proj);
2366 
2367   proj->set_req(0, NULL);  // temporary disconnect
2368   ProjNode* proj2 = proj_clone(proj, iff);
2369   register_node(proj2, loop, iff, ddepth);
2370 
2371   Node* cmp = Signed ? (Node*) new CmpINode(left, right) : (Node*) new CmpUNode(left, right);
2372   register_node(cmp, loop, proj2, ddepth);
2373 
2374   BoolNode* bol = new BoolNode(cmp, relop);
2375   register_node(bol, loop, proj2, ddepth);
2376 
2377   int opcode = iff->Opcode();
2378   assert(opcode == Op_If || opcode == Op_RangeCheck, "unexpected opcode");
2379   IfNode* new_if = (opcode == Op_If) ? new IfNode(proj2, bol, iff->_prob, iff->_fcnt):
2380     new RangeCheckNode(proj2, bol, iff->_prob, iff->_fcnt);
2381   register_node(new_if, loop, proj2, ddepth);
2382 
2383   proj->set_req(0, new_if); // reattach
2384   set_idom(proj, new_if, ddepth);
2385 
2386   ProjNode* new_exit = proj_clone(other_proj, new_if)->as_Proj();
2387   guarantee(new_exit != NULL, "null exit node");
2388   register_node(new_exit, get_loop(other_proj), new_if, ddepth);
2389 
2390   return new_exit;
2391 }
2392 
2393 //------------------------------ insert_region_before_proj -------------------------------------
2394 // Insert a region before an if projection (* - new node)
2395 //
2396 // before
2397 //           if(test)
2398 //          /      |
2399 //         v       |
2400 //       proj      v
2401 //               other-proj
2402 //
2403 // after
2404 //           if(test)
2405 //          /      |
2406 //         v       |
2407 // * proj-clone    v
2408 //         |     other-proj
2409 //         v
2410 // * new-region
2411 //         |
2412 //         v
2413 // *      dum_if
2414 //       /     \
2415 //      v       \
2416 // * dum-proj    v
2417 //              proj
2418 //
2419 RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) {
2420   IfNode* iff = proj->in(0)->as_If();
2421   IdealLoopTree *loop = get_loop(proj);
2422   ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
2423   int ddepth = dom_depth(proj);
2424 
2425   _igvn.rehash_node_delayed(iff);
2426   _igvn.rehash_node_delayed(proj);
2427 
2428   proj->set_req(0, NULL);  // temporary disconnect
2429   ProjNode* proj2 = proj_clone(proj, iff);
2430   register_node(proj2, loop, iff, ddepth);
2431 
2432   RegionNode* reg = new RegionNode(2);
2433   reg->set_req(1, proj2);
2434   register_node(reg, loop, iff, ddepth);
2435 
2436   IfNode* dum_if = new IfNode(reg, short_circuit_if(NULL, proj), iff->_prob, iff->_fcnt);
2437   register_node(dum_if, loop, reg, ddepth);
2438 
2439   proj->set_req(0, dum_if); // reattach
2440   set_idom(proj, dum_if, ddepth);
2441 
2442   ProjNode* dum_proj = proj_clone(other_proj, dum_if);
2443   register_node(dum_proj, loop, dum_if, ddepth);
2444 
2445   return reg;
2446 }
2447 
2448 //------------------------------ insert_cmpi_loop_exit -------------------------------------
2449 // Clone a signed compare loop exit from an unsigned compare and
2450 // insert it before the unsigned cmp on the stay-in-loop path.
2451 // All new nodes are inserted in the dominator tree between the original
2452 // if and its projections.  The original if test is replaced with
2453 // a constant to force the stay-in-loop path.
2454 //
2455 // This is done to make sure that the original if and its projections
2456 // still dominate the same set of control nodes, that the ctrl() relation
2457 // from data nodes to them is preserved, and that their loop nesting is
2458 // preserved.
2459 //
2460 // before
2461 //          if(i <u limit)    unsigned compare loop exit
2462 //         /       |
2463 //        v        v
2464 //   exit-proj   stay-in-loop-proj
2465 //
2466 // after
2467 //          if(stay-in-loop-const)  original if
2468 //         /       |
2469 //        /        v
2470 //       /  if(i <  limit)    new signed test
2471 //      /  /       |
2472 //     /  /        v
2473 //    /  /  if(i <u limit)    new cloned unsigned test
2474 //   /  /   /      |
2475 //   v  v  v       |
2476 //    region       |
2477 //        |        |
2478 //      dum-if     |
2479 //     /  |        |
2480 // ether  |        |
2481 //        v        v
2482 //   exit-proj   stay-in-loop-proj
2483 //
2484 IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *loop) {
2485   const bool Signed   = true;
2486   const bool Unsigned = false;
2487 
2488   BoolNode* bol = if_cmpu->in(1)->as_Bool();
2489   if (bol->_test._test != BoolTest::lt) return NULL;
2490   CmpNode* cmpu = bol->in(1)->as_Cmp();
2491   if (cmpu->Opcode() != Op_CmpU) return NULL;
2492   int stride = stride_of_possible_iv(if_cmpu);
2493   if (stride == 0) return NULL;
2494 
2495   Node* lp_proj = stay_in_loop(if_cmpu, loop);
2496   guarantee(lp_proj != NULL, "null loop node");
2497 
2498   ProjNode* lp_continue = lp_proj->as_Proj();
2499   ProjNode* lp_exit     = if_cmpu->proj_out(!lp_continue->is_IfTrue())->as_Proj();
2500 
2501   Node* limit = NULL;
2502   if (stride > 0) {
2503     limit = cmpu->in(2);
2504   } else {
2505     limit = _igvn.makecon(TypeInt::ZERO);
2506     set_ctrl(limit, C->root());
2507   }
2508   // Create a new region on the exit path
2509   RegionNode* reg = insert_region_before_proj(lp_exit);
2510   guarantee(reg != NULL, "null region node");
2511 
2512   // Clone the if-cmpu-true-false using a signed compare
2513   BoolTest::mask rel_i = stride > 0 ? bol->_test._test : BoolTest::ge;
2514   ProjNode* cmpi_exit = insert_if_before_proj(cmpu->in(1), Signed, rel_i, limit, lp_continue);
2515   reg->add_req(cmpi_exit);
2516 
2517   // Clone the if-cmpu-true-false
2518   BoolTest::mask rel_u = bol->_test._test;
2519   ProjNode* cmpu_exit = insert_if_before_proj(cmpu->in(1), Unsigned, rel_u, cmpu->in(2), lp_continue);
2520   reg->add_req(cmpu_exit);
2521 
2522   // Force original if to stay in loop.
2523   short_circuit_if(if_cmpu, lp_continue);
2524 
2525   return cmpi_exit->in(0)->as_If();
2526 }
2527 
2528 //------------------------------ remove_cmpi_loop_exit -------------------------------------
2529 // Remove a previously inserted signed compare loop exit.
2530 void PhaseIdealLoop::remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop) {
2531   Node* lp_proj = stay_in_loop(if_cmp, loop);
2532   assert(if_cmp->in(1)->in(1)->Opcode() == Op_CmpI &&
2533          stay_in_loop(lp_proj, loop)->is_If() &&
2534          stay_in_loop(lp_proj, loop)->in(1)->in(1)->Opcode() == Op_CmpU, "inserted cmpi before cmpu");
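       // Replace the inserted signed test's condition with a constant that
       // keeps control on the stay-in-loop projection; IGVN then folds away
       // the now-dead exit path.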
2535   Node *con = _igvn.makecon(lp_proj->is_IfTrue() ? TypeInt::ONE : TypeInt::ZERO);
2536   set_ctrl(con, C->root());
2537   if_cmp->set_req(1, con);
2538 }
2539 
2540 //------------------------------ scheduled_nodelist -------------------------------------
2541 // Create a post order schedule of nodes that are in the
2542 // "member" set.  The list is returned in "sched".
2543 // The first node in "sched" is the loop head, followed by
2544 // nodes which have no inputs in the "member" set, and then
2545 // followed by the nodes that have an immediate input dependence
2546 // on a node in "sched".
2547 void PhaseIdealLoop::scheduled_nodelist( IdealLoopTree *loop, VectorSet& member, Node_List &sched ) {
2548 
2549   assert(member.test(loop->_head->_idx), "loop head must be in member set");
2550   Arena *a = Thread::current()->resource_area();
2551   VectorSet visited(a);
2552   Node_Stack nstack(a, loop->_body.size());
2553 
2554   Node* n  = loop->_head;  // top of stack is cached in "n"
2555   uint idx = 0;
2556   visited.set(n->_idx);
2557 
2558   // Initially push all with no inputs from within member set
2559   for(uint i = 0; i < loop->_body.size(); i++ ) {
2560     Node *elt = loop->_body.at(i);
2561     if (member.test(elt->_idx)) {
2562       bool found = false;
2563       for (uint j = 0; j < elt->req(); j++) {
2564         Node* def = elt->in(j);
2565         if (def && member.test(def->_idx) && def != elt) {
2566           found = true;
2567           break;
2568         }
2569       }
2570       if (!found && elt != loop->_head) {
2571         nstack.push(n, idx);
2572         n = elt;
2573         assert(!visited.test(n->_idx), "not seen yet");
2574         visited.set(n->_idx);
2575       }
2576     }
2577   }
2578 
2579   // traverse outputs that are in the member set
2580   while (true) {
2581     if (idx < n->outcnt()) {
2582       Node* use = n->raw_out(idx);
2583       idx++;
2584       if (!visited.test_set(use->_idx)) {
2585         if (member.test(use->_idx)) {
2586           nstack.push(n, idx);
2587           n = use;
2588           idx = 0;
2589         }
2590       }
2591     } else {
2592       // All outputs processed
2593       sched.push(n);
2594       if (nstack.is_empty()) break;
2595       n   = nstack.node();
2596       idx = nstack.index();
2597       nstack.pop();
2598     }
2599   }
2600 }
2601 
2602 
2603 //------------------------------ has_use_in_set -------------------------------------
2604 // Has a use in the vector set
2605 bool PhaseIdealLoop::has_use_in_set( Node* n, VectorSet& vset ) {
2606   for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2607     Node* use = n->fast_out(j);
2608     if (vset.test(use->_idx)) {
2609       return true;
2610     }
2611   }
2612   return false;
2613 }
2614 
2615 
2616 //------------------------------ has_use_internal_to_set -------------------------------------
2617 // Has a use internal to the vector set (i.e. not in a phi at the loop head)
2618 bool PhaseIdealLoop::has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop ) {
2619   Node* head  = loop->_head;
2620   for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2621     Node* use = n->fast_out(j);
2622     if (vset.test(use->_idx) && !(use->is_Phi() && use->in(0) == head)) {
2623       return true;
2624     }
2625   }
2626   return false;
2627 }
2628 
2629 
2630 //------------------------------ clone_for_use_outside_loop -------------------------------------
2631 // clone "n" for uses that are outside of the loop
2632 int PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ) {
2633   int cloned = 0;
2634   assert(worklist.size() == 0, "should be empty");
2635   for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2636     Node* use = n->fast_out(j);
2637     if( !loop->is_member(get_loop(has_ctrl(use) ? get_ctrl(use) : use)) ) {
2638       worklist.push(use);
2639     }
2640   }
2641   while( worklist.size() ) {
2642     Node *use = worklist.pop();
2643     if (!has_node(use) || use->in(0) == C->top()) continue;
2644     uint j;
2645     for (j = 0; j < use->req(); j++) {
2646       if (use->in(j) == n) break;
2647     }
2648     assert(j < use->req(), "must be there");
2649 
2650     // clone "n" and insert it between the inputs of "n" and the use outside the loop
2651     Node* n_clone = n->clone();
2652     _igvn.replace_input_of(use, j, n_clone);
2653     cloned++;
2654     Node* use_c;
2655     if (!use->is_Phi()) {
2656       use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0);
2657     } else {
2658       // Use in a phi is considered a use in the associated predecessor block
2659       use_c = use->in(0)->in(j);
2660     }
2661     set_ctrl(n_clone, use_c);
2662     assert(!loop->is_member(get_loop(use_c)), "should be outside loop");
2663     get_loop(use_c)->_body.push(n_clone);
2664     _igvn.register_new_node_with_optimizer(n_clone);
2665 #if !defined(PRODUCT)
2666     if (TracePartialPeeling) {
2667       tty->print_cr("loop exit cloning old: %d new: %d newbb: %d", n->_idx, n_clone->_idx, get_ctrl(n_clone)->_idx);
2668     }
2669 #endif
2670   }
2671   return cloned;
2672 }
2673 
2674 
2675 //------------------------------ clone_for_special_use_inside_loop -------------------------------------
2676 // clone "n" for special uses that are in the not_peeled region.
2677 // If these def-uses occur in separate blocks, the code generator
2678 // marks the method as not compilable.  For example, if a "BoolNode"
2679 // is in a different basic block than the "IfNode" that uses it, then
2680 // the compilation is aborted in the code generator.
2681 void PhaseIdealLoop::clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
2682                                                         VectorSet& not_peel, Node_List& sink_list, Node_List& worklist ) {
2683   if (n->is_Phi() || n->is_Load()) {
2684     return;
2685   }
2686   assert(worklist.size() == 0, "should be empty");
2687   for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2688     Node* use = n->fast_out(j);
2689     if ( not_peel.test(use->_idx) &&
2690          (use->is_If() || use->is_CMove() || use->is_Bool()) &&
2691          use->in(1) == n)  {
2692       worklist.push(use);
2693     }
2694   }
2695   if (worklist.size() > 0) {
2696     // clone "n" and insert it between inputs of "n" and the use
2697     Node* n_clone = n->clone();
2698     loop->_body.push(n_clone);
2699     _igvn.register_new_node_with_optimizer(n_clone);
2700     set_ctrl(n_clone, get_ctrl(n));
2701     sink_list.push(n_clone);
2702     not_peel <<= n_clone->_idx;  // add n_clone to not_peel set.
2703 #if !defined(PRODUCT)
2704     if (TracePartialPeeling) {
2705       tty->print_cr("special not_peeled cloning old: %d new: %d", n->_idx, n_clone->_idx);
2706     }
2707 #endif
2708     while( worklist.size() ) {
2709       Node *use = worklist.pop();
2710       _igvn.rehash_node_delayed(use);
2711       for (uint j = 1; j < use->req(); j++) {
2712         if (use->in(j) == n) {
2713           use->set_req(j, n_clone);
2714         }
2715       }
2716     }
2717   }
2718 }
2719 
2720 
2721 //------------------------------ insert_phi_for_loop -------------------------------------
2722 // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
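     //
     // Shape of the phi built below (LoopNode::EntryControl is input 1,
     // LoopNode::LoopBackControl is input 2):
     //
     //   phi = Phi(lp, lp_entry_val, back_edge_val)
     //
     // PhiNode::make() seeds every data input with back_edge_val, and the entry
     // input is then overwritten with lp_entry_val; if an identical phi is
     // already in the graph, that one is reused instead.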
2723 void PhaseIdealLoop::insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp ) {
2724   Node *phi = PhiNode::make(lp, back_edge_val);
2725   phi->set_req(LoopNode::EntryControl, lp_entry_val);
2726   // Reuse an existing phi if an identical one is already in the graph
2727   Node *hit = _igvn.hash_find_insert(phi);
2728   if( hit == NULL ) {
2729     _igvn.register_new_node_with_optimizer(phi);
2730     set_ctrl(phi, lp);
2731   } else {
2732     // Remove the new phi from the graph and use the hit
2733     _igvn.remove_dead_node(phi);
2734     phi = hit;
2735   }
2736   _igvn.replace_input_of(use, idx, phi);
2737 }
2738 
2739 #ifdef ASSERT
2740 //------------------------------ is_valid_loop_partition -------------------------------------
2741 // Validate the loop partition sets: peel and not_peel
2742 bool PhaseIdealLoop::is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list,
2743                                               VectorSet& not_peel ) {
2744   uint i;
2745   // Check that peel_list entries are in the peel set
2746   for (i = 0; i < peel_list.size(); i++) {
2747     if (!peel.test(peel_list.at(i)->_idx)) {
2748       return false;
2749     }
2750   }
2751   // Check that all loop members are in exactly one of the peel set or the not_peel set
2752   for (i = 0; i < loop->_body.size(); i++ ) {
2753     Node *def  = loop->_body.at(i);
2754     uint di = def->_idx;
2755     // Check that peel set elements are in peel_list
2756     if (peel.test(di)) {
2757       if (not_peel.test(di)) {
2758         return false;
2759       }
2760       // Must be in peel_list also
2761       bool found = false;
2762       for (uint j = 0; j < peel_list.size(); j++) {
2763         if (peel_list.at(j)->_idx == di) {
2764           found = true;
2765           break;
2766         }
2767       }
2768       if (!found) {
2769         return false;
2770       }
2771     } else if (not_peel.test(di)) {
2772       if (peel.test(di)) {
2773         return false;
2774       }
2775     } else {
2776       return false;
2777     }
2778   }
2779   return true;
2780 }
2781 
2782 //------------------------------ is_valid_clone_loop_exit_use -------------------------------------
2783 // Ensure that a use outside of the loop is of the right form
2784 bool PhaseIdealLoop::is_valid_clone_loop_exit_use( IdealLoopTree *loop, Node* use, uint exit_idx) {
2785   Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
2786   return (use->is_Phi() &&
2787           use_c->is_Region() && use_c->req() == 3 &&
2788           (use_c->in(exit_idx)->Opcode() == Op_IfTrue ||
2789            use_c->in(exit_idx)->Opcode() == Op_IfFalse ||
2790            use_c->in(exit_idx)->Opcode() == Op_JumpProj) &&
2791           loop->is_member( get_loop( use_c->in(exit_idx)->in(0) ) ) );
2792 }
2793 
2794 //------------------------------ is_valid_clone_loop_form -------------------------------------
2795 // Ensure that all uses outside of the loop are of the right form
2796 bool PhaseIdealLoop::is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list,
2797                                                uint orig_exit_idx, uint clone_exit_idx) {
2798   uint len = peel_list.size();
2799   for (uint i = 0; i < len; i++) {
2800     Node *def = peel_list.at(i);
2801 
2802     for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
2803       Node *use = def->fast_out(j);
2804       Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
2805       if (!loop->is_member(get_loop(use_c))) {
2806         // use is not in the loop, check for correct structure
2807         if (use->in(0) == def) {
2808           // Okay
2809         } else if (!is_valid_clone_loop_exit_use(loop, use, orig_exit_idx)) {
2810           return false;
2811         }
2812       }
2813     }
2814   }
2815   return true;
2816 }
2817 #endif
2818 
2819 //------------------------------ partial_peel -------------------------------------
2820 // Partially peel (aka loop rotation) the top portion of a loop (called
2821 // the peel section below) by cloning it and placing one copy just before
2822 // the new loop head and the other copy at the bottom of the new loop.
2823 //
2824 //    before                       after                where it came from
2825 //
2826 //    stmt1                        stmt1
2827 //  loop:                          stmt2                     clone
2828 //    stmt2                        if condA goto exitA       clone
2829 //    if condA goto exitA        new_loop:                   new
2830 //    stmt3                        stmt3                     clone
2831 //    if !condB goto loop          if condB goto exitB       clone
2832 //  exitB:                         stmt2                     orig
2833 //    stmt4                        if !condA goto new_loop   orig
2834 //  exitA:                         goto exitA
2835 //                               exitB:
2836 //                                 stmt4
2837 //                               exitA:
2838 //
2839 // Step 1: find the cut point: an exit test on a probable
2840 //         induction variable.
2841 // Step 2: schedule (with cloning) operations in the peel
2842 //         section that can be executed after the cut into
2843 //         the section that is not peeled.  This may need
2844 //         to clone operations into exit blocks.  For
2845 //         instance, a reference to A[i] in the not-peel
2846 //         section and a reference to B[i] in an exit block
2847 //         may cause a left-shift of i by 2 to be placed
2848 //         in the peel block.  This step will clone the left
2849 //         shift into the exit block and sink the left shift
2850 //         from the peel to the not-peel section.
2851 // Step 3: clone the loop, retarget the control, and insert
2852 //         phis for values that are live across the new loop
2853 //         head.  This is very dependent on the graph structure
2854 //         from clone_loop.  It creates region nodes for
2855 //         exit control and associated phi nodes for values that
2856 //         flow out of the loop through that exit.  The region
2857 //         node is dominated by the clone's control projection.
2858 //         So the clone's peel section is placed before the
2859 //         new loop head, and the clone's not-peel section
2860 //         forms the top part of the new loop.  The original
2861 //         peel section forms the tail of the new loop.
2862 // Step 4: update the dominator tree and recompute the
2863 //         dominator depth.
2864 //
2865 //                   orig
2866 //
2867 //                   stmt1
2868 //                     |
2869 //                     v
2870 //               loop predicate
2871 //                     |
2872 //                     v
2873 //                   loop<----+
2874 //                     |      |
2875 //                   stmt2    |
2876 //                     |      |
2877 //                     v      |
2878 //                    ifA     |
2879 //                   / |      |
2880 //                  v  v      |
2881 //               false true   ^  <-- last_peel
2882 //               /     |      |
2883 //              /   ===|==cut |
2884 //             /     stmt3    |  <-- first_not_peel
2885 //            /        |      |
2886 //            |        v      |
2887 //            v       ifB     |
2888 //          exitA:   / \      |
2889 //                  /   \     |
2890 //                 v     v    |
2891 //               false true   |
2892 //               /       \    |
2893 //              /         ----+
2894 //             |
2895 //             v
2896 //           exitB:
2897 //           stmt4
2898 //
2899 //
2900 //            after clone loop
2901 //
2902 //                   stmt1
2903 //                     |
2904 //                     v
2905 //               loop predicate
2906 //                 /       \
2907 //        clone   /         \   orig
2908 //               /           \
2909 //              /             \
2910 //             v               v
2911 //   +---->loop                loop<----+
2912 //   |      |                    |      |
2913 //   |    stmt2                stmt2    |
2914 //   |      |                    |      |
2915 //   |      v                    v      |
2916 //   |      ifA                 ifA     |
2917 //   |      | \                / |      |
2918 //   |      v  v              v  v      |
2919 //   ^    true  false      false true   ^  <-- last_peel
2920 //   |      |   ^   \       /    |      |
2921 //   | cut==|==  \   \     /  ===|==cut |
2922 //   |    stmt3   \   \   /    stmt3    |  <-- first_not_peel
2923 //   |      |    dom   | |       |      |
2924 //   |      v      \  1v v2      v      |
2925 //   |      ifB     regionA     ifB     |
2926 //   |      / \        |       / \      |
2927 //   |     /   \       v      /   \     |
2928 //   |    v     v    exitA:  v     v    |
2929 //   |    true  false      false true   |
2930 //   |    /     ^   \      /       \    |
2931 //   +----       \   \    /         ----+
2932 //               dom  \  /
2933 //                 \  1v v2
2934 //                  regionB
2935 //                     |
2936 //                     v
2937 //                   exitB:
2938 //                   stmt4
2939 //
2940 //
2941 //           after partial peel
2942 //
2943 //                  stmt1
2944 //                     |
2945 //                     v
2946 //               loop predicate
2947 //                 /
2948 //        clone   /             orig
2949 //               /          TOP
2950 //              /             \
2951 //             v               v
2952 //    TOP->loop                loop----+
2953 //          |                    |      |
2954 //        stmt2                stmt2    |
2955 //          |                    |      |
2956 //          v                    v      |
2957 //          ifA                 ifA     |
2958 //          | \                / |      |
2959 //          v  v              v  v      |
2960 //        true  false      false true   |     <-- last_peel
2961 //          |   ^   \       /    +------|---+
2962 //  +->newloop   \   \     /  === ==cut |   |
2963 //  |     stmt3   \   \   /     TOP     |   |
2964 //  |       |    dom   | |      stmt3   |   | <-- first_not_peel
2965 //  |       v      \  1v v2      v      |   |
2966 //  |       ifB     regionA     ifB     ^   v
2967 //  |       / \        |       / \      |   |
2968 //  |      /   \       v      /   \     |   |
2969 //  |     v     v    exitA:  v     v    |   |
2970 //  |     true  false      false true   |   |
2971 //  |     /     ^   \      /       \    |   |
2972 //  |    |       \   \    /         v   |   |
2973 //  |    |       dom  \  /         TOP  |   |
2974 //  |    |         \  1v v2             |   |
2975 //  ^    v          regionB             |   |
2976 //  |    |             |                |   |
2977 //  |    |             v                ^   v
2978 //  |    |           exitB:             |   |
2979 //  |    |           stmt4              |   |
2980 //  |    +------------>-----------------+   |
2981 //  |                                       |
2982 //  +-----------------<---------------------+
2983 //
2984 //
2985 //              final graph
2986 //
2987 //                  stmt1
2988 //                    |
2989 //                    v
2990 //               loop predicate
2991 //                    |
2992 //                    v
2993 //                  stmt2 clone
2994 //                    |
2995 //                    v
2996 //         ........> ifA clone
2997 //         :        / |
2998 //        dom      /  |
2999 //         :      v   v
3000 //         :  false   true
3001 //         :  |       |
3002 //         :  |       v
3003 //         :  |    newloop<-----+
3004 //         :  |        |        |
3005 //         :  |     stmt3 clone |
3006 //         :  |        |        |
3007 //         :  |        v        |
3008 //         :  |       ifB       |
3009 //         :  |      / \        |
3010 //         :  |     v   v       |
3011 //         :  |  false true     |
3012 //         :  |   |     |       |
3013 //         :  |   v    stmt2    |
3014 //         :  | exitB:  |       |
3015 //         :  | stmt4   v       |
3016 //         :  |       ifA orig  |
3017 //         :  |      /  \       |
3018 //         :  |     /    \      |
3019 //         :  |    v     v      |
3020 //         :  |  false  true    |
3021 //         :  |  /        \     |
3022 //         :  v  v         -----+
3023 //          RegionA
3024 //             |
3025 //             v
3026 //           exitA
3027 //
3028 bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
3029 
3030   assert(!loop->_head->is_CountedLoop(), "Non-counted loop only");
3031   if (!loop->_head->is_Loop()) {
3032     return false;
       }
3033 
3034   LoopNode *head  = loop->_head->as_Loop();
3035 
3036   if (head->is_partial_peel_loop() || head->partial_peel_has_failed()) {
3037     return false;
3038   }
3039 
3040   // Check for complex exit control
3041   for(uint ii = 0; ii < loop->_body.size(); ii++ ) {
3042     Node *n = loop->_body.at(ii);
3043     int opc = n->Opcode();
3044     if (n->is_Call()        ||
3045         opc == Op_Catch     ||
3046         opc == Op_CatchProj ||
3047         opc == Op_Jump      ||
3048         opc == Op_JumpProj) {
3049 #if !defined(PRODUCT)
3050       if (TracePartialPeeling) {
3051         tty->print_cr("\nExit control too complex: lp: %d", head->_idx);
3052       }
3053 #endif
3054       return false;
3055     }
3056   }
3057 
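       // Remember the dominator depth of the head; it is passed to clone_loop()
       // below and reused in Step 4 when the cloned tail becomes the original
       // head's new immediate dominator.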
3058   int dd = dom_depth(head);
3059 
3060   // Step 1: find cut point
3061 
3062   // Walk up the dominators to the loop head looking for the first loop exit
3063   // which is executed on every path thru the loop.
3064   IfNode *peel_if = NULL;
3065   IfNode *peel_if_cmpu = NULL;
3066 
3067   Node *iff = loop->tail();
3068   while( iff != head ) {
3069     if( iff->is_If() ) {
3070       Node *ctrl = get_ctrl(iff->in(1));
3071       if (ctrl->is_top()) return false; // Dead test on live IF.
3072       // If loop-varying exit-test, check for induction variable
3073       if( loop->is_member(get_loop(ctrl)) &&
3074           loop->is_loop_exit(iff) &&
3075           is_possible_iv_test(iff)) {
3076         Node* cmp = iff->in(1)->in(1);
3077         if (cmp->Opcode() == Op_CmpI) {
3078           peel_if = iff->as_If();
3079         } else {
3080           assert(cmp->Opcode() == Op_CmpU, "must be CmpI or CmpU");
3081           peel_if_cmpu = iff->as_If();
3082         }
3083       }
3084     }
3085     iff = idom(iff);
3086   }
3087   // Prefer signed compare over unsigned compare.
3088   IfNode* new_peel_if = NULL;
3089   if (peel_if == NULL) {
3090     if (!PartialPeelAtUnsignedTests || peel_if_cmpu == NULL) {
3091       return false;   // No peel point found
3092     }
3093     new_peel_if = insert_cmpi_loop_exit(peel_if_cmpu, loop);
3094     if (new_peel_if == NULL) {
3095       return false;   // No peel point found
3096     }
3097     peel_if = new_peel_if;
3098   }
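       // last_peel is the projection of the chosen exit test that stays in the
       // loop; first_not_peeled is the CFG node it controls, i.e. where the
       // not_peeled section will begin (see the assert on first_not_peeled->in(0)
       // in Step 3).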
3099   Node* last_peel        = stay_in_loop(peel_if, loop);
3100   Node* first_not_peeled = stay_in_loop(last_peel, loop);
3101   if (first_not_peeled == NULL || first_not_peeled == head) {
3102     return false;
3103   }
3104 
3105 #if !defined(PRODUCT)
3106   if (TraceLoopOpts) {
3107     tty->print("PartialPeel  ");
3108     loop->dump_head();
3109   }
3110 
3111   if (TracePartialPeeling) {
3112     tty->print_cr("before partial peel one iteration");
3113     Node_List wl;
3114     Node* t = head->in(2);
3115     while (true) {
3116       wl.push(t);
3117       if (t == head) break;
3118       t = idom(t);
3119     }
3120     while (wl.size() > 0) {
3121       Node* tt = wl.pop();
3122       tt->dump();
3123       if (tt == last_peel) tty->print_cr("-- cut --");
3124     }
3125   }
3126 #endif
3127   ResourceArea *area = Thread::current()->resource_area();
3128   VectorSet peel(area);
3129   VectorSet not_peel(area);
3130   Node_List peel_list(area);
3131   Node_List worklist(area);
3132   Node_List sink_list(area);
3133 
3134   // The set of cfg nodes to peel consists of those that are executable from
3135   // the head through last_peel.
3136   assert(worklist.size() == 0, "should be empty");
3137   worklist.push(head);
3138   peel.set(head->_idx);
3139   while (worklist.size() > 0) {
3140     Node *n = worklist.pop();
3141     if (n != last_peel) {
3142       for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3143         Node* use = n->fast_out(j);
3144         if (use->is_CFG() &&
3145             loop->is_member(get_loop(use)) &&
3146             !peel.test_set(use->_idx)) {
3147           worklist.push(use);
3148         }
3149       }
3150     }
3151   }
3152 
3153   // The set of non-cfg nodes to peel consists of those that are control
3154   // dependent on the peeled cfg nodes.
3155   uint i;
3156   for(i = 0; i < loop->_body.size(); i++ ) {
3157     Node *n = loop->_body.at(i);
3158     Node *n_c = has_ctrl(n) ? get_ctrl(n) : n;
3159     if (peel.test(n_c->_idx)) {
3160       peel.set(n->_idx);
3161     } else {
3162       not_peel.set(n->_idx);
3163     }
3164   }
3165 
3166   // Step 2: move operations from the peeled section down into the
3167   //         not-peeled section
3168 
3169   // Get a post order schedule of the nodes in the peel region.
3170   // The result is returned in the right-most operand (peel_list).
3171   scheduled_nodelist(loop, peel, peel_list );
3172 
3173   assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");
3174 
3175   // For future check for too many new phis
3176   uint old_phi_cnt = 0;
3177   for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
3178     Node* use = head->fast_out(j);
3179     if (use->is_Phi()) old_phi_cnt++;
3180   }
3181 
3182 #if !defined(PRODUCT)
3183   if (TracePartialPeeling) {
3184     tty->print_cr("\npeeled list");
3185   }
3186 #endif
3187 
3188   // Evacuate nodes in peel region into the not_peeled region if possible
3189   uint new_phi_cnt = 0;
3190   uint cloned_for_outside_use = 0;
3191   for (i = 0; i < peel_list.size();) {
3192     Node* n = peel_list.at(i);
3193 #if !defined(PRODUCT)
3194     if (TracePartialPeeling) n->dump();
3195 #endif
3196     bool incr = true;
3197     if ( !n->is_CFG() ) {
3198 
3199       if ( has_use_in_set(n, not_peel) ) {
3200 
3201         // If not used internal to the peeled region,
3202         // move "n" from peeled to not_peeled region.
3203 
3204         if ( !has_use_internal_to_set(n, peel, loop) ) {
3205 
3206           // if not pinned and not a load (which may be anti-dependent on a store)
3207           // and not a CMove (the Matcher expects only bool->cmove).
3208           if (n->in(0) == NULL && !n->is_Load() && !n->is_CMove()) {
3209             cloned_for_outside_use += clone_for_use_outside_loop( loop, n, worklist );
3210             sink_list.push(n);
3211             peel     >>= n->_idx; // delete n from peel set.
3212             not_peel <<= n->_idx; // add n to not_peel set.
3213             peel_list.remove(i);
3214             incr = false;
3215 #if !defined(PRODUCT)
3216             if (TracePartialPeeling) {
3217               tty->print_cr("sink to not_peeled region: %d newbb: %d",
3218                             n->_idx, get_ctrl(n)->_idx);
3219             }
3220 #endif
3221           }
3222         } else {
3223           // Otherwise check for special def-use cases that span
3224           // the peel/not_peel boundary such as bool->if
3225           clone_for_special_use_inside_loop( loop, n, not_peel, sink_list, worklist );
3226           new_phi_cnt++;
3227         }
3228       }
3229     }
3230     if (incr) i++;
3231   }
3232 
3233   if (new_phi_cnt > old_phi_cnt + PartialPeelNewPhiDelta) {
3234 #if !defined(PRODUCT)
3235     if (TracePartialPeeling) {
3236       tty->print_cr("\nToo many new phis: %d  old %d new cmpi: %c",
3237                     new_phi_cnt, old_phi_cnt, new_peel_if != NULL?'T':'F');
3238     }
3239 #endif
3240     if (new_peel_if != NULL) {
3241       remove_cmpi_loop_exit(new_peel_if, loop);
3242     }
3243     // Inhibit more partial peeling on this loop
3244     assert(!head->is_partial_peel_loop(), "not partial peeled");
3245     head->mark_partial_peel_failed();
3246     if (cloned_for_outside_use > 0) {
3247       // Terminate this round of loop opts because
3248       // the graph outside this loop was changed.
3249       C->set_major_progress();
3250       return true;
3251     }
3252     return false;
3253   }
3254 
3255   // Step 3: clone loop, retarget control, and insert new phis
3256 
3257   // Create a new loop head for the new phis and to hang
3258   // the nodes being moved (sunk) from the peel region.
3259   LoopNode* new_head = new LoopNode(last_peel, last_peel);
3260   new_head->set_unswitch_count(head->unswitch_count()); // Preserve
3261   _igvn.register_new_node_with_optimizer(new_head);
3262   assert(first_not_peeled->in(0) == last_peel, "last_peel <- first_not_peeled");
3263   _igvn.replace_input_of(first_not_peeled, 0, new_head);
3264   set_loop(new_head, loop);
3265   loop->_body.push(new_head);
3266   not_peel.set(new_head->_idx);
3267   set_idom(new_head, last_peel, dom_depth(first_not_peeled));
3268   set_idom(first_not_peeled, new_head, dom_depth(first_not_peeled));
3269 
3270   while (sink_list.size() > 0) {
3271     Node* n = sink_list.pop();
3272     set_ctrl(n, new_head);
3273   }
3274 
3275   assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");
3276 
3277   clone_loop(loop, old_new, dd, IgnoreStripMined);
3278 
3279   const uint clone_exit_idx = 1;
3280   const uint orig_exit_idx  = 2;
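       // (In the exit merge regions built by clone_loop() -- regionA/regionB in
       // the diagrams above -- input 1 comes from the cloned loop's exit and
       // input 2 from the original loop's exit, matching the indices asserted
       // below.)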
3281   assert(is_valid_clone_loop_form( loop, peel_list, orig_exit_idx, clone_exit_idx ), "bad clone loop");
3282 
3283   Node* head_clone             = old_new[head->_idx];
3284   LoopNode* new_head_clone     = old_new[new_head->_idx]->as_Loop();
3285   Node* orig_tail_clone        = head_clone->in(2);
3286 
3287   // Add phi if "def" node is in peel set and "use" is not
3288 
3289   for(i = 0; i < peel_list.size(); i++ ) {
3290     Node *def  = peel_list.at(i);
3291     if (!def->is_CFG()) {
3292       for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
3293         Node *use = def->fast_out(j);
3294         if (has_node(use) && use->in(0) != C->top() &&
3295             (!peel.test(use->_idx) ||
3296              (use->is_Phi() && use->in(0) == head)) ) {
3297           worklist.push(use);
3298         }
3299       }
3300       while( worklist.size() ) {
3301         Node *use = worklist.pop();
3302         for (uint j = 1; j < use->req(); j++) {
3303           Node* n = use->in(j);
3304           if (n == def) {
3305 
3306             // "def" is in peel set, "use" is not in peel set
3307             // or "use" is in the entry boundary (a phi) of the peel set
3308 
3309             Node* use_c = has_ctrl(use) ? get_ctrl(use) : use;
3310 
3311             if ( loop->is_member(get_loop( use_c )) ) {
3312               // use is in loop
3313               if (old_new[use->_idx] != NULL) { // null for dead code
3314                 Node* use_clone = old_new[use->_idx];
3315                 _igvn.replace_input_of(use, j, C->top());
3316                 insert_phi_for_loop( use_clone, j, old_new[def->_idx], def, new_head_clone );
3317               }
3318             } else {
3319               assert(is_valid_clone_loop_exit_use(loop, use, orig_exit_idx), "clone loop format");
3320               // use is not in the loop, check if the live range includes the cut
3321               Node* lp_if = use_c->in(orig_exit_idx)->in(0);
3322               if (not_peel.test(lp_if->_idx)) {
3323                 assert(j == orig_exit_idx, "use from original loop");
3324                 insert_phi_for_loop( use, clone_exit_idx, old_new[def->_idx], def, new_head_clone );
3325               }
3326             }
3327           }
3328         }
3329       }
3330     }
3331   }
3332 
3333   // Step 3b: retarget control
3334 
3335   // Redirect control to the new loop head if a cloned node in
3336   // the not_peeled region has control that points into the peeled region.
3337   // This is necessary because the cloned peeled region will be outside
3338   // the loop.
3339   //                            from    to
3340   //          cloned-peeled    <---+
3341   //    new_head_clone:            |    <--+
3342   //          cloned-not_peeled  in(0)    in(0)
3343   //          orig-peeled
3344 
3345   for(i = 0; i < loop->_body.size(); i++ ) {
3346     Node *n = loop->_body.at(i);
3347     if (!n->is_CFG()           && n->in(0) != NULL        &&
3348         not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) {
3349       Node* n_clone = old_new[n->_idx];
3350       _igvn.replace_input_of(n_clone, 0, new_head_clone);
3351     }
3352   }
3353 
3354   // The backedge of the surviving new_head (the clone) is the original last_peel
3355   _igvn.replace_input_of(new_head_clone, LoopNode::LoopBackControl, last_peel);
3356 
3357   // Cut first node in original not_peel set
3358   _igvn.rehash_node_delayed(new_head);                     // Multiple edge updates:
3359   new_head->set_req(LoopNode::EntryControl,    C->top());  //   use rehash_node_delayed / set_req instead of
3360   new_head->set_req(LoopNode::LoopBackControl, C->top());  //   multiple replace_input_of calls
3361 
3362   // Copy the head_clone's back-branch info to the original head,
3363   // then remove the original head's loop entry and the
3364   // clone head's back-branch
3365   _igvn.rehash_node_delayed(head); // Multiple edge updates
3366   head->set_req(LoopNode::EntryControl,    head_clone->in(LoopNode::LoopBackControl));
3367   head->set_req(LoopNode::LoopBackControl, C->top());
3368   _igvn.replace_input_of(head_clone, LoopNode::LoopBackControl, C->top());
3369 
3370   // Similarly modify the phis
3371   for (DUIterator_Fast kmax, k = head->fast_outs(kmax); k < kmax; k++) {
3372     Node* use = head->fast_out(k);
3373     if (use->is_Phi() && use->outcnt() > 0) {
3374       Node* use_clone = old_new[use->_idx];
3375       _igvn.rehash_node_delayed(use); // Multiple edge updates
3376       use->set_req(LoopNode::EntryControl,    use_clone->in(LoopNode::LoopBackControl));
3377       use->set_req(LoopNode::LoopBackControl, C->top());
3378       _igvn.replace_input_of(use_clone, LoopNode::LoopBackControl, C->top());
3379     }
3380   }
3381 
3382   // Step 4: update dominator tree and dominator depth
3383 
3384   set_idom(head, orig_tail_clone, dd);
3385   recompute_dom_depth();
3386 
3387   // Inhibit more partial peeling on this loop
3388   new_head_clone->set_partial_peel_loop();
3389   C->set_major_progress();
3390   loop->record_for_igvn();
3391 
3392 #if !defined(PRODUCT)
3393   if (TracePartialPeeling) {
3394     tty->print_cr("\nafter partial peel one iteration");
3395     Node_List wl(area);
3396     Node* t = last_peel;
3397     while (true) {
3398       wl.push(t);
3399       if (t == head_clone) break;
3400       t = idom(t);
3401     }
3402     while (wl.size() > 0) {
3403       Node* tt = wl.pop();
3404       if (tt == head) tty->print_cr("orig head");
3405       else if (tt == new_head_clone) tty->print_cr("new head");
3406       else if (tt == head_clone) tty->print_cr("clone head");
3407       tt->dump();
3408     }
3409   }
3410 #endif
3411   return true;
3412 }
3413 
3414 //------------------------------reorg_offsets----------------------------------
3415 // Reorganize offset computations to lower register pressure.  Mostly
3416 // prevent loop-fallout uses of the pre-incremented trip counter (which are
3417 // then live at the same time as the post-incremented trip counter, forcing
3418 // an extra register move).
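     //
     // The rewrite below, in IR terms (a sketch, not tied to a particular
     // graph): a fall-out use of the trip-counter phi,
     //
     //   ... use(phi) ...                       // phi and incr both live out
     //
     // is changed to use a recomputed post-loop copy of the same value,
     //
     //   post = AddI(Opaque2(incr), -stride)    // == phi's value at the exit
     //   ... use(post) ...                      // only incr stays live
     //
     // The Opaque2 node keeps the AddI from being folded straight back into the
     // pre-incremented phi.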
3419 void PhaseIdealLoop::reorg_offsets(IdealLoopTree *loop) {
3420   // Perform it only for canonical counted loops.
3421   // The loop's shape could have been messed up by iteration_split_impl.
3422   if (!loop->_head->is_CountedLoop())
3423     return;
3424   if (!loop->_head->as_Loop()->is_valid_counted_loop())
3425     return;
3426 
3427   CountedLoopNode *cl = loop->_head->as_CountedLoop();
3428   CountedLoopEndNode *cle = cl->loopexit();
3429   Node *exit = cle->proj_out(false);
3430   Node *phi = cl->phi();
3431 
3432   // Check for the special case of folks using the pre-incremented
3433   // trip-counter on the fall-out path (forces the pre-incremented
3434   // and post-incremented trip counter to be live at the same time).
3435   // Fix this by adjusting to use the post-increment trip counter.
3436 
3437   bool progress = true;
3438   while (progress) {
3439     progress = false;
3440     for (DUIterator_Fast imax, i = phi->fast_outs(imax); i < imax; i++) {
3441       Node* use = phi->fast_out(i);   // User of trip-counter
3442       if (!has_ctrl(use))  continue;
3443       Node *u_ctrl = get_ctrl(use);
3444       if (use->is_Phi()) {
3445         u_ctrl = NULL;
3446         for (uint j = 1; j < use->req(); j++)
3447           if (use->in(j) == phi)
3448             u_ctrl = dom_lca(u_ctrl, use->in(0)->in(j));
3449       }
3450       IdealLoopTree *u_loop = get_loop(u_ctrl);
3451       // Look for loop-invariant use
3452       if (u_loop == loop) continue;
3453       if (loop->is_member(u_loop)) continue;
3454       // Check that the use is live out the bottom.  Assuming the trip-counter
3455       // update is right at the bottom, uses of the loop middle are ok.
3456       if (dom_lca(exit, u_ctrl) != exit) continue;
3457       // Hit!  Refactor use to use the post-incremented tripcounter.
3458       // Compute a post-increment tripcounter.
3459       Node* c = exit;
3460       if (cl->is_strip_mined()) {
3461         IdealLoopTree* outer_loop = get_loop(cl->outer_loop());
3462         if (!outer_loop->is_member(u_loop)) {
3463           c = cl->outer_loop_exit();
3464         }
3465       }
3466       Node *opaq = new Opaque2Node(C, cle->incr());
3467       register_new_node(opaq, c);
3468       Node *neg_stride = _igvn.intcon(-cle->stride_con());
3469       set_ctrl(neg_stride, C->root());
3470       Node *post = new AddINode(opaq, neg_stride);
3471       register_new_node(post, c);
3472       _igvn.rehash_node_delayed(use);
3473       for (uint j = 1; j < use->req(); j++) {
3474         if (use->in(j) == phi)
3475           use->set_req(j, post);
3476       }
3477       // Since DU info changed, rerun loop
3478       progress = true;
3479       break;
3480     }
3481   }
3482 
3483 }