1 /*
   2  * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/callnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/connode.hpp"
  35 #include "opto/divnode.hpp"
  36 #include "opto/loopnode.hpp"
  37 #include "opto/matcher.hpp"
  38 #include "opto/mulnode.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/opaquenode.hpp"
  41 #include "opto/rootnode.hpp"
  42 #include "opto/subnode.hpp"
  43 #include "utilities/macros.hpp"
  44 #if INCLUDE_ZGC
  45 #include "gc/z/c2/zBarrierSetC2.hpp"
  46 #endif
  47 
  48 //=============================================================================
  49 //------------------------------split_thru_phi---------------------------------
  50 // Split Node 'n' through merge point if there is enough win.
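// For illustration only (a sketch, not part of the original comment): splitting
// an AddI through a two-way merge roughly turns
//
//     region = Region(p1, p2)            region = Region(p1, p2)
//     phi    = Phi(region, v1, v2)  ==>  phi'   = Phi(region, AddI(v1, c), AddI(v2, c))
//     n      = AddI(phi, c)              (all uses of 'n' are redirected to phi')
//
// where each cloned AddI is placed on the corresponding incoming path. The
// split is only kept if enough of the clones simplify or constant-fold
// ("wins" below must exceed the caller's policy).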
  51 Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
  52   if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) {
  53     // ConvI2L may have type information on it which is unsafe to push up
  54     // so disable this for now
  55     return NULL;
  56   }
  57 
  58   // Splitting range check CastIIs through a loop induction Phi can
  59   // cause new Phis to be created that are left unrelated to the loop
  60   // induction Phi and prevent optimizations (vectorization)
  61   if (n->Opcode() == Op_CastII && n->as_CastII()->has_range_check() &&
  62       region->is_CountedLoop() && n->in(1) == region->as_CountedLoop()->phi()) {
  63     return NULL;
  64   }
  65 
  66   int wins = 0;
  67   assert(!n->is_CFG(), "");
  68   assert(region->is_Region(), "");
  69 
  70   const Type* type = n->bottom_type();
  71   const TypeOopPtr *t_oop = _igvn.type(n)->isa_oopptr();
  72   Node *phi;
  73   if (t_oop != NULL && t_oop->is_known_instance_field()) {
  74     int iid    = t_oop->instance_id();
  75     int index  = C->get_alias_index(t_oop);
  76     int offset = t_oop->offset();
  77     phi = new PhiNode(region, type, NULL, iid, index, offset);
  78   } else {
  79     phi = PhiNode::make_blank(region, n);
  80   }
  81   uint old_unique = C->unique();
  82   for (uint i = 1; i < region->req(); i++) {
  83     Node *x;
  84     Node* the_clone = NULL;
  85     if (region->in(i) == C->top()) {
  86       x = C->top();             // Dead path?  Use a dead data op
  87     } else {
  88       x = n->clone();           // Else clone up the data op
  89       the_clone = x;            // Remember for possible deletion.
  90       // Alter data node to use pre-phi inputs
  91       if (n->in(0) == region)
  92         x->set_req( 0, region->in(i) );
  93       for (uint j = 1; j < n->req(); j++) {
  94         Node *in = n->in(j);
  95         if (in->is_Phi() && in->in(0) == region)
  96           x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone
  97       }
  98     }
  99     // Check for a 'win' on some paths
 100     const Type *t = x->Value(&_igvn);
 101 
 102     bool singleton = t->singleton();
 103 
 104     // A TOP singleton indicates that there are no possible values incoming
 105     // along a particular edge. In most cases, this is OK, and the Phi will
 106     // be eliminated later in an Ideal call. However, we can't allow this to
 107     // happen if the singleton occurs on loop entry, as the elimination of
 108     // the PhiNode may cause the resulting node to migrate back to a previous
 109     // loop iteration.
 110     if (singleton && t == Type::TOP) {
 111       // Is_Loop() == false does not confirm the absence of a loop (e.g., an
 112       // irreducible loop may not be indicated by an affirmative is_Loop());
 113       // therefore, the only top we can split thru a phi is on a backedge of
 114       // a loop.
 115       singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
 116     }
 117 
 118     if (singleton) {
 119       wins++;
 120       x = ((PhaseGVN&)_igvn).makecon(t);
 121     } else {
 122       // We now call Identity to try to simplify the cloned node.
 123       // Note that some Identity methods call phase->type(this).
 124       // Make sure that the type array is big enough for
 125       // our new node, even though we may throw the node away.
 126       // (Note: This tweaking with igvn only works because x is a new node.)
 127       _igvn.set_type(x, t);
 128       // If x is a TypeNode, capture any more-precise type permanently into Node
      // otherwise it will not be updated during igvn->transform since
 130       // igvn->type(x) is set to x->Value() already.
 131       x->raise_bottom_type(t);
 132       Node *y = _igvn.apply_identity(x);
 133       if (y != x) {
 134         wins++;
 135         x = y;
 136       } else {
 137         y = _igvn.hash_find(x);
 138         if (y) {
 139           wins++;
 140           x = y;
 141         } else {
 142           // Else x is a new node we are keeping
 143           // We do not need register_new_node_with_optimizer
 144           // because set_type has already been called.
 145           _igvn._worklist.push(x);
 146         }
 147       }
 148     }
 149     if (x != the_clone && the_clone != NULL)
 150       _igvn.remove_dead_node(the_clone);
 151     phi->set_req( i, x );
 152   }
 153   // Too few wins?
 154   if (wins <= policy) {
 155     _igvn.remove_dead_node(phi);
 156     return NULL;
 157   }
 158 
 159   // Record Phi
 160   register_new_node( phi, region );
 161 
 162   for (uint i2 = 1; i2 < phi->req(); i2++) {
 163     Node *x = phi->in(i2);
 164     // If we commoned up the cloned 'x' with another existing Node,
 165     // the existing Node picks up a new use.  We need to make the
 166     // existing Node occur higher up so it dominates its uses.
 167     Node *old_ctrl;
 168     IdealLoopTree *old_loop;
 169 
 170     if (x->is_Con()) {
 171       // Constant's control is always root.
 172       set_ctrl(x, C->root());
 173       continue;
 174     }
 175     // The occasional new node
 176     if (x->_idx >= old_unique) {     // Found a new, unplaced node?
 177       old_ctrl = NULL;
 178       old_loop = NULL;               // Not in any prior loop
 179     } else {
 180       old_ctrl = get_ctrl(x);
 181       old_loop = get_loop(old_ctrl); // Get prior loop
 182     }
 183     // New late point must dominate new use
 184     Node *new_ctrl = dom_lca(old_ctrl, region->in(i2));
 185     if (new_ctrl == old_ctrl) // Nothing is changed
 186       continue;
 187 
 188     IdealLoopTree *new_loop = get_loop(new_ctrl);
 189 
 190     // Don't move x into a loop if its uses are
 191     // outside of loop. Otherwise x will be cloned
 192     // for each use outside of this loop.
 193     IdealLoopTree *use_loop = get_loop(region);
 194     if (!new_loop->is_member(use_loop) &&
 195         (old_loop == NULL || !new_loop->is_member(old_loop))) {
 196       // Take early control, later control will be recalculated
 197       // during next iteration of loop optimizations.
 198       new_ctrl = get_early_ctrl(x);
 199       new_loop = get_loop(new_ctrl);
 200     }
 201     // Set new location
 202     set_ctrl(x, new_ctrl);
 203     // If changing loop bodies, see if we need to collect into new body
 204     if (old_loop != new_loop) {
 205       if (old_loop && !old_loop->_child)
 206         old_loop->_body.yank(x);
 207       if (!new_loop->_child)
 208         new_loop->_body.push(x);  // Collect body info
 209     }
 210   }
 211 
 212   return phi;
 213 }
 214 
 215 //------------------------------dominated_by------------------------------------
 216 // Replace the dominated test with an obvious true or false.  Place it on the
 217 // IGVN worklist for later cleanup.  Move control-dependent data Nodes on the
 218 // live path up to the dominating control.
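// For illustration (a hypothetical Java shape, not from the original comment):
//
//     if (i < limit) { ... }      // dominating test
//     ...
//     if (i < limit) { ... }      // dominated test, reached only via one projection
//
// The dominated If's condition is replaced by the constant 1 (or 0, depending
// on 'flip' and which projection of the dominating test reaches it), so IGVN
// can fold the dominated branch away; data nodes hanging off the live
// projection are rewired to depend on 'prevdom'.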
 219 void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff, bool flip, bool exclude_loop_predicate ) {
 220   if (VerifyLoopOptimizations && PrintOpto) { tty->print_cr("dominating test"); }
 221 
 222   // prevdom is the dominating projection of the dominating test.
 223   assert( iff->is_If(), "" );
 224   assert(iff->Opcode() == Op_If || iff->Opcode() == Op_CountedLoopEnd || iff->Opcode() == Op_RangeCheck, "Check this code when new subtype is added");
 225   int pop = prevdom->Opcode();
 226   assert( pop == Op_IfFalse || pop == Op_IfTrue, "" );
 227   if (flip) {
 228     if (pop == Op_IfTrue)
 229       pop = Op_IfFalse;
 230     else
 231       pop = Op_IfTrue;
 232   }
 233   // 'con' is set to true or false to kill the dominated test.
 234   Node *con = _igvn.makecon(pop == Op_IfTrue ? TypeInt::ONE : TypeInt::ZERO);
 235   set_ctrl(con, C->root()); // Constant gets a new use
 236   // Hack the dominated test
 237   _igvn.replace_input_of(iff, 1, con);
 238 
  // If I don't have a reachable TRUE and FALSE path following the IfNode then
 240   // I can assume this path reaches an infinite loop.  In this case it's not
 241   // important to optimize the data Nodes - either the whole compilation will
 242   // be tossed or this path (and all data Nodes) will go dead.
 243   if (iff->outcnt() != 2) return;
 244 
 245   // Make control-dependent data Nodes on the live path (path that will remain
 246   // once the dominated IF is removed) become control-dependent on the
 247   // dominating projection.
 248   Node* dp = iff->as_If()->proj_out_or_null(pop == Op_IfTrue);
 249 
  // Loop predicates may have dependent checks which should not
  // be skipped. For example, a range check predicate has two checks,
  // for the lower and upper bounds.
 253   if (dp == NULL)
 254     return;
 255 
 256   ProjNode* dp_proj  = dp->as_Proj();
 257   ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj();
 258   if (exclude_loop_predicate &&
 259       (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != NULL ||
 260        unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_profile_predicate) != NULL ||
 261        unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_range_check) != NULL)) {
 262     // If this is a range check (IfNode::is_range_check), do not
 263     // reorder because Compile::allow_range_check_smearing might have
 264     // changed the check.
 265     return; // Let IGVN transformation change control dependence.
 266   }
 267 
 268   IdealLoopTree *old_loop = get_loop(dp);
 269 
 270   for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
 271     Node* cd = dp->fast_out(i); // Control-dependent node
 272     if (cd->depends_only_on_test()) {
 273       assert(cd->in(0) == dp, "");
 274       _igvn.replace_input_of(cd, 0, prevdom);
 275       set_early_ctrl(cd);
 276       IdealLoopTree *new_loop = get_loop(get_ctrl(cd));
 277       if (old_loop != new_loop) {
 278         if (!old_loop->_child) old_loop->_body.yank(cd);
 279         if (!new_loop->_child) new_loop->_body.push(cd);
 280       }
 281       --i;
 282       --imax;
 283     }
 284   }
 285 }
 286 
 287 //------------------------------has_local_phi_input----------------------------
// Return 'n's controlling block (non-NULL) if 'n' has Phi inputs from its local
// block and no other block-local inputs (all non-local-phi inputs come from earlier blocks)
 290 Node *PhaseIdealLoop::has_local_phi_input( Node *n ) {
 291   Node *n_ctrl = get_ctrl(n);
 292   // See if some inputs come from a Phi in this block, or from before
 293   // this block.
 294   uint i;
 295   for( i = 1; i < n->req(); i++ ) {
 296     Node *phi = n->in(i);
 297     if( phi->is_Phi() && phi->in(0) == n_ctrl )
 298       break;
 299   }
 300   if( i >= n->req() )
 301     return NULL;                // No Phi inputs; nowhere to clone thru
 302 
 303   // Check for inputs created between 'n' and the Phi input.  These
 304   // must split as well; they have already been given the chance
 305   // (courtesy of a post-order visit) and since they did not we must
 306   // recover the 'cost' of splitting them by being very profitable
 307   // when splitting 'n'.  Since this is unlikely we simply give up.
 308   for( i = 1; i < n->req(); i++ ) {
 309     Node *m = n->in(i);
 310     if( get_ctrl(m) == n_ctrl && !m->is_Phi() ) {
 311       // We allow the special case of AddP's with no local inputs.
 312       // This allows us to split-up address expressions.
 313       if (m->is_AddP() &&
 314           get_ctrl(m->in(2)) != n_ctrl &&
 315           get_ctrl(m->in(3)) != n_ctrl) {
 316         // Move the AddP up to dominating point
 317         Node* c = find_non_split_ctrl(idom(n_ctrl));
 318         if (c->is_OuterStripMinedLoop()) {
 319           c->as_Loop()->verify_strip_mined(1);
 320           c = c->in(LoopNode::EntryControl);
 321         }
 322         set_ctrl_and_loop(m, c);
 323         continue;
 324       }
 325       return NULL;
 326     }
 327     assert(m->is_Phi() || is_dominator(get_ctrl(m), n_ctrl), "m has strange control");
 328   }
 329 
 330   return n_ctrl;
 331 }
 332 
 333 //------------------------------remix_address_expressions----------------------
 334 // Rework addressing expressions to get the most loop-invariant stuff
 335 // moved out.  We'd like to do all associative operators, but it's especially
 336 // important (common) to do address expressions.
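// For illustration (a hypothetical Java loop, not from the original comment):
//
//     for (int i = 0; i < n; i++) {
//       sum += a[i + invar];           // address needs (i + invar) << scale
//     }
//
// The shift distributes over the add: (i + invar) << scale becomes
// (i << scale) + (invar << scale), and the loop-invariant (invar << scale)
// can then be hoisted out of the loop; similar reshaping is done for AddP
// chains further below.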
 337 Node *PhaseIdealLoop::remix_address_expressions( Node *n ) {
 338   if (!has_ctrl(n))  return NULL;
 339   Node *n_ctrl = get_ctrl(n);
 340   IdealLoopTree *n_loop = get_loop(n_ctrl);
 341 
 342   // See if 'n' mixes loop-varying and loop-invariant inputs and
 343   // itself is loop-varying.
 344 
 345   // Only interested in binary ops (and AddP)
 346   if( n->req() < 3 || n->req() > 4 ) return NULL;
 347 
 348   Node *n1_ctrl = get_ctrl(n->in(                    1));
 349   Node *n2_ctrl = get_ctrl(n->in(                    2));
 350   Node *n3_ctrl = get_ctrl(n->in(n->req() == 3 ? 2 : 3));
 351   IdealLoopTree *n1_loop = get_loop( n1_ctrl );
 352   IdealLoopTree *n2_loop = get_loop( n2_ctrl );
 353   IdealLoopTree *n3_loop = get_loop( n3_ctrl );
 354 
 355   // Does one of my inputs spin in a tighter loop than self?
 356   if( (n_loop->is_member( n1_loop ) && n_loop != n1_loop) ||
 357       (n_loop->is_member( n2_loop ) && n_loop != n2_loop) ||
 358       (n_loop->is_member( n3_loop ) && n_loop != n3_loop) )
 359     return NULL;                // Leave well enough alone
 360 
 361   // Is at least one of my inputs loop-invariant?
 362   if( n1_loop == n_loop &&
 363       n2_loop == n_loop &&
 364       n3_loop == n_loop )
 365     return NULL;                // No loop-invariant inputs
 366 
 367 
 368   int n_op = n->Opcode();
 369 
 370   // Replace expressions like ((V+I) << 2) with (V<<2 + I<<2).
 371   if( n_op == Op_LShiftI ) {
 372     // Scale is loop invariant
 373     Node *scale = n->in(2);
 374     Node *scale_ctrl = get_ctrl(scale);
 375     IdealLoopTree *scale_loop = get_loop(scale_ctrl );
 376     if( n_loop == scale_loop || !scale_loop->is_member( n_loop ) )
 377       return NULL;
 378     const TypeInt *scale_t = scale->bottom_type()->isa_int();
 379     if( scale_t && scale_t->is_con() && scale_t->get_con() >= 16 )
      return NULL;              // Don't bother with byte/short masking
 381     // Add must vary with loop (else shift would be loop-invariant)
 382     Node *add = n->in(1);
 383     Node *add_ctrl = get_ctrl(add);
 384     IdealLoopTree *add_loop = get_loop(add_ctrl);
 385     //assert( n_loop == add_loop, "" );
 386     if( n_loop != add_loop ) return NULL;  // happens w/ evil ZKM loops
 387 
 388     // Convert I-V into I+ (0-V); same for V-I
 389     if( add->Opcode() == Op_SubI &&
 390         _igvn.type( add->in(1) ) != TypeInt::ZERO ) {
 391       Node *zero = _igvn.intcon(0);
 392       set_ctrl(zero, C->root());
 393       Node *neg = new SubINode( _igvn.intcon(0), add->in(2) );
 394       register_new_node( neg, get_ctrl(add->in(2) ) );
 395       add = new AddINode( add->in(1), neg );
 396       register_new_node( add, add_ctrl );
 397     }
 398     if( add->Opcode() != Op_AddI ) return NULL;
 399     // See if one add input is loop invariant
 400     Node *add_var = add->in(1);
 401     Node *add_var_ctrl = get_ctrl(add_var);
 402     IdealLoopTree *add_var_loop = get_loop(add_var_ctrl );
 403     Node *add_invar = add->in(2);
 404     Node *add_invar_ctrl = get_ctrl(add_invar);
 405     IdealLoopTree *add_invar_loop = get_loop(add_invar_ctrl );
 406     if( add_var_loop == n_loop ) {
 407     } else if( add_invar_loop == n_loop ) {
 408       // Swap to find the invariant part
 409       add_invar = add_var;
 410       add_invar_ctrl = add_var_ctrl;
 411       add_invar_loop = add_var_loop;
 412       add_var = add->in(2);
      add_var_ctrl = get_ctrl(add_var);
      add_var_loop = get_loop(add_var_ctrl);
 415     } else                      // Else neither input is loop invariant
 416       return NULL;
 417     if( n_loop == add_invar_loop || !add_invar_loop->is_member( n_loop ) )
 418       return NULL;              // No invariant part of the add?
 419 
 420     // Yes!  Reshape address expression!
 421     Node *inv_scale = new LShiftINode( add_invar, scale );
 422     Node *inv_scale_ctrl =
 423       dom_depth(add_invar_ctrl) > dom_depth(scale_ctrl) ?
 424       add_invar_ctrl : scale_ctrl;
 425     register_new_node( inv_scale, inv_scale_ctrl );
 426     Node *var_scale = new LShiftINode( add_var, scale );
 427     register_new_node( var_scale, n_ctrl );
 428     Node *var_add = new AddINode( var_scale, inv_scale );
 429     register_new_node( var_add, n_ctrl );
 430     _igvn.replace_node( n, var_add );
 431     return var_add;
 432   }
 433 
 434   // Replace (I+V) with (V+I)
 435   if( n_op == Op_AddI ||
 436       n_op == Op_AddL ||
 437       n_op == Op_AddF ||
 438       n_op == Op_AddD ||
 439       n_op == Op_MulI ||
 440       n_op == Op_MulL ||
 441       n_op == Op_MulF ||
 442       n_op == Op_MulD ) {
 443     if( n2_loop == n_loop ) {
 444       assert( n1_loop != n_loop, "" );
 445       n->swap_edges(1, 2);
 446     }
 447   }
 448 
 449   // Replace ((I1 +p V) +p I2) with ((I1 +p I2) +p V),
 450   // but not if I2 is a constant.
 451   if( n_op == Op_AddP ) {
 452     if( n2_loop == n_loop && n3_loop != n_loop ) {
 453       if( n->in(2)->Opcode() == Op_AddP && !n->in(3)->is_Con() ) {
 454         Node *n22_ctrl = get_ctrl(n->in(2)->in(2));
 455         Node *n23_ctrl = get_ctrl(n->in(2)->in(3));
 456         IdealLoopTree *n22loop = get_loop( n22_ctrl );
 457         IdealLoopTree *n23_loop = get_loop( n23_ctrl );
 458         if( n22loop != n_loop && n22loop->is_member(n_loop) &&
 459             n23_loop == n_loop ) {
 460           Node *add1 = new AddPNode( n->in(1), n->in(2)->in(2), n->in(3) );
 461           // Stuff new AddP in the loop preheader
 462           register_new_node( add1, n_loop->_head->in(LoopNode::EntryControl) );
 463           Node *add2 = new AddPNode( n->in(1), add1, n->in(2)->in(3) );
 464           register_new_node( add2, n_ctrl );
 465           _igvn.replace_node( n, add2 );
 466           return add2;
 467         }
 468       }
 469     }
 470 
 471     // Replace (I1 +p (I2 + V)) with ((I1 +p I2) +p V)
 472     if (n2_loop != n_loop && n3_loop == n_loop) {
 473       if (n->in(3)->Opcode() == Op_AddX) {
 474         Node *V = n->in(3)->in(1);
 475         Node *I = n->in(3)->in(2);
 476         if (is_member(n_loop,get_ctrl(V))) {
 477         } else {
 478           Node *tmp = V; V = I; I = tmp;
 479         }
 480         if (!is_member(n_loop,get_ctrl(I))) {
 481           Node *add1 = new AddPNode(n->in(1), n->in(2), I);
 482           // Stuff new AddP in the loop preheader
 483           register_new_node(add1, n_loop->_head->in(LoopNode::EntryControl));
 484           Node *add2 = new AddPNode(n->in(1), add1, V);
 485           register_new_node(add2, n_ctrl);
 486           _igvn.replace_node(n, add2);
 487           return add2;
 488         }
 489       }
 490     }
 491   }
 492 
 493   return NULL;
 494 }
 495 
 496 // Optimize ((in1[2*i] * in2[2*i]) + (in1[2*i+1] * in2[2*i+1]))
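// For illustration (a hypothetical Java loop, not from the original comment):
//
//     short[] in1, in2;
//     int sum = 0;
//     for (int i = 0; i < n; i++) {
//       sum += in1[2*i] * in2[2*i] + in1[2*i+1] * in2[2*i+1];
//     }
//
// The AddI of two MulI nodes whose operands are all LoadS from the same pair
// of arrays is collapsed into a single MulAddS2I node, provided the platform
// supports Op_MulAddS2I and Op_MulAddVS2VI, which later helps vectorization.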
 497 Node *PhaseIdealLoop::convert_add_to_muladd(Node* n) {
 498   assert(n->Opcode() == Op_AddI, "sanity");
 499   Node * nn = NULL;
 500   Node * in1 = n->in(1);
 501   Node * in2 = n->in(2);
 502   if (in1->Opcode() == Op_MulI && in2->Opcode() == Op_MulI) {
 503     IdealLoopTree* loop_n = get_loop(get_ctrl(n));
 504     if (loop_n->_head->as_Loop()->is_valid_counted_loop() &&
 505         Matcher::match_rule_supported(Op_MulAddS2I) &&
 506         Matcher::match_rule_supported(Op_MulAddVS2VI)) {
 507       Node* mul_in1 = in1->in(1);
 508       Node* mul_in2 = in1->in(2);
 509       Node* mul_in3 = in2->in(1);
 510       Node* mul_in4 = in2->in(2);
 511       if (mul_in1->Opcode() == Op_LoadS &&
 512           mul_in2->Opcode() == Op_LoadS &&
 513           mul_in3->Opcode() == Op_LoadS &&
 514           mul_in4->Opcode() == Op_LoadS) {
 515         IdealLoopTree* loop1 = get_loop(get_ctrl(mul_in1));
 516         IdealLoopTree* loop2 = get_loop(get_ctrl(mul_in2));
 517         IdealLoopTree* loop3 = get_loop(get_ctrl(mul_in3));
 518         IdealLoopTree* loop4 = get_loop(get_ctrl(mul_in4));
 519         IdealLoopTree* loop5 = get_loop(get_ctrl(in1));
 520         IdealLoopTree* loop6 = get_loop(get_ctrl(in2));
 521         // All nodes should be in the same counted loop.
 522         if (loop_n == loop1 && loop_n == loop2 && loop_n == loop3 &&
 523             loop_n == loop4 && loop_n == loop5 && loop_n == loop6) {
 524           Node* adr1 = mul_in1->in(MemNode::Address);
 525           Node* adr2 = mul_in2->in(MemNode::Address);
 526           Node* adr3 = mul_in3->in(MemNode::Address);
 527           Node* adr4 = mul_in4->in(MemNode::Address);
 528           if (adr1->is_AddP() && adr2->is_AddP() && adr3->is_AddP() && adr4->is_AddP()) {
 529             if ((adr1->in(AddPNode::Base) == adr3->in(AddPNode::Base)) &&
 530                 (adr2->in(AddPNode::Base) == adr4->in(AddPNode::Base))) {
 531               nn = new MulAddS2INode(mul_in1, mul_in2, mul_in3, mul_in4);
 532               register_new_node(nn, get_ctrl(n));
 533               _igvn.replace_node(n, nn);
 534               return nn;
 535             } else if ((adr1->in(AddPNode::Base) == adr4->in(AddPNode::Base)) &&
 536                        (adr2->in(AddPNode::Base) == adr3->in(AddPNode::Base))) {
 537               nn = new MulAddS2INode(mul_in1, mul_in2, mul_in4, mul_in3);
 538               register_new_node(nn, get_ctrl(n));
 539               _igvn.replace_node(n, nn);
 540               return nn;
 541             }
 542           }
 543         }
 544       }
 545     }
 546   }
 547   return nn;
 548 }
 549 
 550 //------------------------------conditional_move-------------------------------
 551 // Attempt to replace a Phi with a conditional move.  We have some pretty
 552 // strict profitability requirements.  All Phis at the merge point must
 553 // be converted, so we can remove the control flow.  We need to limit the
 554 // number of c-moves to a small handful.  All code that was in the side-arms
 555 // of the CFG diamond is now speculatively executed.  This code has to be
 556 // "cheap enough".  We are pretty much limited to CFG diamonds that merge
 557 // 1 or 2 items with a total of 1 or 2 ops executed speculatively.
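// For illustration (a hypothetical Java snippet, not from the original
// comment): a small diamond such as
//
//     int x = (a < b) ? p : q;
//
// merges a single Phi at the Region. If the profitability checks below pass,
// the Phi becomes a CMoveI, the If/Region diamond folds away, and both p and q
// are computed speculatively.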
 558 Node *PhaseIdealLoop::conditional_move( Node *region ) {
 559 
 560   assert(region->is_Region(), "sanity check");
 561   if (region->req() != 3) return NULL;
 562 
 563   // Check for CFG diamond
 564   Node *lp = region->in(1);
 565   Node *rp = region->in(2);
 566   if (!lp || !rp) return NULL;
 567   Node *lp_c = lp->in(0);
 568   if (lp_c == NULL || lp_c != rp->in(0) || !lp_c->is_If()) return NULL;
 569   IfNode *iff = lp_c->as_If();
 570 
 571   // Check for ops pinned in an arm of the diamond.
 572   // Can't remove the control flow in this case
 573   if (lp->outcnt() > 1) return NULL;
 574   if (rp->outcnt() > 1) return NULL;
 575 
 576   IdealLoopTree* r_loop = get_loop(region);
 577   assert(r_loop == get_loop(iff), "sanity");
 578   // Always convert to CMOVE if all results are used only outside this loop.
 579   bool used_inside_loop = (r_loop == _ltree_root);
 580 
 581   // Check profitability
 582   int cost = 0;
 583   int phis = 0;
 584   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
 585     Node *out = region->fast_out(i);
 586     if (!out->is_Phi()) continue; // Ignore other control edges, etc
 587     phis++;
 588     PhiNode* phi = out->as_Phi();
 589     BasicType bt = phi->type()->basic_type();
 590     switch (bt) {
 591     case T_DOUBLE:
 592     case T_FLOAT:
 593       if (C->use_cmove()) {
 594         continue; //TODO: maybe we want to add some cost
 595       }
 596       cost += Matcher::float_cmove_cost(); // Could be very expensive
 597       break;
 598     case T_LONG: {
      cost += Matcher::long_cmove_cost(); // May encode as 2 CMOVs
 600     }
 601     case T_INT:                 // These all CMOV fine
 602     case T_ADDRESS: {           // (RawPtr)
 603       cost++;
 604       break;
 605     }
 606     case T_NARROWOOP: // Fall through
 607     case T_OBJECT: {            // Base oops are OK, but not derived oops
 608       const TypeOopPtr *tp = phi->type()->make_ptr()->isa_oopptr();
 609       // Derived pointers are Bad (tm): what's the Base (for GC purposes) of a
 610       // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
 611       // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
 612       // have a Phi for the base here that we convert to a CMOVE all is well
 613       // and good.  But if the base is dead, we'll not make a CMOVE.  Later
 614       // the allocator will have to produce a base by creating a CMOVE of the
 615       // relevant bases.  This puts the allocator in the business of
 616       // manufacturing expensive instructions, generally a bad plan.
 617       // Just Say No to Conditionally-Moved Derived Pointers.
 618       if (tp && tp->offset() != 0)
 619         return NULL;
 620       cost++;
 621       break;
 622     }
 623     default:
 624       return NULL;              // In particular, can't do memory or I/O
 625     }
 626     // Add in cost any speculative ops
 627     for (uint j = 1; j < region->req(); j++) {
 628       Node *proj = region->in(j);
 629       Node *inp = phi->in(j);
 630       if (get_ctrl(inp) == proj) { // Found local op
 631         cost++;
 632         // Check for a chain of dependent ops; these will all become
 633         // speculative in a CMOV.
 634         for (uint k = 1; k < inp->req(); k++)
 635           if (get_ctrl(inp->in(k)) == proj)
 636             cost += ConditionalMoveLimit; // Too much speculative goo
 637       }
 638     }
 639     // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
 640     // This will likely Split-If, a higher-payoff operation.
 641     for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
 642       Node* use = phi->fast_out(k);
 643       if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
 644         cost += ConditionalMoveLimit;
 645       // Is there a use inside the loop?
 646       // Note: check only basic types since CMoveP is pinned.
 647       if (!used_inside_loop && is_java_primitive(bt)) {
 648         IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
 649         if (r_loop == u_loop || r_loop->is_member(u_loop)) {
 650           used_inside_loop = true;
 651         }
 652       }
 653     }
 654   }//for
 655   Node* bol = iff->in(1);
 656   assert(bol->Opcode() == Op_Bool, "");
 657   int cmp_op = bol->in(1)->Opcode();
 658   // It is expensive to generate flags from a float compare.
 659   // Avoid duplicated float compare.
 660   if (phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return NULL;
 661 
 662   float infrequent_prob = PROB_UNLIKELY_MAG(3);
  // Ignore cost and block frequency if CMOVE can be moved outside the loop.
 664   if (used_inside_loop) {
 665     if (cost >= ConditionalMoveLimit) return NULL; // Too much goo
 666 
    // The BlockLayoutByFrequency optimization moves an infrequent branch
    // off the hot path. No point in CMOV'ing in such a case (110 is used
    // instead of 100 to account for the inexactness of the float value).
 670     if (BlockLayoutByFrequency) {
 671       infrequent_prob = MAX2(infrequent_prob, (float)BlockLayoutMinDiamondPercentage/110.0f);
 672     }
 673   }
 674   // Check for highly predictable branch.  No point in CMOV'ing if
 675   // we are going to predict accurately all the time.
 676   if (C->use_cmove() && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) {
 677     //keep going
 678   } else if (iff->_prob < infrequent_prob ||
 679       iff->_prob > (1.0f - infrequent_prob))
 680     return NULL;
 681 
 682   // --------------
 683   // Now replace all Phis with CMOV's
 684   Node *cmov_ctrl = iff->in(0);
 685   uint flip = (lp->Opcode() == Op_IfTrue);
 686   Node_List wq;
 687   while (1) {
 688     PhiNode* phi = NULL;
 689     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
 690       Node *out = region->fast_out(i);
 691       if (out->is_Phi()) {
 692         phi = out->as_Phi();
 693         break;
 694       }
 695     }
 696     if (phi == NULL)  break;
 697     if (PrintOpto && VerifyLoopOptimizations) { tty->print_cr("CMOV"); }
 698     // Move speculative ops
 699     wq.push(phi);
 700     while (wq.size() > 0) {
 701       Node *n = wq.pop();
 702       for (uint j = 1; j < n->req(); j++) {
 703         Node* m = n->in(j);
 704         if (m != NULL && !is_dominator(get_ctrl(m), cmov_ctrl)) {
 705 #ifndef PRODUCT
 706           if (PrintOpto && VerifyLoopOptimizations) {
 707             tty->print("  speculate: ");
 708             m->dump();
 709           }
 710 #endif
 711           set_ctrl(m, cmov_ctrl);
 712           wq.push(m);
 713         }
 714       }
 715     }
 716     Node *cmov = CMoveNode::make(cmov_ctrl, iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi));
 717     register_new_node( cmov, cmov_ctrl );
 718     _igvn.replace_node( phi, cmov );
 719 #ifndef PRODUCT
 720     if (TraceLoopOpts) {
 721       tty->print("CMOV  ");
 722       r_loop->dump_head();
 723       if (Verbose) {
 724         bol->in(1)->dump(1);
 725         cmov->dump(1);
 726       }
 727     }
 728     if (VerifyLoopOptimizations) verify();
 729 #endif
 730   }
 731 
 732   // The useless CFG diamond will fold up later; see the optimization in
 733   // RegionNode::Ideal.
 734   _igvn._worklist.push(region);
 735 
 736   return iff->in(1);
 737 }
 738 
 739 static void enqueue_cfg_uses(Node* m, Unique_Node_List& wq) {
 740   for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
 741     Node* u = m->fast_out(i);
 742     if (u->is_CFG()) {
 743       if (u->Opcode() == Op_NeverBranch) {
 744         u = ((NeverBranchNode*)u)->proj_out(0);
 745         enqueue_cfg_uses(u, wq);
 746       } else {
 747         wq.push(u);
 748       }
 749     }
 750   }
 751 }
 752 
 753 // Try moving a store out of a loop, right before the loop
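// For illustration (a hypothetical Java shape, not from the original comment):
//
//     for (int i = 0; i < n; i++) {
//       o.f = v;       // o and v loop invariant, o.f never read in the loop,
//       ...            // no early exit before the store
//     }
//
// Every iteration stores the same value to the same location, so under the
// conditions listed below the store can be executed once, before the loop.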
 754 Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
 755   // Store has to be first in the loop body
 756   IdealLoopTree *n_loop = get_loop(n_ctrl);
 757   if (n->is_Store() && n_loop != _ltree_root &&
 758       n_loop->is_loop() && n_loop->_head->is_Loop() &&
 759       n->in(0) != NULL) {
 760     Node* address = n->in(MemNode::Address);
 761     Node* value = n->in(MemNode::ValueIn);
 762     Node* mem = n->in(MemNode::Memory);
 763     IdealLoopTree* address_loop = get_loop(get_ctrl(address));
 764     IdealLoopTree* value_loop = get_loop(get_ctrl(value));
 765 
 766     // - address and value must be loop invariant
 767     // - memory must be a memory Phi for the loop
 768     // - Store must be the only store on this memory slice in the
    // loop: if there's another store following this one then the value
 770     // written at iteration i by the second store could be overwritten
 771     // at iteration i+n by the first store: it's not safe to move the
 772     // first store out of the loop
    // - nothing must observe the memory Phi: this guarantees no read
    // before the store, and we are also guaranteed that the store post
    // dominates the loop head (ignoring a possible early
    // exit). Otherwise there would be an extra Phi involved between the
    // loop's Phi and the store.
 778     // - there must be no early exit from the loop before the Store
 779     // (such an exit most of the time would be an extra use of the
 780     // memory Phi but sometimes is a bottom memory Phi that takes the
 781     // store as input).
 782 
 783     if (!n_loop->is_member(address_loop) &&
 784         !n_loop->is_member(value_loop) &&
 785         mem->is_Phi() && mem->in(0) == n_loop->_head &&
 786         mem->outcnt() == 1 &&
 787         mem->in(LoopNode::LoopBackControl) == n) {
 788 
 789       assert(n_loop->_tail != NULL, "need a tail");
 790       assert(is_dominator(n_ctrl, n_loop->_tail), "store control must not be in a branch in the loop");
 791 
 792       // Verify that there's no early exit of the loop before the store.
 793       bool ctrl_ok = false;
 794       {
 795         // Follow control from loop head until n, we exit the loop or
 796         // we reach the tail
 797         ResourceMark rm;
 798         Unique_Node_List wq;
 799         wq.push(n_loop->_head);
 800 
 801         for (uint next = 0; next < wq.size(); ++next) {
 802           Node *m = wq.at(next);
 803           if (m == n->in(0)) {
 804             ctrl_ok = true;
 805             continue;
 806           }
 807           assert(!has_ctrl(m), "should be CFG");
 808           if (!n_loop->is_member(get_loop(m)) || m == n_loop->_tail) {
 809             ctrl_ok = false;
 810             break;
 811           }
 812           enqueue_cfg_uses(m, wq);
 813           if (wq.size() > 10) {
 814             ctrl_ok = false;
 815             break;
 816           }
 817         }
 818       }
 819       if (ctrl_ok) {
 820         // move the Store
 821         _igvn.replace_input_of(mem, LoopNode::LoopBackControl, mem);
 822         _igvn.replace_input_of(n, 0, n_loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl));
 823         _igvn.replace_input_of(n, MemNode::Memory, mem->in(LoopNode::EntryControl));
 824         // Disconnect the phi now. An empty phi can confuse other
 825         // optimizations in this pass of loop opts.
 826         _igvn.replace_node(mem, mem->in(LoopNode::EntryControl));
 827         n_loop->_body.yank(mem);
 828 
 829         set_ctrl_and_loop(n, n->in(0));
 830 
 831         return n;
 832       }
 833     }
 834   }
 835   return NULL;
 836 }
 837 
 838 // Try moving a store out of a loop, right after the loop
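// For illustration (a hypothetical Java shape, not from the original comment):
//
//     for (int i = 0; i < n; i++) {
//       o.f = i * 2;   // o loop invariant, o.f never read in the loop
//     }
//
// Only the last iteration's store is observable after the loop, so if the
// checks below succeed the store is sunk past the loop exit (keeping the value
// computed by the final iteration).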
 839 void PhaseIdealLoop::try_move_store_after_loop(Node* n) {
 840   if (n->is_Store() && n->in(0) != NULL) {
 841     Node *n_ctrl = get_ctrl(n);
 842     IdealLoopTree *n_loop = get_loop(n_ctrl);
 843     // Store must be in a loop
 844     if (n_loop != _ltree_root && !n_loop->_irreducible) {
 845       Node* address = n->in(MemNode::Address);
 846       Node* value = n->in(MemNode::ValueIn);
 847       IdealLoopTree* address_loop = get_loop(get_ctrl(address));
 848       // address must be loop invariant
 849       if (!n_loop->is_member(address_loop)) {
 850         // Store must be last on this memory slice in the loop and
 851         // nothing in the loop must observe it
 852         Node* phi = NULL;
 853         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 854           Node* u = n->fast_out(i);
 855           if (has_ctrl(u)) { // control use?
 856             IdealLoopTree *u_loop = get_loop(get_ctrl(u));
 857             if (!n_loop->is_member(u_loop)) {
 858               continue;
 859             }
 860             if (u->is_Phi() && u->in(0) == n_loop->_head) {
 861               assert(_igvn.type(u) == Type::MEMORY, "bad phi");
 862               // multiple phis on the same slice are possible
 863               if (phi != NULL) {
 864                 return;
 865               }
 866               phi = u;
 867               continue;
 868             }
 869           }
 870           return;
 871         }
 872         if (phi != NULL) {
 873           // Nothing in the loop before the store (next iteration)
 874           // must observe the stored value
 875           bool mem_ok = true;
 876           {
 877             ResourceMark rm;
 878             Unique_Node_List wq;
 879             wq.push(phi);
 880             for (uint next = 0; next < wq.size() && mem_ok; ++next) {
 881               Node *m = wq.at(next);
 882               for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax && mem_ok; i++) {
 883                 Node* u = m->fast_out(i);
 884                 if (u->is_Store() || u->is_Phi()) {
 885                   if (u != n) {
 886                     wq.push(u);
 887                     mem_ok = (wq.size() <= 10);
 888                   }
 889                 } else {
 890                   mem_ok = false;
 891                   break;
 892                 }
 893               }
 894             }
 895           }
 896           if (mem_ok) {
 897             // Move the store out of the loop if the LCA of all
 898             // users (except for the phi) is outside the loop.
 899             Node* hook = new Node(1);
 900             _igvn.rehash_node_delayed(phi);
 901             int count = phi->replace_edge(n, hook);
 902             assert(count > 0, "inconsistent phi");
 903 
 904             // Compute latest point this store can go
 905             Node* lca = get_late_ctrl(n, get_ctrl(n));
 906             if (n_loop->is_member(get_loop(lca))) {
 907               // LCA is in the loop - bail out
 908               _igvn.replace_node(hook, n);
 909               return;
 910             }
 911 #ifdef ASSERT
 912             if (n_loop->_head->is_Loop() && n_loop->_head->as_Loop()->is_strip_mined()) {
 913               assert(n_loop->_head->Opcode() == Op_CountedLoop, "outer loop is a strip mined");
 914               n_loop->_head->as_Loop()->verify_strip_mined(1);
 915               Node* outer = n_loop->_head->as_CountedLoop()->outer_loop();
 916               IdealLoopTree* outer_loop = get_loop(outer);
 917               assert(n_loop->_parent == outer_loop, "broken loop tree");
              assert(get_loop(lca) == outer_loop, "safepoint in outer loop consumes all memory state");
 919             }
 920 #endif
 921 
 922             // Move store out of the loop
 923             _igvn.replace_node(hook, n->in(MemNode::Memory));
 924             _igvn.replace_input_of(n, 0, lca);
 925             set_ctrl_and_loop(n, lca);
 926 
 927             // Disconnect the phi now. An empty phi can confuse other
            // optimizations in this pass of loop opts.
 929             if (phi->in(LoopNode::LoopBackControl) == phi) {
 930               _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
 931               n_loop->_body.yank(phi);
 932             }
 933           }
 934         }
 935       }
 936     }
 937   }
 938 }
 939 
 940 //------------------------------split_if_with_blocks_pre-----------------------
 941 // Do the real work in a non-recursive function.  Data nodes want to be
 942 // cloned in the pre-order so they can feed each other nicely.
 943 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
 944   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 945   Node* bs_res = bs->split_if_pre(this, n);
 946   if (bs_res != NULL) {
 947     return bs_res;
 948   }
 949   // Cloning these guys is unlikely to win
 950   int n_op = n->Opcode();
 951   if( n_op == Op_MergeMem ) return n;
 952   if( n->is_Proj() ) return n;
 953   // Do not clone-up CmpFXXX variations, as these are always
 954   // followed by a CmpI
 955   if( n->is_Cmp() ) return n;
 956   // Attempt to use a conditional move instead of a phi/branch
 957   if( ConditionalMoveLimit > 0 && n_op == Op_Region ) {
 958     Node *cmov = conditional_move( n );
 959     if( cmov ) return cmov;
 960   }
 961   if( n->is_CFG() || n->is_LoadStore() )
 962     return n;
 963   if( n_op == Op_Opaque1 ||     // Opaque nodes cannot be mod'd
 964       n_op == Op_Opaque2 ) {
 965     if( !C->major_progress() )   // If chance of no more loop opts...
 966       _igvn._worklist.push(n);  // maybe we'll remove them
 967     return n;
 968   }
 969 
 970   if( n->is_Con() ) return n;   // No cloning for Con nodes
 971 
 972   Node *n_ctrl = get_ctrl(n);
 973   if( !n_ctrl ) return n;       // Dead node
 974 
 975   Node* res = try_move_store_before_loop(n, n_ctrl);
 976   if (res != NULL) {
 977     return n;
 978   }
 979 
 980   // Attempt to remix address expressions for loop invariants
 981   Node *m = remix_address_expressions( n );
 982   if( m ) return m;
 983 
 984   if (n_op == Op_AddI) {
 985     Node *nn = convert_add_to_muladd( n );
 986     if ( nn ) return nn;
 987   }
 988 
 989   if (n->is_ConstraintCast()) {
 990     Node* dom_cast = n->as_ConstraintCast()->dominating_cast(&_igvn, this);
 991     // ConstraintCastNode::dominating_cast() uses node control input to determine domination.
    // Node control inputs don't necessarily agree with loop control info (due to
    // transformations that happened in between), thus an additional dominance
    // check is needed to keep loop info valid.
 995     if (dom_cast != NULL && is_dominator(get_ctrl(dom_cast), get_ctrl(n))) {
 996       _igvn.replace_node(n, dom_cast);
 997       return dom_cast;
 998     }
 999   }
1000 
1001   // Determine if the Node has inputs from some local Phi.
1002   // Returns the block to clone thru.
1003   Node *n_blk = has_local_phi_input( n );
1004   if( !n_blk ) return n;
1005 
1006   // Do not clone the trip counter through on a CountedLoop
1007   // (messes up the canonical shape).
1008   if( n_blk->is_CountedLoop() && n->Opcode() == Op_AddI ) return n;
1009 
1010   // Check for having no control input; not pinned.  Allow
1011   // dominating control.
1012   if (n->in(0)) {
1013     Node *dom = idom(n_blk);
1014     if (dom_lca(n->in(0), dom) != n->in(0)) {
1015       return n;
1016     }
1017   }
1018   // Policy: when is it profitable.  You must get more wins than
1019   // policy before it is considered profitable.  Policy is usually 0,
1020   // so 1 win is considered profitable.  Big merges will require big
1021   // cloning, so get a larger policy.
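  // For example (illustration only): a two-way merge has req() == 3, giving
  // policy 0 (one win suffices), while an eight-way merge has req() == 9,
  // giving policy 2 (at least three wins are required).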
1022   int policy = n_blk->req() >> 2;
1023 
1024   // If the loop is a candidate for range check elimination,
  // delay splitting through its phi until a later loop optimization
1026   if (n_blk->is_CountedLoop()) {
1027     IdealLoopTree *lp = get_loop(n_blk);
1028     if (lp && lp->_rce_candidate) {
1029       return n;
1030     }
1031   }
1032 
1033   if (must_throttle_split_if()) return n;
1034 
1035   // Split 'n' through the merge point if it is profitable
1036   Node *phi = split_thru_phi( n, n_blk, policy );
1037   if (!phi) return n;
1038 
1039   // Found a Phi to split thru!
1040   // Replace 'n' with the new phi
1041   _igvn.replace_node( n, phi );
1042   // Moved a load around the loop, 'en-registering' something.
1043   if (n_blk->is_Loop() && n->is_Load() &&
1044       !phi->in(LoopNode::LoopBackControl)->is_Load())
1045     C->set_major_progress();
1046 
1047   return phi;
1048 }
1049 
1050 static bool merge_point_too_heavy(Compile* C, Node* region) {
1051   // Bail out if the region and its phis have too many users.
1052   int weight = 0;
1053   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1054     weight += region->fast_out(i)->outcnt();
1055   }
1056   int nodes_left = C->max_node_limit() - C->live_nodes();
1057   if (weight * 8 > nodes_left) {
1058     if (PrintOpto) {
1059       tty->print_cr("*** Split-if bails out:  %d nodes, region weight %d", C->unique(), weight);
1060     }
1061     return true;
1062   } else {
1063     return false;
1064   }
1065 }
1066 
1067 static bool merge_point_safe(Node* region) {
1068   // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode
1069   // having a PhiNode input. This sidesteps the dangerous case where the split
1070   // ConvI2LNode may become TOP if the input Value() does not
1071   // overlap the ConvI2L range, leaving a node which may not dominate its
1072   // uses.
1073   // A better fix for this problem can be found in the BugTraq entry, but
1074   // expediency for Mantis demands this hack.
1075   // 6855164: If the merge point has a FastLockNode with a PhiNode input, we stop
1076   // split_if_with_blocks from splitting a block because we could not move around
1077   // the FastLockNode.
1078   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1079     Node* n = region->fast_out(i);
1080     if (n->is_Phi()) {
1081       for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
1082         Node* m = n->fast_out(j);
1083         if (m->is_FastLock())
1084           return false;
1085 #if INCLUDE_SHENANDOAHGC
1086         if (m->is_ShenandoahBarrier() && m->has_out_with(Op_FastLock)) {
1087           return false;
1088         }
1089 #endif
1090 #ifdef _LP64
1091         if (m->Opcode() == Op_ConvI2L)
1092           return false;
1093         if (m->is_CastII() && m->isa_CastII()->has_range_check()) {
1094           return false;
1095         }
1096 #endif
1097       }
1098     }
1099   }
1100   return true;
1101 }
1102 
1103 
1104 //------------------------------place_near_use---------------------------------
1105 // Place some computation next to use but not inside inner loops.
1106 // For inner loop uses move it to the preheader area.
1107 Node *PhaseIdealLoop::place_near_use(Node *useblock) const {
1108   IdealLoopTree *u_loop = get_loop( useblock );
1109   if (u_loop->_irreducible) {
1110     return useblock;
1111   }
1112   if (u_loop->_child) {
1113     if (useblock == u_loop->_head && u_loop->_head->is_OuterStripMinedLoop()) {
1114       return u_loop->_head->in(LoopNode::EntryControl);
1115     }
1116     return useblock;
1117   }
1118   return u_loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl);
1119 }
1120 
1121 
1122 bool PhaseIdealLoop::identical_backtoback_ifs(Node *n) {
1123   if (!n->is_If() || n->is_CountedLoopEnd()) {
1124     return false;
1125   }
1126   if (!n->in(0)->is_Region()) {
1127     return false;
1128   }
1129   Node* region = n->in(0);
1130   Node* dom = idom(region);
1131   if (!dom->is_If() || dom->in(1) != n->in(1)) {
1132     return false;
1133   }
1134   IfNode* dom_if = dom->as_If();
1135   Node* proj_true = dom_if->proj_out(1);
1136   Node* proj_false = dom_if->proj_out(0);
1137 
1138   for (uint i = 1; i < region->req(); i++) {
1139     if (is_dominator(proj_true, region->in(i))) {
1140       continue;
1141     }
1142     if (is_dominator(proj_false, region->in(i))) {
1143       continue;
1144     }
1145     return false;
1146   }
1147 
1148   return true;
1149 }
1150 
1151 
1152 bool PhaseIdealLoop::can_split_if(Node* n_ctrl) {
1153   if (must_throttle_split_if()) {
1154     return false;
1155   }
1156 
1157   // Do not do 'split-if' if irreducible loops are present.
1158   if (_has_irreducible_loops) {
1159     return false;
1160   }
1161 
1162   if (merge_point_too_heavy(C, n_ctrl)) {
1163     return false;
1164   }
1165 
1166   // Do not do 'split-if' if some paths are dead.  First do dead code
  // elimination and then see if it's still profitable.
1168   for (uint i = 1; i < n_ctrl->req(); i++) {
1169     if (n_ctrl->in(i) == C->top()) {
1170       return false;
1171     }
1172   }
1173 
1174   // If trying to do a 'Split-If' at the loop head, it is only
1175   // profitable if the cmp folds up on BOTH paths.  Otherwise we
1176   // risk peeling a loop forever.
1177 
1178   // CNC - Disabled for now.  Requires careful handling of loop
1179   // body selection for the cloned code.  Also, make sure we check
1180   // for any input path not being in the same loop as n_ctrl.  For
1181   // irreducible loops we cannot check for 'n_ctrl->is_Loop()'
1182   // because the alternative loop entry points won't be converted
1183   // into LoopNodes.
1184   IdealLoopTree *n_loop = get_loop(n_ctrl);
1185   for (uint j = 1; j < n_ctrl->req(); j++) {
1186     if (get_loop(n_ctrl->in(j)) != n_loop) {
1187       return false;
1188     }
1189   }
1190 
1191   // Check for safety of the merge point.
1192   if (!merge_point_safe(n_ctrl)) {
1193     return false;
1194   }
1195 
1196   return true;
1197 }
1198 
1199 //------------------------------split_if_with_blocks_post----------------------
1200 // Do the real work in a non-recursive function.  CFG hackery wants to be
1201 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1202 // info.
1203 void PhaseIdealLoop::split_if_with_blocks_post(Node *n, bool last_round) {
1204 
1205   // Cloning Cmp through Phi's involves the split-if transform.
1206   // FastLock is not used by an If
1207   if (n->is_Cmp() && !n->is_FastLock() && !last_round) {
1208     Node *n_ctrl = get_ctrl(n);
1209     // Determine if the Node has inputs from some local Phi.
1210     // Returns the block to clone thru.
1211     Node *n_blk = has_local_phi_input(n);
1212     if (n_blk != n_ctrl) {
1213       return;
1214     }
1215 
1216     if (!can_split_if(n_ctrl)) {
1217       return;
1218     }
1219 
1220     if (n->outcnt() != 1) {
1221       return; // Multiple bool's from 1 compare?
1222     }
1223     Node *bol = n->unique_out();
1224     assert(bol->is_Bool(), "expect a bool here");
1225     if (bol->outcnt() != 1) {
      return; // Multiple branches from 1 compare?
1227     }
1228     Node *iff = bol->unique_out();
1229 
1230     // Check some safety conditions
1231     if (iff->is_If()) {        // Classic split-if?
1232       if (iff->in(0) != n_ctrl) {
1233         return; // Compare must be in same blk as if
1234       }
1235     } else if (iff->is_CMove()) { // Trying to split-up a CMOVE
1236       // Can't split CMove with different control edge.
1237       if (iff->in(0) != NULL && iff->in(0) != n_ctrl ) {
1238         return;
1239       }
1240       if (get_ctrl(iff->in(2)) == n_ctrl ||
1241           get_ctrl(iff->in(3)) == n_ctrl) {
1242         return;                 // Inputs not yet split-up
1243       }
1244       if (get_loop(n_ctrl) != get_loop(get_ctrl(iff))) {
1245         return;                 // Loop-invar test gates loop-varying CMOVE
1246       }
1247     } else {
1248       return;  // some other kind of node, such as an Allocate
1249     }
1250 
    // When is split-if profitable?  Every 'win' means some control flow
    // goes dead, so it's almost always a win.
1253     int policy = 0;
1254     // Split compare 'n' through the merge point if it is profitable
1255     Node *phi = split_thru_phi( n, n_ctrl, policy);
1256     if (!phi) {
1257       return;
1258     }
1259 
1260     // Found a Phi to split thru!
1261     // Replace 'n' with the new phi
1262     _igvn.replace_node(n, phi);
1263 
1264     // Now split the bool up thru the phi
1265     Node *bolphi = split_thru_phi(bol, n_ctrl, -1);
1266     guarantee(bolphi != NULL, "null boolean phi node");
1267 
1268     _igvn.replace_node(bol, bolphi);
1269     assert(iff->in(1) == bolphi, "");
1270 
1271     if (bolphi->Value(&_igvn)->singleton()) {
1272       return;
1273     }
1274 
1275     // Conditional-move?  Must split up now
1276     if (!iff->is_If()) {
1277       Node *cmovphi = split_thru_phi(iff, n_ctrl, -1);
1278       _igvn.replace_node(iff, cmovphi);
1279       return;
1280     }
1281 
1282     // Now split the IF
1283     do_split_if(iff);
1284     return;
1285   }
1286 
1287   // Two identical ifs back to back can be merged
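  // For illustration (a hypothetical Java shape, not from the original comment):
  //
  //     if (a < b) { x(); } else { y(); }
  //     if (a < b) { ... }               // same Bool, region merges both arms
  //
  // A Phi of constants (1 on paths dominated by the first test's true
  // projection, 0 otherwise) replaces the second If's condition, after which
  // split-if eliminates the duplicated test.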
1288   if (identical_backtoback_ifs(n) && can_split_if(n->in(0))) {
1289     Node *n_ctrl = n->in(0);
1290     PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
1291     IfNode* dom_if = idom(n_ctrl)->as_If();
1292     Node* proj_true = dom_if->proj_out(1);
1293     Node* proj_false = dom_if->proj_out(0);
1294     Node* con_true = _igvn.makecon(TypeInt::ONE);
1295     Node* con_false = _igvn.makecon(TypeInt::ZERO);
1296 
1297     for (uint i = 1; i < n_ctrl->req(); i++) {
1298       if (is_dominator(proj_true, n_ctrl->in(i))) {
1299         bolphi->init_req(i, con_true);
1300       } else {
1301         assert(is_dominator(proj_false, n_ctrl->in(i)), "bad if");
1302         bolphi->init_req(i, con_false);
1303       }
1304     }
1305     register_new_node(bolphi, n_ctrl);
1306     _igvn.replace_input_of(n, 1, bolphi);
1307 
1308     // Now split the IF
1309     do_split_if(n);
1310     return;
1311   }
1312 
1313   // Check for an IF ready to split; one that has its
1314   // condition codes input coming from a Phi at the block start.
1315   int n_op = n->Opcode();
1316 
  // Check for an IF being dominated by another IF with the same test
1318   if (n_op == Op_If ||
1319       n_op == Op_RangeCheck) {
1320     Node *bol = n->in(1);
1321     uint max = bol->outcnt();
1322     // Check for same test used more than once?
1323     if (max > 1 && bol->is_Bool()) {
1324       // Search up IDOMs to see if this IF is dominated.
1325       Node *cutoff = get_ctrl(bol);
1326 
1327       // Now search up IDOMs till cutoff, looking for a dominating test
1328       Node *prevdom = n;
1329       Node *dom = idom(prevdom);
1330       while (dom != cutoff) {
1331         if (dom->req() > 1 && dom->in(1) == bol && prevdom->in(0) == dom) {
1332           // Replace the dominated test with an obvious true or false.
1333           // Place it on the IGVN worklist for later cleanup.
1334           C->set_major_progress();
1335           dominated_by(prevdom, n, false, true);
1336 #ifndef PRODUCT
1337           if( VerifyLoopOptimizations ) verify();
1338 #endif
1339           return;
1340         }
1341         prevdom = dom;
1342         dom = idom(prevdom);
1343       }
1344     }
1345   }
1346 
1347   // See if a shared loop-varying computation has no loop-varying uses.
1348   // Happens if something is only used for JVM state in uncommon trap exits,
1349   // like various versions of induction variable+offset.  Clone the
1350   // computation per usage to allow it to sink out of the loop.
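  // For illustration (a hypothetical shape, not from the original comment):
  //
  //     for (int i = 0; i < n; i++) {
  //       int t = i + offset;            // only kept alive for deoptimization
  //       if (rare_condition) { /* uncommon trap uses t in its JVM state */ }
  //       ...
  //     }
  //
  // 't' is loop varying but has no loop-varying use, so each use gets its own
  // clone of the computation, placed near that use, allowing it to sink out of
  // the loop.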
1351   if (has_ctrl(n) && !n->in(0)) {// n not dead and has no control edge (can float about)
1352     Node *n_ctrl = get_ctrl(n);
1353     IdealLoopTree *n_loop = get_loop(n_ctrl);
1354     if( n_loop != _ltree_root ) {
1355       DUIterator_Fast imax, i = n->fast_outs(imax);
1356       for (; i < imax; i++) {
1357         Node* u = n->fast_out(i);
1358         if( !has_ctrl(u) )     break; // Found control user
1359         IdealLoopTree *u_loop = get_loop(get_ctrl(u));
1360         if( u_loop == n_loop ) break; // Found loop-varying use
1361         if( n_loop->is_member( u_loop ) ) break; // Found use in inner loop
1362         if( u->Opcode() == Op_Opaque1 ) break; // Found loop limit, bugfix for 4677003
1363       }
1364       bool did_break = (i < imax);  // Did we break out of the previous loop?
1365       if (!did_break && n->outcnt() > 1) { // All uses in outer loops!
1366         Node *late_load_ctrl = NULL;
1367         if (n->is_Load()) {
1368           // If n is a load, get and save the result from get_late_ctrl(),
1369           // to be later used in calculating the control for n's clones.
1370           clear_dom_lca_tags();
1371           late_load_ctrl = get_late_ctrl(n, n_ctrl);
1372         }
1373         // If n is a load, and the late control is the same as the current
1374         // control, then the cloning of n is a pointless exercise, because
1375         // GVN will ensure that we end up where we started.
1376         if (!n->is_Load() || late_load_ctrl != n_ctrl) {
1377           BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1378           for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin; ) {
1379             Node *u = n->last_out(j); // Clone private computation per use
1380             _igvn.rehash_node_delayed(u);
1381             Node *x = n->clone(); // Clone computation
1382             Node *x_ctrl = NULL;
1383             if( u->is_Phi() ) {
1384               // Replace all uses of normal nodes.  Replace Phi uses
1385               // individually, so the separate Nodes can sink down
1386               // different paths.
1387               uint k = 1;
1388               while( u->in(k) != n ) k++;
1389               u->set_req( k, x );
1390               // x goes next to Phi input path
1391               x_ctrl = u->in(0)->in(k);
1392               --j;
1393             } else {              // Normal use
1394               // Replace all uses
1395               for( uint k = 0; k < u->req(); k++ ) {
1396                 if( u->in(k) == n ) {
1397                   u->set_req( k, x );
1398                   --j;
1399                 }
1400               }
1401               x_ctrl = get_ctrl(u);
1402             }
1403 
1404             // Find control for 'x' next to use but not inside inner loops.
1405             // For inner loop uses get the preheader area.
1406             x_ctrl = place_near_use(x_ctrl);
1407 
1408             if (bs->sink_node(this, n, x, x_ctrl, n_ctrl)) {
1409               continue;
1410             }
1411 
1412             if (n->is_Load()) {
1413               // For loads, add a control edge to a CFG node outside of the loop
1414               // to force them to not combine and return back inside the loop
1415               // during GVN optimization (4641526).
1416               //
1417               // Because we are setting the actual control input, factor in
1418               // the result from get_late_ctrl() so we respect any
1419               // anti-dependences. (6233005).
1420               x_ctrl = dom_lca(late_load_ctrl, x_ctrl);
1421 
1422               // Don't allow the control input to be a CFG splitting node.
1423               // Such nodes should only have ProjNodes as outs, e.g. IfNode
1424               // should only have IfTrueNode and IfFalseNode (4985384).
1425               x_ctrl = find_non_split_ctrl(x_ctrl);
1426               assert(dom_depth(n_ctrl) <= dom_depth(x_ctrl), "n is later than its clone");
1427 
1428               x->set_req(0, x_ctrl);
1429             }
1430             register_new_node(x, x_ctrl);
1431 
1432             // Some institutional knowledge is needed here: 'x' is
1433             // yanked from the IGVN worklist because, if GVN runs on it,
1434             // all the cloned x's will common up, undoing this optimization
1435             // and forcing them back into the loop.  This is annoying
1436             // because it makes +VerifyOpto report false positives on
1437             // progress.  I tried setting control edges on the x's to keep
1438             // them from combining, but then the matcher gets confused when
1439             // it tries to fold a StoreP and an AddP together (as part of
1440             // an address expression) and the AddP and StoreP have
1441             // different controls.
1442             if (!x->is_Load() && !x->is_DecodeNarrowPtr()) _igvn._worklist.yank(x);
1443           }
1444           _igvn.remove_dead_node(n);
1445         }
1446       }
1447     }
1448   }
1449 
1450   try_move_store_after_loop(n);
1451 
1452   // Check for Opaque2 nodes whose loop has disappeared - whose input is in the
1453   // same loop nest as their output.  Remove them; they are no longer useful.
1454   if( n_op == Op_Opaque2 &&
1455       n->in(1) != NULL &&
1456       get_loop(get_ctrl(n)) == get_loop(get_ctrl(n->in(1))) ) {
1457     _igvn.replace_node( n, n->in(1) );
1458   }
1459 
1460 #if INCLUDE_ZGC
1461   if (UseZGC) {
1462     ZBarrierSetC2::loop_optimize_gc_barrier(this, n, last_round);
1463   }
1464 #endif
1465 }
1466 
1467 //------------------------------split_if_with_blocks---------------------------
1468 // Check for aggressive application of 'split-if' optimization,
1469 // using basic block level info.
1470 void PhaseIdealLoop::split_if_with_blocks(VectorSet &visited, Node_Stack &nstack, bool last_round) {
1471   Node* root = C->root();
1472   visited.set(root->_idx); // first, mark root as visited
1473   // Do pre-visit work for root
1474   Node* n   = split_if_with_blocks_pre(root);
1475   uint  cnt = n->outcnt();
1476   uint  i   = 0;
1477 
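       // Iterative depth-first traversal from the root using nstack instead of
       // recursion: split_if_with_blocks_pre runs when a node is first reached,
       // split_if_with_blocks_post once all of its uses have been visited.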
1478   while (true) {
1479     // Visit all children
1480     if (i < cnt) {
1481       Node* use = n->raw_out(i);
1482       ++i;
1483       if (use->outcnt() != 0 && !visited.test_set(use->_idx)) {
1484         // Now do pre-visit work for this use
1485         use = split_if_with_blocks_pre(use);
1486         nstack.push(n, i); // Save parent and next use's index.
1487         n   = use;         // Process all children of current use.
1488         cnt = use->outcnt();
1489         i   = 0;
1490       }
1491     }
1492     else {
1493       // All of n's children have been processed, complete post-processing.
1494       if (cnt != 0 && !n->is_Con()) {
1495         assert(has_node(n), "no dead nodes");
1496         split_if_with_blocks_post(n, last_round);
1497       }
1498       if (must_throttle_split_if()) {
1499         nstack.clear();
1500       }
1501       if (nstack.is_empty()) {
1502         // Finished all nodes on stack.
1503         break;
1504       }
1505       // Get saved parent node and next use's index. Visit the rest of uses.
1506       n   = nstack.node();
1507       cnt = n->outcnt();
1508       i   = nstack.index();
1509       nstack.pop();
1510     }
1511   }
1512 }
1513 
1514 
1515 //=============================================================================
1516 //
1517 //                   C L O N E   A   L O O P   B O D Y
1518 //
1519 
1520 //------------------------------clone_iff--------------------------------------
1521 // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
1522 // "Nearly" because all Nodes have been cloned from the original in the loop,
1523 // but the fall-in edges to the Cmp are different.  Clone bool/Cmp pairs
1524 // through the Phi recursively, and return a Bool (or an Opaque4 wrapping one).
1525 Node* PhaseIdealLoop::clone_iff(PhiNode *phi, IdealLoopTree *loop) {
1526 
1527   // Convert this Phi into a Phi merging Bools
1528   uint i;
1529   for (i = 1; i < phi->req(); i++) {
1530     Node *b = phi->in(i);
1531     if (b->is_Phi()) {
1532       _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi(), loop));
1533     } else {
1534       assert(b->is_Bool() || b->Opcode() == Op_Opaque4, "");
1535     }
1536   }
1537 
1538   Node* n = phi->in(1);
1539   Node* sample_opaque = NULL;
1540   Node *sample_bool = NULL;
1541   if (n->Opcode() == Op_Opaque4) {
1542     sample_opaque = n;
1543     sample_bool = n->in(1);
1544     assert(sample_bool->is_Bool(), "wrong type");
1545   } else {
1546     sample_bool = n;
1547   }
1548   Node *sample_cmp = sample_bool->in(1);
1549 
1550   // Make Phis to merge the Cmp's inputs.
1551   PhiNode *phi1 = new PhiNode(phi->in(0), Type::TOP);
1552   PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
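       // For each path into the Phi, dig through the Bool (and optional Opaque4)
       // to the underlying Cmp and collect its two inputs.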
1553   for (i = 1; i < phi->req(); i++) {
1554     Node *n1 = sample_opaque == NULL ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
1555     Node *n2 = sample_opaque == NULL ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
1556     phi1->set_req(i, n1);
1557     phi2->set_req(i, n2);
1558     phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
1559     phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
1560   }
1561   // See if these Phis have been made before.
1562   // Register with optimizer
1563   Node *hit1 = _igvn.hash_find_insert(phi1);
1564   if (hit1) {                   // Hit, toss just made Phi
1565     _igvn.remove_dead_node(phi1); // Remove new phi
1566     assert(hit1->is_Phi(), "" );
1567     phi1 = (PhiNode*)hit1;      // Use existing phi
1568   } else {                      // Miss
1569     _igvn.register_new_node_with_optimizer(phi1);
1570   }
1571   Node *hit2 = _igvn.hash_find_insert(phi2);
1572   if (hit2) {                   // Hit, toss just made Phi
1573     _igvn.remove_dead_node(phi2); // Remove new phi
1574     assert(hit2->is_Phi(), "" );
1575     phi2 = (PhiNode*)hit2;      // Use existing phi
1576   } else {                      // Miss
1577     _igvn.register_new_node_with_optimizer(phi2);
1578   }
1579   // Register Phis with loop/block info
1580   set_ctrl(phi1, phi->in(0));
1581   set_ctrl(phi2, phi->in(0));
1582   // Make a new Cmp
1583   Node *cmp = sample_cmp->clone();
1584   cmp->set_req(1, phi1);
1585   cmp->set_req(2, phi2);
1586   _igvn.register_new_node_with_optimizer(cmp);
1587   set_ctrl(cmp, phi->in(0));
1588 
1589   // Make a new Bool
1590   Node *b = sample_bool->clone();
1591   b->set_req(1,cmp);
1592   _igvn.register_new_node_with_optimizer(b);
1593   set_ctrl(b, phi->in(0));
1594 
1595   if (sample_opaque != NULL) {
1596     Node* opaque = sample_opaque->clone();
1597     opaque->set_req(1, b);
1598     _igvn.register_new_node_with_optimizer(opaque);
1599     set_ctrl(opaque, phi->in(0));
1600     return opaque;
1601   }
1602 
1603   assert(b->is_Bool(), "");
1604   return b;
1605 }
1606 
1607 //------------------------------clone_bool-------------------------------------
1608 // Passed in a Phi merging (recursively) some nearly equivalent Cmps.
1609 // "Nearly" because all Nodes have been cloned from the original in the loop,
1610 // but the fall-in edges to the Cmp are different.  Clone Cmps
1611 // through the Phi recursively, and return a Cmp.
1612 CmpNode *PhaseIdealLoop::clone_bool( PhiNode *phi, IdealLoopTree *loop ) {
1613   uint i;
1614   // Convert this Phi into a Phi merging Bools
1615   for( i = 1; i < phi->req(); i++ ) {
1616     Node *b = phi->in(i);
1617     if( b->is_Phi() ) {
1618       _igvn.replace_input_of(phi, i, clone_bool( b->as_Phi(), loop ));
1619     } else {
1620       assert( b->is_Cmp() || b->is_top(), "inputs are all Cmp or TOP" );
1621     }
1622   }
1623 
1624   Node *sample_cmp = phi->in(1);
1625 
1626   // Make Phis to merge the Cmp's inputs.
1627   PhiNode *phi1 = new PhiNode( phi->in(0), Type::TOP );
1628   PhiNode *phi2 = new PhiNode( phi->in(0), Type::TOP );
1629   for( uint j = 1; j < phi->req(); j++ ) {
1630     Node *cmp_top = phi->in(j); // Inputs are all Cmp or TOP
1631     Node *n1, *n2;
1632     if( cmp_top->is_Cmp() ) {
1633       n1 = cmp_top->in(1);
1634       n2 = cmp_top->in(2);
1635     } else {
1636       n1 = n2 = cmp_top;
1637     }
1638     phi1->set_req( j, n1 );
1639     phi2->set_req( j, n2 );
1640     phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
1641     phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
1642   }
1643 
1644   // See if these Phis have been made before.
1645   // Register with optimizer
1646   Node *hit1 = _igvn.hash_find_insert(phi1);
1647   if( hit1 ) {                  // Hit, toss just made Phi
1648     _igvn.remove_dead_node(phi1); // Remove new phi
1649     assert( hit1->is_Phi(), "" );
1650     phi1 = (PhiNode*)hit1;      // Use existing phi
1651   } else {                      // Miss
1652     _igvn.register_new_node_with_optimizer(phi1);
1653   }
1654   Node *hit2 = _igvn.hash_find_insert(phi2);
1655   if( hit2 ) {                  // Hit, toss just made Phi
1656     _igvn.remove_dead_node(phi2); // Remove new phi
1657     assert( hit2->is_Phi(), "" );
1658     phi2 = (PhiNode*)hit2;      // Use existing phi
1659   } else {                      // Miss
1660     _igvn.register_new_node_with_optimizer(phi2);
1661   }
1662   // Register Phis with loop/block info
1663   set_ctrl(phi1, phi->in(0));
1664   set_ctrl(phi2, phi->in(0));
1665   // Make a new Cmp
1666   Node *cmp = sample_cmp->clone();
1667   cmp->set_req( 1, phi1 );
1668   cmp->set_req( 2, phi2 );
1669   _igvn.register_new_node_with_optimizer(cmp);
1670   set_ctrl(cmp, phi->in(0));
1671 
1672   assert( cmp->is_Cmp(), "" );
1673   return (CmpNode*)cmp;
1674 }
1675 
1676 //------------------------------sink_use---------------------------------------
1677 // If 'use' was in the loop-exit block, it now needs to be sunk
1678 // below the post-loop merge point.
1679 void PhaseIdealLoop::sink_use( Node *use, Node *post_loop ) {
1680   if (!use->is_CFG() && get_ctrl(use) == post_loop->in(2)) {
1681     set_ctrl(use, post_loop);
1682     for (DUIterator j = use->outs(); use->has_out(j); j++)
1683       sink_use(use->out(j), post_loop);
1684   }
1685 }
1686 
1687 void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new,
1688                                                  IdealLoopTree* loop, IdealLoopTree* outer_loop,
1689                                                  Node_List*& split_if_set, Node_List*& split_bool_set,
1690                                                  Node_List*& split_cex_set, Node_List& worklist,
1691                                                  uint new_counter, CloneLoopMode mode) {
1692   Node* nnn = old_new[old->_idx];
1693   // Copy uses to a worklist, so I can munge the def-use info
1694   // with impunity.
1695   for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
1696     worklist.push(old->fast_out(j));
1697 
1698   while( worklist.size() ) {
1699     Node *use = worklist.pop();
1700     if (!has_node(use))  continue; // Ignore dead nodes
1701     if (use->in(0) == C->top())  continue;
1702     IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
1703     // Check for data-use outside of loop - at least one of OLD or USE
1704     // must not be a CFG node.
1705 #ifdef ASSERT
1706     if (loop->_head->as_Loop()->is_strip_mined() && outer_loop->is_member(use_loop) && !loop->is_member(use_loop) && old_new[use->_idx] == NULL) {
1707       Node* sfpt = loop->_head->as_CountedLoop()->outer_safepoint();
1708       assert(mode == ControlAroundStripMined && use == sfpt, "missed a node");
1709     }
1710 #endif
1711     if (!loop->is_member(use_loop) && !outer_loop->is_member(use_loop) && (!old->is_CFG() || !use->is_CFG())) {
1712 
1713       // If the Data use is an IF, that means we have an IF outside of the
1714       // loop that is switching on a condition that is set inside of the
1715       // loop.  Happens if people set a loop-exit flag; then test the flag
1716       // in the loop to break the loop, then test it again outside of the
1717       // loop to determine which way the loop exited.
1718       // Loop predicate If node connects to Bool node through Opaque1 node.
1719       if (use->is_If() || use->is_CMove() || C->is_predicate_opaq(use) || use->Opcode() == Op_Opaque4) {
1720         // Since this code is highly unlikely, we lazily build the worklist
1721         // of such Nodes to go split.
1722         if (!split_if_set) {
1723           ResourceArea *area = Thread::current()->resource_area();
1724           split_if_set = new Node_List(area);
1725         }
1726         split_if_set->push(use);
1727       }
1728       if (use->is_Bool()) {
1729         if (!split_bool_set) {
1730           ResourceArea *area = Thread::current()->resource_area();
1731           split_bool_set = new Node_List(area);
1732         }
1733         split_bool_set->push(use);
1734       }
1735       if (use->Opcode() == Op_CreateEx) {
1736         if (!split_cex_set) {
1737           ResourceArea *area = Thread::current()->resource_area();
1738           split_cex_set = new Node_List(area);
1739         }
1740         split_cex_set->push(use);
1741       }
1742 
1743 
1744       // Get "block" use is in
1745       uint idx = 0;
1746       while( use->in(idx) != old ) idx++;
1747       Node *prev = use->is_CFG() ? use : get_ctrl(use);
1748       assert(!loop->is_member(get_loop(prev)) && !outer_loop->is_member(get_loop(prev)), "" );
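           // Regions created during this pass (_idx >= new_counter) merge the new
           // and old loop exits, with the old-loop path at input 2; for pre-existing
           // nodes step up the dominator tree instead.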
1749       Node *cfg = prev->_idx >= new_counter
1750         ? prev->in(2)
1751         : idom(prev);
1752       if( use->is_Phi() )     // Phi use is in prior block
1753         cfg = prev->in(idx);  // NOT in block of Phi itself
1754       if (cfg->is_top()) {    // Use is dead?
1755         _igvn.replace_input_of(use, idx, C->top());
1756         continue;
1757       }
1758 
1759       // If use is referenced through control edge... (idx == 0)
1760       if (mode == IgnoreStripMined && idx == 0) {
1761         LoopNode *head = loop->_head->as_Loop();
1762         if (head->is_strip_mined() && is_dominator(head->outer_loop_exit(), prev)) {
1763           // That node is outside the inner loop; leave it outside the
1764           // outer loop as well so as not to confuse the verification code.
1765           assert(!loop->_parent->is_member(use_loop), "should be out of the outer loop");
1766           _igvn.replace_input_of(use, 0, head->outer_loop_exit());
1767           continue;
1768         }
1769       }
1770 
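           // Walk control up from the use until 'cfg' is back inside the (outer)
           // loop; 'prev' is then the closest node outside the loop on this path
           // (a loop exit projection or a merge of exits).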
1771       while(!outer_loop->is_member(get_loop(cfg))) {
1772         prev = cfg;
1773         cfg = cfg->_idx >= new_counter ? cfg->in(2) : idom(cfg);
1774       }
1775       // If the use occurs after merging several exits from the loop, then
1776       // the old value must have dominated all those exits.  Since the same old
1777       // value was used on all those exits we did not need a Phi at this
1778       // merge point.  NOW we do need a Phi here.  Each loop exit value
1779       // is now merged with the peeled body exit; each exit gets its own
1780       // private Phi and those Phis need to be merged here.
1781       Node *phi;
1782       if( prev->is_Region() ) {
1783         if( idx == 0 ) {      // Updating control edge?
1784           phi = prev;         // Just use existing control
1785         } else {              // Else need a new Phi
1786           phi = PhiNode::make( prev, old );
1787           // Now recursively fix up the new uses of old!
1788           for( uint i = 1; i < prev->req(); i++ ) {
1789             worklist.push(phi); // Onto worklist once for each 'old' input
1790           }
1791         }
1792       } else {
1793         // Get new RegionNode merging old and new loop exits
1794         prev = old_new[prev->_idx];
1795         assert( prev, "just made this in step 7" );
1796         if( idx == 0) {      // Updating control edge?
1797           phi = prev;         // Just use existing control
1798         } else {              // Else need a new Phi
1799           // Make a new Phi merging data values properly
1800           phi = PhiNode::make( prev, old );
1801           phi->set_req( 1, nnn );
1802         }
1803       }
1804       // If inserting a new Phi, check for prior hits
1805       if( idx != 0 ) {
1806         Node *hit = _igvn.hash_find_insert(phi);
1807         if( hit == NULL ) {
1808           _igvn.register_new_node_with_optimizer(phi); // Register new phi
1809         } else {                                      // or
1810           // Remove the new phi from the graph and use the hit
1811           _igvn.remove_dead_node(phi);
1812           phi = hit;                                  // Use existing phi
1813         }
1814         set_ctrl(phi, prev);
1815       }
1816       // Make 'use' use the Phi instead of the old loop body exit value
1817       _igvn.replace_input_of(use, idx, phi);
1818       if( use->_idx >= new_counter ) { // If updating new phis
1819         // Not needed for correctness, but prevents a weak assert
1820         // in AddPNode from tripping (when we end up with different
1821         // base & derived Phis that will become the same after
1822         // IGVN does CSE).
1823         Node *hit = _igvn.hash_find_insert(use);
1824         if( hit )             // Go ahead and re-hash for hits.
1825           _igvn.replace_node( use, hit );
1826       }
1827 
1828       // If 'use' was in the loop-exit block, it now needs to be sunk
1829       // below the post-loop merge point.
1830       sink_use( use, prev );
1831     }
1832   }
1833 }
1834 
1835 static void clone_outer_loop_helper(Node* n, const IdealLoopTree *loop, const IdealLoopTree* outer_loop,
1836                                     const Node_List &old_new, Unique_Node_List& wq, PhaseIdealLoop* phase,
1837                                     bool check_old_new) {
1838   for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
1839     Node* u = n->fast_out(j);
1840     assert(check_old_new || old_new[u->_idx] == NULL, "shouldn't have been cloned");
1841     if (!u->is_CFG() && (!check_old_new || old_new[u->_idx] == NULL)) {
1842       Node* c = phase->get_ctrl(u);
1843       IdealLoopTree* u_loop = phase->get_loop(c);
1844       assert(!loop->is_member(u_loop), "can be in outer loop or out of both loops only");
1845       if (outer_loop->is_member(u_loop)) {
1846         wq.push(u);
1847       }
1848     }
1849   }
1850 }
1851 
1852 void PhaseIdealLoop::clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealLoopTree *loop,
1853                                       IdealLoopTree* outer_loop, int dd, Node_List &old_new,
1854                                       Node_List& extra_data_nodes) {
1855   if (head->is_strip_mined() && mode != IgnoreStripMined) {
1856     CountedLoopNode* cl = head->as_CountedLoop();
1857     Node* l = cl->outer_loop();
1858     Node* tail = cl->outer_loop_tail();
1859     IfNode* le = cl->outer_loop_end();
1860     Node* sfpt = cl->outer_safepoint();
1861     CountedLoopEndNode* cle = cl->loopexit();
1862     CountedLoopNode* new_cl = old_new[cl->_idx]->as_CountedLoop();
1863     CountedLoopEndNode* new_cle = new_cl->as_CountedLoop()->loopexit_or_null();
1864     Node* cle_out = cle->proj_out(false);
1865 
1866     Node* new_sfpt = NULL;
1867     Node* new_cle_out = cle_out->clone();
1868     old_new.map(cle_out->_idx, new_cle_out);
1869     if (mode == CloneIncludesStripMined) {
1870       // clone outer loop body
1871       Node* new_l = l->clone();
1872       Node* new_tail = tail->clone();
1873       IfNode* new_le = le->clone()->as_If();
1874       new_sfpt = sfpt->clone();
1875 
1876       set_loop(new_l, outer_loop->_parent);
1877       set_idom(new_l, new_l->in(LoopNode::EntryControl), dd);
1878       set_loop(new_cle_out, outer_loop->_parent);
1879       set_idom(new_cle_out, new_cle, dd);
1880       set_loop(new_sfpt, outer_loop->_parent);
1881       set_idom(new_sfpt, new_cle_out, dd);
1882       set_loop(new_le, outer_loop->_parent);
1883       set_idom(new_le, new_sfpt, dd);
1884       set_loop(new_tail, outer_loop->_parent);
1885       set_idom(new_tail, new_le, dd);
1886       set_idom(new_cl, new_l, dd);
1887 
1888       old_new.map(l->_idx, new_l);
1889       old_new.map(tail->_idx, new_tail);
1890       old_new.map(le->_idx, new_le);
1891       old_new.map(sfpt->_idx, new_sfpt);
1892 
1893       new_l->set_req(LoopNode::LoopBackControl, new_tail);
1894       new_l->set_req(0, new_l);
1895       new_tail->set_req(0, new_le);
1896       new_le->set_req(0, new_sfpt);
1897       new_sfpt->set_req(0, new_cle_out);
1898       new_cle_out->set_req(0, new_cle);
1899       new_cl->set_req(LoopNode::EntryControl, new_l);
1900 
1901       _igvn.register_new_node_with_optimizer(new_l);
1902       _igvn.register_new_node_with_optimizer(new_tail);
1903       _igvn.register_new_node_with_optimizer(new_le);
1904     } else {
1905       Node *newhead = old_new[loop->_head->_idx];
1906       newhead->as_Loop()->clear_strip_mined();
1907       _igvn.replace_input_of(newhead, LoopNode::EntryControl, newhead->in(LoopNode::EntryControl)->in(LoopNode::EntryControl));
1908       set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
1909     }
1910     // Look at data nodes that were assigned a control in the outer
1911     // loop: they are kept in the outer loop by the safepoint so start
1912     // from the safepoint node's inputs.
1913     IdealLoopTree* outer_loop = get_loop(l);
1914     Node_Stack stack(2);
1915     stack.push(sfpt, 1);
1916     uint new_counter = C->unique();
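         // Depth-first walk over the safepoint's input chains, cloning each data
         // node whose control is in the outer strip-mined loop and that has not
         // been cloned yet, so the new outer loop gets its own copies.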
1917     while (stack.size() > 0) {
1918       Node* n = stack.node();
1919       uint i = stack.index();
1920       while (i < n->req() &&
1921              (n->in(i) == NULL ||
1922               !has_ctrl(n->in(i)) ||
1923               get_loop(get_ctrl(n->in(i))) != outer_loop ||
1924               (old_new[n->in(i)->_idx] != NULL && old_new[n->in(i)->_idx]->_idx >= new_counter))) {
1925         i++;
1926       }
1927       if (i < n->req()) {
1928         stack.set_index(i+1);
1929         stack.push(n->in(i), 0);
1930       } else {
1931         assert(old_new[n->_idx] == NULL || n == sfpt || old_new[n->_idx]->_idx < new_counter, "no clone yet");
1932         Node* m = n == sfpt ? new_sfpt : n->clone();
1933         if (m != NULL) {
1934           for (uint i = 0; i < n->req(); i++) {
1935             if (m->in(i) != NULL && old_new[m->in(i)->_idx] != NULL) {
1936               m->set_req(i, old_new[m->in(i)->_idx]);
1937             }
1938           }
1939         } else {
1940           assert(n == sfpt && mode != CloneIncludesStripMined, "where's the safepoint clone?");
1941         }
1942         if (n != sfpt) {
1943           extra_data_nodes.push(n);
1944           _igvn.register_new_node_with_optimizer(m);
1945           assert(get_ctrl(n) == cle_out, "what other control?");
1946           set_ctrl(m, new_cle_out);
1947           old_new.map(n->_idx, m);
1948         }
1949         stack.pop();
1950       }
1951     }
1952     if (mode == CloneIncludesStripMined) {
1953       _igvn.register_new_node_with_optimizer(new_sfpt);
1954       _igvn.register_new_node_with_optimizer(new_cle_out);
1955     }
1956     // Some other transformation may have pessimistically assigned some
1957     // data nodes to the outer loop. Set their control so they are out
1958     // of the outer loop.
1959     ResourceMark rm;
1960     Unique_Node_List wq;
1961     for (uint i = 0; i < extra_data_nodes.size(); i++) {
1962       Node* old = extra_data_nodes.at(i);
1963       clone_outer_loop_helper(old, loop, outer_loop, old_new, wq, this, true);
1964     }
1965     Node* new_ctrl = cl->outer_loop_exit();
1966     assert(get_loop(new_ctrl) != outer_loop, "must be out of the loop nest");
1967     for (uint i = 0; i < wq.size(); i++) {
1968       Node* n = wq.at(i);
1969       set_ctrl(n, new_ctrl);
1970       clone_outer_loop_helper(n, loop, outer_loop, old_new, wq, this, false);
1971     }
1972   } else {
1973     Node *newhead = old_new[loop->_head->_idx];
1974     set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
1975   }
1976 }
1977 
1978 //------------------------------clone_loop-------------------------------------
1979 //
1980 //                   C L O N E   A   L O O P   B O D Y
1981 //
1982 // This is the basic building block of the loop optimizations.  It clones an
1983 // entire loop body.  It makes an old_new loop body mapping; with this mapping
1984 // you can find the new-loop equivalent to an old-loop node.  All new-loop
1985 // nodes are exactly equal to their old-loop counterparts, all edges are the
1986 // same.  All exits from the old-loop now have a RegionNode that merges the
1987 // equivalent new-loop path.  This is true even for the normal "loop-exit"
1988 // condition.  All uses of loop-invariant old-loop values now come from (one
1989 // or more) Phis that merge their new-loop equivalents.
1990 //
1991 // This operation leaves the graph in an illegal state: there are two valid
1992 // control edges coming from the loop pre-header to both loop bodies.  I'll
1993 // definitely have to hack the graph after running this transform.
1994 //
1995 // From this building block I will further edit edges to perform loop peeling
1996 // or loop unrolling or iteration splitting (Range-Check-Elimination), etc.
1997 //
1998 // Parameter side_by_side_idom:
1999 //   When side_by_side_idom is NULL, the dominator tree is constructed for
2000 //      the clone loop to dominate the original.  Used in construction of
2001 //      pre-main-post loop sequence.
2002 //   When nonnull, the clone and original are side-by-side, both are
2003 //      dominated by the side_by_side_idom node.  Used in construction of
2004 //      unswitched loops.
2005 void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd,
2006                                 CloneLoopMode mode, Node* side_by_side_idom) {
2007 
2008   LoopNode* head = loop->_head->as_Loop();
2009   head->verify_strip_mined(1);
2010 
2011   if (C->do_vector_loop() && PrintOpto) {
2012     const char* mname = C->method()->name()->as_quoted_ascii();
2013     if (mname != NULL) {
2014       tty->print("PhaseIdealLoop::clone_loop: for vectorize method %s\n", mname);
2015     }
2016   }
2017 
2018   CloneMap& cm = C->clone_map();
2019   Dict* dict = cm.dict();
2020   if (C->do_vector_loop()) {
2021     cm.set_clone_idx(cm.max_gen()+1);
2022 #ifndef PRODUCT
2023     if (PrintOpto) {
2024       tty->print_cr("PhaseIdealLoop::clone_loop: _clone_idx %d", cm.clone_idx());
2025       loop->dump_head();
2026     }
2027 #endif
2028   }
2029 
2030   // Step 1: Clone the loop body.  Make the old->new mapping.
2031   uint i;
2032   for( i = 0; i < loop->_body.size(); i++ ) {
2033     Node *old = loop->_body.at(i);
2034     Node *nnn = old->clone();
2035     old_new.map( old->_idx, nnn );
2036     if (C->do_vector_loop()) {
2037       cm.verify_insert_and_clone(old, nnn, cm.clone_idx());
2038     }
2039     _igvn.register_new_node_with_optimizer(nnn);
2040   }
2041 
2042   IdealLoopTree* outer_loop = (head->is_strip_mined() && mode != IgnoreStripMined) ? get_loop(head->as_CountedLoop()->outer_loop()) : loop;
2043 
2044   // Step 2: Fix the edges in the new body.  If the old input is outside the
2045   // loop use it.  If the old input is INside the loop, use the corresponding
2046   // new node instead.
2047   for( i = 0; i < loop->_body.size(); i++ ) {
2048     Node *old = loop->_body.at(i);
2049     Node *nnn = old_new[old->_idx];
2050     // Fix CFG/Loop controlling the new node
2051     if (has_ctrl(old)) {
2052       set_ctrl(nnn, old_new[get_ctrl(old)->_idx]);
2053     } else {
2054       set_loop(nnn, outer_loop->_parent);
2055       if (old->outcnt() > 0) {
2056         set_idom( nnn, old_new[idom(old)->_idx], dd );
2057       }
2058     }
2059     // Correct edges to the new node
2060     for( uint j = 0; j < nnn->req(); j++ ) {
2061         Node *n = nnn->in(j);
2062         if( n ) {
2063           IdealLoopTree *old_in_loop = get_loop( has_ctrl(n) ? get_ctrl(n) : n );
2064           if( loop->is_member( old_in_loop ) )
2065             nnn->set_req(j, old_new[n->_idx]);
2066         }
2067     }
2068     _igvn.hash_find_insert(nnn);
2069   }
2070 
2071   ResourceArea *area = Thread::current()->resource_area();
2072   Node_List extra_data_nodes(area); // data nodes in the outer strip mined loop
2073   clone_outer_loop(head, mode, loop, outer_loop, dd, old_new, extra_data_nodes);
2074 
2075   // Step 3: Now fix control uses.  Loop varying control uses have already
2076   // been fixed up (as part of all input edges in Step 2).  Loop invariant
2077   // control uses must be either an IfFalse or an IfTrue.  Make a merge
2078   // point to merge the old and new IfFalse/IfTrue nodes; make the use
2079   // refer to this.
2080   Node_List worklist(area);
2081   uint new_counter = C->unique();
2082   for( i = 0; i < loop->_body.size(); i++ ) {
2083     Node* old = loop->_body.at(i);
2084     if( !old->is_CFG() ) continue;
2085 
2086     // Copy uses to a worklist, so I can munge the def-use info
2087     // with impunity.
2088     for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
2089       worklist.push(old->fast_out(j));
2090 
2091     while( worklist.size() ) {  // Visit all uses
2092       Node *use = worklist.pop();
2093       if (!has_node(use))  continue; // Ignore dead nodes
2094       IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
2095       if( !loop->is_member( use_loop ) && use->is_CFG() ) {
2096         // Both OLD and USE are CFG nodes here.
2097         assert( use->is_Proj(), "" );
2098         Node* nnn = old_new[old->_idx];
2099 
2100         Node* newuse = NULL;
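             // For a strip-mined loop the inner CountedLoopEnd's exit projection is
             // handled specially: redirect the use to the outer loop end's exit
             // projection and pick the matching clone for the given clone mode.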
2101         if (head->is_strip_mined() && mode != IgnoreStripMined) {
2102           CountedLoopNode* cl = head->as_CountedLoop();
2103           CountedLoopEndNode* cle = cl->loopexit();
2104           Node* cle_out = cle->proj_out_or_null(false);
2105           if (use == cle_out) {
2106             IfNode* le = cl->outer_loop_end();
2107             use = le->proj_out(false);
2108             use_loop = get_loop(use);
2109             if (mode == CloneIncludesStripMined) {
2110               nnn = old_new[le->_idx];
2111             } else {
2112               newuse = old_new[cle_out->_idx];
2113             }
2114           }
2115         }
2116         if (newuse == NULL) {
2117           newuse = use->clone();
2118         }
2119 
2120         // Clone the loop exit control projection
2121         if (C->do_vector_loop()) {
2122           cm.verify_insert_and_clone(use, newuse, cm.clone_idx());
2123         }
2124         newuse->set_req(0,nnn);
2125         _igvn.register_new_node_with_optimizer(newuse);
2126         set_loop(newuse, use_loop);
2127         set_idom(newuse, nnn, dom_depth(nnn) + 1 );
2128 
2129         // We need a Region to merge the exit from the peeled body and the
2130         // exit from the old loop body.
2131         RegionNode *r = new RegionNode(3);
2132         // Map the old use to the new merge point
2133         old_new.map( use->_idx, r );
2134         uint dd_r = MIN2(dom_depth(newuse),dom_depth(use));
2135         assert( dd_r >= dom_depth(dom_lca(newuse,use)), "" );
2136 
2137         // The original user of 'use' uses 'r' instead.
2138         for (DUIterator_Last lmin, l = use->last_outs(lmin); l >= lmin;) {
2139           Node* useuse = use->last_out(l);
2140           _igvn.rehash_node_delayed(useuse);
2141           uint uses_found = 0;
2142           if( useuse->in(0) == use ) {
2143             useuse->set_req(0, r);
2144             uses_found++;
2145             if( useuse->is_CFG() ) {
2146               assert( dom_depth(useuse) > dd_r, "" );
2147               set_idom(useuse, r, dom_depth(useuse));
2148             }
2149           }
2150           for( uint k = 1; k < useuse->req(); k++ ) {
2151             if( useuse->in(k) == use ) {
2152               useuse->set_req(k, r);
2153               uses_found++;
2154               if (useuse->is_Loop() && k == LoopNode::EntryControl) {
2155                 assert(dom_depth(useuse) > dd_r , "");
2156                 set_idom(useuse, r, dom_depth(useuse));
2157               }
2158             }
2159           }
2160           l -= uses_found;    // we deleted 1 or more copies of this edge
2161         }
2162 
2163         // Now finish up 'r'
2164         r->set_req( 1, newuse );
2165         r->set_req( 2,    use );
2166         _igvn.register_new_node_with_optimizer(r);
2167         set_loop(r, use_loop);
2168         set_idom(r, !side_by_side_idom ? newuse->in(0) : side_by_side_idom, dd_r);
2169       } // End of if a loop-exit test
2170     }
2171   }
2172 
2173   // Step 4: If loop-invariant use is not control, it must be dominated by a
2174   // loop exit IfFalse/IfTrue.  Find "proper" loop exit.  Make a Region
2175   // there if needed.  Make a Phi there merging old and new used values.
2176   Node_List *split_if_set = NULL;
2177   Node_List *split_bool_set = NULL;
2178   Node_List *split_cex_set = NULL;
2179   for( i = 0; i < loop->_body.size(); i++ ) {
2180     Node* old = loop->_body.at(i);
2181     clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
2182                                 split_bool_set, split_cex_set, worklist, new_counter,
2183                                 mode);
2184   }
2185 
2186   for (i = 0; i < extra_data_nodes.size(); i++) {
2187     Node* old = extra_data_nodes.at(i);
2188     clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
2189                                 split_bool_set, split_cex_set, worklist, new_counter,
2190                                 mode);
2191   }
2192 
2193   // Check for IFs that need splitting/cloning.  Happens if an IF outside of
2194   // the loop uses a condition set in the loop.  The original IF probably
2195   // takes control from one or more OLD Regions (which in turn get from NEW
2196   // Regions).  In any case, there will be a set of Phis for each merge point
2197 // from the IF up to where the original BOOL def exits the loop.
2198   if (split_if_set) {
2199     while (split_if_set->size()) {
2200       Node *iff = split_if_set->pop();
2201       if (iff->in(1)->is_Phi()) {
2202         Node *b = clone_iff(iff->in(1)->as_Phi(), loop);
2203         _igvn.replace_input_of(iff, 1, b);
2204       }
2205     }
2206   }
2207   if (split_bool_set) {
2208     while (split_bool_set->size()) {
2209       Node *b = split_bool_set->pop();
2210       Node *phi = b->in(1);
2211       assert(phi->is_Phi(), "");
2212       CmpNode *cmp = clone_bool((PhiNode*)phi, loop);
2213       _igvn.replace_input_of(b, 1, cmp);
2214     }
2215   }
2216   if (split_cex_set) {
2217     while (split_cex_set->size()) {
2218       Node *b = split_cex_set->pop();
2219       assert(b->in(0)->is_Region(), "");
2220       assert(b->in(1)->is_Phi(), "");
2221       assert(b->in(0)->in(0) == b->in(1)->in(0), "");
2222       split_up(b, b->in(0), NULL);
2223     }
2224   }
2225 
2226 }
2227 
2228 
2229 //---------------------- stride_of_possible_iv -------------------------------------
2230 // Looks for an iff/bool/cmp with one operand of the compare
2231 // being a cycle involving an add and a phi,
2232 // with an optional truncation (left-shift followed by a right-shift)
2233 // of the add. Returns zero if not an iv.
2234 int PhaseIdealLoop::stride_of_possible_iv(Node* iff) {
2235   Node* trunc1 = NULL;
2236   Node* trunc2 = NULL;
2237   const TypeInt* ttype = NULL;
2238   if (!iff->is_If() || iff->in(1) == NULL || !iff->in(1)->is_Bool()) {
2239     return 0;
2240   }
2241   BoolNode* bl = iff->in(1)->as_Bool();
2242   Node* cmp = bl->in(1);
2243   if (!cmp || (cmp->Opcode() != Op_CmpI && cmp->Opcode() != Op_CmpU)) {
2244     return 0;
2245   }
2246   // Must have an invariant operand
2247   if (is_member(get_loop(iff), get_ctrl(cmp->in(2)))) {
2248     return 0;
2249   }
2250   Node* add2 = NULL;
2251   Node* cmp1 = cmp->in(1);
2252   if (cmp1->is_Phi()) {
2253     // (If (Bool (CmpX phi:(Phi ...(Optional-trunc(AddI phi add2))) )))
2254     Node* phi = cmp1;
2255     for (uint i = 1; i < phi->req(); i++) {
2256       Node* in = phi->in(i);
2257       Node* add = CountedLoopNode::match_incr_with_optional_truncation(in,
2258                                 &trunc1, &trunc2, &ttype);
2259       if (add && add->in(1) == phi) {
2260         add2 = add->in(2);
2261         break;
2262       }
2263     }
2264   } else {
2265     // (If (Bool (CmpX addtrunc:(Optional-trunc((AddI (Phi ...addtrunc...) add2)) )))
2266     Node* addtrunc = cmp1;
2267     Node* add = CountedLoopNode::match_incr_with_optional_truncation(addtrunc,
2268                                 &trunc1, &trunc2, &ttype);
2269     if (add && add->in(1)->is_Phi()) {
2270       Node* phi = add->in(1);
2271       for (uint i = 1; i < phi->req(); i++) {
2272         if (phi->in(i) == addtrunc) {
2273           add2 = add->in(2);
2274           break;
2275         }
2276       }
2277     }
2278   }
2279   if (add2 != NULL) {
2280     const TypeInt* add2t = _igvn.type(add2)->is_int();
2281     if (add2t->is_con()) {
2282       return add2t->get_con();
2283     }
2284   }
2285   return 0;
2286 }
2287 
2288 
2289 //---------------------- stay_in_loop -------------------------------------
2290 // Return the (unique) control output node that's in the loop (if it exists).
2291 Node* PhaseIdealLoop::stay_in_loop( Node* n, IdealLoopTree *loop) {
2292   Node* unique = NULL;
2293   if (!n) return NULL;
2294   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2295     Node* use = n->fast_out(i);
2296     if (!has_ctrl(use) && loop->is_member(get_loop(use))) {
2297       if (unique != NULL) {
2298         return NULL;
2299       }
2300       unique = use;
2301     }
2302   }
2303   return unique;
2304 }
2305 
2306 //------------------------------ register_node -------------------------------------
2307 // Utility to register node "n" with PhaseIdealLoop
2308 void PhaseIdealLoop::register_node(Node* n, IdealLoopTree *loop, Node* pred, int ddepth) {
2309   _igvn.register_new_node_with_optimizer(n);
2310   loop->_body.push(n);
2311   if (n->is_CFG()) {
2312     set_loop(n, loop);
2313     set_idom(n, pred, ddepth);
2314   } else {
2315     set_ctrl(n, pred);
2316   }
2317 }
2318 
2319 //------------------------------ proj_clone -------------------------------------
2320 // Utility to create an if-projection
2321 ProjNode* PhaseIdealLoop::proj_clone(ProjNode* p, IfNode* iff) {
2322   ProjNode* c = p->clone()->as_Proj();
2323   c->set_req(0, iff);
2324   return c;
2325 }
2326 
2327 //------------------------------ short_circuit_if -------------------------------------
2328 // Force the iff control output to be the live_proj
2329 Node* PhaseIdealLoop::short_circuit_if(IfNode* iff, ProjNode* live_proj) {
2330   guarantee(live_proj != NULL, "null projection");
2331   int proj_con = live_proj->_con;
2332   assert(proj_con == 0 || proj_con == 1, "false or true projection");
2333   Node *con = _igvn.intcon(proj_con);
2334   set_ctrl(con, C->root());
2335   if (iff) {
2336     iff->set_req(1, con);
2337   }
2338   return con;
2339 }
2340 
2341 //------------------------------ insert_if_before_proj -------------------------------------
2342 // Insert a new if before an if projection (* - new node)
2343 //
2344 // before
2345 //           if(test)
2346 //           /     \
2347 //          v       v
2348 //    other-proj   proj (arg)
2349 //
2350 // after
2351 //           if(test)
2352 //           /     \
2353 //          /       v
2354 //         |      * proj-clone
2355 //         v          |
2356 //    other-proj      v
2357 //                * new_if(relop(cmp[IU](left,right)))
2358 //                  /  \
2359 //                 v    v
2360 //         * new-proj  proj
2361 //         (returned)
2362 //
2363 ProjNode* PhaseIdealLoop::insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj) {
2364   IfNode* iff = proj->in(0)->as_If();
2365   IdealLoopTree *loop = get_loop(proj);
2366   ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
2367   int ddepth = dom_depth(proj);
2368 
2369   _igvn.rehash_node_delayed(iff);
2370   _igvn.rehash_node_delayed(proj);
2371 
2372   proj->set_req(0, NULL);  // temporary disconnect
2373   ProjNode* proj2 = proj_clone(proj, iff);
2374   register_node(proj2, loop, iff, ddepth);
2375 
2376   Node* cmp = Signed ? (Node*) new CmpINode(left, right) : (Node*) new CmpUNode(left, right);
2377   register_node(cmp, loop, proj2, ddepth);
2378 
2379   BoolNode* bol = new BoolNode(cmp, relop);
2380   register_node(bol, loop, proj2, ddepth);
2381 
2382   int opcode = iff->Opcode();
2383   assert(opcode == Op_If || opcode == Op_RangeCheck, "unexpected opcode");
2384   IfNode* new_if = (opcode == Op_If) ? new IfNode(proj2, bol, iff->_prob, iff->_fcnt):
2385     new RangeCheckNode(proj2, bol, iff->_prob, iff->_fcnt);
2386   register_node(new_if, loop, proj2, ddepth);
2387 
2388   proj->set_req(0, new_if); // reattach
2389   set_idom(proj, new_if, ddepth);
2390 
2391   ProjNode* new_exit = proj_clone(other_proj, new_if)->as_Proj();
2392   guarantee(new_exit != NULL, "null exit node");
2393   register_node(new_exit, get_loop(other_proj), new_if, ddepth);
2394 
2395   return new_exit;
2396 }
2397 
2398 //------------------------------ insert_region_before_proj -------------------------------------
2399 // Insert a region before an if projection (* - new node)
2400 //
2401 // before
2402 //           if(test)
2403 //          /      |
2404 //         v       |
2405 //       proj      v
2406 //               other-proj
2407 //
2408 // after
2409 //           if(test)
2410 //          /      |
2411 //         v       |
2412 // * proj-clone    v
2413 //         |     other-proj
2414 //         v
2415 // * new-region
2416 //         |
2417 //         v
2418 // *      dum_if
2419 //       /     \
2420 //      v       \
2421 // * dum-proj    v
2422 //              proj
2423 //
2424 RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) {
2425   IfNode* iff = proj->in(0)->as_If();
2426   IdealLoopTree *loop = get_loop(proj);
2427   ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
2428   int ddepth = dom_depth(proj);
2429 
2430   _igvn.rehash_node_delayed(iff);
2431   _igvn.rehash_node_delayed(proj);
2432 
2433   proj->set_req(0, NULL);  // temporary disconnect
2434   ProjNode* proj2 = proj_clone(proj, iff);
2435   register_node(proj2, loop, iff, ddepth);
2436 
2437   RegionNode* reg = new RegionNode(2);
2438   reg->set_req(1, proj2);
2439   register_node(reg, loop, iff, ddepth);
2440 
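       // The dummy If always branches to 'proj' (short_circuit_if feeds it the
       // constant matching proj's con), so the new region and dum-proj do not
       // change the actual control flow.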
2441   IfNode* dum_if = new IfNode(reg, short_circuit_if(NULL, proj), iff->_prob, iff->_fcnt);
2442   register_node(dum_if, loop, reg, ddepth);
2443 
2444   proj->set_req(0, dum_if); // reattach
2445   set_idom(proj, dum_if, ddepth);
2446 
2447   ProjNode* dum_proj = proj_clone(other_proj, dum_if);
2448   register_node(dum_proj, loop, dum_if, ddepth);
2449 
2450   return reg;
2451 }
2452 
2453 //------------------------------ insert_cmpi_loop_exit -------------------------------------
2454 // Clone a signed compare loop exit from an unsigned compare and
2455 // insert it before the unsigned cmp on the stay-in-loop path.
2456 // All new nodes are inserted in the dominator tree between the original
2457 // if and its projections.  The original if test is replaced with
2458 // a constant to force the stay-in-loop path.
2459 //
2460 // This is done to make sure that the original if and its projections
2461 // still dominate the same set of control nodes, that the ctrl() relation
2462 // from data nodes to them is preserved, and that their loop nesting is
2463 // preserved.
2464 //
2465 // before
2466 //          if(i <u limit)    unsigned compare loop exit
2467 //         /       |
2468 //        v        v
2469 //   exit-proj   stay-in-loop-proj
2470 //
2471 // after
2472 //          if(stay-in-loop-const)  original if
2473 //         /       |
2474 //        /        v
2475 //       /  if(i <  limit)    new signed test
2476 //      /  /       |
2477 //     /  /        v
2478 //    /  /  if(i <u limit)    new cloned unsigned test
2479 //   /  /   /      |
2480 //   v  v  v       |
2481 //    region       |
2482 //        |        |
2483 //      dum-if     |
2484 //     /  |        |
2485 // ether  |        |
2486 //        v        v
2487 //   exit-proj   stay-in-loop-proj
2488 //
2489 IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *loop) {
2490   const bool Signed   = true;
2491   const bool Unsigned = false;
2492 
2493   BoolNode* bol = if_cmpu->in(1)->as_Bool();
2494   if (bol->_test._test != BoolTest::lt) return NULL;
2495   CmpNode* cmpu = bol->in(1)->as_Cmp();
2496   if (cmpu->Opcode() != Op_CmpU) return NULL;
2497   int stride = stride_of_possible_iv(if_cmpu);
2498   if (stride == 0) return NULL;
2499 
2500   Node* lp_proj = stay_in_loop(if_cmpu, loop);
2501   guarantee(lp_proj != NULL, "null loop node");
2502 
2503   ProjNode* lp_continue = lp_proj->as_Proj();
2504   ProjNode* lp_exit     = if_cmpu->proj_out(!lp_continue->is_IfTrue())->as_Proj();
2505 
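       // For a positive stride the signed test reuses the unsigned limit;
       // for a negative stride it tests against zero.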
2506   Node* limit = NULL;
2507   if (stride > 0) {
2508     limit = cmpu->in(2);
2509   } else {
2510     limit = _igvn.makecon(TypeInt::ZERO);
2511     set_ctrl(limit, C->root());
2512   }
2513   // Create a new region on the exit path
2514   RegionNode* reg = insert_region_before_proj(lp_exit);
2515   guarantee(reg != NULL, "null region node");
2516 
2517   // Clone the if-cmpu-true-false using a signed compare
2518   BoolTest::mask rel_i = stride > 0 ? bol->_test._test : BoolTest::ge;
2519   ProjNode* cmpi_exit = insert_if_before_proj(cmpu->in(1), Signed, rel_i, limit, lp_continue);
2520   reg->add_req(cmpi_exit);
2521 
2522   // Clone the if-cmpu-true-false
2523   BoolTest::mask rel_u = bol->_test._test;
2524   ProjNode* cmpu_exit = insert_if_before_proj(cmpu->in(1), Unsigned, rel_u, cmpu->in(2), lp_continue);
2525   reg->add_req(cmpu_exit);
2526 
2527   // Force original if to stay in loop.
2528   short_circuit_if(if_cmpu, lp_continue);
2529 
2530   return cmpi_exit->in(0)->as_If();
2531 }
2532 
2533 //------------------------------ remove_cmpi_loop_exit -------------------------------------
2534 // Remove a previously inserted signed compare loop exit.
2535 void PhaseIdealLoop::remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop) {
2536   Node* lp_proj = stay_in_loop(if_cmp, loop);
2537   assert(if_cmp->in(1)->in(1)->Opcode() == Op_CmpI &&
2538          stay_in_loop(lp_proj, loop)->is_If() &&
2539          stay_in_loop(lp_proj, loop)->in(1)->in(1)->Opcode() == Op_CmpU, "inserted cmpi before cmpu");
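       // Force the inserted signed-compare If to always take its stay-in-loop
       // projection; its exit path becomes dead and is cleaned up by IGVN.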
2540   Node *con = _igvn.makecon(lp_proj->is_IfTrue() ? TypeInt::ONE : TypeInt::ZERO);
2541   set_ctrl(con, C->root());
2542   if_cmp->set_req(1, con);
2543 }
2544 
2545 //------------------------------ scheduled_nodelist -------------------------------------
2546 // Create a post order schedule of nodes that are in the
2547 // "member" set.  The list is returned in "sched".
2548 // The first node in "sched" is the loop head, followed by
2549 // nodes which have no inputs in the "member" set, and then
2550 // followed by the nodes that have an immediate input dependence
2551 // on a node in "sched".
2552 void PhaseIdealLoop::scheduled_nodelist( IdealLoopTree *loop, VectorSet& member, Node_List &sched ) {
2553 
2554   assert(member.test(loop->_head->_idx), "loop head must be in member set");
2555   Arena *a = Thread::current()->resource_area();
2556   VectorSet visited(a);
2557   Node_Stack nstack(a, loop->_body.size());
2558 
2559   Node* n  = loop->_head;  // top of stack is cached in "n"
2560   uint idx = 0;
2561   visited.set(n->_idx);
2562 
2563   // Initially push all with no inputs from within member set
2564   for(uint i = 0; i < loop->_body.size(); i++ ) {
2565     Node *elt = loop->_body.at(i);
2566     if (member.test(elt->_idx)) {
2567       bool found = false;
2568       for (uint j = 0; j < elt->req(); j++) {
2569         Node* def = elt->in(j);
2570         if (def && member.test(def->_idx) && def != elt) {
2571           found = true;
2572           break;
2573         }
2574       }
2575       if (!found && elt != loop->_head) {
2576         nstack.push(n, idx);
2577         n = elt;
2578         assert(!visited.test(n->_idx), "not seen yet");
2579         visited.set(n->_idx);
2580       }
2581     }
2582   }
2583 
2584   // traverse out's that are in the member set
2585   while (true) {
2586     if (idx < n->outcnt()) {
2587       Node* use = n->raw_out(idx);
2588       idx++;
2589       if (!visited.test_set(use->_idx)) {
2590         if (member.test(use->_idx)) {
2591           nstack.push(n, idx);
2592           n = use;
2593           idx = 0;
2594         }
2595       }
2596     } else {
2597       // All outputs processed
2598       sched.push(n);
2599       if (nstack.is_empty()) break;
2600       n   = nstack.node();
2601       idx = nstack.index();
2602       nstack.pop();
2603     }
2604   }
2605 }
2606 
2607 
2608 //------------------------------ has_use_in_set -------------------------------------
2609 // Has a use in the vector set
2610 bool PhaseIdealLoop::has_use_in_set( Node* n, VectorSet& vset ) {
2611   for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2612     Node* use = n->fast_out(j);
2613     if (vset.test(use->_idx)) {
2614       return true;
2615     }
2616   }
2617   return false;
2618 }
2619 
2620 
2621 //------------------------------ has_use_internal_to_set -------------------------------------
2622 // Has use internal to the vector set (i.e. not in a phi at the loop head)
2623 bool PhaseIdealLoop::has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop ) {
2624   Node* head  = loop->_head;
2625   for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2626     Node* use = n->fast_out(j);
2627     if (vset.test(use->_idx) && !(use->is_Phi() && use->in(0) == head)) {
2628       return true;
2629     }
2630   }
2631   return false;
2632 }
2633 
2634 
2635 //------------------------------ clone_for_use_outside_loop -------------------------------------
2636 // clone "n" for uses that are outside of the loop
2637 int PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ) {
2638   int cloned = 0;
2639   assert(worklist.size() == 0, "should be empty");
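       // Collect the uses of "n" whose control is outside of the loop.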
2640   for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2641     Node* use = n->fast_out(j);
2642     if( !loop->is_member(get_loop(has_ctrl(use) ? get_ctrl(use) : use)) ) {
2643       worklist.push(use);
2644     }
2645   }
2646   while( worklist.size() ) {
2647     Node *use = worklist.pop();
2648     if (!has_node(use) || use->in(0) == C->top()) continue;
2649     uint j;
2650     for (j = 0; j < use->req(); j++) {
2651       if (use->in(j) == n) break;
2652     }
2653     assert(j < use->req(), "must be there");
2654 
2655     // clone "n" and insert it between the inputs of "n" and the use outside the loop
2656     Node* n_clone = n->clone();
2657     _igvn.replace_input_of(use, j, n_clone);
2658     cloned++;
2659     Node* use_c;
2660     if (!use->is_Phi()) {
2661       use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0);
2662     } else {
2663       // Use in a phi is considered a use in the associated predecessor block
2664       use_c = use->in(0)->in(j);
2665     }
2666     set_ctrl(n_clone, use_c);
2667     assert(!loop->is_member(get_loop(use_c)), "should be outside loop");
2668     get_loop(use_c)->_body.push(n_clone);
2669     _igvn.register_new_node_with_optimizer(n_clone);
2670 #if !defined(PRODUCT)
2671     if (TracePartialPeeling) {
2672       tty->print_cr("loop exit cloning old: %d new: %d newbb: %d", n->_idx, n_clone->_idx, get_ctrl(n_clone)->_idx);
2673     }
2674 #endif
2675   }
2676   return cloned;
2677 }
2678 
2679 
2680 //------------------------------ clone_for_special_use_inside_loop -------------------------------------
2681 // clone "n" for special uses that are in the not_peeled region.
2682 // If these def-uses occur in separate blocks, the code generator
2683 // marks the method as not compilable.  For example, if a "BoolNode"
2684 // is in a different basic block than the "IfNode" that uses it, then
2685 // the compilation is aborted in the code generator.
2686 void PhaseIdealLoop::clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
2687                                                         VectorSet& not_peel, Node_List& sink_list, Node_List& worklist ) {
2688   if (n->is_Phi() || n->is_Load()) {
2689     return;
2690   }
2691   assert(worklist.size() == 0, "should be empty");
2692   for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2693     Node* use = n->fast_out(j);
2694     if ( not_peel.test(use->_idx) &&
2695          (use->is_If() || use->is_CMove() || use->is_Bool()) &&
2696          use->in(1) == n)  {
2697       worklist.push(use);
2698     }
2699   }
2700   if (worklist.size() > 0) {
2701     // clone "n" and insert it between inputs of "n" and the use
2702     Node* n_clone = n->clone();
2703     loop->_body.push(n_clone);
2704     _igvn.register_new_node_with_optimizer(n_clone);
2705     set_ctrl(n_clone, get_ctrl(n));
2706     sink_list.push(n_clone);
2707     not_peel <<= n_clone->_idx;  // add n_clone to not_peel set.
2708 #if !defined(PRODUCT)
2709     if (TracePartialPeeling) {
2710       tty->print_cr("special not_peeled cloning old: %d new: %d", n->_idx, n_clone->_idx);
2711     }
2712 #endif
2713     while( worklist.size() ) {
2714       Node *use = worklist.pop();
2715       _igvn.rehash_node_delayed(use);
2716       for (uint j = 1; j < use->req(); j++) {
2717         if (use->in(j) == n) {
2718           use->set_req(j, n_clone);
2719         }
2720       }
2721     }
2722   }
2723 }
2724 
2725 
2726 //------------------------------ insert_phi_for_loop -------------------------------------
2727 // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
2728 void PhaseIdealLoop::insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp ) {
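       // PhiNode::make(lp, back_edge_val) initializes every data input of the
       // new phi to back_edge_val; the entry input is then overridden below.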
2729   Node *phi = PhiNode::make(lp, back_edge_val);
2730   phi->set_req(LoopNode::EntryControl, lp_entry_val);
2731   // Use existing phi if it already exists
2732   Node *hit = _igvn.hash_find_insert(phi);
2733   if( hit == NULL ) {
2734     _igvn.register_new_node_with_optimizer(phi);
2735     set_ctrl(phi, lp);
2736   } else {
2737     // Remove the new phi from the graph and use the hit
2738     _igvn.remove_dead_node(phi);
2739     phi = hit;
2740   }
2741   _igvn.replace_input_of(use, idx, phi);
2742 }
2743 
2744 #ifdef ASSERT
2745 //------------------------------ is_valid_loop_partition -------------------------------------
2746 // Validate the loop partition sets: peel and not_peel
2747 bool PhaseIdealLoop::is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list,
2748                                               VectorSet& not_peel ) {
2749   uint i;
2750   // Check that peel_list entries are in the peel set
2751   for (i = 0; i < peel_list.size(); i++) {
2752     if (!peel.test(peel_list.at(i)->_idx)) {
2753       return false;
2754     }
2755   }
2756   // Check that all loop members are in either the peel set or the not_peel set
2757   for (i = 0; i < loop->_body.size(); i++ ) {
2758     Node *def  = loop->_body.at(i);
2759     uint di = def->_idx;
2760     // Check that peel set elements are in peel_list
2761     if (peel.test(di)) {
2762       if (not_peel.test(di)) {
2763         return false;
2764       }
2765       // Must be in peel_list also
2766       bool found = false;
2767       for (uint j = 0; j < peel_list.size(); j++) {
2768         if (peel_list.at(j)->_idx == di) {
2769           found = true;
2770           break;
2771         }
2772       }
2773       if (!found) {
2774         return false;
2775       }
2776     } else if (not_peel.test(di)) {
2777       if (peel.test(di)) {
2778         return false;
2779       }
2780     } else {
2781       return false;
2782     }
2783   }
2784   return true;
2785 }
2786 
2787 //------------------------------ is_valid_clone_loop_exit_use -------------------------------------
2788 // Ensure a use outside of the loop is of the right form
2789 bool PhaseIdealLoop::is_valid_clone_loop_exit_use( IdealLoopTree *loop, Node* use, uint exit_idx) {
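       // Expected shape: "use" is a phi on a two-input region, the region's
       // input at exit_idx is an IfTrue/IfFalse/JumpProj, and that projection's
       // test is inside the loop.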
2790   Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
2791   return (use->is_Phi() &&
2792           use_c->is_Region() && use_c->req() == 3 &&
2793           (use_c->in(exit_idx)->Opcode() == Op_IfTrue ||
2794            use_c->in(exit_idx)->Opcode() == Op_IfFalse ||
2795            use_c->in(exit_idx)->Opcode() == Op_JumpProj) &&
2796           loop->is_member( get_loop( use_c->in(exit_idx)->in(0) ) ) );
2797 }
2798 
2799 //------------------------------ is_valid_clone_loop_form -------------------------------------
2800 // Ensure that all uses outside of the loop are of the right form
2801 bool PhaseIdealLoop::is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list,
2802                                                uint orig_exit_idx, uint clone_exit_idx) {
2803   uint len = peel_list.size();
2804   for (uint i = 0; i < len; i++) {
2805     Node *def = peel_list.at(i);
2806 
2807     for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
2808       Node *use = def->fast_out(j);
2809       Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
2810       if (!loop->is_member(get_loop(use_c))) {
2811         // use is not in the loop, check for correct structure
2812         if (use->in(0) == def) {
2813           // Okay
2814         } else if (!is_valid_clone_loop_exit_use(loop, use, orig_exit_idx)) {
2815           return false;
2816         }
2817       }
2818     }
2819   }
2820   return true;
2821 }
2822 #endif
2823 
2824 //------------------------------ partial_peel -------------------------------------
2825 // Partially peel (aka loop rotation) the top portion of a loop (called
2826 // the peel section below) by cloning it and placing one copy just before
2827 // the new loop head and the other copy at the bottom of the new loop.
2828 //
2829 //    before                       after                where it came from
2830 //
2831 //    stmt1                        stmt1
2832 //  loop:                          stmt2                     clone
2833 //    stmt2                        if condA goto exitA       clone
2834 //    if condA goto exitA        new_loop:                   new
2835 //    stmt3                        stmt3                     clone
2836 //    if !condB goto loop          if condB goto exitB       clone
2837 //  exitB:                         stmt2                     orig
2838 //    stmt4                        if !condA goto new_loop   orig
2839 //  exitA:                         goto exitA
2840 //                               exitB:
2841 //                                 stmt4
2842 //                               exitA:
2843 //
2844 // Step 1: find the cut point: an exit test on a probable
2845 //         induction variable.
2846 // Step 2: schedule (with cloning) operations in the peel
2847 //         section that can be executed after the cut into
2848 //         the section that is not peeled.  This may need
2849 //         to clone operations into exit blocks.  For
2850 //         instance, a reference to A[i] in the not-peel
2851 //         section and a reference to B[i] in an exit block
2852 //         may cause a left-shift of i by 2 to be placed
2853 //         in the peel block.  This step will clone the left
2854 //         shift into the exit block and sink the left shift
2855 //         from the peel to the not-peel section.
2856 // Step 3: clone the loop, retarget the control, and insert
2857 //         phis for values that are live across the new loop
2858 //         head.  This is very dependent on the graph structure
2859 //         from clone_loop.  It creates region nodes for
2860 //         exit control and associated phi nodes for values that
2861 //         flow out of the loop through that exit.  The region
2862 //         node is dominated by the clone's control projection.
2863 //         So the clone's peel section is placed before the
2864 //         new loop head, and the clone's not-peel section
2865 //         forms the top part of the new loop.  The original
2866 //         peel section forms the tail of the new loop.
2867 // Step 4: update the dominator tree and recompute the
2868 //         dominator depth.
2869 //
2870 //                   orig
2871 //
2872 //                   stmt1
2873 //                     |
2874 //                     v
2875 //               loop predicate
2876 //                     |
2877 //                     v
2878 //                   loop<----+
2879 //                     |      |
2880 //                   stmt2    |
2881 //                     |      |
2882 //                     v      |
2883 //                    ifA     |
2884 //                   / |      |
2885 //                  v  v      |
2886 //               false true   ^  <-- last_peel
2887 //               /     |      |
2888 //              /   ===|==cut |
2889 //             /     stmt3    |  <-- first_not_peel
2890 //            /        |      |
2891 //            |        v      |
2892 //            v       ifB     |
2893 //          exitA:   / \      |
2894 //                  /   \     |
2895 //                 v     v    |
2896 //               false true   |
2897 //               /       \    |
2898 //              /         ----+
2899 //             |
2900 //             v
2901 //           exitB:
2902 //           stmt4
2903 //
2904 //
2905 //            after clone loop
2906 //
2907 //                   stmt1
2908 //                     |
2909 //                     v
2910 //               loop predicate
2911 //                 /       \
2912 //        clone   /         \   orig
2913 //               /           \
2914 //              /             \
2915 //             v               v
2916 //   +---->loop                loop<----+
2917 //   |      |                    |      |
2918 //   |    stmt2                stmt2    |
2919 //   |      |                    |      |
2920 //   |      v                    v      |
2921 //   |      ifA                 ifA     |
2922 //   |      | \                / |      |
2923 //   |      v  v              v  v      |
2924 //   ^    true  false      false true   ^  <-- last_peel
2925 //   |      |   ^   \       /    |      |
2926 //   | cut==|==  \   \     /  ===|==cut |
2927 //   |    stmt3   \   \   /    stmt3    |  <-- first_not_peel
2928 //   |      |    dom   | |       |      |
2929 //   |      v      \  1v v2      v      |
2930 //   |      ifB     regionA     ifB     |
2931 //   |      / \        |       / \      |
2932 //   |     /   \       v      /   \     |
2933 //   |    v     v    exitA:  v     v    |
2934 //   |    true  false      false true   |
2935 //   |    /     ^   \      /       \    |
2936 //   +----       \   \    /         ----+
2937 //               dom  \  /
2938 //                 \  1v v2
2939 //                  regionB
2940 //                     |
2941 //                     v
2942 //                   exitB:
2943 //                   stmt4
2944 //
2945 //
2946 //           after partial peel
2947 //
2948 //                  stmt1
2949 //                     |
2950 //                     v
2951 //               loop predicate
2952 //                 /
2953 //        clone   /             orig
2954 //               /          TOP
2955 //              /             \
2956 //             v               v
2957 //    TOP->loop                loop----+
2958 //          |                    |      |
2959 //        stmt2                stmt2    |
2960 //          |                    |      |
2961 //          v                    v      |
2962 //          ifA                 ifA     |
2963 //          | \                / |      |
2964 //          v  v              v  v      |
2965 //        true  false      false true   |     <-- last_peel
2966 //          |   ^   \       /    +------|---+
2967 //  +->newloop   \   \     /  === ==cut |   |
2968 //  |     stmt3   \   \   /     TOP     |   |
2969 //  |       |    dom   | |      stmt3   |   | <-- first_not_peel
2970 //  |       v      \  1v v2      v      |   |
2971 //  |       ifB     regionA     ifB     ^   v
2972 //  |       / \        |       / \      |   |
2973 //  |      /   \       v      /   \     |   |
2974 //  |     v     v    exitA:  v     v    |   |
2975 //  |     true  false      false true   |   |
2976 //  |     /     ^   \      /       \    |   |
2977 //  |    |       \   \    /         v   |   |
2978 //  |    |       dom  \  /         TOP  |   |
2979 //  |    |         \  1v v2             |   |
2980 //  ^    v          regionB             |   |
2981 //  |    |             |                |   |
2982 //  |    |             v                ^   v
2983 //  |    |           exitB:             |   |
2984 //  |    |           stmt4              |   |
2985 //  |    +------------>-----------------+   |
2986 //  |                                       |
2987 //  +-----------------<---------------------+
2988 //
2989 //
2990 //              final graph
2991 //
2992 //                  stmt1
2993 //                    |
2994 //                    v
2995 //               loop predicate
2996 //                    |
2997 //                    v
2998 //                  stmt2 clone
2999 //                    |
3000 //                    v
3001 //         ........> ifA clone
3002 //         :        / |
3003 //        dom      /  |
3004 //         :      v   v
3005 //         :  false   true
3006 //         :  |       |
3007 //         :  |       v
3008 //         :  |    newloop<-----+
3009 //         :  |        |        |
3010 //         :  |     stmt3 clone |
3011 //         :  |        |        |
3012 //         :  |        v        |
3013 //         :  |       ifB       |
3014 //         :  |      / \        |
3015 //         :  |     v   v       |
3016 //         :  |  false true     |
3017 //         :  |   |     |       |
3018 //         :  |   v    stmt2    |
3019 //         :  | exitB:  |       |
3020 //         :  | stmt4   v       |
3021 //         :  |       ifA orig  |
3022 //         :  |      /  \       |
3023 //         :  |     /    \      |
3024 //         :  |    v     v      |
3025 //         :  |  false  true    |
3026 //         :  |  /        \     |
3027 //         :  v  v         -----+
3028 //          RegionA
3029 //             |
3030 //             v
3031 //           exitA
3032 //
3033 bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
3034 
3035   assert(!loop->_head->is_CountedLoop(), "Non-counted loop only");
3036   if (!loop->_head->is_Loop()) {
3037     return false;
       }
3038 
3039   LoopNode *head  = loop->_head->as_Loop();
3040 
3041   if (head->is_partial_peel_loop() || head->partial_peel_has_failed()) {
3042     return false;
3043   }
3044 
3045   // Check for complex exit control
3046   for(uint ii = 0; ii < loop->_body.size(); ii++ ) {
3047     Node *n = loop->_body.at(ii);
3048     int opc = n->Opcode();
3049     if (n->is_Call()        ||
3050         opc == Op_Catch     ||
3051         opc == Op_CatchProj ||
3052         opc == Op_Jump      ||
3053         opc == Op_JumpProj) {
3054 #if !defined(PRODUCT)
3055       if (TracePartialPeeling) {
3056         tty->print_cr("\nExit control too complex: lp: %d", head->_idx);
3057       }
3058 #endif
3059       return false;
3060     }
3061   }
3062 
3063   int dd = dom_depth(head);
3064 
3065   // Step 1: find cut point
3066 
3067   // Walk up dominators to loop head looking for first loop exit
3068   // which is executed on every path thru loop.
3069   IfNode *peel_if = NULL;
3070   IfNode *peel_if_cmpu = NULL;
3071 
3072   Node *iff = loop->tail();
3073   while( iff != head ) {
3074     if( iff->is_If() ) {
3075       Node *ctrl = get_ctrl(iff->in(1));
3076       if (ctrl->is_top()) return false; // Dead test on live IF.
3077       // If loop-varying exit-test, check for induction variable
3078       if( loop->is_member(get_loop(ctrl)) &&
3079           loop->is_loop_exit(iff) &&
3080           is_possible_iv_test(iff)) {
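             // iff->in(1) is the Bool node; the Bool's input 1 is the Cmp being tested.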
3081         Node* cmp = iff->in(1)->in(1);
3082         if (cmp->Opcode() == Op_CmpI) {
3083           peel_if = iff->as_If();
3084         } else {
3085           assert(cmp->Opcode() == Op_CmpU, "must be CmpI or CmpU");
3086           peel_if_cmpu = iff->as_If();
3087         }
3088       }
3089     }
3090     iff = idom(iff);
3091   }
3092   // Prefer signed compare over unsigned compare.
3093   IfNode* new_peel_if = NULL;
3094   if (peel_if == NULL) {
3095     if (!PartialPeelAtUnsignedTests || peel_if_cmpu == NULL) {
3096       return false;   // No peel point found
3097     }
3098     new_peel_if = insert_cmpi_loop_exit(peel_if_cmpu, loop);
3099     if (new_peel_if == NULL) {
3100       return false;   // No peel point found
3101     }
3102     peel_if = new_peel_if;
3103   }
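       // last_peel is the projection of peel_if that stays inside the loop;
       // first_not_peeled is the node that projection controls inside the loop.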
3104   Node* last_peel        = stay_in_loop(peel_if, loop);
3105   Node* first_not_peeled = stay_in_loop(last_peel, loop);
3106   if (first_not_peeled == NULL || first_not_peeled == head) {
3107     return false;
3108   }
3109 
3110 #if !defined(PRODUCT)
3111   if (TraceLoopOpts) {
3112     tty->print("PartialPeel  ");
3113     loop->dump_head();
3114   }
3115 
3116   if (TracePartialPeeling) {
3117     tty->print_cr("before partial peel one iteration");
3118     Node_List wl;
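         // Collect the idom chain from the backedge (head->in(2)) up to the
         // loop head, then dump it from the head down, marking the cut point.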
3119     Node* t = head->in(2);
3120     while (true) {
3121       wl.push(t);
3122       if (t == head) break;
3123       t = idom(t);
3124     }
3125     while (wl.size() > 0) {
3126       Node* tt = wl.pop();
3127       tt->dump();
3128       if (tt == last_peel) tty->print_cr("-- cut --");
3129     }
3130   }
3131 #endif
3132   ResourceArea *area = Thread::current()->resource_area();
3133   VectorSet peel(area);
3134   VectorSet not_peel(area);
3135   Node_List peel_list(area);
3136   Node_List worklist(area);
3137   Node_List sink_list(area);
3138 
3139   // Set of cfg nodes to peel are those that are executable from
3140   // the head through last_peel.
3141   assert(worklist.size() == 0, "should be empty");
3142   worklist.push(head);
3143   peel.set(head->_idx);
3144   while (worklist.size() > 0) {
3145     Node *n = worklist.pop();
3146     if (n != last_peel) {
3147       for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3148         Node* use = n->fast_out(j);
3149         if (use->is_CFG() &&
3150             loop->is_member(get_loop(use)) &&
3151             !peel.test_set(use->_idx)) {
3152           worklist.push(use);
3153         }
3154       }
3155     }
3156   }
3157 
3158   // Set of non-cfg nodes to peel are those that are control
3159   // dependent on the cfg nodes.
3160   uint i;
3161   for(i = 0; i < loop->_body.size(); i++ ) {
3162     Node *n = loop->_body.at(i);
3163     Node *n_c = has_ctrl(n) ? get_ctrl(n) : n;
3164     if (peel.test(n_c->_idx)) {
3165       peel.set(n->_idx);
3166     } else {
3167       not_peel.set(n->_idx);
3168     }
3169   }
3170 
3171   // Step 2: move operations from the peeled section down into the
3172   //         not-peeled section
3173 
3174   // Get a post order schedule of nodes in the peel region
3175   // The result is returned in the right-most argument (peel_list).
3176   scheduled_nodelist(loop, peel, peel_list );
3177 
3178   assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");
3179 
3180   // For future check for too many new phis
3181   uint old_phi_cnt = 0;
3182   for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
3183     Node* use = head->fast_out(j);
3184     if (use->is_Phi()) old_phi_cnt++;
3185   }
3186 
3187 #if !defined(PRODUCT)
3188   if (TracePartialPeeling) {
3189     tty->print_cr("\npeeled list");
3190   }
3191 #endif
3192 
3193   // Evacuate nodes in peel region into the not_peeled region if possible
3194   uint new_phi_cnt = 0;
3195   uint cloned_for_outside_use = 0;
3196   for (i = 0; i < peel_list.size();) {
3197     Node* n = peel_list.at(i);
3198 #if !defined(PRODUCT)
3199     if (TracePartialPeeling) n->dump();
3200 #endif
3201     bool incr = true;
3202     if ( !n->is_CFG() ) {
3203 
3204       if ( has_use_in_set(n, not_peel) ) {
3205 
3206         // If not used internal to the peeled region,
3207         // move "n" from peeled to not_peeled region.
3208 
3209         if ( !has_use_internal_to_set(n, peel, loop) ) {
3210 
3211           // if not pinned and not a load (which may be anti-dependent on a store)
3212           // and not a CMove (Matcher expects only bool->cmove).
3213           if (n->in(0) == NULL && !n->is_Load() && !n->is_CMove() && n->Opcode() != Op_ShenandoahWBMemProj) {
3214             cloned_for_outside_use += clone_for_use_outside_loop( loop, n, worklist );
3215             sink_list.push(n);
3216             peel     >>= n->_idx; // delete n from peel set.
3217             not_peel <<= n->_idx; // add n to not_peel set.
3218             peel_list.remove(i);
3219             incr = false;
3220 #if !defined(PRODUCT)
3221             if (TracePartialPeeling) {
3222               tty->print_cr("sink to not_peeled region: %d newbb: %d",
3223                             n->_idx, get_ctrl(n)->_idx);
3224             }
3225 #endif
3226           }
3227         } else {
3228           // Otherwise check for special def-use cases that span
3229           // the peel/not_peel boundary such as bool->if
3230           clone_for_special_use_inside_loop( loop, n, not_peel, sink_list, worklist );
3231           new_phi_cnt++;
3232         }
3233       }
3234     }
3235     if (incr) i++;
3236   }
3237 
3238   if (new_phi_cnt > old_phi_cnt + PartialPeelNewPhiDelta) {
3239 #if !defined(PRODUCT)
3240     if (TracePartialPeeling) {
3241       tty->print_cr("\nToo many new phis: %d  old %d new cmpi: %c",
3242                     new_phi_cnt, old_phi_cnt, new_peel_if != NULL?'T':'F');
3243     }
3244 #endif
3245     if (new_peel_if != NULL) {
3246       remove_cmpi_loop_exit(new_peel_if, loop);
3247     }
3248     // Inhibit more partial peeling on this loop
3249     assert(!head->is_partial_peel_loop(), "not partial peeled");
3250     head->mark_partial_peel_failed();
3251     if (cloned_for_outside_use > 0) {
3252       // Terminate this round of loop opts because
3253       // the graph outside this loop was changed.
3254       C->set_major_progress();
3255       return true;
3256     }
3257     return false;
3258   }
3259 
3260   // Step 3: clone loop, retarget control, and insert new phis
3261 
3262   // Create new loop head for new phis and to hang
3263   // the nodes being moved (sunk) from the peel region.
3264   LoopNode* new_head = new LoopNode(last_peel, last_peel);
3265   new_head->set_unswitch_count(head->unswitch_count()); // Preserve
3266   _igvn.register_new_node_with_optimizer(new_head);
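       // Splice new_head in between last_peel and first_not_peeled.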
3267   assert(first_not_peeled->in(0) == last_peel, "last_peel <- first_not_peeled");
3268   _igvn.replace_input_of(first_not_peeled, 0, new_head);
3269   set_loop(new_head, loop);
3270   loop->_body.push(new_head);
3271   not_peel.set(new_head->_idx);
3272   set_idom(new_head, last_peel, dom_depth(first_not_peeled));
3273   set_idom(first_not_peeled, new_head, dom_depth(first_not_peeled));
3274 
3275   while (sink_list.size() > 0) {
3276     Node* n = sink_list.pop();
3277     set_ctrl(n, new_head);
3278   }
3279 
3280   assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");
3281 
3282   clone_loop(loop, old_new, dd, IgnoreStripMined);
3283 
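       // clone_loop merges each loop exit with its cloned counterpart through a
       // two-input Region: the cloned loop's exit feeds input 1 and the original
       // loop's exit feeds input 2.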
3284   const uint clone_exit_idx = 1;
3285   const uint orig_exit_idx  = 2;
3286   assert(is_valid_clone_loop_form( loop, peel_list, orig_exit_idx, clone_exit_idx ), "bad clone loop");
3287 
3288   Node* head_clone             = old_new[head->_idx];
3289   LoopNode* new_head_clone     = old_new[new_head->_idx]->as_Loop();
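       // in(2) of the cloned head is its backedge, i.e. the clone of the original loop tail.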
3290   Node* orig_tail_clone        = head_clone->in(2);
3291 
3292   // Add phi if "def" node is in peel set and "use" is not
3293 
3294   for(i = 0; i < peel_list.size(); i++ ) {
3295     Node *def  = peel_list.at(i);
3296     if (!def->is_CFG()) {
3297       for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
3298         Node *use = def->fast_out(j);
3299         if (has_node(use) && use->in(0) != C->top() &&
3300             (!peel.test(use->_idx) ||
3301              (use->is_Phi() && use->in(0) == head)) ) {
3302           worklist.push(use);
3303         }
3304       }
3305       while( worklist.size() ) {
3306         Node *use = worklist.pop();
3307         for (uint j = 1; j < use->req(); j++) {
3308           Node* n = use->in(j);
3309           if (n == def) {
3310 
3311             // "def" is in peel set, "use" is not in peel set
3312             // or "use" is in the entry boundary (a phi) of the peel set
3313 
3314             Node* use_c = has_ctrl(use) ? get_ctrl(use) : use;
3315 
3316             if ( loop->is_member(get_loop( use_c )) ) {
3317               // use is in loop
3318               if (old_new[use->_idx] != NULL) { // null for dead code
3319                 Node* use_clone = old_new[use->_idx];
3320                 _igvn.replace_input_of(use, j, C->top());
3321                 insert_phi_for_loop( use_clone, j, old_new[def->_idx], def, new_head_clone );
3322               }
3323             } else {
3324               assert(is_valid_clone_loop_exit_use(loop, use, orig_exit_idx), "clone loop format");
3325               // use is not in the loop, check if the live range includes the cut
3326               Node* lp_if = use_c->in(orig_exit_idx)->in(0);
3327               if (not_peel.test(lp_if->_idx)) {
3328                 assert(j == orig_exit_idx, "use from original loop");
3329                 insert_phi_for_loop( use, clone_exit_idx, old_new[def->_idx], def, new_head_clone );
3330               }
3331             }
3332           }
3333         }
3334       }
3335     }
3336   }
3337 
3338   // Step 3b: retarget control
3339 
3340   // Redirect control to the new loop head if a cloned node in
3341   // the not_peeled region has control that points into the peeled region.
3342 // This is necessary because the cloned peeled region will be outside
3343   // the loop.
3344   //                            from    to
3345   //          cloned-peeled    <---+
3346   //    new_head_clone:            |    <--+
3347   //          cloned-not_peeled  in(0)    in(0)
3348   //          orig-peeled
3349 
3350   for(i = 0; i < loop->_body.size(); i++ ) {
3351     Node *n = loop->_body.at(i);
3352     if (!n->is_CFG()           && n->in(0) != NULL        &&
3353         not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) {
3354       Node* n_clone = old_new[n->_idx];
3355       _igvn.replace_input_of(n_clone, 0, new_head_clone);
3356     }
3357   }
3358 
3359   // Backedge of the surviving new_head (the clone) is original last_peel
3360   _igvn.replace_input_of(new_head_clone, LoopNode::LoopBackControl, last_peel);
3361 
3362   // Cut first node in original not_peel set
3363   _igvn.rehash_node_delayed(new_head);                     // Multiple edge updates:
3364   new_head->set_req(LoopNode::EntryControl,    C->top());  //   use rehash_node_delayed / set_req instead of
3365   new_head->set_req(LoopNode::LoopBackControl, C->top());  //   multiple replace_input_of calls
3366 
3367   // Copy head_clone back-branch info to original head
3368   // and remove original head's loop entry and
3369   // clone head's back-branch
3370   _igvn.rehash_node_delayed(head); // Multiple edge updates
3371   head->set_req(LoopNode::EntryControl,    head_clone->in(LoopNode::LoopBackControl));
3372   head->set_req(LoopNode::LoopBackControl, C->top());
3373   _igvn.replace_input_of(head_clone, LoopNode::LoopBackControl, C->top());
3374 
3375   // Similarly modify the phis
3376   for (DUIterator_Fast kmax, k = head->fast_outs(kmax); k < kmax; k++) {
3377     Node* use = head->fast_out(k);
3378     if (use->is_Phi() && use->outcnt() > 0) {
3379       Node* use_clone = old_new[use->_idx];
3380       _igvn.rehash_node_delayed(use); // Multiple edge updates
3381       use->set_req(LoopNode::EntryControl,    use_clone->in(LoopNode::LoopBackControl));
3382       use->set_req(LoopNode::LoopBackControl, C->top());
3383       _igvn.replace_input_of(use_clone, LoopNode::LoopBackControl, C->top());
3384     }
3385   }
3386 
3387   // Step 4: update dominator tree and dominator depth
3388 
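       // The original head is now entered from the cloned backedge (rewired
       // above), so the cloned original tail becomes its immediate dominator.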
3389   set_idom(head, orig_tail_clone, dd);
3390   recompute_dom_depth();
3391 
3392   // Inhibit more partial peeling on this loop
3393   new_head_clone->set_partial_peel_loop();
3394   C->set_major_progress();
3395   loop->record_for_igvn();
3396 
3397 #if !defined(PRODUCT)
3398   if (TracePartialPeeling) {
3399     tty->print_cr("\nafter partial peel one iteration");
3400     Node_List wl(area);
3401     Node* t = last_peel;
3402     while (true) {
3403       wl.push(t);
3404       if (t == head_clone) break;
3405       t = idom(t);
3406     }
3407     while (wl.size() > 0) {
3408       Node* tt = wl.pop();
3409       if (tt == head) tty->print_cr("orig head");
3410       else if (tt == new_head_clone) tty->print_cr("new head");
3411       else if (tt == head_clone) tty->print_cr("clone head");
3412       tt->dump();
3413     }
3414   }
3415 #endif
3416   return true;
3417 }
3418 
3419 //------------------------------reorg_offsets----------------------------------
3420 // Reorganize offset computations to lower register pressure.  Mostly
3421 // prevent loop-fallout uses of the pre-incremented trip counter (which are
3422 // then alive with the post-incremented trip counter forcing an extra
3423 // register move)
3424 void PhaseIdealLoop::reorg_offsets(IdealLoopTree *loop) {
3425   // Perform it only for canonical counted loops.
3426   // Loop's shape could be messed up by iteration_split_impl.
3427   if (!loop->_head->is_CountedLoop())
3428     return;
3429   if (!loop->_head->as_Loop()->is_valid_counted_loop())
3430     return;
3431 
3432   CountedLoopNode *cl = loop->_head->as_CountedLoop();
3433   CountedLoopEndNode *cle = cl->loopexit();
3434   Node *exit = cle->proj_out(false);
3435   Node *phi = cl->phi();
3436 
3437   // Check for the special case of folks using the pre-incremented
3438   // trip-counter on the fall-out path (forces the pre-incremented
3439   // and post-incremented trip counter to be live at the same time).
3440   // Fix this by adjusting to use the post-increment trip counter.
3441 
3442   bool progress = true;
3443   while (progress) {
3444     progress = false;
3445     for (DUIterator_Fast imax, i = phi->fast_outs(imax); i < imax; i++) {
3446       Node* use = phi->fast_out(i);   // User of trip-counter
3447       if (!has_ctrl(use))  continue;
3448       Node *u_ctrl = get_ctrl(use);
3449       if (use->is_Phi()) {
3450         u_ctrl = NULL;
3451         for (uint j = 1; j < use->req(); j++)
3452           if (use->in(j) == phi)
3453             u_ctrl = dom_lca(u_ctrl, use->in(0)->in(j));
3454       }
3455       IdealLoopTree *u_loop = get_loop(u_ctrl);
3456       // Look for loop-invariant use
3457       if (u_loop == loop) continue;
3458       if (loop->is_member(u_loop)) continue;
3459       // Check that use is live out the bottom.  Assuming the trip-counter
3460       // update is right at the bottom, uses in the middle of the loop are ok.
3461       if (dom_lca(exit, u_ctrl) != exit) continue;
3462       // Hit!  Refactor use to use the post-incremented tripcounter.
3463       // Compute a post-increment tripcounter.
3464       Node* c = exit;
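           // For a strip-mined loop, if the use is also outside the outer strip
           // mined loop, attach the new nodes at the outer loop exit instead.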
3465       if (cl->is_strip_mined()) {
3466         IdealLoopTree* outer_loop = get_loop(cl->outer_loop());
3467         if (!outer_loop->is_member(u_loop)) {
3468           c = cl->outer_loop_exit();
3469         }
3470       }
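           // Recompute the pre-increment value as (incr - stride) from the
           // post-incremented counter.  The Opaque2 wrapper keeps igvn from
           // folding the subtraction back into the phi, so only the
           // post-incremented value needs to stay live past the loop exit.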
3471       Node *opaq = new Opaque2Node(C, cle->incr());
3472       register_new_node(opaq, c);
3473       Node *neg_stride = _igvn.intcon(-cle->stride_con());
3474       set_ctrl(neg_stride, C->root());
3475       Node *post = new AddINode(opaq, neg_stride);
3476       register_new_node(post, c);
3477       _igvn.rehash_node_delayed(use);
3478       for (uint j = 1; j < use->req(); j++) {
3479         if (use->in(j) == phi)
3480           use->set_req(j, post);
3481       }
3482       // Since DU info changed, rerun loop
3483       progress = true;
3484       break;
3485     }
3486   }
3487 
3488 }