/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"

//------------------------------is_loop_exit-----------------------------------
// Given an IfNode, return the loop-exiting projection or NULL if both
// arms remain in the loop.
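// For example, an IfNode implementing a 'break' has one projection that
// stays in the loop and one that jumps past it; the latter (loop-exiting)
// projection is what gets returned here.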
Node *IdealLoopTree::is_loop_exit(Node *iff) const {
  if( iff->outcnt() != 2 ) return NULL; // Ignore partially dead tests
  PhaseIdealLoop *phase = _phase;
  // Test is an IfNode, has 2 projections.  If BOTH are in the loop
  // we need loop unswitching instead of peeling.
  if( !is_member(phase->get_loop( iff->raw_out(0) )) )
    return iff->raw_out(0);
  if( !is_member(phase->get_loop( iff->raw_out(1) )) )
    return iff->raw_out(1);
  return NULL;
}


//=============================================================================


//------------------------------record_for_igvn----------------------------
// Put loop body on igvn work list
void IdealLoopTree::record_for_igvn() {
  for( uint i = 0; i < _body.size(); i++ ) {
    Node *n = _body.at(i);
    _phase->_igvn._worklist.push(n);
  }
}

//------------------------------compute_profile_trip_cnt----------------------------
// Compute loop trip count from profile data as
//    (backedge_count + loop_exit_count) / loop_exit_count
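// For example, a loop whose backedge was taken 900 times across 100
// observed loop exits yields (900 + 100) / 100 = 10 expected trips
// per entry.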
void IdealLoopTree::compute_profile_trip_cnt( PhaseIdealLoop *phase ) {
  if (!_head->is_CountedLoop()) {
    return;
  }
  CountedLoopNode* head = _head->as_CountedLoop();
  if (head->profile_trip_cnt() != COUNT_UNKNOWN) {
    return; // Already computed
  }
  float trip_cnt = (float)max_jint; // default is big

  Node* back = head->in(LoopNode::LoopBackControl);
  while (back != head) {
    if ((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
        back->in(0) &&
        back->in(0)->is_If() &&
        back->in(0)->as_If()->_fcnt != COUNT_UNKNOWN &&
        back->in(0)->as_If()->_prob != PROB_UNKNOWN) {
      break;
    }
    back = phase->idom(back);
  }
  if (back != head) {
    assert((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
           back->in(0), "if-projection exists");
    IfNode* back_if = back->in(0)->as_If();
    float loop_back_cnt = back_if->_fcnt * back_if->_prob;

    // Now compute a loop exit count
    float loop_exit_cnt = 0.0f;
    for( uint i = 0; i < _body.size(); i++ ) {
      Node *n = _body[i];
      if( n->is_If() ) {
        IfNode *iff = n->as_If();
        if( iff->_fcnt != COUNT_UNKNOWN && iff->_prob != PROB_UNKNOWN ) {
          Node *exit = is_loop_exit(iff);
          if( exit ) {
            float exit_prob = iff->_prob;
            if (exit->Opcode() == Op_IfFalse) exit_prob = 1.0 - exit_prob;
            if (exit_prob > PROB_MIN) {
              float exit_cnt = iff->_fcnt * exit_prob;
              loop_exit_cnt += exit_cnt;
            }
          }
        }
      }
    }
    if (loop_exit_cnt > 0.0f) {
      trip_cnt = (loop_back_cnt + loop_exit_cnt) / loop_exit_cnt;
    } else {
      // No exit count, so fall back on the raw backedge count.
      trip_cnt = loop_back_cnt;
    }
  }
#ifndef PRODUCT
  if (TraceProfileTripCount) {
    tty->print_cr("compute_profile_trip_cnt  lp: %d cnt: %f\n", head->_idx, trip_cnt);
  }
#endif
  head->set_profile_trip_cnt(trip_cnt);
}

//---------------------is_invariant_addition-----------------------------
// Return nonzero index of invariant operand for an Add or Sub
// of (nonconstant) invariant and variant values. Helper for reassociate_invariants.
int IdealLoopTree::is_invariant_addition(Node* n, PhaseIdealLoop *phase) {
  int op = n->Opcode();
  if (op == Op_AddI || op == Op_SubI) {
    bool in1_invar = this->is_invariant(n->in(1));
    bool in2_invar = this->is_invariant(n->in(2));
    if (in1_invar && !in2_invar) return 1;
    if (!in1_invar && in2_invar) return 2;
  }
  return 0;
}

//---------------------reassociate_add_sub-----------------------------
// Reassociate invariant add and subtract expressions:
//
// inv1 + (x + inv2)  =>  ( inv1 + inv2) + x
// (x + inv2) + inv1  =>  ( inv1 + inv2) + x
// inv1 + (x - inv2)  =>  ( inv1 - inv2) + x
// inv1 - (inv2 - x)  =>  ( inv1 - inv2) + x
// (x + inv2) - inv1  =>  (-inv1 + inv2) + x
// (x - inv2) + inv1  =>  ( inv1 - inv2) + x
// (x - inv2) - inv1  =>  (-inv1 - inv2) + x
// inv1 + (inv2 - x)  =>  ( inv1 + inv2) - x
// inv1 - (x - inv2)  =>  ( inv1 + inv2) - x
// (inv2 - x) + inv1  =>  ( inv1 + inv2) - x
// (inv2 - x) - inv1  =>  (-inv1 + inv2) - x
// inv1 - (x + inv2)  =>  ( inv1 - inv2) - x
//
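// Each rewrite gathers the two invariant terms into a single expression
// that can be hoisted out of the loop, leaving only one add or subtract
// involving the variant term x inside the loop.  For example, with
// loop-invariant inv1 and inv2, 'inv1 + (x + inv2)' becomes
// '(inv1 + inv2) + x' and '(inv1 + inv2)' is computed once, ahead of the loop.
//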
Node* IdealLoopTree::reassociate_add_sub(Node* n1, PhaseIdealLoop *phase) {
  if ((!n1->is_Add() && !n1->is_Sub()) || n1->outcnt() == 0) return NULL;
  if (is_invariant(n1)) return NULL;
  int inv1_idx = is_invariant_addition(n1, phase);
  if (!inv1_idx) return NULL;
  // Don't mess with add of constant (igvn moves them to expression tree root.)
  if (n1->is_Add() && n1->in(2)->is_Con()) return NULL;
  Node* inv1 = n1->in(inv1_idx);
  Node* n2 = n1->in(3 - inv1_idx);
  int inv2_idx = is_invariant_addition(n2, phase);
  if (!inv2_idx) return NULL;
  Node* x    = n2->in(3 - inv2_idx);
  Node* inv2 = n2->in(inv2_idx);

  bool neg_x    = n2->is_Sub() && inv2_idx == 1;
  bool neg_inv2 = n2->is_Sub() && inv2_idx == 2;
  bool neg_inv1 = n1->is_Sub() && inv1_idx == 2;
  if (n1->is_Sub() && inv1_idx == 1) {
    neg_x    = !neg_x;
    neg_inv2 = !neg_inv2;
  }
  Node* inv1_c = phase->get_ctrl(inv1);
  Node* inv2_c = phase->get_ctrl(inv2);
  Node* n_inv1;
  if (neg_inv1) {
    Node *zero = phase->_igvn.intcon(0);
    phase->set_ctrl(zero, phase->C->root());
    n_inv1 = new (phase->C, 3) SubINode(zero, inv1);
    phase->register_new_node(n_inv1, inv1_c);
  } else {
    n_inv1 = inv1;
  }
  Node* inv;
  if (neg_inv2) {
    inv = new (phase->C, 3) SubINode(n_inv1, inv2);
  } else {
    inv = new (phase->C, 3) AddINode(n_inv1, inv2);
  }
  phase->register_new_node(inv, phase->get_early_ctrl(inv));

  Node* addx;
  if (neg_x) {
    addx = new (phase->C, 3) SubINode(inv, x);
  } else {
    addx = new (phase->C, 3) AddINode(x, inv);
  }
  phase->register_new_node(addx, phase->get_ctrl(x));
  phase->_igvn.replace_node(n1, addx);
  return addx;
}

//---------------------reassociate_invariants-----------------------------
// Reassociate invariant expressions:
void IdealLoopTree::reassociate_invariants(PhaseIdealLoop *phase) {
  for (int i = _body.size() - 1; i >= 0; i--) {
    Node *n = _body.at(i);
    for (int j = 0; j < 5; j++) {
      Node* nn = reassociate_add_sub(n, phase);
      if (nn == NULL) break;
      n = nn; // again
    }
  }
}

//------------------------------policy_peeling---------------------------------
// Return TRUE or FALSE if the loop should be peeled or not.  Peel if we can
// make some loop-invariant test (usually a null-check) happen before the loop.
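// For example, peeling one iteration of a loop that null-checks a
// loop-invariant pointer leaves the copies of that check inside the loop
// dominated by the check in the peeled iteration, so they fold away
// (see peeled_dom_test_elim below).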
bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const {
  Node *test = ((IdealLoopTree*)this)->tail();
  int  body_size = ((IdealLoopTree*)this)->_body.size();
  int  uniq      = phase->C->unique();
  // Peeling does loop cloning which can result in O(N^2) node construction
  if( body_size > 255 /* Prevent overflow for large body_size */
      || (body_size * body_size + uniq > MaxNodeLimit) ) {
    return false;           // too large to safely clone
  }
  while( test != _head ) {      // Scan till run off top of loop
    if( test->is_If() ) {       // Test?
      Node *ctrl = phase->get_ctrl(test->in(1));
      if (ctrl->is_top())
        return false;           // Found dead test on live IF?  No peeling!
      // Standard IF only has one input value to check for loop invariance
      assert( test->Opcode() == Op_If || test->Opcode() == Op_CountedLoopEnd, "Check this code when new subtype is added");
      // Condition is not a member of this loop?
      if( !is_member(phase->get_loop(ctrl)) &&
          is_loop_exit(test) )
        return true;            // Found reason to peel!
    }
    // Walk up dominators to loop _head looking for test which is
    // executed on every path thru loop.
    test = phase->idom(test);
  }
  return false;
}

//------------------------------peeled_dom_test_elim---------------------------
// If we got the effect of peeling, either by actually peeling or by making
// a pre-loop which must execute at least once, we can remove all
// loop-invariant dominated tests in the main body.
void PhaseIdealLoop::peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new ) {
  bool progress = true;
  while( progress ) {
    progress = false;           // Reset for next iteration
    Node *prev = loop->_head->in(LoopNode::LoopBackControl);//loop->tail();
    Node *test = prev->in(0);
    while( test != loop->_head ) { // Scan till run off top of loop

      int p_op = prev->Opcode();
      if( (p_op == Op_IfFalse || p_op == Op_IfTrue) &&
          test->is_If() &&      // Test?
          !test->in(1)->is_Con() && // And not already obvious?
          // Condition is not a member of this loop?
          !loop->is_member(get_loop(get_ctrl(test->in(1))))){
        // Walk loop body looking for instances of this test
        for( uint i = 0; i < loop->_body.size(); i++ ) {
          Node *n = loop->_body.at(i);
          if( n->is_If() && n->in(1) == test->in(1) /*&& n != loop->tail()->in(0)*/ ) {
            // IfNode was dominated by version in peeled loop body
            progress = true;
            dominated_by( old_new[prev->_idx], n );
          }
        }
      }
      prev = test;
      test = idom(test);
    } // End of scan tests in loop

  } // End of while( progress )
}

//------------------------------do_peeling-------------------------------------
// Peel the first iteration of the given loop.
// Step 1: Clone the loop body.  The clone becomes the peeled iteration.
//         The pre-loop illegally has 2 control users (old & new loops).
// Step 2: Make the old-loop fall-in edges point to the peeled iteration.
//         Do this by making the old-loop fall-in edges act as if they came
//         around the loopback from the prior iteration (follow the old-loop
//         backedges) and then map to the new peeled iteration.  This leaves
//         the pre-loop with only 1 user (the new peeled iteration), but the
//         peeled-loop backedge has 2 users.
// Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
//         extra backedge user.
void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) {

  C->set_major_progress();
  // Peeling a 'main' loop in a pre/main/post situation obfuscates the
  // 'pre' loop from the main and the 'pre' can no longer have its
  // iterations adjusted.  Therefore, we need to declare this loop as
  // no longer a 'main' loop; it will need new pre and post loops before
  // we can do further RCE.
  Node *h = loop->_head;
  if( h->is_CountedLoop() ) {
    CountedLoopNode *cl = h->as_CountedLoop();
    assert(cl->trip_count() > 0, "peeling a fully unrolled loop");
    cl->set_trip_count(cl->trip_count() - 1);
    if( cl->is_main_loop() ) {
      cl->set_normal_loop();
#ifndef PRODUCT
      if( PrintOpto && VerifyLoopOptimizations ) {
        tty->print("Peeling a 'main' loop; resetting to 'normal' ");
        loop->dump_head();
      }
#endif
    }
  }

  // Step 1: Clone the loop body.  The clone becomes the peeled iteration.
  //         The pre-loop illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dom_depth(loop->_head) );


  // Step 2: Make the old-loop fall-in edges point to the peeled iteration.
  //         Do this by making the old-loop fall-in edges act as if they came
  //         around the loopback from the prior iteration (follow the old-loop
  //         backedges) and then map to the new peeled iteration.  This leaves
  //         the pre-loop with only 1 user (the new peeled iteration), but the
  //         peeled-loop backedge has 2 users.
  for (DUIterator_Fast jmax, j = loop->_head->fast_outs(jmax); j < jmax; j++) {
    Node* old = loop->_head->fast_out(j);
    if( old->in(0) == loop->_head && old->req() == 3 &&
        (old->is_Loop() || old->is_Phi()) ) {
      Node *new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx];
      if( !new_exit_value )     // Backedge value is ALSO loop invariant?
        // Then loop body backedge value remains the same.
        new_exit_value = old->in(LoopNode::LoopBackControl);
      _igvn.hash_delete(old);
      old->set_req(LoopNode::EntryControl, new_exit_value);
    }
  }


  // Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
  //         extra backedge user.
  Node *nnn = old_new[loop->_head->_idx];
  _igvn.hash_delete(nnn);
  nnn->set_req(LoopNode::LoopBackControl, C->top());
  for (DUIterator_Fast j2max, j2 = nnn->fast_outs(j2max); j2 < j2max; j2++) {
    Node* use = nnn->fast_out(j2);
    if( use->in(0) == nnn && use->req() == 3 && use->is_Phi() ) {
      _igvn.hash_delete(use);
      use->set_req(LoopNode::LoopBackControl, C->top());
    }
  }


  // Step 4: Correct dom-depth info.  Set to loop-head depth.
  int dd = dom_depth(loop->_head);
  set_idom(loop->_head, loop->_head->in(1), dd);
  for (uint j3 = 0; j3 < loop->_body.size(); j3++) {
    Node *old = loop->_body.at(j3);
    Node *nnn = old_new[old->_idx];
    if (!has_ctrl(nnn))
      set_idom(nnn, idom(nnn), dd-1);
    // While we're at it, remove any SafePoints from the peeled code
    if( old->Opcode() == Op_SafePoint ) {
      Node *nnn = old_new[old->_idx];
      lazy_replace(nnn,nnn->in(TypeFunc::Control));
    }
  }

  // Now force out all loop-invariant dominating tests.  The optimizer
  // finds some, but we _know_ they are all useless.
  peeled_dom_test_elim(loop,old_new);

  loop->record_for_igvn();
}

//------------------------------policy_maximally_unroll------------------------
// Return TRUE if the loop should be maximally unrolled: the trip count must
// be an exact, suitably small constant and the fully unrolled body must fit
// within the size limits.  Return FALSE otherwise.
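// For example, a loop with init 0, limit 12 and stride 3 has span 12 and an
// exact trip count of 4; a 20-node body expands to roughly 80 nodes, which
// is accepted as long as it stays within 4*LoopUnrollLimit and the overall
// node budget checked below.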
bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
  CountedLoopNode *cl = _head->as_CountedLoop();
  assert( cl->is_normal_loop(), "" );

  Node *init_n = cl->init_trip();
  Node *limit_n = cl->limit();

  // Non-constant bounds
  if( init_n   == NULL || !init_n->is_Con()  ||
      limit_n  == NULL || !limit_n->is_Con() ||
      // protect against stride not being a constant
      !cl->stride_is_con() ) {
    return false;
  }
  int init   = init_n->get_int();
  int limit  = limit_n->get_int();
  int span   = limit - init;
  int stride = cl->stride_con();

  if (init >= limit || stride > span) {
    // Return false (don't maximally unroll); the regular unroll/peel
    // route will make a small mess which CCP will fold away.
    return false;
  }
  uint trip_count = span/stride;   // trip_count can be greater than 2 Gig.
  assert( (int)trip_count*stride == span, "must divide evenly" );

  // Real policy: if we maximally unroll, does it get too big?
  // Allow the unrolled mess to get larger than standard loop
  // size.  After all, it will no longer be a loop.
  uint body_size    = _body.size();
  uint unroll_limit = (uint)LoopUnrollLimit * 4;
  assert( (intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits");
  cl->set_trip_count(trip_count);
  if( trip_count <= unroll_limit && body_size <= unroll_limit ) {
    uint new_body_size = body_size * trip_count;
    if (new_body_size <= unroll_limit &&
        body_size == new_body_size / trip_count &&
        // Unrolling can result in a large amount of node construction
        new_body_size < MaxNodeLimit - phase->C->unique()) {
      return true;    // maximally unroll
    }
  }

  return false;               // Do not maximally unroll
}


//------------------------------policy_unroll----------------------------------
// Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if
// the loop is a CountedLoop and the body is small enough.
bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {

  CountedLoopNode *cl = _head->as_CountedLoop();
  assert( cl->is_normal_loop() || cl->is_main_loop(), "" );

  // protect against stride not being a constant
  if( !cl->stride_is_con() ) return false;

  // protect against over-unrolling
  if( cl->trip_count() <= 1 ) return false;

  int future_unroll_ct = cl->unrolled_count() * 2;

  // Don't unroll if the next round of unrolling would push us
  // over the expected trip count of the loop.  One is subtracted
  // from the expected trip count because the pre-loop normally
  // executes 1 iteration.
  if (UnrollLimitForProfileCheck > 0 &&
      cl->profile_trip_cnt() != COUNT_UNKNOWN &&
      future_unroll_ct        > UnrollLimitForProfileCheck &&
      (float)future_unroll_ct > cl->profile_trip_cnt() - 1.0) {
    return false;
  }

  // When unroll count is greater than LoopUnrollMin, don't unroll if:
  //   the residual iterations are more than 10% of the trip count
  //   and rounds of "unroll,optimize" are not making significant progress
  //   Progress defined as current size less than 20% larger than previous size.
  if (UseSuperWord && cl->node_count_before_unroll() > 0 &&
      future_unroll_ct > LoopUnrollMin &&
      (future_unroll_ct - 1) * 10.0 > cl->profile_trip_cnt() &&
      1.2 * cl->node_count_before_unroll() < (double)_body.size()) {
    return false;
  }

  Node *init_n = cl->init_trip();
  Node *limit_n = cl->limit();
  // Non-constant bounds.
  // Protect against over-unrolling when init or/and limit are not constant
  // (so that trip_count's init value is maxint) but iv range is known.
  if( init_n   == NULL || !init_n->is_Con()  ||
      limit_n  == NULL || !limit_n->is_Con() ) {
    Node* phi = cl->phi();
    if( phi != NULL ) {
      assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi.");
      const TypeInt* iv_type = phase->_igvn.type(phi)->is_int();
      int next_stride = cl->stride_con() * 2; // stride after this unroll
      if( next_stride > 0 ) {
        if( iv_type->_lo + next_stride <= iv_type->_lo || // overflow
            iv_type->_lo + next_stride >  iv_type->_hi ) {
          return false;  // over-unrolling
        }
      } else if( next_stride < 0 ) {
        if( iv_type->_hi + next_stride >= iv_type->_hi || // overflow
            iv_type->_hi + next_stride <  iv_type->_lo ) {
          return false;  // over-unrolling
        }
      }
    }
  }

  // Adjust body_size to determine if we unroll or not
  uint body_size = _body.size();
  // Key test to unroll CaffeineMark's Logic test
  int xors_in_loop = 0;
  // Also count ModL, DivL and MulL which expand mightily
  for( uint k = 0; k < _body.size(); k++ ) {
    switch( _body.at(k)->Opcode() ) {
    case Op_XorI: xors_in_loop++; break; // CaffeineMark's Logic test
    case Op_ModL: body_size += 30; break;
    case Op_DivL: body_size += 30; break;
    case Op_MulL: body_size += 10; break;
    }
  }
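  // For example, a 30-node body containing one DivL is treated as
  // 30 + 30 = 60 nodes when compared against LoopUnrollLimit below,
  // reflecting the many nodes a 64-bit divide expands into.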

  // Check for being too big
  if( body_size > (uint)LoopUnrollLimit ) {
    if( xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true;
    // Normal case: loop too big
    return false;
  }

  // Check for stride being a small enough constant
  if( abs(cl->stride_con()) > (1<<3) ) return false;

  // Unroll once!  (Each trip will soon do double iterations)
  return true;
}

//------------------------------policy_align-----------------------------------
// Return TRUE or FALSE if the loop should be cache-line aligned.  Gather the
// expression that does the alignment.  Note that only one array base can be
// aligned in a loop (unless the VM guarantees mutual alignment).  Note that
// if we vectorize short memory ops into longer memory ops, we may want to
// increase alignment.
bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const {
  return false;
}

//------------------------------policy_range_check-----------------------------
// Return TRUE or FALSE if the loop should be range-check-eliminated.
// Actually we do iteration-splitting, a more powerful form of RCE.
bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const {
  if( !RangeCheckElimination ) return false;

  CountedLoopNode *cl = _head->as_CountedLoop();
  // If we unrolled with no intention of doing RCE and we later
  // changed our minds, we got no pre-loop.  Either we need to
  // make a new pre-loop, or we have to disallow RCE.
  if( cl->is_main_no_pre_loop() ) return false; // Disallowed for now.
  Node *trip_counter = cl->phi();

  // Check loop body for tests of trip-counter plus loop-invariant vs
  // loop-invariant.
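  // A typical candidate is the bounds check emitted for an array access
  // 'a[i*scale + offset]', comparing against the loop-invariant 'a.length'.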
  for( uint i = 0; i < _body.size(); i++ ) {
    Node *iff = _body[i];
    if( iff->Opcode() == Op_If ) { // Test?

      // Comparing trip+off vs limit
      Node *bol = iff->in(1);
      if( bol->req() != 2 ) continue; // dead constant test
      if (!bol->is_Bool()) {
        assert(UseLoopPredicate && bol->Opcode() == Op_Conv2B, "predicate check only");
        continue;
      }
      Node *cmp = bol->in(1);

      Node *rc_exp = cmp->in(1);
      Node *limit = cmp->in(2);

      Node *limit_c = phase->get_ctrl(limit);
      if( limit_c == phase->C->top() )
        return false;           // Found dead test on live IF?  No RCE!
      if( is_member(phase->get_loop(limit_c) ) ) {
        // Compare might have operands swapped; commute them
        rc_exp = cmp->in(2);
        limit  = cmp->in(1);
        limit_c = phase->get_ctrl(limit);
        if( is_member(phase->get_loop(limit_c) ) )
          continue;             // Both inputs are loop varying; cannot RCE
      }

      if (!phase->is_scaled_iv_plus_offset(rc_exp, trip_counter, NULL, NULL)) {
        continue;
      }
      // Yeah!  Found a test like 'trip+off vs limit'
      // Test is an IfNode, has 2 projections.  If BOTH are in the loop
      // we need loop unswitching instead of iteration splitting.
      if( is_loop_exit(iff) )
        return true;            // Found reason to split iterations
    } // End of is IF
  }

  return false;
}

//------------------------------policy_peel_only-------------------------------
// Return TRUE or FALSE if the loop should NEVER be RCE'd or aligned.  Useful
// for unrolling loops with NO array accesses.
bool IdealLoopTree::policy_peel_only( PhaseIdealLoop *phase ) const {

  for( uint i = 0; i < _body.size(); i++ )
    if( _body[i]->is_Mem() )
      return false;

  // No memory accesses at all!
  return true;
}

//------------------------------clone_up_backedge_goo--------------------------
// If Node n lives in the back_ctrl block and cannot float, we clone a private
// version of n in preheader_ctrl block and return that, otherwise return n.
Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n ) {
  if( get_ctrl(n) != back_ctrl ) return n;

  Node *x = NULL;               // If required, a clone of 'n'
  // Check for 'n' being pinned in the backedge.
  if( n->in(0) && n->in(0) == back_ctrl ) {
    x = n->clone();             // Clone a copy of 'n' to preheader
    x->set_req( 0, preheader_ctrl ); // Fix x's control input to preheader
  }

  // Recursively fix up any other input edges into x.
  // If there are no changes we can just return 'n', otherwise
  // we need to clone a private copy and change it.
  for( uint i = 1; i < n->req(); i++ ) {
    Node *g = clone_up_backedge_goo( back_ctrl, preheader_ctrl, n->in(i) );
    if( g != n->in(i) ) {
      if( !x )
        x = n->clone();
      x->set_req(i, g);
    }
  }
  if( x ) {                     // x can legally float to pre-header location
    register_new_node( x, preheader_ctrl );
    return x;
  } else {                      // raise n to cover LCA of uses
    set_ctrl( n, find_non_split_ctrl(back_ctrl->in(0)) );
  }
  return n;
}

//------------------------------insert_pre_post_loops--------------------------
// Insert pre and post loops.  If peel_only is set, the pre-loop cannot have
// more iterations added.  It acts as a 'peel' only, no lower-bound RCE, no
// alignment.  Useful to unroll loops that do no array accesses.
void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ) {

  C->set_major_progress();

  // Find common pieces of the loop being guarded with pre & post loops
  CountedLoopNode *main_head = loop->_head->as_CountedLoop();
  assert( main_head->is_normal_loop(), "" );
  CountedLoopEndNode *main_end = main_head->loopexit();
  assert( main_end->outcnt() == 2, "1 true, 1 false path only" );
  uint dd_main_head = dom_depth(main_head);
  uint max = main_head->outcnt();

  Node *pre_header= main_head->in(LoopNode::EntryControl);
  Node *init      = main_head->init_trip();
  Node *incr      = main_end ->incr();
  Node *limit     = main_end ->limit();
  Node *stride    = main_end ->stride();
  Node *cmp       = main_end ->cmp_node();
  BoolTest::mask b_test = main_end->test_trip();

  // Need only 1 user of 'bol' because I will be hacking the loop bounds.
  Node *bol = main_end->in(CountedLoopEndNode::TestValue);
  if( bol->outcnt() != 1 ) {
    bol = bol->clone();
    register_new_node(bol,main_end->in(CountedLoopEndNode::TestControl));
    _igvn.hash_delete(main_end);
    main_end->set_req(CountedLoopEndNode::TestValue, bol);
  }
  // Need only 1 user of 'cmp' because I will be hacking the loop bounds.
  if( cmp->outcnt() != 1 ) {
    cmp = cmp->clone();
    register_new_node(cmp,main_end->in(CountedLoopEndNode::TestControl));
    _igvn.hash_delete(bol);
    bol->set_req(1, cmp);
  }

  //------------------------------
  // Step A: Create Post-Loop.
  Node* main_exit = main_end->proj_out(false);
  assert( main_exit->Opcode() == Op_IfFalse, "" );
  int dd_main_exit = dom_depth(main_exit);

  // Step A1: Clone the loop body.  The clone becomes the post-loop.  The main
  // loop pre-header illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dd_main_exit );
  assert( old_new[main_end ->_idx]->Opcode() == Op_CountedLoopEnd, "" );
  CountedLoopNode *post_head = old_new[main_head->_idx]->as_CountedLoop();
  post_head->set_post_loop(main_head);

  // Reduce the post-loop trip count.
  CountedLoopEndNode* post_end = old_new[main_end ->_idx]->as_CountedLoopEnd();
  post_end->_prob = PROB_FAIR;

  // Build the main-loop normal exit.
  IfFalseNode *new_main_exit = new (C, 1) IfFalseNode(main_end);
  _igvn.register_new_node_with_optimizer( new_main_exit );
  set_idom(new_main_exit, main_end, dd_main_exit );
  set_loop(new_main_exit, loop->_parent);

  // Step A2: Build a zero-trip guard for the post-loop.  After leaving the
  // main-loop, the post-loop may not execute at all.  We 'opaque' the incr
  // (the main-loop trip-counter exit value) because we will be changing
  // the exit value (via unrolling) so we cannot constant-fold away the zero
  // trip guard until all unrolling is done.
  Node *zer_opaq = new (C, 2) Opaque1Node(C, incr);
  Node *zer_cmp  = new (C, 3) CmpINode( zer_opaq, limit );
  Node *zer_bol  = new (C, 2) BoolNode( zer_cmp, b_test );
  register_new_node( zer_opaq, new_main_exit );
  register_new_node( zer_cmp , new_main_exit );
  register_new_node( zer_bol , new_main_exit );

  // Build the IfNode
  IfNode *zer_iff = new (C, 2) IfNode( new_main_exit, zer_bol, PROB_FAIR, COUNT_UNKNOWN );
  _igvn.register_new_node_with_optimizer( zer_iff );
  set_idom(zer_iff, new_main_exit, dd_main_exit);
  set_loop(zer_iff, loop->_parent);

  // Plug in the false-path, taken if we need to skip post-loop
  _igvn.hash_delete( main_exit );
  main_exit->set_req(0, zer_iff);
  _igvn._worklist.push(main_exit);
  set_idom(main_exit, zer_iff, dd_main_exit);
  set_idom(main_exit->unique_out(), zer_iff, dd_main_exit);
  // Make the true-path, must enter the post loop
  Node *zer_taken = new (C, 1) IfTrueNode( zer_iff );
  _igvn.register_new_node_with_optimizer( zer_taken );
  set_idom(zer_taken, zer_iff, dd_main_exit);
  set_loop(zer_taken, loop->_parent);
  // Plug in the true path
  _igvn.hash_delete( post_head );
  post_head->set_req(LoopNode::EntryControl, zer_taken);
  set_idom(post_head, zer_taken, dd_main_exit);

  // Step A3: Make the fall-in values to the post-loop come from the
  // fall-out values of the main-loop.
  for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) {
    Node* main_phi = main_head->fast_out(i);
    if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() >0 ) {
      Node *post_phi = old_new[main_phi->_idx];
      Node *fallmain  = clone_up_backedge_goo(main_head->back_control(),
                                              post_head->init_control(),
                                              main_phi->in(LoopNode::LoopBackControl));
      _igvn.hash_delete(post_phi);
      post_phi->set_req( LoopNode::EntryControl, fallmain );
    }
  }

  // Update local caches for next stanza
  main_exit = new_main_exit;


  //------------------------------
  // Step B: Create Pre-Loop.

  // Step B1: Clone the loop body.  The clone becomes the pre-loop.  The main
  // loop pre-header illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dd_main_head );
  CountedLoopNode*    pre_head = old_new[main_head->_idx]->as_CountedLoop();
  CountedLoopEndNode* pre_end  = old_new[main_end ->_idx]->as_CountedLoopEnd();
  pre_head->set_pre_loop(main_head);
  Node *pre_incr = old_new[incr->_idx];

  // Reduce the pre-loop trip count.
  pre_end->_prob = PROB_FAIR;

  // Find the pre-loop normal exit.
  Node* pre_exit = pre_end->proj_out(false);
  assert( pre_exit->Opcode() == Op_IfFalse, "" );
  IfFalseNode *new_pre_exit = new (C, 1) IfFalseNode(pre_end);
  _igvn.register_new_node_with_optimizer( new_pre_exit );
  set_idom(new_pre_exit, pre_end, dd_main_head);
  set_loop(new_pre_exit, loop->_parent);

  // Step B2: Build a zero-trip guard for the main-loop.  After leaving the
  // pre-loop, the main-loop may not execute at all.  Later in life this
  // zero-trip guard will become the minimum-trip guard when we unroll
  // the main-loop.
  Node *min_opaq = new (C, 2) Opaque1Node(C, limit);
  Node *min_cmp  = new (C, 3) CmpINode( pre_incr, min_opaq );
  Node *min_bol  = new (C, 2) BoolNode( min_cmp, b_test );
  register_new_node( min_opaq, new_pre_exit );
  register_new_node( min_cmp , new_pre_exit );
  register_new_node( min_bol , new_pre_exit );

  // Build the IfNode (assume the main-loop is executed always).
  IfNode *min_iff = new (C, 2) IfNode( new_pre_exit, min_bol, PROB_ALWAYS, COUNT_UNKNOWN );
  _igvn.register_new_node_with_optimizer( min_iff );
  set_idom(min_iff, new_pre_exit, dd_main_head);
  set_loop(min_iff, loop->_parent);

  // Plug in the false-path, taken if we need to skip main-loop
  _igvn.hash_delete( pre_exit );
  pre_exit->set_req(0, min_iff);
  set_idom(pre_exit, min_iff, dd_main_head);
  set_idom(pre_exit->unique_out(), min_iff, dd_main_head);
  // Make the true-path, must enter the main loop
  Node *min_taken = new (C, 1) IfTrueNode( min_iff );
  _igvn.register_new_node_with_optimizer( min_taken );
  set_idom(min_taken, min_iff, dd_main_head);
  set_loop(min_taken, loop->_parent);
  // Plug in the true path
  _igvn.hash_delete( main_head );
  main_head->set_req(LoopNode::EntryControl, min_taken);
  set_idom(main_head, min_taken, dd_main_head);

  // Step B3: Make the fall-in values to the main-loop come from the
  // fall-out values of the pre-loop.
  for (DUIterator_Fast i2max, i2 = main_head->fast_outs(i2max); i2 < i2max; i2++) {
    Node* main_phi = main_head->fast_out(i2);
    if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) {
      Node *pre_phi = old_new[main_phi->_idx];
      Node *fallpre  = clone_up_backedge_goo(pre_head->back_control(),
                                             main_head->init_control(),
                                             pre_phi->in(LoopNode::LoopBackControl));
      _igvn.hash_delete(main_phi);
      main_phi->set_req( LoopNode::EntryControl, fallpre );
    }
  }

  // Step B4: Shorten the pre-loop to run only 1 iteration (for now).
  // RCE and alignment may change this later.
  Node *cmp_end = pre_end->cmp_node();
  assert( cmp_end->in(2) == limit, "" );
  Node *pre_limit = new (C, 3) AddINode( init, stride );

  // Save the original loop limit in this Opaque1 node for
  // use by range check elimination.
  Node *pre_opaq  = new (C, 3) Opaque1Node(C, pre_limit, limit);

  register_new_node( pre_limit, pre_head->in(0) );
  register_new_node( pre_opaq , pre_head->in(0) );

  // Since no other users of pre-loop compare, I can hack limit directly
  assert( cmp_end->outcnt() == 1, "no other users" );
  _igvn.hash_delete(cmp_end);
  cmp_end->set_req(2, peel_only ? pre_limit : pre_opaq);

  // Special case for not-equal loop bounds:
  // Change pre loop test, main loop test, and the
  // main loop guard test to use lt or gt depending on stride
  // direction:
  // positive stride use <
  // negative stride use >
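  // For example, 'for (i = 0; i != n; i++)' has stride +1, so all three
  // tests are rewritten to use 'lt', a form the later MIN/MAX limit
  // adjustments (unrolling, RCE) can reason about, unlike 'ne'.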

  if (pre_end->in(CountedLoopEndNode::TestValue)->as_Bool()->_test._test == BoolTest::ne) {

    BoolTest::mask new_test = (main_end->stride_con() > 0) ? BoolTest::lt : BoolTest::gt;
    // Modify pre loop end condition
    Node* pre_bol = pre_end->in(CountedLoopEndNode::TestValue)->as_Bool();
    BoolNode* new_bol0 = new (C, 2) BoolNode(pre_bol->in(1), new_test);
    register_new_node( new_bol0, pre_head->in(0) );
    _igvn.hash_delete(pre_end);
    pre_end->set_req(CountedLoopEndNode::TestValue, new_bol0);
    // Modify main loop guard condition
    assert(min_iff->in(CountedLoopEndNode::TestValue) == min_bol, "guard okay");
    BoolNode* new_bol1 = new (C, 2) BoolNode(min_bol->in(1), new_test);
    register_new_node( new_bol1, new_pre_exit );
    _igvn.hash_delete(min_iff);
    min_iff->set_req(CountedLoopEndNode::TestValue, new_bol1);
    // Modify main loop end condition
    BoolNode* main_bol = main_end->in(CountedLoopEndNode::TestValue)->as_Bool();
    BoolNode* new_bol2 = new (C, 2) BoolNode(main_bol->in(1), new_test);
    register_new_node( new_bol2, main_end->in(CountedLoopEndNode::TestControl) );
    _igvn.hash_delete(main_end);
    main_end->set_req(CountedLoopEndNode::TestValue, new_bol2);
  }

  // Flag main loop
  main_head->set_main_loop();
  if( peel_only ) main_head->set_main_no_pre_loop();

  // It's difficult to be precise about the trip-counts
  // for the pre/post loops.  They are usually very short,
  // so guess that 4 trips is a reasonable value.
  post_head->set_profile_trip_cnt(4.0);
  pre_head->set_profile_trip_cnt(4.0);

  // Now force out all loop-invariant dominating tests.  The optimizer
  // finds some, but we _know_ they are all useless.
  peeled_dom_test_elim(loop,old_new);
}

//------------------------------is_invariant-----------------------------
// Return true if n is invariant
bool IdealLoopTree::is_invariant(Node* n) const {
  Node *n_c = _phase->has_ctrl(n) ? _phase->get_ctrl(n) : n;
  if (n_c->is_top()) return false;
  return !is_member(_phase->get_loop(n_c));
}


//------------------------------do_unroll--------------------------------------
// Unroll the loop body one step - make each trip do 2 iterations.
void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip ) {
  assert( LoopUnrollLimit, "" );
#ifndef PRODUCT
  if( PrintOpto && VerifyLoopOptimizations ) {
    tty->print("Unrolling ");
    loop->dump_head();
  }
#endif
  CountedLoopNode *loop_head = loop->_head->as_CountedLoop();
  CountedLoopEndNode *loop_end = loop_head->loopexit();
  assert( loop_end, "" );

  // Remember loop node count before unrolling to detect
  // if rounds of unroll,optimize are making progress
  loop_head->set_node_count_before_unroll(loop->_body.size());

  Node *ctrl  = loop_head->in(LoopNode::EntryControl);
  Node *limit = loop_head->limit();
  Node *init  = loop_head->init_trip();
  Node *strid = loop_head->stride();

  Node *opaq = NULL;
  if( adjust_min_trip ) {       // If not maximally unrolling, need adjustment
    assert( loop_head->is_main_loop(), "" );
    assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" );
    Node *iff = ctrl->in(0);
    assert( iff->Opcode() == Op_If, "" );
    Node *bol = iff->in(1);
    assert( bol->Opcode() == Op_Bool, "" );
    Node *cmp = bol->in(1);
    assert( cmp->Opcode() == Op_CmpI, "" );
    opaq = cmp->in(2);
    // Occasionally it's possible for a pre-loop Opaque1 node to be
    // optimized away and then another round of loop opts attempted.
    // We cannot optimize this particular loop in that case.
    if( opaq->Opcode() != Op_Opaque1 )
      return;                   // Cannot find pre-loop!  Bail out!
  }

  C->set_major_progress();

  // Adjust max trip count. The trip count is intentionally rounded
  // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
  // the main, unrolled, part of the loop will never execute as it is protected
  // by the min-trip test.  See bug 4834191 for a case where we over-unrolled
  // and later determined that part of the unrolled loop was dead.
  loop_head->set_trip_count(loop_head->trip_count() / 2);

  // Double the count of original iterations in the unrolled loop body.
  loop_head->double_unrolled_count();

  // -----------
  // Step 2: Cut back the trip counter for an unroll amount of 2.
  // Loop will normally trip (limit - init)/stride_con.  Since it's a
  // CountedLoop this is exact (stride divides limit-init exactly).
  // We are going to double the loop body, so we want to knock off any
  // odd iteration: (trip_cnt & ~1).  Then back compute a new limit.
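  // For example, with init 0, limit 15 and stride 1: span = 15, trip = 15,
  // rounding with (trip & -2) gives 14, so the new limit is 14.  The doubled
  // body runs 7 trips and the post-loop (when present) picks up the one odd
  // iteration.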
  Node *span = new (C, 3) SubINode( limit, init );
  register_new_node( span, ctrl );
  Node *trip = new (C, 3) DivINode( 0, span, strid );
  register_new_node( trip, ctrl );
  Node *mtwo = _igvn.intcon(-2);
  set_ctrl(mtwo, C->root());
  Node *rond = new (C, 3) AndINode( trip, mtwo );
  register_new_node( rond, ctrl );
  Node *spn2 = new (C, 3) MulINode( rond, strid );
  register_new_node( spn2, ctrl );
  Node *lim2 = new (C, 3) AddINode( spn2, init );
  register_new_node( lim2, ctrl );

  // Hammer in the new limit
  Node *ctrl2 = loop_end->in(0);
  Node *cmp2 = new (C, 3) CmpINode( loop_head->incr(), lim2 );
  register_new_node( cmp2, ctrl2 );
  Node *bol2 = new (C, 2) BoolNode( cmp2, loop_end->test_trip() );
  register_new_node( bol2, ctrl2 );
  _igvn.hash_delete(loop_end);
  loop_end->set_req(CountedLoopEndNode::TestValue, bol2);

  // Step 3: Find the min-trip test guaranteed before a 'main' loop.
  // Make it a 1-trip test (means at least 2 trips).
  if( adjust_min_trip ) {
    // Guard test uses an 'opaque' node which is not shared.  Hence I
    // can edit its inputs directly.  Hammer in the new limit for the
    // minimum-trip guard.
    assert( opaq->outcnt() == 1, "" );
    _igvn.hash_delete(opaq);
    opaq->set_req(1, lim2);
  }

  // ---------
  // Step 4: Clone the loop body.  Move it inside the loop.  This loop body
  // represents the odd iterations; since the loop trips an even number of
  // times its backedge is never taken.  Kill the backedge.
  uint dd = dom_depth(loop_head);
  clone_loop( loop, old_new, dd );

  // Make backedges of the clone equal to backedges of the original.
  // Make the fall-in from the original come from the fall-out of the clone.
  for (DUIterator_Fast jmax, j = loop_head->fast_outs(jmax); j < jmax; j++) {
    Node* phi = loop_head->fast_out(j);
    if( phi->is_Phi() && phi->in(0) == loop_head && phi->outcnt() > 0 ) {
      Node *newphi = old_new[phi->_idx];
      _igvn.hash_delete( phi );
      _igvn.hash_delete( newphi );

      phi   ->set_req(LoopNode::   EntryControl, newphi->in(LoopNode::LoopBackControl));
      newphi->set_req(LoopNode::LoopBackControl, phi   ->in(LoopNode::LoopBackControl));
      phi   ->set_req(LoopNode::LoopBackControl, C->top());
    }
  }
  Node *clone_head = old_new[loop_head->_idx];
  _igvn.hash_delete( clone_head );
  loop_head ->set_req(LoopNode::   EntryControl, clone_head->in(LoopNode::LoopBackControl));
  clone_head->set_req(LoopNode::LoopBackControl, loop_head ->in(LoopNode::LoopBackControl));
  loop_head ->set_req(LoopNode::LoopBackControl, C->top());
  loop->_head = clone_head;     // New loop header

  set_idom(loop_head,  loop_head ->in(LoopNode::EntryControl), dd);
  set_idom(clone_head, clone_head->in(LoopNode::EntryControl), dd);

  // Kill the clone's backedge
  Node *newcle = old_new[loop_end->_idx];
  _igvn.hash_delete( newcle );
  Node *one = _igvn.intcon(1);
  set_ctrl(one, C->root());
  newcle->set_req(1, one);
  // Force clone into same loop body
  uint max = loop->_body.size();
  for( uint k = 0; k < max; k++ ) {
    Node *old = loop->_body.at(k);
    Node *nnn = old_new[old->_idx];
    loop->_body.push(nnn);
    if (!has_ctrl(old))
      set_loop(nnn, loop);
  }

  loop->record_for_igvn();
}

//------------------------------do_maximally_unroll----------------------------

void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) {
  CountedLoopNode *cl = loop->_head->as_CountedLoop();
  assert( cl->trip_count() > 0, "");

  // If loop is tripping an odd number of times, peel odd iteration
  if( (cl->trip_count() & 1) == 1 ) {
    do_peeling( loop, old_new );
  }

  // Now it's tripping an even number of times remaining.  Double loop body.
  // Do not adjust pre-guards; they are not needed and do not exist.
  if( cl->trip_count() > 0 ) {
    do_unroll( loop, old_new, false );
  }
}

//------------------------------dominates_backedge---------------------------------
// Returns true if ctrl is executed on every complete iteration
bool IdealLoopTree::dominates_backedge(Node* ctrl) {
  assert(ctrl->is_CFG(), "must be control");
  Node* backedge = _head->as_Loop()->in(LoopNode::LoopBackControl);
  return _phase->dom_lca_internal(ctrl, backedge) == ctrl;
}

//------------------------------add_constraint---------------------------------
// Constrain the main loop iterations so the condition:
//    scale_con * I + offset  <  limit
// always holds true.  That is, either increase the number of iterations in
// the pre-loop or the post-loop until the condition holds true in the main
// loop.  Stride, scale, offset and limit are all loop invariant.  Further,
// stride and scale are constants (offset and limit often are).
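// For example, with stride_con 1, scale_con -2, offset 0 and limit -10:
// stride*scale is negative, X = (-10 - 0) / -2 = 5, and the pre-loop limit
// becomes MAX(*pre_limit, 5 + 1).  The main loop then only sees I >= 6,
// where -2*I < -10 indeed holds (I == 5 would give -10, violating it).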
void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) {

  // Compute "I :: (limit-offset)/scale_con"
  Node *con = new (C, 3) SubINode( limit, offset );
  register_new_node( con, pre_ctrl );
  Node *scale = _igvn.intcon(scale_con);
  set_ctrl(scale, C->root());
  Node *X = new (C, 3) DivINode( 0, con, scale );
  register_new_node( X, pre_ctrl );

  // For positive stride, the pre-loop limit always uses a MAX function
  // and the main loop a MIN function.  For negative stride these are
  // reversed.

  // Also for positive stride*scale the affine function is increasing, so the
  // pre-loop must check for underflow and the post-loop for overflow.
  // Negative stride*scale reverses this; pre-loop checks for overflow and
  // post-loop for underflow.
  if( stride_con*scale_con > 0 ) {
    // Compute I < (limit-offset)/scale_con
    // Adjust main-loop last iteration to be MIN/MAX(main_loop,X)
    *main_limit = (stride_con > 0)
      ? (Node*)(new (C, 3) MinINode( *main_limit, X ))
      : (Node*)(new (C, 3) MaxINode( *main_limit, X ));
    register_new_node( *main_limit, pre_ctrl );

  } else {
    // Compute (limit-offset)/scale_con + SGN(-scale_con) <= I
    // Add the negation of the main-loop constraint to the pre-loop.
    // See footnote [++] below for a derivation of the limit expression.
    Node *incr = _igvn.intcon(scale_con > 0 ? -1 : 1);
    set_ctrl(incr, C->root());
    Node *adj = new (C, 3) AddINode( X, incr );
    register_new_node( adj, pre_ctrl );
    *pre_limit = (scale_con > 0)
      ? (Node*)new (C, 3) MinINode( *pre_limit, adj )
      : (Node*)new (C, 3) MaxINode( *pre_limit, adj );
    register_new_node( *pre_limit, pre_ctrl );

//   [++] Here's the algebra that justifies the pre-loop limit expression:
//
//   NOT( scale_con * I + offset  <  limit )
//      ==
//   scale_con * I + offset  >=  limit
//      ==
//   SGN(scale_con) * I  >=  (limit-offset)/|scale_con|
//      ==
//   (limit-offset)/|scale_con|   <=  I * SGN(scale_con)
//      ==
//   (limit-offset)/|scale_con|-1  <  I * SGN(scale_con)
//      ==
//   ( if (scale_con > 0) /*common case*/
//       (limit-offset)/scale_con - 1  <  I
//     else
//       (limit-offset)/scale_con + 1  >  I
//    )
//      ==
//   ( if (scale_con > 0) /*common case*/
//       (limit-offset)/scale_con + SGN(-scale_con)  <  I
//     else
//       (limit-offset)/scale_con + SGN(-scale_con)  >  I
//    )
  }
}


//------------------------------is_scaled_iv---------------------------------
// Return true if exp is a constant times an induction var
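// For example, 'iv' itself reports scale 1, '5*iv' reports scale 5, and
// 'iv << 3' reports scale 8 (a left shift by a constant is treated as a
// multiply by a power of two).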
bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, int* p_scale) {
  if (exp == iv) {
    if (p_scale != NULL) {
      *p_scale = 1;
    }
    return true;
  }
  int opc = exp->Opcode();
  if (opc == Op_MulI) {
    if (exp->in(1) == iv && exp->in(2)->is_Con()) {
      if (p_scale != NULL) {
        *p_scale = exp->in(2)->get_int();
      }
      return true;
    }
    if (exp->in(2) == iv && exp->in(1)->is_Con()) {
      if (p_scale != NULL) {
        *p_scale = exp->in(1)->get_int();
      }
      return true;
    }
  } else if (opc == Op_LShiftI) {
    if (exp->in(1) == iv && exp->in(2)->is_Con()) {
      if (p_scale != NULL) {
        *p_scale = 1 << exp->in(2)->get_int();
      }
      return true;
    }
  }
  return false;
}

//-----------------------------is_scaled_iv_plus_offset------------------------------
// Return true if exp is a simple induction variable expression: k1*iv + (invar + k2)
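// For example, 'invar - 4*iv' matches with scale -4 and offset 'invar',
// and '(iv + invar) + 3' matches one level deep (depth < 2) with scale 1
// and a freshly built 'invar + 3' offset node.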
1172 bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth) {
1173   if (is_scaled_iv(exp, iv, p_scale)) {
1174     if (p_offset != NULL) {
1175       Node *zero = _igvn.intcon(0);
1176       set_ctrl(zero, C->root());
1177       *p_offset = zero;
1178     }
1179     return true;
1180   }
1181   int opc = exp->Opcode();
1182   if (opc == Op_AddI) {
1183     if (is_scaled_iv(exp->in(1), iv, p_scale)) {
1184       if (p_offset != NULL) {
1185         *p_offset = exp->in(2);
1186       }
1187       return true;
1188     }
1189     if (exp->in(2)->is_Con()) {
1190       Node* offset2 = NULL;
1191       if (depth < 2 &&
1192           is_scaled_iv_plus_offset(exp->in(1), iv, p_scale,
1193                                    p_offset != NULL ? &offset2 : NULL, depth+1)) {
1194         if (p_offset != NULL) {
1195           Node *ctrl_off2 = get_ctrl(offset2);
1196           Node* offset = new (C, 3) AddINode(offset2, exp->in(2));
1197           register_new_node(offset, ctrl_off2);
1198           *p_offset = offset;
1199         }
1200         return true;
1201       }
1202     }
1203   } else if (opc == Op_SubI) {
1204     if (is_scaled_iv(exp->in(1), iv, p_scale)) {
1205       if (p_offset != NULL) {
1206         Node *zero = _igvn.intcon(0);
1207         set_ctrl(zero, C->root());
1208         Node *ctrl_off = get_ctrl(exp->in(2));
1209         Node* offset = new (C, 3) SubINode(zero, exp->in(2));
1210         register_new_node(offset, ctrl_off);
1211         *p_offset = offset;
1212       }
1213       return true;
1214     }
1215     if (is_scaled_iv(exp->in(2), iv, p_scale)) {
1216       if (p_offset != NULL) {
1217         *p_scale *= -1;
1218         *p_offset = exp->in(1);
1219       }
1220       return true;
1221     }
1222   }
1223   return false;
1224 }
1225 
1226 //------------------------------do_range_check---------------------------------
1227 // Eliminate range-checks and other trip-counter vs loop-invariant tests.
1228 void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
1229 #ifndef PRODUCT
1230   if( PrintOpto && VerifyLoopOptimizations ) {
1231     tty->print("Range Check Elimination ");
1232     loop->dump_head();
1233   }
1234 #endif
1235   assert( RangeCheckElimination, "" );
1236   CountedLoopNode *cl = loop->_head->as_CountedLoop();
1237   assert( cl->is_main_loop(), "" );
1238 
1239   // Find the trip counter; we are iteration splitting based on it
1240   Node *trip_counter = cl->phi();
1241   // Find the main loop limit; we will trim it's iterations
1242   // to not ever trip end tests
1243   Node *main_limit = cl->limit();
1244   // Find the pre-loop limit; we will expand it's iterations to
1245   // not ever trip low tests.
1246   Node *ctrl  = cl->in(LoopNode::EntryControl);
1247   assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" );
1248   Node *iffm = ctrl->in(0);
1249   assert( iffm->Opcode() == Op_If, "" );
1250   Node *p_f = iffm->in(0);
1251   assert( p_f->Opcode() == Op_IfFalse, "" );
1252   CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
1253   assert( pre_end->loopnode()->is_pre_loop(), "" );
1254   Node *pre_opaq1 = pre_end->limit();
1255   // Occasionally it's possible for a pre-loop Opaque1 node to be
1256   // optimized away and then another round of loop opts attempted.
1257   // We can not optimize this particular loop in that case.
1258   if( pre_opaq1->Opcode() != Op_Opaque1 )
1259     return;
1260   Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1;
1261   Node *pre_limit = pre_opaq->in(1);
1262 
1263   // Where do we put new limit calculations
1264   Node *pre_ctrl = pre_end->loopnode()->in(LoopNode::EntryControl);
1265 
1266   // Ensure the original loop limit is available from the
1267   // pre-loop Opaque1 node.
1268   Node *orig_limit = pre_opaq->original_loop_limit();
1269   if( orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP )
1270     return;
1271 
1272   // Need to find the main-loop zero-trip guard
1273   Node *bolzm = iffm->in(1);
1274   assert( bolzm->Opcode() == Op_Bool, "" );
1275   Node *cmpzm = bolzm->in(1);
1276   assert( cmpzm->is_Cmp(), "" );
1277   Node *opqzm = cmpzm->in(2);
1278   if( opqzm->Opcode() != Op_Opaque1 )
1279     return;
1280   assert( opqzm->in(1) == main_limit, "do not understand situation" );
1281 
1282   // Must know if it's a count-up or count-down loop
1283 
1284   // protect against stride not being a constant
1285   if ( !cl->stride_is_con() ) {
1286     return;
1287   }
1288   int stride_con = cl->stride_con();
1289   Node *zero = _igvn.intcon(0);
1290   Node *one  = _igvn.intcon(1);
1291   set_ctrl(zero, C->root());
1292   set_ctrl(one,  C->root());
1293 
1294   // Range checks that do not dominate the loop backedge (i.e.,
1295   // conditionally executed) can lengthen the pre-loop limit beyond
1296   // the original loop limit. To prevent this, the pre limit is
1297   // (for stride > 0) MINed with the original loop limit (MAXed for
1298   // stride < 0) when some range_check (rc) is conditionally
1299   // executed.
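       // For instance (illustrative): with stride 1, a range check on
       // a[i+1000] guarded by a rare in-loop condition could push pre_limit
       // well past the original limit; MIN(pre_limit, orig_limit) keeps the
       // pre-loop within the original iteration range.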
1300   bool conditional_rc = false;
1301 
1302   // Check loop body for tests of trip-counter plus loop-invariant vs
1303   // loop-invariant.
1304   for( uint i = 0; i < loop->_body.size(); i++ ) {
1305     Node *iff = loop->_body[i];
1306     if( iff->Opcode() == Op_If ) { // Test?
1307 
1308       // Test is an IfNode, has 2 projections.  If BOTH are in the loop
1309       // we need loop unswitching instead of iteration splitting.
1310       Node *exit = loop->is_loop_exit(iff);
1311       if( !exit ) continue;
1312       int flip = (exit->Opcode() == Op_IfTrue) ? 1 : 0;
1313 
1314       // Get boolean condition to test
1315       Node *i1 = iff->in(1);
1316       if( !i1->is_Bool() ) continue;
1317       BoolNode *bol = i1->as_Bool();
1318       BoolTest b_test = bol->_test;
1319       // Flip sense of test if exit condition is flipped
1320       if( flip )
1321         b_test = b_test.negate();
1322 
1323       // Get compare
1324       Node *cmp = bol->in(1);
1325 
1326       // Look for trip_counter + offset vs limit
1327       Node *rc_exp = cmp->in(1);
1328       Node *limit  = cmp->in(2);
1329       jint scale_con = 1;       // Assume trip counter not scaled
1330 
1331       Node *limit_c = get_ctrl(limit);
1332       if( loop->is_member(get_loop(limit_c) ) ) {
1333         // Compare might have operands swapped; commute them
1334         b_test = b_test.commute();
1335         rc_exp = cmp->in(2);
1336         limit  = cmp->in(1);
1337         limit_c = get_ctrl(limit);
1338         if( loop->is_member(get_loop(limit_c) ) )
1339           continue;             // Both inputs are loop varying; cannot RCE
1340       }
1341       // Here we know 'limit' is loop invariant
1342 
1343       // 'limit' may be pinned below the zero trip test (probably from a
1344       // previous round of RCE), in which case it can't be used in the
1345       // zero trip test expression, which must occur before the zero test's if.
1346       if( limit_c == ctrl ) {
1347         continue;  // Don't rce this check but continue looking for other candidates.
1348       }
1349 
1350       // Check for scaled induction variable plus an offset
1351       Node *offset = NULL;
1352 
1353       if (!is_scaled_iv_plus_offset(rc_exp, trip_counter, &scale_con, &offset)) {
1354         continue;
1355       }
1356 
1357       Node *offset_c = get_ctrl(offset);
1358       if( loop->is_member( get_loop(offset_c) ) )
1359         continue;               // Offset is not really loop invariant
1360       // Here we know 'offset' is loop invariant.
1361 
1362       // As above for the 'limit', the 'offset' may be pinned below the
1363       // zero trip test.
1364       if( offset_c == ctrl ) {
1365         continue; // Don't rce this check but continue looking for other candidates.
1366       }
1367 
1368       // At this point we have the expression as:
1369       //   scale_con * trip_counter + offset :: limit
1370       // where scale_con, offset and limit are loop invariant.  Trip_counter
1371       // changes monotonically by stride_con, a constant.  Either (or both)
1372       // of stride_con and scale_con can be negative, which flips the
1373       // sense of the test.
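           // For instance (illustrative): in
           //   for (int i = 0; i < n; i++) { ... a[2*i + 3] ... }
           // the range check yields scale_con == 2, offset == 3,
           // limit == a.length and stride_con == 1.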
1374 
1375       // Adjust pre and main loop limits to guard the correct iteration set
1376       if( cmp->Opcode() == Op_CmpU ) {// Unsigned compare is really 2 tests
1377         if( b_test._test == BoolTest::lt ) { // Range checks always use lt
1378           // The overflow limit: scale*I+offset < limit
1379           add_constraint( stride_con, scale_con, offset, limit, pre_ctrl, &pre_limit, &main_limit );
1380           // The underflow limit: 0 <= scale*I+offset.
1381           // Some math yields: -scale*I-(offset+1) < 0
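               // Spelled out (illustrative):
               //   0 <= scale*I+offset
               //   <=>  -scale*I-offset <= 0
               //   <=>  -scale*I-offset-1 < 0     (integers)
               //   <=>  -scale*I-(offset+1) < 0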
1382           Node *plus_one = new (C, 3) AddINode( offset, one );
1383           register_new_node( plus_one, pre_ctrl );
1384           Node *neg_offset = new (C, 3) SubINode( zero, plus_one );
1385           register_new_node( neg_offset, pre_ctrl );
1386           add_constraint( stride_con, -scale_con, neg_offset, zero, pre_ctrl, &pre_limit, &main_limit );
1387           if (!conditional_rc) {
1388             conditional_rc = !loop->dominates_backedge(iff);
1389           }
1390         } else {
1391 #ifndef PRODUCT
1392           if( PrintOpto )
1393             tty->print_cr("missed RCE opportunity");
1394 #endif
1395           continue;             // In release mode, ignore it
1396         }
1397       } else {                  // Otherwise work on normal compares
1398         switch( b_test._test ) {
1399         case BoolTest::ge:      // Convert X >= Y to -X <= -Y
1400           scale_con = -scale_con;
1401           offset = new (C, 3) SubINode( zero, offset );
1402           register_new_node( offset, pre_ctrl );
1403           limit  = new (C, 3) SubINode( zero, limit  );
1404           register_new_node( limit, pre_ctrl );
1405           // Fall into LE case
1406         case BoolTest::le:      // Convert X <= Y to X < Y+1
1407           limit = new (C, 3) AddINode( limit, one );
1408           register_new_node( limit, pre_ctrl );
1409           // Fall into LT case
1410         case BoolTest::lt:
1411           add_constraint( stride_con, scale_con, offset, limit, pre_ctrl, &pre_limit, &main_limit );
1412           if (!conditional_rc) {
1413             conditional_rc = !loop->dominates_backedge(iff);
1414           }
1415           break;
1416         default:
1417 #ifndef PRODUCT
1418           if( PrintOpto )
1419             tty->print_cr("missed RCE opportunity");
1420 #endif
1421           continue;             // Unhandled case
1422         }
1423       }
1424 
1425       // Kill the eliminated test
1426       C->set_major_progress();
1427       Node *kill_con = _igvn.intcon( 1-flip );
1428       set_ctrl(kill_con, C->root());
1429       _igvn.hash_delete(iff);
1430       iff->set_req(1, kill_con);
1431       _igvn._worklist.push(iff);
1432       // Find surviving projection
1433       assert(iff->is_If(), "");
1434       ProjNode* dp = ((IfNode*)iff)->proj_out(1-flip);
1435       // Find loads off the surviving projection; remove their control edge
1436       for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
1437         Node* cd = dp->fast_out(i); // Control-dependent node
1438         if( cd->is_Load() ) {   // Loads can now float around in the loop
1439           _igvn.hash_delete(cd);
1440           // Allow the load to float around in the loop, or before it
1441           // but NOT before the pre-loop.
1442           cd->set_req(0, ctrl);   // ctrl, not NULL
1443           _igvn._worklist.push(cd);
1444           --i;
1445           --imax;
1446         }
1447       }
1448 
1449     } // End of is IF
1450 
1451   }
1452 
1453   // Update loop limits
1454   if (conditional_rc) {
1455     pre_limit = (stride_con > 0) ? (Node*)new (C,3) MinINode(pre_limit, orig_limit)
1456                                  : (Node*)new (C,3) MaxINode(pre_limit, orig_limit);
1457     register_new_node(pre_limit, pre_ctrl);
1458   }
1459   _igvn.hash_delete(pre_opaq);
1460   pre_opaq->set_req(1, pre_limit);
1461 
1462   // Note: we are making the main loop limit no longer precise;
1463   // need to round up based on stride.
1464   if( stride_con != 1 && stride_con != -1 ) { // Cutout for common case
1465     // "Standard" round-up logic:  ([main_limit-init+(y-1)]/y)*y+init
1466     // Hopefully, the compiler will optimize this for powers of 2.
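         // For instance (illustrative): init == 1, main_limit == 11, stride == 3:
         //   span = 11-1 = 10; add = 10+2 = 12; div = 12/3 = 4; mul = 4*3 = 12;
         //   newlim = 12+1 = 13, the first value of the form init + k*stride
         //   that is >= main_limit.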
1467     Node *ctrl = get_ctrl(main_limit);
1468     Node *stride = cl->stride();
1469     Node *init = cl->init_trip();
1470     Node *span = new (C, 3) SubINode(main_limit,init);
1471     register_new_node(span,ctrl);
1472     Node *rndup = _igvn.intcon(stride_con + ((stride_con>0)?-1:1));
1473     Node *add = new (C, 3) AddINode(span,rndup);
1474     register_new_node(add,ctrl);
1475     Node *div = new (C, 3) DivINode(0,add,stride);
1476     register_new_node(div,ctrl);
1477     Node *mul = new (C, 3) MulINode(div,stride);
1478     register_new_node(mul,ctrl);
1479     Node *newlim = new (C, 3) AddINode(mul,init);
1480     register_new_node(newlim,ctrl);
1481     main_limit = newlim;
1482   }
1483 
1484   Node *main_cle = cl->loopexit();
1485   Node *main_bol = main_cle->in(1);
1486   // Hacking loop bounds; need private copies of exit test
1487   if( main_bol->outcnt() > 1 ) {// BoolNode shared?
1488     _igvn.hash_delete(main_cle);
1489     main_bol = main_bol->clone();// Clone a private BoolNode
1490     register_new_node( main_bol, main_cle->in(0) );
1491     main_cle->set_req(1,main_bol);
1492   }
1493   Node *main_cmp = main_bol->in(1);
1494   if( main_cmp->outcnt() > 1 ) { // CmpNode shared?
1495     _igvn.hash_delete(main_bol);
1496     main_cmp = main_cmp->clone();// Clone a private CmpNode
1497     register_new_node( main_cmp, main_cle->in(0) );
1498     main_bol->set_req(1,main_cmp);
1499   }
1500   // Hack the now-private loop bounds
1501   _igvn.hash_delete(main_cmp);
1502   main_cmp->set_req(2, main_limit);
1503   _igvn._worklist.push(main_cmp);
1504   // The OpaqueNode is unshared by design
1505   _igvn.hash_delete(opqzm);
1506   assert( opqzm->outcnt() == 1, "cannot hack shared node" );
1507   opqzm->set_req(1,main_limit);
1508   _igvn._worklist.push(opqzm);
1509 }
1510 
1511 //------------------------------DCE_loop_body----------------------------------
1512 // Remove simplistic dead code from loop body
1513 void IdealLoopTree::DCE_loop_body() {
1514   for( uint i = 0; i < _body.size(); i++ )
1515     if( _body.at(i)->outcnt() == 0 )
1516       _body.map( i--, _body.pop() );
1517 }
1518 
1519 
1520 //------------------------------adjust_loop_exit_prob--------------------------
1521 // Look for loop-exit tests with the 50/50 (or worse) guesses from the parsing stage.
1522 // Replace with a 1-in-10 exit guess.
1523 void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) {
1524   Node *test = tail();
1525   while( test != _head ) {
1526     uint top = test->Opcode();
1527     if( top == Op_IfTrue || top == Op_IfFalse ) {
1528       int test_con = ((ProjNode*)test)->_con;
1529       assert(top == (uint)(test_con? Op_IfTrue: Op_IfFalse), "sanity");
1530       IfNode *iff = test->in(0)->as_If();
1531       if( iff->outcnt() == 2 ) {        // Ignore dead tests
1532         Node *bol = iff->in(1);
1533         if( bol && bol->req() > 1 && bol->in(1) &&
1534             ((bol->in(1)->Opcode() == Op_StorePConditional ) ||
1535              (bol->in(1)->Opcode() == Op_StoreIConditional ) ||
1536              (bol->in(1)->Opcode() == Op_StoreLConditional ) ||
1537              (bol->in(1)->Opcode() == Op_CompareAndSwapI ) ||
1538              (bol->in(1)->Opcode() == Op_CompareAndSwapL ) ||
1539              (bol->in(1)->Opcode() == Op_CompareAndSwapP ) ||
1540              (bol->in(1)->Opcode() == Op_CompareAndSwapN )))
1541           return;               // Allocation loops RARELY take backedge
1542         // Find the OTHER exit path from the IF
1543         Node* ex = iff->proj_out(1-test_con);
1544         float p = iff->_prob;
1545         if( !phase->is_member( this, ex ) && iff->_fcnt == COUNT_UNKNOWN ) {
1546           if( top == Op_IfTrue ) {
1547             if( p < (PROB_FAIR + PROB_UNLIKELY_MAG(3))) {
1548               iff->_prob = PROB_STATIC_FREQUENT;
1549             }
1550           } else {
1551             if( p > (PROB_FAIR - PROB_UNLIKELY_MAG(3))) {
1552               iff->_prob = PROB_STATIC_INFREQUENT;
1553             }
1554           }
1555         }
1556       }
1557     }
1558     test = phase->idom(test);
1559   }
1560 }
1561 
1562 
1563 //------------------------------policy_do_remove_empty_loop--------------------
1564 // Micro-benchmark spamming.  Policy is to always remove empty loops.
1565 // The 'DO' part is to replace the trip counter with the value it will
1566 // have on the last iteration.  This will break the loop.
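     // For instance (illustrative): for (int i = 0; i < 10; i++) { } has its
     // phi replaced by limit - stride == 9, the trip counter's value on the
     // last iteration.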
1567 bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) {
1568   // Minimum size must be empty loop
1569   if( _body.size() > 7/*number of nodes in an empty loop*/ ) return false;
1570 
1571   if( !_head->is_CountedLoop() ) return false;     // Dead loop
1572   CountedLoopNode *cl = _head->as_CountedLoop();
1573   if( !cl->loopexit() ) return false; // Malformed loop
1574   if( !phase->is_member(this,phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue)) ) )
1575     return false;             // Infinite loop
1576 #ifndef PRODUCT
1577   if( PrintOpto )
1578     tty->print_cr("Removing empty loop");
1579 #endif
1580 #ifdef ASSERT
1581   // Ensure only one phi which is the iv.
1582   Node* iv = NULL;
1583   for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) {
1584     Node* n = cl->fast_out(i);
1585     if (n->Opcode() == Op_Phi) {
1586       assert(iv == NULL, "Too many phis" );
1587       iv = n;
1588     }
1589   }
1590   assert(iv == cl->phi(), "Wrong phi" );
1591 #endif
1592   // Replace the phi at loop head with the final value of the last
1593   // iteration.  Then the CountedLoopEnd will collapse (backedge never
1594   // taken) and all loop-invariant uses of the exit values will be correct.
1595   Node *phi = cl->phi();
1596   Node *final = new (phase->C, 3) SubINode( cl->limit(), cl->stride() );
1597   phase->register_new_node(final,cl->in(LoopNode::EntryControl));
1598   phase->_igvn.replace_node(phi,final);
1599   phase->C->set_major_progress();
1600   return true;
1601 }
1602 
1603 
1604 //=============================================================================
1605 //------------------------------iteration_split_impl---------------------------
1606 bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) {
1607   // Check and remove empty loops (spam micro-benchmarks)
1608   if( policy_do_remove_empty_loop(phase) )
1609     return true;  // Here we removed an empty loop
1610 
1611   bool should_peel = policy_peeling(phase); // Should we peel?
1612 
1613   bool should_unswitch = policy_unswitching(phase);
1614 
1615   // Non-counted loops may be peeled; exactly 1 iteration is peeled.
1616   // This removes loop-invariant tests (usually null checks).
1617   if( !_head->is_CountedLoop() ) { // Non-counted loop
1618     if (PartialPeelLoop && phase->partial_peel(this, old_new)) {
1619       // Partial peel succeeded so terminate this round of loop opts
1620       return false;
1621     }
1622     if( should_peel ) {            // Should we peel?
1623 #ifndef PRODUCT
1624       if (PrintOpto) tty->print_cr("should_peel");
1625 #endif
1626       phase->do_peeling(this,old_new);
1627     } else if( should_unswitch ) {
1628       phase->do_unswitching(this, old_new);
1629     }
1630     return true;
1631   }
1632   CountedLoopNode *cl = _head->as_CountedLoop();
1633 
1634   if( !cl->loopexit() ) return true; // Ignore various kinds of broken loops
1635 
1636   // Do nothing special to pre- and post- loops
1637   if( cl->is_pre_loop() || cl->is_post_loop() ) return true;
1638 
1639   // Compute loop trip count from profile data
1640   compute_profile_trip_cnt(phase);
1641 
1642   // Before attempting fancy unrolling, RCE or alignment, see if we want
1643   // to completely unroll this loop or do loop unswitching.
1644   if( cl->is_normal_loop() ) {
1645     if (should_unswitch) {
1646       phase->do_unswitching(this, old_new);
1647       return true;
1648     }
1649     bool should_maximally_unroll =  policy_maximally_unroll(phase);
1650     if( should_maximally_unroll ) {
1651       // Here we did some unrolling and peeling.  Eventually we will
1652       // completely unroll this loop and it will no longer be a loop.
1653       phase->do_maximally_unroll(this,old_new);
1654       return true;
1655     }
1656   }
1657 
1658 
1659   // Counted loops may be peeled, may need some iterations run up
1660   // front for RCE, and may want to align loop refs to a cache
1661   // line.  Thus we clone a full loop up front whose trip count is
1662   // at least 1 (if peeling), but may be several more.
1663 
1664   // The main loop will start cache-line aligned with at least 1
1665   // iteration of the unrolled body (zero-trip test required) and
1666   // will have some range checks removed.
1667 
1668   // A post-loop will finish any odd iterations (leftover after
1669   // unrolling), plus any needed for RCE purposes.
1670 
1671   bool should_unroll = policy_unroll(phase);
1672 
1673   bool should_rce = policy_range_check(phase);
1674 
1675   bool should_align = policy_align(phase);
1676 
1677   // If not RCE'ing (iteration splitting) or Aligning, then we do not
1678   // need a pre-loop.  We may still need to peel an initial iteration but
1679   // we will not be needing an unknown number of pre-iterations.
1680   //
1681   // Basically, if may_rce_align reports FALSE the first time through,
1682   // we will not be able to later do RCE or Aligning on this loop.
1683   bool may_rce_align = !policy_peel_only(phase) || should_rce || should_align;
1684 
1685   // If we have any of these conditions (RCE, alignment, unrolling) met, then
1686   // we switch to the pre-/main-/post-loop model.  This model also covers
1687   // peeling.
1688   if( should_rce || should_align || should_unroll ) {
1689     if( cl->is_normal_loop() )  // Convert to 'pre/main/post' loops
1690       phase->insert_pre_post_loops(this,old_new, !may_rce_align);
1691 
1692     // Adjust the pre- and main-loop limits to let the pre and post loops run
1693     // with full checks, but the main-loop with no checks.  Remove said
1694     // checks from the main body.
1695     if( should_rce )
1696       phase->do_range_check(this,old_new);
1697 
1698     // Double loop body for unrolling.  Adjust the minimum-trip test (will do
1699     // twice as many iterations as before) and the main body limit (only do
1700     // an even number of trips).  If we are peeling, we might enable some RCE
1701     // and we'd rather unroll the post-RCE'd loop SO... do not unroll if
1702     // peeling.
1703     if( should_unroll && !should_peel )
1704       phase->do_unroll(this,old_new, true);
1705 
1706     // Adjust the pre-loop limits to align the main body
1707     // iterations.
1708     if( should_align )
1709       Unimplemented();
1710 
1711   } else {                      // Else we have an unchanged counted loop
1712     if( should_peel )           // Might want to peel but do nothing else
1713       phase->do_peeling(this,old_new);
1714   }
1715   return true;
1716 }
1717 
1718 
1719 //=============================================================================
1720 //------------------------------iteration_split--------------------------------
1721 bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new ) {
1722   // Recursively iteration split nested loops
1723   if( _child && !_child->iteration_split( phase, old_new ))
1724     return false;
1725 
1726   // Clean out prior deadwood
1727   DCE_loop_body();
1728 
1729 
1730   // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
1731   // Replace with a 1-in-10 exit guess.
1732   if( _parent /*not the root loop*/ &&
1733       !_irreducible &&
1734       // Also ignore the occasional dead backedge
1735       !tail()->is_top() ) {
1736     adjust_loop_exit_prob(phase);
1737   }
1738 
1739 
1740   // Gate unrolling, RCE and peeling efforts.
1741   if( !_child &&                // If not an inner loop, do not split
1742       !_irreducible &&
1743       _allow_optimizations &&
1744       !tail()->is_top() ) {     // Also ignore the occasional dead backedge
1745     if (!_has_call) {
1746       if (!iteration_split_impl( phase, old_new )) {
1747         return false;
1748       }
1749     } else if (policy_unswitching(phase)) {
1750       phase->do_unswitching(this, old_new);
1751     }
1752   }
1753 
1754   // Minor offset re-organization to remove loop-fallout uses of
1755   // trip counter.
1756   if( _head->is_CountedLoop() ) phase->reorg_offsets( this );
1757   if( _next && !_next->iteration_split( phase, old_new ))
1758     return false;
1759   return true;
1760 }
1761 
1762 //-------------------------------is_uncommon_trap_proj----------------------------
1763 // Return true if proj is of the form "proj->[region->..]call_uct"
1764 bool PhaseIdealLoop::is_uncommon_trap_proj(ProjNode* proj, bool must_reason_predicate) {
1765   int path_limit = 10;
1766   assert(proj, "invalid argument");
1767   Node* out = proj;
1768   for (int ct = 0; ct < path_limit; ct++) {
1769     out = out->unique_ctrl_out();
1770     if (out == NULL || out->is_Root() || out->is_Start())
1771       return false;
1772     if (out->is_CallStaticJava()) {
1773       int req = out->as_CallStaticJava()->uncommon_trap_request();
1774       if (req != 0) {
1775         Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(req);
1776         if (!must_reason_predicate || reason == Deoptimization::Reason_predicate){
1777            return true;
1778         }
1779       }
1780       return false; // don't search beyond a call
1781     }
1782   }
1783   return false;
1784 }
1785 
1786 //-------------------------------is_uncommon_trap_if_pattern-------------------------
1787 // Return true  for "if(test)-> proj -> ...
1788 //                          |
1789 //                          V
1790 //                      other_proj->[region->..]call_uct"
1791 //
1792 // "must_reason_predicate" means the uct reason must be Reason_predicate
1793 bool PhaseIdealLoop::is_uncommon_trap_if_pattern(ProjNode *proj, bool must_reason_predicate) {
1794   Node *in0 = proj->in(0);
1795   if (!in0->is_If()) return false;
1796   // Variation of a dead If node.
1797   if (in0->outcnt() < 2)  return false;
1798   IfNode* iff = in0->as_If();
1799 
1800   // we need "If(Conv2B(Opaque1(...)))" pattern for must_reason_predicate
1801   if (must_reason_predicate) {
1802     if (iff->in(1)->Opcode() != Op_Conv2B ||
1803        iff->in(1)->in(1)->Opcode() != Op_Opaque1) {
1804       return false;
1805     }
1806   }
1807 
1808   ProjNode* other_proj = iff->proj_out(1-proj->_con)->as_Proj();
1809   return is_uncommon_trap_proj(other_proj, must_reason_predicate);
1810 }
1811 
1812 //------------------------------create_new_if_for_predicate------------------------
1813 // create a new if above the uct_if_pattern for the predicate to be promoted.
1814 //
1815 //          before                                after
1816 //        ----------                           ----------
1817 //           ctrl                                 ctrl
1818 //            |                                     |
1819 //            |                                     |
1820 //            v                                     v
1821 //           iff                                 new_iff
1822 //          /    \                                /      \
1823 //         /      \                              /        \
1824 //        v        v                            v          v
1825 //  uncommon_proj cont_proj                   if_uct     if_cont
1826 // \      |        |                           |          |
1827 //  \     |        |                           |          |
1828 //   v    v        v                           |          v
1829 //     rgn       loop                          |         iff
1830 //      |                                      |        /     \
1831 //      |                                      |       /       \
1832 //      v                                      |      v         v
1833 // uncommon_trap                               | uncommon_proj cont_proj
1834 //                                           \  \    |           |
1835 //                                            \  \   |           |
1836 //                                             v  v  v           v
1837 //                                               rgn           loop
1838 //                                                |
1839 //                                                |
1840 //                                                v
1841 //                                           uncommon_trap
1842 //
1843 //
1844 // We will create a region to guard the uct call if there isn't one already.
1845 // The true projection (if_cont) of the new_iff is returned.
1846 ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj) {
1847   assert(is_uncommon_trap_if_pattern(cont_proj, true), "must be a uct if pattern!");
1848   IfNode* iff = cont_proj->in(0)->as_If();
1849 
1850   ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con);
1851   Node     *rgn   = uncommon_proj->unique_ctrl_out();
1852   assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct");
1853 
1854   if (!rgn->is_Region()) { // create a region to guard the call
1855     assert(rgn->is_Call(), "must be call uct");
1856     CallNode* call = rgn->as_Call();
1857     rgn = new (C, 1) RegionNode(1);
1858     _igvn.set_type(rgn, rgn->bottom_type());
1859     rgn->add_req(uncommon_proj);
1860     set_idom(rgn, idom(uncommon_proj), dom_depth(uncommon_proj)+1);
1861     _igvn.hash_delete(call);
1862     call->set_req(0, rgn);
1863   }
1864 
1865   // Create new_iff
1866   uint  iffdd  = dom_depth(iff);
1867   IdealLoopTree* lp = get_loop(iff);
1868   IfNode *new_iff = new (C, 2) IfNode(iff->in(0), NULL, iff->_prob, iff->_fcnt);
1869   register_node(new_iff, lp, idom(iff), iffdd);
1870   Node *if_cont = new (C, 1) IfTrueNode(new_iff);
1871   Node *if_uct  = new (C, 1) IfFalseNode(new_iff);
1872   if (cont_proj->is_IfFalse()) {
1873     // Swap
1874     Node* tmp = if_uct; if_uct = if_cont; if_cont = tmp;
1875   }
1876   register_node(if_cont, lp, new_iff, iffdd);
1877   register_node(if_uct, get_loop(rgn), new_iff, iffdd);
1878 
1879   // if_cont to iff
1880   _igvn.hash_delete(iff);
1881   iff->set_req(0, if_cont);
1882   set_idom(iff, if_cont, dom_depth(iff));
1883 
1884   // if_uct to rgn
1885   _igvn.hash_delete(rgn);
1886   rgn->add_req(if_uct);
1887   Node* ridom = idom(rgn);
1888   Node* nrdom = dom_lca(ridom, new_iff);
1889   set_idom(rgn, nrdom, dom_depth(rgn));
1890 
1891   // rgn must have no phis
1892   assert(!rgn->as_Region()->has_phi(), "region must have no phis");
1893 
1894   return if_cont->as_Proj();
1895 }
1896 
1897 //------------------------------find_predicate_insertion_point--------------------------
1898 // Find a good location to insert a predicate
1899 ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c) {
1900   if (start_c == C->root() || !start_c->is_Proj())
1901     return NULL;
1902   if (is_uncommon_trap_if_pattern(start_c->as_Proj(), true/*Reason_Predicate*/)) {
1903     return start_c->as_Proj();
1904   }
1905   return NULL;
1906 }
1907 
1908 //------------------------------Invariance-----------------------------------
1909 // Helper class for loop_predication_impl to compute invariance on the fly and
1910 // clone invariants.
1911 class Invariance : public StackObj {
1912   VectorSet _visited, _invariant;
1913   Node_Stack _stack;
1914   VectorSet _clone_visited;
1915   Node_List _old_new; // map of old to new (clone)
1916   IdealLoopTree* _lpt;
1917   PhaseIdealLoop* _phase;
1918 
1919   // Helper function to set up a node for invariance computation.
1920   // If n is a known invariant, mark it directly.  Otherwise, check
1921   // whether n can be pushed onto the stack for further processing.
1922   void visit(Node* use, Node* n) {
1923     if (_lpt->is_invariant(n)) { // known invariant
1924       _invariant.set(n->_idx);
1925     } else if (!n->is_CFG()) {
1926       Node *n_ctrl = _phase->ctrl_or_self(n);
1927       Node *u_ctrl = _phase->ctrl_or_self(use); // self if use is a CFG
1928       if (_phase->is_dominator(n_ctrl, u_ctrl)) {
1929         _stack.push(n, n->in(0) == NULL ? 1 : 0);
1930       }
1931     }
1932   }
1933 
1934   // Compute invariance for "the_node" and (possibly) all its inputs recursively
1935   // on the fly
1936   void compute_invariance(Node* n) {
1937     assert(_visited.test(n->_idx), "must be");
1938     visit(n, n);
1939     while (_stack.is_nonempty()) {
1940       Node*  n = _stack.node();
1941       uint idx = _stack.index();
1942       if (idx == n->req()) { // all inputs are processed
1943         _stack.pop();
1944         // n is invariant if its inputs are all invariant
1945         bool all_inputs_invariant = true;
1946         for (uint i = 0; i < n->req(); i++) {
1947           Node* in = n->in(i);
1948           if (in == NULL) continue;
1949           assert(_visited.test(in->_idx), "must have visited input");
1950           if (!_invariant.test(in->_idx)) { // bad guy
1951             all_inputs_invariant = false;
1952             break;
1953           }
1954         }
1955         if (all_inputs_invariant) {
1956           _invariant.set(n->_idx); // I am an invariant too
1957         }
1958       } else { // process next input
1959         _stack.set_index(idx + 1);
1960         Node* m = n->in(idx);
1961         if (m != NULL && !_visited.test_set(m->_idx)) {
1962           visit(n, m);
1963         }
1964       }
1965     }
1966   }
1967 
1968   // Helper function to set up _old_new map for clone_nodes.
1969   // If n is a known invariant, set up directly ("clone" of n == n).
1970   // Otherwise, push n onto the stack for real cloning.
1971   void clone_visit(Node* n) {
1972     assert(_invariant.test(n->_idx), "must be invariant");
1973     if (_lpt->is_invariant(n)) { // known invariant
1974       _old_new.map(n->_idx, n);
1975     } else { // to be cloned
1976       assert(!n->is_CFG(), "should not see CFG here");
1977       _stack.push(n, n->in(0) == NULL ? 1 : 0);
1978     }
1979   }
1980 
1981   // Clone "n" and (possibly) all its inputs recursively
1982   void clone_nodes(Node* n, Node* ctrl) {
1983     clone_visit(n);
1984     while (_stack.is_nonempty()) {
1985       Node*  n = _stack.node();
1986       uint idx = _stack.index();
1987       if (idx == n->req()) { // all inputs processed, clone n!
1988         _stack.pop();
1989         // clone invariant node
1990         Node* n_cl = n->clone();
1991         _old_new.map(n->_idx, n_cl);
1992         _phase->register_new_node(n_cl, ctrl);
1993         for (uint i = 0; i < n->req(); i++) {
1994           Node* in = n_cl->in(i);
1995           if (in == NULL) continue;
1996           n_cl->set_req(i, _old_new[in->_idx]);
1997         }
1998       } else { // process next input
1999         _stack.set_index(idx + 1);
2000         Node* m = n->in(idx);
2001         if (m != NULL && !_clone_visited.test_set(m->_idx)) {
2002           clone_visit(m); // visit the input
2003         }
2004       }
2005     }
2006   }
2007 
2008  public:
2009   Invariance(Arena* area, IdealLoopTree* lpt) :
2010     _lpt(lpt), _phase(lpt->_phase),
2011     _visited(area), _invariant(area), _stack(area, 10 /* guess */),
2012     _clone_visited(area), _old_new(area)
2013   {}
2014 
2015   // Map old to n for invariance computation and clone
2016   void map_ctrl(Node* old, Node* n) {
2017     assert(old->is_CFG() && n->is_CFG(), "must be");
2018     _old_new.map(old->_idx, n); // "clone" of old is n
2019     _invariant.set(old->_idx);  // old is invariant
2020     _clone_visited.set(old->_idx);
2021   }
2022 
2023   // Driver function to compute invariance
2024   bool is_invariant(Node* n) {
2025     if (!_visited.test_set(n->_idx))
2026       compute_invariance(n);
2027     return (_invariant.test(n->_idx) != 0);
2028   }
2029 
2030   // Driver function to clone invariant
2031   Node* clone(Node* n, Node* ctrl) {
2032     assert(ctrl->is_CFG(), "must be");
2033     assert(_invariant.test(n->_idx), "must be an invariant");
2034     if (!_clone_visited.test(n->_idx))
2035       clone_nodes(n, ctrl);
2036     return _old_new[n->_idx];
2037   }
2038 };
2039 
2040 //------------------------------is_range_check_if -----------------------------------
2041 // Returns true if the predicate of iff is of the form "scale*iv + offset u< load_range(ptr)".
2042 // Note: this function is designed specifically for loop predication. We require load_range
2043 //       and offset to be loop invariant, computed on the fly by "invar".
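     // For instance (illustrative), the check for a[2*iv + 3] has the shape
     //   Bool(lt) -> CmpU( AddI( MulI(iv, 2), 3 ), LoadRange(a) )
     // i.e. "2*iv + 3 u< a.length".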
2044 bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const {
2045   if (!is_loop_exit(iff)) {
2046     return false;
2047   }
2048   if (!iff->in(1)->is_Bool()) {
2049     return false;
2050   }
2051   const BoolNode *bol = iff->in(1)->as_Bool();
2052   if (bol->_test._test != BoolTest::lt) {
2053     return false;
2054   }
2055   if (!bol->in(1)->is_Cmp()) {
2056     return false;
2057   }
2058   const CmpNode *cmp = bol->in(1)->as_Cmp();
2059   if (cmp->Opcode() != Op_CmpU ) {
2060     return false;
2061   }
2062   Node* range = cmp->in(2);
2063   if (range->Opcode() != Op_LoadRange) {
2064     const TypeInt* tint = phase->_igvn.type(range)->isa_int();
2065     if (!OptimizeFill || tint == NULL || tint->empty() || tint->_lo < 0) {
2066       // Allow predication on positive values that aren't LoadRanges.
2067       // This allows optimization of loops where the length of the
2068       // array is a known value and doesn't need to be loaded back
2069       // from the array.
2070       return false;
2071     }
2072   }
2073   if (!invar.is_invariant(range)) {
2074     return false;
2075   }
2076   Node *iv     = _head->as_CountedLoop()->phi();
2077   int   scale  = 0;
2078   Node *offset = NULL;
2079   if (!phase->is_scaled_iv_plus_offset(cmp->in(1), iv, &scale, &offset)) {
2080     return false;
2081   }
2082   if (offset && !invar.is_invariant(offset)) { // offset must be invariant
2083     return false;
2084   }
2085   return true;
2086 }
2087 
2088 //------------------------------rc_predicate-----------------------------------
2089 // Create a range check predicate
2090 //
2091 // for (i = init; i < limit; i += stride) {
2092 //    a[scale*i+offset]
2093 // }
2094 //
2095 // Compute max(scale*i + offset) for init <= i < limit and build the predicate
2096 // as "max(scale*i + offset) u< a.length".
2097 //
2098 // There are two cases for max(scale*i + offset):
2099 // (1) stride*scale > 0
2100 //   max(scale*i + offset) = scale*(limit-stride) + offset
2101 // (2) stride*scale < 0
2102 //   max(scale*i + offset) = scale*init + offset
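     // For instance (illustrative): scale == 2, stride == 1, init == 0 and
     // limit == 10 fall into case (1), so
     //   max(2*i + offset) = 2*(10-1) + offset = 18 + offset
     // and the upper-bound predicate is "18 + offset u< a.length".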
2103 BoolNode* PhaseIdealLoop::rc_predicate(Node* ctrl,
2104                                        int scale, Node* offset,
2105                                        Node* init, Node* limit, Node* stride,
2106                                        Node* range, bool upper) {
2107   DEBUG_ONLY(ttyLocker ttyl);
2108   if (TraceLoopPredicate) tty->print("rc_predicate ");
2109 
2110   Node* max_idx_expr  = init;
2111   int stride_con = stride->get_int();
2112   if ((stride_con > 0) == (scale > 0) == upper) {
2113     max_idx_expr = new (C, 3) SubINode(limit, stride);
2114     register_new_node(max_idx_expr, ctrl);
2115     if (TraceLoopPredicate) tty->print("(limit - stride) ");
2116   } else {
2117     if (TraceLoopPredicate) tty->print("init ");
2118   }
2119 
2120   if (scale != 1) {
2121     ConNode* con_scale = _igvn.intcon(scale);
2122     max_idx_expr = new (C, 3) MulINode(max_idx_expr, con_scale);
2123     register_new_node(max_idx_expr, ctrl);
2124     if (TraceLoopPredicate) tty->print("* %d ", scale);
2125   }
2126 
2127   if (offset && (!offset->is_Con() || offset->get_int() != 0)) {
2128     max_idx_expr = new (C, 3) AddINode(max_idx_expr, offset);
2129     register_new_node(max_idx_expr, ctrl);
2130     if (TraceLoopPredicate)
2131       if (offset->is_Con()) tty->print("+ %d ", offset->get_int());
2132       else tty->print("+ offset ");
2133   }
2134 
2135   CmpUNode* cmp = new (C, 3) CmpUNode(max_idx_expr, range);
2136   register_new_node(cmp, ctrl);
2137   BoolNode* bol = new (C, 2) BoolNode(cmp, BoolTest::lt);
2138   register_new_node(bol, ctrl);
2139 
2140   if (TraceLoopPredicate) tty->print_cr("<u range");
2141   return bol;
2142 }
2143 
2144 //------------------------------ loop_predication_impl--------------------------
2145 // Insert loop predicates for null checks and range checks
2146 bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
2147   if (!UseLoopPredicate) return false;
2148 
2149   if (!loop->_head->is_Loop()) {
2150     // Could be a simple region when irreducible loops are present.
2151     return false;
2152   }
2153 
2154   CountedLoopNode *cl = NULL;
2155   if (loop->_head->is_CountedLoop()) {
2156     cl = loop->_head->as_CountedLoop();
2157     // do nothing for iteration-split loops
2158     if (!cl->is_normal_loop()) return false;
2159   }
2160 
2161   // Too many traps seen?
2162   bool tmt = C->too_many_traps(C->method(), 0, Deoptimization::Reason_predicate);
2163   int tc = C->trap_count(Deoptimization::Reason_predicate);
2164   if (tmt || tc > 0) {
2165     if (TraceLoopPredicate) {
2166       tty->print_cr("too many predicate traps: %d", tc);
2167       C->method()->print(); // which method has too many predicate traps
2168       tty->print_cr("");
2169     }
2170     return false;
2171   }
2172 
2173   LoopNode *lpn  = loop->_head->as_Loop();
2174   Node* entry = lpn->in(LoopNode::EntryControl);
2175 
2176   ProjNode *predicate_proj = find_predicate_insertion_point(entry);
2177   if (!predicate_proj) {
2178 #ifndef PRODUCT
2179     if (TraceLoopPredicate) {
2180       tty->print("missing predicate:");
2181       loop->dump_head();
2182     }
2183 #endif
2184     return false;
2185   }
2186 
2187   ConNode* zero = _igvn.intcon(0);
2188   set_ctrl(zero, C->root());
2189   Node *cond_false = new (C, 2) Conv2BNode(zero);
2190   register_new_node(cond_false, C->root());
2191   ConNode* one = _igvn.intcon(1);
2192   set_ctrl(one, C->root());
2193   Node *cond_true = new (C, 2) Conv2BNode(one);
2194   register_new_node(cond_true, C->root());
2195 
2196   ResourceArea *area = Thread::current()->resource_area();
2197   Invariance invar(area, loop);
2198 
2199   // Create list of if-projs such that a newer proj dominates all older
2200   // projs in the list, and they all dominate loop->tail()
2201   Node_List if_proj_list(area);
2202   LoopNode *head  = loop->_head->as_Loop();
2203   Node *current_proj = loop->tail(); // start from tail
2204   while ( current_proj != head ) {
2205     if (loop == get_loop(current_proj) && // still in the loop ?
2206         current_proj->is_Proj()        && // is a projection  ?
2207         current_proj->in(0)->Opcode() == Op_If) { // is an If projection ?
2208       if_proj_list.push(current_proj);
2209     }
2210     current_proj = idom(current_proj);
2211   }
2212 
2213   bool hoisted = false; // true if at least one proj is promoted
2214   while (if_proj_list.size() > 0) {
2215     // The following is set to non-null when a predicate can be hoisted
2216     ProjNode* new_predicate_proj = NULL;
2217 
2218     ProjNode* proj = if_proj_list.pop()->as_Proj();
2219     IfNode*   iff  = proj->in(0)->as_If();
2220 
2221     if (!is_uncommon_trap_if_pattern(proj)) {
2222       if (loop->is_loop_exit(iff)) {
2223         // Stop processing the remaining projs in the list because their
2224         // execution depends on the condition of "iff" (iff->in(1)).
2225         break;
2226       } else {
2227         // Both arms are inside the loop. There are two cases:
2228         // (1) there is one backward branch. In this case, any remaining proj
2229         //     in the if_proj list post-dominates "iff". So, the condition of
2230         //     "iff" does not directly determine the execution of the remaining
2231         //     projs, and we can safely continue.
2232         // (2) both arms are forward branches, i.e. a diamond shape. In this case,
2233         //     "proj" does not dominate loop->tail(), so it cannot be in the if_proj list.
2234         continue;
2235       }
2236     }
2237 
2238     Node*     test = iff->in(1);
2239     if (!test->is_Bool()) { // Conv2B, ...
2240       continue;
2241     }
2242     BoolNode* bol = test->as_Bool();
2243     if (invar.is_invariant(bol)) {
2244       // Invariant test
2245       new_predicate_proj = create_new_if_for_predicate(predicate_proj);
2246       Node* ctrl = new_predicate_proj->in(0)->as_If()->in(0);
2247       BoolNode* new_predicate_bol = invar.clone(bol, ctrl)->as_Bool();
2248 
2249       // Negate test if necessary
2250       bool negated = false;
2251       if (proj->_con != predicate_proj->_con) {
2252         new_predicate_bol = new (C, 2) BoolNode(new_predicate_bol->in(1), new_predicate_bol->_test.negate());
2253         register_new_node(new_predicate_bol, ctrl);
2254         negated = true;
2255       }
2256       IfNode* new_predicate_iff = new_predicate_proj->in(0)->as_If();
2257       _igvn.hash_delete(new_predicate_iff);
2258       new_predicate_iff->set_req(1, new_predicate_bol);
2259       if (TraceLoopPredicate) tty->print_cr("invariant if%s: %d", negated ? " negated" : "", new_predicate_iff->_idx);
2260 
2261     } else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) {
2262       assert(proj->_con == predicate_proj->_con, "must match");
2263 
2264       // Range check for counted loops
2265       const Node*    cmp    = bol->in(1)->as_Cmp();
2266       Node*          idx    = cmp->in(1);
2267       assert(!invar.is_invariant(idx), "index is variant");
2268       assert(cmp->in(2)->Opcode() == Op_LoadRange || OptimizeFill, "must be");
2269       Node* rng = cmp->in(2);
2270       assert(invar.is_invariant(rng), "range must be invariant");
2271       int scale    = 1;
2272       Node* offset = zero;
2273       bool ok = is_scaled_iv_plus_offset(idx, cl->phi(), &scale, &offset);
2274       assert(ok, "must be index expression");
2275 
2276       Node* init    = cl->init_trip();
2277       Node* limit   = cl->limit();
2278       Node* stride  = cl->stride();
2279 
2280       // Build if's for the upper and lower bound tests.  The
2281       // lower_bound test will dominate the upper bound test and all
2282       // cloned or created nodes will use the lower bound test as
2283       // their declared control.
2284       ProjNode* lower_bound_proj = create_new_if_for_predicate(predicate_proj);
2285       ProjNode* upper_bound_proj = create_new_if_for_predicate(predicate_proj);
2286       assert(upper_bound_proj->in(0)->as_If()->in(0) == lower_bound_proj, "should dominate");
2287       Node *ctrl = lower_bound_proj->in(0)->as_If()->in(0);
2288 
2289       // Perform cloning to keep Invariance state correct since the
2290       // late schedule will place invariant things in the loop.
2291       rng = invar.clone(rng, ctrl);
2292       if (offset && offset != zero) {
2293         assert(invar.is_invariant(offset), "offset must be loop invariant");
2294         offset = invar.clone(offset, ctrl);
2295       }
2296 
2297       // Test the lower bound
2298       Node*  lower_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, rng, false);
2299       IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If();
2300       _igvn.hash_delete(lower_bound_iff);
2301       lower_bound_iff->set_req(1, lower_bound_bol);
2302       if (TraceLoopPredicate) tty->print_cr("lower bound check if: %d", lower_bound_iff->_idx);
2303 
2304       // Test the upper bound
2305       Node* upper_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, rng, true);
2306       IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If();
2307       _igvn.hash_delete(upper_bound_iff);
2308       upper_bound_iff->set_req(1, upper_bound_bol);
2309       if (TraceLoopPredicate) tty->print_cr("upper bound check if: %d", upper_bound_iff->_idx);
2310 
2311       // Fall through into rest of the clean up code which will move
2312       // any dependent nodes onto the upper bound test.
2313       new_predicate_proj = upper_bound_proj;
2314     } else {
2315       // The other proj of the "iff" is an uncommon trap projection, and we can
2316       // assume it will not be executed ("executed" means the uct is raised).
2317       continue;
2318     }
2319 
2320     // Success - attach condition (new_predicate_bol) to predicate if
2321     invar.map_ctrl(proj, new_predicate_proj); // so that invariance test can be appropriate
2322 
2323     // Eliminate the old if in the loop body
2324     _igvn.hash_delete(iff);
2325     iff->set_req(1, proj->is_IfFalse() ? cond_false : cond_true);
2326 
2327     Node* ctrl = new_predicate_proj; // new control
2328     ProjNode* dp = proj;     // old control
2329     assert(get_loop(dp) == loop, "guaranteed at the time of collecting proj");
2330     // Find nodes (which depend only on the test) off the surviving projection;
2331     // move them outside the loop with the control of the new predicate proj
2332     for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
2333       Node* cd = dp->fast_out(i); // Control-dependent node
2334       if (cd->depends_only_on_test()) {
2335         assert(cd->in(0) == dp, "");
2336         _igvn.hash_delete(cd);
2337         cd->set_req(0, ctrl); // ctrl, not NULL
2338         set_early_ctrl(cd);
2339         _igvn._worklist.push(cd);
2340         IdealLoopTree *new_loop = get_loop(get_ctrl(cd));
2341         if (new_loop != loop) {
2342           if (!loop->_child) loop->_body.yank(cd);
2343           if (!new_loop->_child ) new_loop->_body.push(cd);
2344         }
2345         --i;
2346         --imax;
2347       }
2348     }
2349 
2350     hoisted = true;
2351     C->set_major_progress();
2352   } // end while
2353 
2354 #ifndef PRODUCT
2355   // Report that the loop predication has actually been performed
2356   // for this loop
2357   if (TraceLoopPredicate && hoisted) {
2358     tty->print("Loop Predication Performed:");
2359     loop->dump_head();
2360   }
2361 #endif
2362 
2363   return hoisted;
2364 }
2365 
2366 //------------------------------loop_predication--------------------------------
2367 // driver routine for loop predication optimization
2368 bool IdealLoopTree::loop_predication( PhaseIdealLoop *phase) {
2369   bool hoisted = false;
2370   // Recursively promote predicates
2371   if ( _child ) {
2372     hoisted = _child->loop_predication( phase);
2373   }
2374 
2375   // self
2376   if (!_irreducible && !tail()->is_top()) {
2377     hoisted |= phase->loop_predication_impl(this);
2378   }
2379 
2380   if ( _next ) { //sibling
2381     hoisted |= _next->loop_predication( phase);
2382   }
2383 
2384   return hoisted;
2385 }
2386 
2387 
2388 // Process all the loops in the loop tree and replace any fill
2389 // patterns with an intrinsic version.
2390 bool PhaseIdealLoop::do_intrinsify_fill() {
2391   bool changed = false;
2392   for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
2393     IdealLoopTree* lpt = iter.current();
2394     changed |= intrinsify_fill(lpt);
2395   }
2396   return changed;
2397 }
2398 
2399 
2400 // Examine an inner loop looking for a single store of an invariant
2401 // value in a unit-stride loop.
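     // For instance (illustrative), the shape being matched is
     //   for (int i = init; i < limit; i++) { a[i] = v; }  // v loop invariant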
2402 bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
2403                                      Node*& shift, Node*& con) {
2404   const char* msg = NULL;
2405   Node* msg_node = NULL;
2406 
2407   store_value = NULL;
2408   con = NULL;
2409   shift = NULL;
2410 
2411   // Process the loop looking for stores.  If there are multiple
2412   // stores or extra control flow, give up at this point.
2413   CountedLoopNode* head = lpt->_head->as_CountedLoop();
2414   for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
2415     Node* n = lpt->_body.at(i);
2416     if (n->outcnt() == 0) continue; // Ignore dead
2417     if (n->is_Store()) {
2418       if (store != NULL) {
2419         msg = "multiple stores";
2420         break;
2421       }
2422       int opc = n->Opcode();
2423       if (opc == Op_StoreP || opc == Op_StoreN || opc == Op_StoreCM) {
2424         msg = "oop fills not handled";
2425         break;
2426       }
2427       Node* value = n->in(MemNode::ValueIn);
2428       if (!lpt->is_invariant(value)) {
2429         msg  = "variant store value";
2430       } else if (!_igvn.type(n->in(MemNode::Address))->isa_aryptr()) {
2431         msg = "not array address";
2432       }
2433       store = n;
2434       store_value = value;
2435     } else if (n->is_If() && n != head->loopexit()) {
2436       msg = "extra control flow";
2437       msg_node = n;
2438     }
2439   }
2440 
2441   if (store == NULL) {
2442     // No store in loop
2443     return false;
2444   }
2445 
2446   if (msg == NULL && head->stride_con() != 1) {
2447     // could handle negative strides too
2448     if (head->stride_con() < 0) {
2449       msg = "negative stride";
2450     } else {
2451       msg = "non-unit stride";
2452     }
2453   }
2454 
2455   if (msg == NULL && !store->in(MemNode::Address)->is_AddP()) {
2456     msg = "can't handle store address";
2457     msg_node = store->in(MemNode::Address);
2458   }
2459 
2460   if (msg == NULL &&
2461       (!store->in(MemNode::Memory)->is_Phi() ||
2462        store->in(MemNode::Memory)->in(LoopNode::LoopBackControl) != store)) {
2463     msg = "store memory isn't proper phi";
2464     msg_node = store->in(MemNode::Memory);
2465   }
2466 
2467   // Make sure there is an appropriate fill routine
2468   BasicType t = store->as_Mem()->memory_type();
2469   const char* fill_name;
2470   if (msg == NULL &&
2471       StubRoutines::select_fill_function(t, false, fill_name) == NULL) {
2472     msg = "unsupported store";
2473     msg_node = store;
2474   }
2475 
2476   if (msg != NULL) {
2477 #ifndef PRODUCT
2478     if (TraceOptimizeFill) {
2479       tty->print_cr("not fill intrinsic candidate: %s", msg);
2480       if (msg_node != NULL) msg_node->dump();
2481     }
2482 #endif
2483     return false;
2484   }
2485 
2486   // Make sure the address expression can be handled.  It should be
2487   // head->phi * elsize + con.  head->phi might have a ConvI2L.
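       // For instance (illustrative), for an int[] store on a 64-bit VM the
       // unpacked elements might be { LShiftX(ConvI2L(head->phi()), 2), #con },
       // i.e. phi * 4 plus a constant array-header offset.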
2488   Node* elements[4];
2489   Node* conv = NULL;
2490   bool found_index = false;
2491   int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements));
2492   for (int e = 0; e < count; e++) {
2493     Node* n = elements[e];
2494     if (n->is_Con() && con == NULL) {
2495       con = n;
2496     } else if (n->Opcode() == Op_LShiftX && shift == NULL) {
2497       Node* value = n->in(1);
2498 #ifdef _LP64
2499       if (value->Opcode() == Op_ConvI2L) {
2500         conv = value;
2501         value = value->in(1);
2502       }
2503 #endif
2504       if (value != head->phi()) {
2505         msg = "unhandled shift in address";
2506       } else {
2507         found_index = true;
2508         shift = n;
2509         assert(type2aelembytes(store->as_Mem()->memory_type(), true) == 1 << shift->in(2)->get_int(), "scale should match");
2510       }
2511     } else if (n->Opcode() == Op_ConvI2L && conv == NULL) {
2512       if (n->in(1) == head->phi()) {
2513         found_index = true;
2514         conv = n;
2515       } else {
2516         msg = "unhandled input to ConvI2L";
2517       }
2518     } else if (n == head->phi()) {
2519       // no shift, check below for allowed cases
2520       found_index = true;
2521     } else {
2522       msg = "unhandled node in address";
2523       msg_node = n;
2524     }
2525   }
2526 
2527   if (count == -1) {
2528     msg = "malformed address expression";
2529     msg_node = store;
2530   }
2531 
2532   if (!found_index) {
2533     msg = "missing use of index";
2534   }
2535 
2536   // byte-sized items won't have a shift
2537   if (msg == NULL && shift == NULL && t != T_BYTE && t != T_BOOLEAN) {
2538     msg = "can't find shift";
2539     msg_node = store;
2540   }
2541 
2542   if (msg != NULL) {
2543 #ifndef PRODUCT
2544     if (TraceOptimizeFill) {
2545       tty->print_cr("not fill intrinsic: %s", msg);
2546       if (msg_node != NULL) msg_node->dump();
2547     }
2548 #endif
2549     return false;
2550   }
2551 
2552   // Now make sure all the other nodes in the loop can be handled
2553   VectorSet ok(Thread::current()->resource_area());
2554 
2555   // store related values are ok
2556   ok.set(store->_idx);
2557   ok.set(store->in(MemNode::Memory)->_idx);
2558 
2559   // Loop structure is ok
2560   ok.set(head->_idx);
2561   ok.set(head->loopexit()->_idx);
2562   ok.set(head->phi()->_idx);
2563   ok.set(head->incr()->_idx);
2564   ok.set(head->loopexit()->cmp_node()->_idx);
2565   ok.set(head->loopexit()->in(1)->_idx);
2566 
2567   // Address elements are ok
2568   if (con)   ok.set(con->_idx);
2569   if (shift) ok.set(shift->_idx);
2570   if (conv)  ok.set(conv->_idx);
2571 
2572   for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
2573     Node* n = lpt->_body.at(i);
2574     if (n->outcnt() == 0) continue; // Ignore dead
2575     if (ok.test(n->_idx)) continue;
2576     // Backedge projection is ok
2577     if (n->is_IfTrue() && n->in(0) == head->loopexit()) continue;
2578     if (!n->is_AddP()) {
2579       msg = "unhandled node";
2580       msg_node = n;
2581       break;
2582     }
2583   }
2584 
2585   // Make sure no unexpected values are used outside the loop
2586   for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
2587     Node* n = lpt->_body.at(i);
2588     // These values can be replaced with other nodes if they are used
2589     // outside the loop.
2590     if (n == store || n == head->loopexit() || n == head->incr() || n == store->in(MemNode::Memory)) continue;
2591     for (SimpleDUIterator iter(n); iter.has_next(); iter.next()) {
2592       Node* use = iter.get();
2593       if (!lpt->_body.contains(use)) {
2594         msg = "node is used outside loop";
2595         // lpt->_body.dump();
2596         msg_node = n;
2597         break;
2598       }
2599     }
2600   }
2601 
2602 #ifdef ASSERT
2603   if (TraceOptimizeFill) {
2604     if (msg != NULL) {
2605       tty->print_cr("no fill intrinsic: %s", msg);
2606       if (msg_node != NULL) msg_node->dump();
2607     } else {
2608       tty->print_cr("fill intrinsic for:");
2609     }
2610     store->dump();
2611     if (Verbose) {
2612       lpt->_body.dump();
2613     }
2614   }
2615 #endif
2616 
2617   return msg == NULL;
2618 }
2619 
2620 
2621 
2622 bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
2623   // Only for counted inner loops
2624   if (!lpt->is_counted() || !lpt->is_inner()) {
2625     return false;
2626   }
2627 
2628   // Must have constant stride
2629   CountedLoopNode* head = lpt->_head->as_CountedLoop();
2630   if (!head->stride_is_con() || !head->is_normal_loop()) {
2631     return false;
2632   }
2633 
2634   // Check that the body only contains a store of a loop invariant
2635   // value that is indexed by the loop phi.
2636   Node* store = NULL;
2637   Node* store_value = NULL;
2638   Node* shift = NULL;
2639   Node* offset = NULL;
2640   if (!match_fill_loop(lpt, store, store_value, shift, offset)) {
2641     return false;
2642   }
2643 
2644   // Now replace the whole loop body by a call to a fill routine that
2645   // covers the same region as the loop.
2646   Node* base = store->in(MemNode::Address)->as_AddP()->in(AddPNode::Base);
2647 
2648   // Build an expression for the beginning of the copy region
2649   Node* index = head->init_trip();
2650 #ifdef _LP64
2651   index = new (C, 2) ConvI2LNode(index);
2652   _igvn.register_new_node_with_optimizer(index);
2653 #endif
2654   if (shift != NULL) {
2655     // byte arrays don't require a shift but others do.
2656     index = new (C, 3) LShiftXNode(index, shift->in(2));
2657     _igvn.register_new_node_with_optimizer(index);
2658   }
2659   index = new (C, 4) AddPNode(base, base, index);
2660   _igvn.register_new_node_with_optimizer(index);
2661   Node* from = new (C, 4) AddPNode(base, index, offset);
2662   _igvn.register_new_node_with_optimizer(from);
2663   // Compute the number of elements to fill
2664   Node* len = new (C, 3) SubINode(head->limit(), head->init_trip());
2665   _igvn.register_new_node_with_optimizer(len);
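       // For a unit-stride counted loop this is exactly the number of
       // elements the original loop wrote.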
2666 
2667   BasicType t = store->as_Mem()->memory_type();
2668   bool aligned = false;
2669   if (offset != NULL && head->init_trip()->is_Con()) {
2670     int element_size = type2aelembytes(t);
2671     aligned = (offset->find_intptr_t_type()->get_con() + head->init_trip()->get_int() * element_size) % HeapWordSize == 0;
2672   }
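       // The fill is HeapWord aligned when the byte offset of the first
       // element written (constant offset plus scaled initial index) is a
       // multiple of HeapWordSize, allowing an aligned stub to be selected.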
2673 
2674   // Build a call to the fill routine
2675   const char* fill_name;
2676   address fill = StubRoutines::select_fill_function(t, aligned, fill_name);
2677   assert(fill != NULL, "matched fill loop but no fill routine");
2678 
2679   // Convert float/double to int/long for fill routines
2680   if (t == T_FLOAT) {
2681     store_value = new (C, 2) MoveF2INode(store_value);
2682     _igvn.register_new_node_with_optimizer(store_value);
2683   } else if (t == T_DOUBLE) {
2684     store_value = new (C, 2) MoveD2LNode(store_value);
2685     _igvn.register_new_node_with_optimizer(store_value);
2686   }
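       // The Move nodes reinterpret the raw bits rather than converting
       // the value, since the fill stubs only traffic in integral types.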
2687 
2688   Node* mem_phi = store->in(MemNode::Memory);
2689   Node* result_ctrl;
2690   Node* result_mem;
2691   const TypeFunc* call_type = OptoRuntime::array_fill_Type();
2692   int size = call_type->domain()->cnt();
2693   CallLeafNode *call = new (C, size) CallLeafNoFPNode(call_type, fill,
2694                                                       fill_name, TypeAryPtr::get_array_body_type(t));
2695   call->init_req(TypeFunc::Parms+0, from);
2696   call->init_req(TypeFunc::Parms+1, store_value);
2697 #ifdef _LP64
2698   len = new (C, 2) ConvI2LNode(len);
2699   _igvn.register_new_node_with_optimizer(len);
2700 #endif
2701   call->init_req(TypeFunc::Parms+2, len);
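       // On 64-bit the long length occupies two parameter slots; the
       // second (high) half is top.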
2702 #ifdef _LP64
2703   call->init_req(TypeFunc::Parms+3, C->top());
2704 #endif
2705   call->init_req( TypeFunc::Control, head->init_control());
2706   call->init_req( TypeFunc::I_O    , C->top() );       // does no i/o
2707   call->init_req( TypeFunc::Memory ,  mem_phi->in(LoopNode::EntryControl) );
2708   call->init_req( TypeFunc::ReturnAdr, C->start()->proj_out(TypeFunc::ReturnAdr) );
2709   call->init_req( TypeFunc::FramePtr, C->start()->proj_out(TypeFunc::FramePtr) );
2710   _igvn.register_new_node_with_optimizer(call);
2711   result_ctrl = new (C, 1) ProjNode(call,TypeFunc::Control);
2712   _igvn.register_new_node_with_optimizer(result_ctrl);
2713   result_mem = new (C, 1) ProjNode(call,TypeFunc::Memory);
2714   _igvn.register_new_node_with_optimizer(result_mem);
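       // result_ctrl and result_mem now stand for the control and memory
       // state after the fill; below they take over the loop's outgoing edges.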
2715 
2716   // If this fill is tightly coupled to an allocation and overwrites
2717   // the whole array body, allow it to take over the zeroing.
2718   AllocateNode* alloc = AllocateNode::Ideal_allocation(base, this);
2719   if (alloc != NULL && alloc->is_AllocateArray()) {
2720     Node* length = alloc->as_AllocateArray()->Ideal_length();
2721     if (head->limit() == length &&
2722         head->init_trip() == _igvn.intcon(0)) {
2723       if (TraceOptimizeFill) {
2724         tty->print_cr("Eliminated zeroing in allocation");
2725       }
2726       alloc->maybe_set_complete(&_igvn);
2727     } else {
2728 #ifdef ASSERT
2729       if (TraceOptimizeFill) {
2730         tty->print_cr("filling array but bounds don't match");
2731         alloc->dump();
2732         head->init_trip()->dump();
2733         head->limit()->dump();
2734         length->dump();
2735       }
2736 #endif
2737     }
2738   }
2739 
2740   // Redirect the old control and memory edges that are outside the loop.
2741   Node* exit = head->loopexit()->proj_out(0);
2742   // Sometimes the memory phi of the head is used as the outgoing
2743   // state of the loop.  It's safe in this case to replace it with the
2744   // result_mem.
2745   _igvn.replace_node(store->in(MemNode::Memory), result_mem);
2746   _igvn.replace_node(exit, result_ctrl);
2747   _igvn.replace_node(store, result_mem);
2748   // Any uses of the increment outside of the loop become the loop limit.
2749   _igvn.replace_node(head->incr(), head->limit());
2750 
2751   // Disconnect the head from the loop.
2752   for (uint i = 0; i < lpt->_body.size(); i++) {
2753     Node* n = lpt->_body.at(i);
2754     _igvn.replace_node(n, C->top());
2755   }
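       // Everything that remained in the body is dead; igvn will clean it up.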
2756 
2757   return true;
2758 }