/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/superword.hpp"
#include "opto/vectornode.hpp"

//------------------------------is_loop_exit-----------------------------------
// Given an IfNode, return the loop-exiting projection or NULL if both
// arms remain in the loop.
Node *IdealLoopTree::is_loop_exit(Node *iff) const {
  if( iff->outcnt() != 2 ) return NULL; // Ignore partially dead tests
  PhaseIdealLoop *phase = _phase;
  // Test is an IfNode, has 2 projections.  If BOTH are in the loop
  // we need loop unswitching instead of peeling.
  if( !is_member(phase->get_loop( iff->raw_out(0) )) )
    return iff->raw_out(0);
  if( !is_member(phase->get_loop( iff->raw_out(1) )) )
    return iff->raw_out(1);
  return NULL;
}


//=============================================================================


//------------------------------record_for_igvn----------------------------
// Put loop body on igvn work list
void IdealLoopTree::record_for_igvn() {
  for( uint i = 0; i < _body.size(); i++ ) {
    Node *n = _body.at(i);
    _phase->_igvn._worklist.push(n);
  }
}

//------------------------------compute_exact_trip_count-----------------------
// Compute loop exact trip count if possible. Do not recalculate trip count for
// split loops (pre-main-post) which have their limits and inits behind an Opaque node.
void IdealLoopTree::compute_exact_trip_count( PhaseIdealLoop *phase ) {
  if (!_head->as_Loop()->is_valid_counted_loop()) {
    return;
  }
  CountedLoopNode* cl = _head->as_CountedLoop();
  // The trip count may become nonexact for iteration-split loops since
  // RCE modifies the limits. Note, the _trip_count value is not reset since
  // it is used to limit unrolling of the main loop.
  cl->set_nonexact_trip_count();

  // The loop's test should be part of the loop.
  if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
    return; // Infinite loop

#ifdef ASSERT
  BoolTest::mask bt = cl->loopexit()->test_trip();
  assert(bt == BoolTest::lt || bt == BoolTest::gt ||
         bt == BoolTest::ne, "canonical test is expected");
#endif

  Node* init_n = cl->init_trip();
  Node* limit_n = cl->limit();
  if (init_n  != NULL &&  init_n->is_Con() &&
      limit_n != NULL && limit_n->is_Con()) {
    // Use longs to avoid integer overflow.
    int stride_con  = cl->stride_con();
    jlong init_con   = cl->init_trip()->get_int();
    jlong limit_con  = cl->limit()->get_int();
    int stride_m    = stride_con - (stride_con > 0 ? 1 : -1);
    jlong trip_count = (limit_con - init_con + stride_m)/stride_con;
    if (trip_count > 0 && (julong)trip_count < (julong)max_juint) {
      // Set exact trip count.
      cl->set_exact_trip_count((uint)trip_count);
    }
  }
}
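// Example (illustrative numbers, not from the original source): for a
// counted loop with init = 0, limit = 10 and stride = 3 the code above
// computes
//   stride_m   = 3 - 1 = 2
//   trip_count = (10 - 0 + 2) / 3 = 4
// which matches the four executed iterations i = 0, 3, 6, 9.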

//------------------------------compute_profile_trip_cnt----------------------------
// Compute loop trip count from profile data as
//    (backedge_count + loop_exit_count) / loop_exit_count
void IdealLoopTree::compute_profile_trip_cnt( PhaseIdealLoop *phase ) {
  if (!_head->is_CountedLoop()) {
    return;
  }
  CountedLoopNode* head = _head->as_CountedLoop();
  if (head->profile_trip_cnt() != COUNT_UNKNOWN) {
    return; // Already computed
  }
  float trip_cnt = (float)max_jint; // default is big

  Node* back = head->in(LoopNode::LoopBackControl);
  while (back != head) {
    if ((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
        back->in(0) &&
        back->in(0)->is_If() &&
        back->in(0)->as_If()->_fcnt != COUNT_UNKNOWN &&
        back->in(0)->as_If()->_prob != PROB_UNKNOWN) {
      break;
    }
    back = phase->idom(back);
  }
  if (back != head) {
    assert((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
           back->in(0), "if-projection exists");
    IfNode* back_if = back->in(0)->as_If();
    float loop_back_cnt = back_if->_fcnt * back_if->_prob;

    // Now compute a loop exit count
    float loop_exit_cnt = 0.0f;
    for( uint i = 0; i < _body.size(); i++ ) {
      Node *n = _body[i];
      if( n->is_If() ) {
        IfNode *iff = n->as_If();
        if( iff->_fcnt != COUNT_UNKNOWN && iff->_prob != PROB_UNKNOWN ) {
          Node *exit = is_loop_exit(iff);
          if( exit ) {
            float exit_prob = iff->_prob;
            if (exit->Opcode() == Op_IfFalse) exit_prob = 1.0 - exit_prob;
            if (exit_prob > PROB_MIN) {
              float exit_cnt = iff->_fcnt * exit_prob;
              loop_exit_cnt += exit_cnt;
            }
          }
        }
      }
    }
    if (loop_exit_cnt > 0.0f) {
      trip_cnt = (loop_back_cnt + loop_exit_cnt) / loop_exit_cnt;
    } else {
      // No exit count, so use the backedge count.
      trip_cnt = loop_back_cnt;
    }
  }
#ifndef PRODUCT
  if (TraceProfileTripCount) {
    tty->print_cr("compute_profile_trip_cnt  lp: %d cnt: %f", head->_idx, trip_cnt);
  }
#endif
  head->set_profile_trip_cnt(trip_cnt);
}
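// Example (illustrative numbers, not from the original source): if the
// backedge is taken about 900 times (loop_back_cnt = _fcnt * _prob =
// 1000 * 0.9) and the loop exits a total of 100 times (loop_exit_cnt),
// the estimated trip count is
//   (900 + 100) / 100 = 10 iterations per loop entry.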

//---------------------is_invariant_addition-----------------------------
// Return nonzero index of invariant operand for an Add or Sub
// of (nonconstant) invariant and variant values. Helper for reassociate_invariants.
int IdealLoopTree::is_invariant_addition(Node* n, PhaseIdealLoop *phase) {
  int op = n->Opcode();
  if (op == Op_AddI || op == Op_SubI) {
    bool in1_invar = this->is_invariant(n->in(1));
    bool in2_invar = this->is_invariant(n->in(2));
    if (in1_invar && !in2_invar) return 1;
    if (!in1_invar && in2_invar) return 2;
  }
  return 0;
}

//---------------------reassociate_add_sub-----------------------------
// Reassociate invariant add and subtract expressions:
//
// inv1 + (x + inv2)  =>  ( inv1 + inv2) + x
// (x + inv2) + inv1  =>  ( inv1 + inv2) + x
// inv1 + (x - inv2)  =>  ( inv1 - inv2) + x
// inv1 - (inv2 - x)  =>  ( inv1 - inv2) + x
// (x + inv2) - inv1  =>  (-inv1 + inv2) + x
// (x - inv2) + inv1  =>  ( inv1 - inv2) + x
// (x - inv2) - inv1  =>  (-inv1 - inv2) + x
// inv1 + (inv2 - x)  =>  ( inv1 + inv2) - x
// inv1 - (x - inv2)  =>  ( inv1 + inv2) - x
// (inv2 - x) + inv1  =>  ( inv1 + inv2) - x
// (inv2 - x) - inv1  =>  (-inv1 + inv2) - x
// inv1 - (x + inv2)  =>  ( inv1 - inv2) - x
//
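// Example (illustrative): with loop-invariant inv1 and inv2 and a
// loop-varying x, the first rule rewrites inv1 + (x + inv2) into
// (inv1 + inv2) + x. The new (inv1 + inv2) node has only invariant
// inputs, so it can be hoisted out of the loop, leaving a single add
// on the hot path.
//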
Node* IdealLoopTree::reassociate_add_sub(Node* n1, PhaseIdealLoop *phase) {
  if ((!n1->is_Add() && !n1->is_Sub()) || n1->outcnt() == 0) return NULL;
  if (is_invariant(n1)) return NULL;
  int inv1_idx = is_invariant_addition(n1, phase);
  if (!inv1_idx) return NULL;
  // Don't mess with add of constant (igvn moves them to expression tree root.)
  if (n1->is_Add() && n1->in(2)->is_Con()) return NULL;
  Node* inv1 = n1->in(inv1_idx);
  Node* n2 = n1->in(3 - inv1_idx);
  int inv2_idx = is_invariant_addition(n2, phase);
  if (!inv2_idx) return NULL;
  Node* x    = n2->in(3 - inv2_idx);
  Node* inv2 = n2->in(inv2_idx);

  bool neg_x    = n2->is_Sub() && inv2_idx == 1;
  bool neg_inv2 = n2->is_Sub() && inv2_idx == 2;
  bool neg_inv1 = n1->is_Sub() && inv1_idx == 2;
  if (n1->is_Sub() && inv1_idx == 1) {
    neg_x    = !neg_x;
    neg_inv2 = !neg_inv2;
  }
  Node* inv1_c = phase->get_ctrl(inv1);
  Node* inv2_c = phase->get_ctrl(inv2);
  Node* n_inv1;
  if (neg_inv1) {
    Node *zero = phase->_igvn.intcon(0);
    phase->set_ctrl(zero, phase->C->root());
    n_inv1 = new SubINode(zero, inv1);
    phase->register_new_node(n_inv1, inv1_c);
  } else {
    n_inv1 = inv1;
  }
  Node* inv;
  if (neg_inv2) {
    inv = new SubINode(n_inv1, inv2);
  } else {
    inv = new AddINode(n_inv1, inv2);
  }
  phase->register_new_node(inv, phase->get_early_ctrl(inv));

  Node* addx;
  if (neg_x) {
    addx = new SubINode(inv, x);
  } else {
    addx = new AddINode(x, inv);
  }
  phase->register_new_node(addx, phase->get_ctrl(x));
  phase->_igvn.replace_node(n1, addx);
  assert(phase->get_loop(phase->get_ctrl(n1)) == this, "");
  _body.yank(n1);
  return addx;
}

//---------------------reassociate_invariants-----------------------------
// Reassociate invariant expressions:
void IdealLoopTree::reassociate_invariants(PhaseIdealLoop *phase) {
  for (int i = _body.size() - 1; i >= 0; i--) {
    Node *n = _body.at(i);
    for (int j = 0; j < 5; j++) {
      Node* nn = reassociate_add_sub(n, phase);
      if (nn == NULL) break;
      n = nn; // again
    }
  }
}
 271 
 272 //------------------------------policy_peeling---------------------------------
 273 // Return TRUE or FALSE if the loop should be peeled or not.  Peel if we can
 274 // make some loop-invariant test (usually a null-check) happen before the loop.
 275 bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const {
 276   Node *test = ((IdealLoopTree*)this)->tail();
 277   int  body_size = ((IdealLoopTree*)this)->_body.size();
 278   // Peeling does loop cloning which can result in O(N^2) node construction
 279   if( body_size > 255 /* Prevent overflow for large body_size */
 280       || (body_size * body_size + phase->C->live_nodes()) > phase->C->max_node_limit() ) {
 281     return false;           // too large to safely clone
 282   }
 283 
 284   // check for vectorized loops, any peeling done was already applied
 285   if (_head->is_CountedLoop() && _head->as_CountedLoop()->do_unroll_only()) return false;
 286 
 287   while( test != _head ) {      // Scan till run off top of loop
 288     if( test->is_If() ) {       // Test?
 289       Node *ctrl = phase->get_ctrl(test->in(1));
 290       if (ctrl->is_top())
 291         return false;           // Found dead test on live IF?  No peeling!
 292       // Standard IF only has one input value to check for loop invariance
 293       assert(test->Opcode() == Op_If || test->Opcode() == Op_CountedLoopEnd || test->Opcode() == Op_RangeCheck, "Check this code when new subtype is added");
 294       // Condition is not a member of this loop?
 295       if( !is_member(phase->get_loop(ctrl)) &&
 296           is_loop_exit(test) )
 297         return true;            // Found reason to peel!
 298     }
 299     // Walk up dominators to loop _head looking for test which is
 300     // executed on every path thru loop.
 301     test = phase->idom(test);
 302   }
 303   return false;
 304 }
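// Example (illustrative Java-level view, not from the original source):
// in
//   for (int i = 0; i < n; i++) {
//     if (p == null) throw new NullPointerException(); // loop-invariant
//     sum += p.a[i];
//   }
// peeling the first iteration executes the invariant null-check once
// before the loop; the copies left inside the loop are then dominated
// by the peeled test and removed by peeled_dom_test_elim().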

//------------------------------peeled_dom_test_elim---------------------------
// If we got the effect of peeling, either by actually peeling or by making
// a pre-loop which must execute at least once, we can remove all
// loop-invariant dominated tests in the main body.
void PhaseIdealLoop::peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new ) {
  bool progress = true;
  while( progress ) {
    progress = false;           // Reset for next iteration
    Node *prev = loop->_head->in(LoopNode::LoopBackControl);//loop->tail();
    Node *test = prev->in(0);
    while( test != loop->_head ) { // Scan till run off top of loop

      int p_op = prev->Opcode();
      if( (p_op == Op_IfFalse || p_op == Op_IfTrue) &&
          test->is_If() &&      // Test?
          !test->in(1)->is_Con() && // And not already obvious?
          // Condition is not a member of this loop?
          !loop->is_member(get_loop(get_ctrl(test->in(1))))){
        // Walk loop body looking for instances of this test
        for( uint i = 0; i < loop->_body.size(); i++ ) {
          Node *n = loop->_body.at(i);
          if( n->is_If() && n->in(1) == test->in(1) /*&& n != loop->tail()->in(0)*/ ) {
            // IfNode was dominated by version in peeled loop body
            progress = true;
            dominated_by( old_new[prev->_idx], n );
          }
        }
      }
      prev = test;
      test = idom(test);
    } // End of scan tests in loop

  } // End of while( progress )
}
 340 
 341 //------------------------------do_peeling-------------------------------------
 342 // Peel the first iteration of the given loop.
 343 // Step 1: Clone the loop body.  The clone becomes the peeled iteration.
 344 //         The pre-loop illegally has 2 control users (old & new loops).
 345 // Step 2: Make the old-loop fall-in edges point to the peeled iteration.
 346 //         Do this by making the old-loop fall-in edges act as if they came
 347 //         around the loopback from the prior iteration (follow the old-loop
 348 //         backedges) and then map to the new peeled iteration.  This leaves
 349 //         the pre-loop with only 1 user (the new peeled iteration), but the
 350 //         peeled-loop backedge has 2 users.
 351 // Step 3: Cut the backedge on the clone (so its not a loop) and remove the
 352 //         extra backedge user.
 353 //
 354 //                   orig
 355 //
 356 //                  stmt1
 357 //                    |
 358 //                    v
 359 //              loop predicate
 360 //                    |
 361 //                    v
 362 //                   loop<----+
 363 //                     |      |
 364 //                   stmt2    |
 365 //                     |      |
 366 //                     v      |
 367 //                    if      ^
 368 //                   / \      |
 369 //                  /   \     |
 370 //                 v     v    |
 371 //               false true   |
 372 //               /       \    |
 373 //              /         ----+
 374 //             |
 375 //             v
 376 //           exit
 377 //
 378 //
 379 //            after clone loop
 380 //
 381 //                   stmt1
 382 //                     |
 383 //                     v
 384 //               loop predicate
 385 //                 /       \
 386 //        clone   /         \   orig
 387 //               /           \
 388 //              /             \
 389 //             v               v
 390 //   +---->loop clone          loop<----+
 391 //   |      |                    |      |
 392 //   |    stmt2 clone          stmt2    |
 393 //   |      |                    |      |
 394 //   |      v                    v      |
 395 //   ^      if clone            If      ^
 396 //   |      / \                / \      |
 397 //   |     /   \              /   \     |
 398 //   |    v     v            v     v    |
 399 //   |    true  false      false true   |
 400 //   |    /         \      /       \    |
 401 //   +----           \    /         ----+
 402 //                    \  /
 403 //                    1v v2
 404 //                  region
 405 //                     |
 406 //                     v
 407 //                   exit
 408 //
 409 //
 410 //         after peel and predicate move
 411 //
 412 //                   stmt1
 413 //                    /
 414 //                   /
 415 //        clone     /            orig
 416 //                 /
 417 //                /              +----------+
 418 //               /               |          |
 419 //              /          loop predicate   |
 420 //             /                 |          |
 421 //            v                  v          |
 422 //   TOP-->loop clone          loop<----+   |
 423 //          |                    |      |   |
 424 //        stmt2 clone          stmt2    |   |
 425 //          |                    |      |   ^
 426 //          v                    v      |   |
 427 //          if clone            If      ^   |
 428 //          / \                / \      |   |
 429 //         /   \              /   \     |   |
 430 //        v     v            v     v    |   |
 431 //      true   false      false  true   |   |
 432 //        |         \      /       \    |   |
 433 //        |          \    /         ----+   ^
 434 //        |           \  /                  |
 435 //        |           1v v2                 |
 436 //        v         region                  |
 437 //        |            |                    |
 438 //        |            v                    |
 439 //        |          exit                   |
 440 //        |                                 |
 441 //        +--------------->-----------------+
 442 //
 443 //
 444 //              final graph
 445 //
 446 //                  stmt1
 447 //                    |
 448 //                    v
 449 //                  stmt2 clone
 450 //                    |
 451 //                    v
 452 //                   if clone
 453 //                  / |
 454 //                 /  |
 455 //                v   v
 456 //            false  true
 457 //             |      |
 458 //             |      v
 459 //             | loop predicate
 460 //             |      |
 461 //             |      v
 462 //             |     loop<----+
 463 //             |      |       |
 464 //             |    stmt2     |
 465 //             |      |       |
 466 //             |      v       |
 467 //             v      if      ^
 468 //             |     /  \     |
 469 //             |    /    \    |
 470 //             |   v     v    |
 471 //             | false  true  |
 472 //             |  |        \  |
 473 //             v  v         --+
 474 //            region
 475 //              |
 476 //              v
 477 //             exit
 478 //
void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) {

  C->set_major_progress();
  // Peeling a 'main' loop in a pre/main/post situation obfuscates the
  // 'pre' loop from the main and the 'pre' can no longer have its
  // iterations adjusted.  Therefore, we need to declare this loop as
  // no longer a 'main' loop; it will need new pre and post loops before
  // we can do further RCE.
#ifndef PRODUCT
  if (TraceLoopOpts) {
    tty->print("Peel         ");
    loop->dump_head();
  }
#endif
  Node* head = loop->_head;
  bool counted_loop = head->is_CountedLoop();
  if (counted_loop) {
    CountedLoopNode *cl = head->as_CountedLoop();
    assert(cl->trip_count() > 0, "peeling a fully unrolled loop");
    cl->set_trip_count(cl->trip_count() - 1);
    if (cl->is_main_loop()) {
      cl->set_normal_loop();
#ifndef PRODUCT
      if (PrintOpto && VerifyLoopOptimizations) {
        tty->print("Peeling a 'main' loop; resetting to 'normal' ");
        loop->dump_head();
      }
#endif
    }
  }
  Node* entry = head->in(LoopNode::EntryControl);

  // Step 1: Clone the loop body.  The clone becomes the peeled iteration.
  //         The pre-loop illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dom_depth(head) );

  // Step 2: Make the old-loop fall-in edges point to the peeled iteration.
  //         Do this by making the old-loop fall-in edges act as if they came
  //         around the loopback from the prior iteration (follow the old-loop
  //         backedges) and then map to the new peeled iteration.  This leaves
  //         the pre-loop with only 1 user (the new peeled iteration), but the
  //         peeled-loop backedge has 2 users.
  Node* new_entry = old_new[head->in(LoopNode::LoopBackControl)->_idx];
  _igvn.hash_delete(head);
  head->set_req(LoopNode::EntryControl, new_entry);
  for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
    Node* old = head->fast_out(j);
    if (old->in(0) == loop->_head && old->req() == 3 && old->is_Phi()) {
      Node* new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx];
      if (!new_exit_value )     // Backedge value is ALSO loop invariant?
        // Then loop body backedge value remains the same.
        new_exit_value = old->in(LoopNode::LoopBackControl);
      _igvn.hash_delete(old);
      old->set_req(LoopNode::EntryControl, new_exit_value);
    }
  }


  // Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
  //         extra backedge user.
  Node* new_head = old_new[head->_idx];
  _igvn.hash_delete(new_head);
  new_head->set_req(LoopNode::LoopBackControl, C->top());
  for (DUIterator_Fast j2max, j2 = new_head->fast_outs(j2max); j2 < j2max; j2++) {
    Node* use = new_head->fast_out(j2);
    if (use->in(0) == new_head && use->req() == 3 && use->is_Phi()) {
      _igvn.hash_delete(use);
      use->set_req(LoopNode::LoopBackControl, C->top());
    }
  }


  // Step 4: Correct dom-depth info.  Set to loop-head depth.
  int dd = dom_depth(head);
  set_idom(head, head->in(1), dd);
  for (uint j3 = 0; j3 < loop->_body.size(); j3++) {
    Node *old = loop->_body.at(j3);
    Node *nnn = old_new[old->_idx];
    if (!has_ctrl(nnn))
      set_idom(nnn, idom(nnn), dd-1);
  }

  // Now force out all loop-invariant dominating tests.  The optimizer
  // finds some, but we _know_ they are all useless.
  peeled_dom_test_elim(loop,old_new);

  loop->record_for_igvn();
}
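// Note (illustrative, not from the original comments): peeling a counted
// loop with trip_count() == 8 leaves one peeled iteration followed by
// the old loop with trip_count() == 7, which is why do_peeling()
// decrements the trip count before cloning the body.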

#define EMPTY_LOOP_SIZE 7 // number of nodes in an empty loop

//------------------------------policy_maximally_unroll------------------------
// Calculate the exact loop trip count and return true if the loop can be
// maximally unrolled.
bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
  CountedLoopNode *cl = _head->as_CountedLoop();
  assert(cl->is_normal_loop(), "");
  if (!cl->is_valid_counted_loop())
    return false; // Malformed counted loop

  if (!cl->has_exact_trip_count()) {
    // Trip count is not exact.
    return false;
  }

  uint trip_count = cl->trip_count();
  // Note, max_juint is used to indicate unknown trip count.
  assert(trip_count > 1, "one iteration loop should be optimized out already");
  assert(trip_count < max_juint, "exact trip_count should be less than max_juint.");

  // Real policy: if we maximally unroll, does it get too big?
  // Allow the unrolled mess to get larger than standard loop
  // size.  After all, it will no longer be a loop.
  uint body_size    = _body.size();
  uint unroll_limit = (uint)LoopUnrollLimit * 4;
  assert( (intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits");
  if (trip_count > unroll_limit || body_size > unroll_limit) {
    return false;
  }

  // Fully unroll a loop with few iterations, regardless of the following
  // conditions, since subsequent loop optimizations will split
  // such a loop anyway (pre-main-post).
  if (trip_count <= 3)
    return true;

  // Take into account that after unrolling conjoined heads and tails will fold;
  // otherwise policy_unroll() may allow more unrolling than the maximum.
  uint new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count;
  uint tst_body_size = (new_body_size - EMPTY_LOOP_SIZE) / trip_count + EMPTY_LOOP_SIZE;
  if (body_size != tst_body_size) // Check for int overflow
    return false;
  if (new_body_size > unroll_limit ||
      // Unrolling can result in a large amount of node construction
      new_body_size >= phase->C->max_node_limit() - phase->C->live_nodes()) {
    return false;
  }

  // Do not unroll a loop with String intrinsics code.
  // String intrinsics are large and have loops.
  for (uint k = 0; k < _body.size(); k++) {
    Node* n = _body.at(k);
    switch (n->Opcode()) {
      case Op_StrComp:
      case Op_StrEquals:
      case Op_StrIndexOf:
      case Op_StrIndexOfChar:
      case Op_EncodeISOArray:
      case Op_AryEq:
      case Op_HasNegatives: {
        return false;
      }
#if INCLUDE_RTM_OPT
      case Op_FastLock:
      case Op_FastUnlock: {
        // Don't unroll RTM locking code because it is large.
        if (UseRTMLocking) {
          return false;
        }
      }
#endif
    } // switch
  }

  return true; // Do maximally unroll
}
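// Example (illustrative numbers, assuming LoopUnrollLimit = 50): the
// limit above is unroll_limit = 200. A loop with body_size = 20 and
// trip_count = 10 would fully unroll into roughly
//   new_body_size = 7 + (20 - 7) * 10 = 137 nodes
// which is below the limit, so it is accepted (assuming no String
// intrinsics and enough node budget remains).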


//------------------------------policy_unroll----------------------------------
// Return TRUE or FALSE if the loop should be unrolled.  Unroll if
// the loop is a CountedLoop and the body is small enough.
bool IdealLoopTree::policy_unroll(PhaseIdealLoop *phase) {

  CountedLoopNode *cl = _head->as_CountedLoop();
  assert(cl->is_normal_loop() || cl->is_main_loop(), "");

  if (!cl->is_valid_counted_loop())
    return false; // Malformed counted loop

  // Protect against over-unrolling.
  // After the split at least one iteration will be executed in the pre-loop.
  if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false;

  _local_loop_unroll_limit = LoopUnrollLimit;
  _local_loop_unroll_factor = 4;
  int future_unroll_ct = cl->unrolled_count() * 2;
  if (!cl->do_unroll_only()) {
    if (future_unroll_ct > LoopMaxUnroll) return false;
  } else {
    // Obey user constraints on vector-mapped loops with additional unrolling applied
    if ((future_unroll_ct / cl->slp_max_unroll()) > LoopMaxUnroll) return false;
  }

  // Check for initial stride being a small enough constant
  if (abs(cl->stride_con()) > (1<<2)*future_unroll_ct) return false;

  // Don't unroll if the next round of unrolling would push us
  // over the expected trip count of the loop.  One is subtracted
  // from the expected trip count because the pre-loop normally
  // executes 1 iteration.
  if (UnrollLimitForProfileCheck > 0 &&
      cl->profile_trip_cnt() != COUNT_UNKNOWN &&
      future_unroll_ct        > UnrollLimitForProfileCheck &&
      (float)future_unroll_ct > cl->profile_trip_cnt() - 1.0) {
    return false;
  }

  // When unroll count is greater than LoopUnrollMin, don't unroll if:
  //   the residual iterations are more than 10% of the trip count
  //   and rounds of "unroll,optimize" are not making significant progress.
  //   Progress is defined as the current size being less than 20% larger
  //   than the previous size.
  if (UseSuperWord && cl->node_count_before_unroll() > 0 &&
      future_unroll_ct > LoopUnrollMin &&
      (future_unroll_ct - 1) * 10.0 > cl->profile_trip_cnt() &&
      1.2 * cl->node_count_before_unroll() < (double)_body.size()) {
    return false;
  }

  Node *init_n = cl->init_trip();
  Node *limit_n = cl->limit();
  int stride_con = cl->stride_con();
  // Non-constant bounds.
  // Protect against over-unrolling when init and/or limit are not constant
  // (so that trip_count's init value is maxint) but the iv range is known.
  if (init_n   == NULL || !init_n->is_Con()  ||
      limit_n  == NULL || !limit_n->is_Con()) {
    Node* phi = cl->phi();
    if (phi != NULL) {
      assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi.");
      const TypeInt* iv_type = phase->_igvn.type(phi)->is_int();
      int next_stride = stride_con * 2; // stride after this unroll
      if (next_stride > 0) {
        if (iv_type->_lo + next_stride <= iv_type->_lo || // overflow
            iv_type->_lo + next_stride >  iv_type->_hi) {
          return false;  // over-unrolling
        }
      } else if (next_stride < 0) {
        if (iv_type->_hi + next_stride >= iv_type->_hi || // overflow
            iv_type->_hi + next_stride <  iv_type->_lo) {
          return false;  // over-unrolling
        }
      }
    }
  }

  // After unrolling, the limit will be adjusted: new_limit = limit - stride.
  // Bail out if the adjustment overflows.
  const TypeInt* limit_type = phase->_igvn.type(limit_n)->is_int();
  if ((stride_con > 0 && (limit_type->_hi - stride_con) >= limit_type->_hi) ||
      (stride_con < 0 && (limit_type->_lo - stride_con) <= limit_type->_lo))
    return false;  // overflow
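  // Example (illustrative): with stride_con = 4 and limit_type->_hi
  // within 4 of min_jint, (limit_type->_hi - stride_con) underflows and
  // wraps to a large positive value >= _hi, so the test above detects
  // that new_limit = limit - stride would overflow and bails out.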

  // Adjust body_size to determine if we unroll or not
  uint body_size = _body.size();
  // Key test to unroll loop in CRC32 java code
  int xors_in_loop = 0;
  // Also count ModL, DivL and MulL which expand mightily
  for (uint k = 0; k < _body.size(); k++) {
    Node* n = _body.at(k);
    switch (n->Opcode()) {
      case Op_XorI: xors_in_loop++; break; // CRC32 java code
      case Op_ModL: body_size += 30; break;
      case Op_DivL: body_size += 30; break;
      case Op_MulL: body_size += 10; break;
      case Op_StrComp:
      case Op_StrEquals:
      case Op_StrIndexOf:
      case Op_StrIndexOfChar:
      case Op_EncodeISOArray:
      case Op_AryEq:
      case Op_HasNegatives: {
        // Do not unroll a loop with String intrinsics code.
        // String intrinsics are large and have loops.
        return false;
      }
#if INCLUDE_RTM_OPT
      case Op_FastLock:
      case Op_FastUnlock: {
        // Don't unroll RTM locking code because it is large.
        if (UseRTMLocking) {
          return false;
        }
      }
#endif
    } // switch
  }

  if (UseSuperWord) {
    if (!cl->is_reduction_loop()) {
      phase->mark_reductions(this);
    }

    // Only attempt SLP analysis when user controls do not prohibit it
    if (LoopMaxUnroll > _local_loop_unroll_factor) {
      // Once policy_unroll_slp_analysis succeeds, mark the loop with the
      // maximal unroll factor so that we minimize analysis passes
      if (future_unroll_ct >= _local_loop_unroll_factor) {
        policy_unroll_slp_analysis(cl, phase, future_unroll_ct);
      }
    }
  }

  int slp_max_unroll_factor = cl->slp_max_unroll();
  if (cl->has_passed_slp()) {
    if (slp_max_unroll_factor >= future_unroll_ct) return true;
    // Normal case: loop too big
    return false;
  }

  // Check for being too big
  if (body_size > (uint)_local_loop_unroll_limit) {
    if (xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true;
    // Normal case: loop too big
    return false;
  }

  if (cl->do_unroll_only()) {
    if (TraceSuperWordLoopUnrollAnalysis) {
      tty->print_cr("policy_unroll passed vector loop(vlen=%d,factor = %d)", slp_max_unroll_factor, future_unroll_ct);
    }
  }

  // Unroll once!  (Each trip will soon do double iterations)
  return true;
}
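// Note (illustrative): unrolling always proceeds one doubling at a
// time. A loop that has already been unrolled twice has
// unrolled_count() == 4, so the next request is checked with
// future_unroll_ct = 8 against LoopMaxUnroll and the size limits above
// before each trip is made to do eight of the original iterations.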

void IdealLoopTree::policy_unroll_slp_analysis(CountedLoopNode *cl, PhaseIdealLoop *phase, int future_unroll_ct) {
  // Enable this functionality target by target as needed
  if (SuperWordLoopUnrollAnalysis) {
    if (!cl->was_slp_analyzed()) {
      SuperWord sw(phase);
      sw.transform_loop(this, false);

      // If the loop is SLP-canonical, analyze it
      if (sw.early_return() == false) {
        sw.unrolling_analysis(_local_loop_unroll_factor);
      }
    }

    if (cl->has_passed_slp()) {
      int slp_max_unroll_factor = cl->slp_max_unroll();
      if (slp_max_unroll_factor >= future_unroll_ct) {
        int new_limit = cl->node_count_before_unroll() * slp_max_unroll_factor;
        if (new_limit > LoopUnrollLimit) {
          if (TraceSuperWordLoopUnrollAnalysis) {
            tty->print_cr("slp analysis unroll=%d, default limit=%d", new_limit, _local_loop_unroll_limit);
          }
          _local_loop_unroll_limit = new_limit;
        }
      }
    }
  }
}

//------------------------------policy_align-----------------------------------
// Return TRUE or FALSE if the loop should be cache-line aligned.  Gather the
// expression that does the alignment.  Note that only one array base can be
// aligned in a loop (unless the VM guarantees mutual alignment).  Note that
// if we vectorize short memory ops into longer memory ops, we may want to
// increase alignment.
bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const {
  return false;
}

//------------------------------policy_range_check-----------------------------
// Return TRUE or FALSE if the loop should be range-check-eliminated.
// Actually we do iteration-splitting, a more powerful form of RCE.
bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const {
  if (!RangeCheckElimination) return false;

  CountedLoopNode *cl = _head->as_CountedLoop();
  // If we unrolled with no intention of doing RCE and we later
  // changed our minds, we got no pre-loop.  Either we need to
  // make a new pre-loop, or we gotta disallow RCE.
  if (cl->is_main_no_pre_loop()) return false; // Disallowed for now.
  Node *trip_counter = cl->phi();

  // Check for vectorized loops; some opts are no longer needed
  if (cl->do_unroll_only()) return false;

  // Check loop body for tests of trip-counter plus loop-invariant vs
  // loop-invariant.
  for (uint i = 0; i < _body.size(); i++) {
    Node *iff = _body[i];
    if (iff->Opcode() == Op_If ||
        iff->Opcode() == Op_RangeCheck) { // Test?

      // Comparing trip+off vs limit
      Node *bol = iff->in(1);
      if (bol->req() != 2) continue; // dead constant test
      if (!bol->is_Bool()) {
        assert(UseLoopPredicate && bol->Opcode() == Op_Conv2B, "predicate check only");
        continue;
      }
      if (bol->as_Bool()->_test._test == BoolTest::ne)
        continue; // not RC

      Node *cmp = bol->in(1);
      Node *rc_exp = cmp->in(1);
      Node *limit = cmp->in(2);

      Node *limit_c = phase->get_ctrl(limit);
      if( limit_c == phase->C->top() )
        return false;           // Found dead test on live IF?  No RCE!
      if( is_member(phase->get_loop(limit_c) ) ) {
        // Compare might have operands swapped; commute them
        rc_exp = cmp->in(2);
        limit  = cmp->in(1);
        limit_c = phase->get_ctrl(limit);
        if( is_member(phase->get_loop(limit_c) ) )
          continue;             // Both inputs are loop varying; cannot RCE
      }

      if (!phase->is_scaled_iv_plus_offset(rc_exp, trip_counter, NULL, NULL)) {
        continue;
      }
      // Yeah!  Found a test like 'trip+off vs limit'
      // Test is an IfNode, has 2 projections.  If BOTH are in the loop
      // we need loop unswitching instead of iteration splitting.
      if( is_loop_exit(iff) )
        return true;            // Found reason to split iterations
    } // End of is IF
  }

  return false;
}

//------------------------------policy_peel_only-------------------------------
// Return TRUE or FALSE if the loop should NEVER be RCE'd or aligned.  Useful
// for unrolling loops with NO array accesses.
bool IdealLoopTree::policy_peel_only( PhaseIdealLoop *phase ) const {
  // Check for vectorized loops; any peeling done was already applied
  if (_head->is_CountedLoop() && _head->as_CountedLoop()->do_unroll_only()) return false;

  for( uint i = 0; i < _body.size(); i++ )
    if( _body[i]->is_Mem() )
      return false;

  // No memory accesses at all!
  return true;
}

//------------------------------clone_up_backedge_goo--------------------------
// If Node n lives in the back_ctrl block and cannot float, we clone a private
// version of n in the preheader_ctrl block and return that; otherwise return n.
Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones ) {
  if( get_ctrl(n) != back_ctrl ) return n;

  // Only visit once
  if (visited.test_set(n->_idx)) {
    Node *x = clones.find(n->_idx);
    if (x != NULL)
      return x;
    return n;
  }

  Node *x = NULL;               // If required, a clone of 'n'
  // Check for 'n' being pinned in the backedge.
  if( n->in(0) && n->in(0) == back_ctrl ) {
    assert(clones.find(n->_idx) == NULL, "dead loop");
    x = n->clone();             // Clone a copy of 'n' to preheader
    clones.push(x, n->_idx);
    x->set_req( 0, preheader_ctrl ); // Fix x's control input to preheader
  }

  // Recursively fix up any other input edges into x.
  // If there are no changes we can just return 'n', otherwise
  // we need to clone a private copy and change it.
  for( uint i = 1; i < n->req(); i++ ) {
    Node *g = clone_up_backedge_goo( back_ctrl, preheader_ctrl, n->in(i), visited, clones );
    if( g != n->in(i) ) {
      if( !x ) {
        assert(clones.find(n->_idx) == NULL, "dead loop");
        x = n->clone();
        clones.push(x, n->_idx);
      }
      x->set_req(i, g);
    }
  }
  if( x ) {                     // x can legally float to pre-header location
    register_new_node( x, preheader_ctrl );
    return x;
  } else {                      // raise n to cover LCA of uses
    set_ctrl( n, find_non_split_ctrl(back_ctrl->in(0)) );
  }
  return n;
}

bool PhaseIdealLoop::cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop) {
  Node* castii = new CastIINode(incr, TypeInt::INT, true);
  castii->set_req(0, ctrl);
  register_new_node(castii, ctrl);
  for (DUIterator_Fast imax, i = incr->fast_outs(imax); i < imax; i++) {
    Node* n = incr->fast_out(i);
    if (n->is_Phi() && n->in(0) == loop) {
      int nrep = n->replace_edge(incr, castii);
      return true;
    }
  }
  return false;
}

//------------------------------insert_pre_post_loops--------------------------
// Insert pre and post loops.  If peel_only is set, the pre-loop cannot have
// more iterations added.  It acts as a 'peel' only, no lower-bound RCE, no
// alignment.  Useful to unroll loops that do no array accesses.
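// Illustrative source-level view (not part of the original comment): a
// counted loop
//   for (int i = init; i < limit; i += stride) body(i);
// is split into
//   pre : runs a few iterations until RCE/alignment conditions hold
//   main: runs the bulk of the iterations; RCE and unrolling happen here
//   post: runs the remaining iterations
// with zero-trip guards between the loops so that the main and post
// loops are skipped entirely when they have no iterations to execute.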
void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ) {

#ifndef PRODUCT
  if (TraceLoopOpts) {
    if (peel_only)
      tty->print("PeelMainPost ");
    else
      tty->print("PreMainPost  ");
    loop->dump_head();
  }
#endif
  C->set_major_progress();

  // Find common pieces of the loop being guarded with pre & post loops
  CountedLoopNode *main_head = loop->_head->as_CountedLoop();
  assert( main_head->is_normal_loop(), "" );
  CountedLoopEndNode *main_end = main_head->loopexit();
  guarantee(main_end != NULL, "no loop exit node");
  assert( main_end->outcnt() == 2, "1 true, 1 false path only" );
  uint dd_main_head = dom_depth(main_head);
  uint max = main_head->outcnt();

  Node *pre_header= main_head->in(LoopNode::EntryControl);
  Node *init      = main_head->init_trip();
  Node *incr      = main_end ->incr();
  Node *limit     = main_end ->limit();
  Node *stride    = main_end ->stride();
  Node *cmp       = main_end ->cmp_node();
  BoolTest::mask b_test = main_end->test_trip();

  // Need only 1 user of 'bol' because I will be hacking the loop bounds.
  Node *bol = main_end->in(CountedLoopEndNode::TestValue);
  if( bol->outcnt() != 1 ) {
    bol = bol->clone();
    register_new_node(bol,main_end->in(CountedLoopEndNode::TestControl));
    _igvn.replace_input_of(main_end, CountedLoopEndNode::TestValue, bol);
  }
  // Need only 1 user of 'cmp' because I will be hacking the loop bounds.
  if( cmp->outcnt() != 1 ) {
    cmp = cmp->clone();
    register_new_node(cmp,main_end->in(CountedLoopEndNode::TestControl));
    _igvn.replace_input_of(bol, 1, cmp);
  }

  //------------------------------
  // Step A: Create Post-Loop.
  Node* main_exit = main_end->proj_out(false);
  assert( main_exit->Opcode() == Op_IfFalse, "" );
  int dd_main_exit = dom_depth(main_exit);

  // Step A1: Clone the loop body.  The clone becomes the post-loop.  The main
  // loop pre-header illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dd_main_exit );
  assert( old_new[main_end ->_idx]->Opcode() == Op_CountedLoopEnd, "" );
  CountedLoopNode *post_head = old_new[main_head->_idx]->as_CountedLoop();
  post_head->set_post_loop(main_head);

  // Reduce the post-loop trip count.
  CountedLoopEndNode* post_end = old_new[main_end ->_idx]->as_CountedLoopEnd();
  post_end->_prob = PROB_FAIR;

  // Build the main-loop normal exit.
  IfFalseNode *new_main_exit = new IfFalseNode(main_end);
  _igvn.register_new_node_with_optimizer( new_main_exit );
  set_idom(new_main_exit, main_end, dd_main_exit );
  set_loop(new_main_exit, loop->_parent);

  // Step A2: Build a zero-trip guard for the post-loop.  After leaving the
  // main-loop, the post-loop may not execute at all.  We 'opaque' the incr
  // (the main-loop trip-counter exit value) because we will be changing
  // the exit value (via unrolling) so we cannot constant-fold away the zero
  // trip guard until all unrolling is done.
  Node *zer_opaq = new Opaque1Node(C, incr);
  Node *zer_cmp  = new CmpINode( zer_opaq, limit );
  Node *zer_bol  = new BoolNode( zer_cmp, b_test );
  register_new_node( zer_opaq, new_main_exit );
  register_new_node( zer_cmp , new_main_exit );
  register_new_node( zer_bol , new_main_exit );

  // Build the IfNode
  IfNode *zer_iff = new IfNode( new_main_exit, zer_bol, PROB_FAIR, COUNT_UNKNOWN );
  _igvn.register_new_node_with_optimizer( zer_iff );
  set_idom(zer_iff, new_main_exit, dd_main_exit);
  set_loop(zer_iff, loop->_parent);

  // Plug in the false-path, taken if we need to skip post-loop
  _igvn.replace_input_of(main_exit, 0, zer_iff);
  set_idom(main_exit, zer_iff, dd_main_exit);
  set_idom(main_exit->unique_out(), zer_iff, dd_main_exit);
  // Make the true-path, must enter the post loop
  Node *zer_taken = new IfTrueNode( zer_iff );
  _igvn.register_new_node_with_optimizer( zer_taken );
  set_idom(zer_taken, zer_iff, dd_main_exit);
  set_loop(zer_taken, loop->_parent);
  // Plug in the true path
  _igvn.hash_delete( post_head );
  post_head->set_req(LoopNode::EntryControl, zer_taken);
  set_idom(post_head, zer_taken, dd_main_exit);

  Arena *a = Thread::current()->resource_area();
  VectorSet visited(a);
  Node_Stack clones(a, main_head->back_control()->outcnt());
  // Step A3: Make the fall-in values to the post-loop come from the
  // fall-out values of the main-loop.
  for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) {
    Node* main_phi = main_head->fast_out(i);
    if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() >0 ) {
      Node *post_phi = old_new[main_phi->_idx];
      Node *fallmain  = clone_up_backedge_goo(main_head->back_control(),
                                              post_head->init_control(),
                                              main_phi->in(LoopNode::LoopBackControl),
                                              visited, clones);
      _igvn.hash_delete(post_phi);
      post_phi->set_req( LoopNode::EntryControl, fallmain );
    }
  }

  // Update local caches for next stanza
  main_exit = new_main_exit;


  //------------------------------
  // Step B: Create Pre-Loop.

  // Step B1: Clone the loop body.  The clone becomes the pre-loop.  The main
  // loop pre-header illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dd_main_head );
  CountedLoopNode*    pre_head = old_new[main_head->_idx]->as_CountedLoop();
  CountedLoopEndNode* pre_end  = old_new[main_end ->_idx]->as_CountedLoopEnd();
  pre_head->set_pre_loop(main_head);
  Node *pre_incr = old_new[incr->_idx];

  // Reduce the pre-loop trip count.
  pre_end->_prob = PROB_FAIR;

  // Find the pre-loop normal exit.
  Node* pre_exit = pre_end->proj_out(false);
  assert( pre_exit->Opcode() == Op_IfFalse, "" );
  IfFalseNode *new_pre_exit = new IfFalseNode(pre_end);
  _igvn.register_new_node_with_optimizer( new_pre_exit );
  set_idom(new_pre_exit, pre_end, dd_main_head);
  set_loop(new_pre_exit, loop->_parent);

  // Step B2: Build a zero-trip guard for the main-loop.  After leaving the
  // pre-loop, the main-loop may not execute at all.  Later in life this
  // zero-trip guard will become the minimum-trip guard when we unroll
  // the main-loop.
  Node *min_opaq = new Opaque1Node(C, limit);
  Node *min_cmp  = new CmpINode( pre_incr, min_opaq );
  Node *min_bol  = new BoolNode( min_cmp, b_test );
  register_new_node( min_opaq, new_pre_exit );
  register_new_node( min_cmp , new_pre_exit );
  register_new_node( min_bol , new_pre_exit );

  // Build the IfNode (assume the main-loop is executed always).
  IfNode *min_iff = new IfNode( new_pre_exit, min_bol, PROB_ALWAYS, COUNT_UNKNOWN );
  _igvn.register_new_node_with_optimizer( min_iff );
  set_idom(min_iff, new_pre_exit, dd_main_head);
  set_loop(min_iff, loop->_parent);

  // Plug in the false-path, taken if we need to skip main-loop
  _igvn.hash_delete( pre_exit );
  pre_exit->set_req(0, min_iff);
  set_idom(pre_exit, min_iff, dd_main_head);
  set_idom(pre_exit->unique_out(), min_iff, dd_main_head);
  // Make the true-path, must enter the main loop
  Node *min_taken = new IfTrueNode( min_iff );
  _igvn.register_new_node_with_optimizer( min_taken );
  set_idom(min_taken, min_iff, dd_main_head);
  set_loop(min_taken, loop->_parent);
  // Plug in the true path
  _igvn.hash_delete( main_head );
  main_head->set_req(LoopNode::EntryControl, min_taken);
  set_idom(main_head, min_taken, dd_main_head);

  visited.Clear();
  clones.clear();
  // Step B3: Make the fall-in values to the main-loop come from the
  // fall-out values of the pre-loop.
  for (DUIterator_Fast i2max, i2 = main_head->fast_outs(i2max); i2 < i2max; i2++) {
    Node* main_phi = main_head->fast_out(i2);
    if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) {
      Node *pre_phi = old_new[main_phi->_idx];
      Node *fallpre  = clone_up_backedge_goo(pre_head->back_control(),
                                             main_head->init_control(),
                                             pre_phi->in(LoopNode::LoopBackControl),
                                             visited, clones);
      _igvn.hash_delete(main_phi);
      main_phi->set_req( LoopNode::EntryControl, fallpre );
    }
  }

  // Nodes inside the loop may be control dependent on a predicate
  // that was moved before the preloop. If the back branch of the main
  // or post loops becomes dead, those nodes won't be dependent on the
  // test that guards that loop nest anymore, which could lead to an
  // incorrect array access because it executes independently of the
  // test that was guarding the loop nest. We add a special CastII on
  // the if branch that enters the loop, between the input induction
  // variable value and the induction variable Phi, to preserve correct
  // dependencies.

  // CastII for the post loop:
  bool inserted = cast_incr_before_loop(zer_opaq->in(1), zer_taken, post_head);
  assert(inserted, "no castII inserted");

  // CastII for the main loop:
  inserted = cast_incr_before_loop(pre_incr, min_taken, main_head);
  assert(inserted, "no castII inserted");

  // Step B4: Shorten the pre-loop to run only 1 iteration (for now).
  // RCE and alignment may change this later.
  Node *cmp_end = pre_end->cmp_node();
  assert( cmp_end->in(2) == limit, "" );
  Node *pre_limit = new AddINode( init, stride );

  // Save the original loop limit in this Opaque1 node for
  // use by range check elimination.
  Node *pre_opaq  = new Opaque1Node(C, pre_limit, limit);

  register_new_node( pre_limit, pre_head->in(0) );
  register_new_node( pre_opaq , pre_head->in(0) );

  // Since no other users of pre-loop compare, I can hack limit directly
  assert( cmp_end->outcnt() == 1, "no other users" );
  _igvn.hash_delete(cmp_end);
  cmp_end->set_req(2, peel_only ? pre_limit : pre_opaq);

  // Special case for not-equal loop bounds:
  // Change pre loop test, main loop test, and the
  // main loop guard test to use lt or gt depending on stride
  // direction:
  // positive stride use <
  // negative stride use >
  //
  // The not-equal test is kept for the post loop to handle the case
  // when init > limit with stride > 0 (and the reverse).

  if (pre_end->in(CountedLoopEndNode::TestValue)->as_Bool()->_test._test == BoolTest::ne) {

    BoolTest::mask new_test = (main_end->stride_con() > 0) ? BoolTest::lt : BoolTest::gt;
    // Modify pre loop end condition
    Node* pre_bol = pre_end->in(CountedLoopEndNode::TestValue)->as_Bool();
    BoolNode* new_bol0 = new BoolNode(pre_bol->in(1), new_test);
    register_new_node( new_bol0, pre_head->in(0) );
    _igvn.replace_input_of(pre_end, CountedLoopEndNode::TestValue, new_bol0);
    // Modify main loop guard condition
    assert(min_iff->in(CountedLoopEndNode::TestValue) == min_bol, "guard okay");
    BoolNode* new_bol1 = new BoolNode(min_bol->in(1), new_test);
    register_new_node( new_bol1, new_pre_exit );
    _igvn.hash_delete(min_iff);
    min_iff->set_req(CountedLoopEndNode::TestValue, new_bol1);
    // Modify main loop end condition
    BoolNode* main_bol = main_end->in(CountedLoopEndNode::TestValue)->as_Bool();
    BoolNode* new_bol2 = new BoolNode(main_bol->in(1), new_test);
    register_new_node( new_bol2, main_end->in(CountedLoopEndNode::TestControl) );
    _igvn.replace_input_of(main_end, CountedLoopEndNode::TestValue, new_bol2);
  }

  // Flag main loop
  main_head->set_main_loop();
  if( peel_only ) main_head->set_main_no_pre_loop();

  // Subtract a trip count for the pre-loop.
  main_head->set_trip_count(main_head->trip_count() - 1);

  // It's difficult to be precise about the trip-counts
  // for the pre/post loops.  They are usually very short,
  // so guess that 4 trips is a reasonable value.
  post_head->set_profile_trip_cnt(4.0);
  pre_head->set_profile_trip_cnt(4.0);

  // Now force out all loop-invariant dominating tests.  The optimizer
  // finds some, but we _know_ they are all useless.
  peeled_dom_test_elim(loop,old_new);
  loop->record_for_igvn();
}

//------------------------------is_invariant-----------------------------
// Return true if n is invariant
bool IdealLoopTree::is_invariant(Node* n) const {
  Node *n_c = _phase->has_ctrl(n) ? _phase->get_ctrl(n) : n;
  if (n_c->is_top()) return false;
  return !is_member(_phase->get_loop(n_c));
}


//------------------------------do_unroll--------------------------------------
// Unroll the loop body one step - make each trip do 2 iterations.
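// Illustrative source-level view (not part of the original comment):
//   for (int i = 0; i < limit; i++) { body(i); }
// becomes, after one unroll step with the adjusted limit,
//   for (int i = 0; i < limit - 1; i += 2) { body(i); body(i + 1); }
// with any leftover iteration handled by the post-loop created in
// insert_pre_post_loops().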
1274 void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip ) {
1275   assert(LoopUnrollLimit, "");
1276   CountedLoopNode *loop_head = loop->_head->as_CountedLoop();
1277   CountedLoopEndNode *loop_end = loop_head->loopexit();
1278   assert(loop_end, "");
1279 #ifndef PRODUCT
1280   if (PrintOpto && VerifyLoopOptimizations) {
1281     tty->print("Unrolling ");
1282     loop->dump_head();
1283   } else if (TraceLoopOpts) {
1284     if (loop_head->trip_count() < (uint)LoopUnrollLimit) {
1285       tty->print("Unroll %d(%2d) ", loop_head->unrolled_count()*2, loop_head->trip_count());
1286     } else {
1287       tty->print("Unroll %d     ", loop_head->unrolled_count()*2);
1288     }
1289     loop->dump_head();
1290   }
1291 
1292   if (C->do_vector_loop() && (PrintOpto && VerifyLoopOptimizations || TraceLoopOpts)) {
1293     Arena* arena = Thread::current()->resource_area();
1294     Node_Stack stack(arena, C->live_nodes() >> 2);
1295     Node_List rpo_list;
1296     VectorSet visited(arena);
1297     visited.set(loop_head->_idx);
1298     rpo( loop_head, stack, visited, rpo_list );
1299     dump(loop, rpo_list.size(), rpo_list );
1300   }
1301 #endif
1302 
1303   // Remember loop node count before unrolling to detect
1304   // if rounds of unroll,optimize are making progress
1305   loop_head->set_node_count_before_unroll(loop->_body.size());
1306 
1307   Node *ctrl  = loop_head->in(LoopNode::EntryControl);
1308   Node *limit = loop_head->limit();
1309   Node *init  = loop_head->init_trip();
1310   Node *stride = loop_head->stride();
1311 
1312   Node *opaq = NULL;
1313   if (adjust_min_trip) {       // If not maximally unrolling, need adjustment
1314     // Search for zero-trip guard.
1315     assert( loop_head->is_main_loop(), "" );
1316     assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" );
1317     Node *iff = ctrl->in(0);
1318     assert( iff->Opcode() == Op_If, "" );
1319     Node *bol = iff->in(1);
1320     assert( bol->Opcode() == Op_Bool, "" );
1321     Node *cmp = bol->in(1);
1322     assert( cmp->Opcode() == Op_CmpI, "" );
1323     opaq = cmp->in(2);
1324     // Occasionally it's possible for a zero-trip guard Opaque1 node to be
1325     // optimized away and then another round of loop opts attempted.
1326     // We cannot optimize this particular loop in that case.
1327     if (opaq->Opcode() != Op_Opaque1)
1328       return; // Cannot find zero-trip guard!  Bail out!
1329     // Zero-trip test uses an 'opaque' node which is not shared.
1330     assert(opaq->outcnt() == 1 && opaq->in(1) == limit, "");
1331   }
1332 
1333   C->set_major_progress();
1334 
1335   Node* new_limit = NULL;
1336   if (UnrollLimitCheck) {
1337     int stride_con = stride->get_int();
1338     int stride_p = (stride_con > 0) ? stride_con : -stride_con;
1339     uint old_trip_count = loop_head->trip_count();
1340     // Verify that unroll policy result is still valid.
1341     assert(old_trip_count > 1 &&
1342            (!adjust_min_trip || stride_p <= (1<<3)*loop_head->unrolled_count()), "sanity");
1343 
1344     // Adjust the loop limit to keep a valid iteration count after unrolling.
1345     // Use (limit - stride) instead of (((limit - init)/stride) & (-2))*stride
1346     // which may overflow.
1347     if (!adjust_min_trip) {
1348       assert(old_trip_count > 1 && (old_trip_count & 1) == 0,
1349              "odd trip count for maximal unroll");
1350       // No need to adjust the limit for maximal unrolling since the trip count is even.
1351     } else if (loop_head->has_exact_trip_count() && init->is_Con()) {
1352       // Loop's limit is constant. Loop's init could be constant when pre-loop
1353       // becomes a peeled iteration.
1354       jlong init_con = init->get_int();
1355       // We can keep the old loop limit if the iteration count stays the same:
1356       //   old_trip_count == new_trip_count * 2
1357       // Note: since old_trip_count >= 2 then new_trip_count >= 1
1358       // so we also don't need to adjust zero trip test.
1359       jlong limit_con  = limit->get_int();
1360       // (stride_con*2) cannot overflow since stride_con <= 8.
1361       int new_stride_con = stride_con * 2;
1362       int stride_m    = new_stride_con - (stride_con > 0 ? 1 : -1);
1363       jlong trip_count = (limit_con - init_con + stride_m)/new_stride_con;
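           // Worked example (illustrative): init_con=0, limit_con=10, stride_con=1
           // gives new_stride_con=2, stride_m=1, trip_count=(10-0+1)/2=5; since
           // old_trip_count == 2*5, the old limit can be kept and the zero-trip
           // test needs no adjustment.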
1364       // The new trip count must satisfy the following conditions.
1365       assert(trip_count > 0 && (julong)trip_count < (julong)max_juint/2, "sanity");
1366       uint new_trip_count = (uint)trip_count;
1367       adjust_min_trip = (old_trip_count != new_trip_count*2);
1368     }
1369 
1370     if (adjust_min_trip) {
1371       // Step 2: Adjust the trip limit if it is called for.
1372       // The adjustment amount is -stride. We must ensure that if the
1373       // adjustment underflows or overflows, the main loop is skipped.
1374       Node* cmp = loop_end->cmp_node();
1375       assert(cmp->in(2) == limit, "sanity");
1376       assert(opaq != NULL && opaq->in(1) == limit, "sanity");
1377 
1378       // Verify that policy_unroll result is still valid.
1379       const TypeInt* limit_type = _igvn.type(limit)->is_int();
1380       assert(stride_con > 0 && ((limit_type->_hi - stride_con) < limit_type->_hi) ||
1381              stride_con < 0 && ((limit_type->_lo - stride_con) > limit_type->_lo), "sanity");
1382 
1383       if (limit->is_Con()) {
1384         // The check in policy_unroll and the assert above guarantee
1385         // no underflow if limit is constant.
1386         new_limit = _igvn.intcon(limit->get_int() - stride_con);
1387         set_ctrl(new_limit, C->root());
1388       } else {
1389         // Limit is not constant.
1390         if (loop_head->unrolled_count() == 1) { // only for first unroll
1391           // Separate the limit with an Opaque node in case it is an incremented
1392           // variable from the previous loop, to avoid using the pre-incremented
1393           // value, which could increase register pressure.
1394           // Otherwise the reorg_offsets() optimization will create a separate
1395           // Opaque node for each use of the trip counter and, as a result, the
1396           // zero-trip guard limit will differ from the loop limit.
1397           assert(has_ctrl(opaq), "should have it");
1398           Node* opaq_ctrl = get_ctrl(opaq);
1399           limit = new Opaque2Node( C, limit );
1400           register_new_node( limit, opaq_ctrl );
1401         }
1402         if (stride_con > 0 && (java_subtract(limit_type->_lo, stride_con) < limit_type->_lo) ||
1403             stride_con < 0 && (java_subtract(limit_type->_hi, stride_con) > limit_type->_hi)) {
1404           // No underflow.
1405           new_limit = new SubINode(limit, stride);
1406         } else {
1407           // (limit - stride) may underflow.
1408           // Clamp the adjustment value with MININT or MAXINT:
1409           //
1410           //   new_limit = limit-stride
1411           //   if (stride > 0)
1412           //     new_limit = (limit < new_limit) ? MININT : new_limit;
1413           //   else
1414           //     new_limit = (limit > new_limit) ? MAXINT : new_limit;
1415           //
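               // e.g. (illustrative) stride_con=4 with limit near min_jint:
               // limit-stride wraps around to a large positive value, the
               // comparison below detects it, and the CMove clamps the new
               // limit to MININT so the main loop is skipped.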
1416           BoolTest::mask bt = loop_end->test_trip();
1417           assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected");
1418           Node* adj_max = _igvn.intcon((stride_con > 0) ? min_jint : max_jint);
1419           set_ctrl(adj_max, C->root());
1420           Node* old_limit = NULL;
1421           Node* adj_limit = NULL;
1422           Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : NULL;
1423           if (loop_head->unrolled_count() > 1 &&
1424               limit->is_CMove() && limit->Opcode() == Op_CMoveI &&
1425               limit->in(CMoveNode::IfTrue) == adj_max &&
1426               bol->as_Bool()->_test._test == bt &&
1427               bol->in(1)->Opcode() == Op_CmpI &&
1428               bol->in(1)->in(2) == limit->in(CMoveNode::IfFalse)) {
1429             // Loop was unrolled before.
1430             // Optimize the limit to avoid nested CMove:
1431             // use original limit as old limit.
1432             old_limit = bol->in(1)->in(1);
1433             // Adjust previous adjusted limit.
1434             adj_limit = limit->in(CMoveNode::IfFalse);
1435             adj_limit = new SubINode(adj_limit, stride);
1436           } else {
1437             old_limit = limit;
1438             adj_limit = new SubINode(limit, stride);
1439           }
1440           assert(old_limit != NULL && adj_limit != NULL, "");
1441           register_new_node( adj_limit, ctrl ); // adjust amount
1442           Node* adj_cmp = new CmpINode(old_limit, adj_limit);
1443           register_new_node( adj_cmp, ctrl );
1444           Node* adj_bool = new BoolNode(adj_cmp, bt);
1445           register_new_node( adj_bool, ctrl );
1446           new_limit = new CMoveINode(adj_bool, adj_limit, adj_max, TypeInt::INT);
1447         }
1448         register_new_node(new_limit, ctrl);
1449       }
1450       assert(new_limit != NULL, "");
1451       // Replace in loop test.
1452       assert(loop_end->in(1)->in(1) == cmp, "sanity");
1453       if (cmp->outcnt() == 1 && loop_end->in(1)->outcnt() == 1) {
1454         // No need to create a new test since there is only one user.
1455         _igvn.hash_delete(cmp);
1456         cmp->set_req(2, new_limit);
1457       } else {
1458         // Create new test since it is shared.
1459         Node* ctrl2 = loop_end->in(0);
1460         Node* cmp2  = cmp->clone();
1461         cmp2->set_req(2, new_limit);
1462         register_new_node(cmp2, ctrl2);
1463         Node* bol2 = loop_end->in(1)->clone();
1464         bol2->set_req(1, cmp2);
1465         register_new_node(bol2, ctrl2);
1466         _igvn.replace_input_of(loop_end, 1, bol2);
1467       }
1468       // Step 3: Find the min-trip test guaranteed before a 'main' loop.
1469       // Make it a 1-trip test (means at least 2 trips).
1470 
1471       // Guard test uses an 'opaque' node which is not shared.  Hence I
1472       // can edit its inputs directly.  Hammer in the new limit for the
1473       // minimum-trip guard.
1474       assert(opaq->outcnt() == 1, "");
1475       _igvn.replace_input_of(opaq, 1, new_limit);
1476     }
1477 
1478     // Adjust max trip count. The trip count is intentionally rounded
1479     // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
1480     // the main, unrolled, part of the loop will never execute as it is protected
1481     // by the min-trip test.  See bug 4834191 for a case where we over-unrolled
1482     // and later determined that part of the unrolled loop was dead.
1483     loop_head->set_trip_count(old_trip_count / 2);
1484 
1485     // Double the count of original iterations in the unrolled loop body.
1486     loop_head->double_unrolled_count();
1487 
1488   } else { // !UnrollLimitCheck
1489 
1490     // Adjust max trip count. The trip count is intentionally rounded
1491     // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
1492     // the main, unrolled, part of the loop will never execute as it is protected
1493     // by the min-trip test.  See bug 4834191 for a case where we over-unrolled
1494     // and later determined that part of the unrolled loop was dead.
1495     loop_head->set_trip_count(loop_head->trip_count() / 2);
1496 
1497     // Double the count of original iterations in the unrolled loop body.
1498     loop_head->double_unrolled_count();
1499 
1500     // -----------
1501     // Step 2: Cut back the trip counter for an unroll amount of 2.
1502     // Loop will normally trip (limit - init)/stride_con.  Since it's a
1503     // CountedLoop this is exact (stride divides limit-init exactly).
1504     // We are going to double the loop body, so we want to knock off any
1505     // odd iteration: (trip_cnt & ~1).  Then back compute a new limit.
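         // Worked example (illustrative): init=0, limit=10, stride=2 gives
         // span=10, trip=5, rounded down to 4, so new_limit = 4*2 + 0 = 8,
         // an even number of iterations for the doubled body.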
1506     Node *span = new SubINode( limit, init );
1507     register_new_node( span, ctrl );
1508     Node *trip = new DivINode( 0, span, stride );
1509     register_new_node( trip, ctrl );
1510     Node *mtwo = _igvn.intcon(-2);
1511     set_ctrl(mtwo, C->root());
1512     Node *rond = new AndINode( trip, mtwo );
1513     register_new_node( rond, ctrl );
1514     Node *spn2 = new MulINode( rond, stride );
1515     register_new_node( spn2, ctrl );
1516     new_limit = new AddINode( spn2, init );
1517     register_new_node( new_limit, ctrl );
1518 
1519     // Hammer in the new limit
1520     Node *ctrl2 = loop_end->in(0);
1521     Node *cmp2 = new CmpINode( loop_head->incr(), new_limit );
1522     register_new_node( cmp2, ctrl2 );
1523     Node *bol2 = new BoolNode( cmp2, loop_end->test_trip() );
1524     register_new_node( bol2, ctrl2 );
1525     _igvn.replace_input_of(loop_end, CountedLoopEndNode::TestValue, bol2);
1526 
1527     // Step 3: Find the min-trip test guaranteed before a 'main' loop.
1528     // Make it a 1-trip test (means at least 2 trips).
1529     if( adjust_min_trip ) {
1530       assert( new_limit != NULL, "" );
1531       // Guard test uses an 'opaque' node which is not shared.  Hence I
1532       // can edit its inputs directly.  Hammer in the new limit for the
1533       // minimum-trip guard.
1534       assert( opaq->outcnt() == 1, "" );
1535       _igvn.hash_delete(opaq);
1536       opaq->set_req(1, new_limit);
1537     }
1538   } // UnrollLimitCheck
1539 
1540   // ---------
1541   // Step 4: Clone the loop body.  Move it inside the loop.  This loop body
1542   // represents the odd iterations; since the loop trips an even number of
1543   // times its backedge is never taken.  Kill the backedge.
1544   uint dd = dom_depth(loop_head);
1545   clone_loop( loop, old_new, dd );
1546 
1547   // Make backedges of the clone equal to backedges of the original.
1548   // Make the fall-in from the original come from the fall-out of the clone.
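       // Schematically (illustrative): the original phi(entry, back) and its
       // clone phi'(entry', back') are rewired to phi(back', TOP) and
       // phi'(entry', back), chaining the clone's fall-out into the original's
       // fall-in so each remaining trip executes both bodies once.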
1549   for (DUIterator_Fast jmax, j = loop_head->fast_outs(jmax); j < jmax; j++) {
1550     Node* phi = loop_head->fast_out(j);
1551     if( phi->is_Phi() && phi->in(0) == loop_head && phi->outcnt() > 0 ) {
1552       Node *newphi = old_new[phi->_idx];
1553       _igvn.hash_delete( phi );
1554       _igvn.hash_delete( newphi );
1555 
1556       phi   ->set_req(LoopNode::   EntryControl, newphi->in(LoopNode::LoopBackControl));
1557       newphi->set_req(LoopNode::LoopBackControl, phi   ->in(LoopNode::LoopBackControl));
1558       phi   ->set_req(LoopNode::LoopBackControl, C->top());
1559     }
1560   }
1561   Node *clone_head = old_new[loop_head->_idx];
1562   _igvn.hash_delete( clone_head );
1563   loop_head ->set_req(LoopNode::   EntryControl, clone_head->in(LoopNode::LoopBackControl));
1564   clone_head->set_req(LoopNode::LoopBackControl, loop_head ->in(LoopNode::LoopBackControl));
1565   loop_head ->set_req(LoopNode::LoopBackControl, C->top());
1566   loop->_head = clone_head;     // New loop header
1567 
1568   set_idom(loop_head,  loop_head ->in(LoopNode::EntryControl), dd);
1569   set_idom(clone_head, clone_head->in(LoopNode::EntryControl), dd);
1570 
1571   // Kill the clone's backedge
1572   Node *newcle = old_new[loop_end->_idx];
1573   _igvn.hash_delete( newcle );
1574   Node *one = _igvn.intcon(1);
1575   set_ctrl(one, C->root());
1576   newcle->set_req(1, one);
1577   // Force clone into same loop body
1578   uint max = loop->_body.size();
1579   for( uint k = 0; k < max; k++ ) {
1580     Node *old = loop->_body.at(k);
1581     Node *nnn = old_new[old->_idx];
1582     loop->_body.push(nnn);
1583     if (!has_ctrl(old))
1584       set_loop(nnn, loop);
1585   }
1586 
1587   loop->record_for_igvn();
1588 
1589 #ifndef PRODUCT
1590   if (C->do_vector_loop() && ((PrintOpto && VerifyLoopOptimizations) || TraceLoopOpts)) {
1591     tty->print("\nnew loop after unroll\n");       loop->dump_head();
1592     for (uint i = 0; i < loop->_body.size(); i++) {
1593       loop->_body.at(i)->dump();
1594     }
1595     if (C->clone_map().is_debug()) {
1596       tty->print("\nCloneMap\n");
1597       Dict* dict = C->clone_map().dict();
1598       DictI i(dict);
1599       tty->print_cr("Dict@%p[%d] = ", dict, dict->Size());
1600       for (int ii = 0; i.test(); ++i, ++ii) {
1601         NodeCloneInfo cl((uint64_t)dict->operator[]((void*)i._key));
1602         tty->print("%d->%d:%d,", (int)(intptr_t)i._key, cl.idx(), cl.gen());
1603         if (ii % 10 == 9) {
1604           tty->print_cr(" ");
1605         }
1606       }
1607       tty->print_cr(" ");
1608     }
1609   }
1610 #endif
1611 
1612 }
1613 
1614 //------------------------------do_maximally_unroll----------------------------
1615 
1616 void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) {
1617   CountedLoopNode *cl = loop->_head->as_CountedLoop();
1618   assert(cl->has_exact_trip_count(), "trip count is not exact");
1619   assert(cl->trip_count() > 0, "");
1620 #ifndef PRODUCT
1621   if (TraceLoopOpts) {
1622     tty->print("MaxUnroll  %d ", cl->trip_count());
1623     loop->dump_head();
1624   }
1625 #endif
1626 
1627   // If loop is tripping an odd number of times, peel odd iteration
1628   if ((cl->trip_count() & 1) == 1) {
1629     do_peeling(loop, old_new);
1630   }
1631 
1632   // Now it's tripping an even number of times.  Double the loop body.
1633   // Do not adjust pre-guards; they are not needed and do not exist.
1634   if (cl->trip_count() > 0) {
1635     assert((cl->trip_count() & 1) == 0, "missed peeling");
1636     do_unroll(loop, old_new, false);
1637   }
1638 }
1639 
1640 void PhaseIdealLoop::mark_reductions(IdealLoopTree *loop) {
1641   if (!SuperWordReductions) return;
1642 
1643   CountedLoopNode* loop_head = loop->_head->as_CountedLoop();
1644   if (loop_head->unrolled_count() > 1) {
1645     return;
1646   }
1647 
1648   Node* trip_phi = loop_head->phi();
1649   for (DUIterator_Fast imax, i = loop_head->fast_outs(imax); i < imax; i++) {
1650     Node* phi = loop_head->fast_out(i);
1651     if (phi->is_Phi() && phi->outcnt() > 0 && phi != trip_phi) {
1652       // Consider only loop-carried definitions that are not trip counters.
1653       Node* def_node = phi->in(LoopNode::LoopBackControl);
1654 
1655       if (def_node != NULL) {
1656         Node* n_ctrl = get_ctrl(def_node);
1657         if (n_ctrl != NULL && loop->is_member(get_loop(n_ctrl))) {
1658           // Now test it to see if it fits the standard pattern for a reduction operator.
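               // e.g. (illustrative) for "sum += a[i]" the AddI on the backedge
               // takes the sum phi as an input and feeds the phi's backedge,
               // with no other use of the running sum inside the loop.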
1659           int opc = def_node->Opcode();
1660           if (opc != ReductionNode::opcode(opc, def_node->bottom_type()->basic_type())) {
1661             if (!def_node->is_reduction()) { // Not marked yet
1662               // To be a reduction, the arithmetic node must have the phi as input and provide a def to it
1663               bool ok = false;
1664               for (unsigned j = 1; j < def_node->req(); j++) {
1665                 Node* in = def_node->in(j);
1666                 if (in == phi) {
1667                   ok = true;
1668                   break;
1669                 }
1670               }
1671 
1672               // Do nothing if we did not match the initial criteria.
1673               if (!ok) {
1674                 continue;
1675               }
1676 
1677               // The result of the reduction must not be used in the loop
1678               for (DUIterator_Fast kmax, k = def_node->fast_outs(kmax); k < kmax && ok; k++) {
1679                 Node* u = def_node->fast_out(k);
1680                 if (has_ctrl(u) && !loop->is_member(get_loop(get_ctrl(u)))) {
1681                   continue;
1682                 }
1683                 if (u == phi) {
1684                   continue;
1685                 }
1686                 ok = false;
1687               }
1688 
1689               // Mark it as a reduction iff the uses conform.
1690               if (ok) {
1691                 def_node->add_flag(Node::Flag_is_reduction);
1692                 loop_head->mark_has_reductions();
1693               }
1694             }
1695           }
1696         }
1697       }
1698     }
1699   }
1700 }
1701 
1702 //------------------------------dominates_backedge---------------------------------
1703 // Returns true if ctrl is executed on every complete iteration
1704 bool IdealLoopTree::dominates_backedge(Node* ctrl) {
1705   assert(ctrl->is_CFG(), "must be control");
1706   Node* backedge = _head->as_Loop()->in(LoopNode::LoopBackControl);
1707   return _phase->dom_lca_internal(ctrl, backedge) == ctrl;
1708 }
1709 
1710 //------------------------------adjust_limit-----------------------------------
1711 // Helper function for add_constraint().
1712 Node* PhaseIdealLoop::adjust_limit(int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl) {
1713   // Compute "I :: (limit-offset)/scale"
1714   Node *con = new SubINode(rc_limit, offset);
1715   register_new_node(con, pre_ctrl);
1716   Node *X = new DivINode(0, con, scale);
1717   register_new_node(X, pre_ctrl);
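       // e.g. (illustrative) rc_limit=100, offset=4, scale=2 gives X=48; with
       // stride_con > 0 the limit below becomes min(loop_limit, 48).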
1718 
1719   // Adjust loop limit
1720   loop_limit = (stride_con > 0)
1721                ? (Node*)(new MinINode(loop_limit, X))
1722                : (Node*)(new MaxINode(loop_limit, X));
1723   register_new_node(loop_limit, pre_ctrl);
1724   return loop_limit;
1725 }
1726 
1727 //------------------------------add_constraint---------------------------------
1728 // Constrain the main loop iterations so the conditions:
1729 //    low_limit <= scale_con * I + offset  <  upper_limit
1730 // always holds true.  That is, either increase the number of iterations in
1731 // the pre-loop or the post-loop until the condition holds true in the main
1732 // loop.  Stride, scale, offset and limit are all loop invariant.  Further,
1733 // stride and scale are constants (offset and limit often are).
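     // e.g. (illustrative) a range check on a[2*i + off] inside a stride-1 loop
     // contributes the constraint 0 <= 2*I + off < a.length: the pre-loop limit
     // is raised to absorb iterations that could violate the low bound, and the
     // main-loop limit is lowered so the high bound cannot be violated there,
     // leaving the remaining iterations to the post-loop.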
1734 void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) {
1735   // For positive stride, the pre-loop limit always uses a MAX function
1736   // and the main loop a MIN function.  For negative stride these are
1737   // reversed.
1738 
1739   // Also for positive stride*scale the affine function is increasing, so the
1740   // pre-loop must check for underflow and the post-loop for overflow.
1741   // Negative stride*scale reverses this; pre-loop checks for overflow and
1742   // post-loop for underflow.
1743 
1744   Node *scale = _igvn.intcon(scale_con);
1745   set_ctrl(scale, C->root());
1746 
1747   if ((stride_con^scale_con) >= 0) { // Use XOR to avoid overflow
1748     // The overflow limit: scale*I+offset < upper_limit
1749     // For main-loop compute
1750     //   ( if (scale > 0) /* and stride > 0 */
1751     //       I < (upper_limit-offset)/scale
1752     //     else /* scale < 0 and stride < 0 */
1753     //       I > (upper_limit-offset)/scale
1754     //   )
1755     //
1756     // (upper_limit-offset) may overflow or underflow.
1757     // But that is fine since the main loop will either run
1758     // fewer iterations or be skipped entirely in that case.
1759     *main_limit = adjust_limit(stride_con, scale, offset, upper_limit, *main_limit, pre_ctrl);
1760 
1761     // The underflow limit: low_limit <= scale*I+offset.
1762     // For pre-loop compute
1763     //   NOT(scale*I+offset >= low_limit)
1764     //   scale*I+offset < low_limit
1765     //   ( if (scale > 0) /* and stride > 0 */
1766     //       I < (low_limit-offset)/scale
1767     //     else /* scale < 0 and stride < 0 */
1768     //       I > (low_limit-offset)/scale
1769     //   )
1770 
1771     if (low_limit->get_int() == -max_jint) {
1772       if (!RangeLimitCheck) return;
1773       // We need this guard when scale*pre_limit+offset >= limit
1774       // due to underflow. So we need to execute the pre-loop until
1775       // scale*I+offset >= min_int. But (min_int-offset) will
1776       // underflow when offset > 0, and X will be > original_limit
1777       // when stride > 0. To avoid this we replace a positive offset with 0.
1778       //
1779       // Also (min_int+1 == -max_int) is used instead of min_int here
1780       // to avoid problem with scale == -1 (min_int/(-1) == min_int).
1781       Node* shift = _igvn.intcon(31);
1782       set_ctrl(shift, C->root());
1783       Node* sign = new RShiftINode(offset, shift);
1784       register_new_node(sign, pre_ctrl);
1785       offset = new AndINode(offset, sign);
1786       register_new_node(offset, pre_ctrl);
1787     } else {
1788       assert(low_limit->get_int() == 0, "wrong low limit for range check");
1789       // The only problem we have here is when offset == min_int,
1790       // since (0-min_int) == min_int. It may be fine for stride > 0,
1791       // but for stride < 0 X will be < original_limit. To avoid this,
1792       // max(pre_limit, original_limit) is used in do_range_check().
1793     }
1794     // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
1795     *pre_limit = adjust_limit((-stride_con), scale, offset, low_limit, *pre_limit, pre_ctrl);
1796 
1797   } else { // stride_con*scale_con < 0
1798     // For negative stride*scale pre-loop checks for overflow and
1799     // post-loop for underflow.
1800     //
1801     // The overflow limit: scale*I+offset < upper_limit
1802     // For pre-loop compute
1803     //   NOT(scale*I+offset < upper_limit)
1804     //   scale*I+offset >= upper_limit
1805     //   scale*I+offset+1 > upper_limit
1806     //   ( if (scale < 0) /* and stride > 0 */
1807     //       I < (upper_limit-(offset+1))/scale
1808     //     else /* scale > 0 and stride < 0 */
1809     //       I > (upper_limit-(offset+1))/scale
1810     //   )
1811     //
1812     // (upper_limit-offset-1) may underflow or overflow.
1813     // To avoid this, min(pre_limit, original_limit) is used
1814     // in do_range_check() for stride > 0 and max() for stride < 0.
1815     Node *one  = _igvn.intcon(1);
1816     set_ctrl(one, C->root());
1817 
1818     Node *plus_one = new AddINode(offset, one);
1819     register_new_node( plus_one, pre_ctrl );
1820     // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
1821     *pre_limit = adjust_limit((-stride_con), scale, plus_one, upper_limit, *pre_limit, pre_ctrl);
1822 
1823     if (low_limit->get_int() == -max_jint) {
1824       if (!RangeLimitCheck) return;
1825       // We need this guard when scale*main_limit+offset >= limit
1826       // due to underflow. So we need to execute the main loop while
1827       // scale*I+offset+1 > min_int. But (min_int-offset-1) will
1828       // underflow when (offset+1) > 0, and X will be < main_limit
1829       // when scale < 0 (and stride > 0). To avoid this we replace
1830       // a positive (offset+1) with 0.
1831       //
1832       // Also (min_int+1 == -max_int) is used instead of min_int here
1833       // to avoid problem with scale == -1 (min_int/(-1) == min_int).
1834       Node* shift = _igvn.intcon(31);
1835       set_ctrl(shift, C->root());
1836       Node* sign = new RShiftINode(plus_one, shift);
1837       register_new_node(sign, pre_ctrl);
1838       plus_one = new AndINode(plus_one, sign);
1839       register_new_node(plus_one, pre_ctrl);
1840     } else {
1841       assert(low_limit->get_int() == 0, "wrong low limit for range check");
1842       // The only problem we have here is when offset == max_int,
1843       // since (max_int+1) == min_int and (0-min_int) == min_int.
1844       // But that is fine since the main loop will either run
1845       // fewer iterations or be skipped entirely in that case.
1846     }
1847     // The underflow limit: low_limit <= scale*I+offset.
1848     // For main-loop compute
1849     //   scale*I+offset+1 > low_limit
1850     //   ( if (scale < 0) /* and stride > 0 */
1851     //       I < (low_limit-(offset+1))/scale
1852     //     else /* scale > 0 and stride < 0 */
1853     //       I > (low_limit-(offset+1))/scale
1854     //   )
1855 
1856     *main_limit = adjust_limit(stride_con, scale, plus_one, low_limit, *main_limit, pre_ctrl);
1857   }
1858 }
1859 
1860 
1861 //------------------------------is_scaled_iv---------------------------------
1862 // Return true if exp is a constant times an induction var
1863 bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, int* p_scale) {
1864   if (exp == iv) {
1865     if (p_scale != NULL) {
1866       *p_scale = 1;
1867     }
1868     return true;
1869   }
1870   int opc = exp->Opcode();
1871   if (opc == Op_MulI) {
1872     if (exp->in(1) == iv && exp->in(2)->is_Con()) {
1873       if (p_scale != NULL) {
1874         *p_scale = exp->in(2)->get_int();
1875       }
1876       return true;
1877     }
1878     if (exp->in(2) == iv && exp->in(1)->is_Con()) {
1879       if (p_scale != NULL) {
1880         *p_scale = exp->in(1)->get_int();
1881       }
1882       return true;
1883     }
1884   } else if (opc == Op_LShiftI) {
1885     if (exp->in(1) == iv && exp->in(2)->is_Con()) {
1886       if (p_scale != NULL) {
1887         *p_scale = 1 << exp->in(2)->get_int();
1888       }
1889       return true;
1890     }
1891   }
1892   return false;
1893 }
1894 
1895 //-----------------------------is_scaled_iv_plus_offset------------------------------
1896 // Return true if exp is a simple induction variable expression: k1*iv + (invar + k2)
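     // Matches shapes such as (illustrative) "4*iv + inv", "inv - 3*iv" (scale
     // negated), and, one level deep, "(k1*iv + inv) + k2" via the
     // depth-limited recursion below.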
1897 bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth) {
1898   if (is_scaled_iv(exp, iv, p_scale)) {
1899     if (p_offset != NULL) {
1900       Node *zero = _igvn.intcon(0);
1901       set_ctrl(zero, C->root());
1902       *p_offset = zero;
1903     }
1904     return true;
1905   }
1906   int opc = exp->Opcode();
1907   if (opc == Op_AddI) {
1908     if (is_scaled_iv(exp->in(1), iv, p_scale)) {
1909       if (p_offset != NULL) {
1910         *p_offset = exp->in(2);
1911       }
1912       return true;
1913     }
1914     if (is_scaled_iv(exp->in(2), iv, p_scale)) {
1915       if (p_offset != NULL) {
1916         *p_offset = exp->in(1);
1917       }
1918       return true;
1919     }
1920     if (exp->in(2)->is_Con()) {
1921       Node* offset2 = NULL;
1922       if (depth < 2 &&
1923           is_scaled_iv_plus_offset(exp->in(1), iv, p_scale,
1924                                    p_offset != NULL ? &offset2 : NULL, depth+1)) {
1925         if (p_offset != NULL) {
1926           Node *ctrl_off2 = get_ctrl(offset2);
1927           Node* offset = new AddINode(offset2, exp->in(2));
1928           register_new_node(offset, ctrl_off2);
1929           *p_offset = offset;
1930         }
1931         return true;
1932       }
1933     }
1934   } else if (opc == Op_SubI) {
1935     if (is_scaled_iv(exp->in(1), iv, p_scale)) {
1936       if (p_offset != NULL) {
1937         Node *zero = _igvn.intcon(0);
1938         set_ctrl(zero, C->root());
1939         Node *ctrl_off = get_ctrl(exp->in(2));
1940         Node* offset = new SubINode(zero, exp->in(2));
1941         register_new_node(offset, ctrl_off);
1942         *p_offset = offset;
1943       }
1944       return true;
1945     }
1946     if (is_scaled_iv(exp->in(2), iv, p_scale)) {
1947       if (p_offset != NULL) {
1948         *p_scale *= -1;
1949         *p_offset = exp->in(1);
1950       }
1951       return true;
1952     }
1953   }
1954   return false;
1955 }
1956 
1957 //------------------------------do_range_check---------------------------------
1958 // Eliminate range-checks and other trip-counter vs loop-invariant tests.
1959 void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
1960 #ifndef PRODUCT
1961   if (PrintOpto && VerifyLoopOptimizations) {
1962     tty->print("Range Check Elimination ");
1963     loop->dump_head();
1964   } else if (TraceLoopOpts) {
1965     tty->print("RangeCheck   ");
1966     loop->dump_head();
1967   }
1968 #endif
1969   assert(RangeCheckElimination, "");
1970   CountedLoopNode *cl = loop->_head->as_CountedLoop();
1971   assert(cl->is_main_loop(), "");
1972 
1973   // protect against stride not being a constant
1974   if (!cl->stride_is_con())
1975     return;
1976 
1977   // Find the trip counter; we are iteration splitting based on it
1978   Node *trip_counter = cl->phi();
1979   // Find the main loop limit; we will trim its iterations
1980   // so that the end tests never trip.
1981   Node *main_limit = cl->limit();
1982 
1983   // Need to find the main-loop zero-trip guard
1984   Node *ctrl  = cl->in(LoopNode::EntryControl);
1985   assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "");
1986   Node *iffm = ctrl->in(0);
1987   assert(iffm->Opcode() == Op_If, "");
1988   Node *bolzm = iffm->in(1);
1989   assert(bolzm->Opcode() == Op_Bool, "");
1990   Node *cmpzm = bolzm->in(1);
1991   assert(cmpzm->is_Cmp(), "");
1992   Node *opqzm = cmpzm->in(2);
1993   // Cannot optimize a loop if the zero-trip Opaque1 node was optimized
1994   // away and then another round of loop opts attempted.
1995   if (opqzm->Opcode() != Op_Opaque1)
1996     return;
1997   assert(opqzm->in(1) == main_limit, "do not understand situation");
1998 
1999   // Find the pre-loop limit; we will expand its iterations so
2000   // that the low tests never trip.
2001   Node *p_f = iffm->in(0);
2002   // pre loop may have been optimized out
2003   if (p_f->Opcode() != Op_IfFalse) {
2004     return;
2005   }
2006   CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
2007   assert(pre_end->loopnode()->is_pre_loop(), "");
2008   Node *pre_opaq1 = pre_end->limit();
2009   // Occasionally it's possible for a pre-loop Opaque1 node to be
2010   // optimized away and then another round of loop opts attempted.
2011   // We cannot optimize this particular loop in that case.
2012   if (pre_opaq1->Opcode() != Op_Opaque1)
2013     return;
2014   Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1;
2015   Node *pre_limit = pre_opaq->in(1);
2016 
2017   // Where do we put new limit calculations
2018   Node *pre_ctrl = pre_end->loopnode()->in(LoopNode::EntryControl);
2019 
2020   // Ensure the original loop limit is available from the
2021   // pre-loop Opaque1 node.
2022   Node *orig_limit = pre_opaq->original_loop_limit();
2023   if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP)
2024     return;
2025 
2026   // Must know whether it's a count-up or count-down loop
2027 
2028   int stride_con = cl->stride_con();
2029   Node *zero = _igvn.intcon(0);
2030   Node *one  = _igvn.intcon(1);
2031   // Use symmetrical int range [-max_jint,max_jint]
2032   Node *mini = _igvn.intcon(-max_jint);
2033   set_ctrl(zero, C->root());
2034   set_ctrl(one,  C->root());
2035   set_ctrl(mini, C->root());
2036 
2037   // Range checks that do not dominate the loop backedge (i.e.,
2038   // conditionally executed) can lengthen the pre-loop limit beyond
2039   // the original loop limit. To prevent this, the pre limit is
2040   // (for stride > 0) MINed with the original loop limit (MAXed
2041   // for stride < 0) when some range check (rc) is conditionally
2042   // executed.
2043   bool conditional_rc = false;
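       // e.g. (illustrative) in "for (...) { if (p != NULL) sum += a[i-1]; }"
       // the range check on a[i-1] is conditionally executed, so conditional_rc
       // is set and the pre-loop limit is later MINed/MAXed with the original
       // limit.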
2044 
2045   // Check loop body for tests of trip-counter plus loop-invariant vs
2046   // loop-invariant.
2047   for( uint i = 0; i < loop->_body.size(); i++ ) {
2048     Node *iff = loop->_body[i];
2049     if (iff->Opcode() == Op_If ||
2050         iff->Opcode() == Op_RangeCheck) { // Test?
2051       // Test is an IfNode, has 2 projections.  If BOTH are in the loop
2052       // we need loop unswitching instead of iteration splitting.
2053       Node *exit = loop->is_loop_exit(iff);
2054       if( !exit ) continue;
2055       int flip = (exit->Opcode() == Op_IfTrue) ? 1 : 0;
2056 
2057       // Get boolean condition to test
2058       Node *i1 = iff->in(1);
2059       if( !i1->is_Bool() ) continue;
2060       BoolNode *bol = i1->as_Bool();
2061       BoolTest b_test = bol->_test;
2062       // Flip sense of test if exit condition is flipped
2063       if( flip )
2064         b_test = b_test.negate();
2065 
2066       // Get compare
2067       Node *cmp = bol->in(1);
2068 
2069       // Look for trip_counter + offset vs limit
2070       Node *rc_exp = cmp->in(1);
2071       Node *limit  = cmp->in(2);
2072       jint scale_con = 1;       // Assume trip counter not scaled
2073 
2074       Node *limit_c = get_ctrl(limit);
2075       if( loop->is_member(get_loop(limit_c) ) ) {
2076         // Compare might have operands swapped; commute them
2077         b_test = b_test.commute();
2078         rc_exp = cmp->in(2);
2079         limit  = cmp->in(1);
2080         limit_c = get_ctrl(limit);
2081         if( loop->is_member(get_loop(limit_c) ) )
2082           continue;             // Both inputs are loop varying; cannot RCE
2083       }
2084       // Here we know 'limit' is loop invariant
2085 
2086       // 'limit' may be pinned below the zero-trip test (probably from a
2087       // previous round of RCE), in which case it can't be used in the
2088       // zero-trip test expression, which must occur before the zero test's if.
2089       if( limit_c == ctrl ) {
2090         continue;  // Don't rce this check but continue looking for other candidates.
2091       }
2092 
2093       // Check for scaled induction variable plus an offset
2094       Node *offset = NULL;
2095 
2096       if (!is_scaled_iv_plus_offset(rc_exp, trip_counter, &scale_con, &offset)) {
2097         continue;
2098       }
2099 
2100       Node *offset_c = get_ctrl(offset);
2101       if( loop->is_member( get_loop(offset_c) ) )
2102         continue;               // Offset is not really loop invariant
2103       // Here we know 'offset' is loop invariant.
2104 
2105       // As above for the 'limit', the 'offset' may be pinned below the
2106       // zero-trip test.
2107       if( offset_c == ctrl ) {
2108         continue; // Don't rce this check but continue looking for other candidates.
2109       }
2110 #ifdef ASSERT
2111       if (TraceRangeLimitCheck) {
2112         tty->print_cr("RC bool node%s", flip ? " flipped:" : ":");
2113         bol->dump(2);
2114       }
2115 #endif
2116       // At this point we have the expression as:
2117       //   scale_con * trip_counter + offset :: limit
2118       // where scale_con, offset and limit are loop invariant.  Trip_counter
2119       // monotonically increases by stride_con, a constant.  Both (or either)
2120       // stride_con and scale_con can be negative which will flip about the
2121       // sense of the test.
2122 
2123       // Adjust pre and main loop limits to guard the correct iteration set
2124       if( cmp->Opcode() == Op_CmpU ) {// Unsigned compare is really 2 tests
2125         if( b_test._test == BoolTest::lt ) { // Range checks always use lt
2126           // The underflow and overflow limits: 0 <= scale*I+offset < limit
2127           add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit );
2128           if (!conditional_rc) {
2129             // (0-offset)/scale could be outside the loop's iteration range.
2130             conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck;
2131           }
2132         } else {
2133           if (PrintOpto) {
2134             tty->print_cr("missed RCE opportunity");
2135           }
2136           continue;             // In release mode, ignore it
2137         }
2138       } else {                  // Otherwise work on normal compares
2139         switch( b_test._test ) {
2140         case BoolTest::gt:
2141           // Fall into GE case
2142         case BoolTest::ge:
2143           // Convert (I*scale+offset) >= Limit to (I*(-scale)+(-offset)) <= -Limit
2144           scale_con = -scale_con;
2145           offset = new SubINode( zero, offset );
2146           register_new_node( offset, pre_ctrl );
2147           limit  = new SubINode( zero, limit  );
2148           register_new_node( limit, pre_ctrl );
2149           // Fall into LE case
2150         case BoolTest::le:
2151           if (b_test._test != BoolTest::gt) {
2152             // Convert X <= Y to X < Y+1
2153             limit = new AddINode( limit, one );
2154             register_new_node( limit, pre_ctrl );
2155           }
2156           // Fall into LT case
2157         case BoolTest::lt:
2158           // The underflow and overflow limits: MIN_INT <= scale*I+offset < limit
2159           // Note: (MIN_INT+1 == -MAX_INT) is used instead of MIN_INT here
2160           // to avoid problem with scale == -1: MIN_INT/(-1) == MIN_INT.
2161           add_constraint( stride_con, scale_con, offset, mini, limit, pre_ctrl, &pre_limit, &main_limit );
2162           if (!conditional_rc) {
2163             // ((MIN_INT+1)-offset)/scale could be outside the loop's iteration
2164             // range. Note: a positive offset is replaced with 0, but
2165             // (MIN_INT+1)/scale could still be outside the loop range.
2166             conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck;
2167           }
2168           break;
2169         default:
2170           if (PrintOpto) {
2171             tty->print_cr("missed RCE opportunity");
2172           }
2173           continue;             // Unhandled case
2174         }
2175       }
2176 
2177       // Kill the eliminated test
2178       C->set_major_progress();
2179       Node *kill_con = _igvn.intcon( 1-flip );
2180       set_ctrl(kill_con, C->root());
2181       _igvn.replace_input_of(iff, 1, kill_con);
2182       // Find surviving projection
2183       assert(iff->is_If(), "");
2184       ProjNode* dp = ((IfNode*)iff)->proj_out(1-flip);
2185       // Find loads off the surviving projection; remove their control edge
2186       for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
2187         Node* cd = dp->fast_out(i); // Control-dependent node
2188         if (cd->is_Load() && cd->depends_only_on_test()) {   // Loads can now float around in the loop
2189           // Allow the load to float around in the loop, or before it
2190           // but NOT before the pre-loop.
2191           _igvn.replace_input_of(cd, 0, ctrl); // ctrl, not NULL
2192           --i;
2193           --imax;
2194         }
2195       }
2196 
2197     } // End of is IF
2198 
2199   }
2200 
2201   // Update loop limits
2202   if (conditional_rc) {
2203     pre_limit = (stride_con > 0) ? (Node*)new MinINode(pre_limit, orig_limit)
2204                                  : (Node*)new MaxINode(pre_limit, orig_limit);
2205     register_new_node(pre_limit, pre_ctrl);
2206   }
2207   _igvn.replace_input_of(pre_opaq, 1, pre_limit);
2208 
2209   // Note: we are making the main loop limit no longer precise;
2210   // need to round up based on stride.
2211   cl->set_nonexact_trip_count();
2212   if (!LoopLimitCheck && stride_con != 1 && stride_con != -1) { // Cutout for common case
2213     // "Standard" round-up logic:  ([main_limit-init+(y-1)]/y)*y+init
2214     // Hopefully, compiler will optimize for powers of 2.
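         // Worked example (illustrative): init=1, stride=3, main_limit=11 gives
         // span=10, rndup=2, div=4, newlim = 4*3 + 1 = 13, the first iv value
         // at or above the old limit that the striding can actually reach.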
2215     Node *ctrl = get_ctrl(main_limit);
2216     Node *stride = cl->stride();
2217     Node *init = cl->init_trip()->uncast();
2218     Node *span = new SubINode(main_limit,init);
2219     register_new_node(span,ctrl);
2220     Node *rndup = _igvn.intcon(stride_con + ((stride_con>0)?-1:1));
2221     Node *add = new AddINode(span,rndup);
2222     register_new_node(add,ctrl);
2223     Node *div = new DivINode(0,add,stride);
2224     register_new_node(div,ctrl);
2225     Node *mul = new MulINode(div,stride);
2226     register_new_node(mul,ctrl);
2227     Node *newlim = new AddINode(mul,init);
2228     register_new_node(newlim,ctrl);
2229     main_limit = newlim;
2230   }
2231 
2232   Node *main_cle = cl->loopexit();
2233   Node *main_bol = main_cle->in(1);
2234   // Hacking loop bounds; need private copies of exit test
2235   if( main_bol->outcnt() > 1 ) {// BoolNode shared?
2236     main_bol = main_bol->clone();// Clone a private BoolNode
2237     register_new_node( main_bol, main_cle->in(0) );
2238     _igvn.replace_input_of(main_cle, 1, main_bol);
2239   }
2240   Node *main_cmp = main_bol->in(1);
2241   if( main_cmp->outcnt() > 1 ) { // CmpNode shared?
2242     main_cmp = main_cmp->clone();// Clone a private CmpNode
2243     register_new_node( main_cmp, main_cle->in(0) );
2244     _igvn.replace_input_of(main_bol, 1, main_cmp);
2245   }
2246   // Hack the now-private loop bounds
2247   _igvn.replace_input_of(main_cmp, 2, main_limit);
2248   // The OpaqueNode is unshared by design
2249   assert( opqzm->outcnt() == 1, "cannot hack shared node" );
2250   _igvn.replace_input_of(opqzm, 1, main_limit);
2251 }
2252 
2253 //------------------------------DCE_loop_body----------------------------------
2254 // Remove simplistic dead code from loop body
2255 void IdealLoopTree::DCE_loop_body() {
2256   for( uint i = 0; i < _body.size(); i++ )
2257     if( _body.at(i)->outcnt() == 0 )
2258       _body.map( i--, _body.pop() );
2259 }
2260 
2261 
2262 //------------------------------adjust_loop_exit_prob--------------------------
2263 // Look for loop-exit tests with the 50/50 (or worse) guesses from the parsing stage.
2264 // Replace with a 1-in-10 exit guess.
2265 void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) {
2266   Node *test = tail();
2267   while( test != _head ) {
2268     uint top = test->Opcode();
2269     if( top == Op_IfTrue || top == Op_IfFalse ) {
2270       int test_con = ((ProjNode*)test)->_con;
2271       assert(top == (uint)(test_con? Op_IfTrue: Op_IfFalse), "sanity");
2272       IfNode *iff = test->in(0)->as_If();
2273       if( iff->outcnt() == 2 ) {        // Ignore dead tests
2274         Node *bol = iff->in(1);
2275         if( bol && bol->req() > 1 && bol->in(1) &&
2276             ((bol->in(1)->Opcode() == Op_StorePConditional ) ||
2277              (bol->in(1)->Opcode() == Op_StoreIConditional ) ||
2278              (bol->in(1)->Opcode() == Op_StoreLConditional ) ||
2279              (bol->in(1)->Opcode() == Op_CompareAndSwapI ) ||
2280              (bol->in(1)->Opcode() == Op_CompareAndSwapL ) ||
2281              (bol->in(1)->Opcode() == Op_CompareAndSwapP ) ||
2282              (bol->in(1)->Opcode() == Op_CompareAndSwapN )))
2283           return;               // Allocation loops RARELY take backedge
2284         // Find the OTHER exit path from the IF
2285         Node* ex = iff->proj_out(1-test_con);
2286         float p = iff->_prob;
2287         if( !phase->is_member( this, ex ) && iff->_fcnt == COUNT_UNKNOWN ) {
2288           if( top == Op_IfTrue ) {
2289             if( p < (PROB_FAIR + PROB_UNLIKELY_MAG(3))) {
2290               iff->_prob = PROB_STATIC_FREQUENT;
2291             }
2292           } else {
2293             if( p > (PROB_FAIR - PROB_UNLIKELY_MAG(3))) {
2294               iff->_prob = PROB_STATIC_INFREQUENT;
2295             }
2296           }
2297         }
2298       }
2299     }
2300     test = phase->idom(test);
2301   }
2302 }
2303 
2304 #ifdef ASSERT
2305 static CountedLoopNode* locate_pre_from_main(CountedLoopNode *cl) {
2306   Node *ctrl  = cl->in(LoopNode::EntryControl);
2307   assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "");
2308   Node *iffm = ctrl->in(0);
2309   assert(iffm->Opcode() == Op_If, "");
2310   Node *p_f = iffm->in(0);
2311   assert(p_f->Opcode() == Op_IfFalse, "");
2312   CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
2313   assert(pre_end->loopnode()->is_pre_loop(), "");
2314   return pre_end->loopnode();
2315 }
2316 #endif
2317 
2318 // Remove the main and post loops and make the pre loop execute all
2319 // iterations. Useful when the pre loop is found empty.
2320 void IdealLoopTree::remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *phase) {
2321   CountedLoopEndNode* pre_end = cl->loopexit();
2322   Node* pre_cmp = pre_end->cmp_node();
2323   if (pre_cmp->in(2)->Opcode() != Op_Opaque1) {
2324     // Only safe to remove the main loop if the compiler optimized it
2325     // out based on an unknown number of iterations
2326     return;
2327   }
2328 
2329   // Can we find the main loop?
2330   if (_next == NULL) {
2331     return;
2332   }
2333 
2334   Node* next_head = _next->_head;
2335   if (!next_head->is_CountedLoop()) {
2336     return;
2337   }
2338 
2339   CountedLoopNode* main_head = next_head->as_CountedLoop();
2340   if (!main_head->is_main_loop()) {
2341     return;
2342   }
2343 
2344   assert(locate_pre_from_main(main_head) == cl, "bad main loop");
2345   Node* main_iff = main_head->in(LoopNode::EntryControl)->in(0);
2346 
2347   // Remove the Opaque1Node of the pre loop and make it execute all iterations
2348   phase->_igvn.replace_input_of(pre_cmp, 2, pre_cmp->in(2)->in(2));
2349   // Remove the Opaque1Node of the main loop so it can be optimized out
2350   Node* main_cmp = main_iff->in(1)->in(1);
2351   assert(main_cmp->in(2)->Opcode() == Op_Opaque1, "main loop has no opaque node?");
2352   phase->_igvn.replace_input_of(main_cmp, 2, main_cmp->in(2)->in(1));
2353 }
2354 
2355 //------------------------------policy_do_remove_empty_loop--------------------
2356 // Micro-benchmark spamming.  Policy is to always remove empty loops.
2357 // The 'DO' part is to replace the trip counter with the value it will
2358 // have on the last iteration.  This will break the loop.
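     // e.g. (illustrative) for an empty "for (i = 0; i < n; i++) {}" the phi is
     // replaced by its value on the last trip (n-1 for stride 1, computed as
     // exact_limit - stride below), after which the exit test folds and the
     // backedge dies.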
2359 bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) {
2360   // The body must be no larger than that of an empty loop
2361   if (_body.size() > EMPTY_LOOP_SIZE)
2362     return false;
2363 
2364   if (!_head->is_CountedLoop())
2365     return false;     // Dead loop
2366   CountedLoopNode *cl = _head->as_CountedLoop();
2367   if (!cl->is_valid_counted_loop())
2368     return false; // Malformed loop
2369   if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
2370     return false;             // Infinite loop
2371 
2372   if (cl->is_pre_loop()) {
2373     // If the loop we are removing is a pre-loop then the main and
2374     // post loop can be removed as well
2375     remove_main_post_loops(cl, phase);
2376   }
2377 
2378 #ifdef ASSERT
2379   // Ensure only one phi which is the iv.
2380   Node* iv = NULL;
2381   for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) {
2382     Node* n = cl->fast_out(i);
2383     if (n->Opcode() == Op_Phi) {
2384       assert(iv == NULL, "Too many phis" );
2385       iv = n;
2386     }
2387   }
2388   assert(iv == cl->phi(), "Wrong phi" );
2389 #endif
2390 
2391   // main and post loops have explicitly created zero trip guard
2392   bool needs_guard = !cl->is_main_loop() && !cl->is_post_loop();
2393   if (needs_guard) {
2394     // Skip the guard if the init and limit value ranges do not overlap.
2395     const TypeInt* init_t = phase->_igvn.type(cl->init_trip())->is_int();
2396     const TypeInt* limit_t = phase->_igvn.type(cl->limit())->is_int();
2397     int  stride_con = cl->stride_con();
2398     if (stride_con > 0) {
2399       needs_guard = (init_t->_hi >= limit_t->_lo);
2400     } else {
2401       needs_guard = (init_t->_lo <= limit_t->_hi);
2402     }
2403   }
2404   if (needs_guard) {
2405     // Check for an obvious zero trip guard.
2406     Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->in(LoopNode::EntryControl));
2407     if (inctrl->Opcode() == Op_IfTrue) {
2408       // The test should look like just the backedge of a CountedLoop
2409       Node* iff = inctrl->in(0);
2410       if (iff->is_If()) {
2411         Node* bol = iff->in(1);
2412         if (bol->is_Bool() && bol->as_Bool()->_test._test == cl->loopexit()->test_trip()) {
2413           Node* cmp = bol->in(1);
2414           if (cmp->is_Cmp() && cmp->in(1) == cl->init_trip() && cmp->in(2) == cl->limit()) {
2415             needs_guard = false;
2416           }
2417         }
2418       }
2419     }
2420   }
2421 
2422 #ifndef PRODUCT
2423   if (PrintOpto) {
2424     tty->print("Removing empty loop with%s zero trip guard", needs_guard ? "out" : "");
2425     this->dump_head();
2426   } else if (TraceLoopOpts) {
2427     tty->print("Empty with%s zero trip guard   ", needs_guard ? "out" : "");
2428     this->dump_head();
2429   }
2430 #endif
2431 
2432   if (needs_guard) {
2433     // Peel the loop to ensure there's a zero trip guard
2434     Node_List old_new;
2435     phase->do_peeling(this, old_new);
2436   }
2437 
2438   // Replace the phi at loop head with the final value of the last
2439   // iteration.  Then the CountedLoopEnd will collapse (backedge never
2440   // taken) and all loop-invariant uses of the exit values will be correct.
2441   Node *phi = cl->phi();
2442   Node *exact_limit = phase->exact_limit(this);
2443   if (exact_limit != cl->limit()) {
2444     // We also need to replace the original limit to collapse loop exit.
2445     Node* cmp = cl->loopexit()->cmp_node();
2446     assert(cl->limit() == cmp->in(2), "sanity");
2447     phase->_igvn._worklist.push(cmp->in(2)); // put limit on worklist
2448     phase->_igvn.replace_input_of(cmp, 2, exact_limit); // put cmp on worklist
2449   }
2450   // Note: the final value after the increment should not overflow since
2451   // the counted loop has a limit check predicate.
2452   Node *final = new SubINode( exact_limit, cl->stride() );
2453   phase->register_new_node(final,cl->in(LoopNode::EntryControl));
2454   phase->_igvn.replace_node(phi,final);
2455   phase->C->set_major_progress();
2456   return true;
2457 }
2458 
2459 //------------------------------policy_do_one_iteration_loop-------------------
2460 // Convert one iteration loop into normal code.
2461 bool IdealLoopTree::policy_do_one_iteration_loop( PhaseIdealLoop *phase ) {
2462   if (!_head->as_Loop()->is_valid_counted_loop())
2463     return false; // Only for counted loop
2464 
2465   CountedLoopNode *cl = _head->as_CountedLoop();
2466   if (!cl->has_exact_trip_count() || cl->trip_count() != 1) {
2467     return false;
2468   }
2469 
2470 #ifndef PRODUCT
2471   if (TraceLoopOpts) {
2472     tty->print("OneIteration ");
2473     this->dump_head();
2474   }
2475 #endif
2476 
2477   Node *init_n = cl->init_trip();
2478 #ifdef ASSERT
2479   // Loop boundaries should be constant since trip count is exact.
2480   assert(init_n->get_int() + cl->stride_con() >= cl->limit()->get_int(), "should be one iteration");
2481 #endif
2482   // Replace the phi at loop head with the value of the init_trip.
2483   // Then the CountedLoopEnd will collapse (backedge will not be taken)
2484   // and all loop-invariant uses of the exit values will be correct.
2485   phase->_igvn.replace_node(cl->phi(), cl->init_trip());
2486   phase->C->set_major_progress();
2487   return true;
2488 }
2489 
2490 //=============================================================================
2491 //------------------------------iteration_split_impl---------------------------
2492 bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) {
2493   // Compute exact loop trip count if possible.
2494   compute_exact_trip_count(phase);
2495 
2496   // Convert one iteration loop into normal code.
2497   if (policy_do_one_iteration_loop(phase))
2498     return true;
2499 
2500   // Check and remove empty loops (spam micro-benchmarks)
2501   if (policy_do_remove_empty_loop(phase))
2502     return true;  // Here we removed an empty loop
2503 
2504   bool should_peel = policy_peeling(phase); // Should we peel?
2505 
2506   bool should_unswitch = policy_unswitching(phase);
2507 
2508   // Non-counted loops may be peeled; exactly 1 iteration is peeled.
2509   // This removes loop-invariant tests (usually null checks).
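       // e.g. (illustrative) peeling one iteration of "while (c) { if (p ==
       // NULL) deopt(); use(p); }" lets the loop-invariant null check in the
       // remaining body fold against the dominating peeled copy.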
2510   if (!_head->is_CountedLoop()) { // Non-counted loop
2511     if (PartialPeelLoop && phase->partial_peel(this, old_new)) {
2512       // Partial peel succeeded so terminate this round of loop opts
2513       return false;
2514     }
2515     if (should_peel) {            // Should we peel?
2516       if (PrintOpto) { tty->print_cr("should_peel"); }
2517       phase->do_peeling(this,old_new);
2518     } else if (should_unswitch) {
2519       phase->do_unswitching(this, old_new);
2520     }
2521     return true;
2522   }
2523   CountedLoopNode *cl = _head->as_CountedLoop();
2524 
2525   if (!cl->is_valid_counted_loop()) return true; // Ignore various kinds of broken loops
2526 
2527   // Do nothing special to pre- and post- loops
2528   if (cl->is_pre_loop() || cl->is_post_loop()) return true;
2529 
2530   // Compute loop trip count from profile data
2531   compute_profile_trip_cnt(phase);
2532 
2533   // Before attempting fancy unrolling, RCE or alignment, see if we want
2534   // to completely unroll this loop or do loop unswitching.
2535   if (cl->is_normal_loop()) {
2536     if (should_unswitch) {
2537       phase->do_unswitching(this, old_new);
2538       return true;
2539     }
2540     bool should_maximally_unroll = policy_maximally_unroll(phase);
2541     if (should_maximally_unroll) {
2542       // Here we did some unrolling and peeling.  Eventually we will
2543       // completely unroll this loop and it will no longer be a loop.
2544       phase->do_maximally_unroll(this,old_new);
2545       return true;
2546     }
2547   }
2548 
2549   // Skip next optimizations if running low on nodes. Note that
2550   // policy_unswitching and policy_maximally_unroll have this check.
2551   int nodes_left = phase->C->max_node_limit() - phase->C->live_nodes();
2552   if ((int)(2 * _body.size()) > nodes_left) {
2553     return true;
2554   }
2555 
2556   // Counted loops may be peeled, may need some iterations run up
2557   // front for RCE, and may want to align loop refs to a cache
2558   // line.  Thus we clone a full loop up front whose trip count is
2559   // at least 1 (if peeling), but may be several more.
2560 
2561   // The main loop will start cache-line aligned with at least 1
2562   // iteration of the unrolled body (zero-trip test required) and
2563   // will have some range checks removed.
2564 
2565   // A post-loop will finish any odd iterations (leftover after
2566   // unrolling), plus any needed for RCE purposes.
2567 
2568   bool should_unroll = policy_unroll(phase);
2569 
2570   bool should_rce = policy_range_check(phase);
2571 
2572   bool should_align = policy_align(phase);
2573 
2574   // If we are not RCE'ing (iteration splitting) or aligning, then we do not
2575   // need a pre-loop.  We may still need to peel an initial iteration but
2576   // we will not need an unknown number of pre-iterations.
2577   //
2578   // Basically, if may_rce_align reports FALSE the first time through,
2579   // we will not be able to later do RCE or alignment on this loop.
2580   bool may_rce_align = !policy_peel_only(phase) || should_rce || should_align;
2581 
2582   // If we have any of these conditions (RCE, alignment, unrolling) met, then
2583   // we switch to the pre-/main-/post-loop model.  This model also covers
2584   // peeling.
2585   if (should_rce || should_align || should_unroll) {
2586     if (cl->is_normal_loop())  // Convert to 'pre/main/post' loops
2587       phase->insert_pre_post_loops(this,old_new, !may_rce_align);
2588 
2589     // Adjust the pre- and main-loop limits to let the pre and post loops run
2590     // with full checks, but the main-loop with no checks.  Remove said
2591     // checks from the main body.
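         // E.g. for a body containing the range check
         //   if (i < 0 || i >= a.length) deopt;
         // the pre-loop limit is raised until i >= 0 is assured and the
         // main-loop limit is clamped so that i < a.length always holds,
         // letting the main loop drop the check while the post-loop keeps
         // it for the tail iterations.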
2592     if (should_rce)
2593       phase->do_range_check(this,old_new);
2594 
2595     // Double loop body for unrolling.  Adjust the minimum-trip test (will do
2596     // twice as many iterations as before) and the main body limit (only do
2597     // an even number of trips).  If we are peeling, we might enable some RCE
2598     // and we would rather unroll the post-RCE'd loop, so do not unroll if
2599     // peeling.
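         // E.g. (schematic) one doubling of a unit-stride main loop:
         //   for (; i < main_limit; i++) body(i);
         // becomes
         //   for (; i < main_limit - 1; i += 2) { body(i); body(i + 1); }
         // with any odd leftover iteration falling to the post-loop.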
2600     if (should_unroll && !should_peel) {
2601       phase->do_unroll(this, old_new, true);
2602     }
2603 
2604     // Adjust the pre-loop limits to align the main body
2605     // iterations.
2606     if (should_align)
2607       Unimplemented();
2608 
2609   } else {                      // Else we have an unchanged counted loop
2610     if (should_peel)           // Might want to peel but do nothing else
2611       phase->do_peeling(this,old_new);
2612   }
2613   return true;
2614 }
2615 
2616 
2617 //=============================================================================
2618 //------------------------------iteration_split--------------------------------
2619 bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new ) {
2620   // Recursively iteration split nested loops
2621   if (_child && !_child->iteration_split(phase, old_new))
2622     return false;
2623 
2624   // Clean out prior deadwood
2625   DCE_loop_body();
2626 
2627 
2628   // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
2629   // Replace with a 1-in-10 exit guess.
2630   if (_parent /*not the root loop*/ &&
2631       !_irreducible &&
2632       // Also ignore the occasional dead backedge
2633       !tail()->is_top()) {
2634     adjust_loop_exit_prob(phase);
2635   }
2636 
2637   // Gate unrolling, RCE and peeling efforts.
2638   if (!_child &&                // If not an inner loop, do not split
2639       !_irreducible &&
2640       _allow_optimizations &&
2641       !tail()->is_top()) {     // Also ignore the occasional dead backedge
2642     if (!_has_call) {
2643       if (!iteration_split_impl(phase, old_new)) {
2644         return false;
2645       }
2646     } else if (policy_unswitching(phase)) {
2647       phase->do_unswitching(this, old_new);
2648     }
2649   }
2650 
2651   // Minor offset re-organization to remove loop-fallout uses of
2652   // trip counter when there was no major reshaping.
2653   phase->reorg_offsets(this);
2654 
2655   if (_next && !_next->iteration_split(phase, old_new))
2656     return false;
2657   return true;
2658 }
2659 
2660 
2661 //=============================================================================
2662 // Process all the loops in the loop tree and replace any fill
2663 // patterns with an intrinsic version.
2664 bool PhaseIdealLoop::do_intrinsify_fill() {
2665   bool changed = false;
2666   for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
2667     IdealLoopTree* lpt = iter.current();
2668     changed |= intrinsify_fill(lpt);
2669   }
2670   return changed;
2671 }
2672 
2673 
2674 // Examine an inner loop looking for a single store of an invariant
2675 // value in a unit-stride loop.
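     // Schematically, the pattern being matched is (in Java terms):
     //   for (int i = init; i < limit; i++) a[i] = v;   // v loop-invariant
     // which can be replaced wholesale by a call to an array-fill stub.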
2676 bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
2677                                      Node*& shift, Node*& con) {
2678   const char* msg = NULL;
2679   Node* msg_node = NULL;
2680 
2681   store_value = NULL;
2682   con = NULL;
2683   shift = NULL;
2684 
2685   // Process the loop looking for stores.  If there are multiple
2686   // stores or extra control flow, give up at this point.
2687   CountedLoopNode* head = lpt->_head->as_CountedLoop();
2688   for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
2689     Node* n = lpt->_body.at(i);
2690     if (n->outcnt() == 0) continue; // Ignore dead
2691     if (n->is_Store()) {
2692       if (store != NULL) {
2693         msg = "multiple stores";
2694         break;
2695       }
2696       int opc = n->Opcode();
2697       if (opc == Op_StoreP || opc == Op_StoreN || opc == Op_StoreNKlass || opc == Op_StoreCM) {
2698         msg = "oop fills not handled";
2699         break;
2700       }
2701       Node* value = n->in(MemNode::ValueIn);
2702       if (!lpt->is_invariant(value)) {
2703         msg  = "variant store value";
2704       } else if (!_igvn.type(n->in(MemNode::Address))->isa_aryptr()) {
2705         msg = "not array address";
2706       }
2707       store = n;
2708       store_value = value;
2709     } else if (n->is_If() && n != head->loopexit()) {
2710       msg = "extra control flow";
2711       msg_node = n;
2712     }
2713   }
2714 
2715   if (store == NULL) {
2716     // No store in loop
2717     return false;
2718   }
2719 
2720   if (msg == NULL && head->stride_con() != 1) {
2721     // could handle negative strides too
2722     if (head->stride_con() < 0) {
2723       msg = "negative stride";
2724     } else {
2725       msg = "non-unit stride";
2726     }
2727   }
2728 
2729   if (msg == NULL && !store->in(MemNode::Address)->is_AddP()) {
2730     msg = "can't handle store address";
2731     msg_node = store->in(MemNode::Address);
2732   }
2733 
2734   if (msg == NULL &&
2735       (!store->in(MemNode::Memory)->is_Phi() ||
2736        store->in(MemNode::Memory)->in(LoopNode::LoopBackControl) != store)) {
2737     msg = "store memory isn't proper phi";
2738     msg_node = store->in(MemNode::Memory);
2739   }
2740 
2741   // Make sure there is an appropriate fill routine
2742   BasicType t = store->as_Mem()->memory_type();
2743   const char* fill_name;
2744   if (msg == NULL &&
2745       StubRoutines::select_fill_function(t, false, fill_name) == NULL) {
2746     msg = "unsupported store";
2747     msg_node = store;
2748   }
2749 
2750   if (msg != NULL) {
2751 #ifndef PRODUCT
2752     if (TraceOptimizeFill) {
2753       tty->print_cr("not fill intrinsic candidate: %s", msg);
2754       if (msg_node != NULL) msg_node->dump();
2755     }
2756 #endif
2757     return false;
2758   }
2759 
2760   // Make sure the address expression can be handled.  It should be
2761   // head->phi * elsize + con.  head->phi might have a ConvI2L(CastII()).
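       // Schematically (LP64), unpack_offsets then yields elements such as
       //   { con, LShiftX(ConvI2L(CastII(phi)), log2(elsize)) }
       // where the ConvI2L and the range-check-dependent CastII may each be
       // absent, and byte-sized elements carry no shift at all.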
2762   Node* elements[4];
2763   Node* cast = NULL;
2764   Node* conv = NULL;
2765   bool found_index = false;
2766   int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements));
2767   for (int e = 0; e < count; e++) {
2768     Node* n = elements[e];
2769     if (n->is_Con() && con == NULL) {
2770       con = n;
2771     } else if (n->Opcode() == Op_LShiftX && shift == NULL) {
2772       Node* value = n->in(1);
2773 #ifdef _LP64
2774       if (value->Opcode() == Op_ConvI2L) {
2775         conv = value;
2776         value = value->in(1);
2777       }
2778       if (value->Opcode() == Op_CastII &&
2779           value->as_CastII()->has_range_check()) {
2780         // Skip range check dependent CastII nodes
2781         cast = value;
2782         value = value->in(1);
2783       }
2784 #endif
2785       if (value != head->phi()) {
2786         msg = "unhandled shift in address";
2787       } else {
2788         if (type2aelembytes(store->as_Mem()->memory_type(), true) != (1 << n->in(2)->get_int())) {
2789           msg = "scale doesn't match";
2790         } else {
2791           found_index = true;
2792           shift = n;
2793         }
2794       }
2795     } else if (n->Opcode() == Op_ConvI2L && conv == NULL) {
2796       conv = n;
2797       n = n->in(1);
2798       if (n->Opcode() == Op_CastII &&
2799           n->as_CastII()->has_range_check()) {
2800         // Skip range check dependent CastII nodes
2801         cast = n;
2802         n = n->in(1);
2803       }
2804       if (n == head->phi()) {
2805         found_index = true;
2806       } else {
2807         msg = "unhandled input to ConvI2L";
2808       }
2809     } else if (n == head->phi()) {
2810       // no shift, check below for allowed cases
2811       found_index = true;
2812     } else {
2813       msg = "unhandled node in address";
2814       msg_node = n;
2815     }
2816   }
2817 
2818   if (count == -1) {
2819     msg = "malformed address expression";
2820     msg_node = store;
2821   }
2822 
2823   if (!found_index) {
2824     msg = "missing use of index";
2825   }
2826 
2827   // Byte-sized items won't have a shift
2828   if (msg == NULL && shift == NULL && t != T_BYTE && t != T_BOOLEAN) {
2829     msg = "can't find shift";
2830     msg_node = store;
2831   }
2832 
2833   if (msg != NULL) {
2834 #ifndef PRODUCT
2835     if (TraceOptimizeFill) {
2836       tty->print_cr("not fill intrinsic: %s", msg);
2837       if (msg_node != NULL) msg_node->dump();
2838     }
2839 #endif
2840     return false;
2841   }
2842 
2843   // Now make sure all the other nodes in the loop can be handled
2844   VectorSet ok(Thread::current()->resource_area());
2845 
2846   // store related values are ok
2847   ok.set(store->_idx);
2848   ok.set(store->in(MemNode::Memory)->_idx);
2849 
2850   CountedLoopEndNode* loop_exit = head->loopexit();
2851   guarantee(loop_exit != NULL, "no loop exit node");
2852 
2853   // Loop structure is ok
2854   ok.set(head->_idx);
2855   ok.set(loop_exit->_idx);
2856   ok.set(head->phi()->_idx);
2857   ok.set(head->incr()->_idx);
2858   ok.set(loop_exit->cmp_node()->_idx);
2859   ok.set(loop_exit->in(1)->_idx);
2860 
2861   // Address elements are ok
2862   if (con)   ok.set(con->_idx);
2863   if (shift) ok.set(shift->_idx);
2864   if (cast)  ok.set(cast->_idx);
2865   if (conv)  ok.set(conv->_idx);
2866 
2867   for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
2868     Node* n = lpt->_body.at(i);
2869     if (n->outcnt() == 0) continue; // Ignore dead
2870     if (ok.test(n->_idx)) continue;
2871     // Backedge projection is ok
2872     if (n->is_IfTrue() && n->in(0) == loop_exit) continue;
2873     if (!n->is_AddP()) {
2874       msg = "unhandled node";
2875       msg_node = n;
2876       break;
2877     }
2878   }
2879 
2880   // Make sure no unexpected values are used outside the loop
2881   for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
2882     Node* n = lpt->_body.at(i);
2883     // These values can be replaced with other nodes if they are used
2884     // outside the loop.
2885     if (n == store || n == loop_exit || n == head->incr() || n == store->in(MemNode::Memory)) continue;
2886     for (SimpleDUIterator iter(n); iter.has_next(); iter.next()) {
2887       Node* use = iter.get();
2888       if (!lpt->_body.contains(use)) {
2889         msg = "node is used outside loop";
2890         // lpt->_body.dump();
2891         msg_node = n;
2892         break;
2893       }
2894     }
2895   }
2896 
2897 #ifdef ASSERT
2898   if (TraceOptimizeFill) {
2899     if (msg != NULL) {
2900       tty->print_cr("no fill intrinsic: %s", msg);
2901       if (msg_node != NULL) msg_node->dump();
2902     } else {
2903       tty->print_cr("fill intrinsic for:");
2904     }
2905     store->dump();
2906     if (Verbose) {
2907       lpt->_body.dump();
2908     }
2909   }
2910 #endif
2911 
2912   return msg == NULL;
2913 }
2914 
2915 
2916 
2917 bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
2918   // Only for counted inner loops
2919   if (!lpt->is_counted() || !lpt->is_inner()) {
2920     return false;
2921   }
2922 
2923   // Must have constant stride
2924   CountedLoopNode* head = lpt->_head->as_CountedLoop();
2925   if (!head->is_valid_counted_loop() || !head->is_normal_loop()) {
2926     return false;
2927   }
2928 
2929   // Check that the body only contains a store of a loop invariant
2930   // value that is indexed by the loop phi.
2931   Node* store = NULL;
2932   Node* store_value = NULL;
2933   Node* shift = NULL;
2934   Node* offset = NULL;
2935   if (!match_fill_loop(lpt, store, store_value, shift, offset)) {
2936     return false;
2937   }
2938 
2939 #ifndef PRODUCT
2940   if (TraceLoopOpts) {
2941     tty->print("ArrayFill    ");
2942     lpt->dump_head();
2943   }
2944 #endif
2945 
2946   // Now replace the whole loop body by a call to a fill routine that
2947   // covers the same region as the loop.
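       // Schematically, the matched loop
       //   for (int i = init; i < limit; i++) a[i] = v;
       // is rewritten as a single leaf call to a fill stub, roughly
       //   fill(&a[init], v, limit - init);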
2948   Node* base = store->in(MemNode::Address)->as_AddP()->in(AddPNode::Base);
2949 
2950   // Build an expression for the beginning of the copy region
2951   Node* index = head->init_trip();
2952 #ifdef _LP64
2953   index = new ConvI2LNode(index);
2954   _igvn.register_new_node_with_optimizer(index);
2955 #endif
2956   if (shift != NULL) {
2957     // byte arrays don't require a shift but others do.
2958     index = new LShiftXNode(index, shift->in(2));
2959     _igvn.register_new_node_with_optimizer(index);
2960   }
2961   index = new AddPNode(base, base, index);
2962   _igvn.register_new_node_with_optimizer(index);
2963   Node* from = new AddPNode(base, index, offset);
2964   _igvn.register_new_node_with_optimizer(from);
2965   // Compute the number of elements to copy
2966   Node* len = new SubINode(head->limit(), head->init_trip());
2967   _igvn.register_new_node_with_optimizer(len);
2968 
2969   BasicType t = store->as_Mem()->memory_type();
2970   bool aligned = false;
2971   if (offset != NULL && head->init_trip()->is_Con()) {
2972     int element_size = type2aelembytes(t);
2973     aligned = (offset->find_intptr_t_type()->get_con() + head->init_trip()->get_int() * element_size) % HeapWordSize == 0;
2974   }
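       // E.g. (on a 64-bit VM where HeapWordSize is 8): an int array with
       // offset con 16 and init_trip 2 gives (16 + 2 * 4) % 8 == 0, so an
       // aligned fill variant can be selected.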
2975 
2976   // Build a call to the fill routine
2977   const char* fill_name;
2978   address fill = StubRoutines::select_fill_function(t, aligned, fill_name);
2979   assert(fill != NULL, "what?");
2980 
2981   // Convert float/double to int/long for fill routines
2982   if (t == T_FLOAT) {
2983     store_value = new MoveF2INode(store_value);
2984     _igvn.register_new_node_with_optimizer(store_value);
2985   } else if (t == T_DOUBLE) {
2986     store_value = new MoveD2LNode(store_value);
2987     _igvn.register_new_node_with_optimizer(store_value);
2988   }
2989 
2990   Node* mem_phi = store->in(MemNode::Memory);
2991   Node* result_ctrl;
2992   Node* result_mem;
2993   const TypeFunc* call_type = OptoRuntime::array_fill_Type();
2994   CallLeafNode *call = new CallLeafNoFPNode(call_type, fill,
2995                                             fill_name, TypeAryPtr::get_array_body_type(t));
2996   uint cnt = 0;
2997   call->init_req(TypeFunc::Parms + cnt++, from);
2998   call->init_req(TypeFunc::Parms + cnt++, store_value);
2999 #ifdef _LP64
3000   len = new ConvI2LNode(len);
3001   _igvn.register_new_node_with_optimizer(len);
3002 #endif
3003   call->init_req(TypeFunc::Parms + cnt++, len);
3004 #ifdef _LP64
3005   call->init_req(TypeFunc::Parms + cnt++, C->top());
3006 #endif
3007   call->init_req(TypeFunc::Control,   head->init_control());
3008   call->init_req(TypeFunc::I_O,       C->top());       // Does no I/O.
3009   call->init_req(TypeFunc::Memory,    mem_phi->in(LoopNode::EntryControl));
3010   call->init_req(TypeFunc::ReturnAdr, C->start()->proj_out(TypeFunc::ReturnAdr));
3011   call->init_req(TypeFunc::FramePtr,  C->start()->proj_out(TypeFunc::FramePtr));
3012   _igvn.register_new_node_with_optimizer(call);
3013   result_ctrl = new ProjNode(call,TypeFunc::Control);
3014   _igvn.register_new_node_with_optimizer(result_ctrl);
3015   result_mem = new ProjNode(call,TypeFunc::Memory);
3016   _igvn.register_new_node_with_optimizer(result_mem);
3017 
3018 /* Disable following optimization until proper fix (add missing checks).
3019 
3020   // If this fill is tightly coupled to an allocation and overwrites
3021   // the whole body, allow it to take over the zeroing.
3022   AllocateNode* alloc = AllocateNode::Ideal_allocation(base, this);
3023   if (alloc != NULL && alloc->is_AllocateArray()) {
3024     Node* length = alloc->as_AllocateArray()->Ideal_length();
3025     if (head->limit() == length &&
3026         head->init_trip() == _igvn.intcon(0)) {
3027       if (TraceOptimizeFill) {
3028         tty->print_cr("Eliminated zeroing in allocation");
3029       }
3030       alloc->maybe_set_complete(&_igvn);
3031     } else {
3032 #ifdef ASSERT
3033       if (TraceOptimizeFill) {
3034         tty->print_cr("filling array but bounds don't match");
3035         alloc->dump();
3036         head->init_trip()->dump();
3037         head->limit()->dump();
3038         length->dump();
3039       }
3040 #endif
3041     }
3042   }
3043 */
3044 
3045   // Redirect the old control and memory edges that are outside the loop.
3046   Node* exit = head->loopexit()->proj_out(0);
3047   // Sometimes the memory phi of the head is used as the outgoing
3048   // state of the loop.  It's safe in this case to replace it with the
3049   // result_mem.
3050   _igvn.replace_node(store->in(MemNode::Memory), result_mem);
3051   lazy_replace(exit, result_ctrl);
3052   _igvn.replace_node(store, result_mem);
3053   // Any uses of the increment outside of the loop become the loop limit.
3054   _igvn.replace_node(head->incr(), head->limit());
3055 
3056   // Disconnect the head from the loop.
3057   for (uint i = 0; i < lpt->_body.size(); i++) {
3058     Node* n = lpt->_body.at(i);
3059     _igvn.replace_node(n, C->top());
3060   }
3061 
3062   return true;
3063 }