1 /*
   2  * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_OPTO_LOOPNODE_HPP
  26 #define SHARE_VM_OPTO_LOOPNODE_HPP
  27 
  28 #include "opto/cfgnode.hpp"
  29 #include "opto/multnode.hpp"
  30 #include "opto/phaseX.hpp"
  31 #include "opto/subnode.hpp"
  32 #include "opto/type.hpp"
  33 
  34 class CmpNode;
  35 class CountedLoopEndNode;
  36 class CountedLoopNode;
  37 class IdealLoopTree;
  38 class LoopNode;
  39 class Node;
  40 class PhaseIdealLoop;
  41 class CountedLoopReserveKit;
  42 class VectorSet;
  43 class Invariance;
  44 struct small_cache;
  45 
  46 //
  47 //                  I D E A L I Z E D   L O O P S
  48 //
  49 // Idealized loops are the set of loops I perform more interesting
  50 // transformations on, beyond simple hoisting.
  51 
  52 //------------------------------LoopNode---------------------------------------
  53 // Simple loop header.  Fall in path on left, loop-back path on right.
  54 class LoopNode : public RegionNode {
  55   // Size is bigger to hold the flags.  However, the flags do not change
  56   // the semantics so it does not appear in the hash & cmp functions.
  57   virtual uint size_of() const { return sizeof(*this); }
  58 protected:
  59   short _loop_flags;
  60   // Names for flag bitfields
  61   enum { Normal=0, Pre=1, Main=2, Post=3, PreMainPostFlagsMask=3,
  62          MainHasNoPreLoop=4,
  63          HasExactTripCount=8,
  64          InnerLoop=16,
  65          PartialPeelLoop=32,
  66          PartialPeelFailed=64,
  67          HasReductions=128,
  68          WasSlpAnalyzed=256,
  69          PassedSlpAnalysis=512,
  70          DoUnrollOnly=1024 };
  71   char _unswitch_count;
  72   enum { _unswitch_max=3 };
  73 
  74 public:
  75   // Names for edge indices
  76   enum { Self=0, EntryControl, LoopBackControl };
  77 
  78   int is_inner_loop() const { return _loop_flags & InnerLoop; }
  79   void set_inner_loop() { _loop_flags |= InnerLoop; }
  80 
  81   int is_partial_peel_loop() const { return _loop_flags & PartialPeelLoop; }
  82   void set_partial_peel_loop() { _loop_flags |= PartialPeelLoop; }
  83   int partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; }
  84   void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; }
  85   void mark_has_reductions() { _loop_flags |= HasReductions; }
  86   void mark_was_slp() { _loop_flags |= WasSlpAnalyzed; }
  87   void mark_passed_slp() { _loop_flags |= PassedSlpAnalysis; }
  88   void mark_do_unroll_only() { _loop_flags |= DoUnrollOnly; }
  89 
  90   int unswitch_max() { return _unswitch_max; }
  91   int unswitch_count() { return _unswitch_count; }
  92   void set_unswitch_count(int val) {
  93     assert (val <= unswitch_max(), "too many unswitches");
  94     _unswitch_count = val;
  95   }
  96 
  97   LoopNode( Node *entry, Node *backedge ) : RegionNode(3), _loop_flags(0), _unswitch_count(0) {
  98     init_class_id(Class_Loop);
  99     init_req(EntryControl, entry);
 100     init_req(LoopBackControl, backedge);
 101   }
 102 
 103   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 104   virtual int Opcode() const;
 105   bool can_be_counted_loop(PhaseTransform* phase) const {
 106     return req() == 3 && in(0) != NULL &&
 107       in(1) != NULL && phase->type(in(1)) != Type::TOP &&
 108       in(2) != NULL && phase->type(in(2)) != Type::TOP;
 109   }
 110   bool is_valid_counted_loop() const;
 111 #ifndef PRODUCT
 112   virtual void dump_spec(outputStream *st) const;
 113 #endif
 114 };
 115 
 116 //------------------------------Counted Loops----------------------------------
 117 // Counted loops are all trip-counted loops, with exactly 1 trip-counter exit
 118 // path (and maybe some other exit paths).  The trip-counter exit is always
  119 // last in the loop.  The trip-counter has to stride by a constant;
 120 // the exit value is also loop invariant.
 121 
 122 // CountedLoopNodes and CountedLoopEndNodes come in matched pairs.  The
 123 // CountedLoopNode has the incoming loop control and the loop-back-control
 124 // which is always the IfTrue before the matching CountedLoopEndNode.  The
 125 // CountedLoopEndNode has an incoming control (possibly not the
 126 // CountedLoopNode if there is control flow in the loop), the post-increment
 127 // trip-counter value, and the limit.  The trip-counter value is always of
 128 // the form (Op old-trip-counter stride).  The old-trip-counter is produced
 129 // by a Phi connected to the CountedLoopNode.  The stride is constant.
  130 // The Op is any commutative opcode, including Add, Mul, Xor.  The
 131 // CountedLoopEndNode also takes in the loop-invariant limit value.
 132 
 133 // From a CountedLoopNode I can reach the matching CountedLoopEndNode via the
 134 // loop-back control.  From CountedLoopEndNodes I can reach CountedLoopNodes
 135 // via the old-trip-counter from the Op node.
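//
// A minimal sketch of that navigation, using only the accessors declared on
// CountedLoopNode below (hedged: it assumes the loop has already been matched
// into the CountedLoop/CountedLoopEnd shape, otherwise the accessors return
// NULL):
//
//   CountedLoopNode*    cl = n->as_CountedLoop();
//   CountedLoopEndNode* le = cl->loopexit();    // reached via the loop-back control
//   if (le != NULL) {
//     Node* phi    = cl->phi();        // the old-trip-counter Phi
//     Node* incr   = cl->incr();       // the post-increment trip-counter (the Op)
//     Node* limit  = cl->limit();      // the loop-invariant limit
//     int   stride = cl->stride_con(); // the constant stride, if stride_is_con()
//   }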
 136 
 137 //------------------------------CountedLoopNode--------------------------------
 138 // CountedLoopNodes head simple counted loops.  CountedLoopNodes have as
 139 // inputs the incoming loop-start control and the loop-back control, so they
 140 // act like RegionNodes.  They also take in the initial trip counter, the
 141 // loop-invariant stride and the loop-invariant limit value.  CountedLoopNodes
 142 // produce a loop-body control and the trip counter value.  Since
 143 // CountedLoopNodes behave like RegionNodes I still have a standard CFG model.
 144 
 145 class CountedLoopNode : public LoopNode {
 146   // Size is bigger to hold _main_idx.  However, _main_idx does not change
 147   // the semantics so it does not appear in the hash & cmp functions.
 148   virtual uint size_of() const { return sizeof(*this); }
 149 
 150   // For Pre- and Post-loops during debugging ONLY, this holds the index of
 151   // the Main CountedLoop.  Used to assert that we understand the graph shape.
 152   node_idx_t _main_idx;
 153 
 154   // Known trip count calculated by compute_exact_trip_count()
 155   uint  _trip_count;
 156 
 157   // Expected trip count from profile data
 158   float _profile_trip_cnt;
 159 
 160   // Log2 of original loop bodies in unrolled loop
 161   int _unrolled_count_log2;
 162 
 163   // Node count prior to last unrolling - used to decide if
 164   // unroll,optimize,unroll,optimize,... is making progress
 165   int _node_count_before_unroll;
 166 
 167   // If slp analysis is performed we record the maximum
 168   // vector mapped unroll factor here
 169   int _slp_maximum_unroll_factor;
 170 
 171 public:
 172   CountedLoopNode( Node *entry, Node *backedge )
 173     : LoopNode(entry, backedge), _main_idx(0), _trip_count(max_juint),
 174       _profile_trip_cnt(COUNT_UNKNOWN), _unrolled_count_log2(0),
 175       _node_count_before_unroll(0), _slp_maximum_unroll_factor(0) {
 176     init_class_id(Class_CountedLoop);
 177     // Initialize _trip_count to the largest possible value.
 178     // Will be reset (lower) if the loop's trip count is known.
 179   }
 180 
 181   virtual int Opcode() const;
 182   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 183 
 184   Node *init_control() const { return in(EntryControl); }
 185   Node *back_control() const { return in(LoopBackControl); }
 186   CountedLoopEndNode *loopexit() const;
 187   Node *init_trip() const;
 188   Node *stride() const;
 189   int   stride_con() const;
 190   bool  stride_is_con() const;
 191   Node *limit() const;
 192   Node *incr() const;
 193   Node *phi() const;
 194 
 195   // Match increment with optional truncation
 196   static Node* match_incr_with_optional_truncation(Node* expr, Node** trunc1, Node** trunc2, const TypeInt** trunc_type);
 197 
 198   // A 'main' loop has a pre-loop and a post-loop.  The 'main' loop
  199   // can run a few iterations short and may start a few iterations in.
 200   // It will be RCE'd and unrolled and aligned.
 201 
 202   // A following 'post' loop will run any remaining iterations.  Used
 203   // during Range Check Elimination, the 'post' loop will do any final
 204   // iterations with full checks.  Also used by Loop Unrolling, where
 205   // the 'post' loop will do any epilog iterations needed.  Basically,
 206   // a 'post' loop can not profitably be further unrolled or RCE'd.
 207 
 208   // A preceding 'pre' loop will run at least 1 iteration (to do peeling),
 209   // it may do under-flow checks for RCE and may do alignment iterations
 210   // so the following main loop 'knows' that it is striding down cache
 211   // lines.
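  //
  // Roughly, as a source-level sketch of the intent (not the exact IR shape):
  //
  //   for (i = init; i < limit; i += stride) { body; }            // original
  //
  // is split into
  //
  //   for (i = init; i < pre_limit;  i += stride)   { body; }     // 'pre'  - full checks
  //   for (        ; i < main_limit; i += stride*U) { body x U; } // 'main' - RCE'd, unrolled by U
  //   for (        ; i < limit;      i += stride)   { body; }     // 'post' - full checks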
 212 
 213   // A 'main' loop that is ONLY unrolled or peeled, never RCE'd or
  214   // Aligned, may be missing its pre-loop.
 215   int is_normal_loop   () const { return (_loop_flags&PreMainPostFlagsMask) == Normal; }
 216   int is_pre_loop      () const { return (_loop_flags&PreMainPostFlagsMask) == Pre;    }
 217   int is_main_loop     () const { return (_loop_flags&PreMainPostFlagsMask) == Main;   }
 218   int is_post_loop     () const { return (_loop_flags&PreMainPostFlagsMask) == Post;   }
 219   int is_reduction_loop() const { return (_loop_flags&HasReductions) == HasReductions; }
 220   int was_slp_analyzed () const { return (_loop_flags&WasSlpAnalyzed) == WasSlpAnalyzed; }
 221   int has_passed_slp   () const { return (_loop_flags&PassedSlpAnalysis) == PassedSlpAnalysis; }
 222   int do_unroll_only      () const { return (_loop_flags&DoUnrollOnly) == DoUnrollOnly; }
 223   int is_main_no_pre_loop() const { return _loop_flags & MainHasNoPreLoop; }
 224   void set_main_no_pre_loop() { _loop_flags |= MainHasNoPreLoop; }
 225 
 226   int main_idx() const { return _main_idx; }
 227 
 228 
 229   void set_pre_loop  (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Pre ; _main_idx = main->_idx; }
 230   void set_main_loop (                     ) { assert(is_normal_loop(),""); _loop_flags |= Main;                         }
 231   void set_post_loop (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Post; _main_idx = main->_idx; }
 232   void set_normal_loop(                    ) { _loop_flags &= ~PreMainPostFlagsMask; }
 233 
 234   void set_trip_count(uint tc) { _trip_count = tc; }
 235   uint trip_count()            { return _trip_count; }
 236 
 237   bool has_exact_trip_count() const { return (_loop_flags & HasExactTripCount) != 0; }
 238   void set_exact_trip_count(uint tc) {
 239     _trip_count = tc;
 240     _loop_flags |= HasExactTripCount;
 241   }
 242   void set_nonexact_trip_count() {
 243     _loop_flags &= ~HasExactTripCount;
 244   }
 245   void set_notpassed_slp() {
 246     _loop_flags &= ~PassedSlpAnalysis;
 247   }
 248 
 249   void set_profile_trip_cnt(float ptc) { _profile_trip_cnt = ptc; }
 250   float profile_trip_cnt()             { return _profile_trip_cnt; }
 251 
 252   void double_unrolled_count() { _unrolled_count_log2++; }
 253   int  unrolled_count()        { return 1 << MIN2(_unrolled_count_log2, BitsPerInt-3); }
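  // For example (a worked reading of the two methods above): after three calls
  // to double_unrolled_count() the body stands for 2^3 = 8 original iterations
  // and unrolled_count() returns 8, capped by the MIN2 above.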
 254 
 255   void set_node_count_before_unroll(int ct)  { _node_count_before_unroll = ct; }
 256   int  node_count_before_unroll()            { return _node_count_before_unroll; }
 257   void set_slp_max_unroll(int unroll_factor) { _slp_maximum_unroll_factor = unroll_factor; }
 258   int  slp_max_unroll() const                { return _slp_maximum_unroll_factor; }
 259 
 260 #ifndef PRODUCT
 261   virtual void dump_spec(outputStream *st) const;
 262 #endif
 263 };
 264 
 265 //------------------------------CountedLoopEndNode-----------------------------
 266 // CountedLoopEndNodes end simple trip counted loops.  They act much like
 267 // IfNodes.
 268 class CountedLoopEndNode : public IfNode {
 269 public:
 270   enum { TestControl, TestValue };
 271 
 272   CountedLoopEndNode( Node *control, Node *test, float prob, float cnt )
 273     : IfNode( control, test, prob, cnt) {
 274     init_class_id(Class_CountedLoopEnd);
 275   }
 276   virtual int Opcode() const;
 277 
 278   Node *cmp_node() const            { return (in(TestValue)->req() >=2) ? in(TestValue)->in(1) : NULL; }
 279   Node *incr() const                { Node *tmp = cmp_node(); return (tmp && tmp->req()==3) ? tmp->in(1) : NULL; }
 280   Node *limit() const               { Node *tmp = cmp_node(); return (tmp && tmp->req()==3) ? tmp->in(2) : NULL; }
 281   Node *stride() const              { Node *tmp = incr    (); return (tmp && tmp->req()==3) ? tmp->in(2) : NULL; }
 282   Node *phi() const                 { Node *tmp = incr    (); return (tmp && tmp->req()==3) ? tmp->in(1) : NULL; }
 283   Node *init_trip() const           { Node *tmp = phi     (); return (tmp && tmp->req()==3) ? tmp->in(1) : NULL; }
 284   int stride_con() const;
 285   bool stride_is_con() const        { Node *tmp = stride  (); return (tmp != NULL && tmp->is_Con()); }
 286   BoolTest::mask test_trip() const  { return in(TestValue)->as_Bool()->_test._test; }
 287   CountedLoopNode *loopnode() const {
 288     // The CountedLoopNode that goes with this CountedLoopEndNode may
 289     // have been optimized out by the IGVN so be cautious with the
 290     // pattern matching on the graph
 291     if (phi() == NULL) {
 292       return NULL;
 293     }
 294     assert(phi()->is_Phi(), "should be PhiNode");
 295     Node *ln = phi()->in(0);
 296     if (ln->is_CountedLoop() && ln->as_CountedLoop()->loopexit() == this) {
 297       return (CountedLoopNode*)ln;
 298     }
 299     return NULL;
 300   }
 301 
 302 #ifndef PRODUCT
 303   virtual void dump_spec(outputStream *st) const;
 304 #endif
 305 };
 306 
 307 
 308 inline CountedLoopEndNode *CountedLoopNode::loopexit() const {
 309   Node *bc = back_control();
 310   if( bc == NULL ) return NULL;
 311   Node *le = bc->in(0);
 312   if( le->Opcode() != Op_CountedLoopEnd )
 313     return NULL;
 314   return (CountedLoopEndNode*)le;
 315 }
 316 inline Node *CountedLoopNode::init_trip() const { return loopexit() ? loopexit()->init_trip() : NULL; }
 317 inline Node *CountedLoopNode::stride() const { return loopexit() ? loopexit()->stride() : NULL; }
 318 inline int CountedLoopNode::stride_con() const { return loopexit() ? loopexit()->stride_con() : 0; }
 319 inline bool CountedLoopNode::stride_is_con() const { return loopexit() && loopexit()->stride_is_con(); }
 320 inline Node *CountedLoopNode::limit() const { return loopexit() ? loopexit()->limit() : NULL; }
 321 inline Node *CountedLoopNode::incr() const { return loopexit() ? loopexit()->incr() : NULL; }
 322 inline Node *CountedLoopNode::phi() const { return loopexit() ? loopexit()->phi() : NULL; }
 323 
 324 //------------------------------LoopLimitNode-----------------------------
 325 // Counted Loop limit node which represents exact final iterator value:
 326 // trip_count = (limit - init_trip + stride - 1)/stride
  327 // final_value = trip_count * stride + init_trip.
  328 // Use HW instructions to calculate it when the computation can overflow the integer range.
  329 // Note that final_value should fit into an integer since the counted loop has the
  330 // limit check: limit <= max_int-stride.
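// A small worked example of the two formulas above (positive stride assumed):
// with init_trip = 0, limit = 10 and stride = 3,
//   trip_count  = (10 - 0 + 3 - 1) / 3 = 4    (iterations at 0, 3, 6, 9)
//   final_value = 4 * 3 + 0         = 12      (the exact final iterator value)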
 331 class LoopLimitNode : public Node {
 332   enum { Init=1, Limit=2, Stride=3 };
 333  public:
 334   LoopLimitNode( Compile* C, Node *init, Node *limit, Node *stride ) : Node(0,init,limit,stride) {
 335     // Put it on the Macro nodes list to optimize during macro nodes expansion.
 336     init_flags(Flag_is_macro);
 337     C->add_macro_node(this);
 338   }
 339   virtual int Opcode() const;
 340   virtual const Type *bottom_type() const { return TypeInt::INT; }
 341   virtual uint ideal_reg() const { return Op_RegI; }
 342   virtual const Type *Value( PhaseTransform *phase ) const;
 343   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 344   virtual Node *Identity( PhaseTransform *phase );
 345 };
 346 
 347 // -----------------------------IdealLoopTree----------------------------------
 348 class IdealLoopTree : public ResourceObj {
 349 public:
 350   IdealLoopTree *_parent;       // Parent in loop tree
 351   IdealLoopTree *_next;         // Next sibling in loop tree
 352   IdealLoopTree *_child;        // First child in loop tree
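  // A minimal sketch of walking these links directly (assuming it is started
  // at the root); the LoopTreeIterator declared at the end of this file does
  // the same preorder, left-to-right walk:
  //
  //   void walk(IdealLoopTree* lpt) {
  //     // ... visit lpt ...
  //     for (IdealLoopTree* kid = lpt->_child; kid != NULL; kid = kid->_next) {
  //       walk(kid);               // recurse into nested loops
  //     }
  //   }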
 353 
 354   // The head-tail backedge defines the loop.
 355   // If tail is NULL then this loop has multiple backedges as part of the
 356   // same loop.  During cleanup I'll peel off the multiple backedges; merge
 357   // them at the loop bottom and flow 1 real backedge into the loop.
 358   Node *_head;                  // Head of loop
 359   Node *_tail;                  // Tail of loop
 360   inline Node *tail();          // Handle lazy update of _tail field
 361   PhaseIdealLoop* _phase;
 362   int _local_loop_unroll_limit;
 363   int _local_loop_unroll_factor;
 364 
 365   Node_List _body;              // Loop body for inner loops
 366 
 367   uint8_t _nest;                // Nesting depth
 368   uint8_t _irreducible:1,       // True if irreducible
 369           _has_call:1,          // True if has call safepoint
 370           _has_sfpt:1,          // True if has non-call safepoint
 371           _rce_candidate:1;     // True if candidate for range check elimination
 372 
 373   Node_List* _safepts;          // List of safepoints in this loop
  374   Node_List* _required_safept;  // An inner loop cannot delete these safepts;
 375   bool  _allow_optimizations;   // Allow loop optimizations
 376 
 377   IdealLoopTree( PhaseIdealLoop* phase, Node *head, Node *tail )
 378     : _parent(0), _next(0), _child(0),
 379       _head(head), _tail(tail),
 380       _phase(phase),
 381       _safepts(NULL),
 382       _required_safept(NULL),
 383       _allow_optimizations(true),
 384       _nest(0), _irreducible(0), _has_call(0), _has_sfpt(0), _rce_candidate(0),
 385       _local_loop_unroll_limit(0), _local_loop_unroll_factor(0)
 386   { }
 387 
 388   // Is 'l' a member of 'this'?
 389   bool is_member(const IdealLoopTree *l) const; // Test for nested membership
 390 
 391   // Set loop nesting depth.  Accumulate has_call bits.
 392   int set_nest( uint depth );
 393 
 394   // Split out multiple fall-in edges from the loop header.  Move them to a
 395   // private RegionNode before the loop.  This becomes the loop landing pad.
 396   void split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt );
 397 
 398   // Split out the outermost loop from this shared header.
 399   void split_outer_loop( PhaseIdealLoop *phase );
 400 
 401   // Merge all the backedges from the shared header into a private Region.
 402   // Feed that region as the one backedge to this loop.
 403   void merge_many_backedges( PhaseIdealLoop *phase );
 404 
 405   // Split shared headers and insert loop landing pads.
 406   // Insert a LoopNode to replace the RegionNode.
 407   // Returns TRUE if loop tree is structurally changed.
 408   bool beautify_loops( PhaseIdealLoop *phase );
 409 
 410   // Perform optimization to use the loop predicates for null checks and range checks.
 411   // Applies to any loop level (not just the innermost one)
 412   bool loop_predication( PhaseIdealLoop *phase);
 413 
 414   // Perform iteration-splitting on inner loops.  Split iterations to
 415   // avoid range checks or one-shot null checks.  Returns false if the
 416   // current round of loop opts should stop.
 417   bool iteration_split( PhaseIdealLoop *phase, Node_List &old_new );
 418 
 419   // Driver for various flavors of iteration splitting.  Returns false
 420   // if the current round of loop opts should stop.
 421   bool iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new );
 422 
 423   // Given dominators, try to find loops with calls that must always be
 424   // executed (call dominates loop tail).  These loops do not need non-call
 425   // safepoints (ncsfpt).
 426   void check_safepts(VectorSet &visited, Node_List &stack);
 427 
 428   // Allpaths backwards scan from loop tail, terminating each path at first safepoint
 429   // encountered.
 430   void allpaths_check_safepts(VectorSet &visited, Node_List &stack);
 431 
 432   // Convert to counted loops where possible
 433   void counted_loop( PhaseIdealLoop *phase );
 434 
 435   // Check for Node being a loop-breaking test
 436   Node *is_loop_exit(Node *iff) const;
 437 
 438   // Returns true if ctrl is executed on every complete iteration
 439   bool dominates_backedge(Node* ctrl);
 440 
 441   // Remove simplistic dead code from loop body
 442   void DCE_loop_body();
 443 
 444   // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
 445   // Replace with a 1-in-10 exit guess.
 446   void adjust_loop_exit_prob( PhaseIdealLoop *phase );
 447 
 448   // Return TRUE or FALSE if the loop should never be RCE'd or aligned.
 449   // Useful for unrolling loops with NO array accesses.
 450   bool policy_peel_only( PhaseIdealLoop *phase ) const;
 451 
 452   // Return TRUE or FALSE if the loop should be unswitched -- clone
 453   // loop with an invariant test
 454   bool policy_unswitching( PhaseIdealLoop *phase ) const;
 455 
 456   // Micro-benchmark spamming.  Remove empty loops.
 457   bool policy_do_remove_empty_loop( PhaseIdealLoop *phase );
 458 
 459   // Convert one iteration loop into normal code.
 460   bool policy_do_one_iteration_loop( PhaseIdealLoop *phase );
 461 
 462   // Return TRUE or FALSE if the loop should be peeled or not.  Peel if we can
 463   // make some loop-invariant test (usually a null-check) happen before the
 464   // loop.
 465   bool policy_peeling( PhaseIdealLoop *phase ) const;
 466 
 467   // Return TRUE or FALSE if the loop should be maximally unrolled. Stash any
 468   // known trip count in the counted loop node.
 469   bool policy_maximally_unroll( PhaseIdealLoop *phase ) const;
 470 
 471   // Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if
 472   // the loop is a CountedLoop and the body is small enough.
 473   bool policy_unroll(PhaseIdealLoop *phase);
 474 
 475   // Loop analyses to map to a maximal superword unrolling for vectorization.
 476   void policy_unroll_slp_analysis(CountedLoopNode *cl, PhaseIdealLoop *phase, int future_unroll_ct);
 477 
 478   // Return TRUE or FALSE if the loop should be range-check-eliminated.
 479   // Gather a list of IF tests that are dominated by iteration splitting;
 480   // also gather the end of the first split and the start of the 2nd split.
 481   bool policy_range_check( PhaseIdealLoop *phase ) const;
 482 
 483   // Return TRUE or FALSE if the loop should be cache-line aligned.
 484   // Gather the expression that does the alignment.  Note that only
 485   // one array base can be aligned in a loop (unless the VM guarantees
 486   // mutual alignment).  Note that if we vectorize short memory ops
 487   // into longer memory ops, we may want to increase alignment.
 488   bool policy_align( PhaseIdealLoop *phase ) const;
 489 
 490   // Return TRUE if "iff" is a range check.
 491   bool is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const;
 492 
 493   // Compute loop exact trip count if possible
 494   void compute_exact_trip_count( PhaseIdealLoop *phase );
 495 
 496   // Compute loop trip count from profile data
 497   void compute_profile_trip_cnt( PhaseIdealLoop *phase );
 498 
 499   // Reassociate invariant expressions.
 500   void reassociate_invariants(PhaseIdealLoop *phase);
 501   // Reassociate invariant add and subtract expressions.
 502   Node* reassociate_add_sub(Node* n1, PhaseIdealLoop *phase);
 503   // Return nonzero index of invariant operand if invariant and variant
 504   // are combined with an Add or Sub. Helper for reassociate_invariants.
 505   int is_invariant_addition(Node* n, PhaseIdealLoop *phase);
 506 
 507   // Return true if n is invariant
 508   bool is_invariant(Node* n) const;
 509 
 510   // Put loop body on igvn work list
 511   void record_for_igvn();
 512 
 513   bool is_loop()    { return !_irreducible && _tail && !_tail->is_top(); }
 514   bool is_inner()   { return is_loop() && _child == NULL; }
 515   bool is_counted() { return is_loop() && _head != NULL && _head->is_CountedLoop(); }
 516 
 517   void remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *phase);
 518 
 519 #ifndef PRODUCT
 520   void dump_head( ) const;      // Dump loop head only
 521   void dump() const;            // Dump this loop recursively
 522   void verify_tree(IdealLoopTree *loop, const IdealLoopTree *parent) const;
 523 #endif
 524 
 525 };
 526 
 527 // -----------------------------PhaseIdealLoop---------------------------------
 528 // Computes the mapping from Nodes to IdealLoopTrees.  Organizes IdealLoopTrees into a
 529 // loop tree.  Drives the loop-based transformations on the ideal graph.
 530 class PhaseIdealLoop : public PhaseTransform {
 531   friend class IdealLoopTree;
 532   friend class SuperWord;
 533   friend class CountedLoopReserveKit;
 534 
 535   // Pre-computed def-use info
 536   PhaseIterGVN &_igvn;
 537 
 538   // Head of loop tree
 539   IdealLoopTree *_ltree_root;
 540 
 541   // Array of pre-order numbers, plus post-visited bit.
 542   // ZERO for not pre-visited.  EVEN for pre-visited but not post-visited.
 543   // ODD for post-visited.  Other bits are the pre-order number.
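  // For example, a node with pre-order number 7 is stored as 14 (7 << 1, see
  // set_preorder_visited() below) while only pre-visited, and becomes 15 once
  // set_postvisited() ORs in the low bit.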
 544   uint *_preorders;
 545   uint _max_preorder;
 546 
 547   const PhaseIdealLoop* _verify_me;
 548   bool _verify_only;
 549 
 550   // Allocate _preorders[] array
 551   void allocate_preorders() {
 552     _max_preorder = C->unique()+8;
 553     _preorders = NEW_RESOURCE_ARRAY(uint, _max_preorder);
 554     memset(_preorders, 0, sizeof(uint) * _max_preorder);
 555   }
 556 
  557   // Reallocate (and reset) the _preorders[] array
 558   void reallocate_preorders() {
 559     if ( _max_preorder < C->unique() ) {
 560       _preorders = REALLOC_RESOURCE_ARRAY(uint, _preorders, _max_preorder, C->unique());
 561       _max_preorder = C->unique();
 562     }
 563     memset(_preorders, 0, sizeof(uint) * _max_preorder);
 564   }
 565 
 566   // Check to grow _preorders[] array for the case when build_loop_tree_impl()
 567   // adds new nodes.
 568   void check_grow_preorders( ) {
 569     if ( _max_preorder < C->unique() ) {
 570       uint newsize = _max_preorder<<1;  // double size of array
 571       _preorders = REALLOC_RESOURCE_ARRAY(uint, _preorders, _max_preorder, newsize);
 572       memset(&_preorders[_max_preorder],0,sizeof(uint)*(newsize-_max_preorder));
 573       _max_preorder = newsize;
 574     }
 575   }
 576   // Check for pre-visited.  Zero for NOT visited; non-zero for visited.
 577   int is_visited( Node *n ) const { return _preorders[n->_idx]; }
  578   // Pre-order numbers are stored shifted up one bit; the low bit is reserved for the post-visited flag.
 579   void set_preorder_visited( Node *n, int pre_order ) {
 580     assert( !is_visited( n ), "already set" );
 581     _preorders[n->_idx] = (pre_order<<1);
 582   };
 583   // Return pre-order number.
 584   int get_preorder( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]>>1; }
 585 
 586   // Check for being post-visited.
 587   // Should be previsited already (checked with assert(is_visited(n))).
 588   int is_postvisited( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]&1; }
 589 
 590   // Mark as post visited
 591   void set_postvisited( Node *n ) { assert( !is_postvisited( n ), "" ); _preorders[n->_idx] |= 1; }
 592 
 593   // Set/get control node out.  Set lower bit to distinguish from IdealLoopTree
 594   // Returns true if "n" is a data node, false if it's a control node.
 595   bool has_ctrl( Node *n ) const { return ((intptr_t)_nodes[n->_idx]) & 1; }
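  // For example (see set_ctrl() and set_loop() below): a data node's slot holds
  // (Node*)((intptr_t)ctrl + 1), i.e. its control node with the low bit set,
  // while a CFG node's slot holds its IdealLoopTree* with the low bit clear.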
 596 
 597   // clear out dead code after build_loop_late
 598   Node_List _deadlist;
 599 
 600   // Support for faster execution of get_late_ctrl()/dom_lca()
 601   // when a node has many uses and dominator depth is deep.
 602   Node_Array _dom_lca_tags;
 603   void   init_dom_lca_tags();
 604   void   clear_dom_lca_tags();
 605 
 606   // Helper for debugging bad dominance relationships
 607   bool verify_dominance(Node* n, Node* use, Node* LCA, Node* early);
 608 
 609   Node* compute_lca_of_uses(Node* n, Node* early, bool verify = false);
 610 
 611   // Inline wrapper for frequent cases:
 612   // 1) only one use
 613   // 2) a use is the same as the current LCA passed as 'n1'
 614   Node *dom_lca_for_get_late_ctrl( Node *lca, Node *n, Node *tag ) {
 615     assert( n->is_CFG(), "" );
 616     // Fast-path NULL lca
 617     if( lca != NULL && lca != n ) {
 618       assert( lca->is_CFG(), "" );
 619       // find LCA of all uses
 620       n = dom_lca_for_get_late_ctrl_internal( lca, n, tag );
 621     }
 622     return find_non_split_ctrl(n);
 623   }
 624   Node *dom_lca_for_get_late_ctrl_internal( Node *lca, Node *n, Node *tag );
 625 
 626   // Helper function for directing control inputs away from CFG split
 627   // points.
 628   Node *find_non_split_ctrl( Node *ctrl ) const {
 629     if (ctrl != NULL) {
 630       if (ctrl->is_MultiBranch()) {
 631         ctrl = ctrl->in(0);
 632       }
 633       assert(ctrl->is_CFG(), "CFG");
 634     }
 635     return ctrl;
 636   }
 637 
 638   bool cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop);
 639 
 640 public:
 641   bool has_node( Node* n ) const {
 642     guarantee(n != NULL, "No Node.");
 643     return _nodes[n->_idx] != NULL;
 644   }
 645   // check if transform created new nodes that need _ctrl recorded
 646   Node *get_late_ctrl( Node *n, Node *early );
 647   Node *get_early_ctrl( Node *n );
 648   Node *get_early_ctrl_for_expensive(Node *n, Node* earliest);
 649   void set_early_ctrl( Node *n );
 650   void set_subtree_ctrl( Node *root );
 651   void set_ctrl( Node *n, Node *ctrl ) {
 652     assert( !has_node(n) || has_ctrl(n), "" );
 653     assert( ctrl->in(0), "cannot set dead control node" );
  654     assert( ctrl == find_non_split_ctrl(ctrl), "must set legal ctrl" );
 655     _nodes.map( n->_idx, (Node*)((intptr_t)ctrl + 1) );
 656   }
 657   // Set control and update loop membership
 658   void set_ctrl_and_loop(Node* n, Node* ctrl) {
 659     IdealLoopTree* old_loop = get_loop(get_ctrl(n));
 660     IdealLoopTree* new_loop = get_loop(ctrl);
 661     if (old_loop != new_loop) {
 662       if (old_loop->_child == NULL) old_loop->_body.yank(n);
 663       if (new_loop->_child == NULL) new_loop->_body.push(n);
 664     }
 665     set_ctrl(n, ctrl);
 666   }
 667   // Control nodes can be replaced or subsumed.  During this pass they
 668   // get their replacement Node in slot 1.  Instead of updating the block
 669   // location of all Nodes in the subsumed block, we lazily do it.  As we
 670   // pull such a subsumed block out of the array, we write back the final
 671   // correct block.
 672   Node *get_ctrl( Node *i ) {
 673     assert(has_node(i), "");
 674     Node *n = get_ctrl_no_update(i);
 675     _nodes.map( i->_idx, (Node*)((intptr_t)n + 1) );
 676     assert(has_node(i) && has_ctrl(i), "");
 677     assert(n == find_non_split_ctrl(n), "must return legal ctrl" );
 678     return n;
 679   }
 680   // true if CFG node d dominates CFG node n
 681   bool is_dominator(Node *d, Node *n);
 682   // return get_ctrl for a data node and self(n) for a CFG node
 683   Node* ctrl_or_self(Node* n) {
 684     if (has_ctrl(n))
 685       return get_ctrl(n);
 686     else {
 687       assert (n->is_CFG(), "must be a CFG node");
 688       return n;
 689     }
 690   }
 691 
 692 private:
 693   Node *get_ctrl_no_update( Node *i ) const {
 694     assert( has_ctrl(i), "" );
 695     Node *n = (Node*)(((intptr_t)_nodes[i->_idx]) & ~1);
 696     if (!n->in(0)) {
 697       // Skip dead CFG nodes
 698       do {
 699         n = (Node*)(((intptr_t)_nodes[n->_idx]) & ~1);
 700       } while (!n->in(0));
 701       n = find_non_split_ctrl(n);
 702     }
 703     return n;
 704   }
 705 
 706   // Check for loop being set
 707   // "n" must be a control node. Returns true if "n" is known to be in a loop.
 708   bool has_loop( Node *n ) const {
 709     assert(!has_node(n) || !has_ctrl(n), "");
 710     return has_node(n);
 711   }
 712   // Set loop
 713   void set_loop( Node *n, IdealLoopTree *loop ) {
 714     _nodes.map(n->_idx, (Node*)loop);
 715   }
 716   // Lazy-dazy update of 'get_ctrl' and 'idom_at' mechanisms.  Replace
 717   // the 'old_node' with 'new_node'.  Kill old-node.  Add a reference
 718   // from old_node to new_node to support the lazy update.  Reference
 719   // replaces loop reference, since that is not needed for dead node.
 720 public:
 721   void lazy_update( Node *old_node, Node *new_node ) {
 722     assert( old_node != new_node, "no cycles please" );
 723     //old_node->set_req( 1, new_node /*NO DU INFO*/ );
 724     // Nodes always have DU info now, so re-use the side array slot
 725     // for this node to provide the forwarding pointer.
 726     _nodes.map( old_node->_idx, (Node*)((intptr_t)new_node + 1) );
 727   }
 728   void lazy_replace( Node *old_node, Node *new_node ) {
 729     _igvn.replace_node( old_node, new_node );
 730     lazy_update( old_node, new_node );
 731   }
 732   void lazy_replace_proj( Node *old_node, Node *new_node ) {
 733     assert( old_node->req() == 1, "use this for Projs" );
 734     _igvn.hash_delete(old_node); // Must hash-delete before hacking edges
 735     old_node->add_req( NULL );
 736     lazy_replace( old_node, new_node );
 737   }
 738 
 739 private:
 740 
 741   // Place 'n' in some loop nest, where 'n' is a CFG node
 742   void build_loop_tree();
 743   int build_loop_tree_impl( Node *n, int pre_order );
 744   // Insert loop into the existing loop tree.  'innermost' is a leaf of the
 745   // loop tree, not the root.
 746   IdealLoopTree *sort( IdealLoopTree *loop, IdealLoopTree *innermost );
 747 
 748   // Place Data nodes in some loop nest
 749   void build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
 750   void build_loop_late ( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
 751   void build_loop_late_post ( Node* n );
 752 
 753   // Array of immediate dominance info for each CFG node indexed by node idx
 754 private:
 755   uint _idom_size;
 756   Node **_idom;                 // Array of immediate dominators
 757   uint *_dom_depth;           // Used for fast LCA test
 758   GrowableArray<uint>* _dom_stk; // For recomputation of dom depth
 759 
 760   Node* idom_no_update(Node* d) const {
 761     assert(d->_idx < _idom_size, "oob");
 762     Node* n = _idom[d->_idx];
 763     assert(n != NULL,"Bad immediate dominator info.");
 764     while (n->in(0) == NULL) {  // Skip dead CFG nodes
 765       //n = n->in(1);
 766       n = (Node*)(((intptr_t)_nodes[n->_idx]) & ~1);
 767       assert(n != NULL,"Bad immediate dominator info.");
 768     }
 769     return n;
 770   }
 771   Node *idom(Node* d) const {
 772     uint didx = d->_idx;
 773     Node *n = idom_no_update(d);
 774     _idom[didx] = n;            // Lazily remove dead CFG nodes from table.
 775     return n;
 776   }
 777   uint dom_depth(Node* d) const {
 778     guarantee(d != NULL, "Null dominator info.");
 779     guarantee(d->_idx < _idom_size, "");
 780     return _dom_depth[d->_idx];
 781   }
 782   void set_idom(Node* d, Node* n, uint dom_depth);
 783   // Locally compute IDOM using dom_lca call
 784   Node *compute_idom( Node *region ) const;
 785   // Recompute dom_depth
 786   void recompute_dom_depth();
 787 
 788   // Is safept not required by an outer loop?
 789   bool is_deleteable_safept(Node* sfpt);
 790 
 791   // Replace parallel induction variable (parallel to trip counter)
 792   void replace_parallel_iv(IdealLoopTree *loop);
 793 
 794   // Perform verification that the graph is valid.
 795   PhaseIdealLoop( PhaseIterGVN &igvn) :
 796     PhaseTransform(Ideal_Loop),
 797     _igvn(igvn),
 798     _dom_lca_tags(arena()), // Thread::resource_area
 799     _verify_me(NULL),
 800     _verify_only(true) {
 801     build_and_optimize(false, false);
 802   }
 803 
 804   // build the loop tree and perform any requested optimizations
 805   void build_and_optimize(bool do_split_if, bool skip_loop_opts);
 806 
 807 public:
 808   // Dominators for the sea of nodes
 809   void Dominators();
 810   Node *dom_lca( Node *n1, Node *n2 ) const {
 811     return find_non_split_ctrl(dom_lca_internal(n1, n2));
 812   }
 813   Node *dom_lca_internal( Node *n1, Node *n2 ) const;
 814 
 815   // Compute the Ideal Node to Loop mapping
 816   PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs, bool skip_loop_opts = false) :
 817     PhaseTransform(Ideal_Loop),
 818     _igvn(igvn),
 819     _dom_lca_tags(arena()), // Thread::resource_area
 820     _verify_me(NULL),
 821     _verify_only(false) {
 822     build_and_optimize(do_split_ifs, skip_loop_opts);
 823   }
 824 
 825   // Verify that verify_me made the same decisions as a fresh run.
 826   PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify_me) :
 827     PhaseTransform(Ideal_Loop),
 828     _igvn(igvn),
 829     _dom_lca_tags(arena()), // Thread::resource_area
 830     _verify_me(verify_me),
 831     _verify_only(false) {
 832     build_and_optimize(false, false);
 833   }
 834 
 835   // Build and verify the loop tree without modifying the graph.  This
 836   // is useful to verify that all inputs properly dominate their uses.
 837   static void verify(PhaseIterGVN& igvn) {
 838 #ifdef ASSERT
 839     PhaseIdealLoop v(igvn);
 840 #endif
 841   }
 842 
 843   // True if the method has at least 1 irreducible loop
 844   bool _has_irreducible_loops;
 845 
 846   // Per-Node transform
 847   virtual Node *transform( Node *a_node ) { return 0; }
 848 
 849   bool is_counted_loop( Node *x, IdealLoopTree *loop );
 850 
 851   Node* exact_limit( IdealLoopTree *loop );
 852 
 853   // Return a post-walked LoopNode
 854   IdealLoopTree *get_loop( Node *n ) const {
 855     // Dead nodes have no loop, so return the top level loop instead
 856     if (!has_node(n))  return _ltree_root;
 857     assert(!has_ctrl(n), "");
 858     return (IdealLoopTree*)_nodes[n->_idx];
 859   }
 860 
 861   // Is 'n' a (nested) member of 'loop'?
 862   int is_member( const IdealLoopTree *loop, Node *n ) const {
 863     return loop->is_member(get_loop(n)); }
 864 
 865   // This is the basic building block of the loop optimizations.  It clones an
 866   // entire loop body.  It makes an old_new loop body mapping; with this
 867   // mapping you can find the new-loop equivalent to an old-loop node.  All
 868   // new-loop nodes are exactly equal to their old-loop counterparts, all
 869   // edges are the same.  All exits from the old-loop now have a RegionNode
 870   // that merges the equivalent new-loop path.  This is true even for the
 871   // normal "loop-exit" condition.  All uses of loop-invariant old-loop values
 872   // now come from (one or more) Phis that merge their new-loop equivalents.
 873   // Parameter side_by_side_idom:
  874   //   When side_by_side_idom is NULL, the dominator tree is constructed for
 875   //      the clone loop to dominate the original.  Used in construction of
 876   //      pre-main-post loop sequence.
 877   //   When nonnull, the clone and original are side-by-side, both are
 878   //      dominated by the passed in side_by_side_idom node.  Used in
 879   //      construction of unswitched loops.
 880   void clone_loop( IdealLoopTree *loop, Node_List &old_new, int dom_depth,
 881                    Node* side_by_side_idom = NULL);
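  // A hedged sketch of consuming the old_new mapping that clone_loop() fills
  // in.  (Assumption, not stated above: the mapping is looked up by the old
  // node's _idx, so each clone sits at old_new[old->_idx].)
  //
  //   Node_List old_new;
  //   clone_loop(loop, old_new, dom_depth(loop->_head));
  //   Node* old_n = ...;                    // some node in the original body
  //   Node* new_n = old_new[old_n->_idx];   // its clone in the new loop body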
 882 
 883   // If we got the effect of peeling, either by actually peeling or by
 884   // making a pre-loop which must execute at least once, we can remove
 885   // all loop-invariant dominated tests in the main body.
 886   void peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new );
 887 
 888   // Generate code to do a loop peel for the given loop (and body).
 889   // old_new is a temp array.
 890   void do_peeling( IdealLoopTree *loop, Node_List &old_new );
 891 
 892   // Add pre and post loops around the given loop.  These loops are used
 893   // during RCE, unrolling and aligning loops.
 894   void insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only );
 895   // If Node n lives in the back_ctrl block, we clone a private version of n
 896   // in preheader_ctrl block and return that, otherwise return n.
 897   Node *clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones );
 898 
 899   // Take steps to maximally unroll the loop.  Peel any odd iterations, then
 900   // unroll to do double iterations.  The next round of major loop transforms
 901   // will repeat till the doubled loop body does all remaining iterations in 1
 902   // pass.
 903   void do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new );
 904 
 905   // Unroll the loop body one step - make each trip do 2 iterations.
 906   void do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip );
 907 
 908   // Mark vector reduction candidates before loop unrolling
 909   void mark_reductions( IdealLoopTree *loop );
 910 
 911   // Return true if exp is a constant times an induction var
 912   bool is_scaled_iv(Node* exp, Node* iv, int* p_scale);
 913 
 914   // Return true if exp is a scaled induction var plus (or minus) constant
 915   bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0);
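  // For example (a hedged reading of the intended pattern): for
  //   exp = AddI (MulI iv (ConI 4)) (ConI 8)
  // this would report *p_scale = 4 and set *p_offset to the constant-8 node.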
 916 
 917   // Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted
 918   ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
 919                                         Deoptimization::DeoptReason reason,
 920                                         int opcode);
 921   void register_control(Node* n, IdealLoopTree *loop, Node* pred);
 922 
 923   // Clone loop predicates to cloned loops (peeled, unswitched)
 924   static ProjNode* clone_predicate(ProjNode* predicate_proj, Node* new_entry,
 925                                    Deoptimization::DeoptReason reason,
 926                                    PhaseIdealLoop* loop_phase,
 927                                    PhaseIterGVN* igvn);
 928 
 929   static Node* clone_loop_predicates(Node* old_entry, Node* new_entry,
 930                                          bool clone_limit_check,
 931                                          PhaseIdealLoop* loop_phase,
 932                                          PhaseIterGVN* igvn);
 933   Node* clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check);
 934 
 935   static Node* skip_loop_predicates(Node* entry);
 936 
 937   // Find a good location to insert a predicate
 938   static ProjNode* find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason);
 939   // Find a predicate
 940   static Node* find_predicate(Node* entry);
 941   // Construct a range check for a predicate if
 942   BoolNode* rc_predicate(IdealLoopTree *loop, Node* ctrl,
 943                          int scale, Node* offset,
 944                          Node* init, Node* limit, Node* stride,
 945                          Node* range, bool upper);
 946 
 947   // Implementation of the loop predication to promote checks outside the loop
 948   bool loop_predication_impl(IdealLoopTree *loop);
 949 
  950   // Helper function to collect predicates for eliminating the useless ones
 951   void collect_potentially_useful_predicates(IdealLoopTree *loop, Unique_Node_List &predicate_opaque1);
 952   void eliminate_useless_predicates();
 953 
 954   // Change the control input of expensive nodes to allow commoning by
 955   // IGVN when it is guaranteed to not result in a more frequent
 956   // execution of the expensive node. Return true if progress.
 957   bool process_expensive_nodes();
 958 
 959   // Check whether node has become unreachable
 960   bool is_node_unreachable(Node *n) const {
 961     return !has_node(n) || n->is_unreachable(_igvn);
 962   }
 963 
 964   // Eliminate range-checks and other trip-counter vs loop-invariant tests.
 965   void do_range_check( IdealLoopTree *loop, Node_List &old_new );
 966 
 967   // Create a slow version of the loop by cloning the loop
 968   // and inserting an if to select fast-slow versions.
 969   ProjNode* create_slow_version_of_loop(IdealLoopTree *loop,
 970                                         Node_List &old_new,
 971                                         int opcode);
 972 
 973   // Clone a loop and return the clone head (clone_loop_head).
 974   // Added nodes include int(1), int(0) - disconnected, If, IfTrue, IfFalse,
 975   // This routine was created for usage in CountedLoopReserveKit.
 976   //
 977   //    int(1) -> If -> IfTrue -> original_loop_head
 978   //              |
 979   //              V
 980   //           IfFalse -> clone_loop_head (returned by function pointer)
 981   //
 982   LoopNode* create_reserve_version_of_loop(IdealLoopTree *loop, CountedLoopReserveKit* lk);
 983   // Clone loop with an invariant test (that does not exit) and
 984   // insert a clone of the test that selects which version to
 985   // execute.
 986   void do_unswitching (IdealLoopTree *loop, Node_List &old_new);
 987 
 988   // Find candidate "if" for unswitching
 989   IfNode* find_unswitching_candidate(const IdealLoopTree *loop) const;
 990 
 991   // Range Check Elimination uses this function!
 992   // Constrain the main loop iterations so the affine function:
 993   //    low_limit <= scale_con * I + offset  <  upper_limit
 994   // always holds true.  That is, either increase the number of iterations in
 995   // the pre-loop or the post-loop until the condition holds true in the main
 996   // loop.  Scale_con, offset and limit are all loop invariant.
 997   void add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit );
 998   // Helper function for add_constraint().
 999   Node* adjust_limit( int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl );
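  // A worked (and simplified) example of the constraint above: for a range
  // check 0 <= 1*I + 0 < array_length in a loop with stride_con = 1,
  // scale_con = 1 and offset = 0, the main-loop limit is lowered to
  // MIN(old_limit, array_length), so every main-loop iteration provably
  // passes the check, while iterations at or beyond array_length fall to the
  // post-loop, which keeps the full range check.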
1000 
1001   // Partially peel loop up through last_peel node.
1002   bool partial_peel( IdealLoopTree *loop, Node_List &old_new );
1003 
1004   // Create a scheduled list of nodes control dependent on ctrl set.
1005   void scheduled_nodelist( IdealLoopTree *loop, VectorSet& ctrl, Node_List &sched );
1006   // Has a use in the vector set
1007   bool has_use_in_set( Node* n, VectorSet& vset );
1008   // Has a use internal to the vector set (i.e. not in a phi at the loop head)
1009   bool has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop );
1010   // clone "n" for uses that are outside of loop
1011   int  clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist );
1012   // clone "n" for special uses that are in the not_peeled region
1013   void clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
1014                                           VectorSet& not_peel, Node_List& sink_list, Node_List& worklist );
1015   // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
1016   void insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp );
1017 #ifdef ASSERT
1018   // Validate the loop partition sets: peel and not_peel
1019   bool is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list, VectorSet& not_peel );
1020   // Ensure that uses outside of loop are of the right form
1021   bool is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list,
1022                                  uint orig_exit_idx, uint clone_exit_idx);
1023   bool is_valid_clone_loop_exit_use( IdealLoopTree *loop, Node* use, uint exit_idx);
1024 #endif
1025 
1026   // Returns the nonzero constant stride if the if-node is a possible iv test (otherwise returns zero).
1027   int stride_of_possible_iv( Node* iff );
1028   bool is_possible_iv_test( Node* iff ) { return stride_of_possible_iv(iff) != 0; }
1029   // Return the (unique) control output node that's in the loop (if it exists.)
1030   Node* stay_in_loop( Node* n, IdealLoopTree *loop);
1031   // Insert a signed compare loop exit cloned from an unsigned compare.
1032   IfNode* insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *loop);
1033   void remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop);
1034   // Utility to register node "n" with PhaseIdealLoop
1035   void register_node(Node* n, IdealLoopTree *loop, Node* pred, int ddepth);
1036   // Utility to create an if-projection
1037   ProjNode* proj_clone(ProjNode* p, IfNode* iff);
1038   // Force the iff control output to be the live_proj
1039   Node* short_circuit_if(IfNode* iff, ProjNode* live_proj);
1040   // Insert a region before an if projection
1041   RegionNode* insert_region_before_proj(ProjNode* proj);
1042   // Insert a new if before an if projection
1043   ProjNode* insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj);
1044 
1045   // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
1046   // "Nearly" because all Nodes have been cloned from the original in the loop,
1047   // but the fall-in edges to the Cmp are different.  Clone bool/Cmp pairs
1048   // through the Phi recursively, and return a Bool.
1049   BoolNode *clone_iff( PhiNode *phi, IdealLoopTree *loop );
1050   CmpNode *clone_bool( PhiNode *phi, IdealLoopTree *loop );
1051 
1052 
1053   // Rework addressing expressions to get the most loop-invariant stuff
1054   // moved out.  We'd like to do all associative operators, but it's especially
1055   // important (common) to do address expressions.
1056   Node *remix_address_expressions( Node *n );
1057 
1058   // Attempt to use a conditional move instead of a phi/branch
1059   Node *conditional_move( Node *n );
1060 
1061   // Reorganize offset computations to lower register pressure.
1062   // Mostly prevent loop-fallout uses of the pre-incremented trip counter
1063   // (which are then alive with the post-incremented trip counter
1064   // forcing an extra register move)
1065   void reorg_offsets( IdealLoopTree *loop );
1066 
1067   // Check for aggressive application of 'split-if' optimization,
1068   // using basic block level info.
1069   void  split_if_with_blocks     ( VectorSet &visited, Node_Stack &nstack );
1070   Node *split_if_with_blocks_pre ( Node *n );
1071   void  split_if_with_blocks_post( Node *n );
1072   Node *has_local_phi_input( Node *n );
1073   // Mark an IfNode as being dominated by a prior test,
1074   // without actually altering the CFG (and hence IDOM info).
1075   void dominated_by( Node *prevdom, Node *iff, bool flip = false, bool exclude_loop_predicate = false );
1076 
1077   // Split Node 'n' through merge point
1078   Node *split_thru_region( Node *n, Node *region );
1079   // Split Node 'n' through merge point if there is enough win.
1080   Node *split_thru_phi( Node *n, Node *region, int policy );
1081   // Found an If getting its condition-code input from a Phi in the
1082   // same block.  Split thru the Region.
1083   void do_split_if( Node *iff );
1084 
1085   // Conversion of fill/copy patterns into intrinsic versions
1086   bool do_intrinsify_fill();
1087   bool intrinsify_fill(IdealLoopTree* lpt);
1088   bool match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
1089                        Node*& shift, Node*& offset);
1090 
1091 private:
1092   // Return a type based on condition control flow
1093   const TypeInt* filtered_type( Node *n, Node* n_ctrl);
1094   const TypeInt* filtered_type( Node *n ) { return filtered_type(n, NULL); }
1095   // Helpers for filtered type
1096   const TypeInt* filtered_type_from_dominators( Node* val, Node *val_ctrl);
1097 
1098   // Helper functions
1099   Node *spinup( Node *iff, Node *new_false, Node *new_true, Node *region, Node *phi, small_cache *cache );
1100   Node *find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true );
1101   void handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true );
1102   bool split_up( Node *n, Node *blk1, Node *blk2 );
1103   void sink_use( Node *use, Node *post_loop );
1104   Node *place_near_use( Node *useblock ) const;
1105   Node* try_move_store_before_loop(Node* n, Node *n_ctrl);
1106   void try_move_store_after_loop(Node* n);
1107 
1108   bool _created_loop_node;
1109 public:
1110   void set_created_loop_node() { _created_loop_node = true; }
1111   bool created_loop_node()     { return _created_loop_node; }
1112   void register_new_node( Node *n, Node *blk );
1113 
1114 #ifdef ASSERT
1115   void dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA);
1116 #endif
1117 
1118 #ifndef PRODUCT
1119   void dump( ) const;
1120   void dump( IdealLoopTree *loop, uint rpo_idx, Node_List &rpo_list ) const;
1121   void rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const;
1122   void verify() const;          // Major slow  :-)
1123   void verify_compare( Node *n, const PhaseIdealLoop *loop_verify, VectorSet &visited ) const;
1124   IdealLoopTree *get_loop_idx(Node* n) const {
1125     // Dead nodes have no loop, so return the top level loop instead
1126     return _nodes[n->_idx] ? (IdealLoopTree*)_nodes[n->_idx] : _ltree_root;
1127   }
1128   // Print some stats
1129   static void print_statistics();
1130   static int _loop_invokes;     // Count of PhaseIdealLoop invokes
1131   static int _loop_work;        // Sum of PhaseIdealLoop x _unique
1132 #endif
1133 };
1134 
1135 // This kit may be used to make a reserved copy of a loop before the loop
1136 // undergoes irreversible changes.
1137 //
1138 // Function create_reserve() creates a reserved copy (clone) of the loop.
1139 // The reserved copy is created by calling
1140 // PhaseIdealLoop::create_reserve_version_of_loop - see there how
1141 // the original and reserved loops are connected in the outer graph.
1142 // If create_reserve succeeded, it returns 'true' and _has_reserved is set to 'true'.
1143 //
1144 // By default the reserved copy (clone) of the loop is created as dead code - it is
1145 // dominated in the outer loop by this node chain:
1146 //   intcon(1)->If->IfFalse->reserved_copy.
1147 // The original loop is dominated by the same node chain but the IfTrue projection:
1148 //   intcon(1)->If->IfTrue->original_loop.
1149 //
1150 // In this implementation of CountedLoopReserveKit the ctor calls create_reserve()
1151 // and the dtor checks the _use_new value.
1152 // If _use_new == false, it "switches" control to the reserved copy of the loop
1153 // by simply replacing node intcon(1) with node intcon(0).
1154 //
1155 // Here is a proposed example of usage (see also SuperWord::output in superword.cpp).
1156 //
1157 // void CountedLoopReserveKit_example()
1158 // {
1159 //    CountedLoopReserveKit lrk(phase, lpt, DoReserveCopy = true); // create local object
1160 //    if (DoReserveCopy && !lrk.has_reserved()) {
1161 //      return; //failed to create reserved loop copy
1162 //    }
1163 //    ...
1164 //    //something is wrong, switch to original loop
1165 //    if (something_is_wrong) return; // ~CountedLoopReserveKit makes the switch
1166 //    ...
1167 //    //everything worked ok, return with the newly modified loop
1168 //    lrk.use_new();
1169 //    return; // ~CountedLoopReserveKit does nothing once use_new() was called
1170 //  }
1171 //
1172 // Keep in mind that, by default, if create_reserve() is not followed by use_new()
1173 // the dtor will "switch to the original" loop.
1174 // NOTE: if you modify nodes outside of the original loop, this class is no help.
1175 //
1176 class CountedLoopReserveKit {
1177   private:
1178     PhaseIdealLoop* _phase;
1179     IdealLoopTree*  _lpt;
1180     LoopNode*       _lp;
1181     IfNode*         _iff;
1182     LoopNode*       _lp_reserved;
1183     bool            _has_reserved;
1184     bool            _use_new;
1185     const bool      _active; //may be set to false in ctor, then the object is dummy
1186 
1187   public:
1188     CountedLoopReserveKit(PhaseIdealLoop* phase, IdealLoopTree *loop, bool active);
1189     ~CountedLoopReserveKit();
1190     void use_new()                {_use_new = true;}
1191     void set_iff(IfNode* x)       {_iff = x;}
1192     bool has_reserved()     const { return _active && _has_reserved;}
1193   private:
1194     bool create_reserve();
1195 };// class CountedLoopReserveKit
1196 
1197 inline Node* IdealLoopTree::tail() {
1198 // Handle lazy update of _tail field
1199   Node *n = _tail;
1200   //while( !n->in(0) )  // Skip dead CFG nodes
1201     //n = n->in(1);
1202   if (n->in(0) == NULL)
1203     n = _phase->get_ctrl(n);
1204   _tail = n;
1205   return n;
1206 }
1207 
1208 
1209 // Iterate over the loop tree using a preorder, left-to-right traversal.
1210 //
1211 // Example that visits all counted loops from within PhaseIdealLoop
1212 //
1213 //  for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
1214 //   IdealLoopTree* lpt = iter.current();
1215 //   if (!lpt->is_counted()) continue;
1216 //   ...
1217 class LoopTreeIterator : public StackObj {
1218 private:
1219   IdealLoopTree* _root;
1220   IdealLoopTree* _curnt;
1221 
1222 public:
1223   LoopTreeIterator(IdealLoopTree* root) : _root(root), _curnt(root) {}
1224 
1225   bool done() { return _curnt == NULL; }       // Finished iterating?
1226 
1227   void next();                                 // Advance to next loop tree
1228 
1229   IdealLoopTree* current() { return _curnt; }  // Return current value of iterator.
1230 };
1231 
1232 #endif // SHARE_VM_OPTO_LOOPNODE_HPP