src/share/vm/opto/loopnode.hpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File 5091921 Sdiff src/share/vm/opto

src/share/vm/opto/loopnode.hpp

Print this page




 272 #endif
 273 };
 274 
 275 
 // Recover the CountedLoopEnd test that controls this counted loop's
 // backedge.  Returns NULL when the loop has no backedge control yet,
 // or when the node controlling the backedge is not a CountedLoopEnd
 // (i.e. the loop has not been fully converted to counted-loop form).
 276 inline CountedLoopEndNode *CountedLoopNode::loopexit() const {
 277   Node *bc = back_control();
 278   if( bc == NULL ) return NULL;
 279   Node *le = bc->in(0);
 280   if( le->Opcode() != Op_CountedLoopEnd )
 281     return NULL;
 282   return (CountedLoopEndNode*)le;  // cast is safe: opcode checked just above
 283 }
 // Delegating accessors: each forwards to the CountedLoopEnd found by
 // loopexit(), with a NULL (or 0/false) fallback when no valid loop exit
 // exists.  Note that loopexit() is re-evaluated twice per call.
 284 inline Node *CountedLoopNode::init_trip() const { return loopexit() ? loopexit()->init_trip() : NULL; }
 285 inline Node *CountedLoopNode::stride() const { return loopexit() ? loopexit()->stride() : NULL; }
 286 inline int CountedLoopNode::stride_con() const { return loopexit() ? loopexit()->stride_con() : 0; }
 287 inline bool CountedLoopNode::stride_is_con() const { return loopexit() && loopexit()->stride_is_con(); }
 288 inline Node *CountedLoopNode::limit() const { return loopexit() ? loopexit()->limit() : NULL; }
 289 inline Node *CountedLoopNode::incr() const { return loopexit() ? loopexit()->incr() : NULL; }
 290 inline Node *CountedLoopNode::phi() const { return loopexit() ? loopexit()->phi() : NULL; }
 291 






















 292 
 293 // -----------------------------IdealLoopTree----------------------------------
 294 class IdealLoopTree : public ResourceObj {
 295 public:
 296   IdealLoopTree *_parent;       // Parent in loop tree
 297   IdealLoopTree *_next;         // Next sibling in loop tree
 298   IdealLoopTree *_child;        // First child in loop tree
 299 
 300   // The head-tail backedge defines the loop.
 301   // If tail is NULL then this loop has multiple backedges as part of the
 302   // same loop.  During cleanup I'll peel off the multiple backedges; merge
 303   // them at the loop bottom and flow 1 real backedge into the loop.
 304   Node *_head;                  // Head of loop
 305   Node *_tail;                  // Tail of loop
 306   inline Node *tail();          // Handle lazy update of _tail field
 307   PhaseIdealLoop* _phase;
 308 
 309   Node_List _body;              // Loop body for inner loops
 310 
 311   uint8 _nest;                  // Nesting depth


 758     _verify_only(false) {
 759     build_and_optimize(false);
 760   }
 761 
 762   // Build and verify the loop tree without modifying the graph.  This
 763   // is useful to verify that all inputs properly dominate their uses.
 764   static void verify(PhaseIterGVN& igvn) {
 765 #ifdef ASSERT
 766     PhaseIdealLoop v(igvn);   // NOTE(review): this ctor presumably builds the loop tree in a verify-only mode -- ctor body not visible here, confirm
 767 #endif
 768   }
 769 
 770   // True if the method has at least 1 irreducible loop
 771   bool _has_irreducible_loops;
 772 
 773   // Per-Node transform
 774   virtual Node *transform( Node *a_node ) { return 0; }
 775 
 776   bool is_counted_loop( Node *x, IdealLoopTree *loop );
 777 


 778   // Return a post-walked LoopNode
 779   IdealLoopTree *get_loop( Node *n ) const {
 780     // Dead nodes have no loop, so return the top level loop instead
 781     if (!has_node(n))  return _ltree_root;
 782     assert(!has_ctrl(n), "");   // _nodes[] entry must be a loop record here, not a control mapping
 783     return (IdealLoopTree*)_nodes[n->_idx];
 784   }
 785 
 786   // Is 'n' a (nested) member of 'loop'?
 787   int is_member( const IdealLoopTree *loop, Node *n ) const {
 788     return loop->is_member(get_loop(n)); }   // nonzero iff n's innermost loop is 'loop' or nested in it -- per IdealLoopTree::is_member (not visible here)
 789 
 790   // This is the basic building block of the loop optimizations.  It clones an
 791   // entire loop body.  It makes an old_new loop body mapping; with this
 792   // mapping you can find the new-loop equivalent to an old-loop node.  All
 793   // new-loop nodes are exactly equal to their old-loop counterparts, all
 794   // edges are the same.  All exits from the old-loop now have a RegionNode
 795   // that merges the equivalent new-loop path.  This is true even for the
 796   // normal "loop-exit" condition.  All uses of loop-invariant old-loop values
 797   // now come from (one or more) Phis that merge their new-loop equivalents.


 820   // If Node n lives in the back_ctrl block, we clone a private version of n
 821   // in preheader_ctrl block and return that, otherwise return n.
 822   Node *clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n );
 823 
 824   // Take steps to maximally unroll the loop.  Peel any odd iterations, then
 825   // unroll to do double iterations.  The next round of major loop transforms
 826   // will repeat till the doubled loop body does all remaining iterations in 1
 827   // pass.
 828   void do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new );
 829 
 830   // Unroll the loop body one step - make each trip do 2 iterations.
 831   void do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip );
 832 
 833   // Return true if exp is a constant times an induction var
 834   bool is_scaled_iv(Node* exp, Node* iv, int* p_scale);
 835 
 836   // Return true if exp is a scaled induction var plus (or minus) constant
 837   bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0);
 838 
 839   // Return true if proj is for "proj->[region->..]call_uct"
 840   // Return true if proj is for "proj->[region->..]call_uct"
 841   static bool is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason);
 842   // Return true for    "if(test)-> proj -> ...
 843   //                          |
 844   //                          V
 845   //                      other_proj->[region->..]call_uct"
 846   static bool is_uncommon_trap_if_pattern(ProjNode* proj, Deoptimization::DeoptReason reason);
 847   // Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted
 848   ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
 849                                         Deoptimization::DeoptReason reason);
 850   void register_control(Node* n, IdealLoopTree *loop, Node* pred);
 851 
 852   // Clone loop predicates to cloned loops (peeled, unswitched)
 853   static ProjNode* clone_predicate(ProjNode* predicate_proj, Node* new_entry,
 854                                    Deoptimization::DeoptReason reason,
 855                                    PhaseIdealLoop* loop_phase,
 856                                    PhaseIterGVN* igvn);
 857   static ProjNode*  move_predicate(ProjNode* predicate_proj, Node* new_entry,
 858                                    Deoptimization::DeoptReason reason,
 859                                    PhaseIdealLoop* loop_phase,
 860                                    PhaseIterGVN* igvn);
 861   static Node* clone_loop_predicates(Node* old_entry, Node* new_entry,
 862                                          bool move_predicates,

 863                                          PhaseIdealLoop* loop_phase,
 864                                          PhaseIterGVN* igvn);
 865   Node* clone_loop_predicates(Node* old_entry, Node* new_entry);
 866   Node*  move_loop_predicates(Node* old_entry, Node* new_entry);
 867 
 868   void eliminate_loop_predicates(Node* entry);
 869   static Node* skip_loop_predicates(Node* entry);
 870 
 871   // Find a good location to insert a predicate
 872   static ProjNode* find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason);
 873   // Find a predicate
 874   static Node* find_predicate(Node* entry);
 875   // Construct a range check for a predicate if
 876   BoolNode* rc_predicate(Node* ctrl,
 877                          int scale, Node* offset,
 878                          Node* init, Node* limit, Node* stride,
 879                          Node* range, bool upper);
 880 
 881   // Implementation of the loop predication to promote checks outside the loop
 882   bool loop_predication_impl(IdealLoopTree *loop);
 883 
 884   // Helper function to collect predicate for eliminating the useless ones
 885   void collect_potentially_useful_predicates(IdealLoopTree *loop, Unique_Node_List &predicate_opaque1);
 886   void eliminate_useless_predicates();
 887 
 888   // Eliminate range-checks and other trip-counter vs loop-invariant tests.
 889   void do_range_check( IdealLoopTree *loop, Node_List &old_new );
 890 
 891   // Create a slow version of the loop by cloning the loop
 892   // and inserting an if to select fast-slow versions.
 893   ProjNode* create_slow_version_of_loop(IdealLoopTree *loop,
 894                                         Node_List &old_new);
 895 
 896   // Clone loop with an invariant test (that does not exit) and
 897   // insert a clone of the test that selects which version to
 898   // execute.
 899   void do_unswitching (IdealLoopTree *loop, Node_List &old_new);
 900 
 901   // Find candidate "if" for unswitching
 902   IfNode* find_unswitching_candidate(const IdealLoopTree *loop) const;
 903 
 904   // Range Check Elimination uses this function!
 905   // Constrain the main loop iterations so the affine function:
 906   //    scale_con * I + offset  <  limit
 907   // always holds true.  That is, either increase the number of iterations in
 908   // the pre-loop or the post-loop until the condition holds true in the main
 909   // loop.  Scale_con, offset and limit are all loop invariant.
 910   void add_constraint( int stride_con, int scale_con, Node *offset, Node *limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit );
 911 
 912   // Partially peel loop up through last_peel node.
 913   bool partial_peel( IdealLoopTree *loop, Node_List &old_new );
 914 
 915   // Create a scheduled list of nodes control dependent on ctrl set.
 916   void scheduled_nodelist( IdealLoopTree *loop, VectorSet& ctrl, Node_List &sched );
 917   // Has a use in the vector set
 918   bool has_use_in_set( Node* n, VectorSet& vset );
 919   // Has use internal to the vector set (ie. not in a phi at the loop head)
 920   bool has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop );
 921   // clone "n" for uses that are outside of loop
 922   void clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist );
 923   // clone "n" for special uses that are in the not_peeled region
 924   void clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
 925                                           VectorSet& not_peel, Node_List& sink_list, Node_List& worklist );
 926   // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
 927   void insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp );
 928 #ifdef ASSERT
 929   // Validate the loop partition sets: peel and not_peel
 930   bool is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list, VectorSet& not_peel );




 272 #endif
 273 };
 274 
 275 
 // Recover the CountedLoopEnd test that controls this counted loop's
 // backedge.  Returns NULL when the loop has no backedge control yet,
 // or when the node controlling the backedge is not a CountedLoopEnd
 // (i.e. the loop has not been fully converted to counted-loop form).
 276 inline CountedLoopEndNode *CountedLoopNode::loopexit() const {
 277   Node *bc = back_control();
 278   if( bc == NULL ) return NULL;
 279   Node *le = bc->in(0);
 280   if( le->Opcode() != Op_CountedLoopEnd )
 281     return NULL;
 282   return (CountedLoopEndNode*)le;  // cast is safe: opcode checked just above
 283 }
 // Delegating accessors: each forwards to the CountedLoopEnd found by
 // loopexit(), with a NULL (or 0/false) fallback when no valid loop exit
 // exists.  Note that loopexit() is re-evaluated twice per call.
 284 inline Node *CountedLoopNode::init_trip() const { return loopexit() ? loopexit()->init_trip() : NULL; }
 285 inline Node *CountedLoopNode::stride() const { return loopexit() ? loopexit()->stride() : NULL; }
 286 inline int CountedLoopNode::stride_con() const { return loopexit() ? loopexit()->stride_con() : 0; }
 287 inline bool CountedLoopNode::stride_is_con() const { return loopexit() && loopexit()->stride_is_con(); }
 288 inline Node *CountedLoopNode::limit() const { return loopexit() ? loopexit()->limit() : NULL; }
 289 inline Node *CountedLoopNode::incr() const { return loopexit() ? loopexit()->incr() : NULL; }
 290 inline Node *CountedLoopNode::phi() const { return loopexit() ? loopexit()->phi() : NULL; }
 291 
 292 //------------------------------LoopLimitNode-----------------------------
 293 // Counted Loop limit node which represents exact final iterator value:
 294 // trip_count = (limit - init_trip + stride - 1)/stride
 295 // final_value= trip_count * stride + init_trip.
 296 // Use HW instructions to calculate it when it can overflow in integer.
 297 // Note, final_value should fit into integer since counted loop has
 298 // limit check: limit <= max_int-stride.
 299 class LoopLimitNode : public Node {
 300   enum { Normal=0, Init=1, Limit=2, Stride=3 };  // input edge indices, matching the ctor's Node(0,init,limit,stride) ordering
 301  public:
 302   LoopLimitNode( Compile* C, Node *init, Node *limit, Node *stride ) : Node(0,init,limit,stride) {
 303     // Put it on the Macro nodes list to optimize during macro nodes expansion.
 304     init_flags(Flag_is_macro);
 305     C->add_macro_node(this);
 306   }
 307   virtual int Opcode() const;
 308   virtual const Type *bottom_type() const { return TypeInt::INT; }  // result is always an int (see limit-check note in the class header)
 309   virtual uint ideal_reg() const { return Op_RegI; }                // lives in an integer register
 310   virtual const Type *Value( PhaseTransform *phase ) const;
 311   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 312   virtual Node *Identity( PhaseTransform *phase );
 313 };
 314 
 315 // -----------------------------IdealLoopTree----------------------------------
 316 class IdealLoopTree : public ResourceObj {
 317 public:
 318   IdealLoopTree *_parent;       // Parent in loop tree
 319   IdealLoopTree *_next;         // Next sibling in loop tree
 320   IdealLoopTree *_child;        // First child in loop tree
 321 
 322   // The head-tail backedge defines the loop.
 323   // If tail is NULL then this loop has multiple backedges as part of the
 324   // same loop.  During cleanup I'll peel off the multiple backedges; merge
 325   // them at the loop bottom and flow 1 real backedge into the loop.
 326   Node *_head;                  // Head of loop
 327   Node *_tail;                  // Tail of loop
 328   inline Node *tail();          // Handle lazy update of _tail field
 329   PhaseIdealLoop* _phase;
 330 
 331   Node_List _body;              // Loop body for inner loops
 332 
 333   uint8 _nest;                  // Nesting depth


 780     _verify_only(false) {
 781     build_and_optimize(false);
 782   }
 783 
 784   // Build and verify the loop tree without modifying the graph.  This
 785   // is useful to verify that all inputs properly dominate their uses.
 786   static void verify(PhaseIterGVN& igvn) {
 787 #ifdef ASSERT
 788     PhaseIdealLoop v(igvn);   // NOTE(review): this ctor presumably builds the loop tree in a verify-only mode -- ctor body not visible here, confirm
 789 #endif
 790   }
 791 
 792   // True if the method has at least 1 irreducible loop
 793   bool _has_irreducible_loops;
 794 
 795   // Per-Node transform
 796   virtual Node *transform( Node *a_node ) { return 0; }
 797 
 798   bool is_counted_loop( Node *x, IdealLoopTree *loop );
 799 
 800   Node* exact_limit( IdealLoopTree *loop );
 801 
 802   // Return a post-walked LoopNode
 803   IdealLoopTree *get_loop( Node *n ) const {
 804     // Dead nodes have no loop, so return the top level loop instead
 805     if (!has_node(n))  return _ltree_root;
 806     assert(!has_ctrl(n), "");   // _nodes[] entry must be a loop record here, not a control mapping
 807     return (IdealLoopTree*)_nodes[n->_idx];
 808   }
 809 
 810   // Is 'n' a (nested) member of 'loop'?
 811   int is_member( const IdealLoopTree *loop, Node *n ) const {
 812     return loop->is_member(get_loop(n)); }   // nonzero iff n's innermost loop is 'loop' or nested in it -- per IdealLoopTree::is_member (not visible here)
 813 
 814   // This is the basic building block of the loop optimizations.  It clones an
 815   // entire loop body.  It makes an old_new loop body mapping; with this
 816   // mapping you can find the new-loop equivalent to an old-loop node.  All
 817   // new-loop nodes are exactly equal to their old-loop counterparts, all
 818   // edges are the same.  All exits from the old-loop now have a RegionNode
 819   // that merges the equivalent new-loop path.  This is true even for the
 820   // normal "loop-exit" condition.  All uses of loop-invariant old-loop values
 821   // now come from (one or more) Phis that merge their new-loop equivalents.


 844   // If Node n lives in the back_ctrl block, we clone a private version of n
 845   // in preheader_ctrl block and return that, otherwise return n.
 846   Node *clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n );
 847 
 848   // Take steps to maximally unroll the loop.  Peel any odd iterations, then
 849   // unroll to do double iterations.  The next round of major loop transforms
 850   // will repeat till the doubled loop body does all remaining iterations in 1
 851   // pass.
 852   void do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new );
 853 
 854   // Unroll the loop body one step - make each trip do 2 iterations.
 855   void do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip );
 856 
 857   // Return true if exp is a constant times an induction var
 858   bool is_scaled_iv(Node* exp, Node* iv, int* p_scale);
 859 
 860   // Return true if exp is a scaled induction var plus (or minus) constant
 861   bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0);
 862 
 863   // Return true if proj is for "proj->[region->..]call_uct"

 864   static bool is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason);
 865   // Return true for    "if(test)-> proj -> ...
 866   //                          |
 867   //                          V
 868   //                      other_proj->[region->..]call_uct"
 869   static bool is_uncommon_trap_if_pattern(ProjNode* proj, Deoptimization::DeoptReason reason);
 870   // Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted
 871   ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
 872                                         Deoptimization::DeoptReason reason);
 873   void register_control(Node* n, IdealLoopTree *loop, Node* pred);
 874 
 875   // Clone loop predicates to cloned loops (peeled, unswitched)
 876   static ProjNode* clone_predicate(ProjNode* predicate_proj, Node* new_entry,
 877                                    Deoptimization::DeoptReason reason,
 878                                    PhaseIdealLoop* loop_phase,
 879                                    PhaseIterGVN* igvn);
 880   static ProjNode*  move_predicate(ProjNode* predicate_proj, Node* new_entry,
 881                                    Deoptimization::DeoptReason reason,
 882                                    PhaseIdealLoop* loop_phase,
 883                                    PhaseIterGVN* igvn);
 884   static Node* clone_loop_predicates(Node* old_entry, Node* new_entry,
 885                                          bool move_predicates,
 886                                          bool clone_limit_check,
 887                                          PhaseIdealLoop* loop_phase,
 888                                          PhaseIterGVN* igvn);
 889   Node* clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check);
 890   Node*  move_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check);
 891 
 892   void eliminate_loop_predicates(Node* entry);
 893   static Node* skip_loop_predicates(Node* entry);
 894 
 895   // Find a good location to insert a predicate
 896   static ProjNode* find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason);
 897   // Find a predicate
 898   static Node* find_predicate(Node* entry);
 899   // Construct a range check for a predicate if
 900   BoolNode* rc_predicate(IdealLoopTree *loop, Node* ctrl,
 901                          int scale, Node* offset,
 902                          Node* init, Node* limit, Node* stride,
 903                          Node* range, bool upper);
 904 
 905   // Implementation of the loop predication to promote checks outside the loop
 906   bool loop_predication_impl(IdealLoopTree *loop);
 907 
 908   // Helper function to collect predicate for eliminating the useless ones
 909   void collect_potentially_useful_predicates(IdealLoopTree *loop, Unique_Node_List &predicate_opaque1);
 910   void eliminate_useless_predicates();
 911 
 912   // Eliminate range-checks and other trip-counter vs loop-invariant tests.
 913   void do_range_check( IdealLoopTree *loop, Node_List &old_new );
 914 
 915   // Create a slow version of the loop by cloning the loop
 916   // and inserting an if to select fast-slow versions.
 917   ProjNode* create_slow_version_of_loop(IdealLoopTree *loop,
 918                                         Node_List &old_new);
 919 
 920   // Clone loop with an invariant test (that does not exit) and
 921   // insert a clone of the test that selects which version to
 922   // execute.
 923   void do_unswitching (IdealLoopTree *loop, Node_List &old_new);
 924 
 925   // Find candidate "if" for unswitching
 926   IfNode* find_unswitching_candidate(const IdealLoopTree *loop) const;
 927 
 928   // Range Check Elimination uses this function!
 929   // Constrain the main loop iterations so the affine function:
 930   //    low_limit <= scale_con * I + offset  <  upper_limit
 931   // always holds true.  That is, either increase the number of iterations in
 932   // the pre-loop or the post-loop until the condition holds true in the main
 933   // loop.  Scale_con, offset and limit are all loop invariant.
 934   void add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit );
 935 
 936   // Partially peel loop up through last_peel node.
 937   bool partial_peel( IdealLoopTree *loop, Node_List &old_new );
 938 
 939   // Create a scheduled list of nodes control dependent on ctrl set.
 940   void scheduled_nodelist( IdealLoopTree *loop, VectorSet& ctrl, Node_List &sched );
 941   // Has a use in the vector set
 942   bool has_use_in_set( Node* n, VectorSet& vset );
 943   // Has use internal to the vector set (ie. not in a phi at the loop head)
 944   bool has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop );
 945   // clone "n" for uses that are outside of loop
 946   void clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist );
 947   // clone "n" for special uses that are in the not_peeled region
 948   void clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
 949                                           VectorSet& not_peel, Node_List& sink_list, Node_List& worklist );
 950   // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
 951   void insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp );
 952 #ifdef ASSERT
 953   // Validate the loop partition sets: peel and not_peel
 954   bool is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list, VectorSet& not_peel );


src/share/vm/opto/loopnode.hpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File