6894778 Cdiff: src/share/vm/opto/loopnode.hpp

*** 28,37 ****
--- 28,38 ----
  class IdealLoopTree;
  class LoopNode;
  class Node;
  class PhaseIdealLoop;
  class VectorSet;
+ class Invariance;
  struct small_cache;

  //
  //                  I D E A L I Z E D   L O O P S
  //
*** 323,332 ****
--- 324,337 ----
    // Split shared headers and insert loop landing pads.
    // Insert a LoopNode to replace the RegionNode.
    // Returns TRUE if loop tree is structurally changed.
    bool beautify_loops( PhaseIdealLoop *phase );

+   // Perform optimization to use the loop predicates for null checks and range checks.
+   // Applies to any loop level (not just the innermost one)
+   bool loop_predication( PhaseIdealLoop *phase);
+ 
    // Perform iteration-splitting on inner loops.  Split iterations to
    // avoid range checks or one-shot null checks.  Returns false if the
    // current round of loop opts should stop.
    bool iteration_split( PhaseIdealLoop *phase, Node_List &old_new );

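
The new loop_predication() entry point drives the transformation whose source-level effect is sketched below. This is an illustrative hand-written C++ sketch, not VM code; deoptimize() stands in for an uncommon trap, and in the VM a failed predicate falls back to the interpreter, which still performs the per-iteration checks.

    // Illustrative sketch only -- ordinary C++, not HotSpot code.
    #include <cassert>
    #include <cstddef>

    static bool deoptimized = false;
    static void deoptimize() { deoptimized = true; }

    // Before predication: the range check runs on every iteration.
    static int sum_before(const int* a, size_t len, size_t n) {
      int s = 0;
      for (size_t i = 0; i < n; i++) {
        if (i >= len) { deoptimize(); return s; }   // per-iteration range check
        s += a[i];
      }
      return s;
    }

    // After predication (conceptually): one hoisted guard covers all
    // iterations, so the check inside the loop body can be eliminated.
    static int sum_after(const int* a, size_t len, size_t n) {
      if (n > len) { deoptimize(); return 0; }      // hoisted predicate
      int s = 0;
      for (size_t i = 0; i < n; i++)
        s += a[i];                                  // no check in the loop body
      return s;
    }

    int main() {
      int a[4] = {1, 2, 3, 4};
      assert(sum_before(a, 4, 4) == sum_after(a, 4, 4));
      assert(!deoptimized);
      return 0;
    }
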
*** 393,402 ****
--- 398,410 ----
    // one array base can be aligned in a loop (unless the VM guarantees
    // mutual alignment).  Note that if we vectorize short memory ops
    // into longer memory ops, we may want to increase alignment.
    bool policy_align( PhaseIdealLoop *phase ) const;

+   // Return TRUE if "iff" is a range check.
+   bool is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const;
+ 
    // Compute loop trip count from profile data
    void compute_profile_trip_cnt( PhaseIdealLoop *phase );

    // Reassociate invariant expressions.
    void reassociate_invariants(PhaseIdealLoop *phase);
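
For context on what is_range_check_if() has to recognize: in the ideal graph a bounds check of the form 0 <= i && i < length is normally folded into a single unsigned comparison against the length, guarding an uncommon-trap projection. The snippet below is a minimal stand-alone illustration of that unsigned-compare equivalence, not VM code.

    // Minimal sketch of the unsigned-compare trick behind a range-check if.
    #include <cassert>

    static bool in_bounds(int i, int len) {
      // For len >= 0, (unsigned)i < (unsigned)len is equivalent to
      // (i >= 0 && i < len): a negative i wraps to a very large unsigned value.
      return (unsigned)i < (unsigned)len;
    }

    int main() {
      assert( in_bounds(3, 10));
      assert(!in_bounds(-1, 10));   // negative index rejected
      assert(!in_bounds(10, 10));   // index == length rejected
      return 0;
    }
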
*** 519,531 ****
      }
      return find_non_split_ctrl(n);
    }
    Node *dom_lca_for_get_late_ctrl_internal( Node *lca, Node *n, Node *tag );

-   // true if CFG node d dominates CFG node n
-   bool is_dominator(Node *d, Node *n);
- 
    // Helper function for directing control inputs away from CFG split
    // points.
    Node *find_non_split_ctrl( Node *ctrl ) const {
      if (ctrl != NULL) {
        if (ctrl->is_MultiBranch()) {
--- 527,536 ----
*** 570,579 ****
--- 575,595 ----
      _nodes.map( i->_idx, (Node*)((intptr_t)n + 1) );
      assert(has_node(i) && has_ctrl(i), "");
      assert(n == find_non_split_ctrl(n), "must return legal ctrl" );
      return n;
    }
+   // true if CFG node d dominates CFG node n
+   bool is_dominator(Node *d, Node *n);
+   // return get_ctrl for a data node and self(n) for a CFG node
+   Node* ctrl_or_self(Node* n) {
+     if( has_ctrl(n) )
+       return get_ctrl(n);
+     else {
+       assert (n->is_CFG(), "must be a CFG node");
+       return n;
+     }
+   }

  private:
    Node *get_ctrl_no_update( Node *i ) const {
      assert( has_ctrl(i), "" );
      Node *n = (Node*)(((intptr_t)_nodes[i->_idx]) & ~1);
*** 598,608 ****
      _nodes.map(n->_idx, (Node*)loop);
    }
    // Lazy-dazy update of 'get_ctrl' and 'idom_at' mechanisms.  Replace
    // the 'old_node' with 'new_node'.  Kill old-node.  Add a reference
    // from old_node to new_node to support the lazy update.  Reference
!   // replaces loop reference, since that is not neede for dead node.
  public:
    void lazy_update( Node *old_node, Node *new_node ) {
      assert( old_node != new_node, "no cycles please" );
      //old_node->set_req( 1, new_node /*NO DU INFO*/ );
      // Nodes always have DU info now, so re-use the side array slot
--- 614,624 ----
      _nodes.map(n->_idx, (Node*)loop);
    }
    // Lazy-dazy update of 'get_ctrl' and 'idom_at' mechanisms.  Replace
    // the 'old_node' with 'new_node'.  Kill old-node.  Add a reference
    // from old_node to new_node to support the lazy update.  Reference
!   // replaces loop reference, since that is not needed for dead node.
  public:
    void lazy_update( Node *old_node, Node *new_node ) {
      assert( old_node != new_node, "no cycles please" );
      //old_node->set_req( 1, new_node /*NO DU INFO*/ );
      // Nodes always have DU info now, so re-use the side array slot
*** 677,691 ****
      PhaseTransform(Ideal_Loop),
      _igvn(igvn),
      _dom_lca_tags(C->comp_arena()),
      _verify_me(NULL),
      _verify_only(true) {
!     build_and_optimize(false);
    }

    // build the loop tree and perform any requested optimizations
!   void build_and_optimize(bool do_split_if);

  public:
    // Dominators for the sea of nodes
    void Dominators();
    Node *dom_lca( Node *n1, Node *n2 ) const {
--- 693,707 ----
      PhaseTransform(Ideal_Loop),
      _igvn(igvn),
      _dom_lca_tags(C->comp_arena()),
      _verify_me(NULL),
      _verify_only(true) {
!     build_and_optimize(false, false);
    }

    // build the loop tree and perform any requested optimizations
!   void build_and_optimize(bool do_split_if, bool do_loop_pred);

  public:
    // Dominators for the sea of nodes
    void Dominators();
    Node *dom_lca( Node *n1, Node *n2 ) const {
*** 692,718 ****
      return find_non_split_ctrl(dom_lca_internal(n1, n2));
    }
    Node *dom_lca_internal( Node *n1, Node *n2 ) const;

    // Compute the Ideal Node to Loop mapping
!   PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs) :
      PhaseTransform(Ideal_Loop),
      _igvn(igvn),
      _dom_lca_tags(C->comp_arena()),
      _verify_me(NULL),
      _verify_only(false) {
!     build_and_optimize(do_split_ifs);
    }

    // Verify that verify_me made the same decisions as a fresh run.
    PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify_me) :
      PhaseTransform(Ideal_Loop),
      _igvn(igvn),
      _dom_lca_tags(C->comp_arena()),
      _verify_me(verify_me),
      _verify_only(false) {
!     build_and_optimize(false);
    }

    // Build and verify the loop tree without modifying the graph.  This
    // is useful to verify that all inputs properly dominate their uses.
    static void verify(PhaseIterGVN& igvn) {
--- 708,734 ----
      return find_non_split_ctrl(dom_lca_internal(n1, n2));
    }
    Node *dom_lca_internal( Node *n1, Node *n2 ) const;

    // Compute the Ideal Node to Loop mapping
!   PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs, bool do_loop_pred) :
      PhaseTransform(Ideal_Loop),
      _igvn(igvn),
      _dom_lca_tags(C->comp_arena()),
      _verify_me(NULL),
      _verify_only(false) {
!     build_and_optimize(do_split_ifs, do_loop_pred);
    }

    // Verify that verify_me made the same decisions as a fresh run.
    PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify_me) :
      PhaseTransform(Ideal_Loop),
      _igvn(igvn),
      _dom_lca_tags(C->comp_arena()),
      _verify_me(verify_me),
      _verify_only(false) {
!     build_and_optimize(false, false);
    }

    // Build and verify the loop tree without modifying the graph.  This
    // is useful to verify that all inputs properly dominate their uses.
    static void verify(PhaseIterGVN& igvn) {
*** 788,797 ****
--- 804,837 ----
    bool is_scaled_iv(Node* exp, Node* iv, int* p_scale);

    // Return true if exp is a scaled induction var plus (or minus) constant
    bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0);

+   // Return true if proj is for "proj->[region->..]call_uct"
+   bool is_uncommon_trap_proj(ProjNode* proj, bool must_reason_predicate = false);
+   // Return true for "if(test)-> proj -> ...
+   //                          |
+   //                          V
+   //                  other_proj->[region->..]call_uct"
+   bool is_uncommon_trap_if_pattern(ProjNode* proj, bool must_reason_predicate = false);
+   // Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted
+   ProjNode* create_new_if_for_predicate(ProjNode* cont_proj);
+   // Find a good location to insert a predicate
+   ProjNode* find_predicate_insertion_point(Node* start_c);
+   // Construct a range check for a predicate if
+   BoolNode* rc_predicate(Node* ctrl,
+                          int scale, Node* offset,
+                          Node* init, Node* limit, Node* stride,
+                          Node* range);
+ 
+   // Implementation of the loop predication to promote checks outside the loop
+   bool loop_predication_impl(IdealLoopTree *loop);
+ 
+   // Helper function to collect predicates for eliminating the useless ones
+   void collect_potentially_useful_predicates(IdealLoopTree *loop, Unique_Node_List &predicate_opaque1);
+   void eliminate_useless_predicates();
+ 
    // Eliminate range-checks and other trip-counter vs loop-invariant tests.
    void do_range_check( IdealLoopTree *loop, Node_List &old_new );

    // Create a slow version of the loop by cloning the loop
    // and inserting an if to select fast-slow versions.
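
rc_predicate() builds the test for a hoisted range check over an index expression of the form scale*i + offset, where i runs from init toward limit in steps of stride and the result must stay inside [0, range). Because a linear expression is monotonic in i, checking the two extreme iterations covers every iteration. The helper below evaluates that condition for concrete numbers; it is an arithmetic sketch only, not the node-building code, and the names are invented for illustration.

    // Arithmetic sketch of the condition a hoisted range-check predicate
    // must cover; plain C++, not HotSpot code.
    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    static bool predicate_holds(int64_t scale, int64_t offset,
                                int64_t init, int64_t limit, int64_t stride,
                                int64_t range) {
      assert(stride != 0);
      // Number of iterations of the counted loop, rounded up.
      int64_t trips = (stride > 0) ? (limit - init + stride - 1) / stride
                                   : (init - limit + (-stride) - 1) / (-stride);
      if (trips <= 0) return true;               // loop body never runs
      // Last value the induction variable actually takes.
      int64_t last = init + (trips - 1) * stride;
      int64_t lo   = std::min(scale * init + offset, scale * last + offset);
      int64_t hi   = std::max(scale * init + offset, scale * last + offset);
      // Checking the two endpoints covers every iteration in between.
      return lo >= 0 && hi < range;
    }

    int main() {
      // i = 0 .. 9 indexing a[i] with range 10: predicate holds.
      assert( predicate_holds(1, 0, 0, 10, 1, 10));
      // Same loop against range 9: some iteration would be out of bounds.
      assert(!predicate_holds(1, 0, 0, 10, 1, 9));
      return 0;
    }
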
*** 904,914 ****
    const TypeInt* filtered_type( Node *n ) { return filtered_type(n, NULL); }
    // Helpers for filtered type
    const TypeInt* filtered_type_from_dominators( Node* val, Node *val_ctrl);

    // Helper functions
-   void register_new_node( Node *n, Node *blk );
    Node *spinup( Node *iff, Node *new_false, Node *new_true, Node *region, Node *phi, small_cache *cache );
    Node *find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true );
    void handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true );
    bool split_up( Node *n, Node *blk1, Node *blk2 );
    void sink_use( Node *use, Node *post_loop );
--- 944,953 ----
*** 916,925 ****
--- 955,965 ----

    bool _created_loop_node;
  public:
    void set_created_loop_node() { _created_loop_node = true; }
    bool created_loop_node() { return _created_loop_node; }
+   void register_new_node( Node *n, Node *blk );

  #ifndef PRODUCT
    void dump( ) const;
    void dump( IdealLoopTree *loop, uint rpo_idx, Node_List &rpo_list ) const;
    void rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const;