
src/hotspot/share/opto/loopnode.cpp





2757   // IdealLoopTree entries.  Data nodes are NOT walked.
2758   build_loop_tree();
2759   // Check for bailout, and return
2760   if (C->failing()) {
2761     return;
2762   }
2763 
2764   // No loops after all
2765   if( !_ltree_root->_child && !_verify_only ) C->set_has_loops(false);
2766 
2767   // There should always be an outer loop containing the Root and Return nodes.
2768   // If not, we have a degenerate empty program.  Bail out in this case.
2769   if (!has_node(C->root())) {
2770     if (!_verify_only) {
2771       C->clear_major_progress();
2772       C->record_method_not_compilable("empty program detected during loop optimization");
2773     }
2774     return;
2775   }
2776 

2777   // Nothing to do, so get out
2778   bool stop_early = !C->has_loops() && !skip_loop_opts && !do_split_ifs && !_verify_me && !_verify_only;

2779   bool do_expensive_nodes = C->should_optimize_expensive_nodes(_igvn);

2780   if (stop_early && !do_expensive_nodes) {
2781     _igvn.optimize();           // Cleanup NeverBranches
2782     return;
2783   }
2784 
2785   // Set loop nesting depth
2786   _ltree_root->set_nest( 0 );
2787 
2788   // Split shared headers and insert loop landing pads.
2789   // Do not bother doing this on the Root loop of course.
2790   if( !_verify_me && !_verify_only && _ltree_root->_child ) {
2791     C->print_method(PHASE_BEFORE_BEAUTIFY_LOOPS, 3);
2792     if( _ltree_root->_child->beautify_loops( this ) ) {
2793       // Re-build loop tree!
2794       _ltree_root->_child = NULL;
2795       _nodes.clear();
2796       reallocate_preorders();
2797       build_loop_tree();
2798       // Check for bailout, and return
2799       if (C->failing()) {


2837 
2838   // Walk the DATA nodes and place into loops.  Find earliest control
2839   // node.  For CFG nodes, the _nodes array starts out and remains
2840   // holding the associated IdealLoopTree pointer.  For DATA nodes, the
2841   // _nodes array holds the earliest legal controlling CFG node.
2842 
2843   // Allocate stack with enough space to avoid frequent realloc
2844   int stack_size = (C->live_nodes() >> 1) + 16; // (live_nodes>>1)+16 from Java2D stats
2845   Node_Stack nstack( a, stack_size );
2846 
2847   visited.Clear();
2848   Node_List worklist(a);
2849   // Don't need C->root() on worklist since
2850   // it will be processed among C->top() inputs
2851   worklist.push( C->top() );
2852   visited.set( C->top()->_idx ); // Set C->top() as visited now
2853   build_loop_early( visited, worklist, nstack );
2854 
2855   // Given early legal placement, try finding counted loops.  This placement
2856   // is good enough to discover most loop invariants.
2857   if( !_verify_me && !_verify_only )
2858     _ltree_root->counted_loop( this );

2859 
2860   // Find latest loop placement.  Find ideal loop placement.
2861   visited.Clear();
2862   init_dom_lca_tags();
2863   // Need C->root() on worklist when processing outs
2864   worklist.push( C->root() );
2865   NOT_PRODUCT( C->verify_graph_edges(); )
2866   worklist.push( C->top() );
2867   build_loop_late( visited, worklist, nstack );
2868 
2869   if (_verify_only) {
2870     // restore major progress flag
2871     for (int i = 0; i < old_progress; i++)
2872       C->set_major_progress();
2873     assert(C->unique() == unique, "verification mode made Nodes? ? ?");
2874     assert(_igvn._worklist.size() == orig_worklist_size, "shouldn't push anything");
2875     return;
2876   }
2877 
2878   // clear out the dead code after build_loop_late


2909   if(TraceLoopOpts && C->has_loops()) {
2910     _ltree_root->dump();
2911   }
2912 #endif
2913 
2914   if (skip_loop_opts) {
2915     // restore major progress flag
2916     for (int i = 0; i < old_progress; i++) {
2917       C->set_major_progress();
2918     }
2919 
2920     // Cleanup any modified bits
2921     _igvn.optimize();
2922 
2923     if (C->log() != NULL) {
2924       log_loop_tree(_ltree_root, _ltree_root, C->log());
2925     }
2926     return;
2927   }
2928 








2929   if (ReassociateInvariants) {
2930     // Reassociate invariants and prep for split_thru_phi
2931     for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
2932       IdealLoopTree* lpt = iter.current();
2933       bool is_counted = lpt->is_counted();
2934       if (!is_counted || !lpt->is_inner()) continue;
2935 
2936       // check for vectorized loops, any reassociation of invariants was already done
2937       if (is_counted && lpt->_head->as_CountedLoop()->do_unroll_only()) continue;
2938 
2939       lpt->reassociate_invariants(this);
2940 
2941       // Because RCE opportunities can be masked by split_thru_phi,
2942       // look for RCE candidates and inhibit split_thru_phi
2943       // on just their loop-phi's for this pass of loop opts
2944       if (SplitIfBlocks && do_split_ifs) {
2945         if (lpt->policy_range_check(this)) {
2946           lpt->_rce_candidate = 1; // = true
2947         }
2948       }


4119           _deadlist.push(use);
4120         }
4121       } else {
4122         // All of n's children have been processed, complete post-processing.
4123         build_loop_late_post(n);
4124         if (nstack.is_empty()) {
4125           // Finished all nodes on stack.
4126           // Process next node on the worklist.
4127           break;
4128         }
4129         // Get saved parent node and next use's index. Visit the rest of uses.
4130         n   = nstack.node();
4131         cnt = n->outcnt();
4132         i   = nstack.index();
4133         nstack.pop();
4134       }
4135     }
4136   }
4137 }
4138 
4139 // Verify that no data node is schedules in the outer loop of a strip
4140 // mined loop.
4141 void PhaseIdealLoop::verify_strip_mined_scheduling(Node *n, Node* least) {
4142 #ifdef ASSERT
4143   if (get_loop(least)->_nest == 0) {
4144     return;
4145   }
4146   IdealLoopTree* loop = get_loop(least);
4147   Node* head = loop->_head;
4148   if (head->is_OuterStripMinedLoop()) {


4149     Node* sfpt = head->as_Loop()->outer_safepoint();
4150     ResourceMark rm;
4151     Unique_Node_List wq;
4152     wq.push(sfpt);
4153     for (uint i = 0; i < wq.size(); i++) {
4154       Node *m = wq.at(i);
4155       for (uint i = 1; i < m->req(); i++) {
4156         Node* nn = m->in(i);
4157         if (nn == n) {
4158           return;
4159         }
4160         if (nn != NULL && has_ctrl(nn) && get_loop(get_ctrl(nn)) == loop) {
4161           wq.push(nn);
4162         }
4163       }
4164     }
4165     ShouldNotReachHere();
4166   }
4167 #endif
4168 }




2757   // IdealLoopTree entries.  Data nodes are NOT walked.
2758   build_loop_tree();
2759   // Check for bailout, and return
2760   if (C->failing()) {
2761     return;
2762   }
2763 
2764   // No loops after all
2765   if( !_ltree_root->_child && !_verify_only ) C->set_has_loops(false);
2766 
2767   // There should always be an outer loop containing the Root and Return nodes.
2768   // If not, we have a degenerate empty program.  Bail out in this case.
2769   if (!has_node(C->root())) {
2770     if (!_verify_only) {
2771       C->clear_major_progress();
2772       C->record_method_not_compilable("empty program detected during loop optimization");
2773     }
2774     return;
2775   }
2776 
2777   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2778   // Nothing to do, so get out
2779   bool stop_early = !C->has_loops() && !skip_loop_opts && !do_split_ifs && !_verify_me && !_verify_only &&
2780     !bs->is_gc_specific_loop_opts_pass(mode);
2781   bool do_expensive_nodes = C->should_optimize_expensive_nodes(_igvn);
2782   bool strip_mined_loops_expanded = bs->strip_mined_loops_expanded(mode);
2783   if (stop_early && !do_expensive_nodes) {
2784     _igvn.optimize();           // Cleanup NeverBranches
2785     return;
2786   }
2787 
2788   // Set loop nesting depth
2789   _ltree_root->set_nest( 0 );
2790 
2791   // Split shared headers and insert loop landing pads.
2792   // Do not bother doing this on the Root loop of course.
2793   if( !_verify_me && !_verify_only && _ltree_root->_child ) {
2794     C->print_method(PHASE_BEFORE_BEAUTIFY_LOOPS, 3);
2795     if( _ltree_root->_child->beautify_loops( this ) ) {
2796       // Re-build loop tree!
2797       _ltree_root->_child = NULL;
2798       _nodes.clear();
2799       reallocate_preorders();
2800       build_loop_tree();
2801       // Check for bailout, and return
2802       if (C->failing()) {


2840 
2841   // Walk the DATA nodes and place into loops.  Find earliest control
2842   // node.  For CFG nodes, the _nodes array starts out and remains
2843   // holding the associated IdealLoopTree pointer.  For DATA nodes, the
2844   // _nodes array holds the earliest legal controlling CFG node.
2845 
2846   // Allocate stack with enough space to avoid frequent realloc
2847   int stack_size = (C->live_nodes() >> 1) + 16; // (live_nodes>>1)+16 from Java2D stats
2848   Node_Stack nstack( a, stack_size );
2849 
2850   visited.Clear();
2851   Node_List worklist(a);
2852   // Don't need C->root() on worklist since
2853   // it will be processed among C->top() inputs
2854   worklist.push( C->top() );
2855   visited.set( C->top()->_idx ); // Set C->top() as visited now
2856   build_loop_early( visited, worklist, nstack );
2857 
2858   // Given early legal placement, try finding counted loops.  This placement
2859   // is good enough to discover most loop invariants.
2860   if (!_verify_me && !_verify_only && !strip_mined_loops_expanded) {
2861     _ltree_root->counted_loop( this );
2862   }
2863 
2864   // Find latest loop placement.  Find ideal loop placement.
2865   visited.Clear();
2866   init_dom_lca_tags();
2867   // Need C->root() on worklist when processing outs
2868   worklist.push( C->root() );
2869   NOT_PRODUCT( C->verify_graph_edges(); )
2870   worklist.push( C->top() );
2871   build_loop_late( visited, worklist, nstack );
2872 
2873   if (_verify_only) {
2874     // restore major progress flag
2875     for (int i = 0; i < old_progress; i++)
2876       C->set_major_progress();
2877     assert(C->unique() == unique, "verification mode made Nodes? ? ?");
2878     assert(_igvn._worklist.size() == orig_worklist_size, "shouldn't push anything");
2879     return;
2880   }
2881 
2882   // clear out the dead code after build_loop_late


2913   if(TraceLoopOpts && C->has_loops()) {
2914     _ltree_root->dump();
2915   }
2916 #endif
2917 
2918   if (skip_loop_opts) {
2919     // restore major progress flag
2920     for (int i = 0; i < old_progress; i++) {
2921       C->set_major_progress();
2922     }
2923 
2924     // Cleanup any modified bits
2925     _igvn.optimize();
2926 
2927     if (C->log() != NULL) {
2928       log_loop_tree(_ltree_root, _ltree_root, C->log());
2929     }
2930     return;
2931   }
2932 
2933   if (bs->optimize_loops(this, mode, visited, nstack, worklist)) {
2934     _igvn.optimize();
2935     if (C->log() != NULL) {
2936       log_loop_tree(_ltree_root, _ltree_root, C->log());
2937     }
2938     return;
2939   }
2940 
2941   if (ReassociateInvariants) {
2942     // Reassociate invariants and prep for split_thru_phi
2943     for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
2944       IdealLoopTree* lpt = iter.current();
2945       bool is_counted = lpt->is_counted();
2946       if (!is_counted || !lpt->is_inner()) continue;
2947 
2948       // check for vectorized loops, any reassociation of invariants was already done
2949       if (is_counted && lpt->_head->as_CountedLoop()->do_unroll_only()) continue;
2950 
2951       lpt->reassociate_invariants(this);
2952 
2953       // Because RCE opportunities can be masked by split_thru_phi,
2954       // look for RCE candidates and inhibit split_thru_phi
2955       // on just their loop-phi's for this pass of loop opts
2956       if (SplitIfBlocks && do_split_ifs) {
2957         if (lpt->policy_range_check(this)) {
2958           lpt->_rce_candidate = 1; // = true
2959         }
2960       }


4131           _deadlist.push(use);
4132         }
4133       } else {
4134         // All of n's children have been processed, complete post-processing.
4135         build_loop_late_post(n);
4136         if (nstack.is_empty()) {
4137           // Finished all nodes on stack.
4138           // Process next node on the worklist.
4139           break;
4140         }
4141         // Get saved parent node and next use's index. Visit the rest of uses.
4142         n   = nstack.node();
4143         cnt = n->outcnt();
4144         i   = nstack.index();
4145         nstack.pop();
4146       }
4147     }
4148   }
4149 }
4150 
4151 // Verify that no data node is scheduled in the outer loop of a strip
4152 // mined loop.
4153 void PhaseIdealLoop::verify_strip_mined_scheduling(Node *n, Node* least) {
4154 #ifdef ASSERT
4155   if (get_loop(least)->_nest == 0) {
4156     return;
4157   }
4158   IdealLoopTree* loop = get_loop(least);
4159   Node* head = loop->_head;
4160   if (head->is_OuterStripMinedLoop() &&
4161       // Verification can't be applied to fully built strip mined loops
4162       head->as_Loop()->outer_loop_end()->in(1)->find_int_con(-1) == 0) {
4163     Node* sfpt = head->as_Loop()->outer_safepoint();
4164     ResourceMark rm;
4165     Unique_Node_List wq;
4166     wq.push(sfpt);
4167     for (uint i = 0; i < wq.size(); i++) {
4168       Node *m = wq.at(i);
4169       for (uint i = 1; i < m->req(); i++) {
4170         Node* nn = m->in(i);
4171         if (nn == n) {
4172           return;
4173         }
4174         if (nn != NULL && has_ctrl(nn) && get_loop(get_ctrl(nn)) == loop) {
4175           wq.push(nn);
4176         }
4177       }
4178     }
4179     ShouldNotReachHere();
4180   }
4181 #endif
4182 }
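
Note for readers following the new check in verify_strip_mined_scheduling: the walk from the outer safepoint is a plain de-duplicated worklist traversal over data inputs, stopping as soon as the node under verification is found. The sketch below reproduces only that traversal pattern with a hypothetical standalone Node type and reaches() helper; these are illustrative assumptions, not HotSpot classes, and the real code additionally filters pushed nodes by loop membership (get_loop(get_ctrl(nn)) == loop).

// Illustrative sketch only: de-duplicated worklist walk over node inputs,
// mirroring the traversal verify_strip_mined_scheduling performs from the
// outer safepoint. Node and reaches() are hypothetical, not HotSpot types.
#include <cstddef>
#include <unordered_set>
#include <vector>

struct Node {
  std::vector<Node*> in;  // input edges; slot 0 is control, like HotSpot's req() layout
};

// Returns true if 'target' is reachable from 'start' by following data inputs,
// visiting each node at most once (the role Unique_Node_List plays in the real code).
static bool reaches(Node* start, Node* target) {
  std::vector<Node*> wq{start};
  std::unordered_set<Node*> seen{start};
  for (std::size_t i = 0; i < wq.size(); i++) {
    Node* m = wq[i];
    for (std::size_t j = 1; j < m->in.size(); j++) {  // skip slot 0 (control), as in the original
      Node* nn = m->in[j];
      if (nn == target) {
        return true;
      }
      if (nn != nullptr && seen.insert(nn).second) {
        wq.push_back(nn);  // real code also requires nn's control to be in the same loop
      }
    }
  }
  return false;
}

int main() {
  Node a, b, sfpt;
  sfpt.in = {nullptr, &b};  // slot 0 reserved for control
  b.in    = {nullptr, &a};
  return reaches(&sfpt, &a) ? 0 : 1;  // 'a' feeds the safepoint, so this returns 0
}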

