< prev index next >

src/hotspot/share/opto/compile.cpp

Print this page




2167 
2168   set_inlining_incrementally(false);
2169 }
2170 
2171 
// Run PhaseIdealLoop repeatedly while it keeps making major progress,
// bounded by the remaining per-compilation budget _loop_opts_cnt.
// Returns false if the compilation failed during a pass, true otherwise.
2172 bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) {
2173   if(_loop_opts_cnt > 0) {
2174     debug_only( int cnt = 0; );
2175     while(major_progress() && (_loop_opts_cnt > 0)) {
2176       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
       // Debug-only safety net: more than 40 passes means the loop
       // optimizer is cycling instead of converging.
2177       assert( cnt++ < 40, "infinite cycle in loop optimization" );
2178       PhaseIdealLoop ideal_loop(igvn, mode);
       // Each constructed PhaseIdealLoop pass consumes one unit of budget.
2179       _loop_opts_cnt--;
2180       if (failing())  return false;
       // Only record an IGV snapshot when the pass actually changed the graph.
2181       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2182     }
2183   }
2184   return true;
2185 }
2186 

















2187 //------------------------------Optimize---------------------------------------
2188 // Given a graph, optimize it.
2189 void Compile::Optimize() {
2190   TracePhase tp("optimizer", &timers[_t_optimizer]);
2191 
2192 #ifndef PRODUCT
2193   if (_directive->BreakAtCompileOption) {
2194     BREAKPOINT;
2195   }
2196 
2197 #endif
2198 
2199 #ifdef ASSERT
2200   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2201   bs->verify_gc_barriers(this, BarrierSetC2::BeforeOptimize);
2202 #endif
2203 
2204   ResourceMark rm;
2205 
2206   print_inlining_reinit();


2227 
2228   inline_incrementally(igvn);
2229 
2230   print_method(PHASE_INCREMENTAL_INLINE, 2);
2231 
2232   if (failing())  return;
2233 
2234   if (eliminate_boxing()) {
2235     // Inline valueOf() methods now.
2236     inline_boxing_calls(igvn);
2237 
2238     if (AlwaysIncrementalInline) {
2239       inline_incrementally(igvn);
2240     }
2241 
2242     print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
2243 
2244     if (failing())  return;
2245   }
2246 




2247   // Remove the speculative part of types and clean up the graph from
2248   // the extra CastPP nodes whose only purpose is to carry them. Do
2249   // that early so that optimizations are not disrupted by the extra
2250   // CastPP nodes.
2251   remove_speculative_types(igvn);
2252 
2253   // No more new expensive nodes will be added to the list from here
2254   // so keep only the actual candidates for optimizations.
2255   cleanup_expensive_nodes(igvn);
2256 
2257   if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
2258     Compile::TracePhase tp("", &timers[_t_renumberLive]);
2259     initial_gvn()->replace_with(&igvn);
2260     for_igvn()->clear();
2261     Unique_Node_List new_worklist(C->comp_arena());
2262     {
2263       ResourceMark rm;
2264       PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
2265     }
2266     set_for_igvn(&new_worklist);


3231     break;
3232   }
3233 
3234   case Op_Proj: {
3235     if (OptimizeStringConcat) {
3236       ProjNode* p = n->as_Proj();
3237       if (p->_is_io_use) {
3238         // Separate projections were used for the exception path which
3239         // are normally removed by a late inline.  If it wasn't inlined
3240         // then they will hang around and should just be replaced with
3241         // the original one.
3242         Node* proj = NULL;
3243         // Replace with just one
3244         for (SimpleDUIterator i(p->in(0)); i.has_next(); i.next()) {
3245           Node *use = i.get();
3246           if (use->is_Proj() && p != use && use->as_Proj()->_con == p->_con) {
3247             proj = use;
3248             break;
3249           }
3250         }
3251         assert(proj != NULL, "must be found");

3252         p->subsume_by(proj, this);

3253       }
3254     }
3255     break;
3256   }
3257 
3258   case Op_Phi:
3259     if (n->as_Phi()->bottom_type()->isa_narrowoop() || n->as_Phi()->bottom_type()->isa_narrowklass()) {
3260       // The EncodeP optimization may create Phi with the same edges
3261       // for all paths. It is not handled well by Register Allocator.
3262       Node* unique_in = n->in(1);
3263       assert(unique_in != NULL, "");
3264       uint cnt = n->req();
3265       for (uint i = 2; i < cnt; i++) {
3266         Node* m = n->in(i);
3267         assert(m != NULL, "");
3268         if (unique_in != m)
3269           unique_in = NULL;
3270       }
3271       if (unique_in != NULL) {
3272         n->subsume_by(unique_in, this);




2167 
2168   set_inlining_incrementally(false);
2169 }
2170 
2171 
// Run PhaseIdealLoop repeatedly while it keeps making major progress,
// bounded by the remaining per-compilation budget _loop_opts_cnt.
// Returns false if the compilation failed during a pass, true otherwise.
2172 bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) {
2173   if(_loop_opts_cnt > 0) {
2174     debug_only( int cnt = 0; );
2175     while(major_progress() && (_loop_opts_cnt > 0)) {
2176       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
       // Debug-only safety net: more than 40 passes means the loop
       // optimizer is cycling instead of converging.
2177       assert( cnt++ < 40, "infinite cycle in loop optimization" );
2178       PhaseIdealLoop ideal_loop(igvn, mode);
       // Each constructed PhaseIdealLoop pass consumes one unit of budget.
2179       _loop_opts_cnt--;
2180       if (failing())  return false;
       // Only record an IGV snapshot when the pass actually changed the graph.
2181       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2182     }
2183   }
2184   return true;
2185 }
2186 
2187 // Remove edges from "root" to each SafePoint at a backward branch.
2188 // They were inserted during parsing (see add_safepoint()) to make
2189 // infinite loops without calls or exceptions visible to root, i.e.,
2190 // useful.
// Called once all inlining is over (see Optimize()), so these keep-alive
// edges are no longer needed and would only pin the SafePoints in the graph.
2191 void Compile::remove_root_to_sfpts_edges() {
2192   Node *r = root();
2193   if (r != NULL) {
       // Scan only the precedence edges: indices [req(), len()) lie past
       // the required inputs.
2194     for (uint i = r->req(); i < r->len(); ++i) {
2195       Node *n = r->in(i);
2196       if (n != NULL && n->is_SafePoint()) {
2197         r->rm_prec(i);
         // Step back so the slot vacated by rm_prec() is re-examined on the
         // next iteration (presumably rm_prec() compacts the prec edges —
         // NOTE(review): confirm against Node::rm_prec).
2198         --i;
2199       }
2200     }
2201   }
2202 }
2203 
2204 //------------------------------Optimize---------------------------------------
2205 // Given a graph, optimize it.
2206 void Compile::Optimize() {
2207   TracePhase tp("optimizer", &timers[_t_optimizer]);
2208 
2209 #ifndef PRODUCT
2210   if (_directive->BreakAtCompileOption) {
2211     BREAKPOINT;
2212   }
2213 
2214 #endif
2215 
2216 #ifdef ASSERT
2217   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2218   bs->verify_gc_barriers(this, BarrierSetC2::BeforeOptimize);
2219 #endif
2220 
2221   ResourceMark rm;
2222 
2223   print_inlining_reinit();


2244 
2245   inline_incrementally(igvn);
2246 
2247   print_method(PHASE_INCREMENTAL_INLINE, 2);
2248 
2249   if (failing())  return;
2250 
2251   if (eliminate_boxing()) {
2252     // Inline valueOf() methods now.
2253     inline_boxing_calls(igvn);
2254 
2255     if (AlwaysIncrementalInline) {
2256       inline_incrementally(igvn);
2257     }
2258 
2259     print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
2260 
2261     if (failing())  return;
2262   }
2263 
2264   // Now that all inlining is over, cut edge from root to loop
2265   // safepoints
2266   remove_root_to_sfpts_edges();
2267 
2268   // Remove the speculative part of types and clean up the graph from
2269   // the extra CastPP nodes whose only purpose is to carry them. Do
2270   // that early so that optimizations are not disrupted by the extra
2271   // CastPP nodes.
2272   remove_speculative_types(igvn);
2273 
2274   // No more new expensive nodes will be added to the list from here
2275   // so keep only the actual candidates for optimizations.
2276   cleanup_expensive_nodes(igvn);
2277 
2278   if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
2279     Compile::TracePhase tp("", &timers[_t_renumberLive]);
2280     initial_gvn()->replace_with(&igvn);
2281     for_igvn()->clear();
2282     Unique_Node_List new_worklist(C->comp_arena());
2283     {
2284       ResourceMark rm;
2285       PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
2286     }
2287     set_for_igvn(&new_worklist);


3252     break;
3253   }
3254 
3255   case Op_Proj: {
3256     if (OptimizeStringConcat) {
3257       ProjNode* p = n->as_Proj();
3258       if (p->_is_io_use) {
3259         // Separate projections were used for the exception path which
3260         // are normally removed by a late inline.  If it wasn't inlined
3261         // then they will hang around and should just be replaced with
3262         // the original one.
3263         Node* proj = NULL;
3264         // Replace with just one
3265         for (SimpleDUIterator i(p->in(0)); i.has_next(); i.next()) {
3266           Node *use = i.get();
3267           if (use->is_Proj() && p != use && use->as_Proj()->_con == p->_con) {
3268             proj = use;
3269             break;
3270           }
3271         }
3272         assert(proj != NULL || p->_con == TypeFunc::I_O, "io may be dropped at an infinite loop");
3273         if (proj != NULL) {
3274           p->subsume_by(proj, this);
3275         }
3276       }
3277     }
3278     break;
3279   }
3280 
3281   case Op_Phi:
3282     if (n->as_Phi()->bottom_type()->isa_narrowoop() || n->as_Phi()->bottom_type()->isa_narrowklass()) {
3283       // The EncodeP optimization may create Phi with the same edges
3284       // for all paths. It is not handled well by Register Allocator.
3285       Node* unique_in = n->in(1);
3286       assert(unique_in != NULL, "");
3287       uint cnt = n->req();
3288       for (uint i = 2; i < cnt; i++) {
3289         Node* m = n->in(i);
3290         assert(m != NULL, "");
3291         if (unique_in != m)
3292           unique_in = NULL;
3293       }
3294       if (unique_in != NULL) {
3295         n->subsume_by(unique_in, this);


< prev index next >