
src/share/vm/opto/compile.cpp

 395       if (! useful.member(child)) {
 396         assert(!child->is_top() || child != top(),
 397                "If top is cached in Compile object it is in useful list");
 398         // Only need to remove this out-edge to the useless node
 399         n->raw_del_out(j);
 400         --j;
 401         --max;
 402       }
 403     }
 404     if (n->outcnt() == 1 && n->has_special_unique_user()) {
 405       record_for_igvn(n->unique_out());
 406     }
 407   }
 408   // Remove useless macro and predicate opaq nodes
 409   for (int i = C->macro_count()-1; i >= 0; i--) {
 410     Node* n = C->macro_node(i);
 411     if (!useful.member(n)) {
 412       remove_macro_node(n);
 413     }
 414   }
 415   // Remove useless expensive node
 416   for (int i = C->expensive_count()-1; i >= 0; i--) {
 417     Node* n = C->expensive_node(i);
 418     if (!useful.member(n)) {
 419       remove_expensive_node(n);
 420     }
 421   }
 422   // clean up the late inline lists
 423   remove_useless_late_inlines(&_string_late_inlines, useful);
 424   remove_useless_late_inlines(&_boxing_late_inlines, useful);
 425   remove_useless_late_inlines(&_late_inlines, useful);
 426   debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
 427 }
 428 
 429 //------------------------------frame_size_in_words-----------------------------
 430 // frame_slots in units of words
 431 int Compile::frame_size_in_words() const {
 432   // shift is 0 in LP32 and 1 in LP64
 433   const int shift = (LogBytesPerWord - LogBytesPerInt);
 434   int words = _frame_slots >> shift;


1131   _alias_types   = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
1132   AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType,  grow_ats);
1133   Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1134   {
1135     for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
1136   }
1137   // Initialize the first few types.
1138   _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL);
1139   _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1140   _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1141   _num_alias_types = AliasIdxRaw+1;
1142   // Zero out the alias type cache.
1143   Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
1144   // A NULL adr_type hits in the cache right away.  Preload the right answer.
1145   probe_alias_cache(NULL)->_index = AliasIdxTop;
1146 
1147   _intrinsics = NULL;
1148   _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1149   _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1150   _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1151   register_library_intrinsics();
1152 }
1153 
1154 //---------------------------init_start----------------------------------------
1155 // Install the StartNode on this compile object.
1156 void Compile::init_start(StartNode* s) {
1157   if (failing())
1158     return; // already failing
1159   assert(s == start(), "");
1160 }
1161 
1162 StartNode* Compile::start() const {
1163   assert(!failing(), "");
1164   for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
1165     Node* start = root()->fast_out(i);
1166     if( start->is_Start() )
1167       return start->as_Start();
1168   }
1169   fatal("Did not find Start node!");
1170   return NULL;


1859   // Clean up loose ends, if we are out of space for inlining.
1860   WarmCallInfo* call;
1861   while ((call = pop_warm_call()) != NULL) {
1862     call->make_cold();
1863   }
1864 }
1865 
1866 //---------------------cleanup_loop_predicates-----------------------
1867 // Remove the opaque nodes that protect the predicates so that all unused
1868 // checks and uncommon_traps will be eliminated from the ideal graph
1869 void Compile::cleanup_loop_predicates(PhaseIterGVN &igvn) {
1870   if (predicate_count()==0) return;
1871   for (int i = predicate_count(); i > 0; i--) {
1872     Node * n = predicate_opaque1_node(i-1);
1873     assert(n->Opcode() == Op_Opaque1, "must be");
1874     igvn.replace_node(n, n->in(1));
1875   }
1876   assert(predicate_count()==0, "should be clean!");
1877 }
1878 
1879 // StringOpts and late inlining of string methods
1880 void Compile::inline_string_calls(bool parse_time) {
1881   {
1882     // remove useless nodes to make the usage analysis simpler
1883     ResourceMark rm;
1884     PhaseRemoveUseless pru(initial_gvn(), for_igvn());
1885   }
1886 
1887   {
1888     ResourceMark rm;
1889     print_method(PHASE_BEFORE_STRINGOPTS, 3);
1890     PhaseStringOpts pso(initial_gvn(), for_igvn());
1891     print_method(PHASE_AFTER_STRINGOPTS, 3);
1892   }
1893 
1894   // now inline anything that we skipped the first time around
1895   if (!parse_time) {
1896     _late_inlines_pos = _late_inlines.length();
1897   }
1898 


2201   // peeling, unrolling, etc.
2202   if(loop_opts_cnt > 0) {
2203     debug_only( int cnt = 0; );
2204     while(major_progress() && (loop_opts_cnt > 0)) {
2205       TracePhase t2("idealLoop", &_t_idealLoop, true);
2206       assert( cnt++ < 40, "infinite cycle in loop optimization" );
2207       PhaseIdealLoop ideal_loop( igvn, true);
2208       loop_opts_cnt--;
2209       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2210       if (failing())  return;
2211     }
2212   }
2213 
2214   {
2215     // Verify that all previous optimizations produced a valid graph
2216     // at least to this point, even if no loop optimizations were done.
2217     NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); )
2218     PhaseIdealLoop::verify(igvn);
2219   }
2220 
2221   {
2222     NOT_PRODUCT( TracePhase t2("macroExpand", &_t_macroExpand, TimeCompiler); )
2223     PhaseMacroExpand  mex(igvn);
2224     if (mex.expand_macro_nodes()) {
2225       assert(failing(), "must bail out w/ explicit message");
2226       return;
2227     }
2228   }
2229 
2230  } // (End scope of igvn; run destructor if necessary for asserts.)
2231 
2232   dump_inlining();
2233   // A method with only infinite loops has no edges entering loops from root
2234   {
2235     NOT_PRODUCT( TracePhase t2("graphReshape", &_t_graphReshaping, TimeCompiler); )
2236     if (final_graph_reshaping()) {
2237       assert(failing(), "must bail out w/ explicit message");
2238       return;
2239     }
2240   }


2970     if (n->as_Phi()->bottom_type()->isa_narrowoop() || n->as_Phi()->bottom_type()->isa_narrowklass()) {
2971       // The EncodeP optimization may create Phi with the same edges
2972       // for all paths. It is not handled well by Register Allocator.
2973       Node* unique_in = n->in(1);
2974       assert(unique_in != NULL, "");
2975       uint cnt = n->req();
2976       for (uint i = 2; i < cnt; i++) {
2977         Node* m = n->in(i);
2978         assert(m != NULL, "");
2979         if (unique_in != m)
2980           unique_in = NULL;
2981       }
2982       if (unique_in != NULL) {
2983         n->subsume_by(unique_in, this);
2984       }
2985     }
2986     break;
2987 
2988 #endif
2989 
2990   case Op_ModI:
2991     if (UseDivMod) {
2992       // Check if a%b and a/b both exist
2993       Node* d = n->find_similar(Op_DivI);
2994       if (d) {
2995         // Replace them with a fused divmod if supported
2996         if (Matcher::has_match_rule(Op_DivModI)) {
2997           DivModINode* divmod = DivModINode::make(this, n);
2998           d->subsume_by(divmod->div_proj(), this);
2999           n->subsume_by(divmod->mod_proj(), this);
3000         } else {
3001           // replace a%b with a-((a/b)*b)
3002           Node* mult = new (this) MulINode(d, d->in(2));
3003           Node* sub  = new (this) SubINode(d->in(1), mult);
3004           n->subsume_by(sub, this);
3005         }
3006       }
3007     }
3008     break;
3009 


4005     worklist.clear();
4006     worklist.push(root());
4007     for (uint next = 0; next < worklist.size(); ++next) {
4008       Node *n  = worklist.at(next);
4009       const Type* t = igvn.type_or_null(n);
4010       assert((t == NULL) || (t == t->remove_speculative()), "no more speculative types");
4011       if (n->is_Type()) {
4012         t = n->as_Type()->type();
4013         assert(t == t->remove_speculative(), "no more speculative types");
4014       }
4015       uint max = n->len();
4016       for( uint i = 0; i < max; ++i ) {
4017         Node *m = n->in(i);
4018         if (not_a_node(m))  continue;
4019         worklist.push(m);
4020       }
4021     }
4022     igvn.check_no_speculative_types();
4023 #endif
4024   }
4025 }
4026 
4027 // Auxiliary method to support randomized stressing/fuzzing.
4028 //
4029 // This method can be called an arbitrary number of times, with the current count
4030 // as the argument. The logic allows selecting a single candidate from the
4031 // running list of candidates as follows:
4032 //    int count = 0;
4033 //    Cand* selected = null;
4034 //    while(cand = cand->next()) {
4035 //      if (randomized_select(++count)) {
4036 //        selected = cand;
4037 //      }
4038 //    }
4039 //
4040 // Including the count equalizes the chances that any candidate is "selected".
4041 // This is useful when we don't have the complete list of candidates to choose
4042 // from uniformly. In this case, we need to adjust the randomness of the
4043 // selection, or else we will end up biasing the selection towards the later
4044 // candidates.


 395       if (! useful.member(child)) {
 396         assert(!child->is_top() || child != top(),
 397                "If top is cached in Compile object it is in useful list");
 398         // Only need to remove this out-edge to the useless node
 399         n->raw_del_out(j);
 400         --j;
 401         --max;
 402       }
 403     }
 404     if (n->outcnt() == 1 && n->has_special_unique_user()) {
 405       record_for_igvn(n->unique_out());
 406     }
 407   }
 408   // Remove useless macro and predicate opaq nodes
 409   for (int i = C->macro_count()-1; i >= 0; i--) {
 410     Node* n = C->macro_node(i);
 411     if (!useful.member(n)) {
 412       remove_macro_node(n);
 413     }
 414   }
 415   // Remove useless CastII nodes with range check dependency
 416   for (int i = range_check_cast_count() - 1; i >= 0; i--) {
 417     Node* cast = range_check_cast_node(i);
 418     if (!useful.member(cast)) {
 419       remove_range_check_cast(cast);
 420     }
 421   }
 422   // Remove useless expensive node
 423   for (int i = C->expensive_count()-1; i >= 0; i--) {
 424     Node* n = C->expensive_node(i);
 425     if (!useful.member(n)) {
 426       remove_expensive_node(n);
 427     }
 428   }
 429   // clean up the late inline lists
 430   remove_useless_late_inlines(&_string_late_inlines, useful);
 431   remove_useless_late_inlines(&_boxing_late_inlines, useful);
 432   remove_useless_late_inlines(&_late_inlines, useful);
 433   debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
 434 }
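
The removal loops above (macro nodes, range check casts, expensive nodes) all share one detail worth calling out: they walk their list from the back, so removing entry i never shifts an index that has not been visited yet. A minimal standalone sketch of the same pattern, using standard containers rather than HotSpot's GrowableArray (the names here are illustrative, not from the source):

    #include <unordered_set>
    #include <vector>

    // Drop every pointer in 'list' that is not a member of 'useful'.
    // Walking from the back means erase(i) only shifts indices that
    // have already been visited.
    template <typename T>
    void remove_useless(std::vector<T*>& list,
                        const std::unordered_set<T*>& useful) {
      for (int i = static_cast<int>(list.size()) - 1; i >= 0; i--) {
        if (useful.count(list[i]) == 0) {
          list.erase(list.begin() + i);   // plays the role of remove_macro_node(n)
        }
      }
    }
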
 435 
 436 //------------------------------frame_size_in_words-----------------------------
 437 // frame_slots in units of words
 438 int Compile::frame_size_in_words() const {
 439   // shift is 0 in LP32 and 1 in LP64
 440   const int shift = (LogBytesPerWord - LogBytesPerInt);
 441   int words = _frame_slots >> shift;
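
The shift above works because a frame slot is always one 32-bit int wide, so converting slots to words only needs the difference of the two log sizes: 3 - 2 = 1 on LP64 (8-byte words) and 2 - 2 = 0 on LP32 (4-byte words). A quick standalone check of that arithmetic (the constants are restated locally for the sketch; only the shift shown above is modeled, not the rest of the function):

    #include <cstdio>

    int main() {
      const int LogBytesPerInt  = 2;                        // a frame slot holds one 32-bit int
      const int LogBytesPerWord = (sizeof(void*) == 8) ? 3 : 2;
      const int shift = LogBytesPerWord - LogBytesPerInt;   // 1 on LP64, 0 on LP32
      const int frame_slots = 14;
      std::printf("%d slots -> %d words\n", frame_slots, frame_slots >> shift);  // 7 on LP64
      return 0;
    }
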


1138   _alias_types   = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
1139   AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType,  grow_ats);
1140   Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1141   {
1142     for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
1143   }
1144   // Initialize the first few types.
1145   _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL);
1146   _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1147   _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1148   _num_alias_types = AliasIdxRaw+1;
1149   // Zero out the alias type cache.
1150   Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
1151   // A NULL adr_type hits in the cache right away.  Preload the right answer.
1152   probe_alias_cache(NULL)->_index = AliasIdxTop;
1153 
1154   _intrinsics = NULL;
1155   _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1156   _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1157   _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1158   _range_check_casts = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1159   register_library_intrinsics();
1160 }
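
The _alias_cache that probe_alias_cache consults is, conceptually, a small direct-mapped table keyed on the address-type pointer, which is why zeroing it and then preloading the entry for a NULL adr_type is enough to make that common query hit right away. A standalone model of the idea follows; the table size, the hash, and the index value are assumptions made for the sketch and are not HotSpot's actual layout:

    #include <cstdint>
    #include <cstring>

    struct AliasCacheEntry { const void* _adr_type; int _index; };

    const int kLogCacheSize = 6;                         // 64 entries (assumed size)
    AliasCacheEntry alias_cache[1 << kLogCacheSize];

    // Direct-mapped probe: hash the pointer, mask it down to a slot.
    AliasCacheEntry* probe_alias_cache(const void* adr_type) {
      uintptr_t key = reinterpret_cast<uintptr_t>(adr_type);
      key ^= key >> kLogCacheSize;                       // cheap pointer hash (assumed)
      return &alias_cache[key & ((1u << kLogCacheSize) - 1)];
    }

    void init_alias_cache() {
      std::memset(alias_cache, 0, sizeof(alias_cache));  // "zero out the alias type cache"
      // Preload the answer for a NULL adr_type so it hits immediately;
      // 1 is a stand-in for AliasIdxTop in this sketch.
      probe_alias_cache(nullptr)->_index = 1;
    }
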
1161 
1162 //---------------------------init_start----------------------------------------
1163 // Install the StartNode on this compile object.
1164 void Compile::init_start(StartNode* s) {
1165   if (failing())
1166     return; // already failing
1167   assert(s == start(), "");
1168 }
1169 
1170 StartNode* Compile::start() const {
1171   assert(!failing(), "");
1172   for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
1173     Node* start = root()->fast_out(i);
1174     if( start->is_Start() )
1175       return start->as_Start();
1176   }
1177   fatal("Did not find Start node!");
1178   return NULL;


1867   // Clean up loose ends, if we are out of space for inlining.
1868   WarmCallInfo* call;
1869   while ((call = pop_warm_call()) != NULL) {
1870     call->make_cold();
1871   }
1872 }
1873 
1874 //---------------------cleanup_loop_predicates-----------------------
1875 // Remove the opaque nodes that protect the predicates so that all unused
1876 // checks and uncommon_traps will be eliminated from the ideal graph
1877 void Compile::cleanup_loop_predicates(PhaseIterGVN &igvn) {
1878   if (predicate_count()==0) return;
1879   for (int i = predicate_count(); i > 0; i--) {
1880     Node * n = predicate_opaque1_node(i-1);
1881     assert(n->Opcode() == Op_Opaque1, "must be");
1882     igvn.replace_node(n, n->in(1));
1883   }
1884   assert(predicate_count()==0, "should be clean!");
1885 }
1886 
1887 void Compile::add_range_check_cast(Node* n) {
1888   assert(n->isa_CastII()->has_range_check(), "CastII should have range check dependency");
1889   assert(!_range_check_casts->contains(n), "duplicate entry in range check casts");
1890   _range_check_casts->append(n);
1891 }
1892 
1893 // Remove all range check dependent CastIINodes.
1894 void Compile::remove_range_check_casts(PhaseIterGVN &igvn) {
1895   for (int i = range_check_cast_count(); i > 0; i--) {
1896     Node* cast = range_check_cast_node(i-1);
1897     assert(cast->isa_CastII()->has_range_check(), "CastII should have range check dependency");
1898     igvn.replace_node(cast, cast->in(1));
1899   }
1900   assert(range_check_cast_count() == 0, "should be empty");
1901 }
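
Both cleanup_loop_predicates above and remove_range_check_casts follow the same idea: the Opaque1 or range-check CastII node exists only to keep earlier phases from optimizing across it, and once that protection is no longer needed the node is spliced out by replacing it with its single data input, in(1). A standalone illustration of that splice, with made-up types standing in for ideal nodes:

    #include <cassert>

    struct N { N* in1; };                 // stand-in for a node's in(1) edge

    // Model of igvn.replace_node(marker, marker->in(1)): the user's edge that
    // pointed at the marker is rewired to the marker's input, so the marker
    // drops out of the data path.
    void splice_out(N*& user_edge, N* marker) {
      assert(user_edge == marker);
      user_edge = marker->in1;
    }

    int main() {
      N producer{nullptr};
      N cast{&producer};                  // e.g. a range-check-dependent CastII
      N* consumer_edge = &cast;           // the consumer currently reads through the cast
      splice_out(consumer_edge, &cast);
      assert(consumer_edge == &producer); // consumer now reads the producer directly
      return 0;
    }
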
1902 
1903 // StringOpts and late inlining of string methods
1904 void Compile::inline_string_calls(bool parse_time) {
1905   {
1906     // remove useless nodes to make the usage analysis simpler
1907     ResourceMark rm;
1908     PhaseRemoveUseless pru(initial_gvn(), for_igvn());
1909   }
1910 
1911   {
1912     ResourceMark rm;
1913     print_method(PHASE_BEFORE_STRINGOPTS, 3);
1914     PhaseStringOpts pso(initial_gvn(), for_igvn());
1915     print_method(PHASE_AFTER_STRINGOPTS, 3);
1916   }
1917 
1918   // now inline anything that we skipped the first time around
1919   if (!parse_time) {
1920     _late_inlines_pos = _late_inlines.length();
1921   }
1922 


2225   // peeling, unrolling, etc.
2226   if(loop_opts_cnt > 0) {
2227     debug_only( int cnt = 0; );
2228     while(major_progress() && (loop_opts_cnt > 0)) {
2229       TracePhase t2("idealLoop", &_t_idealLoop, true);
2230       assert( cnt++ < 40, "infinite cycle in loop optimization" );
2231       PhaseIdealLoop ideal_loop( igvn, true);
2232       loop_opts_cnt--;
2233       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2234       if (failing())  return;
2235     }
2236   }
2237 
2238   {
2239     // Verify that all previous optimizations produced a valid graph
2240     // at least to this point, even if no loop optimizations were done.
2241     NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); )
2242     PhaseIdealLoop::verify(igvn);
2243   }
2244 
2245   if (range_check_cast_count() > 0) {
2246     // No more loop optimizations. Remove all range check dependent CastIINodes.
2247     C->remove_range_check_casts(igvn);
2248     igvn.optimize();
2249   }
2250 
2251   {
2252     NOT_PRODUCT( TracePhase t2("macroExpand", &_t_macroExpand, TimeCompiler); )
2253     PhaseMacroExpand  mex(igvn);
2254     if (mex.expand_macro_nodes()) {
2255       assert(failing(), "must bail out w/ explicit message");
2256       return;
2257     }
2258   }
2259 
2260  } // (End scope of igvn; run destructor if necessary for asserts.)
2261 
2262   dump_inlining();
2263   // A method with only infinite loops has no edges entering loops from root
2264   {
2265     NOT_PRODUCT( TracePhase t2("graphReshape", &_t_graphReshaping, TimeCompiler); )
2266     if (final_graph_reshaping()) {
2267       assert(failing(), "must bail out w/ explicit message");
2268       return;
2269     }
2270   }


3000     if (n->as_Phi()->bottom_type()->isa_narrowoop() || n->as_Phi()->bottom_type()->isa_narrowklass()) {
3001       // The EncodeP optimization may create Phi with the same edges
3002       // for all paths. It is not handled well by Register Allocator.
3003       Node* unique_in = n->in(1);
3004       assert(unique_in != NULL, "");
3005       uint cnt = n->req();
3006       for (uint i = 2; i < cnt; i++) {
3007         Node* m = n->in(i);
3008         assert(m != NULL, "");
3009         if (unique_in != m)
3010           unique_in = NULL;
3011       }
3012       if (unique_in != NULL) {
3013         n->subsume_by(unique_in, this);
3014       }
3015     }
3016     break;
3017 
3018 #endif
3019 
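
The Phi case above checks whether every input from in(1) through in(req()-1) is the same node; if so the Phi carries no information and can be subsumed by that single input. The same check in a self-contained form, where a plain vector of pointers stands in for the Phi's edges (index 0 is skipped because, as in a PhiNode, it is the control/region edge, not data):

    #include <vector>

    // Returns the unique data input if all of phi_inputs[1..] are the same
    // node, otherwise nullptr.
    template <typename Node>
    Node* unique_input(const std::vector<Node*>& phi_inputs) {
      Node* unique_in = phi_inputs.at(1);
      for (size_t i = 2; i < phi_inputs.size(); i++) {
        if (phi_inputs[i] != unique_in) {
          return nullptr;                 // at least two distinct inputs
        }
      }
      return unique_in;                   // caller would subsume the Phi by this node
    }
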
3020 #ifdef ASSERT
3021   case Op_CastII:
3022     // Verify that all range check dependent CastII nodes were removed.
3023     if (n->isa_CastII()->has_range_check()) {
3024       n->dump(3);
3025       assert(false, "Range check dependent CastII node was not removed");
3026     }
3027     break;
3028 #endif
3029 
3030   case Op_ModI:
3031     if (UseDivMod) {
3032       // Check if a%b and a/b both exist
3033       Node* d = n->find_similar(Op_DivI);
3034       if (d) {
3035         // Replace them with a fused divmod if supported
3036         if (Matcher::has_match_rule(Op_DivModI)) {
3037           DivModINode* divmod = DivModINode::make(this, n);
3038           d->subsume_by(divmod->div_proj(), this);
3039           n->subsume_by(divmod->mod_proj(), this);
3040         } else {
3041           // replace a%b with a-((a/b)*b)
3042           Node* mult = new (this) MulINode(d, d->in(2));
3043           Node* sub  = new (this) SubINode(d->in(1), mult);
3044           n->subsume_by(sub, this);
3045         }
3046       }
3047     }
3048     break;
3049 
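
Two facts make the Op_ModI rewrite safe: the matcher may offer a fused div/mod that yields both results from one operation, and when it does not, truncating integer division guarantees a == (a/b)*b + a%b, so the remainder can be recovered as a - (a/b)*b. For example, a = 7, b = 3 gives 7 - (7/3)*3 = 7 - 6 = 1 = 7 % 3, and a = -7, b = 3 gives -7 - (-2)*3 = -1 = -7 % 3. A tiny standalone check of the identity (not HotSpot code):

    #include <cassert>

    // Remainder recovered from truncating division: a % b == a - (a / b) * b.
    int mod_via_div(int a, int b) {
      int q = a / b;                      // the DivI result the rewrite reuses
      return a - q * b;                   // the MulI feeding the SubI above
    }

    int main() {
      assert(mod_via_div(7, 3)  ==  7 % 3);
      assert(mod_via_div(-7, 3) == -7 % 3);
      assert(mod_via_div(7, -3) ==  7 % -3);
      return 0;
    }
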


4045     worklist.clear();
4046     worklist.push(root());
4047     for (uint next = 0; next < worklist.size(); ++next) {
4048       Node *n  = worklist.at(next);
4049       const Type* t = igvn.type_or_null(n);
4050       assert((t == NULL) || (t == t->remove_speculative()), "no more speculative types");
4051       if (n->is_Type()) {
4052         t = n->as_Type()->type();
4053         assert(t == t->remove_speculative(), "no more speculative types");
4054       }
4055       uint max = n->len();
4056       for( uint i = 0; i < max; ++i ) {
4057         Node *m = n->in(i);
4058         if (not_a_node(m))  continue;
4059         worklist.push(m);
4060       }
4061     }
4062     igvn.check_no_speculative_types();
4063 #endif
4064   }
4065 }
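
The verification loop above is the standard C2 worklist traversal: push the root, walk the list with a cursor, and push every input of the node under the cursor, relying on the worklist itself to reject duplicates so the walk terminates on a cyclic graph. A self-contained version of the same pattern, where an explicit visited set plays the de-duplication role of the VM's worklist (node type and names are invented for the sketch):

    #include <unordered_set>
    #include <vector>

    struct Node { std::vector<Node*> inputs; };

    // Visit every node reachable from 'root' through input edges, each node once.
    template <typename Visit>
    void walk_inputs(Node* root, Visit visit) {
      std::vector<Node*> worklist;
      std::unordered_set<Node*> on_list;     // the worklist's own de-dup, made explicit
      worklist.push_back(root);
      on_list.insert(root);
      for (size_t next = 0; next < worklist.size(); ++next) {
        Node* n = worklist[next];
        visit(n);                            // e.g. the speculative-type asserts above
        for (Node* m : n->inputs) {
          if (m != nullptr && on_list.insert(m).second) {
            worklist.push_back(m);           // first time we see m: queue it
          }
        }
      }
    }
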
4066 
4067 // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
4068 Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl) {
4069   if (ctrl != NULL) {
4070     // Express control dependency by a CastII node with a narrow type.
4071     value = new (phase->C) CastIINode(value, itype, false, true /* range check dependency */);
4072     // Make the CastII node dependent on the control input to prevent the narrowed ConvI2L
4073     // node from floating above the range check during loop optimizations. Otherwise, the
4074     // ConvI2L node may be eliminated independently of the range check, causing the data path
4075     // to become TOP while the control path is still there (although it's unreachable).
4076     value->set_req(0, ctrl);
4077     // Save CastII node to remove it after loop optimizations.
4078     phase->C->add_range_check_cast(value);
4079     value = phase->transform(value);
4080   }
4081   const TypeLong* ltype = TypeLong::make(itype->_lo, itype->_hi, itype->_widen);
4082   return phase->transform(new (phase->C) ConvI2LNode(value, ltype));
4083 }
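
The last two lines of constrained_convI2L perform a simple range transfer: the ConvI2L result type is given exactly the int input's [lo, hi] bounds, which is what makes the conversion "narrowed" and hence unsafe to float above the check that established those bounds. The transfer itself amounts to the following standalone restatement (plain structs stand in for HotSpot's type classes; this is an illustration, not the VM's implementation):

    #include <cassert>
    #include <cstdint>

    struct IntRange  { int32_t lo, hi; };
    struct LongRange { int64_t lo, hi; };

    // Model of TypeLong::make(itype->_lo, itype->_hi, ...): the long range is
    // the int range carried over unchanged, not the full 64-bit range.
    LongRange narrowed_long_range(IntRange itype) {
      return LongRange{ itype.lo, itype.hi };
    }

    int main() {
      // An index proven to be in [0, 99] by a range check keeps those
      // bounds after conversion to long.
      LongRange l = narrowed_long_range(IntRange{0, 99});
      assert(l.lo == 0 && l.hi == 99);
      return 0;
    }
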
4084 
4085 // Auxiliary method to support randomized stressing/fuzzing.
4086 //
4087 // This method can be called an arbitrary number of times, with the current count
4088 // as the argument. The logic allows selecting a single candidate from the
4089 // running list of candidates as follows:
4090 //    int count = 0;
4091 //    Cand* selected = null;
4092 //    while(cand = cand->next()) {
4093 //      if (randomized_select(++count)) {
4094 //        selected = cand;
4095 //      }
4096 //    }
4097 //
4098 // Including the count equalizes the chances that any candidate is "selected".
4099 // This is useful when we don't have the complete list of candidates to choose
4100 // from uniformly. In this case, we need to adjust the randomness of the
4101 // selection, or else we will end up biasing the selection towards the later
4102 // candidates.
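
The scheme the comment describes is the classic single-element reservoir sample: accept the k-th candidate with probability 1/k, and after the loop every candidate has been selected with probability 1/n (accepted at step k with probability 1/k, then never replaced with probability k/n). A standalone sketch of the predicate and the selection loop; the RNG and the Cand type are placeholders, not the VM's implementation:

    #include <cstdlib>

    // Accept the current candidate with probability 1/count.
    bool randomized_select(int count) {
      return (std::rand() % count) == 0;       // placeholder RNG, not os::random()
    }

    // Uniformly pick one element from a singly linked list of unknown length.
    template <typename Cand>
    Cand* pick_uniform(Cand* head) {
      int count = 0;
      Cand* selected = nullptr;
      for (Cand* cand = head; cand != nullptr; cand = cand->next()) {
        if (randomized_select(++count)) {      // k-th element survives with prob 1/k
          selected = cand;
        }
      }
      return selected;                         // each element selected with prob 1/n
    }
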