< prev index next >

src/share/vm/opto/compile.cpp

Print this page




  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "ci/ciReplay.hpp"
  29 #include "classfile/systemDictionary.hpp"
  30 #include "code/exceptionHandlerTable.hpp"
  31 #include "code/nmethod.hpp"
  32 #include "compiler/compileBroker.hpp"
  33 #include "compiler/compileLog.hpp"
  34 #include "compiler/disassembler.hpp"
  35 #include "compiler/oopMap.hpp"
  36 #include "opto/addnode.hpp"
  37 #include "opto/block.hpp"
  38 #include "opto/c2compiler.hpp"
  39 #include "opto/callGenerator.hpp"
  40 #include "opto/callnode.hpp"

  41 #include "opto/cfgnode.hpp"
  42 #include "opto/chaitin.hpp"
  43 #include "opto/compile.hpp"
  44 #include "opto/connode.hpp"
  45 #include "opto/convertnode.hpp"
  46 #include "opto/divnode.hpp"
  47 #include "opto/escape.hpp"
  48 #include "opto/idealGraphPrinter.hpp"
  49 #include "opto/loopnode.hpp"
  50 #include "opto/machnode.hpp"
  51 #include "opto/macro.hpp"
  52 #include "opto/matcher.hpp"
  53 #include "opto/mathexactnode.hpp"
  54 #include "opto/memnode.hpp"
  55 #include "opto/mulnode.hpp"
  56 #include "opto/narrowptrnode.hpp"
  57 #include "opto/node.hpp"
  58 #include "opto/opcodes.hpp"
  59 #include "opto/output.hpp"
  60 #include "opto/parse.hpp"


 385       if (! useful.member(child)) {
 386         assert(!child->is_top() || child != top(),
 387                "If top is cached in Compile object it is in useful list");
 388         // Only need to remove this out-edge to the useless node
 389         n->raw_del_out(j);
 390         --j;
 391         --max;
 392       }
 393     }
 394     if (n->outcnt() == 1 && n->has_special_unique_user()) {
 395       record_for_igvn(n->unique_out());
 396     }
 397   }
 398   // Remove useless macro and predicate opaq nodes
 399   for (int i = C->macro_count()-1; i >= 0; i--) {
 400     Node* n = C->macro_node(i);
 401     if (!useful.member(n)) {
 402       remove_macro_node(n);
 403     }
 404   }









 405   // Remove useless expensive node
 406   for (int i = C->expensive_count()-1; i >= 0; i--) {
 407     Node* n = C->expensive_node(i);
 408     if (!useful.member(n)) {
 409       remove_expensive_node(n);
 410     }
 411   }
 412   // clean up the late inline lists
 413   remove_useless_late_inlines(&_string_late_inlines, useful);
 414   remove_useless_late_inlines(&_boxing_late_inlines, useful);
 415   remove_useless_late_inlines(&_late_inlines, useful);
 416   debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
 417 }
 418 
 419 //------------------------------frame_size_in_words-----------------------------
 420 // frame_slots in units of words
 421 int Compile::frame_size_in_words() const {
 422   // shift is 0 in LP32 and 1 in LP64
 423   const int shift = (LogBytesPerWord - LogBytesPerInt);
 424   int words = _frame_slots >> shift;


1161   _alias_types   = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
1162   AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType,  grow_ats);
1163   Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1164   {
1165     for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
1166   }
1167   // Initialize the first few types.
1168   _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL);
1169   _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1170   _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1171   _num_alias_types = AliasIdxRaw+1;
1172   // Zero out the alias type cache.
1173   Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
1174   // A NULL adr_type hits in the cache right away.  Preload the right answer.
1175   probe_alias_cache(NULL)->_index = AliasIdxTop;
1176 
1177   _intrinsics = NULL;
1178   _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1179   _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1180   _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);



1181   register_library_intrinsics();
1182 }
1183 
1184 //---------------------------init_start----------------------------------------
1185 // Install the StartNode on this compile object.
1186 void Compile::init_start(StartNode* s) {
1187   if (failing())
1188     return; // already failing
1189   assert(s == start(), "");
1190 }
1191 
1192 /**
1193  * Return the 'StartNode'. We must not have a pending failure, since the ideal graph
1194  * can be in an inconsistent state, i.e., we can get segmentation faults when traversing
1195  * the ideal graph.
1196  */
1197 StartNode* Compile::start() const {
1198   assert (!failing(), "Must not have pending failure. Reason is: %s", failure_reason());
1199   for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
1200     Node* start = root()->fast_out(i);


1907   // Clean up loose ends, if we are out of space for inlining.
1908   WarmCallInfo* call;
1909   while ((call = pop_warm_call()) != NULL) {
1910     call->make_cold();
1911   }
1912 }
1913 
1914 //---------------------cleanup_loop_predicates-----------------------
1915 // Remove the opaque nodes that protect the predicates so that all unused
1916 // checks and uncommon_traps will be eliminated from the ideal graph
1917 void Compile::cleanup_loop_predicates(PhaseIterGVN &igvn) {
1918   if (predicate_count()==0) return;  // no loop predicates were created for this method
     // Walk from the end of the list; each replace_node() call is expected to
     // drop the corresponding entry as the dead Opaque1 node is removed (the
     // final assert below checks that the list really ends up empty).
1919   for (int i = predicate_count(); i > 0; i--) {
1920     Node * n = predicate_opaque1_node(i-1);
1921     assert(n->Opcode() == Op_Opaque1, "must be");
     // Replace the Opaque1 guard by its input so IGVN can fold away the
     // now-unprotected checks and uncommon traps.
1922     igvn.replace_node(n, n->in(1));
1923   }
1924   assert(predicate_count()==0, "should be clean!");
1925 }
1926 


















1927 // StringOpts and late inlining of string methods
1928 void Compile::inline_string_calls(bool parse_time) {
1929   {
1930     // remove useless nodes to make the usage analysis simpler
1931     ResourceMark rm;
1932     PhaseRemoveUseless pru(initial_gvn(), for_igvn());
1933   }
1934 
1935   {
1936     ResourceMark rm;
1937     print_method(PHASE_BEFORE_STRINGOPTS, 3);
1938     PhaseStringOpts pso(initial_gvn(), for_igvn());
1939     print_method(PHASE_AFTER_STRINGOPTS, 3);
1940   }
1941 
1942   // now inline anything that we skipped the first time around
1943   if (!parse_time) {
1944     _late_inlines_pos = _late_inlines.length();
1945   }
1946 


2267     debug_only( int cnt = 0; );
2268     while(major_progress() && (loop_opts_cnt > 0)) {
2269       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2270       assert( cnt++ < 40, "infinite cycle in loop optimization" );
2271       PhaseIdealLoop ideal_loop( igvn, true);
2272       loop_opts_cnt--;
2273       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2274       if (failing())  return;
2275     }
2276   }
2277   // Ensure that major progress is now clear
2278   C->clear_major_progress();
2279 
2280   {
2281     // Verify that all previous optimizations produced a valid graph
2282     // at least to this point, even if no loop optimizations were done.
2283     TracePhase tp("idealLoopVerify", &timers[_t_idealLoopVerify]);
2284     PhaseIdealLoop::verify(igvn);
2285   }
2286 








2287   {
2288     TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2289     PhaseMacroExpand  mex(igvn);
2290     if (mex.expand_macro_nodes()) {
2291       assert(failing(), "must bail out w/ explicit message");
2292       return;
2293     }
2294   }
2295 
2296   DEBUG_ONLY( _modified_nodes = NULL; )
2297  } // (End scope of igvn; run destructor if necessary for asserts.)
2298 
2299  process_print_inlining();
2300  // A method with only infinite loops has no edges entering loops from root
2301  {
2302    TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2303    if (final_graph_reshaping()) {
2304      assert(failing(), "must bail out w/ explicit message");
2305      return;
2306    }


3068 
3069   case Op_Phi:
3070     if (n->as_Phi()->bottom_type()->isa_narrowoop() || n->as_Phi()->bottom_type()->isa_narrowklass()) {
3071       // The EncodeP optimization may create Phi with the same edges
3072       // for all paths. It is not handled well by Register Allocator.
3073       Node* unique_in = n->in(1);
3074       assert(unique_in != NULL, "");
3075       uint cnt = n->req();
3076       for (uint i = 2; i < cnt; i++) {
3077         Node* m = n->in(i);
3078         assert(m != NULL, "");
3079         if (unique_in != m)
3080           unique_in = NULL;
3081       }
3082       if (unique_in != NULL) {
3083         n->subsume_by(unique_in, this);
3084       }
3085     }
3086     break;
3087 








3088 #endif
3089 


3090   case Op_ModI:
3091     if (UseDivMod) {
3092       // Check if a%b and a/b both exist
3093       Node* d = n->find_similar(Op_DivI);
3094       if (d) {
3095         // Replace them with a fused divmod if supported
3096         if (Matcher::has_match_rule(Op_DivModI)) {
3097           DivModINode* divmod = DivModINode::make(n);
3098           d->subsume_by(divmod->div_proj(), this);
3099           n->subsume_by(divmod->mod_proj(), this);
3100         } else {
3101           // replace a%b with a-((a/b)*b)
3102           Node* mult = new MulINode(d, d->in(2));
3103           Node* sub  = new SubINode(d->in(1), mult);
3104           n->subsume_by(sub, this);
3105         }
3106       }
3107     }
3108     break;
3109 


3945   if (superk->is_interface()) {
3946     // Cannot trust interfaces yet.
3947     // %%% S.B. superk->nof_implementors() == 1
3948   } else if (superelem->is_instance_klass()) {
3949     ciInstanceKlass* ik = superelem->as_instance_klass();
3950     if (!ik->has_subklass() && !ik->is_interface()) {
3951       if (!ik->is_final()) {
3952         // Add a dependency if there is a chance of a later subclass.
3953         dependencies()->assert_leaf_type(ik);
3954       }
3955       return SSC_easy_test;     // (3) caller can do a simple ptr comparison
3956     }
3957   } else {
3958     // A primitive array type has no subtypes.
3959     return SSC_easy_test;       // (3) caller can do a simple ptr comparison
3960   }
3961 
3962   return SSC_full_test;
3963 }
3964 
3965 Node* Compile::conv_I2X_index(PhaseGVN *phase, Node* idx, const TypeInt* sizetype) {
3966 #ifdef _LP64
3967   // The scaled index operand to AddP must be a clean 64-bit value.
3968   // Java allows a 32-bit int to be incremented to a negative
3969   // value, which appears in a 64-bit register as a large
3970   // positive number.  Using that large positive number as an
3971   // operand in pointer arithmetic has bad consequences.
3972   // On the other hand, 32-bit overflow is rare, and the possibility
3973   // can often be excluded, if we annotate the ConvI2L node with
3974   // a type assertion that its value is known to be a small positive
3975   // number.  (The prior range check has ensured this.)
3976   // This assertion is used by ConvI2LNode::Ideal.
3977   int index_max = max_jint - 1;  // array size is max_jint, index is one less
3978   if (sizetype != NULL)  index_max = sizetype->_hi - 1;
3979   const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax);
3980   idx = phase->transform(new ConvI2LNode(idx, lidxtype));
3981 #endif
3982   return idx;
3983 }




















3984 
3985 // The message about the current inlining is accumulated in
3986 // _print_inlining_stream and transfered into the _print_inlining_list
3987 // once we know whether inlining succeeds or not. For regular
3988 // inlining, messages are appended to the buffer pointed by
3989 // _print_inlining_idx in the _print_inlining_list. For late inlining,
3990 // a new buffer is added after _print_inlining_idx in the list. This
3991 // way we can update the inlining message for late inlining call site
3992 // when the inlining is attempted again.
3993 void Compile::print_inlining_init() {
3994   if (print_inlining() || print_intrinsics()) {
3995     _print_inlining_stream = new stringStream();
3996     _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
3997   }
3998 }
3999 
4000 void Compile::print_inlining_reinit() {
4001   if (print_inlining() || print_intrinsics()) {
4002     // Re allocate buffer when we change ResourceMark
4003     _print_inlining_stream = new stringStream();




  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "ci/ciReplay.hpp"
  29 #include "classfile/systemDictionary.hpp"
  30 #include "code/exceptionHandlerTable.hpp"
  31 #include "code/nmethod.hpp"
  32 #include "compiler/compileBroker.hpp"
  33 #include "compiler/compileLog.hpp"
  34 #include "compiler/disassembler.hpp"
  35 #include "compiler/oopMap.hpp"
  36 #include "opto/addnode.hpp"
  37 #include "opto/block.hpp"
  38 #include "opto/c2compiler.hpp"
  39 #include "opto/callGenerator.hpp"
  40 #include "opto/callnode.hpp"
  41 #include "opto/castnode.hpp"
  42 #include "opto/cfgnode.hpp"
  43 #include "opto/chaitin.hpp"
  44 #include "opto/compile.hpp"
  45 #include "opto/connode.hpp"
  46 #include "opto/convertnode.hpp"
  47 #include "opto/divnode.hpp"
  48 #include "opto/escape.hpp"
  49 #include "opto/idealGraphPrinter.hpp"
  50 #include "opto/loopnode.hpp"
  51 #include "opto/machnode.hpp"
  52 #include "opto/macro.hpp"
  53 #include "opto/matcher.hpp"
  54 #include "opto/mathexactnode.hpp"
  55 #include "opto/memnode.hpp"
  56 #include "opto/mulnode.hpp"
  57 #include "opto/narrowptrnode.hpp"
  58 #include "opto/node.hpp"
  59 #include "opto/opcodes.hpp"
  60 #include "opto/output.hpp"
  61 #include "opto/parse.hpp"


 386       if (! useful.member(child)) {
 387         assert(!child->is_top() || child != top(),
 388                "If top is cached in Compile object it is in useful list");
 389         // Only need to remove this out-edge to the useless node
 390         n->raw_del_out(j);
 391         --j;
 392         --max;
 393       }
 394     }
 395     if (n->outcnt() == 1 && n->has_special_unique_user()) {
 396       record_for_igvn(n->unique_out());
 397     }
 398   }
 399   // Remove useless macro and predicate opaq nodes
 400   for (int i = C->macro_count()-1; i >= 0; i--) {
 401     Node* n = C->macro_node(i);
 402     if (!useful.member(n)) {
 403       remove_macro_node(n);
 404     }
 405   }
 406 #ifdef _LP64
 407   // Remove useless CastII nodes with range check dependency
 408   for (int i = range_check_cast_count() - 1; i >= 0; i--) {
 409     Node* cast = range_check_cast_node(i);
 410     if (!useful.member(cast)) {
 411       remove_range_check_cast(cast);
 412     }
 413   }
 414 #endif
 415   // Remove useless expensive node
 416   for (int i = C->expensive_count()-1; i >= 0; i--) {
 417     Node* n = C->expensive_node(i);
 418     if (!useful.member(n)) {
 419       remove_expensive_node(n);
 420     }
 421   }
 422   // clean up the late inline lists
 423   remove_useless_late_inlines(&_string_late_inlines, useful);
 424   remove_useless_late_inlines(&_boxing_late_inlines, useful);
 425   remove_useless_late_inlines(&_late_inlines, useful);
 426   debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
 427 }
 428 
 429 //------------------------------frame_size_in_words-----------------------------
 430 // frame_slots in units of words
 431 int Compile::frame_size_in_words() const {
 432   // shift is 0 in LP32 and 1 in LP64
 433   const int shift = (LogBytesPerWord - LogBytesPerInt);
 434   int words = _frame_slots >> shift;


1171   _alias_types   = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
1172   AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType,  grow_ats);
1173   Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1174   {
1175     for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
1176   }
1177   // Initialize the first few types.
1178   _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL);
1179   _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1180   _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1181   _num_alias_types = AliasIdxRaw+1;
1182   // Zero out the alias type cache.
1183   Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
1184   // A NULL adr_type hits in the cache right away.  Preload the right answer.
1185   probe_alias_cache(NULL)->_index = AliasIdxTop;
1186 
1187   _intrinsics = NULL;
1188   _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1189   _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1190   _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1191 #ifdef _LP64
1192   _range_check_casts = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1193 #endif
1194   register_library_intrinsics();
1195 }
1196 
1197 //---------------------------init_start----------------------------------------
1198 // Install the StartNode on this compile object.
1199 void Compile::init_start(StartNode* s) {
1200   if (failing())
1201     return; // already failing
1202   assert(s == start(), "");
1203 }
1204 
1205 /**
1206  * Return the 'StartNode'. We must not have a pending failure, since the ideal graph
1207  * can be in an inconsistent state, i.e., we can get segmentation faults when traversing
1208  * the ideal graph.
1209  */
1210 StartNode* Compile::start() const {
1211   assert (!failing(), "Must not have pending failure. Reason is: %s", failure_reason());
1212   for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
1213     Node* start = root()->fast_out(i);


1920   // Clean up loose ends, if we are out of space for inlining.
1921   WarmCallInfo* call;
1922   while ((call = pop_warm_call()) != NULL) {
1923     call->make_cold();
1924   }
1925 }
1926 
1927 //---------------------cleanup_loop_predicates-----------------------
1928 // Remove the opaque nodes that protect the predicates so that all unused
1929 // checks and uncommon_traps will be eliminated from the ideal graph
1930 void Compile::cleanup_loop_predicates(PhaseIterGVN &igvn) {
1931   if (predicate_count()==0) return;  // no loop predicates were created for this method
     // Walk from the end of the list; each replace_node() call is expected to
     // drop the corresponding entry as the dead Opaque1 node is removed (the
     // final assert below checks that the list really ends up empty).
1932   for (int i = predicate_count(); i > 0; i--) {
1933     Node * n = predicate_opaque1_node(i-1);
1934     assert(n->Opcode() == Op_Opaque1, "must be");
     // Replace the Opaque1 guard by its input so IGVN can fold away the
     // now-unprotected checks and uncommon traps.
1935     igvn.replace_node(n, n->in(1));
1936   }
1937   assert(predicate_count()==0, "should be clean!");
1938 }
1939 
1940 #ifdef _LP64
     // Register a range-check dependent CastII so that it can be found and
     // eliminated again after loop optimizations (see remove_range_check_casts).
1941 void Compile::add_range_check_cast(Node* n) {
1942   assert(n->isa_CastII()->has_range_check(), "CastII should have range check dependency");
1943   assert(!_range_check_casts->contains(n), "duplicate entry in range check casts");
1944   _range_check_casts->append(n);
1945 }
1946 
1947 // Remove all range check dependent CastIINodes.
1948 void Compile::remove_range_check_casts(PhaseIterGVN &igvn) {
     // Walk from the end of the list; each replace_node() call is expected to
     // drop the corresponding entry as the dead CastII node is removed (the
     // final assert below checks that the list really ends up empty).
1949   for (int i = range_check_cast_count(); i > 0; i--) {
1950     Node* cast = range_check_cast_node(i-1);
1951     assert(cast->isa_CastII()->has_range_check(), "CastII should have range check dependency");
     // Replace the cast by its input: the narrowing it expressed is only
     // needed while loop optimizations may move the dependent ConvI2L.
1952     igvn.replace_node(cast, cast->in(1));
1953   }
1954   assert(range_check_cast_count() == 0, "should be empty");
1955 }
1956 #endif
1957 
1958 // StringOpts and late inlining of string methods
1959 void Compile::inline_string_calls(bool parse_time) {
1960   {
1961     // remove useless nodes to make the usage analysis simpler
1962     ResourceMark rm;
1963     PhaseRemoveUseless pru(initial_gvn(), for_igvn());
1964   }
1965 
1966   {
1967     ResourceMark rm;
1968     print_method(PHASE_BEFORE_STRINGOPTS, 3);
1969     PhaseStringOpts pso(initial_gvn(), for_igvn());
1970     print_method(PHASE_AFTER_STRINGOPTS, 3);
1971   }
1972 
1973   // now inline anything that we skipped the first time around
1974   if (!parse_time) {
1975     _late_inlines_pos = _late_inlines.length();
1976   }
1977 


2298     debug_only( int cnt = 0; );
2299     while(major_progress() && (loop_opts_cnt > 0)) {
2300       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2301       assert( cnt++ < 40, "infinite cycle in loop optimization" );
2302       PhaseIdealLoop ideal_loop( igvn, true);
2303       loop_opts_cnt--;
2304       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2305       if (failing())  return;
2306     }
2307   }
2308   // Ensure that major progress is now clear
2309   C->clear_major_progress();
2310 
2311   {
2312     // Verify that all previous optimizations produced a valid graph
2313     // at least to this point, even if no loop optimizations were done.
2314     TracePhase tp("idealLoopVerify", &timers[_t_idealLoopVerify]);
2315     PhaseIdealLoop::verify(igvn);
2316   }
2317 
2318 #ifdef _LP64
2319   if (range_check_cast_count() > 0) {
2320     // No more loop optimizations. Remove all range check dependent CastIINodes.
2321     C->remove_range_check_casts(igvn);
2322     igvn.optimize();
2323   }
2324 #endif
2325 
2326   {
2327     TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2328     PhaseMacroExpand  mex(igvn);
2329     if (mex.expand_macro_nodes()) {
2330       assert(failing(), "must bail out w/ explicit message");
2331       return;
2332     }
2333   }
2334 
2335   DEBUG_ONLY( _modified_nodes = NULL; )
2336  } // (End scope of igvn; run destructor if necessary for asserts.)
2337 
2338  process_print_inlining();
2339  // A method with only infinite loops has no edges entering loops from root
2340  {
2341    TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2342    if (final_graph_reshaping()) {
2343      assert(failing(), "must bail out w/ explicit message");
2344      return;
2345    }


3107 
3108   case Op_Phi:
3109     if (n->as_Phi()->bottom_type()->isa_narrowoop() || n->as_Phi()->bottom_type()->isa_narrowklass()) {
3110       // The EncodeP optimization may create Phi with the same edges
3111       // for all paths. It is not handled well by Register Allocator.
3112       Node* unique_in = n->in(1);
3113       assert(unique_in != NULL, "");
3114       uint cnt = n->req();
3115       for (uint i = 2; i < cnt; i++) {
3116         Node* m = n->in(i);
3117         assert(m != NULL, "");
3118         if (unique_in != m)
3119           unique_in = NULL;
3120       }
3121       if (unique_in != NULL) {
3122         n->subsume_by(unique_in, this);
3123       }
3124     }
3125     break;
3126 
3127 #ifdef ASSERT
3128   case Op_CastII:
3129     // Verify that all range check dependent CastII nodes were removed.
3130     if (n->isa_CastII()->has_range_check()) {
3131       n->dump(3);
3132       assert(false, "Range check dependent CastII node was not removed");
3133     }
3134     break;
3135 #endif
3136 
3137 #endif // _LP64
3138 
3139   case Op_ModI:
3140     if (UseDivMod) {
3141       // Check if a%b and a/b both exist
3142       Node* d = n->find_similar(Op_DivI);
3143       if (d) {
3144         // Replace them with a fused divmod if supported
3145         if (Matcher::has_match_rule(Op_DivModI)) {
3146           DivModINode* divmod = DivModINode::make(n);
3147           d->subsume_by(divmod->div_proj(), this);
3148           n->subsume_by(divmod->mod_proj(), this);
3149         } else {
3150           // replace a%b with a-((a/b)*b)
3151           Node* mult = new MulINode(d, d->in(2));
3152           Node* sub  = new SubINode(d->in(1), mult);
3153           n->subsume_by(sub, this);
3154         }
3155       }
3156     }
3157     break;
3158 


3994   if (superk->is_interface()) {
3995     // Cannot trust interfaces yet.
3996     // %%% S.B. superk->nof_implementors() == 1
3997   } else if (superelem->is_instance_klass()) {
3998     ciInstanceKlass* ik = superelem->as_instance_klass();
3999     if (!ik->has_subklass() && !ik->is_interface()) {
4000       if (!ik->is_final()) {
4001         // Add a dependency if there is a chance of a later subclass.
4002         dependencies()->assert_leaf_type(ik);
4003       }
4004       return SSC_easy_test;     // (3) caller can do a simple ptr comparison
4005     }
4006   } else {
4007     // A primitive array type has no subtypes.
4008     return SSC_easy_test;       // (3) caller can do a simple ptr comparison
4009   }
4010 
4011   return SSC_full_test;
4012 }
4013 
4014 Node* Compile::conv_I2X_index(PhaseGVN* phase, Node* idx, const TypeInt* sizetype, Node* ctrl) {
4015 #ifdef _LP64
4016   // The scaled index operand to AddP must be a clean 64-bit value.
4017   // Java allows a 32-bit int to be incremented to a negative
4018   // value, which appears in a 64-bit register as a large
4019   // positive number.  Using that large positive number as an
4020   // operand in pointer arithmetic has bad consequences.
4021   // On the other hand, 32-bit overflow is rare, and the possibility
4022   // can often be excluded, if we annotate the ConvI2L node with
4023   // a type assertion that its value is known to be a small positive
4024   // number.  (The prior range check has ensured this.)
4025   // This assertion is used by ConvI2LNode::Ideal.
4026   int index_max = max_jint - 1;  // array size is max_jint, index is one less
4027   if (sizetype != NULL) index_max = sizetype->_hi - 1;
4028   const TypeInt* iidxtype = TypeInt::make(0, index_max, Type::WidenMax);
4029   idx = constrained_convI2L(phase, idx, iidxtype, ctrl);
4030 #endif
4031   return idx;
4032 }
4033 
4034 #ifdef _LP64
4035 // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
4036 Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl) {
4037   if (ctrl != NULL) {
4038     // Express control dependency by a CastII node with a narrow type.
4039     value = new CastIINode(value, itype, false, true /* range check dependency */);
4040     // Make the CastII node dependent on the control input to prevent the narrowed ConvI2L
4041     // node from floating above the range check during loop optimizations. Otherwise, the
4042     // ConvI2L node may be eliminated independently of the range check, causing the data path
4043     // to become TOP while the control path is still there (although it's unreachable).
4044     value->set_req(0, ctrl);
4045     // Save CastII node to remove it after loop optimizations.
4046     phase->C->add_range_check_cast(value);
4047     value = phase->transform(value);
4048   }
4049   const TypeLong* ltype = TypeLong::make(itype->_lo, itype->_hi, itype->_widen);
4050   return phase->transform(new ConvI2LNode(value, ltype));
4051 }
4052 #endif
4053 
4054 // The message about the current inlining is accumulated in
4055 // _print_inlining_stream and transfered into the _print_inlining_list
4056 // once we know whether inlining succeeds or not. For regular
4057 // inlining, messages are appended to the buffer pointed by
4058 // _print_inlining_idx in the _print_inlining_list. For late inlining,
4059 // a new buffer is added after _print_inlining_idx in the list. This
4060 // way we can update the inlining message for late inlining call site
4061 // when the inlining is attempted again.
4062 void Compile::print_inlining_init() {
4063   if (print_inlining() || print_intrinsics()) {
4064     _print_inlining_stream = new stringStream();
4065     _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
4066   }
4067 }
4068 
4069 void Compile::print_inlining_reinit() {
4070   if (print_inlining() || print_intrinsics()) {
4071     // Re allocate buffer when we change ResourceMark
4072     _print_inlining_stream = new stringStream();


< prev index next >