21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "ci/ciReplay.hpp"
29 #include "classfile/systemDictionary.hpp"
30 #include "code/exceptionHandlerTable.hpp"
31 #include "code/nmethod.hpp"
32 #include "compiler/compileBroker.hpp"
33 #include "compiler/compileLog.hpp"
34 #include "compiler/disassembler.hpp"
35 #include "compiler/oopMap.hpp"
36 #include "opto/addnode.hpp"
37 #include "opto/block.hpp"
38 #include "opto/c2compiler.hpp"
39 #include "opto/callGenerator.hpp"
40 #include "opto/callnode.hpp"
41 #include "opto/cfgnode.hpp"
42 #include "opto/chaitin.hpp"
43 #include "opto/compile.hpp"
44 #include "opto/connode.hpp"
45 #include "opto/convertnode.hpp"
46 #include "opto/divnode.hpp"
47 #include "opto/escape.hpp"
48 #include "opto/idealGraphPrinter.hpp"
49 #include "opto/loopnode.hpp"
50 #include "opto/machnode.hpp"
51 #include "opto/macro.hpp"
52 #include "opto/matcher.hpp"
53 #include "opto/mathexactnode.hpp"
54 #include "opto/memnode.hpp"
55 #include "opto/mulnode.hpp"
56 #include "opto/narrowptrnode.hpp"
57 #include "opto/node.hpp"
58 #include "opto/opcodes.hpp"
59 #include "opto/output.hpp"
60 #include "opto/parse.hpp"
385 if (! useful.member(child)) {
386 assert(!child->is_top() || child != top(),
387 "If top is cached in Compile object it is in useful list");
388 // Only need to remove this out-edge to the useless node
389 n->raw_del_out(j);
390 --j;
391 --max;
392 }
393 }
394 if (n->outcnt() == 1 && n->has_special_unique_user()) {
395 record_for_igvn(n->unique_out());
396 }
397 }
398 // Remove useless macro and predicate opaq nodes
399 for (int i = C->macro_count()-1; i >= 0; i--) {
400 Node* n = C->macro_node(i);
401 if (!useful.member(n)) {
402 remove_macro_node(n);
403 }
404 }
405 // Remove useless expensive node
406 for (int i = C->expensive_count()-1; i >= 0; i--) {
407 Node* n = C->expensive_node(i);
408 if (!useful.member(n)) {
409 remove_expensive_node(n);
410 }
411 }
412 // clean up the late inline lists
413 remove_useless_late_inlines(&_string_late_inlines, useful);
414 remove_useless_late_inlines(&_boxing_late_inlines, useful);
415 remove_useless_late_inlines(&_late_inlines, useful);
416 debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
417 }
418
419 //------------------------------frame_size_in_words-----------------------------
420 // frame_slots in units of words
421 int Compile::frame_size_in_words() const {
422 // shift is 0 in LP32 and 1 in LP64
423 const int shift = (LogBytesPerWord - LogBytesPerInt);
424 int words = _frame_slots >> shift;
1161 _alias_types = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
1162 AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, grow_ats);
1163 Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1164 {
1165 for (int i = 0; i < grow_ats; i++) _alias_types[i] = &ats[i];
1166 }
1167 // Initialize the first few types.
1168 _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL);
1169 _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1170 _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1171 _num_alias_types = AliasIdxRaw+1;
1172 // Zero out the alias type cache.
1173 Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
1174 // A NULL adr_type hits in the cache right away. Preload the right answer.
1175 probe_alias_cache(NULL)->_index = AliasIdxTop;
1176
1177 _intrinsics = NULL;
1178 _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
1179 _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
1180 _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
1181 register_library_intrinsics();
1182 }
1183
1184 //---------------------------init_start----------------------------------------
1185 // Install the StartNode on this compile object.
1186 void Compile::init_start(StartNode* s) {
1187 if (failing())
1188 return; // already failing
1189 assert(s == start(), "");
1190 }
1191
1192 /**
1193 * Return the 'StartNode'. We must not have a pending failure, since the ideal graph
1194 * can be in an inconsistent state, i.e., we can get segmentation faults when traversing
1195 * the ideal graph.
1196 */
1197 StartNode* Compile::start() const {
1198 assert (!failing(), "Must not have pending failure. Reason is: %s", failure_reason());
1199 for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
1200 Node* start = root()->fast_out(i);
1907 // Clean up loose ends, if we are out of space for inlining.
1908 WarmCallInfo* call;
1909 while ((call = pop_warm_call()) != NULL) {
1910 call->make_cold();
1911 }
1912 }
1913
1914 //---------------------cleanup_loop_predicates-----------------------
1915 // Remove the opaque nodes that protect the predicates so that all unused
1916 // checks and uncommon_traps will be eliminated from the ideal graph
1917 void Compile::cleanup_loop_predicates(PhaseIterGVN &igvn) {
1918 if (predicate_count()==0) return;
1919 for (int i = predicate_count(); i > 0; i--) {
1920 Node * n = predicate_opaque1_node(i-1);
1921 assert(n->Opcode() == Op_Opaque1, "must be");
1922 igvn.replace_node(n, n->in(1));
1923 }
1924 assert(predicate_count()==0, "should be clean!");
1925 }
1926
1927 // StringOpts and late inlining of string methods
1928 void Compile::inline_string_calls(bool parse_time) {
1929 {
1930 // remove useless nodes to make the usage analysis simpler
1931 ResourceMark rm;
1932 PhaseRemoveUseless pru(initial_gvn(), for_igvn());
1933 }
1934
1935 {
1936 ResourceMark rm;
1937 print_method(PHASE_BEFORE_STRINGOPTS, 3);
1938 PhaseStringOpts pso(initial_gvn(), for_igvn());
1939 print_method(PHASE_AFTER_STRINGOPTS, 3);
1940 }
1941
1942 // now inline anything that we skipped the first time around
1943 if (!parse_time) {
1944 _late_inlines_pos = _late_inlines.length();
1945 }
1946
2267 debug_only( int cnt = 0; );
2268 while(major_progress() && (loop_opts_cnt > 0)) {
2269 TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2270 assert( cnt++ < 40, "infinite cycle in loop optimization" );
2271 PhaseIdealLoop ideal_loop( igvn, true);
2272 loop_opts_cnt--;
2273 if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2274 if (failing()) return;
2275 }
2276 }
2277 // Ensure that major progress is now clear
2278 C->clear_major_progress();
2279
2280 {
2281 // Verify that all previous optimizations produced a valid graph
2282 // at least to this point, even if no loop optimizations were done.
2283 TracePhase tp("idealLoopVerify", &timers[_t_idealLoopVerify]);
2284 PhaseIdealLoop::verify(igvn);
2285 }
2286
2287 {
2288 TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2289 PhaseMacroExpand mex(igvn);
2290 if (mex.expand_macro_nodes()) {
2291 assert(failing(), "must bail out w/ explicit message");
2292 return;
2293 }
2294 }
2295
2296 DEBUG_ONLY( _modified_nodes = NULL; )
2297 } // (End scope of igvn; run destructor if necessary for asserts.)
2298
2299 process_print_inlining();
2300 // A method with only infinite loops has no edges entering loops from root
2301 {
2302 TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2303 if (final_graph_reshaping()) {
2304 assert(failing(), "must bail out w/ explicit message");
2305 return;
2306 }
3070 if (n->as_Phi()->bottom_type()->isa_narrowoop() || n->as_Phi()->bottom_type()->isa_narrowklass()) {
3071 // The EncodeP optimization may create Phi with the same edges
3072 // for all paths. It is not handled well by Register Allocator.
3073 Node* unique_in = n->in(1);
3074 assert(unique_in != NULL, "");
3075 uint cnt = n->req();
3076 for (uint i = 2; i < cnt; i++) {
3077 Node* m = n->in(i);
3078 assert(m != NULL, "");
3079 if (unique_in != m)
3080 unique_in = NULL;
3081 }
3082 if (unique_in != NULL) {
3083 n->subsume_by(unique_in, this);
3084 }
3085 }
3086 break;
3087
3088 #endif
3089
3090 case Op_ModI:
3091 if (UseDivMod) {
3092 // Check if a%b and a/b both exist
3093 Node* d = n->find_similar(Op_DivI);
3094 if (d) {
3095 // Replace them with a fused divmod if supported
3096 if (Matcher::has_match_rule(Op_DivModI)) {
3097 DivModINode* divmod = DivModINode::make(n);
3098 d->subsume_by(divmod->div_proj(), this);
3099 n->subsume_by(divmod->mod_proj(), this);
3100 } else {
3101 // replace a%b with a-((a/b)*b)
3102 Node* mult = new MulINode(d, d->in(2));
3103 Node* sub = new SubINode(d->in(1), mult);
3104 n->subsume_by(sub, this);
3105 }
3106 }
3107 }
3108 break;
3109
3945 if (superk->is_interface()) {
3946 // Cannot trust interfaces yet.
3947 // %%% S.B. superk->nof_implementors() == 1
3948 } else if (superelem->is_instance_klass()) {
3949 ciInstanceKlass* ik = superelem->as_instance_klass();
3950 if (!ik->has_subklass() && !ik->is_interface()) {
3951 if (!ik->is_final()) {
3952 // Add a dependency if there is a chance of a later subclass.
3953 dependencies()->assert_leaf_type(ik);
3954 }
3955 return SSC_easy_test; // (3) caller can do a simple ptr comparison
3956 }
3957 } else {
3958 // A primitive array type has no subtypes.
3959 return SSC_easy_test; // (3) caller can do a simple ptr comparison
3960 }
3961
3962 return SSC_full_test;
3963 }
3964
3965 Node* Compile::conv_I2X_index(PhaseGVN *phase, Node* idx, const TypeInt* sizetype) {
3966 #ifdef _LP64
3967 // The scaled index operand to AddP must be a clean 64-bit value.
3968 // Java allows a 32-bit int to be incremented to a negative
3969 // value, which appears in a 64-bit register as a large
3970 // positive number. Using that large positive number as an
3971 // operand in pointer arithmetic has bad consequences.
3972 // On the other hand, 32-bit overflow is rare, and the possibility
3973 // can often be excluded, if we annotate the ConvI2L node with
3974 // a type assertion that its value is known to be a small positive
3975 // number. (The prior range check has ensured this.)
3976 // This assertion is used by ConvI2LNode::Ideal.
3977 int index_max = max_jint - 1; // array size is max_jint, index is one less
3978 if (sizetype != NULL) index_max = sizetype->_hi - 1;
3979 const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax);
3980 idx = phase->transform(new ConvI2LNode(idx, lidxtype));
3981 #endif
3982 return idx;
3983 }
3984
3985 // The message about the current inlining is accumulated in
// _print_inlining_stream and transferred into the _print_inlining_list
3987 // once we know whether inlining succeeds or not. For regular
3988 // inlining, messages are appended to the buffer pointed by
3989 // _print_inlining_idx in the _print_inlining_list. For late inlining,
3990 // a new buffer is added after _print_inlining_idx in the list. This
3991 // way we can update the inlining message for late inlining call site
3992 // when the inlining is attempted again.
3993 void Compile::print_inlining_init() {
3994 if (print_inlining() || print_intrinsics()) {
3995 _print_inlining_stream = new stringStream();
3996 _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
3997 }
3998 }
3999
4000 void Compile::print_inlining_reinit() {
4001 if (print_inlining() || print_intrinsics()) {
4002 // Re allocate buffer when we change ResourceMark
|
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "ci/ciReplay.hpp"
29 #include "classfile/systemDictionary.hpp"
30 #include "code/exceptionHandlerTable.hpp"
31 #include "code/nmethod.hpp"
32 #include "compiler/compileBroker.hpp"
33 #include "compiler/compileLog.hpp"
34 #include "compiler/disassembler.hpp"
35 #include "compiler/oopMap.hpp"
36 #include "opto/addnode.hpp"
37 #include "opto/block.hpp"
38 #include "opto/c2compiler.hpp"
39 #include "opto/callGenerator.hpp"
40 #include "opto/callnode.hpp"
41 #include "opto/castnode.hpp"
42 #include "opto/cfgnode.hpp"
43 #include "opto/chaitin.hpp"
44 #include "opto/compile.hpp"
45 #include "opto/connode.hpp"
46 #include "opto/convertnode.hpp"
47 #include "opto/divnode.hpp"
48 #include "opto/escape.hpp"
49 #include "opto/idealGraphPrinter.hpp"
50 #include "opto/loopnode.hpp"
51 #include "opto/machnode.hpp"
52 #include "opto/macro.hpp"
53 #include "opto/matcher.hpp"
54 #include "opto/mathexactnode.hpp"
55 #include "opto/memnode.hpp"
56 #include "opto/mulnode.hpp"
57 #include "opto/narrowptrnode.hpp"
58 #include "opto/node.hpp"
59 #include "opto/opcodes.hpp"
60 #include "opto/output.hpp"
61 #include "opto/parse.hpp"
386 if (! useful.member(child)) {
387 assert(!child->is_top() || child != top(),
388 "If top is cached in Compile object it is in useful list");
389 // Only need to remove this out-edge to the useless node
390 n->raw_del_out(j);
391 --j;
392 --max;
393 }
394 }
395 if (n->outcnt() == 1 && n->has_special_unique_user()) {
396 record_for_igvn(n->unique_out());
397 }
398 }
399 // Remove useless macro and predicate opaq nodes
400 for (int i = C->macro_count()-1; i >= 0; i--) {
401 Node* n = C->macro_node(i);
402 if (!useful.member(n)) {
403 remove_macro_node(n);
404 }
405 }
406 // Remove useless CastII nodes with range check dependency
407 for (int i = range_check_cast_count() - 1; i >= 0; i--) {
408 Node* cast = range_check_cast_node(i);
409 if (!useful.member(cast)) {
410 remove_range_check_cast(cast);
411 }
412 }
413 // Remove useless expensive node
414 for (int i = C->expensive_count()-1; i >= 0; i--) {
415 Node* n = C->expensive_node(i);
416 if (!useful.member(n)) {
417 remove_expensive_node(n);
418 }
419 }
420 // clean up the late inline lists
421 remove_useless_late_inlines(&_string_late_inlines, useful);
422 remove_useless_late_inlines(&_boxing_late_inlines, useful);
423 remove_useless_late_inlines(&_late_inlines, useful);
424 debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
425 }
426
427 //------------------------------frame_size_in_words-----------------------------
428 // frame_slots in units of words
429 int Compile::frame_size_in_words() const {
430 // shift is 0 in LP32 and 1 in LP64
431 const int shift = (LogBytesPerWord - LogBytesPerInt);
432 int words = _frame_slots >> shift;
1169 _alias_types = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
1170 AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, grow_ats);
1171 Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1172 {
1173 for (int i = 0; i < grow_ats; i++) _alias_types[i] = &ats[i];
1174 }
1175 // Initialize the first few types.
1176 _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL);
1177 _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1178 _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1179 _num_alias_types = AliasIdxRaw+1;
1180 // Zero out the alias type cache.
1181 Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
1182 // A NULL adr_type hits in the cache right away. Preload the right answer.
1183 probe_alias_cache(NULL)->_index = AliasIdxTop;
1184
1185 _intrinsics = NULL;
1186 _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
1187 _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
1188 _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
1189 _range_check_casts = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
1190 register_library_intrinsics();
1191 }
1192
1193 //---------------------------init_start----------------------------------------
1194 // Install the StartNode on this compile object.
1195 void Compile::init_start(StartNode* s) {
1196 if (failing())
1197 return; // already failing
1198 assert(s == start(), "");
1199 }
1200
1201 /**
1202 * Return the 'StartNode'. We must not have a pending failure, since the ideal graph
1203 * can be in an inconsistent state, i.e., we can get segmentation faults when traversing
1204 * the ideal graph.
1205 */
1206 StartNode* Compile::start() const {
1207 assert (!failing(), "Must not have pending failure. Reason is: %s", failure_reason());
1208 for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
1209 Node* start = root()->fast_out(i);
1916 // Clean up loose ends, if we are out of space for inlining.
1917 WarmCallInfo* call;
1918 while ((call = pop_warm_call()) != NULL) {
1919 call->make_cold();
1920 }
1921 }
1922
1923 //---------------------cleanup_loop_predicates-----------------------
1924 // Remove the opaque nodes that protect the predicates so that all unused
1925 // checks and uncommon_traps will be eliminated from the ideal graph
1926 void Compile::cleanup_loop_predicates(PhaseIterGVN &igvn) {
1927 if (predicate_count()==0) return;
1928 for (int i = predicate_count(); i > 0; i--) {
1929 Node * n = predicate_opaque1_node(i-1);
1930 assert(n->Opcode() == Op_Opaque1, "must be");
1931 igvn.replace_node(n, n->in(1));
1932 }
1933 assert(predicate_count()==0, "should be clean!");
1934 }
1935
1936 void Compile::add_range_check_cast(Node* n) {
1937 assert(n->isa_CastII()->has_range_check(), "CastII should have range check dependency");
1938 assert(!_range_check_casts->contains(n), "duplicate entry in range check casts");
1939 _range_check_casts->append(n);
1940 }
1941
1942 // Remove all range check dependent CastIINodes.
1943 void Compile::remove_range_check_casts(PhaseIterGVN &igvn) {
1944 for (int i = range_check_cast_count(); i > 0; i--) {
1945 Node* cast = range_check_cast_node(i-1);
1946 assert(cast->isa_CastII()->has_range_check(), "CastII should have range check dependency");
1947 igvn.replace_node(cast, cast->in(1));
1948 }
1949 assert(range_check_cast_count() == 0, "should be empty");
1950 }
1951
1952 // StringOpts and late inlining of string methods
1953 void Compile::inline_string_calls(bool parse_time) {
1954 {
1955 // remove useless nodes to make the usage analysis simpler
1956 ResourceMark rm;
1957 PhaseRemoveUseless pru(initial_gvn(), for_igvn());
1958 }
1959
1960 {
1961 ResourceMark rm;
1962 print_method(PHASE_BEFORE_STRINGOPTS, 3);
1963 PhaseStringOpts pso(initial_gvn(), for_igvn());
1964 print_method(PHASE_AFTER_STRINGOPTS, 3);
1965 }
1966
1967 // now inline anything that we skipped the first time around
1968 if (!parse_time) {
1969 _late_inlines_pos = _late_inlines.length();
1970 }
1971
2292 debug_only( int cnt = 0; );
2293 while(major_progress() && (loop_opts_cnt > 0)) {
2294 TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2295 assert( cnt++ < 40, "infinite cycle in loop optimization" );
2296 PhaseIdealLoop ideal_loop( igvn, true);
2297 loop_opts_cnt--;
2298 if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2299 if (failing()) return;
2300 }
2301 }
2302 // Ensure that major progress is now clear
2303 C->clear_major_progress();
2304
2305 {
2306 // Verify that all previous optimizations produced a valid graph
2307 // at least to this point, even if no loop optimizations were done.
2308 TracePhase tp("idealLoopVerify", &timers[_t_idealLoopVerify]);
2309 PhaseIdealLoop::verify(igvn);
2310 }
2311
2312 if (range_check_cast_count() > 0) {
2313 // No more loop optimizations. Remove all range check dependent CastIINodes.
2314 C->remove_range_check_casts(igvn);
2315 igvn.optimize();
2316 }
2317
2318 {
2319 TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2320 PhaseMacroExpand mex(igvn);
2321 if (mex.expand_macro_nodes()) {
2322 assert(failing(), "must bail out w/ explicit message");
2323 return;
2324 }
2325 }
2326
2327 DEBUG_ONLY( _modified_nodes = NULL; )
2328 } // (End scope of igvn; run destructor if necessary for asserts.)
2329
2330 process_print_inlining();
2331 // A method with only infinite loops has no edges entering loops from root
2332 {
2333 TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2334 if (final_graph_reshaping()) {
2335 assert(failing(), "must bail out w/ explicit message");
2336 return;
2337 }
3101 if (n->as_Phi()->bottom_type()->isa_narrowoop() || n->as_Phi()->bottom_type()->isa_narrowklass()) {
3102 // The EncodeP optimization may create Phi with the same edges
3103 // for all paths. It is not handled well by Register Allocator.
3104 Node* unique_in = n->in(1);
3105 assert(unique_in != NULL, "");
3106 uint cnt = n->req();
3107 for (uint i = 2; i < cnt; i++) {
3108 Node* m = n->in(i);
3109 assert(m != NULL, "");
3110 if (unique_in != m)
3111 unique_in = NULL;
3112 }
3113 if (unique_in != NULL) {
3114 n->subsume_by(unique_in, this);
3115 }
3116 }
3117 break;
3118
3119 #endif
3120
3121 #ifdef ASSERT
3122 case Op_CastII:
3123 // Verify that all range check dependent CastII nodes were removed.
3124 if (n->isa_CastII()->has_range_check()) {
3125 n->dump(3);
3126 assert(false, "Range check dependent CastII node was not removed");
3127 }
3128 break;
3129 #endif
3130
3131 case Op_ModI:
3132 if (UseDivMod) {
3133 // Check if a%b and a/b both exist
3134 Node* d = n->find_similar(Op_DivI);
3135 if (d) {
3136 // Replace them with a fused divmod if supported
3137 if (Matcher::has_match_rule(Op_DivModI)) {
3138 DivModINode* divmod = DivModINode::make(n);
3139 d->subsume_by(divmod->div_proj(), this);
3140 n->subsume_by(divmod->mod_proj(), this);
3141 } else {
3142 // replace a%b with a-((a/b)*b)
3143 Node* mult = new MulINode(d, d->in(2));
3144 Node* sub = new SubINode(d->in(1), mult);
3145 n->subsume_by(sub, this);
3146 }
3147 }
3148 }
3149 break;
3150
3986 if (superk->is_interface()) {
3987 // Cannot trust interfaces yet.
3988 // %%% S.B. superk->nof_implementors() == 1
3989 } else if (superelem->is_instance_klass()) {
3990 ciInstanceKlass* ik = superelem->as_instance_klass();
3991 if (!ik->has_subklass() && !ik->is_interface()) {
3992 if (!ik->is_final()) {
3993 // Add a dependency if there is a chance of a later subclass.
3994 dependencies()->assert_leaf_type(ik);
3995 }
3996 return SSC_easy_test; // (3) caller can do a simple ptr comparison
3997 }
3998 } else {
3999 // A primitive array type has no subtypes.
4000 return SSC_easy_test; // (3) caller can do a simple ptr comparison
4001 }
4002
4003 return SSC_full_test;
4004 }
4005
4006 Node* Compile::conv_I2X_index(PhaseGVN* phase, Node* idx, const TypeInt* sizetype, Node* ctrl) {
4007 #ifdef _LP64
4008 // The scaled index operand to AddP must be a clean 64-bit value.
4009 // Java allows a 32-bit int to be incremented to a negative
4010 // value, which appears in a 64-bit register as a large
4011 // positive number. Using that large positive number as an
4012 // operand in pointer arithmetic has bad consequences.
4013 // On the other hand, 32-bit overflow is rare, and the possibility
4014 // can often be excluded, if we annotate the ConvI2L node with
4015 // a type assertion that its value is known to be a small positive
4016 // number. (The prior range check has ensured this.)
4017 // This assertion is used by ConvI2LNode::Ideal.
4018 int index_max = max_jint - 1; // array size is max_jint, index is one less
4019 if (sizetype != NULL) index_max = sizetype->_hi - 1;
4020 const TypeInt* iidxtype = TypeInt::make(0, index_max, Type::WidenMax);
4021 idx = constrained_convI2L(phase, idx, iidxtype, ctrl);
4022 #endif
4023 return idx;
4024 }
4025
// Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
//  phase - GVN used to transform the newly created nodes
//  value - the 32-bit integer value to convert
//  itype - narrowed integer type known to hold 'value' (e.g. established by a prior range check)
//  ctrl  - optional control input the conversion must stay dependent on; may be NULL
// Returns the transformed ConvI2L node, typed to [itype->_lo, itype->_hi].
Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl) {
  if (ctrl != NULL) {
    // Express control dependency by a CastII node with a narrow type.
    // The 'true' flag marks it as a range check dependency (see add_range_check_cast).
    value = new CastIINode(value, itype, false, true /* range check dependency */);
    // Make the CastII node dependent on the control input to prevent the narrowed ConvI2L
    // node from floating above the range check during loop optimizations. Otherwise, the
    // ConvI2L node may be eliminated independently of the range check, causing the data path
    // to become TOP while the control path is still there (although it's unreachable).
    value->set_req(0, ctrl);
    // Save CastII node to remove it after loop optimizations.
    // NOTE(review): the node is registered before GVN transform; presumably transform
    // returns this freshly created node unchanged here — confirm.
    phase->C->add_range_check_cast(value);
    value = phase->transform(value);
  }
  // The actual conversion, carrying the narrowed long range derived from itype.
  const TypeLong* ltype = TypeLong::make(itype->_lo, itype->_hi, itype->_widen);
  return phase->transform(new ConvI2LNode(value, ltype));
}
4043
4044 // The message about the current inlining is accumulated in
// _print_inlining_stream and transferred into the _print_inlining_list
4046 // once we know whether inlining succeeds or not. For regular
4047 // inlining, messages are appended to the buffer pointed by
4048 // _print_inlining_idx in the _print_inlining_list. For late inlining,
4049 // a new buffer is added after _print_inlining_idx in the list. This
4050 // way we can update the inlining message for late inlining call site
4051 // when the inlining is attempted again.
4052 void Compile::print_inlining_init() {
4053 if (print_inlining() || print_intrinsics()) {
4054 _print_inlining_stream = new stringStream();
4055 _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
4056 }
4057 }
4058
4059 void Compile::print_inlining_reinit() {
4060 if (print_inlining() || print_intrinsics()) {
4061 // Re allocate buffer when we change ResourceMark
|