src/share/vm/opto/compile.cpp

 650                   _comp_arena(mtCompiler),
 651                   _node_arena(mtCompiler),
 652                   _old_arena(mtCompiler),
 653                   _Compile_types(mtCompiler),
 654                   _replay_inline_data(NULL),
 655                   _late_inlines(comp_arena(), 2, 0, NULL),
 656                   _string_late_inlines(comp_arena(), 2, 0, NULL),
 657                   _boxing_late_inlines(comp_arena(), 2, 0, NULL),
 658                   _late_inlines_pos(0),
 659                   _number_of_mh_late_inlines(0),
 660                   _inlining_progress(false),
 661                   _inlining_incrementally(false),
 662                   _print_inlining_list(NULL),
 663                   _print_inlining_stream(NULL),
 664                   _print_inlining_idx(0),
 665                   _print_inlining_output(NULL),
 666                   _interpreter_frame_size(0) {
 667   C = this;
 668 
 669   CompileWrapper cw(this);
 670 #ifndef PRODUCT
 671   if (TimeCompiler2) {
 672     tty->print(" ");
 673     target->holder()->name()->print();
 674     tty->print(".");
 675     target->print_short_name();
 676     tty->print("  ");
 677   }
 678   TraceTime t1("Total compilation time", &_t_totalCompilation, TimeCompiler, TimeCompiler2);
 679   TraceTime t2(NULL, &_t_methodCompilation, TimeCompiler, false);


 680   bool print_opto_assembly = PrintOptoAssembly || _method->has_option("PrintOptoAssembly");
 681   if (!print_opto_assembly) {
 682     bool print_assembly = (PrintAssembly || _method->should_print_assembly());
 683     if (print_assembly && !Disassembler::can_decode()) {
 684       tty->print_cr("PrintAssembly request changed to PrintOptoAssembly");
 685       print_opto_assembly = true;
 686     }
 687   }
 688   set_print_assembly(print_opto_assembly);
 689   set_parsed_irreducible_loop(false);
 690 
 691   if (method()->has_option("ReplayInline")) {
 692     _replay_inline_data = ciReplay::load_inline_data(method(), entry_bci(), ci_env->comp_level());
 693   }
 694 #endif
 695   set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
 696   set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));
 697   set_has_irreducible_loop(true); // conservative until build_loop_tree() resets it
 698 
 699   if (ProfileTraps RTM_OPT_ONLY( || UseRTMLocking )) {


 709   print_compile_messages();
 710 
 711   _ilt = InlineTree::build_inline_tree_root();
 712 
 713   // Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice
 714   assert(num_alias_types() >= AliasIdxRaw, "");
 715 
 716 #define MINIMUM_NODE_HASH  1023
 717   // Node list that Iterative GVN will start with
 718   Unique_Node_List for_igvn(comp_arena());
 719   set_for_igvn(&for_igvn);
 720 
 721   // GVN that will be run immediately on new nodes
 722   uint estimated_size = method()->code_size()*4+64;
 723   estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
 724   PhaseGVN gvn(node_arena(), estimated_size);
 725   set_initial_gvn(&gvn);
 726 
 727   print_inlining_init();
 728   { // Scope for timing the parser
 729     TracePhase t3("parse", &_t_parser, true);
 730 
 731     // Put top into the hash table ASAP.
 732     initial_gvn()->transform_no_reclaim(top());
 733 
 734     // Set up tf(), start(), and find a CallGenerator.
 735     CallGenerator* cg = NULL;
 736     if (is_osr_compilation()) {
 737       const TypeTuple *domain = StartOSRNode::osr_domain();
 738       const TypeTuple *range = TypeTuple::make_range(method()->signature());
 739       init_tf(TypeFunc::make(domain, range));
 740       StartNode* s = new StartOSRNode(root(), domain);
 741       initial_gvn()->set_type_bottom(s);
 742       init_start(s);
 743       cg = CallGenerator::for_osr(method(), entry_bci());
 744     } else {
 745       // Normal case.
 746       init_tf(TypeFunc::make(method()));
 747       StartNode* s = new StartNode(root(), tf()->domain());
 748       initial_gvn()->set_type_bottom(s);
 749       init_start(s);


 871 
 872   _orig_pc_slot =  fixed_slots();
 873   int next_slot = _orig_pc_slot + (sizeof(address) / VMRegImpl::stack_slot_size);
 874   set_fixed_slots(next_slot);
 875 
 876   // Compute when to use implicit null checks. Used by matching trap-based
 877   // nodes and NullCheck optimization.
 878   set_allowed_deopt_reasons();
 879 
 880   // Now generate code
 881   Code_Gen();
 882   if (failing())  return;
 883 
 884   // Check if we want to skip execution of all compiled code.
 885   {
 886 #ifndef PRODUCT
 887     if (OptoNoExecute) {
 888       record_method_not_compilable("+OptoNoExecute");  // Flag as failed
 889       return;
 890     }
 891     TracePhase t2("install_code", &_t_registerMethod, TimeCompiler);
 892 #endif
 893 
 894     if (is_osr_compilation()) {
 895       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
 896       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
 897     } else {
 898       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
 899       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
 900     }
 901 
 902     env()->register_method(_method, _entry_bci,
 903                            &_code_offsets,
 904                            _orig_pc_slot_offset_in_bytes,
 905                            code_buffer(),
 906                            frame_size_in_words(), _oop_map_set,
 907                            &_handler_table, &_inc_table,
 908                            compiler,
 909                            env()->comp_level(),
 910                            has_unsafe_access(),
 911                            SharedRuntime::is_wide_vector(max_vector_size()),


 960 #endif
 961     _comp_arena(mtCompiler),
 962     _node_arena(mtCompiler),
 963     _old_arena(mtCompiler),
 964     _Compile_types(mtCompiler),
 965     _dead_node_list(comp_arena()),
 966     _dead_node_count(0),
 967     _congraph(NULL),
 968     _replay_inline_data(NULL),
 969     _number_of_mh_late_inlines(0),
 970     _inlining_progress(false),
 971     _inlining_incrementally(false),
 972     _print_inlining_list(NULL),
 973     _print_inlining_stream(NULL),
 974     _print_inlining_idx(0),
 975     _print_inlining_output(NULL),
 976     _allowed_reasons(0),
 977     _interpreter_frame_size(0) {
 978   C = this;
 979 



 980 #ifndef PRODUCT
 981   TraceTime t1(NULL, &_t_totalCompilation, TimeCompiler, false);
 982   TraceTime t2(NULL, &_t_stubCompilation, TimeCompiler, false);
 983   set_print_assembly(PrintFrameConverterAssembly);
 984   set_parsed_irreducible_loop(false);
 985 #endif
 986   set_has_irreducible_loop(false); // no loops
 987 
 988   CompileWrapper cw(this);
 989   Init(/*AliasLevel=*/ 0);
 990   init_tf((*generator)());
 991 
 992   {
 993     // The following is a dummy for the sake of GraphKit::gen_stub
 994     Unique_Node_List for_igvn(comp_arena());
 995     set_for_igvn(&for_igvn);  // not used, but some GraphKit guys push on this
 996     PhaseGVN gvn(Thread::current()->resource_area(),255);
 997     set_initial_gvn(&gvn);    // not significant, but GraphKit guys use it pervasively
 998     gvn.transform_no_reclaim(top());
 999 
1000     GraphKit kit;
1001     kit.gen_stub(stub_function, stub_name, is_fancy_jump, pass_tls, return_pc);
1002   }


2017 
2018     if (failing())  return;
2019 
2020     {
2021       ResourceMark rm;
2022       PhaseRemoveUseless pru(initial_gvn(), for_igvn());
2023     }
2024 
2025     igvn = PhaseIterGVN(gvn);
2026 
2027     igvn.optimize();
2028   }
2029 
2030   set_inlining_incrementally(false);
2031 }
2032 
2033 
2034 //------------------------------Optimize---------------------------------------
2035 // Given a graph, optimize it.
2036 void Compile::Optimize() {
2037   TracePhase t1("optimizer", &_t_optimizer, true);
2038 
2039 #ifndef PRODUCT
2040   if (env()->break_at_compile()) {
2041     BREAKPOINT;
2042   }
2043 
2044 #endif
2045 
2046   ResourceMark rm;
2047   int          loop_opts_cnt;
2048 
2049   print_inlining_reinit();
2050 
2051   NOT_PRODUCT( verify_graph_edges(); )
2052 
2053   print_method(PHASE_AFTER_PARSING);
2054 
2055  {
2056   // Iterative Global Value Numbering, including ideal transforms
2057   // Initialize IterGVN with types and values from parse-time GVN
2058   PhaseIterGVN igvn(initial_gvn());
2059 #ifdef ASSERT
2060   _modified_nodes = new (comp_arena()) Unique_Node_List(comp_arena());
2061 #endif
2062   {
2063     NOT_PRODUCT( TracePhase t2("iterGVN", &_t_iterGVN, TimeCompiler); )
2064     igvn.optimize();
2065   }
2066 
2067   print_method(PHASE_ITER_GVN1, 2);
2068 
2069   if (failing())  return;
2070 
2071   {
2072     NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); )
2073     inline_incrementally(igvn);
2074   }
2075 
2076   print_method(PHASE_INCREMENTAL_INLINE, 2);
2077 
2078   if (failing())  return;
2079 
2080   if (eliminate_boxing()) {
2081     NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); )
2082     // Inline valueOf() methods now.
2083     inline_boxing_calls(igvn);
2084 
2085     if (AlwaysIncrementalInline) {
2086       inline_incrementally(igvn);
2087     }
2088 
2089     print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
2090 
2091     if (failing())  return;
2092   }
2093 
2094   // Remove the speculative part of types and clean up the graph from
2095   // the extra CastPP nodes whose only purpose is to carry them. Do
2096   // that early so that optimizations are not disrupted by the extra
2097   // CastPP nodes.
2098   remove_speculative_types(igvn);
2099 
2100   // No more new expensive nodes will be added to the list from here
2101   // so keep only the actual candidates for optimizations.
2102   cleanup_expensive_nodes(igvn);
2103 
2104   // Perform escape analysis
2105   if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
2106     if (has_loops()) {
2107       // Cleanup graph (remove dead nodes).
2108       TracePhase t2("idealLoop", &_t_idealLoop, true);
2109       PhaseIdealLoop ideal_loop( igvn, false, true );
2110       if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2111       if (failing())  return;
2112     }
2113     ConnectionGraph::do_analysis(this, &igvn);
2114 
2115     if (failing())  return;
2116 
 2117     // Optimize out field loads from scalar replaceable allocations.
2118     igvn.optimize();
2119     print_method(PHASE_ITER_GVN_AFTER_EA, 2);
2120 
2121     if (failing())  return;
2122 
2123     if (congraph() != NULL && macro_count() > 0) {
2124       NOT_PRODUCT( TracePhase t2("macroEliminate", &_t_macroEliminate, TimeCompiler); )
2125       PhaseMacroExpand mexp(igvn);
2126       mexp.eliminate_macro_nodes();
2127       igvn.set_delay_transform(false);
2128 
2129       igvn.optimize();
2130       print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2);
2131 
2132       if (failing())  return;
2133     }
2134   }
2135 
2136   // Loop transforms on the ideal graph.  Range Check Elimination,
2137   // peeling, unrolling, etc.
2138 
2139   // Set loop opts counter
2140   loop_opts_cnt = num_loop_opts();
2141   if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
2142     {
2143       TracePhase t2("idealLoop", &_t_idealLoop, true);
2144       PhaseIdealLoop ideal_loop( igvn, true );
2145       loop_opts_cnt--;
2146       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
2147       if (failing())  return;
2148     }
2149     // Loop opts pass if partial peeling occurred in previous pass
2150     if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
2151       TracePhase t3("idealLoop", &_t_idealLoop, true);
2152       PhaseIdealLoop ideal_loop( igvn, false );
2153       loop_opts_cnt--;
2154       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
2155       if (failing())  return;
2156     }
2157     // Loop opts pass for loop-unrolling before CCP
2158     if(major_progress() && (loop_opts_cnt > 0)) {
2159       TracePhase t4("idealLoop", &_t_idealLoop, true);
2160       PhaseIdealLoop ideal_loop( igvn, false );
2161       loop_opts_cnt--;
2162       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
2163     }
2164     if (!failing()) {
2165       // Verify that last round of loop opts produced a valid graph
2166       NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); )
2167       PhaseIdealLoop::verify(igvn);
2168     }
2169   }
2170   if (failing())  return;
2171 
2172   // Conditional Constant Propagation;
2173   PhaseCCP ccp( &igvn );
2174   assert( true, "Break here to ccp.dump_nodes_and_types(_root,999,1)");
2175   {
2176     TracePhase t2("ccp", &_t_ccp, true);
2177     ccp.do_transform();
2178   }
2179   print_method(PHASE_CPP1, 2);
2180 
2181   assert( true, "Break here to ccp.dump_old2new_map()");
2182 
2183   // Iterative Global Value Numbering, including ideal transforms
2184   {
2185     NOT_PRODUCT( TracePhase t2("iterGVN2", &_t_iterGVN2, TimeCompiler); )
2186     igvn = ccp;
2187     igvn.optimize();
2188   }
2189 
2190   print_method(PHASE_ITER_GVN2, 2);
2191 
2192   if (failing())  return;
2193 
2194   // Loop transforms on the ideal graph.  Range Check Elimination,
2195   // peeling, unrolling, etc.
2196   if(loop_opts_cnt > 0) {
2197     debug_only( int cnt = 0; );
2198     while(major_progress() && (loop_opts_cnt > 0)) {
2199       TracePhase t2("idealLoop", &_t_idealLoop, true);
2200       assert( cnt++ < 40, "infinite cycle in loop optimization" );
2201       PhaseIdealLoop ideal_loop( igvn, true);
2202       loop_opts_cnt--;
2203       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2204       if (failing())  return;
2205     }
2206   }
2207 
2208   {
2209     // Verify that all previous optimizations produced a valid graph
2210     // at least to this point, even if no loop optimizations were done.
2211     NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); )
2212     PhaseIdealLoop::verify(igvn);
2213   }
2214 
2215   {
2216     NOT_PRODUCT( TracePhase t2("macroExpand", &_t_macroExpand, TimeCompiler); )
2217     PhaseMacroExpand  mex(igvn);
2218     if (mex.expand_macro_nodes()) {
2219       assert(failing(), "must bail out w/ explicit message");
2220       return;
2221     }
2222   }
2223 
2224   DEBUG_ONLY( _modified_nodes = NULL; )
2225  } // (End scope of igvn; run destructor if necessary for asserts.)
2226 
2227   process_print_inlining();
2228   // A method with only infinite loops has no edges entering loops from root
2229   {
2230     NOT_PRODUCT( TracePhase t2("graphReshape", &_t_graphReshaping, TimeCompiler); )
2231     if (final_graph_reshaping()) {
2232       assert(failing(), "must bail out w/ explicit message");
2233       return;
2234     }
2235   }
2236 
2237   print_method(PHASE_OPTIMIZE_FINISHED, 2);
2238 }
2239 
2240 
2241 //------------------------------Code_Gen---------------------------------------
2242 // Given a graph, generate code for it
2243 void Compile::Code_Gen() {
2244   if (failing()) {
2245     return;
2246   }
2247 
2248   // Perform instruction selection.  You might think we could reclaim Matcher
2249   // memory PDQ, but actually the Matcher is used in generating spill code.
2250   // Internals of the Matcher (including some VectorSets) must remain live
 2251   // for a while - thus I cannot reclaim Matcher memory lest a VectorSet usage
2252   // set a bit in reclaimed memory.
2253 
2254   // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
2255   // nodes.  Mapping is only valid at the root of each matched subtree.
2256   NOT_PRODUCT( verify_graph_edges(); )
2257 
2258   Matcher matcher;
2259   _matcher = &matcher;
2260   {
2261     TracePhase t2("matcher", &_t_matcher, true);
2262     matcher.match();
2263   }
2264   // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
2265   // nodes.  Mapping is only valid at the root of each matched subtree.
2266   NOT_PRODUCT( verify_graph_edges(); )
2267 
2268   // If you have too many nodes, or if matching has failed, bail out
2269   check_node_count(0, "out of nodes matching instructions");
2270   if (failing()) {
2271     return;
2272   }
2273 
2274   // Build a proper-looking CFG
2275   PhaseCFG cfg(node_arena(), root(), matcher);
2276   _cfg = &cfg;
2277   {
2278     NOT_PRODUCT( TracePhase t2("scheduler", &_t_scheduler, TimeCompiler); )
2279     bool success = cfg.do_global_code_motion();
2280     if (!success) {
2281       return;
2282     }
2283 
2284     print_method(PHASE_GLOBAL_CODE_MOTION, 2);
2285     NOT_PRODUCT( verify_graph_edges(); )
2286     debug_only( cfg.verify(); )
2287   }
2288 
2289   PhaseChaitin regalloc(unique(), cfg, matcher);
2290   _regalloc = &regalloc;
2291   {
2292     TracePhase t2("regalloc", &_t_registerAllocation, true);
2293     // Perform register allocation.  After Chaitin, use-def chains are
2294     // no longer accurate (at spill code) and so must be ignored.
2295     // Node->LRG->reg mappings are still accurate.
2296     _regalloc->Register_Allocate();
2297 
2298     // Bail out if the allocator builds too many nodes
2299     if (failing()) {
2300       return;
2301     }
2302   }
2303 
 2304   // Prior to register allocation we kept empty basic blocks in case the
 2305   // allocator needed a place to spill.  After register allocation we
2306   // are not adding any new instructions.  If any basic block is empty, we
2307   // can now safely remove it.
2308   {
2309     NOT_PRODUCT( TracePhase t2("blockOrdering", &_t_blockOrdering, TimeCompiler); )
2310     cfg.remove_empty_blocks();
2311     if (do_freq_based_layout()) {
2312       PhaseBlockLayout layout(cfg);
2313     } else {
2314       cfg.set_loop_alignment();
2315     }
2316     cfg.fixup_flow();
2317   }
2318 
2319   // Apply peephole optimizations
2320   if( OptoPeephole ) {
2321     NOT_PRODUCT( TracePhase t2("peephole", &_t_peephole, TimeCompiler); )
2322     PhasePeephole peep( _regalloc, cfg);
2323     peep.do_transform();
2324   }
2325 
2326   // Do late expand if CPU requires this.
2327   if (Matcher::require_postalloc_expand) {
2328     NOT_PRODUCT(TracePhase t2c("postalloc_expand", &_t_postalloc_expand, true));
2329     cfg.postalloc_expand(_regalloc);
2330   }
2331 
2332   // Convert Nodes to instruction bits in a buffer
2333   {
2334     // %%%% workspace merge brought two timers together for one job
2335     TracePhase t2a("output", &_t_output, true);
2336     NOT_PRODUCT( TraceTime t2b(NULL, &_t_codeGeneration, TimeCompiler, false); )
2337     Output();
2338   }
2339 
2340   print_method(PHASE_FINAL_CODE);
2341 
2342   // He's dead, Jim.
2343   _cfg     = (PhaseCFG*)0xdeadbeef;
2344   _regalloc = (PhaseChaitin*)0xdeadbeef;
2345 }
2346 
2347 
2348 //------------------------------dump_asm---------------------------------------
2349 // Dump formatted assembly
2350 #ifndef PRODUCT
2351 void Compile::dump_asm(int *pcs, uint pc_limit) {
2352   bool cut_short = false;
2353   tty->print_cr("#");
2354   tty->print("#  ");  _tf->dump();  tty->cr();
2355   tty->print_cr("#");
2356 


3521     log()->elem("failure reason='%s' phase='compile'", reason);
3522   }
3523   if (_failure_reason == NULL) {
3524     // Record the first failure reason.
3525     _failure_reason = reason;
3526   }
3527 
3528   EventCompilerFailure event;
3529   if (event.should_commit()) {
3530     event.set_compileID(Compile::compile_id());
3531     event.set_failure(reason);
3532     event.commit();
3533   }
3534 
3535   if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
3536     C->print_method(PHASE_FAILURE);
3537   }
3538   _root = NULL;  // flush the graph, too
3539 }
3540 
3541 Compile::TracePhase::TracePhase(const char* name, elapsedTimer* accumulator, bool dolog)
3542   : TraceTime(NULL, accumulator, false NOT_PRODUCT( || TimeCompiler ), false),
3543     _phase_name(name), _dolog(dolog)
3544 {
3545   if (dolog) {
3546     C = Compile::current();
3547     _log = C->log();
3548   } else {
3549     C = NULL;
3550     _log = NULL;
3551   }
3552   if (_log != NULL) {
3553     _log->begin_head("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
3554     _log->stamp();
3555     _log->end_head();
3556   }
3557 }
3558 
3559 Compile::TracePhase::~TracePhase() {
3560 
3561   C = Compile::current();
3562   if (_dolog) {
3563     _log = C->log();
3564   } else {
3565     _log = NULL;




 650                   _comp_arena(mtCompiler),
 651                   _node_arena(mtCompiler),
 652                   _old_arena(mtCompiler),
 653                   _Compile_types(mtCompiler),
 654                   _replay_inline_data(NULL),
 655                   _late_inlines(comp_arena(), 2, 0, NULL),
 656                   _string_late_inlines(comp_arena(), 2, 0, NULL),
 657                   _boxing_late_inlines(comp_arena(), 2, 0, NULL),
 658                   _late_inlines_pos(0),
 659                   _number_of_mh_late_inlines(0),
 660                   _inlining_progress(false),
 661                   _inlining_incrementally(false),
 662                   _print_inlining_list(NULL),
 663                   _print_inlining_stream(NULL),
 664                   _print_inlining_idx(0),
 665                   _print_inlining_output(NULL),
 666                   _interpreter_frame_size(0) {
 667   C = this;
 668 
 669   CompileWrapper cw(this);
 670 
 671   if (CITimeVerbose) {
 672     tty->print(" ");
 673     target->holder()->name()->print();
 674     tty->print(".");
 675     target->print_short_name();
 676     tty->print("  ");
 677   }
 678   TraceTime t1("Total compilation time", &_t_totalCompilation, CITime, CITimeVerbose);
 679   TraceTime t2(NULL, &_t_methodCompilation, CITime, false);
 680 
 681 #ifndef PRODUCT
 682   bool print_opto_assembly = PrintOptoAssembly || _method->has_option("PrintOptoAssembly");
 683   if (!print_opto_assembly) {
 684     bool print_assembly = (PrintAssembly || _method->should_print_assembly());
 685     if (print_assembly && !Disassembler::can_decode()) {
 686       tty->print_cr("PrintAssembly request changed to PrintOptoAssembly");
 687       print_opto_assembly = true;
 688     }
 689   }
 690   set_print_assembly(print_opto_assembly);
 691   set_parsed_irreducible_loop(false);
 692 
 693   if (method()->has_option("ReplayInline")) {
 694     _replay_inline_data = ciReplay::load_inline_data(method(), entry_bci(), ci_env->comp_level());
 695   }
 696 #endif
 697   set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
 698   set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));
 699   set_has_irreducible_loop(true); // conservative until build_loop_tree() resets it
 700 
 701   if (ProfileTraps RTM_OPT_ONLY( || UseRTMLocking )) {
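
The hunk above is the heart of this change: the C2-private TimeCompiler/TimeCompiler2 flags give way to the shared CITime/CITimeVerbose flags, so compilation timing is controlled the same way across compilers. Both versions lean on the same RAII idiom: a stack-allocated trace object samples the clock in its constructor and, in its destructor, adds its lifetime to a shared accumulator and optionally prints. Below is a minimal self-contained sketch of that idiom; ElapsedTimer, ScopedTrace, and the flag are simplified stand-ins, not the HotSpot types.

    #include <chrono>
    #include <cstdio>

    // Stand-in for HotSpot's elapsedTimer: accumulates seconds across scopes.
    struct ElapsedTimer {
        double seconds = 0.0;
    };

    // Stand-in for the -XX:+CITime flag.
    static bool CITime = true;

    // RAII guard: measures its own lifetime, adds it to an accumulator, and
    // optionally prints on exit -- the shape TraceTime and TracePhase share.
    class ScopedTrace {
        const char*   _name;        // phase name; may be null for silent timing
        ElapsedTimer* _accumulator; // shared per-phase total
        bool          _active;      // gated on the timing flag
        std::chrono::steady_clock::time_point _start;
    public:
        ScopedTrace(const char* name, ElapsedTimer* accumulator, bool active)
            : _name(name), _accumulator(accumulator), _active(active),
              _start(std::chrono::steady_clock::now()) {}
        ~ScopedTrace() {
            if (!_active) return;
            std::chrono::duration<double> d =
                std::chrono::steady_clock::now() - _start;
            _accumulator->seconds += d.count();
            if (_name != nullptr) std::printf("[%s: %.6f s]\n", _name, d.count());
        }
    };

    int main() {
        ElapsedTimer total;
        {   // scope mirrors "TraceTime t1(...)" at the top of a compile
            ScopedTrace t1("Total compilation time", &total, CITime);
            volatile long sink = 0;
            for (long i = 0; i < 10000000; i++) sink += i;  // stand-in work
        }
        std::printf("accumulated: %.6f s\n", total.seconds);
        return 0;
    }

Because destruction order is the reverse of construction, nesting these guards naturally attributes inner-phase time to both the inner and outer accumulators.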


 711   print_compile_messages();
 712 
 713   _ilt = InlineTree::build_inline_tree_root();
 714 
 715   // Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice
 716   assert(num_alias_types() >= AliasIdxRaw, "");
 717 
 718 #define MINIMUM_NODE_HASH  1023
 719   // Node list that Iterative GVN will start with
 720   Unique_Node_List for_igvn(comp_arena());
 721   set_for_igvn(&for_igvn);
 722 
 723   // GVN that will be run immediately on new nodes
 724   uint estimated_size = method()->code_size()*4+64;
 725   estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
 726   PhaseGVN gvn(node_arena(), estimated_size);
 727   set_initial_gvn(&gvn);
 728 
 729   print_inlining_init();
 730   { // Scope for timing the parser
 731     TracePhase t3("parse", &timers[_t_parser]);
 732 
 733     // Put top into the hash table ASAP.
 734     initial_gvn()->transform_no_reclaim(top());
 735 
 736     // Set up tf(), start(), and find a CallGenerator.
 737     CallGenerator* cg = NULL;
 738     if (is_osr_compilation()) {
 739       const TypeTuple *domain = StartOSRNode::osr_domain();
 740       const TypeTuple *range = TypeTuple::make_range(method()->signature());
 741       init_tf(TypeFunc::make(domain, range));
 742       StartNode* s = new StartOSRNode(root(), domain);
 743       initial_gvn()->set_type_bottom(s);
 744       init_start(s);
 745       cg = CallGenerator::for_osr(method(), entry_bci());
 746     } else {
 747       // Normal case.
 748       init_tf(TypeFunc::make(method()));
 749       StartNode* s = new StartNode(root(), tf()->domain());
 750       initial_gvn()->set_type_bottom(s);
 751       init_start(s);


 873 
 874   _orig_pc_slot =  fixed_slots();
 875   int next_slot = _orig_pc_slot + (sizeof(address) / VMRegImpl::stack_slot_size);
 876   set_fixed_slots(next_slot);
 877 
 878   // Compute when to use implicit null checks. Used by matching trap-based
 879   // nodes and NullCheck optimization.
 880   set_allowed_deopt_reasons();
 881 
 882   // Now generate code
 883   Code_Gen();
 884   if (failing())  return;
 885 
 886   // Check if we want to skip execution of all compiled code.
 887   {
 888 #ifndef PRODUCT
 889     if (OptoNoExecute) {
 890       record_method_not_compilable("+OptoNoExecute");  // Flag as failed
 891       return;
 892     }
 893     TracePhase t2("install_code", &_t_registerMethod);
 894 #endif
 895 
 896     if (is_osr_compilation()) {
 897       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
 898       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
 899     } else {
 900       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
 901       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
 902     }
 903 
 904     env()->register_method(_method, _entry_bci,
 905                            &_code_offsets,
 906                            _orig_pc_slot_offset_in_bytes,
 907                            code_buffer(),
 908                            frame_size_in_words(), _oop_map_set,
 909                            &_handler_table, &_inc_table,
 910                            compiler,
 911                            env()->comp_level(),
 912                            has_unsafe_access(),
 913                            SharedRuntime::is_wide_vector(max_vector_size()),


 962 #endif
 963     _comp_arena(mtCompiler),
 964     _node_arena(mtCompiler),
 965     _old_arena(mtCompiler),
 966     _Compile_types(mtCompiler),
 967     _dead_node_list(comp_arena()),
 968     _dead_node_count(0),
 969     _congraph(NULL),
 970     _replay_inline_data(NULL),
 971     _number_of_mh_late_inlines(0),
 972     _inlining_progress(false),
 973     _inlining_incrementally(false),
 974     _print_inlining_list(NULL),
 975     _print_inlining_stream(NULL),
 976     _print_inlining_idx(0),
 977     _print_inlining_output(NULL),
 978     _allowed_reasons(0),
 979     _interpreter_frame_size(0) {
 980   C = this;
 981 
 982   TraceTime t1(NULL, &_t_totalCompilation, CITime, false);
 983   TraceTime t2(NULL, &_t_stubCompilation, CITime, false);
 984 
 985 #ifndef PRODUCT


 986   set_print_assembly(PrintFrameConverterAssembly);
 987   set_parsed_irreducible_loop(false);
 988 #endif
 989   set_has_irreducible_loop(false); // no loops
 990 
 991   CompileWrapper cw(this);
 992   Init(/*AliasLevel=*/ 0);
 993   init_tf((*generator)());
 994 
 995   {
 996     // The following is a dummy for the sake of GraphKit::gen_stub
 997     Unique_Node_List for_igvn(comp_arena());
 998     set_for_igvn(&for_igvn);  // not used, but some GraphKit guys push on this
 999     PhaseGVN gvn(Thread::current()->resource_area(),255);
1000     set_initial_gvn(&gvn);    // not significant, but GraphKit guys use it pervasively
1001     gvn.transform_no_reclaim(top());
1002 
1003     GraphKit kit;
1004     kit.gen_stub(stub_function, stub_name, is_fancy_jump, pass_tls, return_pc);
1005   }
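
Stub compilation above takes its function type from a caller-supplied generator rather than from a Java method: init_tf((*generator)()) calls through a function pointer that manufactures the TypeFunc. The same shape in miniature, with TypeFuncLike and the generator as illustrative stand-ins:

    #include <cstdio>

    // Stand-in for TypeFunc: reduced to an argument count.
    struct TypeFuncLike { int args; };

    // A generator is a plain function pointer that manufactures the type,
    // matching the generator parameter of the stub Compile constructor.
    typedef TypeFuncLike (*TypeFuncGenerator)();

    static TypeFuncLike two_arg_stub_type() { return TypeFuncLike{2}; }

    static void compile_stub(TypeFuncGenerator generator) {
        TypeFuncLike tf = (*generator)();  // as in init_tf((*generator)());
        std::printf("stub takes %d args\n", tf.args);
    }

    int main() { compile_stub(&two_arg_stub_type); return 0; }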


2020 
2021     if (failing())  return;
2022 
2023     {
2024       ResourceMark rm;
2025       PhaseRemoveUseless pru(initial_gvn(), for_igvn());
2026     }
2027 
2028     igvn = PhaseIterGVN(gvn);
2029 
2030     igvn.optimize();
2031   }
2032 
2033   set_inlining_incrementally(false);
2034 }
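
This is the tail of Compile::inline_incrementally(): after an inlining round, nodes orphaned by inlining are pruned (PhaseRemoveUseless under a ResourceMark), the iterative GVN is rebuilt from the parse-time GVN, and the graph is re-optimized before the incremental flag is cleared. The loop head lies outside this excerpt, so the round structure in the sketch below is an assumption about the surrounding control flow, with placeholder types throughout.

    #include <cstdio>

    // Placeholder graph state; the real code mutates the ideal graph.
    struct Graph { bool inlining_incrementally = false; };

    static bool inline_one_round(Graph&, int round) { return round < 2; }
    static void remove_useless(Graph&) {}  // PhaseRemoveUseless under a ResourceMark
    static void igvn_optimize(Graph&)  {}  // igvn = PhaseIterGVN(gvn); igvn.optimize();

    // Assumed shape of Compile::inline_incrementally(): repeat
    // inline -> prune -> re-run iterative GVN, then drop the flag.
    static void inline_incrementally(Graph& g) {
        g.inlining_incrementally = true;
        for (int round = 0; inline_one_round(g, round); round++) {
            remove_useless(g);
            igvn_optimize(g);
            std::printf("inline round %d re-optimized\n", round);
        }
        g.inlining_incrementally = false;  // set_inlining_incrementally(false)
    }

    int main() { Graph g; inline_incrementally(g); return 0; }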
2035 
2036 
2037 //------------------------------Optimize---------------------------------------
2038 // Given a graph, optimize it.
2039 void Compile::Optimize() {
2040   TracePhase t1("optimizer", &timers[_t_optimizer]);
2041 
2042 #ifndef PRODUCT
2043   if (env()->break_at_compile()) {
2044     BREAKPOINT;
2045   }
2046 
2047 #endif
2048 
2049   ResourceMark rm;
2050   int          loop_opts_cnt;
2051 
2052   print_inlining_reinit();
2053 
2054   NOT_PRODUCT( verify_graph_edges(); )
2055 
2056   print_method(PHASE_AFTER_PARSING);
2057 
2058  {
2059   // Iterative Global Value Numbering, including ideal transforms
2060   // Initialize IterGVN with types and values from parse-time GVN
2061   PhaseIterGVN igvn(initial_gvn());
2062 #ifdef ASSERT
2063   _modified_nodes = new (comp_arena()) Unique_Node_List(comp_arena());
2064 #endif
2065   {
2066     TracePhase t2("iterGVN", &timers[_t_iterGVN]);
2067     igvn.optimize();
2068   }
2069 
2070   print_method(PHASE_ITER_GVN1, 2);
2071 
2072   if (failing())  return;
2073 
2074   {
2075     TracePhase t2("incrementalInline", &timers[_t_incrInline]);
2076     inline_incrementally(igvn);
2077   }
2078 
2079   print_method(PHASE_INCREMENTAL_INLINE, 2);
2080 
2081   if (failing())  return;
2082 
2083   if (eliminate_boxing()) {
2084     TracePhase t2("incrementalInline", &timers[_t_incrInline]);
2085     // Inline valueOf() methods now.
2086     inline_boxing_calls(igvn);
2087 
2088     if (AlwaysIncrementalInline) {
2089       inline_incrementally(igvn);
2090     }
2091 
2092     print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
2093 
2094     if (failing())  return;
2095   }
2096 
2097   // Remove the speculative part of types and clean up the graph from
2098   // the extra CastPP nodes whose only purpose is to carry them. Do
2099   // that early so that optimizations are not disrupted by the extra
2100   // CastPP nodes.
2101   remove_speculative_types(igvn);
2102 
2103   // No more new expensive nodes will be added to the list from here
2104   // so keep only the actual candidates for optimizations.
2105   cleanup_expensive_nodes(igvn);
2106 
2107   // Perform escape analysis
2108   if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
2109     if (has_loops()) {
2110       // Cleanup graph (remove dead nodes).
2111       TracePhase t2("idealLoop", &timers[_t_idealLoop]);
2112       PhaseIdealLoop ideal_loop( igvn, false, true );
2113       if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2114       if (failing())  return;
2115     }
2116     ConnectionGraph::do_analysis(this, &igvn);
2117 
2118     if (failing())  return;
2119 
 2120     // Optimize out field loads from scalar replaceable allocations.
2121     igvn.optimize();
2122     print_method(PHASE_ITER_GVN_AFTER_EA, 2);
2123 
2124     if (failing())  return;
2125 
2126     if (congraph() != NULL && macro_count() > 0) {
2127       TracePhase t2("macroEliminate", &timers[_t_macroEliminate]);
2128       PhaseMacroExpand mexp(igvn);
2129       mexp.eliminate_macro_nodes();
2130       igvn.set_delay_transform(false);
2131 
2132       igvn.optimize();
2133       print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2);
2134 
2135       if (failing())  return;
2136     }
2137   }
2138 
2139   // Loop transforms on the ideal graph.  Range Check Elimination,
2140   // peeling, unrolling, etc.
2141 
2142   // Set loop opts counter
2143   loop_opts_cnt = num_loop_opts();
2144   if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
2145     {
2146       TracePhase t2("idealLoop", &timers[_t_idealLoop]);
2147       PhaseIdealLoop ideal_loop( igvn, true );
2148       loop_opts_cnt--;
2149       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
2150       if (failing())  return;
2151     }
2152     // Loop opts pass if partial peeling occurred in previous pass
2153     if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
2154       TracePhase t3("idealLoop", &timers[_t_idealLoop]);
2155       PhaseIdealLoop ideal_loop( igvn, false );
2156       loop_opts_cnt--;
2157       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
2158       if (failing())  return;
2159     }
2160     // Loop opts pass for loop-unrolling before CCP
2161     if(major_progress() && (loop_opts_cnt > 0)) {
2162       TracePhase t4("idealLoop", &timers[_t_idealLoop]);
2163       PhaseIdealLoop ideal_loop( igvn, false );
2164       loop_opts_cnt--;
2165       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
2166     }
2167     if (!failing()) {
2168       // Verify that last round of loop opts produced a valid graph
2169       TracePhase t2("idealLoopVerify", &timers[_t_idealLoopVerify]);
2170       PhaseIdealLoop::verify(igvn);
2171     }
2172   }
2173   if (failing())  return;
2174 
2175   // Conditional Constant Propagation;
2176   PhaseCCP ccp( &igvn );
2177   assert( true, "Break here to ccp.dump_nodes_and_types(_root,999,1)");
2178   {
2179     TracePhase t2("ccp", &timers[_t_ccp]);
2180     ccp.do_transform();
2181   }
2182   print_method(PHASE_CPP1, 2);
2183 
2184   assert( true, "Break here to ccp.dump_old2new_map()");
2185 
2186   // Iterative Global Value Numbering, including ideal transforms
2187   {
2188     TracePhase t2("iterGVN2", &timers[_t_iterGVN2]);
2189     igvn = ccp;
2190     igvn.optimize();
2191   }
2192 
2193   print_method(PHASE_ITER_GVN2, 2);
2194 
2195   if (failing())  return;
2196 
2197   // Loop transforms on the ideal graph.  Range Check Elimination,
2198   // peeling, unrolling, etc.
2199   if(loop_opts_cnt > 0) {
2200     debug_only( int cnt = 0; );
2201     while(major_progress() && (loop_opts_cnt > 0)) {
2202       TracePhase t2("idealLoop", &timers[_t_idealLoop]);
2203       assert( cnt++ < 40, "infinite cycle in loop optimization" );
2204       PhaseIdealLoop ideal_loop( igvn, true);
2205       loop_opts_cnt--;
2206       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2207       if (failing())  return;
2208     }
2209   }
2210 
2211   {
2212     // Verify that all previous optimizations produced a valid graph
2213     // at least to this point, even if no loop optimizations were done.
2214     TracePhase t2("idealLoopVerify", &timers[_t_idealLoopVerify]);
2215     PhaseIdealLoop::verify(igvn);
2216   }
2217 
2218   {
2219     TracePhase t2("macroExpand", &timers[_t_macroExpand]);
2220     PhaseMacroExpand  mex(igvn);
2221     if (mex.expand_macro_nodes()) {
2222       assert(failing(), "must bail out w/ explicit message");
2223       return;
2224     }
2225   }
2226 
2227   DEBUG_ONLY( _modified_nodes = NULL; )
2228  } // (End scope of igvn; run destructor if necessary for asserts.)
2229 
2230   process_print_inlining();
2231   // A method with only infinite loops has no edges entering loops from root
2232   {
2233     TracePhase t2("graphReshape", &timers[_t_graphReshaping]);
2234     if (final_graph_reshaping()) {
2235       assert(failing(), "must bail out w/ explicit message");
2236       return;
2237     }
2238   }
2239 
2240   print_method(PHASE_OPTIMIZE_FINISHED, 2);
2241 }
2242 
2243 
2244 //------------------------------Code_Gen---------------------------------------
2245 // Given a graph, generate code for it
2246 void Compile::Code_Gen() {
2247   if (failing()) {
2248     return;
2249   }
2250 
2251   // Perform instruction selection.  You might think we could reclaim Matcher
2252   // memory PDQ, but actually the Matcher is used in generating spill code.
2253   // Internals of the Matcher (including some VectorSets) must remain live
 2254   // for a while - thus I cannot reclaim Matcher memory lest a VectorSet usage
2255   // set a bit in reclaimed memory.
2256 
2257   // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
2258   // nodes.  Mapping is only valid at the root of each matched subtree.
2259   NOT_PRODUCT( verify_graph_edges(); )
2260 
2261   Matcher matcher;
2262   _matcher = &matcher;
2263   {
2264     TracePhase t2("matcher", &timers[_t_matcher]);
2265     matcher.match();
2266   }
2267   // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
2268   // nodes.  Mapping is only valid at the root of each matched subtree.
2269   NOT_PRODUCT( verify_graph_edges(); )
2270 
2271   // If you have too many nodes, or if matching has failed, bail out
2272   check_node_count(0, "out of nodes matching instructions");
2273   if (failing()) {
2274     return;
2275   }
2276 
2277   // Build a proper-looking CFG
2278   PhaseCFG cfg(node_arena(), root(), matcher);
2279   _cfg = &cfg;
2280   {
2281     TracePhase t2("scheduler", &timers[_t_scheduler]);
2282     bool success = cfg.do_global_code_motion();
2283     if (!success) {
2284       return;
2285     }
2286 
2287     print_method(PHASE_GLOBAL_CODE_MOTION, 2);
2288     NOT_PRODUCT( verify_graph_edges(); )
2289     debug_only( cfg.verify(); )
2290   }
2291 
2292   PhaseChaitin regalloc(unique(), cfg, matcher);
2293   _regalloc = &regalloc;
2294   {
2295     TracePhase t2("regalloc", &timers[_t_registerAllocation]);
2296     // Perform register allocation.  After Chaitin, use-def chains are
2297     // no longer accurate (at spill code) and so must be ignored.
2298     // Node->LRG->reg mappings are still accurate.
2299     _regalloc->Register_Allocate();
2300 
2301     // Bail out if the allocator builds too many nodes
2302     if (failing()) {
2303       return;
2304     }
2305   }
2306 
 2307   // Prior to register allocation we kept empty basic blocks in case the
 2308   // allocator needed a place to spill.  After register allocation we
2309   // are not adding any new instructions.  If any basic block is empty, we
2310   // can now safely remove it.
2311   {
2312     TracePhase t2("blockOrdering", &timers[_t_blockOrdering]);
2313     cfg.remove_empty_blocks();
2314     if (do_freq_based_layout()) {
2315       PhaseBlockLayout layout(cfg);
2316     } else {
2317       cfg.set_loop_alignment();
2318     }
2319     cfg.fixup_flow();
2320   }
2321 
2322   // Apply peephole optimizations
2323   if( OptoPeephole ) {
2324     TracePhase t2("peephole", &timers[_t_peephole]);
2325     PhasePeephole peep( _regalloc, cfg);
2326     peep.do_transform();
2327   }
2328 
2329   // Do late expand if CPU requires this.
2330   if (Matcher::require_postalloc_expand) {
2331     TracePhase t2c("postalloc_expand", &timers[_t_postalloc_expand]);
2332     cfg.postalloc_expand(_regalloc);
2333   }
2334 
2335   // Convert Nodes to instruction bits in a buffer
2336   {
2337     TraceTime t2b("output", &timers[_t_output], CITime);


2338     Output();
2339   }
2340 
2341   print_method(PHASE_FINAL_CODE);
2342 
2343   // He's dead, Jim.
2344   _cfg     = (PhaseCFG*)0xdeadbeef;
2345   _regalloc = (PhaseChaitin*)0xdeadbeef;
2346 }
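
The 0xdeadbeef stores at the end of Code_Gen() are deliberate pointer poisoning: _cfg and _regalloc point at stack-allocated phases that are about to die with this frame, so any later dereference should fault fast on a recognizable address rather than silently read a dead object. A generic sketch of the idiom, with all names illustrative:

    #include <cstdint>

    struct PhaseCFG;  // opaque: only the pointer value matters here

    struct CompileLike {
        PhaseCFG* _cfg = nullptr;

        void code_gen_done() {
            // Poison instead of nulling: a later bug that dereferences _cfg
            // crashes on a distinctive address instead of limping along.
            _cfg = reinterpret_cast<PhaseCFG*>(std::uintptr_t(0xdeadbeef));
        }
        bool cfg_is_poisoned() const {
            return _cfg == reinterpret_cast<PhaseCFG*>(std::uintptr_t(0xdeadbeef));
        }
    };

    int main() {
        CompileLike c;
        c.code_gen_done();
        return c.cfg_is_poisoned() ? 0 : 1;
    }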
2347 
2348 
2349 //------------------------------dump_asm---------------------------------------
2350 // Dump formatted assembly
2351 #ifndef PRODUCT
2352 void Compile::dump_asm(int *pcs, uint pc_limit) {
2353   bool cut_short = false;
2354   tty->print_cr("#");
2355   tty->print("#  ");  _tf->dump();  tty->cr();
2356   tty->print_cr("#");
2357 


3522     log()->elem("failure reason='%s' phase='compile'", reason);
3523   }
3524   if (_failure_reason == NULL) {
3525     // Record the first failure reason.
3526     _failure_reason = reason;
3527   }
3528 
3529   EventCompilerFailure event;
3530   if (event.should_commit()) {
3531     event.set_compileID(Compile::compile_id());
3532     event.set_failure(reason);
3533     event.commit();
3534   }
3535 
3536   if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
3537     C->print_method(PHASE_FAILURE);
3538   }
3539   _root = NULL;  // flush the graph, too
3540 }
3541 
3542 Compile::TracePhase::TracePhase(const char* name, elapsedTimer* accumulator)
3543   : TraceTime(name, accumulator, CITime, CITimeVerbose),
3544     _phase_name(name), _dolog(CITimeVerbose)
3545 {
3546   if (_dolog) {
3547     C = Compile::current();
3548     _log = C->log();
3549   } else {
3550     C = NULL;
3551     _log = NULL;
3552   }
3553   if (_log != NULL) {
3554     _log->begin_head("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
3555     _log->stamp();
3556     _log->end_head();
3557   }
3558 }
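
This constructor is the other half of the change: the old three-argument TracePhase(name, accumulator, dolog) becomes a two-argument form that derives both timing and logging from CITime/CITimeVerbose internally, and call sites switch from naming individual elapsedTimer fields to indexing a shared timers[] array (compare "parse", &_t_parser, true at old line 729 with "parse", &timers[_t_parser] at new line 731). Here is a sketch of the new call-site shape under simplified stand-in types; the enum values and printf logging are illustrative, not the real CompileLog output.

    #include <cstdio>

    struct ElapsedTimer { double seconds = 0.0; };

    // Shared timer table indexed by phase id, as the new call sites use.
    enum PhaseTimerId { _t_parser, _t_optimizer, max_phase_timers };
    static ElapsedTimer timers[max_phase_timers];

    // Stand-in for the -XX:+CITimeVerbose flag.
    static bool CITimeVerbose = true;

    // New-style TracePhase: two arguments, verbosity decided internally.
    class TracePhase {
        const char*   _name;
        ElapsedTimer* _acc;
        bool          _dolog;
    public:
        TracePhase(const char* name, ElapsedTimer* acc)
            : _name(name), _acc(acc), _dolog(CITimeVerbose) {
            if (_dolog) std::printf("<phase name='%s'>\n", _name);
        }
        ~TracePhase() {
            // Time accumulation elided; see the timer sketch earlier.
            if (_dolog) std::printf("</phase> (%s, total %.3f s)\n",
                                    _name, _acc->seconds);
        }
    };

    int main() {
        // Old call-site shape:  TracePhase t3("parse", &_t_parser, true);
        // New call-site shape:
        TracePhase t3("parse", &timers[_t_parser]);
        return 0;
    }

Centralizing the dolog decision means a change of verbosity policy touches one constructor instead of every call site, which is why the NOT_PRODUCT wrappers disappear from so many call sites in this diff.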
3559 
3560 Compile::TracePhase::~TracePhase() {
3561 
3562   C = Compile::current();
3563   if (_dolog) {
3564     _log = C->log();
3565   } else {
3566     _log = NULL;