375 void Compile::remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful) {
376 int shift = 0;
377 for (int i = 0; i < inlines->length(); i++) {
378 CallGenerator* cg = inlines->at(i);
379 CallNode* call = cg->call_node();
380 if (shift > 0) {
381 inlines->at_put(i-shift, cg);
382 }
383 if (!useful.member(call)) {
384 shift++;
385 }
386 }
387 inlines->trunc_to(inlines->length()-shift);
388 }
389
390 // Disconnect all useless nodes by disconnecting those at the boundary.
391 void Compile::remove_useless_nodes(Unique_Node_List &useful) {
392 uint next = 0;
393 while (next < useful.size()) {
394 Node *n = useful.at(next++);
395 // Use raw traversal of out edges since this code removes out edges
396 int max = n->outcnt();
397 for (int j = 0; j < max; ++j) {
398 Node* child = n->raw_out(j);
399 if (! useful.member(child)) {
400 assert(!child->is_top() || child != top(),
401 "If top is cached in Compile object it is in useful list");
402 // Only need to remove this out-edge to the useless node
403 n->raw_del_out(j);
404 --j;
405 --max;
406 }
407 }
408 if (n->outcnt() == 1 && n->has_special_unique_user()) {
409 record_for_igvn(n->unique_out());
410 }
411 }
412 // Remove useless macro and predicate opaq nodes
413 for (int i = C->macro_count()-1; i >= 0; i--) {
414 Node* n = C->macro_node(i);
655 _in_scratch_emit_size(false),
656 _dead_node_list(comp_arena()),
657 _dead_node_count(0),
658 #ifndef PRODUCT
659 _trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")),
660 _in_dump_cnt(0),
661 _printer(IdealGraphPrinter::printer()),
662 #endif
663 _congraph(NULL),
664 _replay_inline_data(NULL),
665 _late_inlines(comp_arena(), 2, 0, NULL),
666 _string_late_inlines(comp_arena(), 2, 0, NULL),
667 _boxing_late_inlines(comp_arena(), 2, 0, NULL),
668 _late_inlines_pos(0),
669 _number_of_mh_late_inlines(0),
670 _inlining_progress(false),
671 _inlining_incrementally(false),
672 _print_inlining_list(NULL),
673 _print_inlining_stream(NULL),
674 _print_inlining_idx(0),
675 _preserve_jvm_state(0),
676 _interpreter_frame_size(0) {
677 C = this;
678
679 CompileWrapper cw(this);
680 #ifndef PRODUCT
681 if (TimeCompiler2) {
682 tty->print(" ");
683 target->holder()->name()->print();
684 tty->print(".");
685 target->print_short_name();
686 tty->print(" ");
687 }
688 TraceTime t1("Total compilation time", &_t_totalCompilation, TimeCompiler, TimeCompiler2);
689 TraceTime t2(NULL, &_t_methodCompilation, TimeCompiler, false);
690 bool print_opto_assembly = PrintOptoAssembly || _method->has_option("PrintOptoAssembly");
691 if (!print_opto_assembly) {
692 bool print_assembly = (PrintAssembly || _method->should_print_assembly());
693 if (print_assembly && !Disassembler::can_decode()) {
694 tty->print_cr("PrintAssembly request changed to PrintOptoAssembly");
695 print_opto_assembly = true;
765 // the pre-barrier code.
766 // Specifically, if G1 is enabled, the value in the referent
767 // field is recorded by the G1 SATB pre barrier. This will
768 // result in the referent being marked live and the reference
769 // object removed from the list of discovered references during
770 // reference processing.
771 cg = find_intrinsic(method(), false);
772 }
773 if (cg == NULL) {
774 float past_uses = method()->interpreter_invocation_count();
775 float expected_uses = past_uses;
776 cg = CallGenerator::for_inline(method(), expected_uses);
777 }
778 }
779 if (failing()) return;
780 if (cg == NULL) {
781 record_method_not_compilable_all_tiers("cannot parse method");
782 return;
783 }
784 JVMState* jvms = build_start_state(start(), tf());
785 if ((jvms = cg->generate(jvms, NULL)) == NULL) {
786 record_method_not_compilable("method parse failed");
787 return;
788 }
789 GraphKit kit(jvms);
790
791 if (!kit.stopped()) {
792 // Accept return values, and transfer control we know not where.
793 // This is done by a special, unique ReturnNode bound to root.
794 return_values(kit.jvms());
795 }
796
797 if (kit.has_exceptions()) {
798 // Any exceptions that escape from this call must be rethrown
799 // to whatever caller is dynamically above us on the stack.
800 // This is done by a special, unique RethrowNode bound to root.
801 rethrow_exceptions(kit.transfer_exceptions_into_jvms());
802 }
803
804 assert(IncrementalInline || (_late_inlines.length() == 0 && !has_mh_late_inlines()), "incremental inlining is off");
805
961 _mach_constant_base_node(NULL),
962 _node_bundling_limit(0),
963 _node_bundling_base(NULL),
964 _java_calls(0),
965 _inner_loops(0),
966 #ifndef PRODUCT
967 _trace_opto_output(TraceOptoOutput),
968 _in_dump_cnt(0),
969 _printer(NULL),
970 #endif
971 _dead_node_list(comp_arena()),
972 _dead_node_count(0),
973 _congraph(NULL),
974 _replay_inline_data(NULL),
975 _number_of_mh_late_inlines(0),
976 _inlining_progress(false),
977 _inlining_incrementally(false),
978 _print_inlining_list(NULL),
979 _print_inlining_stream(NULL),
980 _print_inlining_idx(0),
981 _preserve_jvm_state(0),
982 _allowed_reasons(0),
983 _interpreter_frame_size(0) {
984 C = this;
985
986 #ifndef PRODUCT
987 TraceTime t1(NULL, &_t_totalCompilation, TimeCompiler, false);
988 TraceTime t2(NULL, &_t_stubCompilation, TimeCompiler, false);
989 set_print_assembly(PrintFrameConverterAssembly);
990 set_parsed_irreducible_loop(false);
991 #endif
992 set_has_irreducible_loop(false); // no loops
993
994 CompileWrapper cw(this);
995 Init(/*AliasLevel=*/ 0);
996 init_tf((*generator)());
997
998 {
999 // The following is a dummy for the sake of GraphKit::gen_stub
1000 Unique_Node_List for_igvn(comp_arena());
1001 set_for_igvn(&for_igvn); // not used, but some GraphKit guys push on this
1895 while (_string_late_inlines.length() > 0) {
1896 CallGenerator* cg = _string_late_inlines.pop();
1897 cg->do_late_inline();
1898 if (failing()) return;
1899 }
1900 _string_late_inlines.trunc_to(0);
1901 }
1902
1903 // Late inlining of boxing methods
1904 void Compile::inline_boxing_calls(PhaseIterGVN& igvn) {
1905 if (_boxing_late_inlines.length() > 0) {
1906 assert(has_boxed_value(), "inconsistent");
1907
1908 PhaseGVN* gvn = initial_gvn();
1909 set_inlining_incrementally(true);
1910
1911 assert( igvn._worklist.size() == 0, "should be done with igvn" );
1912 for_igvn()->clear();
1913 gvn->replace_with(&igvn);
1914
1915 while (_boxing_late_inlines.length() > 0) {
1916 CallGenerator* cg = _boxing_late_inlines.pop();
1917 cg->do_late_inline();
1918 if (failing()) return;
1919 }
1920 _boxing_late_inlines.trunc_to(0);
1921
1922 {
1923 ResourceMark rm;
1924 PhaseRemoveUseless pru(gvn, for_igvn());
1925 }
1926
1927 igvn = PhaseIterGVN(gvn);
1928 igvn.optimize();
1929
1930 set_inlining_progress(false);
1931 set_inlining_incrementally(false);
1932 }
1933 }
1934
1958 ResourceMark rm;
1959 PhaseRemoveUseless pru(gvn, for_igvn());
1960 }
1961
1962 igvn = PhaseIterGVN(gvn);
1963 }
1964
1965 // Perform incremental inlining until bound on number of live nodes is reached
1966 void Compile::inline_incrementally(PhaseIterGVN& igvn) {
1967 PhaseGVN* gvn = initial_gvn();
1968
1969 set_inlining_incrementally(true);
1970 set_inlining_progress(true);
1971 uint low_live_nodes = 0;
1972
1973 while(inlining_progress() && _late_inlines.length() > 0) {
1974
1975 if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
1976 if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) {
1977 // PhaseIdealLoop is expensive so we only try it once we are
1978 // out of live nodes and we only try it again if the previous run
1979 // helped get the number of nodes down significantly
1980 PhaseIdealLoop ideal_loop( igvn, false, true );
1981 if (failing()) return;
1982 low_live_nodes = live_nodes();
1983 _major_progress = true;
1984 }
1985
1986 if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
1987 break;
1988 }
1989 }
1990
1991 inline_incrementally_one(igvn);
1992
1993 if (failing()) return;
1994
1995 igvn.optimize();
1996
1997 if (failing()) return;
1998 }
1999
2052 igvn.optimize();
2053 }
2054
2055 print_method(PHASE_ITER_GVN1, 2);
2056
2057 if (failing()) return;
2058
2059 {
2060 NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); )
2061 inline_incrementally(igvn);
2062 }
2063
2064 print_method(PHASE_INCREMENTAL_INLINE, 2);
2065
2066 if (failing()) return;
2067
2068 if (eliminate_boxing()) {
2069 NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); )
2070 // Inline valueOf() methods now.
2071 inline_boxing_calls(igvn);
2072
2073 print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
2074
2075 if (failing()) return;
2076 }
2077
2078 // Remove the speculative part of types and clean up the graph from
2079 // the extra CastPP nodes whose only purpose is to carry them. Do
2080 // that early so that optimizations are not disrupted by the extra
2081 // CastPP nodes.
2082 remove_speculative_types(igvn);
2083
2084 // No more new expensive nodes will be added to the list from here
2085 // so keep only the actual candidates for optimizations.
2086 cleanup_expensive_nodes(igvn);
2087
2088 // Perform escape analysis
2089 if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
2090 if (has_loops()) {
2091 // Cleanup graph (remove dead nodes).
|
375 void Compile::remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful) {
376 int shift = 0;
377 for (int i = 0; i < inlines->length(); i++) {
378 CallGenerator* cg = inlines->at(i);
379 CallNode* call = cg->call_node();
380 if (shift > 0) {
381 inlines->at_put(i-shift, cg);
382 }
383 if (!useful.member(call)) {
384 shift++;
385 }
386 }
387 inlines->trunc_to(inlines->length()-shift);
388 }
389
390 // Disconnect all useless nodes by disconnecting those at the boundary.
391 void Compile::remove_useless_nodes(Unique_Node_List &useful) {
392 uint next = 0;
393 while (next < useful.size()) {
394 Node *n = useful.at(next++);
395 if (n->is_SafePoint()) {
396 // We're done with a parsing phase. Replaced nodes are not valid
397 // beyond that point.
398 n->as_SafePoint()->delete_replaced_nodes();
399 }
400 // Use raw traversal of out edges since this code removes out edges
401 int max = n->outcnt();
402 for (int j = 0; j < max; ++j) {
403 Node* child = n->raw_out(j);
404 if (! useful.member(child)) {
405 assert(!child->is_top() || child != top(),
406 "If top is cached in Compile object it is in useful list");
407 // Only need to remove this out-edge to the useless node
408 n->raw_del_out(j);
409 --j;
410 --max;
411 }
412 }
413 if (n->outcnt() == 1 && n->has_special_unique_user()) {
414 record_for_igvn(n->unique_out());
415 }
416 }
417 // Remove useless macro and predicate opaq nodes
418 for (int i = C->macro_count()-1; i >= 0; i--) {
419 Node* n = C->macro_node(i);
660 _in_scratch_emit_size(false),
661 _dead_node_list(comp_arena()),
662 _dead_node_count(0),
663 #ifndef PRODUCT
664 _trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")),
665 _in_dump_cnt(0),
666 _printer(IdealGraphPrinter::printer()),
667 #endif
668 _congraph(NULL),
669 _replay_inline_data(NULL),
670 _late_inlines(comp_arena(), 2, 0, NULL),
671 _string_late_inlines(comp_arena(), 2, 0, NULL),
672 _boxing_late_inlines(comp_arena(), 2, 0, NULL),
673 _late_inlines_pos(0),
674 _number_of_mh_late_inlines(0),
675 _inlining_progress(false),
676 _inlining_incrementally(false),
677 _print_inlining_list(NULL),
678 _print_inlining_stream(NULL),
679 _print_inlining_idx(0),
680 _interpreter_frame_size(0) {
681 C = this;
682
683 CompileWrapper cw(this);
684 #ifndef PRODUCT
685 if (TimeCompiler2) {
686 tty->print(" ");
687 target->holder()->name()->print();
688 tty->print(".");
689 target->print_short_name();
690 tty->print(" ");
691 }
692 TraceTime t1("Total compilation time", &_t_totalCompilation, TimeCompiler, TimeCompiler2);
693 TraceTime t2(NULL, &_t_methodCompilation, TimeCompiler, false);
694 bool print_opto_assembly = PrintOptoAssembly || _method->has_option("PrintOptoAssembly");
695 if (!print_opto_assembly) {
696 bool print_assembly = (PrintAssembly || _method->should_print_assembly());
697 if (print_assembly && !Disassembler::can_decode()) {
698 tty->print_cr("PrintAssembly request changed to PrintOptoAssembly");
699 print_opto_assembly = true;
769 // the pre-barrier code.
770 // Specifically, if G1 is enabled, the value in the referent
771 // field is recorded by the G1 SATB pre barrier. This will
772 // result in the referent being marked live and the reference
773 // object removed from the list of discovered references during
774 // reference processing.
775 cg = find_intrinsic(method(), false);
776 }
777 if (cg == NULL) {
778 float past_uses = method()->interpreter_invocation_count();
779 float expected_uses = past_uses;
780 cg = CallGenerator::for_inline(method(), expected_uses);
781 }
782 }
783 if (failing()) return;
784 if (cg == NULL) {
785 record_method_not_compilable_all_tiers("cannot parse method");
786 return;
787 }
788 JVMState* jvms = build_start_state(start(), tf());
789 if ((jvms = cg->generate(jvms)) == NULL) {
790 record_method_not_compilable("method parse failed");
791 return;
792 }
793 GraphKit kit(jvms);
794
795 if (!kit.stopped()) {
796 // Accept return values, and transfer control we know not where.
797 // This is done by a special, unique ReturnNode bound to root.
798 return_values(kit.jvms());
799 }
800
801 if (kit.has_exceptions()) {
802 // Any exceptions that escape from this call must be rethrown
803 // to whatever caller is dynamically above us on the stack.
804 // This is done by a special, unique RethrowNode bound to root.
805 rethrow_exceptions(kit.transfer_exceptions_into_jvms());
806 }
807
808 assert(IncrementalInline || (_late_inlines.length() == 0 && !has_mh_late_inlines()), "incremental inlining is off");
809
965 _mach_constant_base_node(NULL),
966 _node_bundling_limit(0),
967 _node_bundling_base(NULL),
968 _java_calls(0),
969 _inner_loops(0),
970 #ifndef PRODUCT
971 _trace_opto_output(TraceOptoOutput),
972 _in_dump_cnt(0),
973 _printer(NULL),
974 #endif
975 _dead_node_list(comp_arena()),
976 _dead_node_count(0),
977 _congraph(NULL),
978 _replay_inline_data(NULL),
979 _number_of_mh_late_inlines(0),
980 _inlining_progress(false),
981 _inlining_incrementally(false),
982 _print_inlining_list(NULL),
983 _print_inlining_stream(NULL),
984 _print_inlining_idx(0),
985 _allowed_reasons(0),
986 _interpreter_frame_size(0) {
987 C = this;
988
989 #ifndef PRODUCT
990 TraceTime t1(NULL, &_t_totalCompilation, TimeCompiler, false);
991 TraceTime t2(NULL, &_t_stubCompilation, TimeCompiler, false);
992 set_print_assembly(PrintFrameConverterAssembly);
993 set_parsed_irreducible_loop(false);
994 #endif
995 set_has_irreducible_loop(false); // no loops
996
997 CompileWrapper cw(this);
998 Init(/*AliasLevel=*/ 0);
999 init_tf((*generator)());
1000
1001 {
1002 // The following is a dummy for the sake of GraphKit::gen_stub
1003 Unique_Node_List for_igvn(comp_arena());
1004 set_for_igvn(&for_igvn); // not used, but some GraphKit guys push on this
1898 while (_string_late_inlines.length() > 0) {
1899 CallGenerator* cg = _string_late_inlines.pop();
1900 cg->do_late_inline();
1901 if (failing()) return;
1902 }
1903 _string_late_inlines.trunc_to(0);
1904 }
1905
1906 // Late inlining of boxing methods
1907 void Compile::inline_boxing_calls(PhaseIterGVN& igvn) {
1908 if (_boxing_late_inlines.length() > 0) {
1909 assert(has_boxed_value(), "inconsistent");
1910
1911 PhaseGVN* gvn = initial_gvn();
1912 set_inlining_incrementally(true);
1913
1914 assert( igvn._worklist.size() == 0, "should be done with igvn" );
1915 for_igvn()->clear();
1916 gvn->replace_with(&igvn);
1917
1918 _late_inlines_pos = _late_inlines.length();
1919
1920 while (_boxing_late_inlines.length() > 0) {
1921 CallGenerator* cg = _boxing_late_inlines.pop();
1922 cg->do_late_inline();
1923 if (failing()) return;
1924 }
1925 _boxing_late_inlines.trunc_to(0);
1926
1927 {
1928 ResourceMark rm;
1929 PhaseRemoveUseless pru(gvn, for_igvn());
1930 }
1931
1932 igvn = PhaseIterGVN(gvn);
1933 igvn.optimize();
1934
1935 set_inlining_progress(false);
1936 set_inlining_incrementally(false);
1937 }
1938 }
1939
1963 ResourceMark rm;
1964 PhaseRemoveUseless pru(gvn, for_igvn());
1965 }
1966
1967 igvn = PhaseIterGVN(gvn);
1968 }
1969
1970 // Perform incremental inlining until bound on number of live nodes is reached
1971 void Compile::inline_incrementally(PhaseIterGVN& igvn) {
1972 PhaseGVN* gvn = initial_gvn();
1973
1974 set_inlining_incrementally(true);
1975 set_inlining_progress(true);
1976 uint low_live_nodes = 0;
1977
1978 while(inlining_progress() && _late_inlines.length() > 0) {
1979
1980 if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
1981 if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) {
1982 // PhaseIdealLoop is expensive so we only try it once we are
1983 // out of live nodes and we only try it again if the previous
1984 // run helped get the number of nodes down significantly
1985 PhaseIdealLoop ideal_loop( igvn, false, true );
1986 if (failing()) return;
1987 low_live_nodes = live_nodes();
1988 _major_progress = true;
1989 }
1990
1991 if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
1992 break;
1993 }
1994 }
1995
1996 inline_incrementally_one(igvn);
1997
1998 if (failing()) return;
1999
2000 igvn.optimize();
2001
2002 if (failing()) return;
2003 }
2004
2057 igvn.optimize();
2058 }
2059
2060 print_method(PHASE_ITER_GVN1, 2);
2061
2062 if (failing()) return;
2063
2064 {
2065 NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); )
2066 inline_incrementally(igvn);
2067 }
2068
2069 print_method(PHASE_INCREMENTAL_INLINE, 2);
2070
2071 if (failing()) return;
2072
2073 if (eliminate_boxing()) {
2074 NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); )
2075 // Inline valueOf() methods now.
2076 inline_boxing_calls(igvn);
2077
2078 if (AlwaysIncrementalInline) {
2079 inline_incrementally(igvn);
2080 }
2081
2082 print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
2083
2084 if (failing()) return;
2085 }
2086
2087 // Remove the speculative part of types and clean up the graph from
2088 // the extra CastPP nodes whose only purpose is to carry them. Do
2089 // that early so that optimizations are not disrupted by the extra
2090 // CastPP nodes.
2091 remove_speculative_types(igvn);
2092
2093 // No more new expensive nodes will be added to the list from here
2094 // so keep only the actual candidates for optimizations.
2095 cleanup_expensive_nodes(igvn);
2096
2097 // Perform escape analysis
2098 if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
2099 if (has_loops()) {
2100 // Cleanup graph (remove dead nodes).
|