  CloneMap        _clone_map;             // used for recording history of cloned nodes
  size_t          _type_last_size;        // Last allocation size (see Type::operator new/delete)
  ciMethod*       _last_tf_m;             // Cache for
  const TypeFunc* _last_tf;               //  TypeFunc::make
  AliasType**     _alias_types;           // List of alias types seen so far.
  int             _num_alias_types;       // Logical length of _alias_types
  int             _max_alias_types;       // Physical length of _alias_types
  AliasCacheEntry _alias_cache[AliasCacheSize]; // Gets aliases w/o data structure walking
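  // Note on _alias_cache: it is a small direct-mapped cache consulted before
  // walking _alias_types. A minimal lookup sketch (the entry field names and
  // hash below are assumptions for illustration, not the exact code in
  // compile.cpp):
  //
  //   AliasCacheEntry* ace = &_alias_cache[((uintptr_t)adr_type >> 3) % AliasCacheSize];
  //   if (ace->_adr_type == adr_type) {
  //     return ace->_index;              // hit: no walk over _alias_types
  //   }
  //   // miss: fall back to find_alias_type() and refill this entry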

  // Parsing, optimization
  PhaseGVN*         _initial_gvn;         // Results of parse-time PhaseGVN
  Unique_Node_List* _for_igvn;            // Initial work-list for next round of Iterative GVN
  WarmCallInfo*     _warm_calls;          // Sorted work-list for heat-based inlining.

  GrowableArray<CallGenerator*> _late_inlines;        // List of CallGenerators to be revisited after
                                                      // main parsing has finished.
  GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations
  GrowableArray<CallGenerator*> _boxing_late_inlines; // same but for boxing operations
  GrowableArray<CallGenerator*> _vector_reboxing_late_inlines; // same but for vector reboxing operations

  int               _late_inlines_pos;    // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
  uint              _number_of_mh_late_inlines; // number of method handle late inlining still pending

  // Inlining may not happen in parse order which would make
  // PrintInlining output confusing. Keep track of PrintInlining
  // pieces in order.
  class PrintInliningBuffer : public ResourceObj {
   private:
    CallGenerator* _cg;
    stringStream*  _ss;

   public:
    PrintInliningBuffer()
      : _cg(NULL) { _ss = new stringStream(); }

    void freeStream() { _ss->~stringStream(); _ss = NULL; }

    stringStream* ss() const { return _ss; }
    CallGenerator* cg() const { return _cg; }

  // ...

  }
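  // Usage note (a hedged sketch inferred from the comment above, not a
  // verbatim quote of compile.cpp): instead of printing directly, each
  // inlining decision writes its PrintInlining message into the buffer's
  // stringStream, keyed by the CallGenerator that produced it; once inlining
  // has settled, process_print_inlining()/dump_print_inlining() (declared
  // below) replay the buffered pieces in parse order.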

  bool should_print(int level = 1) {
#ifndef PRODUCT
    if (PrintIdealGraphLevel < 0) { // disabled by the user
      return false;
    }

    bool need = directive()->IGVPrintLevelOption >= level;
    if (need && !_printer) {
      _printer = IdealGraphPrinter::printer();
      assert(_printer != NULL, "_printer is NULL when we need it!");
      _printer->set_compile(this);
    }
    return need;
#else
    return false;
#endif
  }

  void print_method(CompilerPhaseType cpt, const char* name, int level = 1, int idx = 0);
  void print_method(CompilerPhaseType cpt, int level = 1, int idx = 0);
  void print_method(CompilerPhaseType cpt, Node* n, int level = 3);
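  // Relationship between the two (a hedged sketch; print_method() may perform
  // the should_print() check itself in compile.cpp, and the phase constant is
  // illustrative):
  //
  //   if (should_print(2)) {                   // directive asks for level >= 2?
  //     print_method(PHASE_AFTER_PARSING, 2);  // serialize graph to the IGV printer
  //   }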

#ifndef PRODUCT
  void igv_print_method_to_file(const char* phase_name = "Debug", bool append = false);
  void igv_print_method_to_network(const char* phase_name = "Debug");
  static IdealGraphPrinter* debug_file_printer() { return _debug_file_printer; }
  static IdealGraphPrinter* debug_network_printer() { return _debug_network_printer; }
#endif

  void end_method(int level = 1);

  int           macro_count()             const { return _macro_nodes->length(); }
  int           predicate_count()         const { return _predicate_opaqs->length(); }
  int           expensive_count()         const { return _expensive_nodes->length(); }
  Node*         macro_node(int idx)       const { return _macro_nodes->at(idx); }
  Node*         predicate_opaque1_node(int idx) const { return _predicate_opaqs->at(idx); }
  Node*         expensive_node(int idx)   const { return _expensive_nodes->at(idx); }
  ConnectionGraph* congraph()                   { return _congraph; }
  void set_congraph(ConnectionGraph* congraph)  { _congraph = congraph; }
  void add_macro_node(Node* n) {
    //assert(n->is_macro(), "must be a macro node");

  // ...
  AliasType*     alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); }
  bool           have_alias_type(const TypePtr* adr_type);
  AliasType*     alias_type(ciField* field);

  int            get_alias_index(const TypePtr* at) { return alias_type(at)->index(); }
  const TypePtr* get_adr_type(uint aidx)            { return alias_type(aidx)->adr_type(); }
  int            get_general_index(uint aidx)       { return alias_type(aidx)->general_index(); }
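  // What the indices are for (illustrative; the memory-graph plumbing lives
  // in GraphKit/MergeMemNode, not in this class): each memory access is
  // classified to an alias index so that memory slices known not to alias
  // can be optimized independently, e.g.
  //
  //   int alias_idx = get_alias_index(adr_type);  // slice number for a load/store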

  // Building nodes
  void              rethrow_exceptions(JVMState* jvms);
  void              return_values(JVMState* jvms);
  JVMState*         build_start_state(StartNode* start, const TypeFunc* tf);

  // Decide how to build a call.
  // The profile factor is a discount to apply to this site's interp. profile.
  CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
                                   JVMState* jvms, bool allow_inline, float profile_factor,
                                   ciKlass* speculative_receiver_type = NULL,
                                   bool allow_intrinsics = true);
  bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
    return should_delay_string_inlining(call_method, jvms) ||
           should_delay_boxing_inlining(call_method, jvms) ||
           should_delay_vector_inlining(call_method, jvms);
  }
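  // The predicates below defer inlining of string concatenation, autoboxing
  // and Vector API calls until after parsing, when more context (such as
  // escape information) is available. Hedged call-site sketch:
  //
  //   if (C->should_delay_inlining(callee, jvms)) {
  //     // emit a regular call for now; a late-inline CallGenerator is
  //     // queued via the add_*_late_inline() helpers and expanded later
  //   }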
  bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
  bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);
  bool should_delay_vector_inlining(ciMethod* call_method, JVMState* jvms);
  bool should_delay_vector_reboxing_inlining(ciMethod* call_method, JVMState* jvms);

  // Helper functions to identify inlining potential at a call site
  ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                  ciKlass* holder, ciMethod* callee,
                                  const TypeOopPtr* receiver_type, bool is_virtual,
                                  bool& call_does_dispatch, int& vtable_index,
                                  bool check_access = true);
  ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                              ciMethod* callee, const TypeOopPtr* receiver_type,
                              bool check_access = true);

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // This version, unspecific to a particular bci, asks if
  // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
  bool too_many_traps(Deoptimization::DeoptReason reason,
                      // Privately used parameter for logging:
                      ciMethodData* logmd = NULL);

  // ...
  WarmCallInfo* pop_warm_call();

  // Record this CallGenerator for inlining at the end of parsing.
  void add_late_inline(CallGenerator* cg) {
    _late_inlines.insert_before(_late_inlines_pos, cg);
    _late_inlines_pos++;
  }
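  // Worked example of the insertion position (values illustrative): while the
  // candidate at queue position 0 (call it A) is being late-inlined, calls
  // discovered inside A are inserted at position 1, ahead of a B that was
  // parsed after A. Processing order becomes A, then A's callees, then B,
  // which is the depth-first order parse-time inlining would have produced.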

  void prepend_late_inline(CallGenerator* cg) {
    _late_inlines.insert_before(0, cg);
  }

  void add_string_late_inline(CallGenerator* cg) {
    _string_late_inlines.push(cg);
  }

  void add_boxing_late_inline(CallGenerator* cg) {
    _boxing_late_inlines.push(cg);
  }

  void add_vector_reboxing_late_inline(CallGenerator* cg) {
    _vector_reboxing_late_inlines.push(cg);
  }

  void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List& useful);

  void process_print_inlining();
  void dump_print_inlining();

  bool over_inlining_cutoff() const {
    if (!inlining_incrementally()) {
      return unique() > (uint)NodeCountInliningCutoff;
    } else {
      // Give some room for incremental inlining algorithm to "breathe"
      // and avoid thrashing when live node count is close to the limit.
      // Keep in mind that live_nodes() isn't accurate during inlining until
      // dead node elimination step happens (see Compile::inline_incrementally).
      return live_nodes() > (uint)LiveNodeCountInliningCutoff * 11 / 10;
    }
  }
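  // The "* 11 / 10" above grants 10% headroom over the configured cutoff.
  // For example, with LiveNodeCountInliningCutoff at its usual default of
  // 40000, the incremental path only reports "over" once live_nodes()
  // exceeds 44000 (40000 * 11 = 440000, then / 10 = 44000).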

  void inc_number_of_mh_late_inlines() { _number_of_mh_late_inlines++; }
  void dec_number_of_mh_late_inlines() { assert(_number_of_mh_late_inlines > 0, "_number_of_mh_late_inlines < 0 !"); _number_of_mh_late_inlines--; }
  bool has_mh_late_inlines() const { return _number_of_mh_late_inlines > 0; }

  bool inline_incrementally_one();
  void inline_incrementally_cleanup(PhaseIterGVN& igvn);
  void inline_incrementally(PhaseIterGVN& igvn);
  void inline_string_calls(bool parse_time);
  void inline_boxing_calls(PhaseIterGVN& igvn);
  bool optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode);
  void remove_root_to_sfpts_edges(PhaseIterGVN& igvn);
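  // Rough shape of the incremental driver (a sketch of the control flow, not
  // a verbatim copy of compile.cpp): inline_incrementally() alternates
  // inline_incrementally_one(), which expands a batch of queued
  // CallGenerators, with inline_incrementally_cleanup(), which reruns IGVN so
  // dead nodes are eliminated and live_nodes() becomes accurate again; it
  // stops when the queues are empty or over_inlining_cutoff() trips.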

  void inline_vector_reboxing_calls();
  bool has_vbox_nodes();

  // Matching, CFG layout, allocation, code generation
  PhaseCFG*         cfg()                      { return _cfg; }
  bool              has_java_calls() const     { return _java_calls > 0; }
  int               java_calls() const         { return _java_calls; }
  int               inner_loops() const        { return _inner_loops; }
  Matcher*          matcher()                  { return _matcher; }
  PhaseRegAlloc*    regalloc()                 { return _regalloc; }
  RegMask&          FIRST_STACK_mask()         { return _FIRST_STACK_mask; }
  Arena*            indexSet_arena()           { return _indexSet_arena; }
  void*             indexSet_free_block_list() { return _indexSet_free_block_list; }
  DebugInformationRecorder* debug_info()       { return env()->debug_info(); }

  void update_interpreter_frame_size(int size) {
    if (_interpreter_frame_size < size) {
      _interpreter_frame_size = size;
    }
  }
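  // Keeps a running maximum: called once per (possibly inlined) method, so
  // after calls with sizes 96, 160 and 128 the field ends at 160, the
  // worst-case interpreter frame needed if this compiled frame deoptimizes.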

  void set_matcher(Matcher* m)           { _matcher = m; }
  //void set_regalloc(PhaseRegAlloc* ra) { _regalloc = ra; }