408
409 public:
// Constructor: no CallGenerator attached yet; allocates a fresh stringStream
// to accumulate PrintInlining output. The stream is 'new'-allocated and not
// freed here -- presumably arena/resource-area managed, TODO confirm.
410 PrintInliningBuffer()
411 : _cg(NULL) { _ss = new stringStream(); }
412
// Stream holding the buffered inlining messages for this slot.
413 stringStream* ss() const { return _ss; }
// CallGenerator this buffer is tagged with (NULL if untagged).
414 CallGenerator* cg() const { return _cg; }
// Tag this buffer with the given CallGenerator (see print_inlining_skip).
415 void set_cg(CallGenerator* cg) { _cg = cg; }
416 };
417
// Ordered list of per-decision PrintInlining output buffers; late (delayed)
// inlining output can be spliced in at the position of its tagged buffer.
418 GrowableArray<PrintInliningBuffer>* _print_inlining_list;
// Index of the buffer currently receiving output.
419 int _print_inlining_idx;
420
421 // Only keep nodes in the expensive node list that need to be optimized
422 void cleanup_expensive_nodes(PhaseIterGVN &igvn);
423 // Use for sorting expensive nodes to bring similar nodes together
424 static int cmp_expensive_nodes(Node** n1, Node** n2);
425 // Expensive nodes list already sorted?
426 bool expensive_nodes_sorted() const;
427
428 public:
429
// Stream of the buffer at the current index; callers print inlining
// messages directly into it.
430 outputStream* print_inlining_stream() const {
431 return _print_inlining_list->adr_at(_print_inlining_idx)->ss();
432 }
433
// Defer output for 'cg': tag the current buffer with cg, advance past it,
// and open a fresh buffer at the new index for subsequent messages.
// No-op unless _print_inlining is set.
434 void print_inlining_skip(CallGenerator* cg) {
435 if (_print_inlining) {
436 _print_inlining_list->adr_at(_print_inlining_idx)->set_cg(cg);
437 _print_inlining_idx++;
438 _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer());
439 }
440 }
441
442 void print_inlining_insert(CallGenerator* cg) {
443 if (_print_inlining) {
444 for (int i = 0; i < _print_inlining_list->length(); i++) {
445 if (_print_inlining_list->adr_at(i)->cg() == cg) {
446 _print_inlining_list->insert_before(i+1, PrintInliningBuffer());
447 _print_inlining_idx = i+1;
803 _last_tf_m = m;
804 _last_tf = tf;
805 }
806
// Indexed lookup into the alias-type table; idx must be in range.
807 AliasType* alias_type(int idx) { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
// Alias type for an address type; optional 'field' refines field accesses
// (delegates to find_alias_type with no_create == false).
808 AliasType* alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); }
809 bool have_alias_type(const TypePtr* adr_type);
810 AliasType* alias_type(ciField* field);
811
// Convenience projections over alias_type(...).
812 int get_alias_index(const TypePtr* at) { return alias_type(at)->index(); }
813 const TypePtr* get_adr_type(uint aidx) { return alias_type(aidx)->adr_type(); }
814 int get_general_index(uint aidx) { return alias_type(aidx)->general_index(); }
815
816 // Building nodes
817 void rethrow_exceptions(JVMState* jvms);
818 void return_values(JVMState* jvms);
819 JVMState* build_start_state(StartNode* start, const TypeFunc* tf);
820
821 // Decide how to build a call.
822 // The profile factor is a discount to apply to this site's interp. profile.
823 CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch, JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true, bool delayed_forbidden = false);
// True if inlining of this call should be postponed to a later pass:
// either a string-concat candidate or an autoboxing candidate.
824 bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
825 return should_delay_string_inlining(call_method, jvms) ||
826 should_delay_boxing_inlining(call_method, jvms);
827 }
828 bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
829 bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);
830
831 // Helper functions to identify inlining potential at call-site
// Outputs: call_does_dispatch and vtable_index are filled in by the callee.
832 ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
833 ciMethod* callee, const TypeOopPtr* receiver_type,
834 bool is_virtual,
835 bool &call_does_dispatch, int &vtable_index);
836 ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
837 ciMethod* callee, const TypeOopPtr* receiver_type);
838
839 // Report if there were too many traps at a current method and bci.
840 // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
841 // If there is no MDO at all, report no trap unless told to assume it.
842 bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
843 // This version, unspecific to a particular bci, asks if
1139 // correspondence between Use-Def edges and Def-Use edges
1140 // The option no_dead_code enables stronger checks that the
1141 // graph is strongly connected from root in both directions.
// NOTE(review): PRODUCT_RETURN suggests these are debug-build-only
// diagnostics (empty bodies in product builds) -- confirm against
// the macro's definition.
1142 void verify_graph_edges(bool no_dead_code = false) PRODUCT_RETURN;
1143
1144 // End-of-run dumps.
1145 static void print_statistics() PRODUCT_RETURN;
1146
1147 // Dump formatted assembly
1148 void dump_asm(int *pcs = NULL, uint pc_limit = 0) PRODUCT_RETURN;
// Helper for dump_asm: presumably prints the PC of node n from the pcs
// table -- verify against the implementation.
1149 void dump_pc(int *pcs, int pc_limit, Node *n);
1150
1151 // Verify ADLC assumptions during startup
1152 static void adlc_verification() PRODUCT_RETURN;
1153
1154 // Definitions of pd methods
1155 static void pd_compiler2_init();
1156
1157 // Auxiliary method for randomized fuzzing/stressing
1158 static bool randomized_select(int count);
1159 };
1160
1161 #endif // SHARE_VM_OPTO_COMPILE_HPP
|
408
409 public:
// Constructor: no CallGenerator attached yet; allocates a fresh stringStream
// to accumulate PrintInlining output. The stream is 'new'-allocated and not
// freed here -- presumably arena/resource-area managed, TODO confirm.
410 PrintInliningBuffer()
411 : _cg(NULL) { _ss = new stringStream(); }
412
// Stream holding the buffered inlining messages for this slot.
413 stringStream* ss() const { return _ss; }
// CallGenerator this buffer is tagged with (NULL if untagged).
414 CallGenerator* cg() const { return _cg; }
// Tag this buffer with the given CallGenerator (see print_inlining_skip).
415 void set_cg(CallGenerator* cg) { _cg = cg; }
416 };
417
// Ordered list of per-decision PrintInlining output buffers; late (delayed)
// inlining output can be spliced in at the position of its tagged buffer.
418 GrowableArray<PrintInliningBuffer>* _print_inlining_list;
// Index of the buffer currently receiving output.
419 int _print_inlining_idx;
420
421 // Only keep nodes in the expensive node list that need to be optimized
422 void cleanup_expensive_nodes(PhaseIterGVN &igvn);
423 // Use for sorting expensive nodes to bring similar nodes together
424 static int cmp_expensive_nodes(Node** n1, Node** n2);
425 // Expensive nodes list already sorted?
426 bool expensive_nodes_sorted() const;
427
428 // Are we within a PreserveJVMState block?
// A nesting depth, not a boolean: maintained by inc_preserve_jvm_state() /
// dec_preserve_jvm_state(), queried via has_preserve_jvm_state().
429 int _preserve_jvm_state;
430
431 public:
432
// Stream of the buffer at the current index; callers print inlining
// messages directly into it.
433 outputStream* print_inlining_stream() const {
434 return _print_inlining_list->adr_at(_print_inlining_idx)->ss();
435 }
436
// Defer output for 'cg': tag the current buffer with cg, advance past it,
// and open a fresh buffer at the new index for subsequent messages.
// No-op unless _print_inlining is set.
437 void print_inlining_skip(CallGenerator* cg) {
438 if (_print_inlining) {
439 _print_inlining_list->adr_at(_print_inlining_idx)->set_cg(cg);
440 _print_inlining_idx++;
441 _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer());
442 }
443 }
444
445 void print_inlining_insert(CallGenerator* cg) {
446 if (_print_inlining) {
447 for (int i = 0; i < _print_inlining_list->length(); i++) {
448 if (_print_inlining_list->adr_at(i)->cg() == cg) {
449 _print_inlining_list->insert_before(i+1, PrintInliningBuffer());
450 _print_inlining_idx = i+1;
806 _last_tf_m = m;
807 _last_tf = tf;
808 }
809
// Indexed lookup into the alias-type table; idx must be in range.
810 AliasType* alias_type(int idx) { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
// Alias type for an address type; optional 'field' refines field accesses
// (delegates to find_alias_type with no_create == false).
811 AliasType* alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); }
812 bool have_alias_type(const TypePtr* adr_type);
813 AliasType* alias_type(ciField* field);
814
// Convenience projections over alias_type(...).
815 int get_alias_index(const TypePtr* at) { return alias_type(at)->index(); }
816 const TypePtr* get_adr_type(uint aidx) { return alias_type(aidx)->adr_type(); }
817 int get_general_index(uint aidx) { return alias_type(aidx)->general_index(); }
818
819 // Building nodes
820 void rethrow_exceptions(JVMState* jvms);
821 void return_values(JVMState* jvms);
822 JVMState* build_start_state(StartNode* start, const TypeFunc* tf);
823
824 // Decide how to build a call.
825 // The profile factor is a discount to apply to this site's interp. profile.
826 CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
827 JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true,
828 bool delayed_forbidden = false);
// True if inlining of this call should be postponed to a later pass:
// either a string-concat candidate or an autoboxing candidate.
829 bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
830 return should_delay_string_inlining(call_method, jvms) ||
831 should_delay_boxing_inlining(call_method, jvms);
832 }
833 bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
834 bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);
835
836 // Helper functions to identify inlining potential at call-site
// Outputs: call_does_dispatch and vtable_index are filled in by the callee.
837 ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
838 ciMethod* callee, const TypeOopPtr* receiver_type,
839 bool is_virtual,
840 bool &call_does_dispatch, int &vtable_index);
841 ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
842 ciMethod* callee, const TypeOopPtr* receiver_type);
843
844 // Report if there were too many traps at a current method and bci.
845 // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
846 // If there is no MDO at all, report no trap unless told to assume it.
847 bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
848 // This version, unspecific to a particular bci, asks if
1144 // correspondence between Use-Def edges and Def-Use edges
1145 // The option no_dead_code enables stronger checks that the
1146 // graph is strongly connected from root in both directions.
// NOTE(review): PRODUCT_RETURN suggests these are debug-build-only
// diagnostics (empty bodies in product builds) -- confirm against
// the macro's definition.
1147 void verify_graph_edges(bool no_dead_code = false) PRODUCT_RETURN;
1148
1149 // End-of-run dumps.
1150 static void print_statistics() PRODUCT_RETURN;
1151
1152 // Dump formatted assembly
1153 void dump_asm(int *pcs = NULL, uint pc_limit = 0) PRODUCT_RETURN;
// Helper for dump_asm: presumably prints the PC of node n from the pcs
// table -- verify against the implementation.
1154 void dump_pc(int *pcs, int pc_limit, Node *n);
1155
1156 // Verify ADLC assumptions during startup
1157 static void adlc_verification() PRODUCT_RETURN;
1158
1159 // Definitions of pd methods
1160 static void pd_compiler2_init();
1161
1162 // Auxiliary method for randomized fuzzing/stressing
1163 static bool randomized_select(int count);
1164
1165 // enter a PreserveJVMState block
1166 void inc_preserve_jvm_state() {
1167 _preserve_jvm_state++;
1168 }
1169
1170 // exit a PreserveJVMState block
// The counter must never underflow: every dec must match a prior inc.
1171 void dec_preserve_jvm_state() {
1172 _preserve_jvm_state--;
1173 assert(_preserve_jvm_state >= 0, "_preserve_jvm_state shouldn't be negative");
1174 }
1175
// True while at least one PreserveJVMState block is active; the integer
// counter (rather than a bool) supports nested blocks.
1176 bool has_preserve_jvm_state() const {
1177 return _preserve_jvm_state > 0;
1178 }
1179 };
1180
1181 #endif // SHARE_VM_OPTO_COMPILE_HPP
|