src/share/vm/opto/compile.hpp
Cdiff for 6934604


*** 260,269 **** --- 260,270 ----
    // Fixed parameters to this compilation.
    const int             _compile_id;
    const bool            _save_argument_registers; // save/restore arg regs for trampolines
    const bool            _subsume_loads;         // Load can be matched as part of a larger op.
    const bool            _do_escape_analysis;    // Do escape analysis.
+   const bool            _eliminate_autobox;     // Do autoboxing elimination.
    ciMethod*             _method;                // The method being compiled.
    int                   _entry_bci;             // entry bci for osr methods.
    const TypeFunc*       _tf;                    // My kind of signature
    InlineTree*           _ilt;                   // Ditto (temporary).
    address               _stub_function;         // VM entry for stub being compiled, or NULL
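
Note: the new _eliminate_autobox field joins the block of "fixed parameters to this compilation": const members set once in the constructor's initializer list, so every later compilation phase sees the same answer. A minimal, self-contained sketch of that pattern (ToyCompile is illustrative only, not the real Compile class):

    // Sketch of the "fixed compilation parameters" pattern used above.
    class ToyCompile {
      const bool _subsume_loads;       // fixed at construction, like the real fields
      const bool _do_escape_analysis;
      const bool _eliminate_autobox;   // the flag this change adds
     public:
      ToyCompile(bool subsume_loads, bool do_escape_analysis, bool eliminate_autobox)
        : _subsume_loads(subsume_loads),
          _do_escape_analysis(do_escape_analysis),
          _eliminate_autobox(eliminate_autobox) {}
      bool eliminate_autobox() const { return _eliminate_autobox; }
    };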
*** 285,294 **** --- 286,296 ----
    bool                  _inlining_incrementally;// Are we doing incremental inlining (post parse)
    bool                  _has_loops;             // True if the method _may_ have some loops
    bool                  _has_split_ifs;         // True if the method _may_ have some split-if
    bool                  _has_unsafe_access;     // True if the method _may_ produce faults in unsafe loads or stores.
    bool                  _has_stringbuilder;     // True StringBuffers or StringBuilders are allocated
+   bool                  _has_boxed_value;       // True if a boxed object is allocated
    int                   _max_vector_size;       // Maximum size of generated vectors
    uint                  _trap_hist[trapHistLength];  // Cumulative traps
    bool                  _trap_can_recompile;    // Have we emitted a recompiling trap?
    uint                  _decompile_count;       // Cumulative decompilation counts.
    bool                  _do_inlining;           // True if we intend to do inlining
*** 373,382 **** --- 375,386 ----
    GrowableArray<CallGenerator*> _late_inlines;        // List of CallGenerators to be revisited after
                                                        // main parsing has finished.
    GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations
+   GrowableArray<CallGenerator*> _boxing_late_inlines; // same but for boxing operations
+   int                           _late_inlines_pos;    // Where in the queue should the next late inlining candidate go (emulate depth first inlining)

    uint                          _number_of_mh_late_inlines; // number of method handle late inlining still pending

    // Inlining may not happen in parse order which would make
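
Note: _late_inlines_pos is an insertion cursor, not a count. Appending newly discovered candidates at the tail of the queue would process them breadth-first; inserting them at the cursor, right after the call being expanded, approximates the depth-first order the parser would have produced. A self-contained sketch of that idea (illustrative only, not HotSpot code):

    #include <cstdio>
    #include <cstring>
    #include <vector>

    int main() {
      std::vector<const char*> queue = {"a", "b"};
      size_t pos = 0;  // insertion cursor, playing the role of _late_inlines_pos

      for (size_t i = 0; i < queue.size(); ++i) {
        const char* cg = queue[i];
        pos = i + 1;               // nested candidates go right after the current one
        std::printf("inline %s\n", cg);
        if (std::strcmp(cg, "a") == 0) {
          // pretend that inlining "a" discovers two nested candidates
          queue.insert(queue.begin() + pos, {"a1", "a2"});
        }
      }
      // Prints a, a1, a2, b: depth-first, although "b" was queued before a1/a2.
      return 0;
    }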
*** 486,495 **** --- 490,503 ----
    // instructions that subsume a load may result in an unschedulable
    // instruction sequence.
    bool              subsume_loads() const       { return _subsume_loads; }
    // Do escape analysis.
    bool              do_escape_analysis() const  { return _do_escape_analysis; }
+   // Do autoboxing elimination.
+   bool              eliminate_autobox() const   { return _eliminate_autobox; }
+   // Do aggressive boxing elimination.
+   bool              aggressive_unboxing() const { return _eliminate_autobox && AggressiveUnboxing; }
    bool              save_argument_registers() const { return _save_argument_registers; }

    // Other fixed compilation parameters.
    ciMethod*         method() const              { return _method; }
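
Note: aggressive_unboxing() is a two-level gate. It requires both the per-compilation constant and the global AggressiveUnboxing switch, so the aggressive form is automatically off whenever autobox elimination itself is off for this compilation. A minimal sketch of the pattern (the plain bool below is only a stand-in for the real VM flag):

    static bool AggressiveUnboxingFlag = false;  // stand-in for the AggressiveUnboxing VM flag

    class ToyGate {
      const bool _eliminate_autobox;
     public:
      explicit ToyGate(bool eliminate_autobox) : _eliminate_autobox(eliminate_autobox) {}
      bool eliminate_autobox()   const { return _eliminate_autobox; }
      // Never true unless the basic elimination is enabled for this compilation.
      bool aggressive_unboxing() const { return _eliminate_autobox && AggressiveUnboxingFlag; }
    };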
*** 525,534 **** --- 533,544 ----
    void          set_has_split_ifs(bool z)       { _has_split_ifs = z; }
    bool              has_unsafe_access() const   { return _has_unsafe_access; }
    void          set_has_unsafe_access(bool z)   { _has_unsafe_access = z; }
    bool              has_stringbuilder() const   { return _has_stringbuilder; }
    void          set_has_stringbuilder(bool z)   { _has_stringbuilder = z; }
+   bool              has_boxed_value() const     { return _has_boxed_value; }
+   void          set_has_boxed_value(bool z)     { _has_boxed_value = z; }
    int               max_vector_size() const     { return _max_vector_size; }
    void          set_max_vector_size(int s)      { _max_vector_size = s; }
    void          set_trap_count(uint r, uint c)  { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; }
    uint              trap_count(uint r) const    { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
    bool              trap_can_recompile() const  { return _trap_can_recompile; }
*** 577,592 ****
  #ifndef PRODUCT
      if (_printer) _printer->end_method();
  #endif
    }

!   int           macro_count()             { return _macro_nodes->length(); }
!   int           predicate_count()         { return _predicate_opaqs->length(); }
!   int           expensive_count()         { return _expensive_nodes->length(); }
!   Node*         macro_node(int idx)       { return _macro_nodes->at(idx); }
!   Node*         predicate_opaque1_node(int idx) { return _predicate_opaqs->at(idx); }
!   Node*         expensive_node(int idx)   { return _expensive_nodes->at(idx); }
    ConnectionGraph* congraph()             { return _congraph; }
    void set_congraph(ConnectionGraph* congraph) { _congraph = congraph; }
    void add_macro_node(Node * n) {
      //assert(n->is_macro(), "must be a macro node");
      assert(!_macro_nodes->contains(n), " duplicate entry in expand list");
--- 587,602 ----
  #ifndef PRODUCT
      if (_printer) _printer->end_method();
  #endif
    }

!   int           macro_count()       const { return _macro_nodes->length(); }
!   int           predicate_count()   const { return _predicate_opaqs->length(); }
!   int           expensive_count()   const { return _expensive_nodes->length(); }
!   Node*         macro_node(int idx) const { return _macro_nodes->at(idx); }
!   Node*         predicate_opaque1_node(int idx) const { return _predicate_opaqs->at(idx); }
!   Node*         expensive_node(int idx) const { return _expensive_nodes->at(idx); }
    ConnectionGraph* congraph()             { return _congraph; }
    void set_congraph(ConnectionGraph* congraph) { _congraph = congraph; }
    void add_macro_node(Node * n) {
      //assert(n->is_macro(), "must be a macro node");
      assert(!_macro_nodes->contains(n), " duplicate entry in expand list");
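
Note: the only change in this hunk is const-correctness. Qualifying the read-only accessors lets them be called through a const Compile* and documents that they do not mutate state. A minimal illustration:

    struct Lists {
      int length_;
      int length() const { return length_; }  // const-qualified accessor
    };

    int count_of(const Lists* l) {
      return l->length();  // compiles only because length() is const
    }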
*** 764,774 ****
    JVMState* build_start_state(StartNode* start, const TypeFunc* tf);

    // Decide how to build a call.
    // The profile factor is a discount to apply to this site's interp. profile.
    CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
                                     JVMState* jvms, bool allow_inline, float profile_factor,
                                     bool allow_intrinsics = true, bool delayed_forbidden = false);
!   bool should_delay_inlining(ciMethod* call_method, JVMState* jvms);

    // Helper functions to identify inlining potential at call-site
    ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                    ciMethod* callee, const TypeOopPtr* receiver_type,
                                    bool is_virtual,
--- 774,789 ----
    JVMState* build_start_state(StartNode* start, const TypeFunc* tf);

    // Decide how to build a call.
    // The profile factor is a discount to apply to this site's interp. profile.
    CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
                                     JVMState* jvms, bool allow_inline, float profile_factor,
                                     bool allow_intrinsics = true, bool delayed_forbidden = false);
!   bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
!     return should_delay_string_inlining(call_method, jvms) ||
!            should_delay_boxing_inlining(call_method, jvms);
!   }
!   bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
!   bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);

    // Helper functions to identify inlining potential at call-site
    ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                    ciMethod* callee, const TypeOopPtr* receiver_type,
                                    bool is_virtual,
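
Note: should_delay_inlining() becomes a header-inlined dispatcher over two predicates; the string predicate keeps the old behavior and the boxing one is new. Their implementations live in compile.cpp and are not shown on this page. A hypothetical, self-contained sketch of the logic a boxing predicate plausibly needs, with is_boxing_method() assumed rather than taken from the source:

    #include <string>

    struct ToyState {
      bool eliminate_autobox = true;
      bool has_boxed_value   = false;

      // Assumed helper: stands in for a real "is this Integer.valueOf etc.?" check.
      bool is_boxing_method(const std::string& callee) {
        return callee.find("valueOf") != std::string::npos;
      }

      bool should_delay_boxing_inlining(const std::string& callee) {
        if (eliminate_autobox && is_boxing_method(callee)) {
          has_boxed_value = true;  // remember that a boxing-elimination pass is needed
          return true;             // park the call for late inlining
        }
        return false;              // no point delaying; inline immediately
      }
    };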
*** 820,829 **** --- 835,848 ----
    void              add_string_late_inline(CallGenerator* cg) {
      _string_late_inlines.push(cg);
    }

+   void              add_boxing_late_inline(CallGenerator* cg) {
+     _boxing_late_inlines.push(cg);
+   }
+
    void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful);

    void dump_inlining();

    bool over_inlining_cutoff() const {
*** 836,848 **** --- 855,869 ----
    void inc_number_of_mh_late_inlines() { _number_of_mh_late_inlines++; }
    void dec_number_of_mh_late_inlines() {
      assert(_number_of_mh_late_inlines > 0, "_number_of_mh_late_inlines < 0 !");
      _number_of_mh_late_inlines--;
    }
    bool has_mh_late_inlines() const { return _number_of_mh_late_inlines > 0; }
+   bool has_late_inline() const { return ((_late_inlines.length() + _string_late_inlines.length()) > 0); }

    void inline_incrementally_one(PhaseIterGVN& igvn);
    void inline_incrementally(PhaseIterGVN& igvn);
    void inline_string_calls(bool parse_time);
+   void inline_boxing_calls(PhaseIterGVN& igvn);

    // Matching, CFG layout, allocation, code generation
    PhaseCFG*         cfg()                       { return _cfg; }
    bool              select_24_bit_instr() const { return _select_24_bit_instr; }
    bool              in_24_bit_fp_mode() const   { return _in_24_bit_fp_mode; }
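
Note: inline_boxing_calls() parallels inline_string_calls(): the calls parked by should_delay_boxing_inlining() are expanded after parsing, and IGVN then cleans up the resulting graph. The overall shape of such a pass, as a self-contained sketch (illustrative only, not the compile.cpp implementation):

    #include <functional>
    #include <vector>

    // Drain a queue of parked "late inline" thunks, then clear it; graph
    // cleanup (IGVN in the real compiler) happens after the drain.
    void drain_late_inlines(std::vector<std::function<void()>>& queue) {
      for (auto& do_late_inline : queue) {
        do_late_inline();  // expand one parked call
      }
      queue.clear();       // all candidates consumed
    }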
*** 911,921 ****
    // Major entry point.  Given a Scope, compile the associated method.
    // For normal compilations, entry_bci is InvocationEntryBci.  For on stack
    // replacement, entry_bci indicates the bytecode for which to compile a
    // continuation.
    Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
!           int entry_bci, bool subsume_loads, bool do_escape_analysis);

    // Second major entry point.  From the TypeFunc signature, generate code
    // to pass arguments from the Java calling convention to the C calling
    // convention.
    Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
--- 932,943 ----
    // Major entry point.  Given a Scope, compile the associated method.
    // For normal compilations, entry_bci is InvocationEntryBci.  For on stack
    // replacement, entry_bci indicates the bytecode for which to compile a
    // continuation.
    Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
!           int entry_bci, bool subsume_loads, bool do_escape_analysis,
!           bool eliminate_boxing);

    // Second major entry point.  From the TypeFunc signature, generate code
    // to pass arguments from the Java calling convention to the C calling
    // convention.
    Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
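
Note: every caller of this constructor must now supply the extra eliminate_boxing argument. The companion c2compiler.cpp change is not part of this file's diff; a hedged reconstruction of that call site (details may differ from the actual changeset) is:

    // Reconstructed, not taken from this page:
    //   bool subsume_loads      = SubsumeLoads;
    //   bool do_escape_analysis = DoEscapeAnalysis && !env->jvmti_can_access_local_variables();
    //   bool eliminate_boxing   = EliminateAutoBox;
    //   Compile C(env, this, target, entry_bci,
    //             subsume_loads, do_escape_analysis, eliminate_boxing);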