src/share/vm/opto/compile.hpp

Old version:

 245     Constant add(MachConstantNode* n, jfloat f) {
 246       jvalue value; value.f = f;
 247       return add(n, T_FLOAT, value);
 248     }
 249     Constant add(MachConstantNode* n, jdouble d) {
 250       jvalue value; value.d = d;
 251       return add(n, T_DOUBLE, value);
 252     }
 253 
 254     // Jump-table
 255     Constant  add_jump_table(MachConstantNode* n);
 256     void     fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const;
 257   };
 258 
 259  private:
 260   // Fixed parameters to this compilation.
 261   const int             _compile_id;
 262   const bool            _save_argument_registers; // save/restore arg regs for trampolines
 263   const bool            _subsume_loads;         // Load can be matched as part of a larger op.
 264   const bool            _do_escape_analysis;    // Do escape analysis.

 265   ciMethod*             _method;                // The method being compiled.
 266   int                   _entry_bci;             // entry bci for osr methods.
 267   const TypeFunc*       _tf;                    // My kind of signature
 268   InlineTree*           _ilt;                   // Ditto (temporary).
 269   address               _stub_function;         // VM entry for stub being compiled, or NULL
 270   const char*           _stub_name;             // Name of stub or adapter being compiled, or NULL
 271   address               _stub_entry_point;      // Compiled code entry for generated stub, or NULL
 272 
 273   // Control of this compilation.
 274   int                   _num_loop_opts;         // Number of iterations for doing loop optimizations
 275   int                   _max_inline_size;       // Max inline size for this compilation
 276   int                   _freq_inline_size;      // Max hot method inline size for this compilation
 277   int                   _fixed_slots;           // count of frame slots not allocated by the register
 278                                                 // allocator i.e. locks, original deopt pc, etc.
 279   // For deopt
 280   int                   _orig_pc_slot;
 281   int                   _orig_pc_slot_offset_in_bytes;
 282 
 283   int                   _major_progress;        // Count of something big happening
 284   bool                  _inlining_progress;     // progress doing incremental inlining?
 285   bool                  _inlining_incrementally;// Are we doing incremental inlining (post parse)
 286   bool                  _has_loops;             // True if the method _may_ have some loops
 287   bool                  _has_split_ifs;         // True if the method _may_ have some split-if
 288   bool                  _has_unsafe_access;     // True if the method _may_ produce faults in unsafe loads or stores.
 289   bool                  _has_stringbuilder;     // True if StringBuffers or StringBuilders are allocated

 290   int                   _max_vector_size;       // Maximum size of generated vectors
 291   uint                  _trap_hist[trapHistLength];  // Cumulative traps
 292   bool                  _trap_can_recompile;    // Have we emitted a recompiling trap?
 293   uint                  _decompile_count;       // Cumulative decompilation counts.
 294   bool                  _do_inlining;           // True if we intend to do inlining
 295   bool                  _do_scheduling;         // True if we intend to do scheduling
 296   bool                  _do_freq_based_layout;  // True if we intend to do frequency based block layout
 297   bool                  _do_count_invocations;  // True if we generate code to count invocations
 298   bool                  _do_method_data_update; // True if we generate code to update MethodData*s
 299   int                   _AliasLevel;            // Locally-adjusted version of AliasLevel flag.
 300   bool                  _print_assembly;        // True if we should dump assembly code for this compilation
 301 #ifndef PRODUCT
 302   bool                  _trace_opto_output;
 303   bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
 304 #endif
 305 
 306   // JSR 292
 307   bool                  _has_method_handle_invokes; // True if this method has MethodHandle invokes.
 308 
 309   // Compilation environment.


 358   Arena*                _type_arena;            // Alias for _Compile_types except in Initialize_shared()
 359   Dict*                 _type_dict;             // Intern table
 360   void*                 _type_hwm;              // Last allocation (see Type::operator new/delete)
 361   size_t                _type_last_size;        // Last allocation size (see Type::operator new/delete)
 362   ciMethod*             _last_tf_m;             // Cache for
 363   const TypeFunc*       _last_tf;               //  TypeFunc::make
 364   AliasType**           _alias_types;           // List of alias types seen so far.
 365   int                   _num_alias_types;       // Logical length of _alias_types
 366   int                   _max_alias_types;       // Physical length of _alias_types
 367   AliasCacheEntry       _alias_cache[AliasCacheSize]; // Gets aliases w/o data structure walking
 368 
 369   // Parsing, optimization
 370   PhaseGVN*             _initial_gvn;           // Results of parse-time PhaseGVN
 371   Unique_Node_List*     _for_igvn;              // Initial work-list for next round of Iterative GVN
 372   WarmCallInfo*         _warm_calls;            // Sorted work-list for heat-based inlining.
 373 
 374   GrowableArray<CallGenerator*> _late_inlines;        // List of CallGenerators to be revisited after
 375                                                       // main parsing has finished.
 376   GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations
 377 


 378   int                           _late_inlines_pos;    // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
 379   uint                          _number_of_mh_late_inlines; // number of method handle late inlining still pending
 380 
 381 
 382   // Inlining may not happen in parse order which would make
 383   // PrintInlining output confusing. Keep track of PrintInlining
 384   // pieces in order.
 385   class PrintInliningBuffer : public ResourceObj {
 386    private:
 387     CallGenerator* _cg;
 388     stringStream* _ss;
 389 
 390    public:
 391     PrintInliningBuffer()
 392       : _cg(NULL) { _ss = new stringStream(); }
 393 
 394     stringStream* ss() const { return _ss; }
 395     CallGenerator* cg() const { return _cg; }
 396     void set_cg(CallGenerator* cg) { _cg = cg; }
 397   };


 469   BufferBlob*           _scratch_buffer_blob;   // For temporary code buffers.
 470   relocInfo*            _scratch_locs_memory;   // For temporary code buffers.
 471   int                   _scratch_const_size;    // For temporary code buffers.
 472   bool                  _in_scratch_emit_size;  // true when in scratch_emit_size.
 473 
 474  public:
 475   // Accessors
 476 
 477   // The Compile instance currently active in this (compiler) thread.
 478   static Compile* current() {
 479     return (Compile*) ciEnv::current()->compiler_data();
 480   }
 481 
 482   // ID for this compilation.  Useful for setting breakpoints in the debugger.
 483   int               compile_id() const          { return _compile_id; }
 484 
 485   // Does this compilation allow instructions to subsume loads?  User
 486   // instructions that subsume a load may result in an unschedulable
 487   // instruction sequence.
 488   bool              subsume_loads() const       { return _subsume_loads; }
 489   // Do escape analysis.


 490   bool              do_escape_analysis() const  { return _do_escape_analysis; }








 491   bool              save_argument_registers() const { return _save_argument_registers; }
 492 
 493 
 494   // Other fixed compilation parameters.
 495   ciMethod*         method() const              { return _method; }
 496   int               entry_bci() const           { return _entry_bci; }
 497   bool              is_osr_compilation() const  { return _entry_bci != InvocationEntryBci; }
 498   bool              is_method_compilation() const { return (_method != NULL && !_method->flags().is_native()); }
 499   const TypeFunc*   tf() const                  { assert(_tf!=NULL, ""); return _tf; }
 500   void         init_tf(const TypeFunc* tf)      { assert(_tf==NULL, ""); _tf = tf; }
 501   InlineTree*       ilt() const                 { return _ilt; }
 502   address           stub_function() const       { return _stub_function; }
 503   const char*       stub_name() const           { return _stub_name; }
 504   address           stub_entry_point() const    { return _stub_entry_point; }
 505 
 506   // Control of this compilation.
 507   int               fixed_slots() const         { assert(_fixed_slots >= 0, "");         return _fixed_slots; }
 508   void          set_fixed_slots(int n)          { _fixed_slots = n; }
 509   int               major_progress() const      { return _major_progress; }
 510   void          set_inlining_progress(bool z)   { _inlining_progress = z; }
 511   int               inlining_progress() const   { return _inlining_progress; }
 512   void          set_inlining_incrementally(bool z) { _inlining_incrementally = z; }
 513   int               inlining_incrementally() const { return _inlining_incrementally; }
 514   void          set_major_progress()            { _major_progress++; }
 515   void        clear_major_progress()            { _major_progress = 0; }
 516   int               num_loop_opts() const       { return _num_loop_opts; }
 517   void          set_num_loop_opts(int n)        { _num_loop_opts = n; }
 518   int               max_inline_size() const     { return _max_inline_size; }
 519   void          set_freq_inline_size(int n)     { _freq_inline_size = n; }
 520   int               freq_inline_size() const    { return _freq_inline_size; }
 521   void          set_max_inline_size(int n)      { _max_inline_size = n; }
 522   bool              has_loops() const           { return _has_loops; }
 523   void          set_has_loops(bool z)           { _has_loops = z; }
 524   bool              has_split_ifs() const       { return _has_split_ifs; }
 525   void          set_has_split_ifs(bool z)       { _has_split_ifs = z; }
 526   bool              has_unsafe_access() const   { return _has_unsafe_access; }
 527   void          set_has_unsafe_access(bool z)   { _has_unsafe_access = z; }
 528   bool              has_stringbuilder() const   { return _has_stringbuilder; }
 529   void          set_has_stringbuilder(bool z)   { _has_stringbuilder = z; }


 530   int               max_vector_size() const     { return _max_vector_size; }
 531   void          set_max_vector_size(int s)      { _max_vector_size = s; }
 532   void          set_trap_count(uint r, uint c)  { assert(r < trapHistLength, "oob");        _trap_hist[r] = c; }
 533   uint              trap_count(uint r) const    { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
 534   bool              trap_can_recompile() const  { return _trap_can_recompile; }
 535   void          set_trap_can_recompile(bool z)  { _trap_can_recompile = z; }
 536   uint              decompile_count() const     { return _decompile_count; }
 537   void          set_decompile_count(uint c)     { _decompile_count = c; }
 538   bool              allow_range_check_smearing() const;
 539   bool              do_inlining() const         { return _do_inlining; }
 540   void          set_do_inlining(bool z)         { _do_inlining = z; }
 541   bool              do_scheduling() const       { return _do_scheduling; }
 542   void          set_do_scheduling(bool z)       { _do_scheduling = z; }
 543   bool              do_freq_based_layout() const{ return _do_freq_based_layout; }
 544   void          set_do_freq_based_layout(bool z){ _do_freq_based_layout = z; }
 545   bool              do_count_invocations() const{ return _do_count_invocations; }
 546   void          set_do_count_invocations(bool z){ _do_count_invocations = z; }
 547   bool              do_method_data_update() const { return _do_method_data_update; }
 548   void          set_do_method_data_update(bool z) { _do_method_data_update = z; }
 549   int               AliasLevel() const          { return _AliasLevel; }


 562   // JSR 292
 563   bool              has_method_handle_invokes() const { return _has_method_handle_invokes;     }
 564   void          set_has_method_handle_invokes(bool z) {        _has_method_handle_invokes = z; }
 565 
 566   void begin_method() {
 567 #ifndef PRODUCT
 568     if (_printer) _printer->begin_method(this);
 569 #endif
 570   }
 571   void print_method(const char * name, int level = 1) {
 572 #ifndef PRODUCT
 573     if (_printer) _printer->print_method(this, name, level);
 574 #endif
 575   }
 576   void end_method() {
 577 #ifndef PRODUCT
 578     if (_printer) _printer->end_method();
 579 #endif
 580   }
 581 
 582   int           macro_count()                   { return _macro_nodes->length(); }
 583   int           predicate_count()               { return _predicate_opaqs->length();}
 584   int           expensive_count()               { return _expensive_nodes->length(); }
 585   Node*         macro_node(int idx)             { return _macro_nodes->at(idx); }
 586   Node*         predicate_opaque1_node(int idx) { return _predicate_opaqs->at(idx);}
 587   Node*         expensive_node(int idx)         { return _expensive_nodes->at(idx); }
 588   ConnectionGraph* congraph()                   { return _congraph;}
 589   void set_congraph(ConnectionGraph* congraph)  { _congraph = congraph;}
 590   void add_macro_node(Node * n) {
 591     //assert(n->is_macro(), "must be a macro node");
 592     assert(!_macro_nodes->contains(n), " duplicate entry in expand list");
 593     _macro_nodes->append(n);
 594   }
 595   void remove_macro_node(Node * n) {
 596     // this function may be called twice for a node so check
 597     // that the node is in the array before attempting to remove it
 598     if (_macro_nodes->contains(n))
 599       _macro_nodes->remove(n);
 600     // remove from _predicate_opaqs list also if it is there
 601     if (predicate_count() > 0 && _predicate_opaqs->contains(n)){
 602       _predicate_opaqs->remove(n);
 603     }
 604   }
 605   void add_expensive_node(Node * n);
 606   void remove_expensive_node(Node * n) {
 607     if (_expensive_nodes->contains(n)) {


 749     _last_tf = tf;
 750   }
 751 
 752   AliasType*        alias_type(int                idx)  { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
 753   AliasType*        alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); }
 754   bool         have_alias_type(const TypePtr* adr_type);
 755   AliasType*        alias_type(ciField*         field);
 756 
 757   int               get_alias_index(const TypePtr* at)  { return alias_type(at)->index(); }
 758   const TypePtr*    get_adr_type(uint aidx)             { return alias_type(aidx)->adr_type(); }
 759   int               get_general_index(uint aidx)        { return alias_type(aidx)->general_index(); }
 760 
 761   // Building nodes
 762   void              rethrow_exceptions(JVMState* jvms);
 763   void              return_values(JVMState* jvms);
 764   JVMState*         build_start_state(StartNode* start, const TypeFunc* tf);
 765 
 766   // Decide how to build a call.
 767   // The profile factor is a discount to apply to this site's interp. profile.
 768   CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch, JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true, bool delayed_forbidden = false);
 769   bool should_delay_inlining(ciMethod* call_method, JVMState* jvms);





 770 
 771   // Helper functions to identify inlining potential at call-site
 772   ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
 773                                   ciMethod* callee, const TypeOopPtr* receiver_type,
 774                                   bool is_virtual,
 775                                   bool &call_does_dispatch, int &vtable_index);
 776   ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
 777                               ciMethod* callee, const TypeOopPtr* receiver_type);
 778 
 779   // Report if there were too many traps at the current method and bci.
 780   // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
 781   // If there is no MDO at all, report no trap unless told to assume it.
 782   bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
 783   // This version, unspecific to a particular bci, asks if
 784   // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
 785   bool too_many_traps(Deoptimization::DeoptReason reason,
 786                       // Privately used parameter for logging:
 787                       ciMethodData* logmd = NULL);
 788   // Report if there were too many recompiles at a method and bci.
 789   bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);


 805   void              remove_useless_nodes (Unique_Node_List &useful);
 806 
 807   WarmCallInfo*     warm_calls() const          { return _warm_calls; }
 808   void          set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
 809   WarmCallInfo* pop_warm_call();
 810 
 811   // Record this CallGenerator for inlining at the end of parsing.
 812   void              add_late_inline(CallGenerator* cg)        {
 813     _late_inlines.insert_before(_late_inlines_pos, cg);
 814     _late_inlines_pos++;
 815   }
 816 
 817   void              prepend_late_inline(CallGenerator* cg)    {
 818     _late_inlines.insert_before(0, cg);
 819   }
 820 
 821   void              add_string_late_inline(CallGenerator* cg) {
 822     _string_late_inlines.push(cg);
 823   }
 824 




 825   void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful);
 826 
 827   void dump_inlining();
 828 
 829   bool over_inlining_cutoff() const {
 830     if (!inlining_incrementally()) {
 831       return unique() > (uint)NodeCountInliningCutoff;
 832     } else {
 833       return live_nodes() > (uint)LiveNodeCountInliningCutoff;
 834     }
 835   }
 836 
 837   void inc_number_of_mh_late_inlines() { _number_of_mh_late_inlines++; }
 838   void dec_number_of_mh_late_inlines() { assert(_number_of_mh_late_inlines > 0, "_number_of_mh_late_inlines < 0 !"); _number_of_mh_late_inlines--; }
 839   bool has_mh_late_inlines() const     { return _number_of_mh_late_inlines > 0; }
 840 
 841   void inline_incrementally_one(PhaseIterGVN& igvn);
 842   void inline_incrementally(PhaseIterGVN& igvn);
 843   void inline_string_calls(bool parse_time);

 844 
 845   // Matching, CFG layout, allocation, code generation
 846   PhaseCFG*         cfg()                       { return _cfg; }
 847   bool              select_24_bit_instr() const { return _select_24_bit_instr; }
 848   bool              in_24_bit_fp_mode() const   { return _in_24_bit_fp_mode; }
 849   bool              has_java_calls() const      { return _java_calls > 0; }
 850   int               java_calls() const          { return _java_calls; }
 851   int               inner_loops() const         { return _inner_loops; }
 852   Matcher*          matcher()                   { return _matcher; }
 853   PhaseRegAlloc*    regalloc()                  { return _regalloc; }
 854   int               frame_slots() const         { return _frame_slots; }
 855   int               frame_size_in_words() const; // frame_slots in units of the polymorphic 'words'
 856   RegMask&          FIRST_STACK_mask()          { return _FIRST_STACK_mask; }
 857   Arena*            indexSet_arena()            { return _indexSet_arena; }
 858   void*             indexSet_free_block_list()  { return _indexSet_free_block_list; }
 859   uint              node_bundling_limit()       { return _node_bundling_limit; }
 860   Bundle*           node_bundling_base()        { return _node_bundling_base; }
 861   void          set_node_bundling_limit(uint n) { _node_bundling_limit = n; }
 862   void          set_node_bundling_base(Bundle* b) { _node_bundling_base = b; }
 863   bool          starts_bundle(const Node *n) const;


 896   relocInfo*        scratch_locs_memory()       { return _scratch_locs_memory; }
 897   void          set_scratch_locs_memory(relocInfo* b)  { _scratch_locs_memory = b; }
 898 
 899   // emit to scratch blob, report resulting size
 900   uint              scratch_emit_size(const Node* n);
 901   void       set_in_scratch_emit_size(bool x)   {        _in_scratch_emit_size = x; }
 902   bool           in_scratch_emit_size() const   { return _in_scratch_emit_size;     }
 903 
 904   enum ScratchBufferBlob {
 905     MAX_inst_size       = 1024,
 906     MAX_locs_size       = 128, // number of relocInfo elements
 907     MAX_const_size      = 128,
 908     MAX_stubs_size      = 128
 909   };
 910 
 911   // Major entry point.  Given a Scope, compile the associated method.
 912   // For normal compilations, entry_bci is InvocationEntryBci.  For on stack
 913   // replacement, entry_bci indicates the bytecode for which to compile a
 914   // continuation.
 915   Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
 916           int entry_bci, bool subsume_loads, bool do_escape_analysis);

 917 
 918   // Second major entry point.  From the TypeFunc signature, generate code
 919   // to pass arguments from the Java calling convention to the C calling
 920   // convention.
 921   Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
 922           address stub_function, const char *stub_name,
 923           int is_fancy_jump, bool pass_tls,
 924           bool save_arg_registers, bool return_pc);
 925 
 926   // From the TypeFunc signature, generate code to pass arguments
 927   // from Compiled calling convention to Interpreter's calling convention
 928   void Generate_Compiled_To_Interpreter_Graph(const TypeFunc *tf, address interpreter_entry);
 929 
 930   // From the TypeFunc signature, generate code to pass arguments
 931   // from Interpreter's calling convention to Compiler's calling convention
 932   void Generate_Interpreter_To_Compiled_Graph(const TypeFunc *tf);
 933 
 934   // Are we compiling a method?
 935   bool has_method() { return method() != NULL; }
 936 




New version:

 245     Constant add(MachConstantNode* n, jfloat f) {
 246       jvalue value; value.f = f;
 247       return add(n, T_FLOAT, value);
 248     }
 249     Constant add(MachConstantNode* n, jdouble d) {
 250       jvalue value; value.d = d;
 251       return add(n, T_DOUBLE, value);
 252     }
 253 
 254     // Jump-table
 255     Constant  add_jump_table(MachConstantNode* n);
 256     void     fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const;
 257   };
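
The jump-table pair above follows a reserve-then-patch protocol: add_jump_table reserves room in the constant table while the branch targets are still unknown, and fill_jump_table writes the final addresses once code layout has bound every label. Below is a minimal standalone sketch of that pattern; the types, names, and encoding are invented for illustration and are not HotSpot's.

    // Sketch of reserve-then-patch: slots are claimed before targets
    // exist and filled only after layout has bound the labels.
    #include <cassert>
    #include <cstdio>
    #include <vector>

    struct Label {               // a branch target; offset fixed at layout time
      long offset = -1;
      bool bound() const { return offset >= 0; }
    };

    class ConstantPool {
      std::vector<long> _slots;
     public:
      // Reserve 'len' contiguous slots; contents are patched later.
      int reserve_jump_table(int len) {
        int base = (int)_slots.size();
        _slots.resize(_slots.size() + len, 0);
        return base;
      }
      // Patch the reserved slots once every target label is bound.
      void fill_jump_table(int base, const std::vector<Label*>& labels) {
        for (size_t i = 0; i < labels.size(); i++) {
          assert(labels[i]->bound() && "label must be bound before filling");
          _slots[base + i] = labels[i]->offset;
        }
      }
      long at(int i) const { return _slots[i]; }
    };

    int main() {
      ConstantPool cp;
      Label case0, case1;
      int base = cp.reserve_jump_table(2);       // emitted before targets exist
      case0.offset = 0x40; case1.offset = 0x80;  // code layout binds labels
      std::vector<Label*> labels{&case0, &case1};
      cp.fill_jump_table(base, labels);
      std::printf("slot0=%ld slot1=%ld\n", cp.at(base), cp.at(base + 1));
      return 0;
    }
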
 258 
 259  private:
 260   // Fixed parameters to this compilation.
 261   const int             _compile_id;
 262   const bool            _save_argument_registers; // save/restore arg regs for trampolines
 263   const bool            _subsume_loads;         // Load can be matched as part of a larger op.
 264   const bool            _do_escape_analysis;    // Do escape analysis.
 265   const bool            _eliminate_boxing;      // Do boxing elimination.
 266   ciMethod*             _method;                // The method being compiled.
 267   int                   _entry_bci;             // entry bci for osr methods.
 268   const TypeFunc*       _tf;                    // My kind of signature
 269   InlineTree*           _ilt;                   // Ditto (temporary).
 270   address               _stub_function;         // VM entry for stub being compiled, or NULL
 271   const char*           _stub_name;             // Name of stub or adapter being compiled, or NULL
 272   address               _stub_entry_point;      // Compiled code entry for generated stub, or NULL
 273 
 274   // Control of this compilation.
 275   int                   _num_loop_opts;         // Number of iterations for doing loop optimizations
 276   int                   _max_inline_size;       // Max inline size for this compilation
 277   int                   _freq_inline_size;      // Max hot method inline size for this compilation
 278   int                   _fixed_slots;           // count of frame slots not allocated by the register
 279                                                 // allocator i.e. locks, original deopt pc, etc.
 280   // For deopt
 281   int                   _orig_pc_slot;
 282   int                   _orig_pc_slot_offset_in_bytes;
 283 
 284   int                   _major_progress;        // Count of something big happening
 285   bool                  _inlining_progress;     // progress doing incremental inlining?
 286   bool                  _inlining_incrementally;// Are we doing incremental inlining (post parse)
 287   bool                  _has_loops;             // True if the method _may_ have some loops
 288   bool                  _has_split_ifs;         // True if the method _may_ have some split-if
 289   bool                  _has_unsafe_access;     // True if the method _may_ produce faults in unsafe loads or stores.
 290   bool                  _has_stringbuilder;     // True if StringBuffers or StringBuilders are allocated
 291   bool                  _has_boxed_value;       // True if a boxed object is allocated
 292   int                   _max_vector_size;       // Maximum size of generated vectors
 293   uint                  _trap_hist[trapHistLength];  // Cumulative traps
 294   bool                  _trap_can_recompile;    // Have we emitted a recompiling trap?
 295   uint                  _decompile_count;       // Cumulative decompilation counts.
 296   bool                  _do_inlining;           // True if we intend to do inlining
 297   bool                  _do_scheduling;         // True if we intend to do scheduling
 298   bool                  _do_freq_based_layout;  // True if we intend to do frequency based block layout
 299   bool                  _do_count_invocations;  // True if we generate code to count invocations
 300   bool                  _do_method_data_update; // True if we generate code to update MethodData*s
 301   int                   _AliasLevel;            // Locally-adjusted version of AliasLevel flag.
 302   bool                  _print_assembly;        // True if we should dump assembly code for this compilation
 303 #ifndef PRODUCT
 304   bool                  _trace_opto_output;
 305   bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
 306 #endif
 307 
 308   // JSR 292
 309   bool                  _has_method_handle_invokes; // True if this method has MethodHandle invokes.
 310 
 311   // Compilation environment.


 360   Arena*                _type_arena;            // Alias for _Compile_types except in Initialize_shared()
 361   Dict*                 _type_dict;             // Intern table
 362   void*                 _type_hwm;              // Last allocation (see Type::operator new/delete)
 363   size_t                _type_last_size;        // Last allocation size (see Type::operator new/delete)
 364   ciMethod*             _last_tf_m;             // Cache for
 365   const TypeFunc*       _last_tf;               //  TypeFunc::make
 366   AliasType**           _alias_types;           // List of alias types seen so far.
 367   int                   _num_alias_types;       // Logical length of _alias_types
 368   int                   _max_alias_types;       // Physical length of _alias_types
 369   AliasCacheEntry       _alias_cache[AliasCacheSize]; // Gets aliases w/o data structure walking
 370 
 371   // Parsing, optimization
 372   PhaseGVN*             _initial_gvn;           // Results of parse-time PhaseGVN
 373   Unique_Node_List*     _for_igvn;              // Initial work-list for next round of Iterative GVN
 374   WarmCallInfo*         _warm_calls;            // Sorted work-list for heat-based inlining.
 375 
 376   GrowableArray<CallGenerator*> _late_inlines;        // List of CallGenerators to be revisited after
 377                                                       // main parsing has finished.
 378   GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations
 379 
 380   GrowableArray<CallGenerator*> _boxing_late_inlines; // same but for boxing operations
 381 
 382   int                           _late_inlines_pos;    // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
 383   uint                          _number_of_mh_late_inlines; // number of method handle late inlining still pending
 384 
 385 
 386   // Inlining may not happen in parse order which would make
 387   // PrintInlining output confusing. Keep track of PrintInlining
 388   // pieces in order.
 389   class PrintInliningBuffer : public ResourceObj {
 390    private:
 391     CallGenerator* _cg;
 392     stringStream* _ss;
 393 
 394    public:
 395     PrintInliningBuffer()
 396       : _cg(NULL) { _ss = new stringStream(); }
 397 
 398     stringStream* ss() const { return _ss; }
 399     CallGenerator* cg() const { return _cg; }
 400     void set_cg(CallGenerator* cg) { _cg = cg; }
 401   };
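
The comment above states the intent: inlining decisions can be reported out of parse order, so each call site gets its own buffer and the buffers are dumped in their original order. A small self-contained analogue of that buffering scheme, using plain std::string in place of stringStream:

    // One text buffer per call site, created in parse order; messages
    // may arrive in any order, but the final dump walks the buffers in
    // their original order so the report reads as if inlining happened
    // in parse order.
    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> buffers;
      buffers.resize(3);                        // three call sites, parse order

      buffers[2] += "  site C: late inline\n";  // messages arrive out of order
      buffers[0] += "  site A: inlined\n";
      buffers[1] += "  site B: too large\n";

      for (const std::string& s : buffers)      // dump restores parse order
        std::fputs(s.c_str(), stdout);
      return 0;
    }
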


 473   BufferBlob*           _scratch_buffer_blob;   // For temporary code buffers.
 474   relocInfo*            _scratch_locs_memory;   // For temporary code buffers.
 475   int                   _scratch_const_size;    // For temporary code buffers.
 476   bool                  _in_scratch_emit_size;  // true when in scratch_emit_size.
 477 
 478  public:
 479   // Accessors
 480 
 481   // The Compile instance currently active in this (compiler) thread.
 482   static Compile* current() {
 483     return (Compile*) ciEnv::current()->compiler_data();
 484   }
 485 
 486   // ID for this compilation.  Useful for setting breakpoints in the debugger.
 487   int               compile_id() const          { return _compile_id; }
 488 
 489   // Does this compilation allow instructions to subsume loads?  User
 490   // instructions that subsume a load may result in an unschedulable
 491   // instruction sequence.
 492   bool              subsume_loads() const       { return _subsume_loads; }
 493   /**
 494    * Do escape analysis.
 495    */
 496   bool              do_escape_analysis() const  { return _do_escape_analysis; }
 497   /**
 498    * Do boxing elimination.
 499    */
 500   bool              eliminate_boxing() const    { return _eliminate_boxing; }
 501   /**
 502    * Do aggressive boxing elimination.
 503    */
 504   bool              aggressive_unboxing() const { return _eliminate_boxing && AggressiveUnboxing; }
 505   bool              save_argument_registers() const { return _save_argument_registers; }
 506 
 507 
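
To make the subsume_loads remark above concrete: on a machine with memory operands, the matcher may fold a Load into the instruction that consumes it, so the load stops existing as a separately schedulable step. A toy, self-contained sketch with an invented two-node IR (not C2's matcher); the x86-style output is illustrative only:

    #include <cstdio>
    #include <cstring>

    struct Node { const char* op; Node* in1; };

    // If the operand of Add is a Load, fold it into a single
    // add-from-memory instruction; otherwise keep the load separate.
    static void match_add(const Node* n) {
      if (n->in1 != nullptr && std::strcmp(n->in1->op, "Load") == 0) {
        std::printf("addl (%%rsi), %%eax      # load subsumed into the add\n");
      } else {
        std::printf("movl (%%rsi), %%ecx\naddl %%ecx, %%eax\n");
      }
    }

    int main() {
      Node load{"Load", nullptr};
      Node add{"Add", &load};
      match_add(&add);   // one fused instruction instead of two
      return 0;
    }
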
 508   // Other fixed compilation parameters.
 509   ciMethod*         method() const              { return _method; }
 510   int               entry_bci() const           { return _entry_bci; }
 511   bool              is_osr_compilation() const  { return _entry_bci != InvocationEntryBci; }
 512   bool              is_method_compilation() const { return (_method != NULL && !_method->flags().is_native()); }
 513   const TypeFunc*   tf() const                  { assert(_tf!=NULL, ""); return _tf; }
 514   void         init_tf(const TypeFunc* tf)      { assert(_tf==NULL, ""); _tf = tf; }
 515   InlineTree*       ilt() const                 { return _ilt; }
 516   address           stub_function() const       { return _stub_function; }
 517   const char*       stub_name() const           { return _stub_name; }
 518   address           stub_entry_point() const    { return _stub_entry_point; }
 519 
 520   // Control of this compilation.
 521   int               fixed_slots() const         { assert(_fixed_slots >= 0, "");         return _fixed_slots; }
 522   void          set_fixed_slots(int n)          { _fixed_slots = n; }
 523   int               major_progress() const      { return _major_progress; }
 524   void          set_inlining_progress(bool z)   { _inlining_progress = z; }
 525   int               inlining_progress() const   { return _inlining_progress; }
 526   void          set_inlining_incrementally(bool z) { _inlining_incrementally = z; }
 527   int               inlining_incrementally() const { return _inlining_incrementally; }
 528   void          set_major_progress()            { _major_progress++; }
 529   void        clear_major_progress()            { _major_progress = 0; }
 530   int               num_loop_opts() const       { return _num_loop_opts; }
 531   void          set_num_loop_opts(int n)        { _num_loop_opts = n; }
 532   int               max_inline_size() const     { return _max_inline_size; }
 533   void          set_freq_inline_size(int n)     { _freq_inline_size = n; }
 534   int               freq_inline_size() const    { return _freq_inline_size; }
 535   void          set_max_inline_size(int n)      { _max_inline_size = n; }
 536   bool              has_loops() const           { return _has_loops; }
 537   void          set_has_loops(bool z)           { _has_loops = z; }
 538   bool              has_split_ifs() const       { return _has_split_ifs; }
 539   void          set_has_split_ifs(bool z)       { _has_split_ifs = z; }
 540   bool              has_unsafe_access() const   { return _has_unsafe_access; }
 541   void          set_has_unsafe_access(bool z)   { _has_unsafe_access = z; }
 542   bool              has_stringbuilder() const   { return _has_stringbuilder; }
 543   void          set_has_stringbuilder(bool z)   { _has_stringbuilder = z; }
 544   bool              has_boxed_value() const     { return _has_boxed_value; }
 545   void          set_has_boxed_value(bool z)     { _has_boxed_value = z; }
 546   int               max_vector_size() const     { return _max_vector_size; }
 547   void          set_max_vector_size(int s)      { _max_vector_size = s; }
 548   void          set_trap_count(uint r, uint c)  { assert(r < trapHistLength, "oob");        _trap_hist[r] = c; }
 549   uint              trap_count(uint r) const    { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
 550   bool              trap_can_recompile() const  { return _trap_can_recompile; }
 551   void          set_trap_can_recompile(bool z)  { _trap_can_recompile = z; }
 552   uint              decompile_count() const     { return _decompile_count; }
 553   void          set_decompile_count(uint c)     { _decompile_count = c; }
 554   bool              allow_range_check_smearing() const;
 555   bool              do_inlining() const         { return _do_inlining; }
 556   void          set_do_inlining(bool z)         { _do_inlining = z; }
 557   bool              do_scheduling() const       { return _do_scheduling; }
 558   void          set_do_scheduling(bool z)       { _do_scheduling = z; }
 559   bool              do_freq_based_layout() const{ return _do_freq_based_layout; }
 560   void          set_do_freq_based_layout(bool z){ _do_freq_based_layout = z; }
 561   bool              do_count_invocations() const{ return _do_count_invocations; }
 562   void          set_do_count_invocations(bool z){ _do_count_invocations = z; }
 563   bool              do_method_data_update() const { return _do_method_data_update; }
 564   void          set_do_method_data_update(bool z) { _do_method_data_update = z; }
 565   int               AliasLevel() const          { return _AliasLevel; }


 578   // JSR 292
 579   bool              has_method_handle_invokes() const { return _has_method_handle_invokes;     }
 580   void          set_has_method_handle_invokes(bool z) {        _has_method_handle_invokes = z; }
 581 
 582   void begin_method() {
 583 #ifndef PRODUCT
 584     if (_printer) _printer->begin_method(this);
 585 #endif
 586   }
 587   void print_method(const char * name, int level = 1) {
 588 #ifndef PRODUCT
 589     if (_printer) _printer->print_method(this, name, level);
 590 #endif
 591   }
 592   void end_method() {
 593 #ifndef PRODUCT
 594     if (_printer) _printer->end_method();
 595 #endif
 596   }
 597 
 598   int           macro_count()             const { return _macro_nodes->length(); }
 599   int           predicate_count()         const { return _predicate_opaqs->length();}
 600   int           expensive_count()         const { return _expensive_nodes->length(); }
 601   Node*         macro_node(int idx)       const { return _macro_nodes->at(idx); }
 602   Node*         predicate_opaque1_node(int idx) const { return _predicate_opaqs->at(idx);}
 603   Node*         expensive_node(int idx)   const { return _expensive_nodes->at(idx); }
 604   ConnectionGraph* congraph()                   { return _congraph;}
 605   void set_congraph(ConnectionGraph* congraph)  { _congraph = congraph;}
 606   void add_macro_node(Node * n) {
 607     //assert(n->is_macro(), "must be a macro node");
 608     assert(!_macro_nodes->contains(n), " duplicate entry in expand list");
 609     _macro_nodes->append(n);
 610   }
 611   void remove_macro_node(Node * n) {
 612     // this function may be called twice for a node so check
 613     // that the node is in the array before attempting to remove it
 614     if (_macro_nodes->contains(n))
 615       _macro_nodes->remove(n);
 616     // remove from _predicate_opaqs list also if it is there
 617     if (predicate_count() > 0 && _predicate_opaqs->contains(n)){
 618       _predicate_opaqs->remove(n);
 619     }
 620   }
 621   void add_expensive_node(Node * n);
 622   void remove_expensive_node(Node * n) {
 623     if (_expensive_nodes->contains(n)) {


 765     _last_tf = tf;
 766   }
 767 
 768   AliasType*        alias_type(int                idx)  { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
 769   AliasType*        alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); }
 770   bool         have_alias_type(const TypePtr* adr_type);
 771   AliasType*        alias_type(ciField*         field);
 772 
 773   int               get_alias_index(const TypePtr* at)  { return alias_type(at)->index(); }
 774   const TypePtr*    get_adr_type(uint aidx)             { return alias_type(aidx)->adr_type(); }
 775   int               get_general_index(uint aidx)        { return alias_type(aidx)->general_index(); }
 776 
 777   // Building nodes
 778   void              rethrow_exceptions(JVMState* jvms);
 779   void              return_values(JVMState* jvms);
 780   JVMState*         build_start_state(StartNode* start, const TypeFunc* tf);
 781 
 782   // Decide how to build a call.
 783   // The profile factor is a discount to apply to this site's interp. profile.
 784   CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch, JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true, bool delayed_forbidden = false);
 785   bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
 786     return should_delay_string_inlining(call_method, jvms) ||
 787            should_delay_boxing_inlining(call_method, jvms);
 788   }
 789   bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
 790   bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);
 791 
 792   // Helper functions to identify inlining potential at call-site
 793   ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
 794                                   ciMethod* callee, const TypeOopPtr* receiver_type,
 795                                   bool is_virtual,
 796                                   bool &call_does_dispatch, int &vtable_index);
 797   ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
 798                               ciMethod* callee, const TypeOopPtr* receiver_type);
 799 
 800   // Report if there were too many traps at the current method and bci.
 801   // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
 802   // If there is no MDO at all, report no trap unless told to assume it.
 803   bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
 804   // This version, unspecific to a particular bci, asks if
 805   // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
 806   bool too_many_traps(Deoptimization::DeoptReason reason,
 807                       // Privately used parameter for logging:
 808                       ciMethodData* logmd = NULL);
 809   // Report if there were too many recompiles at a method and bci.
 810   bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
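
These queries rest on per-reason trap bookkeeping like the _trap_hist array declared above: counts accumulate per deoptimization reason, and a limit (the role PerMethodTrapLimit plays) decides when to stop re-optimizing. A minimal standalone sketch of that accounting; the reason enum and limit value are invented for illustration:

    #include <cstdio>

    enum DeoptReason { Reason_null_check, Reason_range_check, Reason_LIMIT };

    struct TrapHistory {
      unsigned counts[Reason_LIMIT] = {0};
      unsigned limit;                 // stand-in for PerMethodTrapLimit
      explicit TrapHistory(unsigned l) : limit(l) {}

      void record_trap(DeoptReason r)          { counts[r]++; }
      bool too_many_traps(DeoptReason r) const { return counts[r] > limit; }
    };

    int main() {
      TrapHistory h(2);
      for (int i = 0; i < 3; i++) h.record_trap(Reason_range_check);
      // Once the limit is exceeded, a compiler would stop emitting the
      // optimistic fast path and generate the checked path instead.
      std::printf("give up on range-check traps? %s\n",
                  h.too_many_traps(Reason_range_check) ? "yes" : "no");
      return 0;
    }
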


 826   void              remove_useless_nodes (Unique_Node_List &useful);
 827 
 828   WarmCallInfo*     warm_calls() const          { return _warm_calls; }
 829   void          set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
 830   WarmCallInfo* pop_warm_call();
 831 
 832   // Record this CallGenerator for inlining at the end of parsing.
 833   void              add_late_inline(CallGenerator* cg)        {
 834     _late_inlines.insert_before(_late_inlines_pos, cg);
 835     _late_inlines_pos++;
 836   }
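
The _late_inlines_pos comment explains the trick: inserting each new candidate at a moving cursor, as add_late_inline does, queues nested candidates directly after their parent and ahead of older siblings, which emulates depth-first inlining order. A standalone sketch with a plain vector (illustrative names, not HotSpot's GrowableArray):

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> queue = {"A", "B"};  // two top-level candidates
      size_t pos = 0;  // plays the role of _late_inlines_pos

      // Processing "A" discovers nested candidates A1 and A2: insert them
      // at the cursor so they run before "B" (depth-first), not after it.
      pos = 1;  // cursor now points just past "A"
      queue.insert(queue.begin() + pos, "A1"); pos++;
      queue.insert(queue.begin() + pos, "A2"); pos++;

      for (const std::string& s : queue) std::printf("%s ", s.c_str());
      std::printf("\n");  // prints: A A1 A2 B
      return 0;
    }
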
 837 
 838   void              prepend_late_inline(CallGenerator* cg)    {
 839     _late_inlines.insert_before(0, cg);
 840   }
 841 
 842   void              add_string_late_inline(CallGenerator* cg) {
 843     _string_late_inlines.push(cg);
 844   }
 845 
 846   void              add_boxing_late_inline(CallGenerator* cg) {
 847     _boxing_late_inlines.push(cg);
 848   }
 849 
 850   void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful);
 851 
 852   void dump_inlining();
 853 
 854   bool over_inlining_cutoff() const {
 855     if (!inlining_incrementally()) {
 856       return unique() > (uint)NodeCountInliningCutoff;
 857     } else {
 858       return live_nodes() > (uint)LiveNodeCountInliningCutoff;
 859     }
 860   }
 861 
 862   void inc_number_of_mh_late_inlines() { _number_of_mh_late_inlines++; }
 863   void dec_number_of_mh_late_inlines() { assert(_number_of_mh_late_inlines > 0, "_number_of_mh_late_inlines < 0 !"); _number_of_mh_late_inlines--; }
 864   bool has_mh_late_inlines() const     { return _number_of_mh_late_inlines > 0; }
 865 
 866   void inline_incrementally_one(PhaseIterGVN& igvn);
 867   void inline_incrementally(PhaseIterGVN& igvn);
 868   void inline_string_calls(bool parse_time);
 869   void inline_boxing_calls(PhaseIterGVN& igvn);
 870 
 871   // Matching, CFG layout, allocation, code generation
 872   PhaseCFG*         cfg()                       { return _cfg; }
 873   bool              select_24_bit_instr() const { return _select_24_bit_instr; }
 874   bool              in_24_bit_fp_mode() const   { return _in_24_bit_fp_mode; }
 875   bool              has_java_calls() const      { return _java_calls > 0; }
 876   int               java_calls() const          { return _java_calls; }
 877   int               inner_loops() const         { return _inner_loops; }
 878   Matcher*          matcher()                   { return _matcher; }
 879   PhaseRegAlloc*    regalloc()                  { return _regalloc; }
 880   int               frame_slots() const         { return _frame_slots; }
 881   int               frame_size_in_words() const; // frame_slots in units of the polymorphic 'words'
 882   RegMask&          FIRST_STACK_mask()          { return _FIRST_STACK_mask; }
 883   Arena*            indexSet_arena()            { return _indexSet_arena; }
 884   void*             indexSet_free_block_list()  { return _indexSet_free_block_list; }
 885   uint              node_bundling_limit()       { return _node_bundling_limit; }
 886   Bundle*           node_bundling_base()        { return _node_bundling_base; }
 887   void          set_node_bundling_limit(uint n) { _node_bundling_limit = n; }
 888   void          set_node_bundling_base(Bundle* b) { _node_bundling_base = b; }
 889   bool          starts_bundle(const Node *n) const;


 922   relocInfo*        scratch_locs_memory()       { return _scratch_locs_memory; }
 923   void          set_scratch_locs_memory(relocInfo* b)  { _scratch_locs_memory = b; }
 924 
 925   // emit to scratch blob, report resulting size
 926   uint              scratch_emit_size(const Node* n);
 927   void       set_in_scratch_emit_size(bool x)   {        _in_scratch_emit_size = x; }
 928   bool           in_scratch_emit_size() const   { return _in_scratch_emit_size;     }
 929 
 930   enum ScratchBufferBlob {
 931     MAX_inst_size       = 1024,
 932     MAX_locs_size       = 128, // number of relocInfo elements
 933     MAX_const_size      = 128,
 934     MAX_stubs_size      = 128
 935   };
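
scratch_emit_size above illustrates a common two-pass trick: emit the instruction into a throwaway buffer bounded by MAX_inst_size, read off the cursor, and discard the bytes. The sketch below reproduces the shape of that idea with an invented variable-length encoding; it is not HotSpot's emitter:

    #include <cassert>
    #include <cstdio>

    enum { MAX_inst_size = 1024 };   // mirrors the cap on any single emit

    struct ScratchBuffer {
      unsigned char bytes[MAX_inst_size];
      int pos = 0;
      void emit_byte(unsigned char b) {
        assert(pos < MAX_inst_size);
        bytes[pos++] = b;
      }
    };

    // A stand-in "instruction": a 1-byte opcode plus either a 1-byte or
    // a 4-byte immediate, depending on the value.
    static int scratch_emit_size(int imm) {
      ScratchBuffer sb;                       // throwaway buffer
      sb.emit_byte(0x05);                     // opcode
      if (imm >= -128 && imm < 128) {
        sb.emit_byte((unsigned char)imm);     // short form
      } else {
        for (int i = 0; i < 4; i++)
          sb.emit_byte((unsigned char)(imm >> (8 * i)));
      }
      return sb.pos;                          // size only; bytes are discarded
    }

    int main() {
      std::printf("size(add 1)    = %d bytes\n", scratch_emit_size(1));
      std::printf("size(add 1000) = %d bytes\n", scratch_emit_size(1000));
      return 0;
    }
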
 936 
 937   // Major entry point.  Given a Scope, compile the associated method.
 938   // For normal compilations, entry_bci is InvocationEntryBci.  For on stack
 939   // replacement, entry_bci indicates the bytecode for which to compile a
 940   // continuation.
 941   Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
 942           int entry_bci, bool subsume_loads, bool do_escape_analysis,
 943           bool eliminate_boxing);
 944 
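
The booleans in this signature (subsume_loads, do_escape_analysis, eliminate_boxing) are fixed per compilation and stored in the const fields declared earlier. Below is a hedged sketch of how a caller might derive them from global flags before constructing a Compile; the flag names are real C2 switches, but the derivation shown (including the JVMTI-based suppression) is an illustrative assumption, not the exact broker logic:

    #include <cstdio>

    // Simplified stand-ins for the global command-line flags.
    static bool SubsumeLoads     = true;
    static bool DoEscapeAnalysis = true;
    static bool EliminateAutoBox = true;

    struct CompilationParams {
      const bool subsume_loads;
      const bool do_escape_analysis;
      const bool eliminate_boxing;
    };

    static CompilationParams make_params(bool jvmti_needs_exact_frames) {
      return CompilationParams{
        SubsumeLoads,
        // e.g. escape analysis may be suppressed when a debugger needs
        // exact object identity (an assumption for illustration):
        DoEscapeAnalysis && !jvmti_needs_exact_frames,
        EliminateAutoBox
      };
    }

    int main() {
      CompilationParams p = make_params(/*jvmti_needs_exact_frames=*/false);
      std::printf("subsume_loads=%d escape_analysis=%d eliminate_boxing=%d\n",
                  p.subsume_loads, p.do_escape_analysis, p.eliminate_boxing);
      return 0;
    }
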
 945   // Second major entry point.  From the TypeFunc signature, generate code
 946   // to pass arguments from the Java calling convention to the C calling
 947   // convention.
 948   Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
 949           address stub_function, const char *stub_name,
 950           int is_fancy_jump, bool pass_tls,
 951           bool save_arg_registers, bool return_pc);
 952 
 953   // From the TypeFunc signature, generate code to pass arguments
 954   // from Compiled calling convention to Interpreter's calling convention
 955   void Generate_Compiled_To_Interpreter_Graph(const TypeFunc *tf, address interpreter_entry);
 956 
 957   // From the TypeFunc signature, generate code to pass arguments
 958   // from Interpreter's calling convention to Compiler's calling convention
 959   void Generate_Interpreter_To_Compiled_Graph(const TypeFunc *tf);
 960 
 961   // Are we compiling a method?
 962   bool has_method() { return method() != NULL; }
 963 

