src/share/vm/opto/compile.hpp

rev 3898 : 8005031: Some cleanup in c2 to prepare for incremental inlining support
Summary: collection of small changes to prepare for incremental inlining.
Reviewed-by:


  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_OPTO_COMPILE_HPP
  26 #define SHARE_VM_OPTO_COMPILE_HPP
  27 
  28 #include "asm/codeBuffer.hpp"
  29 #include "ci/compilerInterface.hpp"
  30 #include "code/debugInfoRec.hpp"
  31 #include "code/exceptionHandlerTable.hpp"
  32 #include "compiler/compilerOracle.hpp"
  33 #include "compiler/compileBroker.hpp"
  34 #include "libadt/dict.hpp"
  35 #include "libadt/port.hpp"
  36 #include "libadt/vectset.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "opto/idealGraphPrinter.hpp"
  39 #include "opto/phase.hpp"
  40 #include "opto/regmask.hpp"
  41 #include "runtime/deoptimization.hpp"
  42 #include "runtime/vmThread.hpp"
  43 
  44 class Block;
  45 class Bundle;
  46 class C2Compiler;
  47 class CallGenerator;
  48 class ConnectionGraph;
  49 class InlineTree;
  50 class Int_Array;
  51 class Matcher;
  52 class MachConstantNode;
  53 class MachConstantBaseNode;


 353   Arena                 _Compile_types;         // Arena for all types
 354   Arena*                _type_arena;            // Alias for _Compile_types except in Initialize_shared()
 355   Dict*                 _type_dict;             // Intern table
 356   void*                 _type_hwm;              // Last allocation (see Type::operator new/delete)
 357   size_t                _type_last_size;        // Last allocation size (see Type::operator new/delete)
 358   ciMethod*             _last_tf_m;             // Cache for
 359   const TypeFunc*       _last_tf;               //  TypeFunc::make
 360   AliasType**           _alias_types;           // List of alias types seen so far.
 361   int                   _num_alias_types;       // Logical length of _alias_types
 362   int                   _max_alias_types;       // Physical length of _alias_types
 363   AliasCacheEntry       _alias_cache[AliasCacheSize]; // Gets aliases w/o data structure walking
 364 
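Two of the fields above are small caches: _last_tf_m / _last_tf memoize the most recent TypeFunc::make result for a method, and _alias_cache is a direct-mapped table consulted before walking the _alias_types list. A standalone sketch of both caching patterns, with illustrative types, table size, and hash (none of these are the HotSpot definitions):

    #include <cstddef>
    #include <cstdint>

    // Stand-ins for ciMethod / TypeFunc, only for this sketch.
    struct Method;
    struct FuncType;

    // One-entry memo: repeated requests for the same method's TypeFunc hit the cache.
    struct LastTypeFuncCache {
      const Method*   last_m  = nullptr;
      const FuncType* last_tf = nullptr;

      const FuncType* get_or_make(const Method* m,
                                  const FuncType* (*make)(const Method*)) {
        if (m == last_m) return last_tf;   // hit: reuse the cached result
        last_tf = make(m);                 // miss: build it and remember the pair
        last_m  = m;
        return last_tf;
      }
    };

    // Direct-mapped front cache: hash the address type to one slot, overwrite on collision.
    struct AliasCacheEntry { const void* adr_type; int index; };

    struct AliasCache {
      static const std::size_t Size = 256;   // illustrative size only
      AliasCacheEntry entries[Size] = {};

      AliasCacheEntry* probe(const void* adr_type) {
        std::size_t slot = static_cast<std::size_t>(
            (reinterpret_cast<std::uintptr_t>(adr_type) >> 3) & (Size - 1));
        return &entries[slot];              // caller checks entry->adr_type before trusting index
      }
    };

On a probe hit the full _alias_types walk is skipped; on a miss the caller falls back to the slow lookup and overwrites the slot.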
 365   // Parsing, optimization
 366   PhaseGVN*             _initial_gvn;           // Results of parse-time PhaseGVN
 367   Unique_Node_List*     _for_igvn;              // Initial work-list for next round of Iterative GVN
 368   WarmCallInfo*         _warm_calls;            // Sorted work-list for heat-based inlining.
 369 
 370   GrowableArray<CallGenerator*> _late_inlines;  // List of CallGenerators to be revisited after
 371                                                 // main parsing has finished.
 372 
 373   // Inlining may not happen in parse order, which would make
 374   // PrintInlining output confusing. Keep track of the PrintInlining
 375   // pieces in order.
 376   class PrintInliningEvent : public ResourceObj {
 377    private:
 378     CallGenerator* _cg;
 379     stringStream* _ss;
 380 
 381    public:
 382     PrintInliningEvent() 
 383       : _cg(NULL) { _ss = new stringStream(); }
 384 
 385     stringStream* ss() const { return _ss; }
 386     CallGenerator* cg() const { return _cg; }
 387     void set_cg(CallGenerator* cg) { _cg = cg; }
 388   };
 389   
 390   GrowableArray<PrintInliningEvent>* _print_inlining_list;
 391   int _print_inlining;
 392 
 393  public:
 394 
 395   outputStream* print_inlining_stream() const {
 396     return _print_inlining_list->at(_print_inlining).ss();
 397   }
 398 
 399   void print_inlining_skip(CallGenerator* cg) {
 400     if (PrintInlining) {
 401       _print_inlining_list->at(_print_inlining).set_cg(cg);
 402       _print_inlining++;
 403       _print_inlining_list->insert_before(_print_inlining, PrintInliningEvent());
 404     }
 405   }
 406 
 407   void print_inlining_insert(CallGenerator* cg) {
 408     if (PrintInlining) {
 409       for (int i = 0; i < _print_inlining_list->length(); i++) {
 410         if (_print_inlining_list->at(i).cg() == cg) {
 411           _print_inlining_list->insert_before(i+1, PrintInliningEvent());
 412           _print_inlining = i+1;
 413           _print_inlining_list->at(i).set_cg(NULL);
 414           return;
 415         }
 416       }
 417       ShouldNotReachHere();
 418     }
 419   }
 420 
 421   void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
 422     stringStream ss;
 423     CompileTask::print_inlining(&ss, method, inline_level, bci, msg);
 424     print_inlining_stream()->print("%s", ss.as_string());
 425   }
 426 
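A self-contained model of the scheme above may help (plain C++ with assumed names, not the HotSpot code): each call site writes into a buffered slot, print_inlining_skip() tags the current slot with the deferred CallGenerator and opens a fresh one, and print_inlining_insert() later splices a new slot right after the tagged one, so the final output stays in call-site order even when late inlining completes out of order.

    #include <cassert>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Illustrative model of the PrintInliningEvent list; not the HotSpot types.
    struct Event { const void* cg = nullptr; std::string text; };

    struct InliningLog {
      std::vector<Event> events;
      int cur = 0;                       // slot currently receiving output

      InliningLog() : events(1) {}       // start with one open slot

      void print(const std::string& s) { events[cur].text += s; }

      // Like print_inlining_skip(): tag the current slot with the deferred
      // CallGenerator and open a fresh slot after it.
      void skip(const void* cg) {
        events[cur].cg = cg;
        ++cur;
        events.insert(events.begin() + cur, Event());
      }

      // Like print_inlining_insert(): when the deferred site is finally inlined,
      // splice a new slot right after its tagged slot and write there.
      void insert_for(const void* cg) {
        for (int i = 0; i < (int)events.size(); i++) {
          if (events[i].cg == cg) {
            events.insert(events.begin() + i + 1, Event());
            cur = i + 1;
            events[i].cg = nullptr;
            return;
          }
        }
        assert(false && "unknown call generator");
      }

      void dump() const {
        for (const Event& e : events) std::fputs(e.text.c_str(), stdout);
      }
    };

    int main() {
      int a = 0, b = 0;                  // addresses stand in for CallGenerator ids
      InliningLog log;
      log.print("site 1: inlined foo()\n");
      log.skip(&a);                      // site 2 deferred for late inlining
      log.print("site 3: inlined baz()\n");
      log.skip(&b);                      // site 4 deferred as well
      log.insert_for(&b);                // late inlining completes out of order...
      log.print("site 4: inlined qux()\n");
      log.insert_for(&a);
      log.print("site 2: inlined bar()\n");
      log.dump();                        // ...yet the dump reads 1, 2, 3, 4
      return 0;
    }

Running it prints the four sites in order 1, 2, 3, 4 even though site 4's text was written before site 2's.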
 427  private:
 428   // Matching, CFG layout, allocation, code generation
 429   PhaseCFG*             _cfg;                   // Results of CFG finding
 430   bool                  _select_24_bit_instr;   // We selected an instruction with a 24-bit result
 431   bool                  _in_24_bit_fp_mode;     // We are emitting instructions with 24-bit results
 432   int                   _java_calls;            // Number of java calls in the method
 433   int                   _inner_loops;           // Number of inner loops in the method
 434   Matcher*              _matcher;               // Engine to map ideal to machine instructions
 435   PhaseRegAlloc*        _regalloc;              // Results of register allocation.
 436   int                   _frame_slots;           // Size of total frame in stack slots
 437   CodeOffsets           _code_offsets;          // Offsets into the code for various interesting entries
 438   RegMask               _FIRST_STACK_mask;      // All stack slots usable for spills (depends on frame layout)
 439   Arena*                _indexSet_arena;        // control IndexSet allocation within PhaseChaitin
 440   void*                 _indexSet_free_block_list; // free list of IndexSet bit blocks
 441 
 442   uint                  _node_bundling_limit;
 443   Bundle*               _node_bundling_base;    // Information for instruction bundling
 444 
 445   // Instruction bits passed off to the VM
 446   int                   _method_size;           // Size of nmethod code segment in bytes
 447   CodeBuffer            _code_buffer;           // Where the code is assembled


 630   Arena*       old_arena()                 { return &_old_arena; }
 631   RootNode*    root() const                { return _root; }
 632   void         set_root(RootNode* r)       { _root = r; }
 633   StartNode*   start() const;              // (Derived from root.)
 634   void         init_start(StartNode* s);
 635   Node*        immutable_memory();
 636 
 637   Node*        recent_alloc_ctl() const    { return _recent_alloc_ctl; }
 638   Node*        recent_alloc_obj() const    { return _recent_alloc_obj; }
 639   void         set_recent_alloc(Node* ctl, Node* obj) {
 640                                                   _recent_alloc_ctl = ctl;
 641                                                   _recent_alloc_obj = obj;
 642                                            }
 643   void         record_dead_node(uint idx)  { if (_dead_node_list.test_set(idx)) return;
 644                                              _dead_node_count++;
 645                                            }
 646   uint         dead_node_count()           { return _dead_node_count; }
 647   void         reset_dead_node_list()      { _dead_node_list.Reset();
 648                                              _dead_node_count = 0;
 649                                            }
 650   uint          live_nodes() const         {
 651     int  val = _unique - _dead_node_count;
 652     assert (val >= 0, err_msg_res("number of tracked dead nodes %d more than created nodes %d", _dead_node_count, _unique));
 653             return (uint) val;
 654                                            }
 655 #ifdef ASSERT
 656   uint         count_live_nodes_by_graph_walk();
 657   void         print_missing_nodes();
 658 #endif
 659 
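The dead-node bookkeeping above is simple arithmetic: _unique counts every node ever created, _dead_node_list is a bitset that ensures a node is counted dead at most once (test_set), and live_nodes() is the difference. A tiny standalone model, with a std::set standing in for the VectorSet:

    #include <cassert>
    #include <set>

    // Minimal stand-in for Compile's node accounting (illustration only).
    struct NodeAccounting {
      unsigned unique = 0;               // ids handed out so far, like _unique
      unsigned dead   = 0;               // nodes recorded dead exactly once
      std::set<unsigned> dead_set;       // plays the role of _dead_node_list

      unsigned new_node()            { return unique++; }
      void record_dead(unsigned idx) { if (dead_set.insert(idx).second) dead++; }
      unsigned live() const          { assert(unique >= dead); return unique - dead; }
    };

    int main() {
      NodeAccounting na;
      unsigned a = na.new_node(), b = na.new_node(), c = na.new_node();
      (void)a; (void)c;
      na.record_dead(b);
      na.record_dead(b);               // duplicate report is ignored, like test_set()
      return na.live() == 2 ? 0 : 1;   // 3 created, 1 dead -> 2 live
    }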
 660   // Constant table
 661   ConstantTable&   constant_table() { return _constant_table; }
 662 
 663   MachConstantBaseNode*     mach_constant_base_node();
 664   bool                  has_mach_constant_base_node() const { return _mach_constant_base_node != NULL; }
 665 
 666   // Handy undefined Node
 667   Node*             top() const                 { return _top; }
 668 
 669   // These are used by code that needs to know about creation and transformation of top:
 670   Node*             cached_top_node()           { return _top; }


 749   Unique_Node_List* for_igvn()                  { return _for_igvn; }
 750   inline void       record_for_igvn(Node* n);   // Body is after class Unique_Node_List.
 751   void          set_initial_gvn(PhaseGVN *gvn)           { _initial_gvn = gvn; }
 752   void          set_for_igvn(Unique_Node_List *for_igvn) { _for_igvn = for_igvn; }
 753 
 754   // Replace n by nn using initial_gvn, calling hash_delete and
 755   // record_for_igvn as needed.
 756   void gvn_replace_by(Node* n, Node* nn);
 757 
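gvn_replace_by() is only declared here, but the comment describes a pattern that is easy to show in isolation: every user of n is dropped from the value-numbering hash table, has its edge rerouted from n to nn, and is queued for another IGVN pass. A rough sketch with made-up node and table types (not the real PhaseGVN machinery):

    #include <algorithm>
    #include <unordered_set>
    #include <vector>

    // Made-up node type; only what the sketch needs.
    struct Node {
      std::vector<Node*> in;     // this node's inputs
      std::vector<Node*> outs;   // nodes that use this one
    };

    struct SimpleGVN {
      std::unordered_set<Node*> hash_table;   // stands in for the GVN hash table
      std::vector<Node*>        igvn_work;    // stands in for the _for_igvn worklist

      // Replace every use of n by nn, keeping the table and worklist consistent.
      void replace_by(Node* n, Node* nn) {
        for (Node* use : n->outs) {
          hash_table.erase(use);               // like hash_delete(): its inputs change
          std::replace(use->in.begin(), use->in.end(), n, nn);
          nn->outs.push_back(use);
          igvn_work.push_back(use);            // like record_for_igvn(): revisit this user
        }
        n->outs.clear();
        hash_table.erase(n);                   // a replaced node must not stay interned
      }
    };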
 758 
 759   void              identify_useful_nodes(Unique_Node_List &useful);
 760   void              update_dead_node_list(Unique_Node_List &useful);
 761   void              remove_useless_nodes (Unique_Node_List &useful);
 762 
 763   WarmCallInfo*     warm_calls() const          { return _warm_calls; }
 764   void          set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
 765   WarmCallInfo* pop_warm_call();
 766 
 767   // Record this CallGenerator for inlining at the end of parsing.
 768   void              add_late_inline(CallGenerator* cg) { _late_inlines.push(cg); }
 769 
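_late_inlines plus add_late_inline() is a plain defer-and-replay worklist: call sites that parsing decides not to expand right away are pushed onto the list and revisited after the whole method has been parsed. A minimal illustration of the pattern; the std::function items and the inline_late_calls() name are assumptions of this sketch, not the C2 interface:

    #include <cstdio>
    #include <functional>
    #include <utility>
    #include <vector>

    // Illustration only: a deferred-work list shaped like Compile::_late_inlines.
    struct LateInlineList {
      std::vector<std::function<void()> > work;

      void add_late_inline(std::function<void()> cg) { work.push_back(std::move(cg)); }

      // After main parsing has finished, replay the deferred call sites.
      void inline_late_calls() {
        for (auto& cg : work) cg();
        work.clear();
      }
    };

    int main() {
      LateInlineList c;
      std::puts("parsing method...");
      c.add_late_inline([] { std::puts("late: expanding call site #1"); });
      c.add_late_inline([] { std::puts("late: expanding call site #2"); });
      std::puts("parsing done");
      c.inline_late_calls();           // deferred sites handled after parsing
      return 0;
    }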
 770   void dump_inlining();
 771 
 772   // Matching, CFG layout, allocation, code generation
 773   PhaseCFG*         cfg()                       { return _cfg; }
 774   bool              select_24_bit_instr() const { return _select_24_bit_instr; }
 775   bool              in_24_bit_fp_mode() const   { return _in_24_bit_fp_mode; }
 776   bool              has_java_calls() const      { return _java_calls > 0; }
 777   int               java_calls() const          { return _java_calls; }
 778   int               inner_loops() const         { return _inner_loops; }
 779   Matcher*          matcher()                   { return _matcher; }
 780   PhaseRegAlloc*    regalloc()                  { return _regalloc; }
 781   int               frame_slots() const         { return _frame_slots; }
 782   int               frame_size_in_words() const; // frame_slots in units of the polymorphic 'words'
 783   RegMask&          FIRST_STACK_mask()          { return _FIRST_STACK_mask; }
 784   Arena*            indexSet_arena()            { return _indexSet_arena; }
 785   void*             indexSet_free_block_list()  { return _indexSet_free_block_list; }
 786   uint              node_bundling_limit()       { return _node_bundling_limit; }
 787   Bundle*           node_bundling_base()        { return _node_bundling_base; }
 788   void          set_node_bundling_limit(uint n) { _node_bundling_limit = n; }
 789   void          set_node_bundling_base(Bundle* b) { _node_bundling_base = b; }
 790   bool          starts_bundle(const Node *n) const;