
src/share/vm/opto/parse.hpp

rev 10293 : 8150720: Cleanup code around PrintOptoStatistics
Reviewed-by: kvn, shade, vlivanov

--- old/src/share/vm/opto/parse.hpp
  87                                 JVMState* jvms,
  88                                 WarmCallInfo* wci_result);
  89   void        print_inlining(ciMethod* callee_method, int caller_bci,
  90                              ciMethod* caller_method, bool success) const;
  91 
  92   InlineTree* caller_tree()       const { return _caller_tree;  }
  93   InlineTree* callee_at(int bci, ciMethod* m) const;
  94   int         inline_level()      const { return stack_depth(); }
  95   int         stack_depth()       const { return _caller_jvms ? _caller_jvms->depth() : 0; }
  96   const char* msg()               const { return _msg; }
  97   void        set_msg(const char* msg)  { _msg = msg; }
  98 public:
  99   static const char* check_can_parse(ciMethod* callee);
 100 
 101   static InlineTree* build_inline_tree_root();
 102   static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee);
 103 
  104   // For temporary (stack-allocated, stateless) inline trees ("ilts"):
 105   InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int max_inline_level);
 106 
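A hypothetical sketch of how such a temporary tree could be used for a one-off inlining query; the variable names (C, callee, jvms, ratio) and the use of the MaxInlineLevel flag as the depth bound are illustrative assumptions, not code from this changeset:

    // Sketch only: a stack-allocated InlineTree for a one-off query; it is
    // not linked into any caller's subtree list and keeps no lasting state.
    // 'C' (the Compile), 'callee', 'jvms', and 'ratio' are assumed in scope.
    const char* fail = InlineTree::check_can_parse(callee);
    if (fail == NULL) {                     // assumed: NULL means parseable
      InlineTree ilt(C, callee, jvms, ratio, MaxInlineLevel);
      // ... query ilt (e.g. ilt.ok_to_inline(...)) and let it die with scope
    }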
 107   // InlineTree enum
 108   enum InlineStyle {
 109     Inline_do_not_inline             =   0, //
 110     Inline_cha_is_monomorphic        =   1, //
 111     Inline_type_profile_monomorphic  =   2  //
 112   };
 113 
 114   // See if it is OK to inline.
 115   // The receiver is the inline tree for the caller.
 116   //
 117   // The result is a temperature indication.  If it is hot or cold,
 118   // inlining is immediate or undesirable.  Otherwise, the info block
 119   // returned is newly allocated and may be enqueued.
 120   //
 121   // If the method is inlinable, a new inline subtree is created on the fly,
 122   // and may be accessed by find_subtree_from_root.
 123   // The call_method is the dest_method for a special or static invocation.
 124   // The call_method is an optimized virtual method candidate otherwise.
 125   WarmCallInfo* ok_to_inline(ciMethod *call_method, JVMState* caller_jvms, ciCallProfile& profile, WarmCallInfo* wci, bool& should_delay);
 126 
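Read as a contract, the comment above suggests a caller shaped roughly like the following. This is a sketch, assuming 'ilt', 'callee', 'jvms', and 'profile' are in scope, that WarmCallInfo exposes is_hot()/is_cold() temperature queries, and that wci may be a stack scratch buffer; none of that is shown in this file:

    // Sketch: consuming ok_to_inline's temperature result at a call site.
    WarmCallInfo scratch;                   // stack buffer for a warm result
    bool should_delay = false;
    WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch,
                                         should_delay);
    bool allow_inline   = (ci != NULL && !ci->is_cold());  // warm or hot
    bool require_inline = (allow_inline && ci->is_hot());  // hot: inline now
    // A warm result (neither hot nor cold) may be enqueued for later.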
 127   // Information about inlined method
 128   JVMState*   caller_jvms()       const { return _caller_jvms; }
  129   ciMethod*   method()            const { return _method; }
 130   int         caller_bci()        const { return _caller_jvms ? _caller_jvms->bci() : InvocationEntryBci; }
 131   uint        count_inline_bcs()  const { return _count_inline_bcs; }
  132   float       site_invoke_ratio() const { return _site_invoke_ratio; }
 133 


  331   const TypeFunc* _tf;          // My kind of function type
 332   int           _entry_bci;     // the osr bci or InvocationEntryBci
 333 
 334   ciTypeFlow*   _flow;          // Results of previous flow pass.
 335   Block*        _blocks;        // Array of basic-block structs.
 336   int           _block_count;   // Number of elements in _blocks.
 337 
 338   GraphKit      _exits;         // Record all normal returns and throws here.
 339   bool          _wrote_final;   // Did we write a final field?
 340   bool          _wrote_volatile;     // Did we write a volatile field?
 341   bool          _wrote_stable;       // Did we write a @Stable field?
 342   bool          _wrote_fields;       // Did we write any field?
 343   bool          _count_invocations;  // update and test invocation counter
 344   bool          _method_data_update; // update method data oop
 345   Node*         _alloc_with_final;   // An allocation node with final field
 346 
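The _wrote_* flags above let the parser defer fencing decisions to method exit; a minimal sketch of that pattern, assuming GraphKit's insert_mem_bar() and the MemBar opcodes (the exact placement in the real exit code is not shown in this header):

    // Sketch: a constructor that wrote a final field must release-fence on
    // exit so the initialized object publishes safely (JMM final-field rule).
    if (_wrote_final) {
      _exits.insert_mem_bar(Op_MemBarRelease, _alloc_with_final);
    }
    if (_wrote_volatile) {
      _exits.insert_mem_bar(Op_MemBarVolatile);   // conservative fence
    }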
 347   // Variables which track Java semantics during bytecode parsing:
 348 
 349   Block*            _block;     // block currently getting parsed
 350   ciBytecodeStream  _iter;      // stream of this method's bytecodes
 351 
 352   int           _blocks_merged; // Progress meter: state merges from BB preds
 353   int           _blocks_parsed; // Progress meter: BBs actually parsed
 354 
 355   const FastLockNode* _synch_lock; // FastLockNode for synchronized method
 356 
 357 #ifndef PRODUCT
 358   int _max_switch_depth;        // Debugging SwitchRanges.
 359   int _est_switch_depth;        // Debugging SwitchRanges.
 360 #endif
 361 
 362   bool         _first_return;                  // true if return is the first to be parsed
 363   bool         _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
  364   uint         _new_idx;                       // nodes with _idx above this were created during this parse. Used to trim the replaced nodes list.
 365 
 366  public:
 367   // Constructor
 368   Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);
 369 
 370   virtual Parse* is_Parse() const { return (Parse*)this; }
 371 
 372   // Accessors.
  373   JVMState*     caller()        const { return _caller; }

+++ new/src/share/vm/opto/parse.hpp

  87                                 JVMState* jvms,
  88                                 WarmCallInfo* wci_result);
  89   void        print_inlining(ciMethod* callee_method, int caller_bci,
  90                              ciMethod* caller_method, bool success) const;
  91 
  92   InlineTree* caller_tree()       const { return _caller_tree;  }
  93   InlineTree* callee_at(int bci, ciMethod* m) const;
  94   int         inline_level()      const { return stack_depth(); }
  95   int         stack_depth()       const { return _caller_jvms ? _caller_jvms->depth() : 0; }
  96   const char* msg()               const { return _msg; }
  97   void        set_msg(const char* msg)  { _msg = msg; }
  98 public:
  99   static const char* check_can_parse(ciMethod* callee);
 100 
 101   static InlineTree* build_inline_tree_root();
 102   static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee);
 103 
  104   // For temporary (stack-allocated, stateless) inline trees ("ilts"):
 105   InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int max_inline_level);
  106 
 107   // See if it is OK to inline.
 108   // The receiver is the inline tree for the caller.
 109   //
 110   // The result is a temperature indication.  If it is hot or cold,
 111   // inlining is immediate or undesirable.  Otherwise, the info block
 112   // returned is newly allocated and may be enqueued.
 113   //
 114   // If the method is inlinable, a new inline subtree is created on the fly,
 115   // and may be accessed by find_subtree_from_root.
 116   // The call_method is the dest_method for a special or static invocation.
 117   // The call_method is an optimized virtual method candidate otherwise.
 118   WarmCallInfo* ok_to_inline(ciMethod *call_method, JVMState* caller_jvms, ciCallProfile& profile, WarmCallInfo* wci, bool& should_delay);
 119 
 120   // Information about inlined method
 121   JVMState*   caller_jvms()       const { return _caller_jvms; }
  122   ciMethod*   method()            const { return _method; }
 123   int         caller_bci()        const { return _caller_jvms ? _caller_jvms->bci() : InvocationEntryBci; }
 124   uint        count_inline_bcs()  const { return _count_inline_bcs; }
  125   float       site_invoke_ratio() const { return _site_invoke_ratio; }
 126 


  324   const TypeFunc* _tf;          // My kind of function type
 325   int           _entry_bci;     // the osr bci or InvocationEntryBci
 326 
 327   ciTypeFlow*   _flow;          // Results of previous flow pass.
 328   Block*        _blocks;        // Array of basic-block structs.
 329   int           _block_count;   // Number of elements in _blocks.
 330 
 331   GraphKit      _exits;         // Record all normal returns and throws here.
 332   bool          _wrote_final;   // Did we write a final field?
 333   bool          _wrote_volatile;     // Did we write a volatile field?
 334   bool          _wrote_stable;       // Did we write a @Stable field?
 335   bool          _wrote_fields;       // Did we write any field?
 336   bool          _count_invocations;  // update and test invocation counter
 337   bool          _method_data_update; // update method data oop
 338   Node*         _alloc_with_final;   // An allocation node with final field
 339 
 340   // Variables which track Java semantics during bytecode parsing:
 341 
 342   Block*            _block;     // block currently getting parsed
  343   ciBytecodeStream  _iter;      // stream of this method's bytecodes
 344 
 345   const FastLockNode* _synch_lock; // FastLockNode for synchronized method
 346 
 347 #ifndef PRODUCT
 348   int _max_switch_depth;        // Debugging SwitchRanges.
 349   int _est_switch_depth;        // Debugging SwitchRanges.
 350 #endif
 351 
 352   bool         _first_return;                  // true if return is the first to be parsed
 353   bool         _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
  354   uint         _new_idx;                       // nodes with _idx above this were created during this parse. Used to trim the replaced nodes list.
 355 
 356  public:
 357   // Constructor
 358   Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);
 359 
 360   virtual Parse* is_Parse() const { return (Parse*)this; }
 361 
 362   // Accessors.
 363   JVMState*     caller()        const { return _caller; }

