rev 7652 : 8063137: Never-taken branches should be pruned when GWT LambdaForms are shared
Reviewed-by: ?

   1 /*
   2  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_OPTO_PARSE_HPP
  26 #define SHARE_VM_OPTO_PARSE_HPP
  27 
  28 #include "ci/ciMethodData.hpp"
  29 #include "ci/ciTypeFlow.hpp"
  30 #include "compiler/methodLiveness.hpp"
  31 #include "libadt/vectset.hpp"
  32 #include "oops/generateOopMap.hpp"
  33 #include "opto/graphKit.hpp"
  34 #include "opto/subnode.hpp"
  35 
  36 class BytecodeParseHistogram;
  37 class InlineTree;
  38 class Parse;
  39 class SwitchRange;
  40 
  41 
  42 //------------------------------InlineTree-------------------------------------
  43 class InlineTree : public ResourceObj {
  44   friend class VMStructs;
  45 
  46   Compile*    C;                  // cache
  47   JVMState*   _caller_jvms;       // state of caller
  48   ciMethod*   _method;            // method being called by the caller_jvms
  49   InlineTree* _caller_tree;
  50   uint        _count_inline_bcs;  // Accumulated count of inlined bytecodes
  51   // Call-site count / interpreter invocation count, scaled recursively.
   52   // Always between 0.0 and 1.0.  Represents the fraction of the method's
  53   // total execution time used at this call site.
  54   const float _site_invoke_ratio;
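        // Worked example (illustrative): a call site reached on half of the
        // caller's invocations has a ratio of 0.5; if the caller was itself
        // inlined at a site with ratio 0.4, the callee's recursively scaled
        // ratio is 0.5 * 0.4 = 0.2.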
  55   const int   _max_inline_level;  // the maximum inline level for this sub-tree (may be adjusted)
  56   float compute_callee_frequency( int caller_bci ) const;
  57 
  58   GrowableArray<InlineTree*> _subtrees;
  59 
   60   void print_impl(outputStream* st, int indent) const PRODUCT_RETURN;
  61   const char* _msg;
  62 protected:
  63   InlineTree(Compile* C,
  64              const InlineTree* caller_tree,
  65              ciMethod* callee_method,
  66              JVMState* caller_jvms,
  67              int caller_bci,
  68              float site_invoke_ratio,
  69              int max_inline_level);
  70   InlineTree *build_inline_tree_for_callee(ciMethod* callee_method,
  71                                            JVMState* caller_jvms,
  72                                            int caller_bci);
  73   bool        try_to_inline(ciMethod* callee_method,
  74                             ciMethod* caller_method,
  75                             int caller_bci,
  76                             JVMState* jvms,
  77                             ciCallProfile& profile,
  78                             WarmCallInfo* wci_result,
  79                             bool& should_delay);
  80   bool        should_inline(ciMethod* callee_method,
  81                             ciMethod* caller_method,
  82                             int caller_bci,
  83                             ciCallProfile& profile,
  84                             WarmCallInfo* wci_result);
  85   bool        should_not_inline(ciMethod* callee_method,
  86                                 ciMethod* caller_method,
  87                                 JVMState* jvms,
  88                                 WarmCallInfo* wci_result);
  89   void        print_inlining(ciMethod* callee_method, int caller_bci,
  90                              bool success) const;
  91 
  92   InlineTree* caller_tree()       const { return _caller_tree;  }
  93   InlineTree* callee_at(int bci, ciMethod* m) const;
  94   int         inline_level()      const { return stack_depth(); }
  95   int         stack_depth()       const { return _caller_jvms ? _caller_jvms->depth() : 0; }
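        // Example (illustrative): the root InlineTree has no caller JVMS, so
        // stack_depth() == 0; a callee inlined into an already-inlined callee
        // reports inline_level() == 2.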
  96   const char* msg()               const { return _msg; }
  97   void        set_msg(const char* msg)  { _msg = msg; }
  98 public:
  99   static const char* check_can_parse(ciMethod* callee);
 100 
 101   static InlineTree* build_inline_tree_root();
 102   static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee);
 103 
  104   // For temporary (stack-allocated, stateless) InlineTrees ("ilts"):
 105   InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int max_inline_level);
 106 
 107   // InlineTree enum
 108   enum InlineStyle {
 109     Inline_do_not_inline             =   0, //
 110     Inline_cha_is_monomorphic        =   1, //
 111     Inline_type_profile_monomorphic  =   2  //
 112   };
 113 
 114   // See if it is OK to inline.
 115   // The receiver is the inline tree for the caller.
 116   //
 117   // The result is a temperature indication.  If it is hot or cold,
 118   // inlining is immediate or undesirable.  Otherwise, the info block
 119   // returned is newly allocated and may be enqueued.
 120   //
 121   // If the method is inlinable, a new inline subtree is created on the fly,
 122   // and may be accessed by find_subtree_from_root.
  123   // For a special or static invocation, the call_method is the dest_method;
  124   // otherwise, the call_method is an optimized virtual-method candidate.
 125   WarmCallInfo* ok_to_inline(ciMethod *call_method, JVMState* caller_jvms, ciCallProfile& profile, WarmCallInfo* wci, bool& should_delay);
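        // A usage sketch (illustrative; the real consumer is the call generator
        // in doCall.cpp, and is_hot()/is_cold() are assumed from WarmCallInfo):
        //
        //   WarmCallInfo scratch;
        //   WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch, should_delay);
        //   if (ci->is_hot())        { /* inline immediately */ }
        //   else if (ci->is_cold())  { /* emit an out-of-line call */ }
        //   else                     { /* warm: ci may be enqueued for later */ }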
 126 
 127   // Information about inlined method
 128   JVMState*   caller_jvms()       const { return _caller_jvms; }
 129   ciMethod   *method()            const { return _method; }
 130   int         caller_bci()        const { return _caller_jvms ? _caller_jvms->bci() : InvocationEntryBci; }
 131   uint        count_inline_bcs()  const { return _count_inline_bcs; }
  132   float       site_invoke_ratio() const { return _site_invoke_ratio; }
 133 
 134 #ifndef PRODUCT
 135 private:
 136   uint        _count_inlines;     // Count of inlined methods
 137 public:
 138   // Debug information collected during parse
  139   uint        count_inlines()     const { return _count_inlines; }
 140 #endif
 141   GrowableArray<InlineTree*> subtrees() { return _subtrees; }
 142 
 143   void print_value_on(outputStream* st) const PRODUCT_RETURN;
 144 
 145   bool        _forced_inline;     // Inlining was forced by CompilerOracle, ciReplay or annotation
 146   bool        forced_inline()     const { return _forced_inline; }
 147   // Count number of nodes in this subtree
 148   int         count() const;
 149   // Dump inlining replay data to the stream.
 150   void dump_replay_data(outputStream* out);
 151 };
 152 
 153 
 154 //-----------------------------------------------------------------------------
 155 //------------------------------Parse------------------------------------------
 156 // Parse bytecodes, build a Graph
 157 class Parse : public GraphKit {
 158  public:
 159   // Per-block information needed by the parser:
 160   class Block {
 161    private:
 162     ciTypeFlow::Block* _flow;
 163     int                _pred_count;     // how many predecessors in CFG?
 164     int                _preds_parsed;   // how many of these have been parsed?
 165     uint               _count;          // how many times executed?  Currently only set by _goto's
 166     bool               _is_parsed;      // has this block been parsed yet?
 167     bool               _is_handler;     // is this block an exception handler?
  168     bool               _has_merged_backedge; // does this block have a merged backedge?
 169     SafePointNode*     _start_map;      // all values flowing into this block
 170     MethodLivenessResult _live_locals;  // lazily initialized liveness bitmap
 171 
 172     int                _num_successors; // Includes only normal control flow.
 173     int                _all_successors; // Include exception paths also.
 174     Block**            _successors;
 175 
 176     // Use init_node/init_graph to initialize Blocks.
 177     // Block() : _live_locals((uintptr_t*)NULL,0) { ShouldNotReachHere(); }
 178     Block() : _live_locals(NULL,0) { ShouldNotReachHere(); }
 179 
 180    public:
 181 
 182     // Set up the block data structure itself.
 183     void init_node(Parse* outer, int po);
 184     // Set up the block's relations to other blocks.
 185     void init_graph(Parse* outer);
 186 
 187     ciTypeFlow::Block* flow() const        { return _flow; }
 188     int pred_count() const                 { return _pred_count; }
 189     int preds_parsed() const               { return _preds_parsed; }
 190     bool is_parsed() const                 { return _is_parsed; }
 191     bool is_handler() const                { return _is_handler; }
 192     void set_count( uint x )               { _count = x; }
 193     uint count() const                     { return _count; }
 194 
 195     SafePointNode* start_map() const       { assert(is_merged(),"");   return _start_map; }
 196     void set_start_map(SafePointNode* m)   { assert(!is_merged(), ""); _start_map = m; }
 197 
 198     // True after any predecessor flows control into this block
 199     bool is_merged() const                 { return _start_map != NULL; }
 200 
 201 #ifdef ASSERT
 202     // True after backedge predecessor flows control into this block
 203     bool has_merged_backedge() const       { return _has_merged_backedge; }
 204     void mark_merged_backedge(Block* pred) {
 205       assert(is_SEL_head(), "should be loop head");
 206       if (pred != NULL && is_SEL_backedge(pred)) {
 207         assert(is_parsed(), "block should be parsed before merging backedges");
 208         _has_merged_backedge = true;
 209       }
 210     }
 211 #endif
 212 
 213     // True when all non-exception predecessors have been parsed.
 214     bool is_ready() const                  { return preds_parsed() == pred_count(); }
 215 
 216     int num_successors() const             { return _num_successors; }
 217     int all_successors() const             { return _all_successors; }
 218     Block* successor_at(int i) const {
 219       assert((uint)i < (uint)all_successors(), "");
 220       return _successors[i];
 221     }
 222     Block* successor_for_bci(int bci);
 223 
 224     int start() const                      { return flow()->start(); }
 225     int limit() const                      { return flow()->limit(); }
 226     int rpo() const                        { return flow()->rpo(); }
 227     int start_sp() const                   { return flow()->stack_size(); }
 228 
 229     bool is_loop_head() const              { return flow()->is_loop_head(); }
 230     bool is_SEL_head() const               { return flow()->is_single_entry_loop_head(); }
  231     bool is_SEL_backedge(Block* pred) const { return is_SEL_head() && pred->rpo() >= rpo(); }
 232     bool is_invariant_local(uint i) const  {
 233       const JVMState* jvms = start_map()->jvms();
 234       if (!jvms->is_loc(i) || flow()->outer()->has_irreducible_entry()) return false;
 235       return flow()->is_invariant_local(i - jvms->locoff());
 236     }
 237     bool can_elide_SEL_phi(uint i) const  { assert(is_SEL_head(),""); return is_invariant_local(i); }
 238 
 239     const Type* peek(int off=0) const      { return stack_type_at(start_sp() - (off+1)); }
 240 
 241     const Type* stack_type_at(int i) const;
 242     const Type* local_type_at(int i) const;
 243     static const Type* get_type(ciType* t) { return Type::get_typeflow_type(t); }
 244 
 245     bool has_trap_at(int bci) const        { return flow()->has_trap() && flow()->trap_bci() == bci; }
 246 
 247     // Call this just before parsing a block.
 248     void mark_parsed() {
 249       assert(!_is_parsed, "must parse each block exactly once");
 250       _is_parsed = true;
 251     }
 252 
 253     // Return the phi/region input index for the "current" pred,
 254     // and bump the pred number.  For historical reasons these index
 255     // numbers are handed out in descending order.  The last index is
 256     // always PhiNode::Input (i.e., 1).  The value returned is known
 257     // as a "path number" because it distinguishes by which path we are
 258     // entering the block.
 259     int next_path_num() {
 260       assert(preds_parsed() < pred_count(), "too many preds?");
 261       return pred_count() - _preds_parsed++;
 262     }
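          // Worked example (illustrative): with pred_count() == 3, successive
          // calls return 3, 2, then 1 == PhiNode::Input, filling the region's
          // inputs from the highest path number down.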
 263 
 264     // Add a previously unaccounted predecessor to this block.
 265     // This operates by increasing the size of the block's region
 266     // and all its phi nodes (if any).  The value returned is a
 267     // path number ("pnum").
 268     int add_new_path();
 269 
 270     // Initialize me by recording the parser's map.  My own map must be NULL.
 271     void record_state(Parse* outer);
 272   };
 273 
 274 #ifndef PRODUCT
 275   // BytecodeParseHistogram collects number of bytecodes parsed, nodes constructed, and transformations.
 276   class BytecodeParseHistogram : public ResourceObj {
 277    private:
 278     enum BPHType {
 279       BPH_transforms,
 280       BPH_values
 281     };
 282     static bool _initialized;
 283     static uint _bytecodes_parsed [Bytecodes::number_of_codes];
 284     static uint _nodes_constructed[Bytecodes::number_of_codes];
 285     static uint _nodes_transformed[Bytecodes::number_of_codes];
 286     static uint _new_values       [Bytecodes::number_of_codes];
 287 
 288     Bytecodes::Code _initial_bytecode;
 289     int             _initial_node_count;
 290     int             _initial_transforms;
 291     int             _initial_values;
 292 
 293     Parse     *_parser;
 294     Compile   *_compiler;
 295 
 296     // Initialization
 297     static void reset();
 298 
 299     // Return info being collected, select with global flag 'BytecodeParseInfo'
 300     int current_count(BPHType info_selector);
 301 
 302    public:
 303     BytecodeParseHistogram(Parse *p, Compile *c);
 304     static bool initialized();
 305 
 306     // Record info when starting to parse one bytecode
 307     void set_initial_state( Bytecodes::Code bc );
 308     // Record results of parsing one bytecode
 309     void record_change();
 310 
 311     // Profile printing
 312     static void print(float cutoff = 0.01F); // cutoff in percent
 313   };
 314 
 315   public:
 316     // Record work done during parsing
 317     BytecodeParseHistogram* _parse_histogram;
 318     void set_parse_histogram(BytecodeParseHistogram *bph) { _parse_histogram = bph; }
 319     BytecodeParseHistogram* parse_histogram()      { return _parse_histogram; }
 320 #endif
 321 
 322  private:
 323   friend class Block;
 324 
 325   // Variables which characterize this compilation as a whole:
 326 
 327   JVMState*     _caller;        // JVMS which carries incoming args & state.
 328   float         _expected_uses; // expected number of calls to this code
 329   float         _prof_factor;   // discount applied to my profile counts
 330   int           _depth;         // Inline tree depth, for debug printouts
 331   const TypeFunc*_tf;           // My kind of function type
 332   int           _entry_bci;     // the osr bci or InvocationEntryBci
 333 
 334   ciTypeFlow*   _flow;          // Results of previous flow pass.
 335   Block*        _blocks;        // Array of basic-block structs.
 336   int           _block_count;   // Number of elements in _blocks.
 337 
 338   GraphKit      _exits;         // Record all normal returns and throws here.
 339   bool          _wrote_final;   // Did we write a final field?
 340   bool          _wrote_volatile;     // Did we write a volatile field?
 341   bool          _wrote_stable;       // Did we write a @Stable field?
 342   bool          _wrote_fields;       // Did we write any field?
 343   bool          _count_invocations;  // update and test invocation counter
 344   bool          _method_data_update; // update method data oop
 345   Node*         _alloc_with_final;   // An allocation node with final field
 346 
 347   // Variables which track Java semantics during bytecode parsing:
 348 
 349   Block*            _block;     // block currently getting parsed
 350   ciBytecodeStream  _iter;      // stream of this method's bytecodes
 351 
 352   int           _blocks_merged; // Progress meter: state merges from BB preds
 353   int           _blocks_parsed; // Progress meter: BBs actually parsed
 354 
 355   const FastLockNode* _synch_lock; // FastLockNode for synchronized method
 356 
 357 #ifndef PRODUCT
 358   int _max_switch_depth;        // Debugging SwitchRanges.
 359   int _est_switch_depth;        // Debugging SwitchRanges.
 360 #endif
 361 
 362   bool         _first_return;                  // true if return is the first to be parsed
 363   bool         _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
  364   uint         _new_idx;                       // any node with _idx above this value was new during this parse. Used to trim the replaced nodes list.
 365 
 366  public:
 367   // Constructor
 368   Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);
 369 
 370   virtual Parse* is_Parse() const { return (Parse*)this; }
 371 
 372   // Accessors.
 373   JVMState*     caller()        const { return _caller; }
 374   float         expected_uses() const { return _expected_uses; }
 375   float         prof_factor()   const { return _prof_factor; }
 376   int           depth()         const { return _depth; }
 377   const TypeFunc* tf()          const { return _tf; }
 378   //            entry_bci()     -- see osr_bci, etc.
 379 
 380   ciTypeFlow*   flow()          const { return _flow; }
 381   //            blocks()        -- see rpo_at, start_block, etc.
 382   int           block_count()   const { return _block_count; }
 383 
 384   GraphKit&     exits()               { return _exits; }
 385   bool          wrote_final() const   { return _wrote_final; }
 386   void      set_wrote_final(bool z)   { _wrote_final = z; }
 387   bool          wrote_volatile() const { return _wrote_volatile; }
 388   void      set_wrote_volatile(bool z) { _wrote_volatile = z; }
 389   bool          wrote_stable() const  { return _wrote_stable; }
 390   void      set_wrote_stable(bool z)  { _wrote_stable = z; }
 391   bool         wrote_fields() const   { return _wrote_fields; }
 392   void     set_wrote_fields(bool z)   { _wrote_fields = z; }
 393   bool          count_invocations() const  { return _count_invocations; }
 394   bool          method_data_update() const { return _method_data_update; }
 395   Node*    alloc_with_final() const   { return _alloc_with_final; }
 396   void set_alloc_with_final(Node* n)  {
 397     assert((_alloc_with_final == NULL) || (_alloc_with_final == n), "different init objects?");
 398     _alloc_with_final = n;
 399   }
 400 
 401   Block*             block()    const { return _block; }
 402   ciBytecodeStream&  iter()           { return _iter; }
 403   Bytecodes::Code    bc()       const { return _iter.cur_bc(); }
 404 
 405   void set_block(Block* b)            { _block = b; }
 406 
 407   // Derived accessors:
 408   bool is_normal_parse() const  { return _entry_bci == InvocationEntryBci; }
 409   bool is_osr_parse() const     { return _entry_bci != InvocationEntryBci; }
 410   int osr_bci() const           { assert(is_osr_parse(),""); return _entry_bci; }
 411 
 412   void set_parse_bci(int bci);
 413 
 414   // Must this parse be aborted?
 415   bool failing()                { return C->failing(); }
 416 
 417   Block* rpo_at(int rpo) {
 418     assert(0 <= rpo && rpo < _block_count, "oob");
 419     return &_blocks[rpo];
 420   }
 421   Block* start_block() {
 422     return rpo_at(flow()->start_block()->rpo());
 423   }
 424   // Can return NULL if the flow pass did not complete a block.
 425   Block* successor_for_bci(int bci) {
 426     return block()->successor_for_bci(bci);
 427   }
 428 
 429  private:
 430   // Create a JVMS & map for the initial state of this method.
 431   SafePointNode* create_entry_map();
 432 
 433   // OSR helpers
 434   Node *fetch_interpreter_state(int index, BasicType bt, Node *local_addrs, Node *local_addrs_base);
 435   Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);
 436   void  load_interpreter_state(Node* osr_buf);
 437 
 438   // Functions for managing basic blocks:
 439   void init_blocks();
 440   void load_state_from(Block* b);
 441   void store_state_to(Block* b) { b->record_state(this); }
 442 
 443   // Parse all the basic blocks.
 444   void do_all_blocks();
 445 
 446   // Parse the current basic block
 447   void do_one_block();
 448 
 449   // Raise an error if we get a bad ciTypeFlow CFG.
 450   void handle_missing_successor(int bci);
 451 
 452   // first actions (before BCI 0)
 453   void do_method_entry();
 454 
 455   // implementation of monitorenter/monitorexit
 456   void do_monitor_enter();
 457   void do_monitor_exit();
 458 
  459   // Eagerly create phis throughout the state, to cope with back edges.
 460   void ensure_phis_everywhere();
 461 
 462   // Merge the current mapping into the basic block starting at bci
 463   void merge(          int target_bci);
 464   // Same as plain merge, except that it allocates a new path number.
 465   void merge_new_path( int target_bci);
 466   // Merge the current mapping into an exception handler.
 467   void merge_exception(int target_bci);
 468   // Helper: Merge the current mapping into the given basic block
 469   void merge_common(Block* target, int pnum);
 470   // Helper functions for merging individual cells.
 471   PhiNode *ensure_phi(       int idx, bool nocreate = false);
 472   PhiNode *ensure_memory_phi(int idx, bool nocreate = false);
 473   // Helper to merge the current memory state into the given basic block
 474   void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);
 475 
  476   // Parse this bytecode, and alter the Parser's JVM->Node mapping
 477   void do_one_bytecode();
 478 
 479   // helper function to generate array store check
 480   void array_store_check();
 481   // Helper function to generate array load
 482   void array_load(BasicType etype);
 483   // Helper function to generate array store
 484   void array_store(BasicType etype);
 485   // Helper function to compute array addressing
  486   Node* array_addressing(BasicType type, int vals, const Type** result2 = NULL);
 487 
 488   void rtm_deopt();
 489 
 490   // Pass current map to exits
 491   void return_current(Node* value);
 492 
 493   // Register finalizers on return from Object.<init>
 494   void call_register_finalizer();
 495 
 496   // Insert a compiler safepoint into the graph
 497   void add_safepoint();
 498 
 499   // Insert a compiler safepoint into the graph, if there is a back-branch.
 500   void maybe_add_safepoint(int target_bci) {
 501     if (UseLoopSafepoints && target_bci <= bci()) {
 502       add_safepoint();
 503     }
 504   }
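        // Example (illustrative): the branch at the bottom of a loop jumps
        // backward, so target_bci <= bci() holds and a safepoint is inserted
        // on each iteration (unless UseLoopSafepoints is disabled).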
 505 
 506   // Note:  Intrinsic generation routines may be found in library_call.cpp.
 507 
 508   // Helper function to setup Ideal Call nodes
 509   void do_call();
 510 
 511   // Helper function to uncommon-trap or bailout for non-compilable call-sites
 512   bool can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass *klass);
 513 
 514   // Helper function to setup for type-profile based inlining
 515   bool prepare_type_profile_inline(ciInstanceKlass* prof_klass, ciMethod* prof_method);
 516 
 517   // Helper functions for type checking bytecodes:
 518   void  do_checkcast();
 519   void  do_instanceof();
 520 
 521   // Helper functions for shifting & arithmetic
 522   void modf();
 523   void modd();
 524   void l2f();
 525 
 526   void do_irem();
 527 
 528   // implementation of _get* and _put* bytecodes
 529   void do_getstatic() { do_field_access(true,  false); }
 530   void do_getfield () { do_field_access(true,  true); }
 531   void do_putstatic() { do_field_access(false, false); }
 532   void do_putfield () { do_field_access(false, true); }
 533 
 534   // common code for making initial checks and forming addresses
 535   void do_field_access(bool is_get, bool is_field);
 536   bool static_field_ok_in_clinit(ciField *field, ciMethod *method);
 537 
 538   // common code for actually performing the load or store
 539   void do_get_xxx(Node* obj, ciField* field, bool is_field);
 540   void do_put_xxx(Node* obj, ciField* field, bool is_field);
 541 
 542   // loading from a constant field or the constant pool
 543   // returns false if push failed (non-perm field constants only, not ldcs)
 544   bool push_constant(ciConstant con, bool require_constant = false, bool is_autobox_cache = false, const Type* basic_type = NULL);
 545 
 546   // implementation of object creation bytecodes
 547   void emit_guard_for_new(ciInstanceKlass* klass);
 548   void do_new();
 549   void do_newarray(BasicType elemtype);
 550   void do_anewarray();
 551   void do_multianewarray();
  552   Node* expand_multianewarray(ciArrayKlass* array_klass, Node** lengths, int ndimensions, int nargs);
 553 
 554   // implementation of jsr/ret
 555   void do_jsr();
 556   void do_ret();
 557 
 558   float   dynamic_branch_prediction(float &cnt);
 559   float   branch_prediction(float &cnt, BoolTest::mask btest, int target_bci);
 560   bool    seems_never_taken(float prob) const;
 561   bool    path_is_suitable_for_uncommon_trap(float prob) const;
 562   bool    seems_stable_comparison() const;
 563 
 564   void    do_ifnull(BoolTest::mask btest, Node* c);
 565   void    do_if(BoolTest::mask btest, Node* c);
 566   int     repush_if_args();
 567   void    adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
 568                               Block* path, Block* other_path);
 569   void    sharpen_type_after_if(BoolTest::mask btest,
 570                                 Node* con, const Type* tcon,
 571                                 Node* val, const Type* tval);
 572   IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask);
 573   Node*   jump_if_join(Node* iffalse, Node* iftrue);
 574   void    jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, int prof_table_index);
 575   void    jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, int prof_table_index);
 576   void    jump_if_always_fork(int dest_bci_if_true, int prof_table_index);
 577 
 578   friend class SwitchRange;
 579   void    do_tableswitch();
 580   void    do_lookupswitch();
 581   void    jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
 582   bool    create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
 583 
 584   void decrement_age();
 585   // helper functions for methodData style profiling
 586   void test_counter_against_threshold(Node* cnt, int limit);
 587   void increment_and_test_invocation_counter(int limit);
 588   void test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, int limit);
 589   Node* method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
 590   void increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
 591   void set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant);
 592 
 593   void profile_method_entry();
 594   void profile_taken_branch(int target_bci, bool force_update = false);
 595   void profile_not_taken_branch(bool force_update = false);
 596   void profile_call(Node* receiver);
 597   void profile_generic_call();
 598   void profile_receiver_type(Node* receiver);
 599   void profile_ret(int target_bci);
 600   void profile_null_checkcast();
 601   void profile_switch_case(int table_index);
 602 
 603   // helper function for call statistics
 604   void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;
 605 
 606   Node_Notes* make_node_notes(Node_Notes* caller_nn);
 607 
 608   // Helper functions for handling normal and abnormal exits.
 609   void build_exits();
 610 
 611   // Fix up all exceptional control flow exiting a single bytecode.
 612   void do_exceptions();
 613 
 614   // Fix up all exiting control flow at the end of the parse.
 615   void do_exits();
 616 
 617   // Add Catch/CatchProjs
 618   // The call is either a Java call or the VM's rethrow stub
 619   void catch_call_exceptions(ciExceptionHandlerStream&);
 620 
 621   // Handle all exceptions thrown by the inlined method.
 622   // Also handles exceptions for individual bytecodes.
 623   void catch_inline_exceptions(SafePointNode* ex_map);
 624 
 625   // Merge the given map into correct exceptional exit state.
 626   // Assumes that there is no applicable local handler.
 627   void throw_to_exit(SafePointNode* ex_map);
 628 
 629   // Use speculative type to optimize CmpP node
 630   Node* optimize_cmp_with_klass(Node* c);
 631 
 632  public:
 633 #ifndef PRODUCT
 634   // Handle PrintOpto, etc.
 635   void show_parse_info();
 636   void dump_map_adr_mem() const;
 637   static void print_statistics(); // Print some performance counters
 638   void dump();
 639   void dump_bci(int bci);
 640 #endif
 641 };
 642 
 643 #endif // SHARE_VM_OPTO_PARSE_HPP
--- EOF ---