src/share/vm/opto/block.hpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File JDK-8023691 Sdiff src/share/vm/opto

src/share/vm/opto/block.hpp

Print this page




  88 
  89 
// Common base class for entities in the control-flow graph (basic blocks and
// loops) that carry an execution-frequency estimate.  Lives in the compiler's
// resource area (ResourceObj).
class CFGElement : public ResourceObj {
  friend class VMStructs;
 public:
  float _freq; // Execution frequency (estimate)

  CFGElement() : _freq(0.0f) {}
  // Type tests; presumably overridden to return true by Block and CFGLoop
  // respectively — confirm against their declarations.
  virtual bool is_block() { return false; }
  virtual bool is_loop()  { return false; }
  // Checked downcasts: assert the dynamic type in debug builds, then raw-cast.
  Block*   as_Block() { assert(is_block(), "must be block"); return (Block*)this; }
  CFGLoop* as_CFGLoop()  { assert(is_loop(),  "must be loop");  return (CFGLoop*)this;  }
};
 101 
 102 //------------------------------Block------------------------------------------
 103 // This class defines a Basic Block.
 104 // Basic blocks are used during the output routines, and are not used during
 105 // any optimization pass.  They are created late in the game.
 106 class Block : public CFGElement {
 107   friend class VMStructs;
 108  public:

 109   // Nodes in this block, in order
 110   Node_List _nodes;
 111 





































  // Basic blocks have a Node which defines Control for all Nodes pinned in
  // this block.  This Node is a RegionNode.  Exception-causing Nodes
  // (division, subroutines) and Phi functions are always pinned.  Later,
  // every Node will get pinned to some block.
  // The head is by construction the first entry of the node list.
  Node *head() const { return _nodes[0]; }
 117 
  // CAUTION: num_preds() is ONE based, so that predecessor numbers match
  // input edges to Regions and Phis.  Delegates to the head node's input
  // count (req()), which includes the control slot at index 0.
  uint num_preds() const { return head()->req(); }
 121   Node *pred(uint i) const { return head()->in(i); }
 122 
 123   // Array of successor blocks, same size as projs array
 124   Block_Array _succs;
 125 
 126   // Basic blocks have some number of Nodes which split control to all
 127   // following blocks.  These Nodes are always Projections.  The field in
 128   // the Projection and the block-ending Node determine which Block follows.
 129   uint _num_succs;
 130 
 131   // Basic blocks also carry all sorts of good old fashioned DFS information
 132   // used to find loops, loop nesting depth, dominators, etc.
 133   uint _pre_order;              // Pre-order DFS number
 134 
 135   // Dominator tree
 136   uint _dom_depth;              // Depth in dominator tree for fast LCA


 257   }
 258 
  // Index of 'end' Node: the Node that terminates this block and decides
  // which block follows.  If the block ends in _num_succs Projections, the
  // control-splitting node sits _num_succs slots before the last node;
  // otherwise the last node is itself the block-ending projection.
  uint end_idx() const {
    // %%%%% add a proj after every goto
    // so (last->is_block_proj() != last) always, then simplify this code
    // This will not give correct end_idx for block 0 when it only contains root.
    int last_idx = _nodes.size() - 1;
    Node *last  = _nodes[last_idx];
    assert(last->is_block_proj() == last || last->is_block_proj() == _nodes[last_idx - _num_succs], "");
    return (last->is_block_proj() == last) ? last_idx : (last_idx - _num_succs);
  }
 269 
  // Basic blocks have a Node which ends them.  This Node determines which
  // basic block follows this one in the program flow.  This Node is either an
  // IfNode, a GotoNode, a JmpNode, or a ReturnNode.
  Node *end() const { return _nodes[end_idx()]; }
 274 
  // Add an instruction to an existing block.  It must go after the head
  // instruction and before the end instruction.  Inserting at end_idx()
  // keeps the block-ending node (and its projections) last.
  void add_inst( Node *n ) { _nodes.insert(end_idx(),n); }
 278   // Find node in block
 279   uint find_node( const Node *n ) const;
 280   // Find and remove n from block list
 281   void find_remove( const Node *n );
 282 
 283   // helper function that adds caller save registers to MachProjNode
 284   void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
 285   // Schedule a call next in the block
 286   uint sched_call(Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
 287 
 288   // Perform basic-block local scheduling
 289   Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
 290   void set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg);
 291   void needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg);
 292   bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
 293   // Cleanup if any code lands between a Call and his Catch
 294   void call_catch_cleanup(PhaseCFG* cfg, Compile *C);
 295   // Detect implicit-null-check opportunities.  Basically, find NULL checks
 296   // with suitable memory ops nearby.  Use the memory op to do the NULL check.
 297   // I can generate a memory op if there is not one nearby.


 533 #ifdef ASSERT
 534   Unique_Node_List _raw_oops;
 535 #endif
 536 
 537   // Do global code motion by first building dominator tree and estimate block frequency
 538   // Returns true on success
 539   bool do_global_code_motion();
 540 
 541   // Compute the (backwards) latency of a node from the uses
 542   void latency_from_uses(Node *n);
 543 
 544   // Set loop alignment
 545   void set_loop_alignment();
 546 
 547   // Remove empty basic blocks
 548   void remove_empty_blocks();
 549   void fixup_flow();
 550 
  // Insert a node into a block at index and map the node to the block.
  // Keeps the node list and the node-to-block mapping in sync.
  void insert(Block *b, uint idx, Node *n) {
    b->_nodes.insert( idx, n );
    map_node_to_block(n, b);
  }
 556 
 557 #ifndef PRODUCT
 558   bool trace_opto_pipelining() const { return _trace_opto_pipelining; }
 559 
 560   // Debugging print of CFG
 561   void dump( ) const;           // CFG only
 562   void _dump_cfg( const Node *end, VectorSet &visited  ) const;
 563   void verify() const;
 564   void dump_headers();
 565 #else
 566   bool trace_opto_pipelining() const { return false; }
 567 #endif
 568 };
 569 
 570 
 571 //------------------------------UnionFind--------------------------------------
 572 // Map Block indices to a block-index for a cfg-cover.
 573 // Array lookup in the optimized case.




  88 
  89 
// Common base class for entities in the control-flow graph (basic blocks and
// loops) that carry an execution-frequency estimate.  Lives in the compiler's
// resource area (ResourceObj).
class CFGElement : public ResourceObj {
  friend class VMStructs;
 public:
  float _freq; // Execution frequency (estimate)

  CFGElement() : _freq(0.0f) {}
  // Type tests; presumably overridden to return true by Block and CFGLoop
  // respectively — confirm against their declarations.
  virtual bool is_block() { return false; }
  virtual bool is_loop()  { return false; }
  // Checked downcasts: assert the dynamic type in debug builds, then raw-cast.
  Block*   as_Block() { assert(is_block(), "must be block"); return (Block*)this; }
  CFGLoop* as_CFGLoop()  { assert(is_loop(),  "must be loop");  return (CFGLoop*)this;  }
};
 101 
 102 //------------------------------Block------------------------------------------
 103 // This class defines a Basic Block.
 104 // Basic blocks are used during the output routines, and are not used during
 105 // any optimization pass.  They are created late in the game.
 106 class Block : public CFGElement {
 107   friend class VMStructs;
 108 
 109 private:
 110   // Nodes in this block, in order
 111   Node_List _nodes;
 112 
 113 public:
 114 
  // Get the node at index 'at_index', if 'at_index' is out of bounds return NULL
  // (accessor over the now-private _nodes list).
  Node* get_node(uint at_index) const {
    return _nodes[at_index];
  }
 119 
  // Get the number of nodes in this block
  uint number_of_nodes() const {
    return _nodes.size();
  }
 124 
  // Map a node 'node' to index 'to_index' in the block, if the index is out of bounds the size of the node list is increased
  void map_node(Node* node, uint to_index) {
    _nodes.map(to_index, node);
  }
 129 
  // Insert a node 'node' at index 'at_index', moving all nodes that are on a higher index one step, if 'at_index' is out of bounds we crash
  void insert_node(Node* node, uint at_index) {
    _nodes.insert(at_index, node);
  }
 134 
  // Remove a node at index 'at_index'; later nodes shift down one slot.
  void remove_node(uint at_index) {
    _nodes.remove(at_index);
  }
 139 
  // Push a node 'node' onto the end of the node list
  void push_node(Node* node) {
    _nodes.push(node);
  }
 144 
  // Pop the last node off the node list and return it
  Node* pop_node() {
    return _nodes.pop();
  }
 149 
  // Basic blocks have a Node which defines Control for all Nodes pinned in
  // this block.  This Node is a RegionNode.  Exception-causing Nodes
  // (division, subroutines) and Phi functions are always pinned.  Later,
  // every Node will get pinned to some block.
  // The head is by construction the first entry of the node list.
  Node *head() const { return get_node(0); }
 155 
  // CAUTION: num_preds() is ONE based, so that predecessor numbers match
  // input edges to Regions and Phis.  Delegates to the head node's input
  // count (req()), which includes the control slot at index 0.
  uint num_preds() const { return head()->req(); }
 159   Node *pred(uint i) const { return head()->in(i); }
 160 
 161   // Array of successor blocks, same size as projs array
 162   Block_Array _succs;
 163 
 164   // Basic blocks have some number of Nodes which split control to all
 165   // following blocks.  These Nodes are always Projections.  The field in
 166   // the Projection and the block-ending Node determine which Block follows.
 167   uint _num_succs;
 168 
 169   // Basic blocks also carry all sorts of good old fashioned DFS information
 170   // used to find loops, loop nesting depth, dominators, etc.
 171   uint _pre_order;              // Pre-order DFS number
 172 
 173   // Dominator tree
 174   uint _dom_depth;              // Depth in dominator tree for fast LCA


 295   }
 296 
 297   // Index of 'end' Node
 298   uint end_idx() const {
 299     // %%%%% add a proj after every goto
 300     // so (last->is_block_proj() != last) always, then simplify this code
 301     // This will not give correct end_idx for block 0 when it only contains root.
 302     int last_idx = _nodes.size() - 1;
 303     Node *last  = _nodes[last_idx];
 304     assert(last->is_block_proj() == last || last->is_block_proj() == _nodes[last_idx - _num_succs], "");
 305     return (last->is_block_proj() == last) ? last_idx : (last_idx - _num_succs);
 306   }
 307 
 308   // Basic blocks have a Node which ends them.  This Node determines which
 309   // basic block follows this one in the program flow.  This Node is either an
 310   // IfNode, a GotoNode, a JmpNode, or a ReturnNode.
 311   Node *end() const { return _nodes[end_idx()]; }
 312 
  // Add an instruction to an existing block.  It must go after the head
  // instruction and before the end instruction.  Inserting at end_idx()
  // keeps the block-ending node (and its projections) last.
  void add_inst( Node *n ) { insert_node(n, end_idx()); }
 316   // Find node in block
 317   uint find_node( const Node *n ) const;
 318   // Find and remove n from block list
 319   void find_remove( const Node *n );
 320 
 321   // helper function that adds caller save registers to MachProjNode
 322   void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
 323   // Schedule a call next in the block
 324   uint sched_call(Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
 325 
 326   // Perform basic-block local scheduling
 327   Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
 328   void set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg);
 329   void needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg);
 330   bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
 331   // Cleanup if any code lands between a Call and his Catch
 332   void call_catch_cleanup(PhaseCFG* cfg, Compile *C);
 333   // Detect implicit-null-check opportunities.  Basically, find NULL checks
 334   // with suitable memory ops nearby.  Use the memory op to do the NULL check.
 335   // I can generate a memory op if there is not one nearby.


 571 #ifdef ASSERT
 572   Unique_Node_List _raw_oops;
 573 #endif
 574 
 575   // Do global code motion by first building dominator tree and estimate block frequency
 576   // Returns true on success
 577   bool do_global_code_motion();
 578 
 579   // Compute the (backwards) latency of a node from the uses
 580   void latency_from_uses(Node *n);
 581 
 582   // Set loop alignment
 583   void set_loop_alignment();
 584 
 585   // Remove empty basic blocks
 586   void remove_empty_blocks();
 587   void fixup_flow();
 588 
  // Insert a node into a block at index and map the node to the block.
  // Keeps the block's node list and the node-to-block mapping in sync.
  void insert(Block *b, uint idx, Node *n) {
    b->insert_node(n , idx);
    map_node_to_block(n, b);
  }
 594 
 595 #ifndef PRODUCT
 596   bool trace_opto_pipelining() const { return _trace_opto_pipelining; }
 597 
 598   // Debugging print of CFG
 599   void dump( ) const;           // CFG only
 600   void _dump_cfg( const Node *end, VectorSet &visited  ) const;
 601   void verify() const;
 602   void dump_headers();
 603 #else
 604   bool trace_opto_pipelining() const { return false; }
 605 #endif
 606 };
 607 
 608 
 609 //------------------------------UnionFind--------------------------------------
 610 // Map Block indices to a block-index for a cfg-cover.
 611 // Array lookup in the optimized case.


src/share/vm/opto/block.hpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File