/*
 * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Optimization - Graph Style

class Block;
class CFGLoop;
class MachCallNode;
class Matcher;
class RootNode;
class VectorSet;
struct Tarjan;

//------------------------------Block_Array------------------------------------
// Map dense integer indices to Blocks.  Uses classic doubling-array trick.
// Abstractly provides an infinite array of Block*'s, initialized to NULL.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Block_Array : public ResourceObj {
  uint _size;                   // allocated size, as opposed to formal limit
  debug_only(uint _limit;)      // limit to formal domain
protected:
  Block **_blocks;
  void grow( uint i );          // Grow the array to fit index i

public:
  Arena *_arena;                // Arena to allocate in

  Block_Array(Arena *a) : _size(OptoBlockListSize), _arena(a) {
    debug_only(_limit=0);
    _blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
    for( int i = 0; i < OptoBlockListSize; i++ ) {
      _blocks[i] = NULL;
    }
  }
  Block *lookup( uint i ) const // Lookup, or NULL for not mapped
  { return (i<Max()) ? _blocks[i] : (Block*)NULL; }
  Block *operator[] ( uint i ) const // Lookup, or assert for not mapped
  { assert( i < Max(), "oob" ); return _blocks[i]; }
  // Extend the mapping: index i maps to Block *n.
  void map( uint i, Block *n ) { if( i>=Max() ) grow(i); _blocks[i] = n; }
  uint Max() const { debug_only(return _limit); return _size; }
};
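
// A minimal usage sketch (hypothetical names; not part of this header):
//
//   Block_Array bmap(Thread::current()->resource_area());
//   bmap.map(b->_pre_order, b);                // grows the array on demand
//   Block *same = bmap.lookup(b->_pre_order);  // == b
//   Block *none = bmap.lookup(1000000);        // NULL: index never mapped
//
// lookup() tolerates out-of-range indices; operator[] asserts instead.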


class Block_List : public Block_Array {
public:
  uint _cnt;
  Block_List() : Block_Array(Thread::current()->resource_area()), _cnt(0) {}
  void push( Block *b ) { map(_cnt++,b); }
  Block *pop() { return _blocks[--_cnt]; }
  // Remove and return the first block; the last block fills its slot,
  // so the remaining order is not preserved.
  Block *rpop() { Block *b = _blocks[0]; _blocks[0]=_blocks[--_cnt]; return b;}
  void remove( uint i );
  void insert( uint i, Block *n );
  uint size() const { return _cnt; }
  void reset() { _cnt = 0; }
  void print();
};
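
// Usage sketch: Block_List doubles as a worklist for CFG passes
// (a hypothetical example, not code from this header):
//
//   Block_List worklist;
//   worklist.push(start_block);
//   while (worklist.size() > 0) {
//     Block *b = worklist.pop();   // LIFO order
//     // ... process b, push unvisited successors ...
//   }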


class CFGElement : public ResourceObj {
 public:
  float _freq; // Execution frequency (estimate)

  CFGElement() : _freq(0.0f) {}
  virtual bool is_block() { return false; }
  virtual bool is_loop()  { return false; }
  Block*   as_Block() { assert(is_block(), "must be block"); return (Block*)this; }
  CFGLoop* as_CFGLoop()  { assert(is_loop(),  "must be loop");  return (CFGLoop*)this;  }
};

//------------------------------Block------------------------------------------
// This class defines a Basic Block.
// Basic blocks are used during the output routines, and are not used during
// any optimization pass.  They are created late in the game.
class Block : public CFGElement {
 public:
  // Nodes in this block, in order
  Node_List _nodes;

  // Basic blocks have a Node which defines Control for all Nodes pinned in
  // this block.  This Node is a RegionNode.  Exception-causing Nodes
  // (division, subroutines) and Phi functions are always pinned.  Later,
  // every Node will get pinned to some block.
  Node *head() const { return _nodes[0]; }

  // CAUTION: num_preds() is ONE based, so that predecessor numbers match
  // input edges to Regions and Phis.
  uint num_preds() const { return head()->req(); }
  Node *pred(uint i) const { return head()->in(i); }
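  // Example: a block whose head Region has two control inputs has
  // num_preds() == 3; the real predecessors are pred(1) and pred(2),
  // matching in(1) and in(2) of any Phi in this block.  Slot 0 is the
  // Region's self-reference and is never a predecessor.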

  // Array of successor blocks, same size as projs array
  Block_Array _succs;

  // Basic blocks have some number of Nodes which split control to all
  // following blocks.  These Nodes are always Projections.  The field in
  // the Projection and the block-ending Node determine which Block follows.
  uint _num_succs;

  // Basic blocks also carry all sorts of good old fashioned DFS information
  // used to find loops, loop nesting depth, dominators, etc.
  uint _pre_order;              // Pre-order DFS number

  // Dominator tree
  uint _dom_depth;              // Depth in dominator tree for fast LCA
  Block* _idom;                 // Immediate dominator block

  CFGLoop *_loop;               // Loop to which this block belongs
  uint _rpo;                    // Number in reverse post order walk

  virtual bool is_block() { return true; }
  float succ_prob(uint i);      // return probability of i'th successor
  int num_fall_throughs();      // How many fall-through candidates this block has
  void update_uncommon_branch(Block* un); // Lower branch prob to uncommon code
  bool succ_fall_through(uint i); // Is successor "i" a fall-through candidate?
  Block* lone_fall_through();   // Return lone fall-through Block or null

  Block* dom_lca(Block* that);  // Compute LCA in dominator tree.
#ifdef ASSERT
  bool dominates(Block* that) {
    int dom_diff = this->_dom_depth - that->_dom_depth;
    if (dom_diff > 0)  return false;
    for (; dom_diff < 0; dom_diff++)  that = that->_idom;
    return this == that;
  }
#endif

  // Report the alignment required by this block.  Must be a power of 2.
  // The previous block will insert nops to get this alignment.
  uint code_alignment();
  uint compute_loop_alignment();

  // BLOCK_FREQUENCY is a sentinel to mark uses of constant block frequencies.
  // It is currently also used to rescale such frequencies from the old
  // baseline of 1500 invocations to the current FreqCountInvocations setting.
#define BLOCK_FREQUENCY(f) (((f) * (float) 1500) / FreqCountInvocations)
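  // Worked example: with FreqCountInvocations equal to the old value of
  // 1500, BLOCK_FREQUENCY(0.5f) == 0.5f (the macro is the identity);
  // doubling FreqCountInvocations to 3000 halves every constant frequency,
  // keeping them comparable to profiled counts.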

  // Register Pressure (estimate) for Splitting heuristic
  uint _reg_pressure;
  uint _ihrp_index;
  uint _freg_pressure;
  uint _fhrp_index;

  // Mark and visited bits for an LCA calculation in insert_anti_dependences.
  // Since they hold unique node indexes, they do not need reinitialization.
  node_idx_t _raise_LCA_mark;
  void    set_raise_LCA_mark(node_idx_t x)    { _raise_LCA_mark = x; }
  node_idx_t  raise_LCA_mark() const          { return _raise_LCA_mark; }
  node_idx_t _raise_LCA_visited;
  void    set_raise_LCA_visited(node_idx_t x) { _raise_LCA_visited = x; }
  node_idx_t  raise_LCA_visited() const       { return _raise_LCA_visited; }

  // Estimated size in bytes of first instructions in a loop.
  uint _first_inst_size;
  uint first_inst_size() const     { return _first_inst_size; }
  void set_first_inst_size(uint s) { _first_inst_size = s; }

  // Compute the size of first instructions in this block.
  uint compute_first_inst_size(uint& sum_size, uint inst_cnt, PhaseRegAlloc* ra);

  // Compute alignment padding if the block needs it.
  // Align a loop if the loop's padding is less than or equal to the padding
  // limit, or if the size of the first instructions in the loop exceeds the
  // padding.
  uint alignment_padding(int current_offset) {
    int block_alignment = code_alignment();
    int max_pad = block_alignment-relocInfo::addr_unit();
    if( max_pad > 0 ) {
      assert(is_power_of_2(max_pad+relocInfo::addr_unit()), "");
      int current_alignment = current_offset & max_pad;
      if( current_alignment != 0 ) {
        uint padding = (block_alignment-current_alignment) & max_pad;
        if( has_loop_alignment() &&
            padding > (uint)MaxLoopPad &&
            first_inst_size() <= padding ) {
          return 0;
        }
        return padding;
      }
    }
    return 0;
  }
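
  // Worked example (assuming a byte-addressed target, so
  // relocInfo::addr_unit() == 1): with code_alignment() == 8 and
  // current_offset == 0x66, max_pad is 7, current_alignment is
  // 0x66 & 7 == 6, and the returned padding is (8 - 6) & 7 == 2 nop
  // bytes.  The padding is skipped only for a loop where it would exceed
  // MaxLoopPad and the loop's first instructions already fit inside it.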

  // Connector blocks. Connector blocks are basic blocks devoid of
  // instructions, but may have relevant non-instruction Nodes, such as
  // Phis or MergeMems. Such blocks are discovered and marked during the
  // RemoveEmpty phase, and elided during Output.
  bool _connector;
  void set_connector() { _connector = true; }
  bool is_connector() const { return _connector; }

  // Loop_alignment will be set for blocks which are at the top of loops.
  // The block layout pass may rotate loops such that the loop head may not
  // be the sequentially first block of the loop encountered in the linear
  // list of blocks.  If the layout pass is not run, loop alignment is set
  // for each block which is the head of a loop.
  uint _loop_alignment;
  void set_loop_alignment(Block *loop_top) {
    uint new_alignment = loop_top->compute_loop_alignment();
    if (new_alignment > _loop_alignment) {
      _loop_alignment = new_alignment;
    }
  }
  uint loop_alignment() const { return _loop_alignment; }
  bool has_loop_alignment() const { return loop_alignment() > 0; }

  // Create a new Block with given head Node.
  // Creates the (empty) predecessor arrays.
  Block( Arena *a, Node *headnode )
    : CFGElement(),
      _nodes(a),
      _succs(a),
      _num_succs(0),
      _pre_order(0),
      _idom(0),
      _loop(NULL),
      _reg_pressure(0),
      _ihrp_index(1),
      _freg_pressure(0),
      _fhrp_index(1),
      _raise_LCA_mark(0),
      _raise_LCA_visited(0),
      _first_inst_size(999999),
      _connector(false),
      _loop_alignment(0) {
    _nodes.push(headnode);
  }

  // Index of 'end' Node
  uint end_idx() const {
    // %%%%% add a proj after every goto
    // so (last->is_block_proj() != last) always, then simplify this code
    // This will not give correct end_idx for block 0 when it only contains root.
    int last_idx = _nodes.size() - 1;
    Node *last  = _nodes[last_idx];
    assert(last->is_block_proj() == last || last->is_block_proj() == _nodes[last_idx - _num_succs], "");
    return (last->is_block_proj() == last) ? last_idx : (last_idx - _num_succs);
  }
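
  // For a block ending in an If with two successors, _nodes lays out as
  //   [ Region, ..., If, IfTrue, IfFalse ]
  // so end_idx() is last_idx - _num_succs (the If).  For a block ending
  // in a Goto, the Goto is its own block-ending projection, and end_idx()
  // is simply the last index.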

  // Basic blocks have a Node which ends them.  This Node determines which
  // basic block follows this one in the program flow.  This Node is either an
  // IfNode, a GotoNode, a JmpNode, or a ReturnNode.
  Node *end() const { return _nodes[end_idx()]; }

  // Add an instruction to an existing block.  It must go after the head
  // instruction and before the end instruction.
  void add_inst( Node *n ) { _nodes.insert(end_idx(),n); }
  // Find node in block
  uint find_node( const Node *n ) const;
  // Find and remove n from block list
  void find_remove( const Node *n );

  // Schedule a call next in the block
  uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call);

  // Perform basic-block local scheduling
  Node *select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSet &next_call, uint sched_slot);
  void set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs );
  void needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs);
  bool schedule_local(PhaseCFG *cfg, Matcher &m, int *ready_cnt, VectorSet &next_call);
  // Cleanup if any code lands between a Call and its Catch
  void call_catch_cleanup(Block_Array &bbs);
  // Detect implicit-null-check opportunities.  Basically, find NULL checks
  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
  // I can generate a memory op if there is not one nearby.
  void implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons);

  // Return the empty status of a block
  enum { not_empty, empty_with_goto, completely_empty };
  int is_Empty() const;

  // Forward through connectors
  Block* non_connector() {
    Block* s = this;
    while (s->is_connector()) {
      s = s->_succs[0];
    }
    return s;
  }

  // Return true if b is a successor of this block
  bool has_successor(Block* b) const {
    for (uint i = 0; i < _num_succs; i++ ) {
      if (non_connector_successor(i) == b) {
        return true;
      }
    }
    return false;
  }

  // Successor block, after forwarding through connectors
  Block* non_connector_successor(int i) const {
    return _succs[i]->non_connector();
  }

  // Examine block's code shape to predict if it is not commonly executed.
  bool has_uncommon_code() const;

  // Use frequency calculations and code shape to predict if the block
  // is uncommon.
  bool is_uncommon( Block_Array &bbs ) const;

#ifndef PRODUCT
  // Debugging print of basic block
  void dump_bidx(const Block* orig) const;
  void dump_pred(const Block_Array *bbs, Block* orig) const;
  void dump_head( const Block_Array *bbs ) const;
  void dump( ) const;
  void dump( const Block_Array *bbs ) const;
#endif
};


//------------------------------PhaseCFG---------------------------------------
// Build an array of Basic Block pointers, one per Node.
class PhaseCFG : public Phase {
 private:
  // Build a proper-looking CFG.  Return the count of basic blocks.
  uint build_cfg();

  // Perform DFS search.
  // Set up 'vertex' as the DFS-number-to-vertex mapping.
  // Set up 'semi' as the vertex-to-DFS-number mapping.
  // Set 'parent' to the DFS parent.
  uint DFS( Tarjan *tarjan );

  // Helper function to insert a node into a block
  void schedule_node_into_block( Node *n, Block *b );

  // Set the basic block for pinned Nodes
  void schedule_pinned_nodes( VectorSet &visited );

  // I'll need a few machine-specific GotoNodes.  Clone from this one.
  MachNode *_goto;

  Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false);
  void verify_anti_dependences(Block* LCA, Node* load) {
    assert(LCA == _bbs[load->_idx], "should already be scheduled");
    insert_anti_dependences(LCA, load, true);
  }

 public:
  PhaseCFG( Arena *a, RootNode *r, Matcher &m );

  uint _num_blocks;             // Count of basic blocks
  Block_List _blocks;           // List of basic blocks
  RootNode *_root;              // Root of whole program
  Block_Array _bbs;             // Map Nodes to owning Basic Block
  Block *_broot;                // Basic block of root
  uint _rpo_ctr;
  CFGLoop* _root_loop;

  // Per node latency estimation, valid only during GCM
  GrowableArray<uint> _node_latency;

#ifndef PRODUCT
  bool _trace_opto_pipelining;  // tracing flag
#endif

  // Build dominators
  void Dominators();

  // Estimate block frequencies based on IfNode probabilities
  void Estimate_Block_Frequency();

  // Global Code Motion.  See Click's PLDI95 paper.  Place Nodes in specific
  // basic blocks; i.e. _bbs now maps _idx for all Nodes to some Block.
  void GlobalCodeMotion( Matcher &m, uint unique, Node_List &proj_list );

  // Compute the (backwards) latency of a node from the uses
  void latency_from_uses(Node *n);

  // Compute the (backwards) latency of a node from a single use
  int latency_from_use(Node *n, const Node *def, Node *use);

  // Compute the (backwards) latency of a node from the uses of this instruction
  void partial_latency_of_defs(Node *n);

  // Schedule Nodes early in their basic blocks.
  bool schedule_early(VectorSet &visited, Node_List &roots);

  // For each node, find the latest block it can be scheduled into
  // and then select the cheapest block between the latest and earliest
  // block to place the node.
  void schedule_late(VectorSet &visited, Node_List &stack);

  // Pick a block between early and late that is a cheaper alternative
  // to late.  Helper for schedule_late.
  Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);

  // Compute the instruction global latency with a backwards walk
  void ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack);

  // Set loop alignment
  void set_loop_alignment();

  // Remove empty basic blocks
  void remove_empty();
  void fixup_flow();
  bool move_to_next(Block* bx, uint b_index);
  void move_to_end(Block* bx, uint b_index);
  void insert_goto_at(uint block_no, uint succ_no);

  // Check for NeverBranch at block end.  This needs to become a GOTO to the
  // true target.  NeverBranch nodes are treated as a conditional branch that
  // always goes the same direction for most of the optimizer, and are used to
  // give a fake exit path to infinite loops.  At this late stage they need to
  // turn into Gotos so that when you enter the infinite loop you indeed hang.
  void convert_NeverBranch_to_Goto(Block *b);

  CFGLoop* create_loop_tree();

  // Insert a node into a block, and update the _bbs
  void insert( Block *b, uint idx, Node *n ) {
    b->_nodes.insert( idx, n );
    _bbs.map( n->_idx, b );
  }

#ifndef PRODUCT
  bool trace_opto_pipelining() const { return _trace_opto_pipelining; }

  // Debugging print of CFG
  void dump( ) const;           // CFG only
  void _dump_cfg( const Node *end, VectorSet &visited ) const;
  void verify() const;
  void dump_headers();
#else
  bool trace_opto_pipelining() const { return false; }
#endif
};


//------------------------------UnionFind--------------------------------------
// Map Block indices to a block-index for a cfg-cover.
// Array lookup in the optimized case.
class UnionFind : public ResourceObj {
  uint _cnt, _max;
  uint* _indices;
  ReallocMark _nesting;  // assertion check for reallocations
public:
  UnionFind( uint max );
  void reset( uint max );  // Reset to identity map for [0..max]

  uint lookup( uint nidx ) const {
    return _indices[nidx];
  }
  uint operator[] (uint nidx) const { return lookup(nidx); }

  void map( uint from_idx, uint to_idx ) {
    assert( from_idx < _cnt, "oob" );
    _indices[from_idx] = to_idx;
  }
  void extend( uint from_idx, uint to_idx );

  uint Size() const { return _cnt; }

  uint Find( uint idx ) {
    assert( idx < 65536, "Must fit into 16 bits" );
    uint uf_idx = lookup(idx);
    return (uf_idx == idx) ? uf_idx : Find_compress(idx);
  }
  uint Find_compress( uint idx );
  uint Find_const( uint idx ) const;
  void Union( uint idx1, uint idx2 );

};
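
// A minimal usage sketch (hypothetical; block pre-order indices stand in
// for set members):
//
//   UnionFind uf(max_blocks);
//   uf.reset(max_blocks);                       // every index is its own set
//   uf.Union(a->_pre_order, b->_pre_order);
//   if (uf.Find(a->_pre_order) == uf.Find(b->_pre_order)) {
//     // a and b are now in the same cover; Find compresses paths as a
//     // side effect, so repeated lookups degrade to plain array reads
//   }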
 487 
 488 //----------------------------BlockProbPair---------------------------
 489 // Ordered pair of Node*.
 490 class BlockProbPair VALUE_OBJ_CLASS_SPEC {
 491 protected:
 492   Block* _target;      // block target
 493   float  _prob;        // probability of edge to block
 494 public:
 495   BlockProbPair() : _target(NULL), _prob(0.0) {}
 496   BlockProbPair(Block* b, float p) : _target(b), _prob(p) {}
 497 
 498   Block* get_target() const { return _target; }
 499   float get_prob() const { return _prob; }
 500 };

//------------------------------CFGLoop-------------------------------------------
class CFGLoop : public CFGElement {
  int _id;
  int _depth;
  CFGLoop *_parent;      // The root of the loop tree is the method-level "pseudo" loop; its parent is null
  CFGLoop *_sibling;     // null terminated list
  CFGLoop *_child;       // first child, use child's sibling to visit all immediately nested loops
  GrowableArray<CFGElement*> _members; // list of members of loop
  GrowableArray<BlockProbPair> _exits; // list of successor blocks and their probabilities
  float _exit_prob;       // probability any loop exit is taken on a single loop iteration
  void update_succ_freq(Block* b, float freq);

 public:
  CFGLoop(int id) :
    CFGElement(),
    _id(id),
    _depth(0),
    _parent(NULL),
    _sibling(NULL),
    _child(NULL),
    _exit_prob(1.0f) {}
  CFGLoop* parent() { return _parent; }
  void push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk);
  void add_member(CFGElement *s) { _members.push(s); }
  void add_nested_loop(CFGLoop* cl);
  Block* head() {
    assert(_members.at(0)->is_block(), "head must be a block");
    Block* hd = _members.at(0)->as_Block();
    assert(hd->_loop == this, "just checking");
    assert(hd->head()->is_Loop(), "must begin with loop head node");
    return hd;
  }
  Block* backedge_block(); // Return the block on the backedge of the loop (else NULL)
  void compute_loop_depth(int depth);
  void compute_freq(); // compute frequency with loop assuming head freq 1.0f
  void scale_freq();   // scale frequency by loop trip count (including outer loops)
  bool in_loop_nest(Block* b);
  float trip_count() const { return 1.0f / _exit_prob; }
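  // trip_count() is the mean of a geometric distribution: if some exit is
  // taken with probability 0.1 on each iteration, the loop is expected to
  // run 1 / 0.1 == 10 times.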
  virtual bool is_loop()  { return true; }
  int id() { return _id; }

#ifndef PRODUCT
  void dump( ) const;
  void dump_tree() const;
#endif
};


//----------------------------------CFGEdge------------------------------------
// An edge between two basic blocks that will be embodied by a branch or a
// fall-through.
class CFGEdge : public ResourceObj {
 private:
  Block * _from;        // Source basic block
  Block * _to;          // Destination basic block
  float _freq;          // Execution frequency (estimate)
  int   _state;
  bool  _infrequent;
  int   _from_pct;
  int   _to_pct;

  // Private accessors
  int  from_pct() const { return _from_pct; }
  int  to_pct()   const { return _to_pct;   }
  int  from_infrequent() const { return from_pct() < BlockLayoutMinDiamondPercentage; }
  int  to_infrequent()   const { return to_pct()   < BlockLayoutMinDiamondPercentage; }

 public:
  enum {
    open,               // initial edge state; unprocessed
    connected,          // edge used to connect two traces together
    interior            // edge is interior to trace (could be backedge)
  };

  CFGEdge(Block *from, Block *to, float freq, int from_pct, int to_pct) :
    _from(from), _to(to), _freq(freq), _state(open),
    _from_pct(from_pct), _to_pct(to_pct) {
    _infrequent = from_infrequent() || to_infrequent();
  }

  float  freq() const { return _freq; }
  Block* from() const { return _from; }
  Block* to  () const { return _to;   }
  int  infrequent() const { return _infrequent; }
  int state() const { return _state; }

  void set_state(int state) { _state = state; }

#ifndef PRODUCT
  void dump( ) const;
#endif
};


//-----------------------------------Trace-------------------------------------
// An ordered list of basic blocks.
class Trace : public ResourceObj {
 private:
  uint _id;             // Unique Trace id (derived from initial block)
  Block ** _next_list;  // Array mapping index to next block
  Block ** _prev_list;  // Array mapping index to previous block
  Block * _first;       // First block in the trace
  Block * _last;        // Last block in the trace

  // Return the block that follows "b" in the trace.
  Block * next(Block *b) const { return _next_list[b->_pre_order]; }
  void set_next(Block *b, Block *n) const { _next_list[b->_pre_order] = n; }

  // Return the block that precedes "b" in the trace.
  Block * prev(Block *b) const { return _prev_list[b->_pre_order]; }
  void set_prev(Block *b, Block *p) const { _prev_list[b->_pre_order] = p; }

  // We've discovered a loop in this trace.  Reset "last" to be "b", and
  // "first" to be the block following "b".
  void break_loop_after(Block *b) {
    _last = b;
    _first = next(b);
    set_prev(_first, NULL);
    set_next(_last, NULL);
  }

 public:

  Trace(Block *b, Block **next_list, Block **prev_list) :
    _id(b->_pre_order),
    _next_list(next_list),
    _prev_list(prev_list),
    _first(b),
    _last(b) {
    set_next(b, NULL);
    set_prev(b, NULL);
  }

  // Return the id number
  uint id() const { return _id; }
  void set_id(uint id) { _id = id; }

  // Return the first block in the trace
  Block * first_block() const { return _first; }

  // Return the last block in the trace
  Block * last_block() const { return _last; }

  // Insert a trace in the middle of this one after b
  void insert_after(Block *b, Trace *tr) {
    set_next(tr->last_block(), next(b));
    if (next(b) != NULL) {
      set_prev(next(b), tr->last_block());
    }

    set_next(b, tr->first_block());
    set_prev(tr->first_block(), b);

    if (b == _last) {
      _last = tr->last_block();
    }
  }
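
  // Splicing tr == [X .. Y] into this trace after b rewires the links as
  //   ... -> b -> X .. Y -> next(b) -> ...
  // using four pointer updates; _last moves to Y when b was the old tail.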

  void insert_before(Block *b, Trace *tr) {
    Block *p = prev(b);
    assert(p != NULL, "use append instead");
    insert_after(p, tr);
  }

  // Append another trace to this one.
  void append(Trace *tr) {
    insert_after(_last, tr);
  }

  // Append a block at the end of this trace
  void append(Block *b) {
    set_next(_last, b);
    set_prev(b, _last);
    _last = b;
  }

  // Adjust the blocks in this trace
  void fixup_blocks(PhaseCFG &cfg);
  bool backedge(CFGEdge *e);

#ifndef PRODUCT
  void dump( ) const;
#endif
};

//------------------------------PhaseBlockLayout-------------------------------
// Rearrange blocks into some canonical order, based on edges and their frequencies
class PhaseBlockLayout : public Phase {
  PhaseCFG &_cfg;               // Control flow graph

  GrowableArray<CFGEdge *> *edges;
  Trace **traces;
  Block **next;
  Block **prev;
  UnionFind *uf;

  // Given a block, find its encompassing Trace
  Trace * trace(Block *b) {
    return traces[uf->Find_compress(b->_pre_order)];
  }
 public:
  PhaseBlockLayout(PhaseCFG &cfg);

  void find_edges();
  void grow_traces();
  void merge_traces(bool loose_connections);
  void reorder_traces(int count);
  void union_traces(Trace* from, Trace* to);
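
  // A sketch of the expected driving order (an assumption inferred from
  // the method names above; the actual sequencing lives in the
  // constructor's definition, not in this header):
  //
  //   find_edges();            // collect CFGEdges and their frequencies
  //   grow_traces();           // seed traces, extend them along hot edges
  //   merge_traces(true);      // stitch traces over frequent connections
  //   reorder_traces(count);   // emit blocks in the final canonical order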