/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_BLOCK_HPP
#define SHARE_VM_OPTO_BLOCK_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/phase.hpp"

// Optimization - Graph Style

class Block;
class CFGLoop;
class MachCallNode;
class Matcher;
class RootNode;
class VectorSet;
class PhaseChaitin;
struct Tarjan;

//------------------------------Block_Array------------------------------------
// Map dense integer indices to Blocks.  Uses the classic doubling-array trick.
// Abstractly provides an infinite array of Block*'s, initialized to NULL.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Block_Array : public ResourceObj {
  friend class VMStructs;
  uint _size;                   // allocated size, as opposed to formal limit
  debug_only(uint _limit;)      // limit to formal domain
  Arena *_arena;                // Arena to allocate in
protected:
  Block **_blocks;
  void grow( uint i );          // Grow array node to fit

public:
  Block_Array(Arena *a) : _size(OptoBlockListSize), _arena(a) {
    debug_only(_limit=0);
    _blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
    for( int i = 0; i < OptoBlockListSize; i++ ) {
      _blocks[i] = NULL;
    }
  }
  Block *lookup( uint i ) const // Lookup, or NULL for not mapped
  { return (i<Max()) ? _blocks[i] : (Block*)NULL; }
  Block *operator[] ( uint i ) const // Lookup, or assert for not mapped
  { assert( i < Max(), "oob" ); return _blocks[i]; }
  // Extend the mapping: index i maps to Block *n.
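  // A short usage sketch (illustrative only; 'arena' and 'blk' are
  // hypothetical locals).  Mapping past the current size grows the backing
  // array by doubling until the index fits, copying the old entries and
  // NULL-filling the new tail, so earlier mappings survive:
  //
  //   Block_Array bmap(arena);    // OptoBlockListSize slots, all NULL
  //   bmap.map(1000, blk);        // grows if 1000 >= Max(), then stores blk
  //   assert(bmap.lookup(1000) == blk,  "mapped");
  //   assert(bmap.lookup(2000) == NULL, "never mapped");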
  void map( uint i, Block *n ) { if( i>=Max() ) grow(i); _blocks[i] = n; }
  uint Max() const { debug_only(return _limit); return _size; }
};


class Block_List : public Block_Array {
  friend class VMStructs;
public:
  uint _cnt;
  Block_List() : Block_Array(Thread::current()->resource_area()), _cnt(0) {}
  void push( Block *b ) { map(_cnt++,b); }
  Block *pop() { return _blocks[--_cnt]; }
  Block *rpop() { Block *b = _blocks[0]; _blocks[0]=_blocks[--_cnt]; return b;}
  void remove( uint i );
  void insert( uint i, Block *n );
  uint size() const { return _cnt; }
  void reset() { _cnt = 0; }
  void print();
};


class CFGElement : public ResourceObj {
  friend class VMStructs;
public:
  double _freq; // Execution frequency (estimate)

  CFGElement() : _freq(0.0) {}
  virtual bool is_block() { return false; }
  virtual bool is_loop()  { return false; }
  Block*   as_Block()   { assert(is_block(), "must be block"); return (Block*)this; }
  CFGLoop* as_CFGLoop() { assert(is_loop(),  "must be loop");  return (CFGLoop*)this; }
};

//------------------------------Block------------------------------------------
// This class defines a Basic Block.
// Basic blocks are used during the output routines, and are not used during
// any optimization pass.  They are created late in the game.
class Block : public CFGElement {
  friend class VMStructs;

private:
  // Nodes in this block, in order
  Node_List _nodes;

public:

  // Get the node at index 'at_index'; returns NULL if 'at_index' is out of bounds
  Node* get_node(uint at_index) const {
    return _nodes[at_index];
  }

  // Get the number of nodes in this block
  uint number_of_nodes() const {
    return _nodes.size();
  }

  // Map a node 'node' to index 'to_index' in the block; if the index is out
  // of bounds, the node list is grown to fit
  void map_node(Node* node, uint to_index) {
    _nodes.map(to_index, node);
  }

  // Insert a node 'node' at index 'at_index', moving all nodes at higher
  // indices up by one; 'at_index' must be in bounds or we crash
  void insert_node(Node* node, uint at_index) {
    _nodes.insert(at_index, node);
  }

  // Remove the node at index 'at_index'
  void remove_node(uint at_index) {
    _nodes.remove(at_index);
  }

  // Push a node 'node' onto the end of the node list
  void push_node(Node* node) {
    _nodes.push(node);
  }

  // Pop the last node off the node list
  Node* pop_node() {
    return _nodes.pop();
  }

  // Basic blocks have a Node which defines Control for all Nodes pinned in
  // this block.  This Node is a RegionNode.  Exception-causing Nodes
  // (division, subroutines) and Phi functions are always pinned.  Later,
  // every Node will get pinned to some block.
  Node *head() const { return get_node(0); }

  // CAUTION: num_preds() is ONE based, so that predecessor numbers match
  // input edges to Regions and Phis.
  uint num_preds() const { return head()->req(); }
  Node *pred(uint i) const { return head()->in(i); }

  // Array of successor blocks, same size as projs array
  Block_Array _succs;

  // Basic blocks have some number of Nodes which split control to all
  // following blocks.  These Nodes are always Projections.  The field in
  // the Projection and the block-ending Node determine which Block follows.
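  // For example (illustrative): a block ending in an IfNode carries two
  // trailing Projections, IfTrue and IfFalse, and _succs holds the matching
  // target blocks:
  //
  //     ... IfNode IfTrue IfFalse        <- tail of _nodes
  //                  |       |
  //     _succs: [true-target] [false-target]   // _num_succs == 2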
  uint _num_succs;

  // Basic blocks also carry all sorts of good old fashioned DFS information
  // used to find loops, loop nesting depth, dominators, etc.
  uint _pre_order;              // Pre-order DFS number

  // Dominator tree
  uint _dom_depth;              // Depth in dominator tree for fast LCA
  Block* _idom;                 // Immediate dominator block

  CFGLoop *_loop;               // Loop to which this block belongs
  uint _rpo;                    // Number in reverse post order walk

  virtual bool is_block() { return true; }
  float succ_prob(uint i);                // return probability of i'th successor
  int num_fall_throughs();                // How many fall-through candidates this block has
  void update_uncommon_branch(Block* un); // Lower branch prob to uncommon code
  bool succ_fall_through(uint i);         // Is successor "i" a fall-through candidate
  Block* lone_fall_through();             // Return lone fall-through Block or null

  Block* dom_lca(Block* that);            // Compute LCA in dominator tree.
#ifdef ASSERT
  bool dominates(Block* that) {
    int dom_diff = this->_dom_depth - that->_dom_depth;
    if (dom_diff > 0)  return false;
    for (; dom_diff < 0; dom_diff++)  that = that->_idom;
    return this == that;
  }
#endif

  // Report the alignment required by this block.  Must be a power of 2.
  // The previous block will insert nops to get this alignment.
  uint code_alignment();
  uint compute_loop_alignment();

  // BLOCK_FREQUENCY is a sentinel to mark uses of constant block frequencies.
  // It is currently also used to rescale such frequencies from the old default
  // invocation count of 1500 to the current FreqCountInvocations.
#define BLOCK_FREQUENCY(f) ((f * (double) 1500) / FreqCountInvocations)

  // Register Pressure (estimate) for Splitting heuristic
  uint _reg_pressure;
  uint _ihrp_index;
  uint _freg_pressure;
  uint _fhrp_index;

  // Mark and visited bits for an LCA calculation in insert_anti_dependences.
  // Since they hold unique node indexes, they do not need reinitialization.
  node_idx_t _raise_LCA_mark;
  void set_raise_LCA_mark(node_idx_t x)    { _raise_LCA_mark = x; }
  node_idx_t  raise_LCA_mark() const       { return _raise_LCA_mark; }
  node_idx_t _raise_LCA_visited;
  void set_raise_LCA_visited(node_idx_t x) { _raise_LCA_visited = x; }
  node_idx_t  raise_LCA_visited() const    { return _raise_LCA_visited; }

  // Estimated size in bytes of first instructions in a loop.
  uint _first_inst_size;
  uint first_inst_size() const     { return _first_inst_size; }
  void set_first_inst_size(uint s) { _first_inst_size = s; }

  // Compute the size of first instructions in this block.
  uint compute_first_inst_size(uint& sum_size, uint inst_cnt, PhaseRegAlloc* ra);

  // Compute alignment padding if the block needs it.
  // Align a loop if the loop's padding is less than or equal to the padding
  // limit, or if the size of the loop's first instructions is greater than
  // the padding.
  uint alignment_padding(int current_offset) {
    int block_alignment = code_alignment();
    int max_pad = block_alignment-relocInfo::addr_unit();
    if( max_pad > 0 ) {
      assert(is_power_of_2(max_pad+relocInfo::addr_unit()), "");
      int current_alignment = current_offset & max_pad;
      if( current_alignment != 0 ) {
        uint padding = (block_alignment-current_alignment) & max_pad;
        if( has_loop_alignment() &&
            padding > (uint)MaxLoopPad &&
            first_inst_size() <= padding ) {
          return 0;
        }
        return padding;
      }
    }
    return 0;
  }
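
  // Worked example for alignment_padding (illustrative numbers only): with
  // code_alignment() == 16 and relocInfo::addr_unit() == 1, max_pad is 15.
  // At current_offset 0x2c, current_alignment == 0x2c & 15 == 12, so
  // padding == (16 - 12) & 15 == 4 nop bytes.  For a loop head, alignment is
  // skipped (0 returned) only when the needed padding exceeds MaxLoopPad yet
  // the loop's first instructions would fit inside the padding anyway.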

  // Connector blocks.  Connector blocks are basic blocks devoid of
  // instructions, but may have relevant non-instruction Nodes, such as
  // Phis or MergeMems.  Such blocks are discovered and marked during the
  // RemoveEmpty phase, and elided during Output.
  bool _connector;
  void set_connector() { _connector = true; }
  bool is_connector() const { return _connector; }

  // Loop_alignment will be set for blocks which are at the top of loops.
  // The block layout pass may rotate loops such that the loop head may not
  // be the sequentially first block of the loop encountered in the linear
  // list of blocks.  If the layout pass is not run, loop alignment is set
  // for each block which is the head of a loop.
  uint _loop_alignment;
  void set_loop_alignment(Block *loop_top) {
    uint new_alignment = loop_top->compute_loop_alignment();
    if (new_alignment > _loop_alignment) {
      _loop_alignment = new_alignment;
    }
  }
  uint loop_alignment() const { return _loop_alignment; }
  bool has_loop_alignment() const { return loop_alignment() > 0; }

  // Create a new Block with given head Node.
  // Creates the (empty) predecessor arrays.
  Block( Arena *a, Node *headnode )
    : CFGElement(),
      _nodes(a),
      _succs(a),
      _num_succs(0),
      _pre_order(0),
      _idom(NULL),
      _loop(NULL),
      _reg_pressure(0),
      _ihrp_index(1),
      _freg_pressure(0),
      _fhrp_index(1),
      _raise_LCA_mark(0),
      _raise_LCA_visited(0),
      _first_inst_size(999999),
      _connector(false),
      _loop_alignment(0) {
    _nodes.push(headnode);
  }

  // Index of 'end' Node
  uint end_idx() const {
    // %%%%% add a proj after every goto
    // so (last->is_block_proj() != last) always, then simplify this code
    // This will not give correct end_idx for block 0 when it only contains root.
    int last_idx = _nodes.size() - 1;
    Node *last = _nodes[last_idx];
    assert(last->is_block_proj() == last || last->is_block_proj() == _nodes[last_idx - _num_succs], "");
    return (last->is_block_proj() == last) ? last_idx : (last_idx - _num_succs);
  }
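
  // Layout sketch (illustrative): for a block ending in an If with two
  // projections, _nodes is [ Region, ..., If, IfTrue, IfFalse ].  Here 'last'
  // (IfFalse) is not its own block projection; its is_block_proj() yields the
  // If at last_idx - _num_succs, so end_idx() returns the index of the If.
  // For a block ending in a Goto, which is its own block projection, end_idx()
  // is simply the last index.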

  // Basic blocks have a Node which ends them.  This Node determines which
  // basic block follows this one in the program flow.  This Node is either an
  // IfNode, a GotoNode, a JmpNode, or a ReturnNode.
  Node *end() const { return _nodes[end_idx()]; }

  // Add an instruction to an existing block.  It must go after the head
  // instruction and before the end instruction.
  void add_inst( Node *n ) { insert_node(n, end_idx()); }
  // Find node in block.  Fails if node not in block.
  uint find_node( const Node *n ) const;
  // Find and remove n from block list
  void find_remove( const Node *n );
  // Check whether the node is in the block.
  bool contains (const Node *n) const;

  // Return the empty status of a block
  enum { not_empty, empty_with_goto, completely_empty };
  int is_Empty() const;

  // Forward through connectors
  Block* non_connector() {
    Block* s = this;
    while (s->is_connector()) {
      s = s->_succs[0];
    }
    return s;
  }

  // Return true if b is a successor of this block
  bool has_successor(Block* b) const {
    for (uint i = 0; i < _num_succs; i++ ) {
      if (non_connector_successor(i) == b) {
        return true;
      }
    }
    return false;
  }

  // Successor block, after forwarding through connectors
  Block* non_connector_successor(int i) const {
    return _succs[i]->non_connector();
  }

  // Examine block's code shape to predict if it is not commonly executed.
  bool has_uncommon_code() const;

#ifndef PRODUCT
  // Debugging print of basic block
  void dump_bidx(const Block* orig, outputStream* st = tty) const;
  void dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st = tty) const;
  void dump_head(const PhaseCFG* cfg, outputStream* st = tty) const;
  void dump() const;
  void dump(const PhaseCFG* cfg) const;
#endif
};


//------------------------------PhaseCFG---------------------------------------
// Build an array of Basic Block pointers, one per Node.
class PhaseCFG : public Phase {
  friend class VMStructs;
private:

  // Root of whole program
  RootNode* _root;

  // The block containing the root node
  Block* _root_block;

  // List of basic blocks that are created during CFG creation
  Block_List _blocks;

  // Count of basic blocks
  uint _number_of_blocks;

  // Arena for the blocks to be stored in
  Arena* _block_arena;

  // Info used for scheduling
  PhaseChaitin* _regalloc;

  // Register pressure heuristic used?
  bool _scheduling_for_pressure;

  // The matcher for this compilation
  Matcher& _matcher;

  // Map nodes to owning basic block
  Block_Array _node_to_block_mapping;

  // Loop from the root
  CFGLoop* _root_loop;

  // Outermost loop frequency
  double _outer_loop_frequency;

  // Per node latency estimation, valid only during GCM
  GrowableArray<uint>* _node_latency;

  // Build a proper-looking CFG.  Returns the count of basic blocks.
  uint build_cfg();

  // Build the dominator tree so that we know where we can move instructions
  void build_dominator_tree();

  // Estimate block frequencies based on IfNode probabilities, so that we know
  // where we want to move instructions
  void estimate_block_frequency();

  // Global Code Motion.  See Click's PLDI95 paper.  Place Nodes in specific
  // basic blocks; i.e. _node_to_block_mapping now maps _idx for all Nodes to
  // some Block.  Move nodes to ensure correctness from GVN and also try to
  // move nodes out of loops.
  void global_code_motion();

  // Schedule Nodes early in their basic blocks.
  bool schedule_early(VectorSet &visited, Node_List &roots);

  // For each node, find the latest block it can be scheduled into
  // and then select the cheapest block between the latest and earliest
  // block to place the node.
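  // (The "cheapest" choice walks the dominator chain from the latest legal
  // block up toward the earliest one, picking the block with the lowest
  // estimated execution frequency; see hoist_to_cheaper_block below.)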
  void schedule_late(VectorSet &visited, Node_List &stack);

  // Compute the (backwards) latency of a node from a single use
  int latency_from_use(Node *n, const Node *def, Node *use);

  // Compute the (backwards) latency of a node from the uses of this instruction
  void partial_latency_of_defs(Node *n);

  // Compute the instruction global latency with a backwards walk
  void compute_latencies_backwards(VectorSet &visited, Node_List &stack);

  // Pick a block between early and late that is a cheaper alternative
  // to late.  Helper for schedule_late.
  Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);

  bool schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call, intptr_t* recalc_pressure_nodes);
  void set_next_call(Block* block, Node* n, VectorSet& next_call);
  void needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call);

  // Perform basic-block local scheduling
  Node* select(Block* block, Node_List& worklist, GrowableArray<int>& ready_cnt, VectorSet& next_call, uint sched_slot,
               intptr_t* recalc_pressure_nodes);
  void adjust_register_pressure(Node* n, Block* block, intptr_t *recalc_pressure_nodes, bool finalize_mode);

  // Schedule a call next in the block
  uint sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call);

  // Cleanup if any code lands between a Call and its Catch
  void call_catch_cleanup(Block* block);

  Node* catch_cleanup_find_cloned_def(Block* use_blk, Node* def, Block* def_blk, int n_clone_idx);
  void  catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx);

  // Detect implicit-null-check opportunities.  Basically, find NULL checks
  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
  // I can generate a memory op if there is not one nearby.
  void implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons);
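
  // Illustrative sketch of the transformation (made-up assembly, not actual
  // generated code):
  //
  //   before:  testq rax, rax           after:  movl rbx, [rax + 8]
  //            je    uncommon_path
  //            movl  rbx, [rax + 8]
  //
  // The load itself performs the NULL check: if rax is NULL the load faults,
  // and the signal handler transfers control to the uncommon path.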

  // Perform a Depth First Search (DFS).
  // Setup 'vertex' as DFS to vertex mapping.
  // Setup 'semi' as vertex to DFS mapping.
  // Set 'parent' to DFS parent.
  uint do_DFS(Tarjan* tarjan, uint rpo_counter);

  // Helper function to insert a node into a block
  void schedule_node_into_block( Node *n, Block *b );

  void replace_block_proj_ctrl( Node *n );

  // Set the basic block for pinned Nodes
  void schedule_pinned_nodes( VectorSet &visited );

  // I'll need a few machine-specific GotoNodes.  Clone from this one.
  // Used when building the CFG and creating end nodes for blocks.
  MachNode* _goto;

  Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false);
  void verify_anti_dependences(Block* LCA, Node* load) {
    assert(LCA == get_block_for_node(load), "should already be scheduled");
    insert_anti_dependences(LCA, load, true);
  }

  bool move_to_next(Block* bx, uint b_index);
  void move_to_end(Block* bx, uint b_index);

  void insert_goto_at(uint block_no, uint succ_no);

  // Check for NeverBranch at block end.  This needs to become a GOTO to the
  // true target.  A NeverBranch is treated as a conditional branch that
  // always goes the same direction for most of the optimizer, and is used to
  // give a fake exit path to infinite loops.  At this late stage it needs to
  // turn into a Goto so that when you enter the infinite loop you indeed hang.
  void convert_NeverBranch_to_Goto(Block *b);

  CFGLoop* create_loop_tree();

#ifndef PRODUCT
  bool _trace_opto_pipelining;  // tracing flag
#endif

public:
  PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher);

  void set_latency_for_node(Node* node, int latency) {
    _node_latency->at_put_grow(node->_idx, latency);
  }

  uint get_latency_for_node(Node* node) {
    return _node_latency->at_grow(node->_idx);
  }

  // Get the outermost loop frequency
  double get_outer_loop_frequency() const {
    return _outer_loop_frequency;
  }

  // Get the root node of the CFG
  RootNode* get_root_node() const {
    return _root;
  }

  // Get the block of the root node
  Block* get_root_block() const {
    return _root_block;
  }

  // Add a block at position pos, moving all later blocks up by one
  void add_block_at(uint pos, Block* block) {
    _blocks.insert(pos, block);
    _number_of_blocks++;
  }

  // Add a block to the end of the block list
  void add_block(Block* block) {
    _blocks.push(block);
    _number_of_blocks++;
  }

  // Clear the list of blocks
  void clear_blocks() {
    _blocks.reset();
    _number_of_blocks = 0;
  }

  // Get the block at position pos in _blocks
  Block* get_block(uint pos) const {
    return _blocks[pos];
  }

  // Number of blocks
  uint number_of_blocks() const {
    return _number_of_blocks;
  }

  // Set which block this node should reside in
  void map_node_to_block(const Node* node, Block* block) {
    _node_to_block_mapping.map(node->_idx, block);
  }

  // Remove the mapping from a node to a block
  void unmap_node_from_block(const Node* node) {
    _node_to_block_mapping.map(node->_idx, NULL);
  }

  // Get the block in which this node resides
  Block* get_block_for_node(const Node* node) const {
    return _node_to_block_mapping[node->_idx];
  }

  // Returns true if the node resides in a block
  bool has_block(const Node* node) const {
    return (_node_to_block_mapping.lookup(node->_idx) != NULL);
  }

  // Use frequency calculations and code shape to predict if the block
  // is uncommon.
  bool is_uncommon(const Block* block);

#ifdef ASSERT
  Unique_Node_List _raw_oops;
#endif

  // Do global code motion by first building the dominator tree and then
  // estimating block frequencies.  Returns true on success.
  bool do_global_code_motion();

  // Compute the (backwards) latency of a node from the uses
  void latency_from_uses(Node *n);

  // Set loop alignment
  void set_loop_alignment();

  // Remove empty basic blocks
  void remove_empty_blocks();
  Block *fixup_trap_based_check(Node *branch, Block *block, int block_pos, Block *bnext);
  void fixup_flow();

  // Insert a node into a block at index and map the node to the block
  void insert(Block *b, uint idx, Node *n) {
    b->insert_node(n, idx);
    map_node_to_block(n, b);
  }
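
  // Usage sketch (illustrative; 'cfg', 'blk' and 'n' are hypothetical):
  //
  //   cfg.insert(blk, 1, n);   // place n right after blk's head node
  //   assert(cfg.get_block_for_node(n) == blk, "node now owned by blk");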

  // Check all nodes and postalloc_expand them if necessary.
  void postalloc_expand(PhaseRegAlloc* _ra);

#ifndef PRODUCT
  bool trace_opto_pipelining() const { return _trace_opto_pipelining; }

  // Debugging print of CFG
  void dump( ) const;           // CFG only
  void _dump_cfg( const Node *end, VectorSet &visited ) const;
  void verify() const;
  void dump_headers();
#else
  bool trace_opto_pipelining() const { return false; }
#endif
};


//------------------------------UnionFind--------------------------------------
// Map Block indices to a block-index for a cfg-cover.  Array lookup in the
// optimized case.
class UnionFind : public ResourceObj {
  uint _cnt, _max;
  uint* _indices;
  ReallocMark _nesting;         // assertion check for reallocations
public:
  UnionFind( uint max );
  void reset( uint max );       // Reset to identity map for [0..max]

  uint lookup( uint nidx ) const {
    return _indices[nidx];
  }
  uint operator[] (uint nidx) const { return lookup(nidx); }

  void map( uint from_idx, uint to_idx ) {
    assert( from_idx < _cnt, "oob" );
    _indices[from_idx] = to_idx;
  }
  void extend( uint from_idx, uint to_idx );

  uint Size() const { return _cnt; }

  uint Find( uint idx ) {
    assert( idx < 65536, "Must fit into uint");
    uint uf_idx = lookup(idx);
    return (uf_idx == idx) ? uf_idx : Find_compress(idx);
  }
  uint Find_compress( uint idx );
  uint Find_const( uint idx ) const;
  void Union( uint idx1, uint idx2 );
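
  // Path-compression sketch (illustrative): after a Union the map might read
  //   _indices: [0, 1, 2, 7, 4, 5, 6, 7]   // index 3 points at root 7
  // Find(3) chases the chain to the root; Find_compress additionally rewrites
  // the entries it visits to point directly at the root, so repeated lookups
  // become a single array access.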
};

//----------------------------BlockProbPair---------------------------
// Ordered pair of a block target and the probability of the edge to it.
class BlockProbPair VALUE_OBJ_CLASS_SPEC {
protected:
  Block* _target;               // block target
  double _prob;                 // probability of edge to block
public:
  BlockProbPair() : _target(NULL), _prob(0.0) {}
  BlockProbPair(Block* b, double p) : _target(b), _prob(p) {}

  Block* get_target() const { return _target; }
  double get_prob() const { return _prob; }
};

//------------------------------CFGLoop-------------------------------------------
class CFGLoop : public CFGElement {
  friend class VMStructs;
  int _id;
  int _depth;
  CFGLoop *_parent;             // root of the loop tree is the method-level "pseudo" loop; its parent is null
  CFGLoop *_sibling;            // null terminated list
  CFGLoop *_child;              // first child, use child's sibling to visit all immediately nested loops
  GrowableArray<CFGElement*> _members; // list of members of loop
  GrowableArray<BlockProbPair> _exits; // list of successor blocks and their probabilities
  double _exit_prob;            // probability any loop exit is taken on a single loop iteration
  void update_succ_freq(Block* b, double freq);

public:
  CFGLoop(int id) :
    CFGElement(),
    _id(id),
    _depth(0),
    _parent(NULL),
    _sibling(NULL),
    _child(NULL),
    _exit_prob(1.0) {}
  CFGLoop* parent() { return _parent; }
  void push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg);
  void add_member(CFGElement *s) { _members.push(s); }
  void add_nested_loop(CFGLoop* cl);
  Block* head() {
    assert(_members.at(0)->is_block(), "head must be a block");
    Block* hd = _members.at(0)->as_Block();
    assert(hd->_loop == this, "just checking");
    assert(hd->head()->is_Loop(), "must begin with loop head node");
    return hd;
  }
  Block* backedge_block();      // Return the block on the backedge of the loop (else NULL)
  void compute_loop_depth(int depth);
  void compute_freq();          // compute frequencies within the loop, assuming head freq 1.0
  void scale_freq();            // scale frequency by loop trip count (including outer loops)
  double outer_loop_freq() const; // frequency of outer loop
  bool in_loop_nest(Block* b);
  double trip_count() const { return 1.0 / _exit_prob; }
  virtual bool is_loop() { return true; }
  int id() { return _id; }

#ifndef PRODUCT
  void dump( ) const;
  void dump_tree() const;
#endif
};


//----------------------------------CFGEdge------------------------------------
// An edge between two basic blocks that will be embodied by a branch or a
// fall-through.
class CFGEdge : public ResourceObj {
  friend class VMStructs;
private:
  Block * _from;                // Source basic block
  Block * _to;                  // Destination basic block
  double _freq;                 // Execution frequency (estimate)
  int    _state;
  bool   _infrequent;
  int    _from_pct;
  int    _to_pct;

  // Private accessors
  int from_pct() const { return _from_pct; }
  int to_pct()   const { return _to_pct;   }
  int from_infrequent() const { return from_pct() < BlockLayoutMinDiamondPercentage; }
  int to_infrequent()   const { return to_pct()   < BlockLayoutMinDiamondPercentage; }

public:
  enum {
    open,                       // initial edge state; unprocessed
    connected,                  // edge used to connect two traces together
    interior                    // edge is interior to trace (could be backedge)
  };

  CFGEdge(Block *from, Block *to, double freq, int from_pct, int to_pct) :
    _from(from), _to(to), _freq(freq), _state(open),
    _from_pct(from_pct), _to_pct(to_pct) {
    _infrequent = from_infrequent() || to_infrequent();
  }

  double freq() const { return _freq; }
  Block* from() const { return _from; }
  Block* to  () const { return _to;   }
  int infrequent() const { return _infrequent; }
  int state() const { return _state; }

  void set_state(int state) { _state = state; }

#ifndef PRODUCT
  void dump( ) const;
#endif
};


//-----------------------------------Trace-------------------------------------
// An ordered list of basic blocks.
class Trace : public ResourceObj {
private:
  uint _id;                     // Unique Trace id (derived from initial block)
  Block ** _next_list;          // Array mapping index to next block
  Block ** _prev_list;          // Array mapping index to previous block
  Block * _first;               // First block in the trace
  Block * _last;                // Last block in the trace

  // Return the block that follows "b" in the trace.
  Block * next(Block *b) const { return _next_list[b->_pre_order]; }
  void set_next(Block *b, Block *n) const { _next_list[b->_pre_order] = n; }

  // Return the block that precedes "b" in the trace.
  Block * prev(Block *b) const { return _prev_list[b->_pre_order]; }
  void set_prev(Block *b, Block *p) const { _prev_list[b->_pre_order] = p; }
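
  // Illustrative layout: the trace is a doubly-linked list threaded through
  // these side arrays, keyed by each block's _pre_order number.  A trace
  // B1 -> B4 -> B2 (hypothetical blocks) would give
  //   next(B1) == B4, next(B4) == B2, next(B2) == NULL
  //   prev(B2) == B4, prev(B4) == B1, prev(B1) == NULL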

  // We've discovered a loop in this trace.  Reset last to be "b", and first
  // as the block following "b".
  void break_loop_after(Block *b) {
    _last = b;
    _first = next(b);
    set_prev(_first, NULL);
    set_next(_last, NULL);
  }

public:

  Trace(Block *b, Block **next_list, Block **prev_list) :
    _id(b->_pre_order),
    _next_list(next_list),
    _prev_list(prev_list),
    _first(b),
    _last(b) {
    set_next(b, NULL);
    set_prev(b, NULL);
  }

  // Return the id number
  uint id() const { return _id; }
  void set_id(uint id) { _id = id; }

  // Return the first block in the trace
  Block * first_block() const { return _first; }

  // Return the last block in the trace
  Block * last_block() const { return _last; }

  // Insert a trace in the middle of this one after b
  void insert_after(Block *b, Trace *tr) {
    set_next(tr->last_block(), next(b));
    if (next(b) != NULL) {
      set_prev(next(b), tr->last_block());
    }

    set_next(b, tr->first_block());
    set_prev(tr->first_block(), b);

    if (b == _last) {
      _last = tr->last_block();
    }
  }

  void insert_before(Block *b, Trace *tr) {
    Block *p = prev(b);
    assert(p != NULL, "use append instead");
    insert_after(p, tr);
  }

  // Append another trace to this one.
  void append(Trace *tr) {
    insert_after(_last, tr);
  }

  // Append a block at the end of this trace
  void append(Block *b) {
    set_next(_last, b);
    set_prev(b, _last);
    _last = b;
  }

  // Adjust the blocks in this trace
  void fixup_blocks(PhaseCFG &cfg);
  bool backedge(CFGEdge *e);

#ifndef PRODUCT
  void dump( ) const;
#endif
};

//------------------------------PhaseBlockLayout-------------------------------
// Rearrange blocks into some canonical order, based on edges and their
// frequencies
class PhaseBlockLayout : public Phase {
  friend class VMStructs;
  PhaseCFG &_cfg;               // Control flow graph

  GrowableArray<CFGEdge *> *edges;
  Trace **traces;
  Block **next;
  Block **prev;
  UnionFind *uf;

  // Given a block, find its encompassing Trace
  Trace * trace(Block *b) {
    return traces[uf->Find_compress(b->_pre_order)];
  }
public:
  PhaseBlockLayout(PhaseCFG &cfg);

  void find_edges();
  void grow_traces();
  void merge_traces(bool loose_connections);
  void reorder_traces(int count);
  void union_traces(Trace* from, Trace* to);
};

#endif // SHARE_VM_OPTO_BLOCK_HPP