#ifdef USE_PRAGMA_IDENT_SRC
#pragma ident "@(#)block.cpp 1.172 07/09/28 10:23:15 JVM"
#endif
/*
 * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
  }
  uint old = _size;
  while( i >= _size ) _size <<= 1;      // Double to fit
  _blocks = (Block**)_arena->Arealloc( _blocks, old*sizeof(Block*),_size*sizeof(Block*));
  Copy::zero_to_bytes( &_blocks[old], (_size-old)*sizeof(Block*) );
}

//=============================================================================
void Block_List::remove(uint i) {
  assert(i < _cnt, "index out of bounds");
  Copy::conjoint_words_to_lower((HeapWord*)&_blocks[i+1], (HeapWord*)&_blocks[i], ((_cnt-i-1)*sizeof(Block*)));
  pop(); // shrink list by one block
}

void Block_List::insert(uint i, Block *b) {
  push(b); // grow list by one block
  Copy::conjoint_words_to_higher((HeapWord*)&_blocks[i], (HeapWord*)&_blocks[i+1], ((_cnt-i-1)*sizeof(Block*)));
  _blocks[i] = b;
}
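
// Example: with _blocks == { A, B, C } and _cnt == 3, insert(1, X)
// yields { A, X, B, C }: push() grows the list by one, then the tail
// is shifted up a slot to open index 1.  remove(1) on that result
// shifts { B, C } back down and pops, restoring { A, B, C }.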

#ifndef PRODUCT
void Block_List::print() {
  for (uint i=0; i < size(); i++) {
    tty->print("B%d ", _blocks[i]->_pre_order);
  }
  tty->print("size = %d\n", size());
}
#endif

//=============================================================================

uint Block::code_alignment() {
  // Check for Root block
  if( _pre_order == 0 ) return CodeEntryAlignment;
  // Check for Start block
  if( _pre_order == 1 ) return InteriorEntryAlignment;
  // Check for loop alignment
  if (has_loop_alignment()) return loop_alignment();

  return 1;                     // no particular alignment
}

uint Block::compute_loop_alignment() {
  Node *h = head();
  if( h->is_Loop() && h->as_Loop()->is_inner_loop() ) {
    // Pre- and post-loops have low trip count so do not bother with
    // NOPs to align the loop head.  The constants are hidden from tuning
    // but only because my "divide by 4" heuristic surely gets nearly
    // all possible gain (a "do not align at all" heuristic has a
    // chance of getting a really tiny gain).
    if( h->is_CountedLoop() && (h->as_CountedLoop()->is_pre_loop() ||
                                h->as_CountedLoop()->is_post_loop()) )
      return (OptoLoopAlignment > 4) ? (OptoLoopAlignment>>2) : 1;
    // Loops with low backedge frequency should not be aligned.
    Node *n = h->in(LoopNode::LoopBackControl)->in(0);
    if( n->is_MachIf() && n->as_MachIf()->_prob < 0.01 ) {
      return 1;                 // Loop does not loop, more often than not!
    }
    return OptoLoopAlignment;   // Otherwise align loop head
  }

  return 1;                     // no particular alignment
}
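
// Example (assuming the common default OptoLoopAlignment == 16): an
// inner counted pre- or post-loop head gets 16>>2 == 4 bytes of
// alignment, an inner loop whose backedge test has _prob < 0.01 gets
// 1 (i.e. no alignment), and any other inner loop head gets the full
// 16 bytes.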

//-----------------------------------------------------------------------------
// Compute the size of first 'inst_cnt' instructions in this block.
// Return the number of instructions left to compute if the block has
// fewer than 'inst_cnt' instructions.  Stop, and return 0 if sum_size
// exceeds OptoLoopAlignment.
uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
                                    PhaseRegAlloc* ra) {
  uint last_inst = _nodes.size();
  for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) {
    uint inst_size = _nodes[j]->size(ra);
    if( inst_size > 0 ) {
      inst_cnt--;
      uint sz = sum_size + inst_size;
      if( sz <= (uint)OptoLoopAlignment ) {
        // Compute size of instructions which fit into fetch buffer only
        // since all inst_cnt instructions will not fit even if we align them.
        sum_size = sz;
      } else {
        return 0;
      }
    }
  }
  return inst_cnt;
}
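
// Example: with OptoLoopAlignment == 16 (assumed) and node sizes
// { 4, 0, 8, 8 }, a call with inst_cnt == 3 accumulates sum_size = 4,
// then 12 (the zero-size node is skipped), and the next instruction
// would push sum_size to 20 > 16, so the call returns 0 with sum_size
// left at 12.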

        while (!s->is_block_start())
          s = s->in(0);
        tty->print("N%d ", s->_idx );
      }
    }
  } else
    tty->print("BLOCK HEAD IS JUNK ");

  // Print loop, if any
  const Block *bhead = this;    // Head of self-loop
  Node *bh = bhead->head();
  if( bbs && bh->is_Loop() && !head()->is_Root() ) {
    LoopNode *loop = bh->as_Loop();
    const Block *bx = (*bbs)[loop->in(LoopNode::LoopBackControl)->_idx];
    while (bx->is_connector()) {
      bx = (*bbs)[bx->pred(1)->_idx];
    }
    tty->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order);
    // Dump any loop-specific bits, especially for CountedLoops.
    loop->dump_spec(tty);
  } else if (has_loop_alignment()) {
    tty->print(" top-of-loop");
  }
  tty->print(" Freq: %g",_freq);
  if( Verbose || WizardMode ) {
    tty->print(" IDom: %d/#%d", _idom ? _idom->_pre_order : 0, _dom_depth);
    tty->print(" RegPressure: %d",_reg_pressure);
    tty->print(" IHRP Index: %d",_ihrp_index);
    tty->print(" FRegPressure: %d",_freg_pressure);
    tty->print(" FHRP Index: %d",_fhrp_index);
  }
  tty->print_cr("");
}

void Block::dump() const { dump(0); }

void Block::dump( const Block_Array *bbs ) const {
  dump_head(bbs);
  uint cnt = _nodes.size();
  for( uint i=0; i<cnt; i++ )
    _nodes[i]->dump();
  tty->print("\n");
      // Insert self as a child of my predecessor block
      pb->_succs.map(pb->_num_succs++, _bbs[np->_idx]);
      assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(),
              "too many control users, not a CFG?" );
    }
  }
  // Return number of basic blocks for all children and self
  return sum;
}

//------------------------------insert_goto_at---------------------------------
// Inserts a goto & corresponding basic block between
// block[block_no] and its succ_no'th successor block
void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
  // get block with block_no
  assert(block_no < _num_blocks, "illegal block number");
  Block* in  = _blocks[block_no];
  // get successor block succ_no
  assert(succ_no < in->_num_succs, "illegal successor number");
  Block* out = in->_succs[succ_no];
  // Compute frequency of the new block.  Do this before inserting the
  // new block in case succ_prob() needs to infer the probability from
  // surrounding blocks.
  float freq = in->_freq * in->succ_prob(succ_no);
  // get ProjNode corresponding to the succ_no'th successor of the in block
  ProjNode* proj = in->_nodes[in->_nodes.size() - in->_num_succs + succ_no]->as_Proj();
  // create region for basic block
  RegionNode* region = new (C, 2) RegionNode(2);
  region->init_req(1, proj);
  // setup corresponding basic block
  Block* block = new (_bbs._arena) Block(_bbs._arena, region);
  _bbs.map(region->_idx, block);
  C->regalloc()->set_bad(region->_idx);
  // add a goto node
  Node* gto = _goto->clone(); // get a new goto node
  gto->set_req(0, region);
  // add it to the basic block
  block->_nodes.push(gto);
  _bbs.map(gto->_idx, block);
  C->regalloc()->set_bad(gto->_idx);
  // hook up successor block
  block->_succs.map(block->_num_succs++, out);
  // remap successor's predecessors if necessary
  for (uint i = 1; i < out->num_preds(); i++) {
    if (out->pred(i) == proj) out->head()->set_req(i, gto);
  }
  // remap predecessor's successor to new block
  in->_succs.map(succ_no, block);
  // Set the frequency of the new block
  block->_freq = freq;
  // add new basic block to basic block list
  _blocks.insert(block_no + 1, block);
  _num_blocks++;
}
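
// Sketch of the transformation above, for in->_succs[succ_no] == out:
//
//   before:  in --proj--> out
//   after:   in --proj--> [Region; Goto] --> out
//
// The new block inherits frequency in->_freq * succ_prob(succ_no) and
// is spliced into _blocks right after 'in', so later layout passes can
// still treat it as the fall-through.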

//------------------------------no_flip_branch---------------------------------
// Does this block end in a multiway branch that cannot have the default case
// flipped for another case?
static bool no_flip_branch( Block *b ) {
  int branch_idx = b->_nodes.size() - b->_num_succs-1;
  if( branch_idx < 1 ) return false;
  Node *bra = b->_nodes[branch_idx];
  if( bra->is_Catch() )
    return true;
  if( bra->is_Mach() ) {
    if( bra->is_MachNullCheck() )
      return true;
    int iop = bra->as_Mach()->ideal_Opcode();
    if( iop == Op_FastLock || iop == Op_FastUnlock )
      return true;
  }
  return false;
}

//------------------------------convert_NeverBranch_to_Goto--------------------
// Check for NeverBranch at block end.  This needs to become a GOTO to the
// true target.  NeverBranch nodes are treated as a conditional branch that
// always goes the same direction for most of the optimizer, and are used to
// give infinite loops a fake exit path.  At this late stage they need to turn
// into Gotos so that when you enter the infinite loop you indeed hang.
void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
  // Find true target
  int end_idx = b->end_idx();
  int idx = b->_nodes[end_idx+1]->as_Proj()->_con;
  Block *succ = b->_succs[idx];
  Node* gto = _goto->clone(); // get a new goto node
  gto->set_req(0, b->head());
  b->_nodes.pop();              // Yank projections
  b->_succs.map(0,succ);        // Map only successor
  b->_num_succs = 1;
  // remap successor's predecessors if necessary
  uint j;
  for( j = 1; j < succ->num_preds(); j++)
    if( succ->pred(j)->in(0) == bp )
      succ->head()->set_req(j, gto);
  // Kill alternate exit path
  Block *dead = b->_succs[1-idx];
  for( j = 1; j < dead->num_preds(); j++)
    if( dead->pred(j)->in(0) == bp )
      break;
  // Scan through block, yanking dead path from
  // all regions and phis.
  dead->head()->del_req(j);
  for( int k = 1; dead->_nodes[k]->is_Phi(); k++ )
    dead->_nodes[k]->del_req(j);
}
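
// Note: after this runs, b ends in the cloned Goto with exactly one
// successor; the untaken NeverBranch arm ('dead') loses the matching
// predecessor edge, which is also deleted from its head Region and
// from every leading Phi so that their input counts stay in sync.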

//------------------------------move_to_next-----------------------------------
// Helper function to move block bx to the slot following b_index.  Return
// true if the move is successful, otherwise false.
bool PhaseCFG::move_to_next(Block* bx, uint b_index) {
  if (bx == NULL) return false;

  // Return false if bx is already scheduled.
  uint bx_index = bx->_pre_order;
  if ((bx_index <= b_index) && (_blocks[bx_index] == bx)) {
    return false;
  }

  // Find the current index of block bx on the block list
  bx_index = b_index + 1;
  while( bx_index < _num_blocks && _blocks[bx_index] != bx ) bx_index++;
  assert(_blocks[bx_index] == bx, "block not found");

  // If the previous block conditionally falls into bx, return false,
  // because moving bx will create an extra jump.
  for(uint k = 1; k < bx->num_preds(); k++ ) {
    Block* pred = _bbs[bx->pred(k)->_idx];
    if (pred == _blocks[bx_index-1]) {
      if (pred->_num_succs != 1) {
        return false;
      }
    }
  }

  // Reinsert bx just past block 'b'
  _blocks.remove(bx_index);
  _blocks.insert(b_index + 1, bx);
  return true;
}
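
// Example: with _blocks == { ..., b (at b_index), y, z, bx, ... }, a
// successful call reshuffles the list to { ..., b, bx, y, z, ... }.
// The move is refused if bx is already placed at or before b_index,
// or if the block currently preceding bx conditionally falls into it,
// since moving bx away would then force an extra jump there.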

//------------------------------move_to_end------------------------------------
// Move empty and uncommon blocks to the end.
void PhaseCFG::move_to_end(Block *b, uint i) {
  int e = b->is_Empty();
  if (e != Block::not_empty) {
    if (e == Block::empty_with_goto) {
      // Remove the goto, but leave the block.
      b->_nodes.pop();
    }
    // Mark this block as a connector block, which will cause it to be
    // ignored in certain functions such as non_connector_successor().
    b->set_connector();
  }
  // Move the empty block to the end, and don't recheck.
  _blocks.remove(i);
  _blocks.push(b);
}

//---------------------------set_loop_alignment--------------------------------
// Set loop alignment for every block
void PhaseCFG::set_loop_alignment() {
  uint last = _num_blocks;
  assert( _blocks[0] == _broot, "" );

  for (uint i = 1; i < last; i++ ) {
    Block *b = _blocks[i];
    if (b->head()->is_Loop()) {
      b->set_loop_alignment(b);
    }
  }
}

//-----------------------------remove_empty------------------------------------
// Turn empty basic blocks into "connector" blocks, and move uncommon blocks
// to the end.
void PhaseCFG::remove_empty() {
  // Move uncommon blocks to the end
  uint last = _num_blocks;
  assert( _blocks[0] == _broot, "" );

  for (uint i = 1; i < last; i++) {
    Block *b = _blocks[i];
    if (b->is_connector()) break;

    // Check for NeverBranch at block end.  This needs to become a GOTO to the
    // true target.  NeverBranch nodes are treated as a conditional branch that
    // always goes the same direction for most of the optimizer, and are used
    // to give infinite loops a fake exit path.  At this late stage they need
    // to turn into Gotos so that when you enter the infinite loop you indeed
    // hang.
    if( b->_nodes[b->end_idx()]->Opcode() == Op_NeverBranch )
      convert_NeverBranch_to_Goto(b);

    // Look for uncommon blocks and move to end.
    if (!C->do_freq_based_layout()) {
      if( b->is_uncommon(_bbs) ) {
        move_to_end(b, i);
        last--;                   // No longer check for being uncommon!
        if( no_flip_branch(b) ) { // Fall-thru case must follow?
          b = _blocks[i];         // Find the fall-thru block
          move_to_end(b, i);
          last--;
        }
        i--;                      // backup block counter post-increment
      }
    }
  }

  // Move empty blocks to the end
  last = _num_blocks;
  for (uint i = 1; i < last; i++) {
    Block *b = _blocks[i];
    if (b->is_Empty() != Block::not_empty) {
      move_to_end(b, i);
      last--;
      i--;
    }
  } // End of for all blocks
}

//-----------------------------fixup_flow--------------------------------------
// Fix up the final control flow for basic blocks.
void PhaseCFG::fixup_flow() {
  // Fixup final control flow for the blocks.  Remove jump-to-next
  // block.  If neither arm of an IF follows the conditional branch, we
  // have to add a second jump after the conditional.  We place the
  // TRUE branch target in succs[0] for both GOTOs and IFs.
  for (uint i=0; i < _num_blocks; i++) {
    Block *b = _blocks[i];
    b->_pre_order = i;          // turn pre-order into block-index

    // Connector blocks need no further processing.
    if (b->is_connector()) {
      assert((i+1) == _num_blocks || _blocks[i+1]->is_connector(),
             "All connector blocks should sink to the end");
      continue;
    }
    assert(b->is_Empty() != Block::completely_empty,
           "Empty blocks should be connectors");

    Block *bnext = (i < _num_blocks-1) ? _blocks[i+1] : NULL;
    Block *bs0 = b->non_connector_successor(0);

    // Check for multi-way branches where I cannot negate the test to
    // exchange the true and false targets.
    if( no_flip_branch( b ) ) {
      // Find the fall-through case - it must fall into its target.
      int branch_idx = b->_nodes.size() - b->_num_succs;
      for (uint j2 = 0; j2 < b->_num_succs; j2++) {
        const ProjNode* p = b->_nodes[branch_idx + j2]->as_Proj();
        if (p->_con == 0) {
          // successor j2 is the fall-through case
          if (b->non_connector_successor(j2) != bnext) {
            // but it is not the next block => insert a goto
            insert_goto_at(i, j2);
          }
          // Put taken branch in slot 0
          if( j2 == 0 && b->_num_succs == 2) {
            // Flip targets in succs map
            Block *tbs0 = b->_succs[0];
            Block *tbs1 = b->_succs[1];
            b->_succs.map( 0, tbs1 );
            b->_succs.map( 1, tbs0 );
          }
          break;
        }
      }
      // Remove all CatchProjs
      for (uint j1 = 0; j1 < b->_num_succs; j1++) b->_nodes.pop();

    } else if (b->_num_succs == 1) {
      // Block ends in a Goto?
      if (bnext == bs0) {
        // We fall into next block; remove the Goto
        b->_nodes.pop();
      }

    } else if( b->_num_succs == 2 ) { // Block ends in an If?
      // Get opcode of 1st projection (matches _succs[0])
      // Note: Since this basic block has 2 exits, the last 2 nodes must
      //       be projections (in any order), the 3rd last node must be
      //       the IfNode (we have excluded other 2-way exits such as
      //       CatchNodes already).
      MachNode *iff   = b->_nodes[b->_nodes.size()-3]->as_Mach();
      ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj();
      ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj();

      // Assert that proj0 and succs[0] match up.  Similarly for proj1 and succs[1].
      assert(proj0->raw_out(0) == b->_succs[0]->head(), "Mismatch successor 0");
      assert(proj1->raw_out(0) == b->_succs[1]->head(), "Mismatch successor 1");

      Block *bs1 = b->non_connector_successor(1);

      // Check for neither successor block following the current
      // block ending in a conditional.  If so, move one of the
      // successors after the current one, provided that the
      // successor was previously unscheduled, but moveable
      // (i.e., all paths to it involve a branch).
      if( !C->do_freq_based_layout() && bnext != bs0 && bnext != bs1 ) {
        // Choose the more common successor based on the probability
        // of the conditional branch.
        Block *bx = bs0;
        Block *by = bs1;

        // _prob is the probability of taking the true path.  Make
        // p the probability of taking successor #1.
        float p = iff->as_MachIf()->_prob;
        if( proj0->Opcode() == Op_IfTrue ) {
          p = 1.0 - p;
        }

        // Prefer successor #1 if p > 0.5
        if (p > PROB_FAIR) {
          bx = bs1;
          by = bs0;
        }

        // Attempt the more common successor first
        if (move_to_next(bx, i)) {
          bnext = bx;
        } else if (move_to_next(by, i)) {
          bnext = by;
        }
      }

      // Check for conditional branching the wrong way.  Negate
      // conditional, if needed, so it falls into the following block
      // and branches to the not-following block.

      // Check for the next block being in succs[0].  We are going to branch
      // to succs[0], so we want the fall-thru case as the next block in
      // succs[1].
      if (bnext == bs0) {
        // Fall-thru case in succs[0], so flip targets in succs map
        Block *tbs0 = b->_succs[0];
        Block *tbs1 = b->_succs[1];
        b->_succs.map( 0, tbs1 );
        b->_succs.map( 1, tbs0 );
        // Flip projection for each target
        { ProjNode *tmp = proj0; proj0 = proj1; proj1 = tmp; }

      } else if( bnext != bs1 ) {
        // Need a double-branch.
        // The existing conditional branch need not change.
        // Add an unconditional branch to the false target.
        // Alas, it must appear in its own block and adding a
        // block this late in the game is complicated.  Sigh.
        insert_goto_at(i, 1);
      }

      // Make sure we TRUE branch to the target
      if( proj0->Opcode() == Op_IfFalse ) {
        iff->negate();
      }

      b->_nodes.pop();          // Remove IfFalse & IfTrue projections
      b->_nodes.pop();

    } else {
      // Multi-exit block, e.g. a switch statement
      // But we don't need to do anything here
    }
  } // End of for all blocks
}


//------------------------------dump-------------------------------------------
#ifndef PRODUCT
void PhaseCFG::_dump_cfg( const Node *end, VectorSet &visited ) const {
  const Node *x = end->is_block_proj();
  assert( x, "not a CFG" );

  // Do not visit this block again
  if( visited.test_set(x->_idx) ) return;

  // Skip through this block
  const Node *p = x;
  do {
    p = p->in(0);               // Move control forward
    assert( !p->is_block_proj() || p->is_Root(), "not a CFG" );
  } while( !p->is_block_start() );

  // Recursively visit
  Copy::zero_to_bytes( _indices, sizeof(uint)*max );
}

void UnionFind::extend( uint from_idx, uint to_idx ) {
  _nesting.check();
  if( from_idx >= _max ) {
    uint size = 16;
    while( size <= from_idx ) size <<=1;
    _indices = REALLOC_RESOURCE_ARRAY( uint, _indices, _max, size );
    _max = size;
  }
  while( _cnt <= from_idx ) _indices[_cnt++] = 0;
  _indices[from_idx] = to_idx;
}

void UnionFind::reset( uint max ) {
  assert( max <= max_uint, "Must fit within uint" );
  // Force the Union-Find mapping to be at least this large
  extend(max,0);
  // Initialize to be the ID mapping.
  for( uint i=0; i<max; i++ ) map(i,i);
}
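
// Usage sketch: reset(n) grows the table to cover indices [0,n) and
// makes every element its own root; Union below then always maps a
// larger root onto a smaller one, so every chain of equivalences is
// strictly decreasing, which is the invariant Find_compress asserts.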

//------------------------------Find_compress----------------------------------
// Straight out of Tarjan's union-find algorithm
uint UnionFind::Find_compress( uint idx ) {
  uint cur  = idx;
  uint next = lookup(cur);
  while( next != cur ) {        // Scan chain of equivalences
    assert( next < cur, "always union smaller" );
    cur = next;                 // until find a fixed-point
    next = lookup(cur);
  }
  // Core of union-find algorithm: update chain of
  // equivalences to be equal to the root.
  while( idx != next ) {
    uint tmp = lookup(idx);
    map(idx, next);
    idx = tmp;
  }
  return idx;
}
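
// Example: with the chain 9 -> 7 -> 3 -> 3, Find_compress(9) first
// walks to the fixed point 3, then rewrites the entries for 9 and 7
// to map straight to 3, so subsequent lookups along this chain take
// one step until new unions are made.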

//------------------------------Find_const-------------------------------------
// Like Find above, but no path compress, so bad asymptotic behavior
uint UnionFind::Find_const( uint idx ) const {
  if( idx == 0 ) return idx;    // Ignore the zero idx
  // Off the end?  This can happen during debugging dumps
  // when data structures have not finished being updated.
  if( idx >= _max ) return idx;
  uint next = lookup(idx);
  while( next != idx ) {        // Scan chain of equivalences
    idx = next;                 // until find a fixed-point
    next = lookup(idx);
  }
  return next;
}

//------------------------------Union------------------------------------------
// union 2 sets together.
void UnionFind::Union( uint idx1, uint idx2 ) {
  uint src = Find(idx1);
  uint dst = Find(idx2);
  assert( src, "" );
  assert( dst, "" );
  assert( src < _max, "oob" );
  assert( dst < _max, "oob" );
  assert( src < dst, "always union smaller" );
  map(dst,src);
}
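
// Example: Union(2, 5) with Find(2) == 2 and Find(5) == 4 maps root 4
// onto root 2.  Callers must arrange that Find(idx1) < Find(idx2);
// that is what keeps every equivalence chain strictly decreasing.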

#ifndef PRODUCT
static void edge_dump(GrowableArray<CFGEdge *> *edges) {
  tty->print_cr("---- Edges ----");
  for (int i = 0; i < edges->length(); i++) {
    CFGEdge *e = edges->at(i);
    if (e != NULL) {
      edges->at(i)->dump();
    }
  }
}

static void trace_dump(Trace *traces[], int count) {
  tty->print_cr("---- Traces ----");
  for (int i = 0; i < count; i++) {
    Trace *tr = traces[i];
    if (tr != NULL) {
      tr->dump();
    }
  }
}

void Trace::dump( ) const {
  tty->print_cr("Trace (freq %f)", first_block()->_freq);
  for (Block *b = first_block(); b != NULL; b = next(b)) {
    tty->print(" B%d", b->_pre_order);
    if (b->head()->is_Loop()) {
      tty->print(" (L%d)", b->compute_loop_alignment());
    }
    if (b->has_loop_alignment()) {
      tty->print(" (T%d)", b->code_alignment());
    }
  }
  tty->cr();
}

void CFGEdge::dump( ) const {
  tty->print(" B%d --> B%d Freq: %f out:%3d%% in:%3d%% State: ",
             from()->_pre_order, to()->_pre_order, freq(), _from_pct, _to_pct);
  switch(state()) {
  case connected:
    tty->print("connected");
    break;
  case open:
    tty->print("open");
    break;
  case interior:
    tty->print("interior");
    break;
  }
  if (infrequent()) {
    tty->print(" infrequent");
  }
  tty->cr();
}
#endif

//=============================================================================

//------------------------------edge_order-------------------------------------
// Comparison function for edges
static int edge_order(CFGEdge **e0, CFGEdge **e1) {
  float freq0 = (*e0)->freq();
  float freq1 = (*e1)->freq();
  if (freq0 != freq1) {
    return freq0 > freq1 ? -1 : 1;
  }

  int dist0 = (*e0)->to()->_rpo - (*e0)->from()->_rpo;
  int dist1 = (*e1)->to()->_rpo - (*e1)->from()->_rpo;

  return dist1 - dist0;
}
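
// Example ordering: an edge with freq 10.0 sorts ahead of one with
// freq 2.0; between two edges of equal frequency, the one spanning
// the larger forward RPO distance (to->_rpo - from->_rpo) comes
// first, since dist1 - dist0 is returned.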

//------------------------------trace_frequency_order--------------------------
// Comparison function for traces
static int trace_frequency_order(const void *p0, const void *p1) {
  Trace *tr0 = *(Trace **) p0;
  Trace *tr1 = *(Trace **) p1;
  Block *b0 = tr0->first_block();
  Block *b1 = tr1->first_block();

  // The trace of connector blocks goes at the end;
  // we only expect one such trace
  if (b0->is_connector() != b1->is_connector()) {
    return b1->is_connector() ? -1 : 1;
  }

  // Pull more frequently executed blocks to the beginning
  float freq0 = b0->_freq;
  float freq1 = b1->_freq;
  if (freq0 != freq1) {
    return freq0 > freq1 ? -1 : 1;
  }

  int diff = tr0->first_block()->_rpo - tr1->first_block()->_rpo;

  return diff;
}

//------------------------------find_edges-------------------------------------
// Find edges of interest, i.e., those which can fall through.  Presumes that
// edges which don't fall through are of low frequency and can be generally
// ignored.  Initialize the list of traces.
void PhaseBlockLayout::find_edges()
{
  // Walk the blocks, creating edges and Traces
  uint i;
  Trace *tr = NULL;
  for (i = 0; i < _cfg._num_blocks; i++) {
    Block *b = _cfg._blocks[i];
    tr = new Trace(b, next, prev);
    traces[tr->id()] = tr;

    // All connector blocks should be at the end of the list
    if (b->is_connector()) break;

    // If this block and the next one have a one-to-one successor
    // predecessor relationship, simply append the next block
    int nfallthru = b->num_fall_throughs();
    while (nfallthru == 1 &&
           b->succ_fall_through(0)) {
      Block *n = b->_succs[0];

      // Skip over single-entry connector blocks, we don't want to
      // add them to the trace.
      while (n->is_connector() && n->num_preds() == 1) {
        n = n->_succs[0];
      }

      // We see a merge point, so stop search for the next block
      if (n->num_preds() != 1) break;

      i++;
      assert(n == _cfg._blocks[i], "expecting next block");
      tr->append(n);
      uf->map(n->_pre_order, tr->id());
      traces[n->_pre_order] = NULL;
      nfallthru = b->num_fall_throughs();
      b = n;
    }

    if (nfallthru > 0) {
      // Create a CFGEdge for each outgoing
      // edge that could be a fall-through.
      for (uint j = 0; j < b->_num_succs; j++ ) {
        if (b->succ_fall_through(j)) {
          Block *target = b->non_connector_successor(j);
          float freq = b->_freq * b->succ_prob(j);
          int from_pct = (int) ((100 * freq) / b->_freq);
          int to_pct = (int) ((100 * freq) / target->_freq);
          edges->append(new CFGEdge(b, target, freq, from_pct, to_pct));
        }
      }
    }
  }

  // Group connector blocks into one trace
  for (i++; i < _cfg._num_blocks; i++) {
    Block *b = _cfg._blocks[i];
    assert(b->is_connector(), "connector blocks at the end");
    tr->append(b);
    uf->map(b->_pre_order, tr->id());
    traces[b->_pre_order] = NULL;
  }
}

//------------------------------union_traces----------------------------------
// Union two traces together in uf, and null out the trace in the list
void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace)
{
  uint old_id = old_trace->id();
  uint updated_id = updated_trace->id();

  uint lo_id = updated_id;
  uint hi_id = old_id;

  // If from is greater than to, swap values to meet
  // UnionFind guarantee.
  if (updated_id > old_id) {
    lo_id = old_id;
    hi_id = updated_id;

    // Fix up the trace ids
    traces[lo_id] = traces[updated_id];
    updated_trace->set_id(lo_id);
  }

  // Union the lower with the higher and remove the pointer
  // to the higher.
  uf->Union(lo_id, hi_id);
  traces[hi_id] = NULL;
}

//------------------------------grow_traces-------------------------------------
// Append traces together via the most frequently executed edges
void PhaseBlockLayout::grow_traces()
{
  // Order the edges, and drive the growth of Traces via the most
  // frequently executed edges.
  edges->sort(edge_order);
  for (int i = 0; i < edges->length(); i++) {
    CFGEdge *e = edges->at(i);

    if (e->state() != CFGEdge::open) continue;

    Block *src_block = e->from();
    Block *targ_block = e->to();

    // Don't grow traces along backedges?
    if (!BlockLayoutRotateLoops) {
      if (targ_block->_rpo <= src_block->_rpo) {
        targ_block->set_loop_alignment(targ_block);
        continue;
      }
    }

    Trace *src_trace = trace(src_block);
    Trace *targ_trace = trace(targ_block);

    // If the edge in question can join two traces at their ends,
    // append one trace to the other.
    if (src_trace->last_block() == src_block) {
      if (src_trace == targ_trace) {
        e->set_state(CFGEdge::interior);
        if (targ_trace->backedge(e)) {
          // Reset i to catch any newly eligible edge
          // (Or we could remember the first "open" edge, and reset there)
          i = 0;
        }
      } else if (targ_trace->first_block() == targ_block) {
        e->set_state(CFGEdge::connected);
        src_trace->append(targ_trace);
        union_traces(src_trace, targ_trace);
      }
    }
  }
}

//------------------------------merge_traces-----------------------------------
// Embed one trace into another, if the fork or join points are sufficiently
// balanced.
void PhaseBlockLayout::merge_traces(bool fall_thru_only)
{
  // Walk the edge list another time, looking at unprocessed edges.
  // Fold in diamonds
  for (int i = 0; i < edges->length(); i++) {
    CFGEdge *e = edges->at(i);

    if (e->state() != CFGEdge::open) continue;
    if (fall_thru_only) {
      if (e->infrequent()) continue;
    }

    Block *src_block = e->from();
    Trace *src_trace = trace(src_block);
    bool src_at_tail = src_trace->last_block() == src_block;

    Block *targ_block = e->to();
    Trace *targ_trace = trace(targ_block);
    bool targ_at_start = targ_trace->first_block() == targ_block;

    if (src_trace == targ_trace) {
      // This may be a loop, but we can't do much about it.
      e->set_state(CFGEdge::interior);
      continue;
    }

    if (fall_thru_only) {
      // If the edge links the middle of two traces, we can't do anything.
      // Mark the edge and continue.
      if (!src_at_tail && !targ_at_start) {
        continue;
      }

      // Don't grow traces along backedges?
      if (!BlockLayoutRotateLoops && (targ_block->_rpo <= src_block->_rpo)) {
        continue;
      }

      // If both ends of the edge are available, why didn't we handle it earlier?
      assert(src_at_tail ^ targ_at_start, "Should have caught this edge earlier.");

      if (targ_at_start) {
        // Insert the "targ" trace in the "src" trace if the insertion point
        // is a two way branch.
        // Better profitability check possible, but may not be worth it.
        // Someday, see if this "fork" has an associated "join";
        // then make a policy on merging this trace at the fork or join.
        // For example, other things being equal, it may be better to place this
        // trace at the join point if the "src" trace ends in a two-way, but
        // the insertion point is one-way.
        assert(src_block->num_fall_throughs() == 2, "unexpected diamond");
        e->set_state(CFGEdge::connected);
        src_trace->insert_after(src_block, targ_trace);
        union_traces(src_trace, targ_trace);
      } else if (src_at_tail) {
        if (src_trace != trace(_cfg._broot)) {
          e->set_state(CFGEdge::connected);
          targ_trace->insert_before(targ_block, src_trace);
          union_traces(targ_trace, src_trace);
        }
      }
    } else if (e->state() == CFGEdge::open) {
      // Append traces, even without a fall-thru connection.
      // But leave root entry at the beginning of the block list.
      if (targ_trace != trace(_cfg._broot)) {
        e->set_state(CFGEdge::connected);
        src_trace->append(targ_trace);
        union_traces(src_trace, targ_trace);
      }
    }
  }
}

//----------------------------reorder_traces-----------------------------------
// Order the sequence of the traces in some desirable way, and fixup the
// jumps at the end of each block.
void PhaseBlockLayout::reorder_traces(int count)
{
  ResourceArea *area = Thread::current()->resource_area();
  Trace **new_traces = NEW_ARENA_ARRAY(area, Trace *, count);
  Block_List worklist;
  int new_count = 0;

  // Compact the traces.
  for (int i = 0; i < count; i++) {
    Trace *tr = traces[i];
    if (tr != NULL) {
      new_traces[new_count++] = tr;
    }
  }

  // The entry block should be first on the new trace list.
  Trace *tr = trace(_cfg._broot);
  assert(tr == new_traces[0], "entry trace misplaced");

  // Sort the new trace list by frequency
  qsort(new_traces + 1, new_count - 1, sizeof(new_traces[0]), trace_frequency_order);

  // Patch up the successor blocks
  _cfg._blocks.reset();
  _cfg._num_blocks = 0;
  for (int i = 0; i < new_count; i++) {
    Trace *tr = new_traces[i];
    if (tr != NULL) {
      tr->fixup_blocks(_cfg);
    }
  }
}

//------------------------------PhaseBlockLayout-------------------------------
// Order basic blocks based on frequency
PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg) :
  Phase(BlockLayout),
  _cfg(cfg)
{
  ResourceMark rm;
  ResourceArea *area = Thread::current()->resource_area();

  // List of traces
  int size = _cfg._num_blocks + 1;
  traces = NEW_ARENA_ARRAY(area, Trace *, size);
  memset(traces, 0, size*sizeof(Trace*));
  next = NEW_ARENA_ARRAY(area, Block *, size);
  memset(next, 0, size*sizeof(Block *));
  prev = NEW_ARENA_ARRAY(area, Block *, size);
  memset(prev , 0, size*sizeof(Block *));

  // List of edges
  edges = new GrowableArray<CFGEdge*>;

  // Mapping block index --> block_trace
  uf = new UnionFind(size);
  uf->reset(size);

  // Find edges and create traces.
  find_edges();

  // Grow traces at their ends via most frequent edges.
  grow_traces();

  // Merge one trace into another, but only at fall-through points.
  // This may make diamonds and other related shapes in a trace.
  merge_traces(true);

  // Run merge again, allowing two traces to be catenated, even if
  // one does not fall through into the other.  This places loosely
  // related traces near each other.
  merge_traces(false);

  // Re-order all the remaining traces by frequency
  reorder_traces(size);

  assert(_cfg._num_blocks >= (uint) (size - 1), "number of blocks can not shrink");
}


//------------------------------backedge---------------------------------------
// Edge e completes a loop in a trace.  If the target block is head of the
// loop, rotate the loop blocks so that the loop ends in a conditional branch.
bool Trace::backedge(CFGEdge *e) {
  bool loop_rotated = false;
  Block *src_block  = e->from();
  Block *targ_block = e->to();

  assert(last_block() == src_block, "loop discovery at back branch");
  if (first_block() == targ_block) {
    if (BlockLayoutRotateLoops && last_block()->num_fall_throughs() < 2) {
      // Find the last block in the trace that has a conditional
      // branch.
      Block *b;
      for (b = last_block(); b != NULL; b = prev(b)) {
        if (b->num_fall_throughs() == 2) {
          break;
        }
      }

      if (b != last_block() && b != NULL) {
        loop_rotated = true;

        // Rotate the loop by doing two-part linked-list surgery.
        append(first_block());
        break_loop_after(b);
      }
    }

    // Backbranch to the top of a trace
    // Scroll forward through the trace from the targ_block.  If we find
    // a loop head before another loop top, use the loop head alignment.
    for (Block *b = targ_block; b != NULL; b = next(b)) {
      if (b->has_loop_alignment()) {
        break;
      }
      if (b->head()->is_Loop()) {
        targ_block = b;
        break;
      }
    }

    first_block()->set_loop_alignment(targ_block);

  } else {
    // Backbranch into the middle of a trace
    targ_block->set_loop_alignment(targ_block);
  }

  return loop_rotated;
}
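
// Rotation sketch (assuming break_loop_after(b) restarts the trace at
// the block after b): for a trace { H, ..., b, ..., z } where H is
// the loop head and the tail z has no conditional exit, H is moved
// behind z and the list is re-split after b, so the rotated loop now
// ends at b's conditional branch and the hot backedge can fall
// through instead of jumping over the loop test.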

//------------------------------fixup_blocks-----------------------------------
// push blocks onto the CFG list
// ensure that blocks have the correct two-way branch sense
void Trace::fixup_blocks(PhaseCFG &cfg) {
  Block *last = last_block();
  for (Block *b = first_block(); b != NULL; b = next(b)) {
    cfg._blocks.push(b);
    cfg._num_blocks++;
    if (!b->is_connector()) {
      int nfallthru = b->num_fall_throughs();
      if (b != last) {
        if (nfallthru == 2) {
          // Ensure that the sense of the branch is correct
          Block *bnext = next(b);
          Block *bs0 = b->non_connector_successor(0);

          MachNode *iff   = b->_nodes[b->_nodes.size()-3]->as_Mach();
          ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj();
          ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj();

          if (bnext == bs0) {
            // Fall-thru case in succs[0], should be in succs[1]

            // Flip targets in _succs map
            Block *tbs0 = b->_succs[0];
            Block *tbs1 = b->_succs[1];
            b->_succs.map( 0, tbs1 );
            b->_succs.map( 1, tbs0 );

            // Flip projections to match targets
            b->_nodes.map(b->_nodes.size()-2, proj1);
            b->_nodes.map(b->_nodes.size()-1, proj0);
          }
        }
      }
    }
  }
}