/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/opcodes.hpp"
#include "opto/rootnode.hpp"
#include "utilities/copy.hpp"

void Block_Array::grow( uint i ) {
  assert(i >= Max(), "must be an overflow");
  debug_only(_limit = i+1);
  if( i < _size )  return;
  if( !_size ) {
    _size = 1;
    _blocks = (Block**)_arena->Amalloc( _size * sizeof(Block*) );
    _blocks[0] = NULL;
  }
  uint old = _size;
  while( i >= _size ) _size <<= 1;      // Double to fit
  _blocks = (Block**)_arena->Arealloc( _blocks, old*sizeof(Block*), _size*sizeof(Block*));
  Copy::zero_to_bytes( &_blocks[old], (_size-old)*sizeof(Block*) );
}

void Block_List::remove(uint i) {
  assert(i < _cnt, "index out of bounds");
  Copy::conjoint_words_to_lower((HeapWord*)&_blocks[i+1], (HeapWord*)&_blocks[i], ((_cnt-i-1)*sizeof(Block*)));
  pop(); // shrink list by one block
}

void Block_List::insert(uint i, Block *b) {
  push(b); // grow list by one block
  Copy::conjoint_words_to_higher((HeapWord*)&_blocks[i], (HeapWord*)&_blocks[i+1], ((_cnt-i-1)*sizeof(Block*)));
  _blocks[i] = b;
}

#ifndef PRODUCT
void Block_List::print() {
  for (uint i=0; i < size(); i++) {
    tty->print("B%d ", _blocks[i]->_pre_order);
  }
  tty->print("size = %d\n", size());
}
#endif
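
// Illustrative usage (not part of the original file; b1..b4 are assumed to
// exist): insert() and remove() shift the tail of the list, so they are
// inverses of each other.
//
//   Block_List list;
//   list.push(b1); list.push(b2); list.push(b3);   // [b1 b2 b3]
//   list.insert(1, b4);                            // [b1 b4 b2 b3]
//   list.remove(1);                                // [b1 b2 b3]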

uint Block::code_alignment() {
  // Check for Root block
  if (_pre_order == 0) return CodeEntryAlignment;
  // Check for Start block
  if (_pre_order == 1) return InteriorEntryAlignment;
  // Check for loop alignment
  if (has_loop_alignment()) return loop_alignment();

  return relocInfo::addr_unit(); // no particular alignment
}

uint Block::compute_loop_alignment() {
  Node *h = head();
  int unit_sz = relocInfo::addr_unit();
  if (h->is_Loop() && h->as_Loop()->is_inner_loop()) {
    // Pre- and post-loops have low trip count so do not bother with
    // NOPs to align the loop head.  The constants are hidden from tuning
    // but only because my "divide by 4" heuristic surely gets nearly
    // all possible gain (a "do not align at all" heuristic has a
    // chance of getting a really tiny gain).
    if (h->is_CountedLoop() && (h->as_CountedLoop()->is_pre_loop() ||
                                h->as_CountedLoop()->is_post_loop())) {
      return (OptoLoopAlignment > 4*unit_sz) ? (OptoLoopAlignment>>2) : unit_sz;
    }
    // Loops with low backedge frequency should not be aligned.
    Node *n = h->in(LoopNode::LoopBackControl)->in(0);
    if (n->is_MachIf() && n->as_MachIf()->_prob < 0.01) {
      return unit_sz; // Loop does not loop, more often than not!
    }
    return OptoLoopAlignment; // Otherwise align loop head
  }

  return unit_sz; // no particular alignment
}
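
// Worked numbers (illustrative; the values are assumptions): with
// OptoLoopAlignment == 16 and a byte-addressed target (unit_sz == 1), an
// inner pre- or post-loop gets 16 > 4*1, so its head is aligned to only
// 16 >> 2 == 4 bytes; a hot inner loop head gets the full 16 bytes, and a
// loop whose backedge is taken less than 1% of the time gets no alignment.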

// Compute the size of first 'inst_cnt' instructions in this block.
// Return the number of instructions left to compute if the block has
// less than 'inst_cnt' instructions. Stop, and return 0 if sum_size
// exceeds OptoLoopAlignment.
uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
                                    PhaseRegAlloc* ra) {
  uint last_inst = number_of_nodes();
  for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) {
    uint inst_size = get_node(j)->size(ra);
    if( inst_size > 0 ) {
      inst_cnt--;
      uint sz = sum_size + inst_size;
      if( sz <= (uint)OptoLoopAlignment ) {
        // Compute size of instructions which fit into fetch buffer only
        // since all inst_cnt instructions will not fit even if we align them.
        sum_size = sz;
      } else {
        return 0;
      }
    }
  }
  return inst_cnt;
}
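
// Illustrative trace (assumed instruction sizes): with OptoLoopAlignment ==
// 16 and inst_cnt == 4, a block holding two 4-byte instructions accumulates
// sum_size == 8 and returns 2, the count still to be taken from a successor
// block.  With inst_cnt == 8 and five 4-byte instructions, the fifth would
// push the sum to 20 > 16, so the method returns 0 without updating sum_size.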

uint Block::find_node( const Node *n ) const {
  for( uint i = 0; i < number_of_nodes(); i++ ) {
    if( get_node(i) == n )
      return i;
  }
  ShouldNotReachHere();
  return 0;
}

// Find and remove n from block list
void Block::find_remove( const Node *n ) {
  remove_node(find_node(n));
}

bool Block::contains(const Node *n) const {
  return _nodes.contains(n);
}

// Return empty status of a block. Empty blocks contain only the head, other
// ideal nodes, and an optional trailing goto.
int Block::is_Empty() const {

  // Root or start block is not considered empty
  if (head()->is_Root() || head()->is_Start()) {
    return not_empty;
  }

  int success_result = completely_empty;
  int end_idx = number_of_nodes() - 1;

  // Check for ending goto
  if ((end_idx > 0) && (get_node(end_idx)->is_MachGoto())) {
    success_result = empty_with_goto;
    end_idx--;
  }

  // Unreachable blocks are considered empty
  if (num_preds() <= 1) {
    return success_result;
  }

  // Ideal nodes are allowable in empty blocks: skip them.  Only MachNodes
  // turn directly into code, because only MachNodes have non-trivial
  // emit() functions.
  while ((end_idx > 0) && !get_node(end_idx)->is_Mach()) {
    end_idx--;
  }

  // No room for any interesting instructions?
  if (end_idx == 0) {
    return success_result;
  }

  return not_empty;
}

// Return true if the block's code implies that it is likely to be
// executed infrequently.  Check to see if the block ends in a Halt or
// a low probability call.
bool Block::has_uncommon_code() const {
  Node* en = end();

  if (en->is_MachGoto())
    en = en->in(0);
  if (en->is_Catch())
    en = en->in(0);
  if (en->is_MachProj() && en->in(0)->is_MachCall()) {
    MachCallNode* call = en->in(0)->as_MachCall();
    if (call->cnt() != COUNT_UNKNOWN && call->cnt() <= PROB_UNLIKELY_MAG(4)) {
      // This is true for slow-path stubs like new_{instance,array},
      // slow_arraycopy, complete_monitor_locking, uncommon_trap.
      // The magic number corresponds to the probability of an uncommon_trap,
      // even though it is a count not a probability.
      return true;
    }
  }

  int op = en->is_Mach() ? en->as_Mach()->ideal_Opcode() : en->Opcode();
  return op == Op_Halt;
}

// True if block is low enough frequency or guarded by a test which
// mostly does not go here.
bool PhaseCFG::is_uncommon(const Block* block) {
  // Initial blocks must never be moved, so are never uncommon.
  if (block->head()->is_Root() || block->head()->is_Start())  return false;

  // Check for way-low freq
  if(block->_freq < BLOCK_FREQUENCY(0.00001f) ) return true;

  // Look for code shape indicating uncommon_trap or slow path
  if (block->has_uncommon_code()) return true;

  const float epsilon = 0.05f;
  const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon);
  uint uncommon_preds = 0;
  uint freq_preds = 0;
  uint uncommon_for_freq_preds = 0;

  for( uint i=1; i< block->num_preds(); i++ ) {
    Block* guard = get_block_for_node(block->pred(i));
    // Check to see if this block follows its guard 1 time out of 10000
    // or less.
    //
    // See list of magnitude-4 unlikely probabilities in cfgnode.hpp which
    // we intend to be "uncommon", such as slow-path TLE allocation,
    // predicted call failure, and uncommon trap triggers.
    //
    // Use an epsilon value of 5% to allow for variability in frequency
    // predictions and floating point calculations.  The net effect is
    // that guard_factor is set to 9500.
    //
    // Ignore low-frequency blocks.
    // The next check is (guard->_freq < 1.e-5 * 9500.).
    if(guard->_freq*BLOCK_FREQUENCY(guard_factor) < BLOCK_FREQUENCY(0.00001f)) {
      uncommon_preds++;
    } else {
      freq_preds++;
      if( block->_freq < guard->_freq * guard_factor ) {
        uncommon_for_freq_preds++;
      }
    }
  }
  if( block->num_preds() > 1 &&
      // The block is uncommon if all preds are uncommon or
      (uncommon_preds == (block->num_preds()-1) ||
      // it is uncommon for all frequent preds.
       uncommon_for_freq_preds == freq_preds) ) {
    return true;
  }
  return false;
}
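
// Illustrative arithmetic (derived from the constants above): with
// PROB_UNLIKELY_MAG(4) == 1e-4, guard_factor = 1e-4 / 0.95, roughly 1.05e-4.
// A predecessor counts as "uncommon for" this block when
// block->_freq < guard->_freq * 1.05e-4, i.e. the block is entered from that
// guard at most about 1 time in 9500; guards whose own frequency falls below
// 1e-5 / 1.05e-4, roughly 0.095, are dismissed as uncommon outright.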

#ifndef PRODUCT
void Block::dump_bidx(const Block* orig, outputStream* st) const {
  if (_pre_order) st->print("B%d",_pre_order);
  else st->print("N%d", head()->_idx);

  if (Verbose && orig != this) {
    // Dump the original block's idx
    st->print(" (");
    orig->dump_bidx(orig, st);
    st->print(")");
  }
}

void Block::dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st) const {
  if (is_connector()) {
    for (uint i=1; i<num_preds(); i++) {
      Block *p = cfg->get_block_for_node(pred(i));
      p->dump_pred(cfg, orig, st);
    }
  } else {
    dump_bidx(orig, st);
    st->print(" ");
  }
}

void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const {
  // Print the basic block
  dump_bidx(this, st);
  st->print(": #\t");

  // Print the incoming CFG edges and the outgoing CFG edges
  for( uint i=0; i<_num_succs; i++ ) {
    non_connector_successor(i)->dump_bidx(_succs[i], st);
    st->print(" ");
  }
  st->print("<- ");
  if( head()->is_block_start() ) {
    for (uint i=1; i<num_preds(); i++) {
      Node *s = pred(i);
      if (cfg != NULL) {
        Block *p = cfg->get_block_for_node(s);
        p->dump_pred(cfg, p, st);
      } else {
        while (!s->is_block_start())
          s = s->in(0);
        st->print("N%d ", s->_idx );
      }
    }
  } else {
    st->print("BLOCK HEAD IS JUNK ");
  }

  // Print loop, if any
  const Block *bhead = this;    // Head of self-loop
  Node *bh = bhead->head();

  if ((cfg != NULL) && bh->is_Loop() && !head()->is_Root()) {
    LoopNode *loop = bh->as_Loop();
    const Block *bx = cfg->get_block_for_node(loop->in(LoopNode::LoopBackControl));
    while (bx->is_connector()) {
      bx = cfg->get_block_for_node(bx->pred(1));
    }
    st->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order);
    // Dump any loop-specific bits, especially for CountedLoops.
    loop->dump_spec(st);
  } else if (has_loop_alignment()) {
    st->print(" top-of-loop");
  }
  st->print(" Freq: %g",_freq);
  if( Verbose || WizardMode ) {
    st->print(" IDom: %d/#%d", _idom ? _idom->_pre_order : 0, _dom_depth);
    st->print(" RegPressure: %d",_reg_pressure);
    st->print(" IHRP Index: %d",_ihrp_index);
    st->print(" FRegPressure: %d",_freg_pressure);
    st->print(" FHRP Index: %d",_fhrp_index);
  }
  st->cr();
}

void Block::dump() const {
  dump(NULL);
}

void Block::dump(const PhaseCFG* cfg) const {
  dump_head(cfg);
  for (uint i=0; i< number_of_nodes(); i++) {
    get_node(i)->dump();
  }
  tty->print("\n");
}
#endif

PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher)
: Phase(CFG)
, _block_arena(arena)
, _regalloc(NULL)
, _scheduling_for_pressure(false)
, _root(root)
, _matcher(matcher)
, _node_to_block_mapping(arena)
, _node_latency(NULL)
#ifndef PRODUCT
, _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
#endif
#ifdef ASSERT
, _raw_oops(arena)
#endif
{
  ResourceMark rm;
  // I'll need a few machine-specific GotoNodes.  Make an Ideal GotoNode,
  // then Match it into a machine-specific Node.  Then clone the machine
  // Node on demand.
  Node *x = new GotoNode(NULL);
  x->init_req(0, x);
  _goto = matcher.match_tree(x);
  assert(_goto != NULL, "");
  _goto->set_req(0,_goto);

  // Build the CFG in Reverse Post Order
  _number_of_blocks = build_cfg();
  _root_block = get_block_for_node(_root);
}

// Build a proper looking CFG.  Make every block begin with either a StartNode
// or a RegionNode.  Make every block end with either a Goto, If or Return.
// The RootNode both starts and ends its own block.  Do this with a recursive
// backwards walk over the control edges.
uint PhaseCFG::build_cfg() {
  Arena *a = Thread::current()->resource_area();
  VectorSet visited(a);

  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(a, C->live_nodes() >> 1);
  nstack.push(_root, 0);
  uint sum = 0;                 // Counter for blocks

  while (nstack.is_nonempty()) {
    // node and in's index from stack's top
    // 'np' is _root (see above) or RegionNode, StartNode: we push on stack
    // only nodes which point to the start of basic block (see below).
    Node *np = nstack.node();
    // idx > 0, except for the first node (_root) pushed on stack
    // at the beginning when idx == 0.
    // We will use the condition (idx == 0) later to end the build.
    uint idx = nstack.index();
    Node *proj = np->in(idx);
    const Node *x = proj->is_block_proj();
    // Does the block end with a proper block-ending Node?  One of Return,
    // If or Goto? (This check should be done for visited nodes also).
    if (x == NULL) {                    // Does not end right...
      Node *g = _goto->clone(); // Force it to end in a Goto
      g->set_req(0, proj);
      np->set_req(idx, g);
      x = proj = g;
    }
    if (!visited.test_set(x->_idx)) { // Visit this block once
      // Skip any control-pinned middle'in stuff
      Node *p = proj;
      do {
        proj = p;                   // Update pointer to last Control
        p = p->in(0);               // Move control forward
      } while( !p->is_block_proj() &&
               !p->is_block_start() );
      // Make the block begin with one of Region or StartNode.
      if( !p->is_block_start() ) {
        RegionNode *r = new RegionNode( 2 );
        r->init_req(1, p);          // Insert RegionNode in the way
        proj->set_req(0, r);        // Insert RegionNode in the way
        p = r;
      }
      // 'p' now points to the start of this basic block

      // Put self in array of basic blocks
      Block *bb = new (_block_arena) Block(_block_arena, p);
      map_node_to_block(p, bb);
      map_node_to_block(x, bb);
      if( x != p ) {                // Only for root is x == p
        bb->push_node((Node*)x);
      }
      // Now handle predecessors
      ++sum;                        // Count 1 for self block
      uint cnt = bb->num_preds();
      for (int i = (cnt - 1); i > 0; i-- ) { // For all predecessors
        Node *prevproj = p->in(i);  // Get prior input
        assert( !prevproj->is_Con(), "dead input not removed" );
        // Check to see if p->in(i) is a "control-dependent" CFG edge -
        // i.e., it splits at the source (via an IF or SWITCH) and merges
        // at the destination (via a many-input Region).
        // This breaks critical edges.  The RegionNode to start the block
        // will be added when <p,i> is pulled off the node stack
        if ( cnt > 2 ) {            // Merging many things?
          assert( prevproj== bb->pred(i),"");
          if(prevproj->is_block_proj() != prevproj) { // Control-dependent edge?
            // Force a block on the control-dependent edge
            Node *g = _goto->clone();       // Force it to end in a Goto
            g->set_req(0,prevproj);
            p->set_req(i,g);
          }
        }
        nstack.push(p, i);  // 'p' is RegionNode or StartNode
      }
    } else { // Post-processing visited nodes
      nstack.pop();                 // remove node from stack
      // Check if it is the first node pushed on the stack at the beginning.
      if (idx == 0) break;          // end of the build
      // Find predecessor basic block
      Block *pb = get_block_for_node(x);
      // Insert into nodes array, if not already there
      if (!has_block(proj)) {
        assert( x != proj, "" );
        // Map basic block of projection
        map_node_to_block(proj, pb);
        pb->push_node(proj);
      }
      // Insert self as a child of my predecessor block
      pb->_succs.map(pb->_num_succs++, get_block_for_node(np));
      assert( pb->get_node(pb->number_of_nodes() - pb->_num_succs)->is_block_proj(),
              "too many control users, not a CFG?" );
    }
  }
  // Return number of basic blocks for all children and self
  return sum;
}
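
// Illustrative sketch (not from the original file): build_cfg() breaks
// critical edges on the fly.  Given an If whose true projection feeds a
// many-input Region directly,
//
//   If --> IfTrue ----------> Region(self, IfTrue, other)
//
// the edge splits at the If and merges at the Region, so the walk above
// interposes a cloned Goto; the RegionNode that starts the new block is
// added later, when <Region,i> is popped off the node stack:
//
//   If --> IfTrue --> Goto --> Region(self, Goto, other)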

// Inserts a goto & corresponding basic block between
// block[block_no] and its succ_no'th successor block
void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
  // get block with block_no
  assert(block_no < number_of_blocks(), "illegal block number");
  Block* in  = get_block(block_no);
  // get successor block succ_no
  assert(succ_no < in->_num_succs, "illegal successor number");
  Block* out = in->_succs[succ_no];
  // Compute frequency of the new block.  Do this before inserting
  // new block in case succ_prob() needs to infer the probability from
  // surrounding blocks.
  float freq = in->_freq * in->succ_prob(succ_no);
  // get ProjNode corresponding to the succ_no'th successor of the in block
  ProjNode* proj = in->get_node(in->number_of_nodes() - in->_num_succs + succ_no)->as_Proj();
  // create region for basic block
  RegionNode* region = new RegionNode(2);
  region->init_req(1, proj);
  // setup corresponding basic block
  Block* block = new (_block_arena) Block(_block_arena, region);
  map_node_to_block(region, block);
  C->regalloc()->set_bad(region->_idx);
  // add a goto node
  Node* gto = _goto->clone(); // get a new goto node
  gto->set_req(0, region);
  // add it to the basic block
  block->push_node(gto);
  map_node_to_block(gto, block);
  C->regalloc()->set_bad(gto->_idx);
  // hook up successor block
  block->_succs.map(block->_num_succs++, out);
  // remap successor's predecessors if necessary
  for (uint i = 1; i < out->num_preds(); i++) {
    if (out->pred(i) == proj) out->head()->set_req(i, gto);
  }
  // remap predecessor's successor to new block
  in->_succs.map(succ_no, block);
  // Set the frequency of the new block
  block->_freq = freq;
  // add new basic block to basic block list
  add_block_at(block_no + 1, block);
}
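
// Before/after sketch (illustrative): starting from in --proj--> out, the
// new block is spliced onto the edge and inherits the edge's frequency:
//
//   in --proj--> region[goto] --gto--> out
//
// out's head now takes gto where it used to take proj, and the new block is
// placed right after 'in' in the block list.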

// Does this block end in a multiway branch that cannot have the default case
// flipped for another case?
static bool no_flip_branch(Block *b) {
  int branch_idx = b->number_of_nodes() - b->_num_succs-1;
  if (branch_idx < 1) {
    return false;
  }
  Node *branch = b->get_node(branch_idx);
  if (branch->is_Catch()) {
    return true;
  }
  if (branch->is_Mach()) {
    if (branch->is_MachNullCheck()) {
      return true;
    }
    int iop = branch->as_Mach()->ideal_Opcode();
    if (iop == Op_FastLock || iop == Op_FastUnlock) {
      return true;
    }
    // Don't flip if branch has an implicit check.
    if (branch->as_Mach()->is_TrapBasedCheckNode()) {
      return true;
    }
  }
  return false;
}

// Check for NeverBranch at block end.  This needs to become a GOTO to the
// true target.  NeverBranch nodes are treated as a conditional branch that
// always goes the same direction for most of the optimizer and are used to
// give a fake exit path to infinite loops.  At this late stage they need to
// turn into Goto's so that when you enter the infinite loop you indeed hang.
void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
  // Find true target
  int end_idx = b->end_idx();
  int idx = b->get_node(end_idx+1)->as_Proj()->_con;
  Block *succ = b->_succs[idx];
  Node* gto = _goto->clone(); // get a new goto node
  gto->set_req(0, b->head());
  Node *bp = b->get_node(end_idx);
  b->map_node(gto, end_idx); // Slam over NeverBranch
  map_node_to_block(gto, b);
  C->regalloc()->set_bad(gto->_idx);
  b->pop_node();              // Yank projections
  b->pop_node();              // Yank projections
  b->_succs.map(0,succ);      // Map only successor
  b->_num_succs = 1;
  // remap successor's predecessors if necessary
  uint j;
  for( j = 1; j < succ->num_preds(); j++)
    if( succ->pred(j)->in(0) == bp )
      succ->head()->set_req(j, gto);
  // Kill alternate exit path
  Block *dead = b->_succs[1-idx];
  for( j = 1; j < dead->num_preds(); j++)
    if( dead->pred(j)->in(0) == bp )
      break;
  // Scan through block, yanking dead path from
  // all regions and phis.
  dead->head()->del_req(j);
  for( int k = 1; dead->get_node(k)->is_Phi(); k++ )
    dead->get_node(k)->del_req(j);
}

// Helper function to move block bx to the slot following b_index. Return
// true if the move is successful, otherwise false
bool PhaseCFG::move_to_next(Block* bx, uint b_index) {
  if (bx == NULL) return false;

  // Return false if bx is already scheduled.
  uint bx_index = bx->_pre_order;
  if ((bx_index <= b_index) && (get_block(bx_index) == bx)) {
    return false;
  }

  // Find the current index of block bx on the block list
  bx_index = b_index + 1;
  while (bx_index < number_of_blocks() && get_block(bx_index) != bx) {
    bx_index++;
  }
  assert(get_block(bx_index) == bx, "block not found");

  // If the previous block conditionally falls into bx, return false,
  // because moving bx will create an extra jump.
  for(uint k = 1; k < bx->num_preds(); k++ ) {
    Block* pred = get_block_for_node(bx->pred(k));
    if (pred == get_block(bx_index - 1)) {
      if (pred->_num_succs != 1) {
        return false;
      }
    }
  }

  // Reinsert bx just past block 'b'
  _blocks.remove(bx_index);
  _blocks.insert(b_index + 1, bx);
  return true;
}

// Move empty and uncommon blocks to the end.
void PhaseCFG::move_to_end(Block *b, uint i) {
  int e = b->is_Empty();
  if (e != Block::not_empty) {
    if (e == Block::empty_with_goto) {
      // Remove the goto, but leave the block.
      b->pop_node();
    }
    // Mark this block as a connector block, which will cause it to be
    // ignored in certain functions such as non_connector_successor().
    b->set_connector();
  }
  // Move the empty block to the end, and don't recheck.
  _blocks.remove(i);
  _blocks.push(b);
}

// Set loop alignment for every block
void PhaseCFG::set_loop_alignment() {
  uint last = number_of_blocks();
  assert(get_block(0) == get_root_block(), "");

  for (uint i = 1; i < last; i++) {
    Block* block = get_block(i);
    if (block->head()->is_Loop()) {
      block->set_loop_alignment(block);
    }
  }
}

// Make empty basic blocks into "connector" blocks.  Move uncommon blocks
// to the end.
void PhaseCFG::remove_empty_blocks() {
  // Move uncommon blocks to the end
  uint last = number_of_blocks();
  assert(get_block(0) == get_root_block(), "");

  for (uint i = 1; i < last; i++) {
    Block* block = get_block(i);
    if (block->is_connector()) {
      break;
    }

    // Check for NeverBranch at block end.  This needs to become a GOTO to the
    // true target.  NeverBranch nodes are treated as a conditional branch
    // that always goes the same direction for most of the optimizer and are
    // used to give a fake exit path to infinite loops.  At this late stage
    // they need to turn into Goto's so that when you enter the infinite loop
    // you indeed hang.
    if (block->get_node(block->end_idx())->Opcode() == Op_NeverBranch) {
      convert_NeverBranch_to_Goto(block);
    }

    // Look for uncommon blocks and move to end.
    if (!C->do_freq_based_layout()) {
      if (is_uncommon(block)) {
        move_to_end(block, i);
        last--; // No longer check for being uncommon!
        if (no_flip_branch(block)) { // Fall-thru case must follow?
          // Find the fall-thru block
          block = get_block(i);
          move_to_end(block, i);
          last--;
        }
        // backup block counter post-increment
        i--;
      }
    }
  }

  // Move empty blocks to the end
  last = number_of_blocks();
  for (uint i = 1; i < last; i++) {
    Block* block = get_block(i);
    if (block->is_Empty() != Block::not_empty) {
      move_to_end(block, i);
      last--;
      i--;
    }
  } // End of for all blocks
}

Block *PhaseCFG::fixup_trap_based_check(Node *branch, Block *block, int block_pos, Block *bnext) {
  // Trap based checks must fall through to the successor with
  // PROB_ALWAYS.
  // They should be an If with 2 successors.
  assert(branch->is_MachIf(), "must be If");
  assert(block->_num_succs == 2, "must have 2 successors");

  // Get the If node and the projection for the first successor.
  MachIfNode *iff   = block->get_node(block->number_of_nodes()-3)->as_MachIf();
  ProjNode   *proj0 = block->get_node(block->number_of_nodes()-2)->as_Proj();
  ProjNode   *proj1 = block->get_node(block->number_of_nodes()-1)->as_Proj();
  ProjNode   *projt = (proj0->Opcode() == Op_IfTrue)  ? proj0 : proj1;
  ProjNode   *projf = (proj0->Opcode() == Op_IfFalse) ? proj0 : proj1;

  // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
  assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0");
  assert(proj1->raw_out(0) == block->_succs[1]->head(), "Mismatch successor 1");

  ProjNode *proj_always;
  ProjNode *proj_never;
  // We must negate the branch if the implicit check doesn't follow
  // the branch's TRUE path.  Then, the new TRUE branch target will
  // be the old FALSE branch target.
  if (iff->_prob <= 2*PROB_NEVER) {   // There are small rounding errors.
    proj_never  = projt;
    proj_always = projf;
  } else {
    // We must negate the branch if the trap doesn't follow the
    // branch's TRUE path.  Then, the new TRUE branch target will
    // be the old FALSE branch target.
    proj_never  = projf;
    proj_always = projt;
    iff->negate();
  }
  assert(iff->_prob <= 2*PROB_NEVER, "Trap based checks are expected to trap never!");
  // Map the successors properly
  block->_succs.map(0, get_block_for_node(proj_never ->raw_out(0)));   // The target of the trap.
  block->_succs.map(1, get_block_for_node(proj_always->raw_out(0)));   // The fall through target.

  if (block->get_node(block->number_of_nodes() - block->_num_succs + 1) != proj_always) {
    block->map_node(proj_never,  block->number_of_nodes() - block->_num_succs + 0);
    block->map_node(proj_always, block->number_of_nodes() - block->_num_succs + 1);
  }

  // Place the fall through block after this block.
  Block *bs1 = block->non_connector_successor(1);
  if (bs1 != bnext && move_to_next(bs1, block_pos)) {
    bnext = bs1;
  }
  // If the fall through block still is not the next block, insert a goto.
  if (bs1 != bnext) {
    insert_goto_at(block_pos, 1);
  }
  return bnext;
}

// Fix up the final control flow for basic blocks.
void PhaseCFG::fixup_flow() {
  // Fixup final control flow for the blocks.  Remove jump-to-next
  // block. If neither arm of an IF follows the conditional branch, we
  // have to add a second jump after the conditional.  We place the
  // TRUE branch target in succs[0] for both GOTOs and IFs.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    block->_pre_order = i;    // turn pre-order into block-index

    // Connector blocks need no further processing.
    if (block->is_connector()) {
      assert((i+1) == number_of_blocks() || get_block(i + 1)->is_connector(), "All connector blocks should sink to the end");
      continue;
    }
    assert(block->is_Empty() != Block::completely_empty, "Empty blocks should be connectors");

    Block* bnext = (i < number_of_blocks() - 1) ? get_block(i + 1) : NULL;
    Block* bs0 = block->non_connector_successor(0);

    // Check for multi-way branches where I cannot negate the test to
    // exchange the true and false targets.
    if (no_flip_branch(block)) {
      // Find fall through case - if must fall into its target.
      // Get the index of the branch's first successor.
      int branch_idx = block->number_of_nodes() - block->_num_succs;

      // The branch is 1 before the branch's first successor.
      Node *branch = block->get_node(branch_idx-1);

      // Handle no-flip branches which have implicit checks and which require
      // special block ordering and individual semantics of the 'fall through
      // case'.
      if ((TrapBasedNullChecks || TrapBasedRangeChecks) &&
          branch->is_Mach() && branch->as_Mach()->is_TrapBasedCheckNode()) {
        bnext = fixup_trap_based_check(branch, block, i, bnext);
      } else {
        // Else, default handling for no-flip branches
        for (uint j2 = 0; j2 < block->_num_succs; j2++) {
          const ProjNode* p = block->get_node(branch_idx + j2)->as_Proj();
          if (p->_con == 0) {
            // successor j2 is fall through case
            if (block->non_connector_successor(j2) != bnext) {
              // but it is not the next block => insert a goto
              insert_goto_at(i, j2);
            }
            // Put taken branch in slot 0
            if (j2 == 0 && block->_num_succs == 2) {
              // Flip targets in succs map
              Block *tbs0 = block->_succs[0];
              Block *tbs1 = block->_succs[1];
              block->_succs.map(0, tbs1);
              block->_succs.map(1, tbs0);
            }
            break;
          }
        }
      }

      // Remove all CatchProjs
      for (uint j = 0; j < block->_num_succs; j++) {
        block->pop_node();
      }

    } else if (block->_num_succs == 1) {
      // Block ends in a Goto?
      if (bnext == bs0) {
        // We fall into next block; remove the Goto
        block->pop_node();
      }

    } else if(block->_num_succs == 2) { // Block ends in an If?
      // Get opcode of 1st projection (matches _succs[0])
      // Note: Since this basic block has 2 exits, the last 2 nodes must
      //       be projections (in any order), the 3rd last node must be
      //       the IfNode (we have excluded other 2-way exits such as
      //       CatchNodes already).
      MachNode* iff   = block->get_node(block->number_of_nodes() - 3)->as_Mach();
      ProjNode* proj0 = block->get_node(block->number_of_nodes() - 2)->as_Proj();
      ProjNode* proj1 = block->get_node(block->number_of_nodes() - 1)->as_Proj();

      // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
      assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0");
      assert(proj1->raw_out(0) == block->_succs[1]->head(), "Mismatch successor 1");

      Block* bs1 = block->non_connector_successor(1);

      // Check for neither successor block following the current
      // block ending in a conditional. If so, move one of the
      // successors after the current one, provided that the
      // successor was previously unscheduled, but moveable
      // (i.e., all paths to it involve a branch).
      if (!C->do_freq_based_layout() && bnext != bs0 && bnext != bs1) {
        // Choose the more common successor based on the probability
        // of the conditional branch.
        Block* bx = bs0;
        Block* by = bs1;

        // _prob is the probability of taking the true path. Make
        // p the probability of taking successor #1.
        float p = iff->as_MachIf()->_prob;
        if (proj0->Opcode() == Op_IfTrue) {
          p = 1.0 - p;
        }

        // Prefer successor #1 if p > 0.5
        if (p > PROB_FAIR) {
          bx = bs1;
          by = bs0;
        }

        // Attempt the more common successor first
        if (move_to_next(bx, i)) {
          bnext = bx;
        } else if (move_to_next(by, i)) {
          bnext = by;
        }
      }

      // Check for conditional branching the wrong way.  Negate
      // conditional, if needed, so it falls into the following block
      // and branches to the not-following block.

      // Check for the next block being in succs[0].  We are going to branch
      // to succs[0], so we want the fall-thru case as the next block in
      // succs[1].
      if (bnext == bs0) {
        // Fall-thru case in succs[0], so flip targets in succs map
        Block* tbs0 = block->_succs[0];
        Block* tbs1 = block->_succs[1];
        block->_succs.map(0, tbs1);
        block->_succs.map(1, tbs0);
        // Flip projection for each target
        ProjNode* tmp = proj0;
        proj0 = proj1;
        proj1 = tmp;

      } else if(bnext != bs1) {
        // Need a double-branch
        // The existing conditional branch need not change.
        // Add an unconditional branch to the false target.
        // Alas, it must appear in its own block and adding a
        // block this late in the game is complicated.  Sigh.
        insert_goto_at(i, 1);
      }

      // Make sure we TRUE branch to the target
      if (proj0->Opcode() == Op_IfFalse) {
        iff->as_MachIf()->negate();
      }

      block->pop_node();          // Remove IfFalse & IfTrue projections
      block->pop_node();

    } else {
      // Multi-exit block, e.g. a switch statement
      // But we don't need to do anything here
    }
  } // End of for all blocks
}


// postalloc_expand: Expand nodes after register allocation.
//
// postalloc_expand has to be called after register allocation, just
// before output (i.e. scheduling). It only gets called if
// Matcher::require_postalloc_expand is true.
//
// Background:
//
// Nodes that are expanded (a compound node requiring several assembler
// instructions is split into two or more non-compound nodes) after
// register allocation are not as nice as the ones expanded before
// register allocation - they don't participate in optimizations such as
// global code motion. But after register allocation we can expand nodes
// that use registers which are not spillable or registers that are not
// allocated, because the old compound node is simply replaced (in its
// location in the basic block) by a new subgraph which does not contain
// compound nodes any more. The scheduler called during output can later
// on process these non-compound nodes.
//
// Implementation:
//
// Nodes requiring postalloc expand are specified in the ad file by using
// a postalloc_expand statement instead of ins_encode. A postalloc_expand
// contains a single call to an encoding, as does an ins_encode
// statement. Instead of an emit() function a postalloc_expand() function
// is generated that doesn't emit assembler but creates a new
// subgraph. The code below calls this postalloc_expand function for each
// node with the appropriate attribute. This function returns the new
// nodes generated in an array passed in the call. The old node,
// potential MachTemps before and potential Projs after it then get
// disconnected and replaced by the new nodes. The instruction
// generating the result has to be the last one in the array. In
// general it is assumed that Projs after the expanded node are
// kills. These kills are not required any more after expanding as
// there are now explicitly visible def-use chains and the Projs are
// removed. This does not hold for calls: they have not only kill-Projs
// but also Projs defining values. Therefore Projs after the expanded
// node are removed for all nodes except calls. If a node is to be
// reused, it must be added to the nodes list returned, and it will be
// added again.
//
// Implementing the postalloc_expand function for a node in an enc_class
// is rather tedious. It requires knowledge about many node details, as
// the nodes and the subgraph must be hand crafted. To simplify this,
// adlc generates some utility variables into the postalloc_expand function,
// e.g., holding the operands as specified by the postalloc_expand encoding
// specification, e.g.:
//   * unsigned idx_<par_name>  holding the index of the node in the ins
//   * Node *n_<par_name>       holding the node loaded from the ins
//   * MachOpnd *op_<par_name>  holding the corresponding operand
//
// The ordering of operands cannot be determined by looking at a
// rule. Especially if a match rule matches several different trees,
// several nodes are generated from one instruct specification with
// different operand orderings. In this case the adlc generated
// variables are the only way to access the ins and operands
// deterministically.
//
// If assigning a register to a node that contains an oop, don't
// forget to call ra_->set_oop() for the node.
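//
// Illustrative sketch (hypothetical instruct and encoding names, not from a
// real .ad file): a node requiring postalloc expand replaces ins_encode with
// a postalloc_expand statement,
//
//   instruct loadConL_Ex(iRegLdst dst, immL src) %{
//     match(Set dst src);
//     postalloc_expand( postalloc_expand_load_long(dst, src) );
//   %}
//
// and the enc_class body then builds the replacement subgraph using the
// adlc-generated variables described above (idx_dst, n_dst, op_dst, ...),
// pushing the new nodes - result-producing node last - onto the list passed
// to postalloc_expand().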
void PhaseCFG::postalloc_expand(PhaseRegAlloc* _ra) {
  GrowableArray <Node *> new_nodes(32); // Array with new nodes filled by postalloc_expand function of node.
  GrowableArray <Node *> remove(32);
  GrowableArray <Node *> succs(32);
  unsigned int max_idx = C->unique();   // Remember to distinguish new from old nodes.
  DEBUG_ONLY(bool foundNode = false);

  // for all blocks
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block *b = _blocks[i];
    // For all instructions in the current block.
    for (uint j = 0; j < b->number_of_nodes(); j++) {
      Node *n = b->get_node(j);
      if (n->is_Mach() && n->as_Mach()->requires_postalloc_expand()) {
#ifdef ASSERT
        if (TracePostallocExpand) {
          if (!foundNode) {
            foundNode = true;
            tty->print("POSTALLOC EXPANDING %d %s\n", C->compile_id(),
                       C->method() ? C->method()->name()->as_utf8() : C->stub_name());
          }
          tty->print("  postalloc expanding "); n->dump();
          if (Verbose) {
            tty->print("    with ins:\n");
            for (uint k = 0; k < n->len(); ++k) {
              if (n->in(k)) { tty->print("        "); n->in(k)->dump(); }
            }
          }
        }
#endif
        new_nodes.clear();
        // Collect nodes that have to be removed from the block later on.
        uint req = n->req();
        remove.clear();
        for (uint k = 0; k < req; ++k) {
          if (n->in(k) && n->in(k)->is_MachTemp()) {
            remove.push(n->in(k)); // MachTemps which are inputs to the old node have to be removed.
            n->in(k)->del_req(0);
            j--;
          }
        }

        // Check whether we can allocate enough nodes. We set a fixed limit
        // for the size of postalloc expands with this.
        uint unique_limit = C->unique() + 40;
        if (unique_limit >= _ra->node_regs_max_index()) {
          Compile::current()->record_failure("out of nodes in postalloc expand");
          return;
        }

        // Emit (i.e. generate new nodes).
        n->as_Mach()->postalloc_expand(&new_nodes, _ra);

        assert(C->unique() < unique_limit, "You allocated too many nodes in your postalloc expand.");

        // Disconnect the inputs of the old node.
        //
        // We reuse MachSpillCopy nodes. If we need to expand them, there
        // are many, so reusing pays off. If reused, the node already
        // has the new ins. n must be the last node on new_nodes list.
        if (!n->is_MachSpillCopy()) {
          for (int k = req - 1; k >= 0; --k) {
            n->del_req(k);
          }
        }

#ifdef ASSERT
        // Check that all nodes have proper operands.
        for (int k = 0; k < new_nodes.length(); ++k) {
          if (new_nodes.at(k)->_idx < max_idx || !new_nodes.at(k)->is_Mach()) continue; // old node, Proj ...
          MachNode *m = new_nodes.at(k)->as_Mach();
          for (unsigned int l = 0; l < m->num_opnds(); ++l) {
            if (MachOper::notAnOper(m->_opnds[l])) {
              outputStream *os = tty;
              os->print("Node %s ", m->Name());
              os->print("has invalid opnd %d: %p\n", l, m->_opnds[l]);
              assert(0, "Invalid operands, see inline trace in hs_err_pid file.");
            }
          }
        }
#endif

        // Collect succs of old node in remove (for projections) and in succs
        // (for all other nodes).  Do _not_ collect projections in remove
        // (but in succs) in case the node is a call. We need the projections
        // for calls as they are associated with registers (i.e. they are defs).
        succs.clear();
        for (DUIterator k = n->outs(); n->has_out(k); k++) {
          if (n->out(k)->is_Proj() && !n->is_MachCall() && !n->is_MachBranch()) {
            remove.push(n->out(k));
          } else {
            succs.push(n->out(k));
          }
        }
        // Replace old node n as input of its succs by last of the new nodes.
        for (int k = 0; k < succs.length(); ++k) {
          Node *succ = succs.at(k);
          for (uint l = 0; l < succ->req(); ++l) {
            if (succ->in(l) == n) {
              succ->set_req(l, new_nodes.at(new_nodes.length() - 1));
            }
          }
          for (uint l = succ->req(); l < succ->len(); ++l) {
            if (succ->in(l) == n) {
              succ->set_prec(l, new_nodes.at(new_nodes.length() - 1));
            }
          }
        }

        // Index of old node in block.
        uint index = b->find_node(n);
        // Insert new nodes into block and map them in nodes->blocks array
        // and remember last node in n2.
        Node *n2 = NULL;
        for (int k = 0; k < new_nodes.length(); ++k) {
          n2 = new_nodes.at(k);
          b->insert_node(n2, ++index);
          map_node_to_block(n2, b);
        }

        // Add old node n to remove and remove them all from block.
        remove.push(n);
        j--;
#ifdef ASSERT
        if (TracePostallocExpand && Verbose) {
          tty->print("    removing:\n");
          for (int k = 0; k < remove.length(); ++k) {
            tty->print("        "); remove.at(k)->dump();
          }
          tty->print("    inserting:\n");
          for (int k = 0; k < new_nodes.length(); ++k) {
            tty->print("        "); new_nodes.at(k)->dump();
          }
        }
#endif
        for (int k = 0; k < remove.length(); ++k) {
          if (b->contains(remove.at(k))) {
            b->find_remove(remove.at(k));
          } else {
            assert(remove.at(k)->is_Proj() && (remove.at(k)->in(0)->is_MachBranch()), "");
          }
        }
        // If anything has been inserted (n2 != NULL), continue after last node inserted.
        // This does not always work. Some postalloc expands don't insert any nodes, if they
        // do optimizations (e.g., max(x,x)). In this case we decrement j accordingly.
        j = n2 ? b->find_node(n2) : j;
      }
    }
  }

#ifdef ASSERT
  if (foundNode) {
    tty->print("FINISHED %d %s\n", C->compile_id(),
               C->method() ? C->method()->name()->as_utf8() : C->stub_name());
    tty->flush();
  }
#endif
}


//------------------------------dump-------------------------------------------
#ifndef PRODUCT
void PhaseCFG::_dump_cfg( const Node *end, VectorSet &visited ) const {
  const Node *x = end->is_block_proj();
  assert( x, "not a CFG" );

  // Do not visit this block again
  if( visited.test_set(x->_idx) ) return;

  // Skip through this block
  const Node *p = x;
  do {
    p = p->in(0);               // Move control forward
    assert( !p->is_block_proj() || p->is_Root(), "not a CFG" );
  } while( !p->is_block_start() );

  // Recursively visit
  for (uint i = 1; i < p->req(); i++) {
    _dump_cfg(p->in(i), visited);
  }

  // Dump the block
  get_block_for_node(p)->dump(this);
}

void PhaseCFG::dump( ) const {
  tty->print("\n--- CFG --- %d BBs\n", number_of_blocks());
  if (_blocks.size()) {         // Did we do basic-block layout?
    for (uint i = 0; i < number_of_blocks(); i++) {
      const Block* block = get_block(i);
      block->dump(this);
    }
  } else {                      // Else do it with a DFS
    VectorSet visited(_block_arena);
    _dump_cfg(_root,visited);
  }
}

void PhaseCFG::dump_headers() {
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    if (block != NULL) {
      block->dump_head(this);
    }
  }
}

void PhaseCFG::verify() const {
#ifdef ASSERT
  // Verify sane CFG
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    uint cnt = block->number_of_nodes();
    uint j;
    for (j = 0; j < cnt; j++) {
      Node *n = block->get_node(j);
      assert(get_block_for_node(n) == block, "");
      if (j >= 1 && n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
        assert(j == 1 || block->get_node(j-1)->is_Phi(), "CreateEx must be first instruction in block");
      }
      for (uint k = 0; k < n->req(); k++) {
        Node *def = n->in(k);
        if (def && def != n) {
          assert(get_block_for_node(def) || def->is_Con(), "must have block; constants for debug info ok");
          // Verify that instructions in the block are in the correct order.
          // Uses must follow their definition if they are in the same block.
          // Mostly done to check that MachSpillCopy nodes are placed correctly
          // when CreateEx node is moved in build_ifg_physical().
          if (get_block_for_node(def) == block && !(block->head()->is_Loop() && n->is_Phi()) &&
              // See (+++) comment in reg_split.cpp
              !(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) {
            bool is_loop = false;
            if (n->is_Phi()) {
              for (uint l = 1; l < def->req(); l++) {
                if (n == def->in(l)) {
                  is_loop = true;
                  break; // Some kind of loop
                }
              }
            }
            assert(is_loop || block->find_node(def) < j, "uses must follow definitions");
          }
        }
      }
    }

    j = block->end_idx();
    Node* bp = (Node*)block->get_node(block->number_of_nodes() - 1)->is_block_proj();
    assert(bp, "last instruction must be a block proj");
    assert(bp == block->get_node(j), "wrong number of successors for this block");
    if (bp->is_Catch()) {
      while (block->get_node(--j)->is_MachProj()) {
        ;
      }
      assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
    } else if (bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If) {
      assert(block->_num_succs == 2, "Conditional branch must have two targets");
    }
  }
#endif
}
#endif

UnionFind::UnionFind( uint max ) : _cnt(max), _max(max), _indices(NEW_RESOURCE_ARRAY(uint,max)) {
  Copy::zero_to_bytes( _indices, sizeof(uint)*max );
}

void UnionFind::extend( uint from_idx, uint to_idx ) {
  _nesting.check();
  if( from_idx >= _max ) {
    uint size = 16;
    while( size <= from_idx ) size <<=1;
    _indices = REALLOC_RESOURCE_ARRAY( uint, _indices, _max, size );
    _max = size;
  }
  while( _cnt <= from_idx ) _indices[_cnt++] = 0;
  _indices[from_idx] = to_idx;
}

void UnionFind::reset( uint max ) {
  // Force the Union-Find mapping to be at least this large
  extend(max,0);
  // Initialize to be the ID mapping.
  for( uint i=0; i<max; i++ ) map(i,i);
}

// Straight out of Tarjan's union-find algorithm
uint UnionFind::Find_compress( uint idx ) {
  uint cur  = idx;
  uint next = lookup(cur);
  while( next != cur ) {        // Scan chain of equivalences
    assert( next < cur, "always union smaller" );
    cur = next;                 // until find a fixed-point
    next = lookup(cur);
  }
  // Core of union-find algorithm: update chain of
  // equivalences to be equal to the root.
  while( idx != next ) {
    uint tmp = lookup(idx);
    map(idx, next);
    idx = tmp;
  }
  return idx;
}

// Like Find above, but no path compress, so bad asymptotic behavior
uint UnionFind::Find_const( uint idx ) const {
  if( idx == 0 ) return idx;    // Ignore the zero idx
  // Off the end?  This can happen during debugging dumps
  // when data structures have not finished being updated.
  if( idx >= _max ) return idx;
  uint next = lookup(idx);
  while( next != idx ) {        // Scan chain of equivalences
    idx = next;                 // until find a fixed-point
    next = lookup(idx);
  }
  return next;
}
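
// Illustrative trace (not part of the original file): path compression
// flattens a chain of equivalences.  After
//
//   UnionFind uf(5);
//   uf.reset(5);        // identity mapping: i -> i
//   uf.map(2, 1);       // 2 ~ 1
//   uf.map(3, 2);       // 3 ~ 2
//   uf.map(4, 3);       // 4 ~ 3
//
// Find_compress(4) walks 4 -> 3 -> 2 -> 1, then remaps 4, 3 and 2 directly
// to the root 1, so later lookups are O(1).  Note the "always union smaller"
// invariant: chains only ever step downward.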

// union 2 sets together.
void UnionFind::Union( uint idx1, uint idx2 ) {
  uint src = Find(idx1);
  uint dst = Find(idx2);
  assert( src, "" );
  assert( dst, "" );
  assert( src < _max, "oob" );
  assert( dst < _max, "oob" );
  assert( src < dst, "always union smaller" );
  map(dst,src);
}

#ifndef PRODUCT
void Trace::dump( ) const {
  tty->print_cr("Trace (freq %f)", first_block()->_freq);
  for (Block *b = first_block(); b != NULL; b = next(b)) {
    tty->print("  B%d", b->_pre_order);
    if (b->head()->is_Loop()) {
      tty->print(" (L%d)", b->compute_loop_alignment());
    }
    if (b->has_loop_alignment()) {
      tty->print(" (T%d)", b->code_alignment());
    }
  }
  tty->cr();
}

void CFGEdge::dump( ) const {
  tty->print(" B%d  -->  B%d  Freq: %f  out:%3d%%  in:%3d%%  State: ",
             from()->_pre_order, to()->_pre_order, freq(), _from_pct, _to_pct);
  switch(state()) {
    case connected:
      tty->print("connected");
      break;
    case open:
      tty->print("open");
      break;
    case interior:
      tty->print("interior");
      break;
  }
  if (infrequent()) {
    tty->print("  infrequent");
  }
  tty->cr();
}
#endif

// Comparison function for edges
static int edge_order(CFGEdge **e0, CFGEdge **e1) {
  float freq0 = (*e0)->freq();
  float freq1 = (*e1)->freq();
  if (freq0 != freq1) {
    return freq0 > freq1 ? -1 : 1;
  }

  int dist0 = (*e0)->to()->_rpo - (*e0)->from()->_rpo;
  int dist1 = (*e1)->to()->_rpo - (*e1)->from()->_rpo;

  return dist1 - dist0;
}

// Comparison function for traces
extern "C" int trace_frequency_order(const void *p0, const void *p1) {
  Trace *tr0 = *(Trace **) p0;
  Trace *tr1 = *(Trace **) p1;
  Block *b0 = tr0->first_block();
  Block *b1 = tr1->first_block();

  // The trace of connector blocks goes at the end;
  // we only expect one such trace
  if (b0->is_connector() != b1->is_connector()) {
    return b1->is_connector() ? -1 : 1;
  }

  // Pull more frequently executed blocks to the beginning
  float freq0 = b0->_freq;
  float freq1 = b1->_freq;
  if (freq0 != freq1) {
    return freq0 > freq1 ? -1 : 1;
  }

  int diff = tr0->first_block()->_rpo - tr1->first_block()->_rpo;

  return diff;
}

// Find edges of interest, i.e., those which can fall through.  Presumes that
// edges which don't fall through are of low frequency and can be generally
// ignored.  Initialize the list of traces.
void PhaseBlockLayout::find_edges() {
  // Walk the blocks, creating edges and Traces
  uint i;
  Trace *tr = NULL;
  for (i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* b = _cfg.get_block(i);
    tr = new Trace(b, next, prev);
    traces[tr->id()] = tr;

    // All connector blocks should be at the end of the list
    if (b->is_connector()) break;

    // If this block and the next one have a one-to-one successor/predecessor
    // relationship, simply append the next block
    int nfallthru = b->num_fall_throughs();
    while (nfallthru == 1 &&
           b->succ_fall_through(0)) {
      Block *n = b->_succs[0];

      // Skip over single-entry connector blocks, we don't want to
      // add them to the trace.
      while (n->is_connector() && n->num_preds() == 1) {
        n = n->_succs[0];
      }

      // We see a merge point, so stop search for the next block
      if (n->num_preds() != 1) break;

      i++;
      assert(n == _cfg.get_block(i), "expecting next block");
      tr->append(n);
      uf->map(n->_pre_order, tr->id());
      traces[n->_pre_order] = NULL;
      nfallthru = b->num_fall_throughs();
      b = n;
    }

    if (nfallthru > 0) {
      // Create a CFGEdge for each outgoing
      // edge that could be a fall-through.
      for (uint j = 0; j < b->_num_succs; j++ ) {
        if (b->succ_fall_through(j)) {
          Block *target = b->non_connector_successor(j);
          float freq = b->_freq * b->succ_prob(j);
          int from_pct = (int) ((100 * freq) / b->_freq);
          int to_pct = (int) ((100 * freq) / target->_freq);
          edges->append(new CFGEdge(b, target, freq, from_pct, to_pct));
        }
      }
    }
  }

  // Group connector blocks into one trace
  for (i++; i < _cfg.number_of_blocks(); i++) {
    Block *b = _cfg.get_block(i);
    assert(b->is_connector(), "connector blocks at the end");
    tr->append(b);
    uf->map(b->_pre_order, tr->id());
    traces[b->_pre_order] = NULL;
  }
}

// Union two traces together in uf, and null out the trace in the list
void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace) {
  uint old_id = old_trace->id();
  uint updated_id = updated_trace->id();

  uint lo_id = updated_id;
  uint hi_id = old_id;

  // If from is greater than to, swap values to meet
  // UnionFind guarantee.
  if (updated_id > old_id) {
    lo_id = old_id;
    hi_id = updated_id;

    // Fix up the trace ids
    traces[lo_id] = traces[updated_id];
    updated_trace->set_id(lo_id);
  }

  // Union the lower with the higher and remove the pointer
  // to the higher.
  uf->Union(lo_id, hi_id);
  traces[hi_id] = NULL;
}

// Append traces together via the most frequently executed edges
void PhaseBlockLayout::grow_traces() {
  // Order the edges, and drive the growth of Traces via the most
  // frequently executed edges.
  edges->sort(edge_order);
  for (int i = 0; i < edges->length(); i++) {
    CFGEdge *e = edges->at(i);

    if (e->state() != CFGEdge::open) continue;

    Block *src_block = e->from();
    Block *targ_block = e->to();

    // Don't grow traces along backedges?
    if (!BlockLayoutRotateLoops) {
      if (targ_block->_rpo <= src_block->_rpo) {
        targ_block->set_loop_alignment(targ_block);
        continue;
      }
    }

    Trace *src_trace = trace(src_block);
    Trace *targ_trace = trace(targ_block);

    // If the edge in question can join two traces at their ends,
    // append one trace to the other.
    if (src_trace->last_block() == src_block) {
      if (src_trace == targ_trace) {
        e->set_state(CFGEdge::interior);
        if (targ_trace->backedge(e)) {
          // Reset i to catch any newly eligible edge
          // (Or we could remember the first "open" edge, and reset there)
          i = 0;
        }
      } else if (targ_trace->first_block() == targ_block) {
        e->set_state(CFGEdge::connected);
        src_trace->append(targ_trace);
        union_traces(src_trace, targ_trace);
      }
    }
  }
}

// Embed one trace into another, if the fork or join points are sufficiently
// balanced.
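//
// Illustrative sketch (assumed trace shapes): with fall_thru_only, a diamond
//
//        A
//       / \
//      B   C
//       \ /
//        D
//
// laid out as traces T1 = [A D] and T2 = [B] can be folded together via the
// open edge A->B: A sits in the middle of T1 (not at its tail) while B
// starts T2, so T2 is inserted after A, giving T1 = [A B D].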
void PhaseBlockLayout::merge_traces(bool fall_thru_only) {
  // Walk the edge list one more time, looking at unprocessed edges.
  // Fold in diamonds.
  for (int i = 0; i < edges->length(); i++) {
    CFGEdge *e = edges->at(i);

    if (e->state() != CFGEdge::open) continue;
    if (fall_thru_only) {
      if (e->infrequent()) continue;
    }

    Block *src_block = e->from();
    Trace *src_trace = trace(src_block);
    bool src_at_tail = src_trace->last_block() == src_block;

    Block *targ_block = e->to();
    Trace *targ_trace = trace(targ_block);
    bool targ_at_start = targ_trace->first_block() == targ_block;

    if (src_trace == targ_trace) {
      // This may be a loop, but we can't do much about it.
      e->set_state(CFGEdge::interior);
      continue;
    }

    if (fall_thru_only) {
      // If the edge links the middle of two traces, we can't do anything.
      // Mark the edge and continue.
      if (!src_at_tail && !targ_at_start) {
        continue;
      }

      // Don't grow traces along backedges?
      if (!BlockLayoutRotateLoops && (targ_block->_rpo <= src_block->_rpo)) {
        continue;
      }

      // If both ends of the edge are available, why didn't we handle it earlier?
      assert(src_at_tail ^ targ_at_start, "Should have caught this edge earlier.");

      if (targ_at_start) {
        // Insert the "targ" trace in the "src" trace if the insertion point
        // is a two way branch.
        // Better profitability check possible, but may not be worth it.
        // Someday, see if this "fork" has an associated "join";
        // then make a policy on merging this trace at the fork or join.
        // For example, other things being equal, it may be better to place this
        // trace at the join point if the "src" trace ends in a two-way, but
        // the insertion point is one-way.
        assert(src_block->num_fall_throughs() == 2, "unexpected diamond");
        e->set_state(CFGEdge::connected);
        src_trace->insert_after(src_block, targ_trace);
        union_traces(src_trace, targ_trace);
      } else if (src_at_tail) {
        if (src_trace != trace(_cfg.get_root_block())) {
          e->set_state(CFGEdge::connected);
          targ_trace->insert_before(targ_block, src_trace);
          union_traces(targ_trace, src_trace);
        }
      }
    } else if (e->state() == CFGEdge::open) {
      // Append traces, even without a fall-thru connection.
      // But leave root entry at the beginning of the block list.
      if (targ_trace != trace(_cfg.get_root_block())) {
        e->set_state(CFGEdge::connected);
        src_trace->append(targ_trace);
        union_traces(src_trace, targ_trace);
      }
    }
  }
}

// Order the sequence of the traces in some desirable way, and fixup the
// jumps at the end of each block.
void PhaseBlockLayout::reorder_traces(int count) {
  ResourceArea *area = Thread::current()->resource_area();
  Trace ** new_traces = NEW_ARENA_ARRAY(area, Trace *, count);
  Block_List worklist;
  int new_count = 0;

  // Compact the traces.
  for (int i = 0; i < count; i++) {
    Trace *tr = traces[i];
    if (tr != NULL) {
      new_traces[new_count++] = tr;
    }
  }

  // The entry block should be first on the new trace list.
  Trace *tr = trace(_cfg.get_root_block());
  assert(tr == new_traces[0], "entry trace misplaced");

  // Sort the new trace list by frequency
  qsort(new_traces + 1, new_count - 1, sizeof(new_traces[0]), trace_frequency_order);

  // Patch up the successor blocks
  _cfg.clear_blocks();
  for (int i = 0; i < new_count; i++) {
    Trace *tr = new_traces[i];
    if (tr != NULL) {
      tr->fixup_blocks(_cfg);
    }
  }
}

// Order basic blocks based on frequency
PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg)
: Phase(BlockLayout)
, _cfg(cfg) {
  ResourceMark rm;
  ResourceArea *area = Thread::current()->resource_area();

  // List of traces
  int size = _cfg.number_of_blocks() + 1;
  traces = NEW_ARENA_ARRAY(area, Trace *, size);
  memset(traces, 0, size*sizeof(Trace*));
  next = NEW_ARENA_ARRAY(area, Block *, size);
  memset(next, 0, size*sizeof(Block *));
  prev = NEW_ARENA_ARRAY(area, Block *, size);
  memset(prev, 0, size*sizeof(Block *));

  // List of edges
  edges = new GrowableArray<CFGEdge*>;

  // Mapping block index --> block_trace
  uf = new UnionFind(size);
  uf->reset(size);

  // Find edges and create traces.
  find_edges();

  // Grow traces at their ends via most frequent edges.
  grow_traces();

  // Merge one trace into another, but only at fall-through points.
  // This may make diamonds and other related shapes in a trace.
  merge_traces(true);

  // Run merge again, allowing two traces to be catenated, even if
  // one does not fall through into the other. This brings loosely
  // related traces near each other.
  merge_traces(false);

  // Re-order all the remaining traces by frequency
  reorder_traces(size);

  assert(_cfg.number_of_blocks() >= (uint) (size - 1), "number of blocks can not shrink");
}


// Edge e completes a loop in a trace. If the target block is head of the
// loop, rotate the loop block so that the loop ends in a conditional branch.
bool Trace::backedge(CFGEdge *e) {
  bool loop_rotated = false;
  Block *src_block  = e->from();
  Block *targ_block = e->to();

  assert(last_block() == src_block, "loop discovery at back branch");
  if (first_block() == targ_block) {
    if (BlockLayoutRotateLoops && last_block()->num_fall_throughs() < 2) {
      // Find the last block in the trace that has a conditional
      // branch.
      Block *b;
      for (b = last_block(); b != NULL; b = prev(b)) {
        if (b->num_fall_throughs() == 2) {
          break;
        }
      }

      if (b != last_block() && b != NULL) {
        loop_rotated = true;

        // Rotate the loop by doing two-part linked-list surgery.
        append(first_block());
        break_loop_after(b);
      }
    }

    // Backbranch to the top of a trace
    // Scroll forward through the trace from the targ_block. If we find
    // a loop head before another loop top, use the loop head alignment.
    for (Block *b = targ_block; b != NULL; b = next(b)) {
      if (b->has_loop_alignment()) {
        break;
      }
      if (b->head()->is_Loop()) {
        targ_block = b;
        break;
      }
    }

    first_block()->set_loop_alignment(targ_block);

  } else {
    // Backbranch into the middle of a trace
    targ_block->set_loop_alignment(targ_block);
  }

  return loop_rotated;
}
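
// Illustrative sketch (assumed trace shape): for a trace [B1 B2 B3] whose
// backedge B3->B1 ends in an unconditional jump, with B2 the last block
// holding a two-way branch, the surgery above appends B1 behind B3 and then
// breaks the list after B2, yielding [B3 B1 B2].  The loop now ends in the
// conditional branch at B2, so the backedge can fall out of the test.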

// push blocks onto the CFG list
// ensure that blocks have the correct two-way branch sense
void Trace::fixup_blocks(PhaseCFG &cfg) {
  Block *last = last_block();
  for (Block *b = first_block(); b != NULL; b = next(b)) {
    cfg.add_block(b);
    if (!b->is_connector()) {
      int nfallthru = b->num_fall_throughs();
      if (b != last) {
        if (nfallthru == 2) {
          // Ensure that the sense of the branch is correct
          Block *bnext = next(b);
          Block *bs0 = b->non_connector_successor(0);

          MachNode *iff = b->get_node(b->number_of_nodes() - 3)->as_Mach();
          ProjNode *proj0 = b->get_node(b->number_of_nodes() - 2)->as_Proj();
          ProjNode *proj1 = b->get_node(b->number_of_nodes() - 1)->as_Proj();

          if (bnext == bs0) {
            // Fall-thru case in succs[0], should be in succs[1]

            // Flip targets in _succs map
            Block *tbs0 = b->_succs[0];
            Block *tbs1 = b->_succs[1];
            b->_succs.map( 0, tbs1 );
            b->_succs.map( 1, tbs0 );

            // Flip projections to match targets
            b->map_node(proj1, b->number_of_nodes() - 2);
            b->map_node(proj0, b->number_of_nodes() - 1);
          }
        }
      }
    }
  }
}