/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "compiler/compilerDirectives.hpp"
#include "opto/block.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/opcodes.hpp"
#include "opto/rootnode.hpp"
#include "utilities/copy.hpp"

void Block_Array::grow( uint i ) {
  assert(i >= Max(), "must be an overflow");
  debug_only(_limit = i+1);
  if( i < _size ) return;
  if( !_size ) {
    _size = 1;
    _blocks = (Block**)_arena->Amalloc( _size * sizeof(Block*) );
    _blocks[0] = NULL;
  }
  uint old = _size;
  while( i >= _size ) _size <<= 1;      // Double to fit
  _blocks = (Block**)_arena->Arealloc( _blocks, old*sizeof(Block*), _size*sizeof(Block*) );
  Copy::zero_to_bytes( &_blocks[old], (_size-old)*sizeof(Block*) );
}

void Block_List::remove(uint i) {
  assert(i < _cnt, "index out of bounds");
  Copy::conjoint_words_to_lower((HeapWord*)&_blocks[i+1], (HeapWord*)&_blocks[i], ((_cnt-i-1)*sizeof(Block*)));
  pop(); // shrink list by one block
}

void Block_List::insert(uint i, Block *b) {
  push(b); // grow list by one block
  Copy::conjoint_words_to_higher((HeapWord*)&_blocks[i], (HeapWord*)&_blocks[i+1], ((_cnt-i-1)*sizeof(Block*)));
  _blocks[i] = b;
}

#ifndef PRODUCT
void Block_List::print() {
  for (uint i=0; i < size(); i++) {
    tty->print("B%d ", _blocks[i]->_pre_order);
  }
  tty->print("size = %d\n", size());
}
#endif

uint Block::code_alignment() const {
  // Check for Root block
  if (_pre_order == 0) return CodeEntryAlignment;
  // Check for Start block
  if (_pre_order == 1) return InteriorEntryAlignment;
  // Check for loop alignment
  if (has_loop_alignment()) return loop_alignment();

  return relocInfo::addr_unit(); // no particular alignment
}

uint Block::compute_loop_alignment() {
  Node *h = head();
  int unit_sz = relocInfo::addr_unit();
  if (h->is_Loop() && h->as_Loop()->is_inner_loop()) {
    // Pre- and post-loops have low trip count so do not bother with
    // NOPs to align the loop head.
    // The constants are hidden from tuning, but only because my "divide
    // by 4" heuristic surely gets nearly all possible gain (a "do not
    // align at all" heuristic has a chance of getting a really tiny
    // gain).
    if (h->is_CountedLoop() && (h->as_CountedLoop()->is_pre_loop() ||
                                h->as_CountedLoop()->is_post_loop())) {
      return (OptoLoopAlignment > 4*unit_sz) ? (OptoLoopAlignment>>2) : unit_sz;
    }
    // Loops with low backedge frequency should not be aligned.
    Node *n = h->in(LoopNode::LoopBackControl)->in(0);
    if (n->is_MachIf() && n->as_MachIf()->_prob < 0.01) {
      return unit_sz; // Loop does not loop, more often than not!
    }
    return OptoLoopAlignment; // Otherwise align loop head
  }

  return unit_sz; // no particular alignment
}

// Compute the size of the first 'inst_cnt' instructions in this block.
// Return the number of instructions left to compute if the block has
// fewer than 'inst_cnt' instructions. Stop, and return 0, if sum_size
// exceeds OptoLoopAlignment.
uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
                                    PhaseRegAlloc* ra) {
  uint last_inst = number_of_nodes();
  for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) {
    uint inst_size = get_node(j)->size(ra);
    if( inst_size > 0 ) {
      inst_cnt--;
      uint sz = sum_size + inst_size;
      if( sz <= (uint)OptoLoopAlignment ) {
        // Compute size of instructions which fit into fetch buffer only
        // since all inst_cnt instructions will not fit even if we align them.
        sum_size = sz;
      } else {
        return 0;
      }
    }
  }
  return inst_cnt;
}

uint Block::find_node( const Node *n ) const {
  for( uint i = 0; i < number_of_nodes(); i++ ) {
    if( get_node(i) == n )
      return i;
  }
  ShouldNotReachHere();
  return 0;
}

// Find and remove n from block list
void Block::find_remove( const Node *n ) {
  remove_node(find_node(n));
}

bool Block::contains(const Node *n) const {
  return _nodes.contains(n);
}

// Return empty status of a block. Empty blocks contain only the head, other
// ideal nodes, and an optional trailing goto.
int Block::is_Empty() const {

  // Root or start block is not considered empty
  if (head()->is_Root() || head()->is_Start()) {
    return not_empty;
  }

  int success_result = completely_empty;
  int end_idx = number_of_nodes() - 1;

  // Check for ending goto
  if ((end_idx > 0) && (get_node(end_idx)->is_MachGoto())) {
    success_result = empty_with_goto;
    end_idx--;
  }

  // Unreachable blocks are considered empty
  if (num_preds() <= 1) {
    return success_result;
  }

  // Ideal nodes are allowable in empty blocks: skip them.  Only MachNodes
  // turn directly into code, because only MachNodes have non-trivial
  // emit() functions.
  while ((end_idx > 0) && !get_node(end_idx)->is_Mach()) {
    end_idx--;
  }

  // No room for any interesting instructions?
  if (end_idx == 0) {
    return success_result;
  }

  return not_empty;
}

// Return true if the block's code implies that it is likely to be
// executed infrequently.  Check to see if the block ends in a Halt or
// a low probability call.
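// (Note, added for clarity: PROB_UNLIKELY_MAG(N) denotes a probability of
// magnitude 1e-N (see cfgnode.hpp), so the cutoff below treats a call whose
// profiled count is at most roughly 1 in 10,000 as a slow-path call.)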
bool Block::has_uncommon_code() const {
  Node* en = end();

  if (en->is_MachGoto())
    en = en->in(0);
  if (en->is_Catch())
    en = en->in(0);
  if (en->is_MachProj() && en->in(0)->is_MachCall()) {
    MachCallNode* call = en->in(0)->as_MachCall();
    if (call->cnt() != COUNT_UNKNOWN && call->cnt() <= PROB_UNLIKELY_MAG(4)) {
      // This is true for slow-path stubs like new_{instance,array},
      // slow_arraycopy, complete_monitor_locking, uncommon_trap.
      // The magic number corresponds to the probability of an uncommon_trap,
      // even though it is a count, not a probability.
      return true;
    }
  }

  int op = en->is_Mach() ? en->as_Mach()->ideal_Opcode() : en->Opcode();
  return op == Op_Halt;
}

// True if block is low enough frequency or guarded by a test which
// mostly does not go here.
bool PhaseCFG::is_uncommon(const Block* block) {
  // Initial blocks must never be moved, so are never uncommon.
  if (block->head()->is_Root() || block->head()->is_Start()) return false;

  // Check for way-low freq
  if( block->_freq < BLOCK_FREQUENCY(0.00001f) ) return true;

  // Look for code shape indicating uncommon_trap or slow path
  if (block->has_uncommon_code()) return true;

  const float epsilon = 0.05f;
  const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon);
  uint uncommon_preds = 0;
  uint freq_preds = 0;
  uint uncommon_for_freq_preds = 0;

  for( uint i=1; i< block->num_preds(); i++ ) {
    Block* guard = get_block_for_node(block->pred(i));
    // Check to see if this block follows its guard 1 time out of 10000
    // or less.
    //
    // See list of magnitude-4 unlikely probabilities in cfgnode.hpp which
    // we intend to be "uncommon", such as slow-path TLE allocation,
    // predicted call failure, and uncommon trap triggers.
    //
    // Use an epsilon value of 5% to allow for variability in frequency
    // predictions and floating point calculations. The net effect is
    // that guard_factor is set to 9500.
    //
    // Ignore low-frequency blocks.
    // The next check is (guard->_freq < 1.e-5 * 9500.).
    if(guard->_freq*BLOCK_FREQUENCY(guard_factor) < BLOCK_FREQUENCY(0.00001f)) {
      uncommon_preds++;
    } else {
      freq_preds++;
      if( block->_freq < guard->_freq * guard_factor ) {
        uncommon_for_freq_preds++;
      }
    }
  }
  if( block->num_preds() > 1 &&
      // The block is uncommon if all preds are uncommon, or
      (uncommon_preds == (block->num_preds()-1) ||
      // it is uncommon for all frequent preds.
       uncommon_for_freq_preds == freq_preds) ) {
    return true;
  }
  return false;
}

#ifndef PRODUCT
void Block::dump_bidx(const Block* orig, outputStream* st) const {
  if (_pre_order) st->print("B%d", _pre_order);
  else st->print("N%d", head()->_idx);

  if (Verbose && orig != this) {
    // Dump the original block's idx
    st->print(" (");
    orig->dump_bidx(orig, st);
    st->print(")");
  }
}

void Block::dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st) const {
  if (is_connector()) {
    for (uint i=1; i<num_preds(); i++) {
      Block *p = cfg->get_block_for_node(pred(i));
      p->dump_pred(cfg, orig, st);
    }
  } else {
    dump_bidx(orig, st);
    st->print(" ");
  }
}

void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const {
  // Print the basic block.
  dump_bidx(this, st);
  st->print(": ");

  // Print the outgoing CFG edges.
  st->print("#\tout( ");
  for( uint i=0; i<_num_succs; i++ ) {
    non_connector_successor(i)->dump_bidx(_succs[i], st);
    st->print(" ");
  }

  // Print the incoming CFG edges.
  st->print(") <- ");
  if( head()->is_block_start() ) {
    st->print("in( ");
    for (uint i=1; i<num_preds(); i++) {
      Node *s = pred(i);
      if (cfg != NULL) {
        Block *p = cfg->get_block_for_node(s);
        p->dump_pred(cfg, p, st);
      } else {
        while (!s->is_block_start()) {
          s = s->in(0);
        }
        st->print("N%d ", s->_idx );
      }
    }
    st->print(") ");
  } else {
    st->print("BLOCK HEAD IS JUNK ");
  }

  // Print loop, if any
  const Block *bhead = this;    // Head of self-loop
  Node *bh = bhead->head();

  if ((cfg != NULL) && bh->is_Loop() && !head()->is_Root()) {
    LoopNode *loop = bh->as_Loop();
    const Block *bx = cfg->get_block_for_node(loop->in(LoopNode::LoopBackControl));
    while (bx->is_connector()) {
      bx = cfg->get_block_for_node(bx->pred(1));
    }
    st->print("Loop( B%d-B%d ", bhead->_pre_order, bx->_pre_order);
    // Dump any loop-specific bits, especially for CountedLoops.
    loop->dump_spec(st);
    st->print(")");
  } else if (has_loop_alignment()) {
    st->print("top-of-loop");
  }

  // Print frequency and other optimization-relevant information
  st->print(" Freq: %g",_freq);
  if( Verbose || WizardMode ) {
    st->print(" IDom: %d/#%d", _idom ? _idom->_pre_order : 0, _dom_depth);
    st->print(" RegPressure: %d",_reg_pressure);
    st->print(" IHRP Index: %d",_ihrp_index);
    st->print(" FRegPressure: %d",_freg_pressure);
    st->print(" FHRP Index: %d",_fhrp_index);
  }
  st->cr();
}

void Block::dump() const {
  dump(NULL);
}

void Block::dump(const PhaseCFG* cfg) const {
  dump_head(cfg);
  for (uint i=0; i< number_of_nodes(); i++) {
    get_node(i)->dump();
  }
  tty->print("\n");
}
#endif

PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher)
: Phase(CFG)
, _root(root)
, _block_arena(arena)
, _regalloc(NULL)
, _scheduling_for_pressure(false)
, _matcher(matcher)
, _node_to_block_mapping(arena)
, _node_latency(NULL)
#ifndef PRODUCT
, _trace_opto_pipelining(C->directive()->TraceOptoPipeliningOption)
#endif
#ifdef ASSERT
, _raw_oops(arena)
#endif
{
  ResourceMark rm;
  // I'll need a few machine-specific GotoNodes.  Make an Ideal GotoNode,
  // then Match it into a machine-specific Node.  Then clone the machine
  // Node on demand.
  Node *x = new GotoNode(NULL);
  x->init_req(0, x);
  _goto = matcher.match_tree(x);
  assert(_goto != NULL, "");
  _goto->set_req(0,_goto);

  // Build the CFG in Reverse Post Order
  _number_of_blocks = build_cfg();
  _root_block = get_block_for_node(_root);
}

// Build a proper looking CFG.  Make every block begin with either a StartNode
// or a RegionNode.  Make every block end with either a Goto, If or Return.
// The RootNode both starts and ends its own block.  Do this with a recursive
// backwards walk over the control edges.
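// (Sketch, added for clarity: the walk below is an iterative DFS over the
// control edges using an explicit Node_Stack. On the first visit of a
// block-ending projection it creates the Block, inserting a Goto or Region
// where the graph lacks a proper block end or start; when the stack entry is
// popped again, the block is wired into its predecessor's successor list.)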
uint PhaseCFG::build_cfg() {
  Arena *a = Thread::current()->resource_area();
  VectorSet visited(a);

  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(a, C->live_nodes() >> 1);
  nstack.push(_root, 0);
  uint sum = 0;                 // Counter for blocks

  while (nstack.is_nonempty()) {
    // node and in's index from stack's top
    // 'np' is _root (see above) or RegionNode, StartNode: we push on stack
    // only nodes which point to the start of basic block (see below).
    Node *np = nstack.node();
    // idx > 0, except for the first node (_root) pushed on stack
    // at the beginning when idx == 0.
    // We will use the condition (idx == 0) later to end the build.
    uint idx = nstack.index();
    Node *proj = np->in(idx);
    const Node *x = proj->is_block_proj();
    // Does the block end with a proper block-ending Node?  One of Return,
    // If or Goto? (This check should be done for visited nodes also).
    if (x == NULL) {                    // Does not end right...
      Node *g = _goto->clone(); // Force it to end in a Goto
      g->set_req(0, proj);
      np->set_req(idx, g);
      x = proj = g;
    }
    if (!visited.test_set(x->_idx)) { // Visit this block once
      // Skip any control-pinned middle'in stuff
      Node *p = proj;
      do {
        proj = p;                   // Update pointer to last Control
        p = p->in(0);               // Move control forward
      } while( !p->is_block_proj() &&
               !p->is_block_start() );
      // Make the block begin with one of Region or StartNode.
      if( !p->is_block_start() ) {
        RegionNode *r = new RegionNode( 2 );
        r->init_req(1, p);          // Insert RegionNode in the way
        proj->set_req(0, r);        // Insert RegionNode in the way
        p = r;
      }
      // 'p' now points to the start of this basic block

      // Put self in array of basic blocks
      Block *bb = new (_block_arena) Block(_block_arena, p);
      map_node_to_block(p, bb);
      map_node_to_block(x, bb);
      if( x != p ) {                // Only for root is x == p
        bb->push_node((Node*)x);
      }
      // Now handle predecessors
      ++sum;                        // Count 1 for self block
      uint cnt = bb->num_preds();
      for (int i = (cnt - 1); i > 0; i-- ) { // For all predecessors
        Node *prevproj = p->in(i);  // Get prior input
        assert( !prevproj->is_Con(), "dead input not removed" );
        // Check to see if p->in(i) is a "control-dependent" CFG edge -
        // i.e., it splits at the source (via an IF or SWITCH) and merges
        // at the destination (via a many-input Region).
        // This breaks critical edges.  The RegionNode to start the block
        // will be added when <p,i> is pulled off the node stack
        if ( cnt > 2 ) {            // Merging many things?
          assert( prevproj== bb->pred(i),"");
          if(prevproj->is_block_proj() != prevproj) { // Control-dependent edge?
            // Force a block on the control-dependent edge
            Node *g = _goto->clone();       // Force it to end in a Goto
            g->set_req(0,prevproj);
            p->set_req(i,g);
          }
        }
        nstack.push(p, i);  // 'p' is RegionNode or StartNode
      }
    } else { // Post-processing visited nodes
      nstack.pop();                 // remove node from stack
      // Check if it is the first node pushed on the stack at the beginning.
      if (idx == 0) break;          // end of the build
      // Find predecessor basic block
      Block *pb = get_block_for_node(x);
      // Insert into nodes array, if not already there
      if (!has_block(proj)) {
        assert( x != proj, "" );
        // Map basic block of projection
        map_node_to_block(proj, pb);
        pb->push_node(proj);
      }
      // Insert self as a child of my predecessor block
      pb->_succs.map(pb->_num_succs++, get_block_for_node(np));
      assert( pb->get_node(pb->number_of_nodes() - pb->_num_succs)->is_block_proj(),
              "too many control users, not a CFG?" );
    }
  }
  // Return number of basic blocks for all children and self
  return sum;
}

// Inserts a goto & corresponding basic block between
// block[block_no] and its succ_no'th successor block
void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
  // get block with block_no
  assert(block_no < number_of_blocks(), "illegal block number");
  Block* in  = get_block(block_no);
  // get successor block succ_no
  assert(succ_no < in->_num_succs, "illegal successor number");
  Block* out = in->_succs[succ_no];
  // Compute frequency of the new block. Do this before inserting
  // new block in case succ_prob() needs to infer the probability from
  // surrounding blocks.
  float freq = in->_freq * in->succ_prob(succ_no);
  // get ProjNode corresponding to the succ_no'th successor of the in block
  ProjNode* proj = in->get_node(in->number_of_nodes() - in->_num_succs + succ_no)->as_Proj();
  // create region for basic block
  RegionNode* region = new RegionNode(2);
  region->init_req(1, proj);
  // setup corresponding basic block
  Block* block = new (_block_arena) Block(_block_arena, region);
  map_node_to_block(region, block);
  C->regalloc()->set_bad(region->_idx);
  // add a goto node
  Node* gto = _goto->clone(); // get a new goto node
  gto->set_req(0, region);
  // add it to the basic block
  block->push_node(gto);
  map_node_to_block(gto, block);
  C->regalloc()->set_bad(gto->_idx);
  // hook up successor block
  block->_succs.map(block->_num_succs++, out);
  // remap successor's predecessors if necessary
  for (uint i = 1; i < out->num_preds(); i++) {
    if (out->pred(i) == proj) out->head()->set_req(i, gto);
  }
  // remap predecessor's successor to new block
  in->_succs.map(succ_no, block);
  // Set the frequency of the new block
  block->_freq = freq;
  // add new basic block to basic block list
  add_block_at(block_no + 1, block);
}

// Does this block end in a multiway branch that cannot have the default case
// flipped for another case?
static bool no_flip_branch(Block *b) {
  int branch_idx = b->number_of_nodes() - b->_num_succs-1;
  if (branch_idx < 1) {
    return false;
  }
  Node *branch = b->get_node(branch_idx);
  if (branch->is_Catch()) {
    return true;
  }
  if (branch->is_Mach()) {
    if (branch->is_MachNullCheck()) {
      return true;
    }
    int iop = branch->as_Mach()->ideal_Opcode();
    if (iop == Op_FastLock || iop == Op_FastUnlock) {
      return true;
    }
    // Don't flip if branch has an implicit check.
    if (branch->as_Mach()->is_TrapBasedCheckNode()) {
      return true;
    }
  }
  return false;
}

// Check for NeverBranch at block end.  This needs to become a GOTO to the
// true target.
// NeverBranches are treated as a conditional branch that always
// goes the same direction for most of the optimizer, and are used to give a
// fake exit path to infinite loops.  At this late stage they need to turn
// into Gotos so that when you enter the infinite loop you indeed hang.
void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
  // Find true target
  int end_idx = b->end_idx();
  int idx = b->get_node(end_idx+1)->as_Proj()->_con;
  Block *succ = b->_succs[idx];
  Node* gto = _goto->clone(); // get a new goto node
  gto->set_req(0, b->head());
  Node *bp = b->get_node(end_idx);
  b->map_node(gto, end_idx); // Slam over NeverBranch
  map_node_to_block(gto, b);
  C->regalloc()->set_bad(gto->_idx);
  b->pop_node();              // Yank projections
  b->pop_node();              // Yank projections
  b->_succs.map(0,succ);      // Map only successor
  b->_num_succs = 1;
  // remap successor's predecessors if necessary
  uint j;
  for( j = 1; j < succ->num_preds(); j++)
    if( succ->pred(j)->in(0) == bp )
      succ->head()->set_req(j, gto);
  // Kill alternate exit path
  Block *dead = b->_succs[1-idx];
  for( j = 1; j < dead->num_preds(); j++)
    if( dead->pred(j)->in(0) == bp )
      break;
  // Scan through block, yanking dead path from
  // all regions and phis.
  dead->head()->del_req(j);
  for( int k = 1; dead->get_node(k)->is_Phi(); k++ )
    dead->get_node(k)->del_req(j);
}

// Helper function to move block bx to the slot following b_index.  Return
// true if the move is successful, otherwise false.
bool PhaseCFG::move_to_next(Block* bx, uint b_index) {
  if (bx == NULL) return false;

  // Return false if bx is already scheduled.
  uint bx_index = bx->_pre_order;
  if ((bx_index <= b_index) && (get_block(bx_index) == bx)) {
    return false;
  }

  // Find the current index of block bx on the block list
  bx_index = b_index + 1;
  while (bx_index < number_of_blocks() && get_block(bx_index) != bx) {
    bx_index++;
  }
  assert(get_block(bx_index) == bx, "block not found");

  // If the previous block conditionally falls into bx, return false,
  // because moving bx will create an extra jump.
  for(uint k = 1; k < bx->num_preds(); k++ ) {
    Block* pred = get_block_for_node(bx->pred(k));
    if (pred == get_block(bx_index - 1)) {
      if (pred->_num_succs != 1) {
        return false;
      }
    }
  }

  // Reinsert bx just past block 'b'
  _blocks.remove(bx_index);
  _blocks.insert(b_index + 1, bx);
  return true;
}

// Move empty and uncommon blocks to the end.
void PhaseCFG::move_to_end(Block *b, uint i) {
  int e = b->is_Empty();
  if (e != Block::not_empty) {
    if (e == Block::empty_with_goto) {
      // Remove the goto, but leave the block.
      b->pop_node();
    }
    // Mark this block as a connector block, which will cause it to be
    // ignored in certain functions such as non_connector_successor().
    b->set_connector();
  }
  // Move the empty block to the end, and don't recheck.
  _blocks.remove(i);
  _blocks.push(b);
}

// Set loop alignment for every block
void PhaseCFG::set_loop_alignment() {
  uint last = number_of_blocks();
  assert(get_block(0) == get_root_block(), "");

  for (uint i = 1; i < last; i++) {
    Block* block = get_block(i);
    if (block->head()->is_Loop()) {
      block->set_loop_alignment(block);
    }
  }
}

// Turn empty basic blocks into "connector" blocks, and move uncommon blocks
// to the end.
void PhaseCFG::remove_empty_blocks() {
  // Move uncommon blocks to the end
  uint last = number_of_blocks();
  assert(get_block(0) == get_root_block(), "");

  for (uint i = 1; i < last; i++) {
    Block* block = get_block(i);
    if (block->is_connector()) {
      break;
    }

    // Check for NeverBranch at block end.  This needs to become a GOTO to the
    // true target.  NeverBranches are treated as a conditional branch that
    // always goes the same direction for most of the optimizer, and are used
    // to give a fake exit path to infinite loops.  At this late stage they
    // need to turn into Gotos so that when you enter the infinite loop you
    // indeed hang.
    if (block->get_node(block->end_idx())->Opcode() == Op_NeverBranch) {
      convert_NeverBranch_to_Goto(block);
    }

    // Look for uncommon blocks and move to end.
    if (!C->do_freq_based_layout()) {
      if (is_uncommon(block)) {
        move_to_end(block, i);
        last--; // No longer check for being uncommon!
        if (no_flip_branch(block)) { // Fall-thru case must follow?
          // Find the fall-thru block
          block = get_block(i);
          move_to_end(block, i);
          last--;
        }
        // backup block counter post-increment
        i--;
      }
    }
  }

  // Move empty blocks to the end
  last = number_of_blocks();
  for (uint i = 1; i < last; i++) {
    Block* block = get_block(i);
    if (block->is_Empty() != Block::not_empty) {
      move_to_end(block, i);
      last--;
      i--;
    }
  } // End of for all blocks
}

Block *PhaseCFG::fixup_trap_based_check(Node *branch, Block *block, int block_pos, Block *bnext) {
  // Trap based checks must fall through to the successor with
  // PROB_ALWAYS.
  // They should be an If with 2 successors.
  assert(branch->is_MachIf(), "must be If");
  assert(block->_num_succs == 2, "must have 2 successors");

  // Get the If node and the projection for the first successor.
  MachIfNode *iff   = block->get_node(block->number_of_nodes()-3)->as_MachIf();
  ProjNode   *proj0 = block->get_node(block->number_of_nodes()-2)->as_Proj();
  ProjNode   *proj1 = block->get_node(block->number_of_nodes()-1)->as_Proj();
  ProjNode   *projt = (proj0->Opcode() == Op_IfTrue)  ? proj0 : proj1;
  ProjNode   *projf = (proj0->Opcode() == Op_IfFalse) ? proj0 : proj1;

  // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
  assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0");
  assert(proj1->raw_out(0) == block->_succs[1]->head(), "Mismatch successor 1");

  ProjNode *proj_always;
  ProjNode *proj_never;
  // We must negate the branch if the implicit check doesn't follow
  // the branch's TRUE path. Then, the new TRUE branch target will
  // be the old FALSE branch target.
  if (iff->_prob <= 2*PROB_NEVER) {   // There are small rounding errors.
    proj_never  = projt;
    proj_always = projf;
  } else {
    // We must negate the branch if the trap doesn't follow the
    // branch's TRUE path.
    // Then, the new TRUE branch target will
    // be the old FALSE branch target.
    proj_never  = projf;
    proj_always = projt;
    iff->negate();
  }
  assert(iff->_prob <= 2*PROB_NEVER, "Trap based checks are expected to trap never!");
  // Map the successors properly
  block->_succs.map(0, get_block_for_node(proj_never ->raw_out(0)));   // The target of the trap.
  block->_succs.map(1, get_block_for_node(proj_always->raw_out(0)));   // The fall through target.

  if (block->get_node(block->number_of_nodes() - block->_num_succs + 1) != proj_always) {
    block->map_node(proj_never,  block->number_of_nodes() - block->_num_succs + 0);
    block->map_node(proj_always, block->number_of_nodes() - block->_num_succs + 1);
  }

  // Place the fall through block after this block.
  Block *bs1 = block->non_connector_successor(1);
  if (bs1 != bnext && move_to_next(bs1, block_pos)) {
    bnext = bs1;
  }
  // If the fall through block is still not the next block, insert a goto.
  if (bs1 != bnext) {
    insert_goto_at(block_pos, 1);
  }
  return bnext;
}

// Fix up the final control flow for basic blocks.
void PhaseCFG::fixup_flow() {
  // Fixup final control flow for the blocks. Remove jump-to-next
  // block. If neither arm of an IF follows the conditional branch, we
  // have to add a second jump after the conditional. We place the
  // TRUE branch target in succs[0] for both GOTOs and IFs.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    block->_pre_order = i;          // turn pre-order into block-index

    // Connector blocks need no further processing.
    if (block->is_connector()) {
      assert((i+1) == number_of_blocks() || get_block(i + 1)->is_connector(), "All connector blocks should sink to the end");
      continue;
    }
    assert(block->is_Empty() != Block::completely_empty, "Empty blocks should be connectors");

    Block* bnext = (i < number_of_blocks() - 1) ? get_block(i + 1) : NULL;
    Block* bs0 = block->non_connector_successor(0);

    // Check for multi-way branches where I cannot negate the test to
    // exchange the true and false targets.
    if (no_flip_branch(block)) {
      // Find the fall through case - the branch must fall into its target.
      // Get the index of the branch's first successor.
      int branch_idx = block->number_of_nodes() - block->_num_succs;

      // The branch is the node immediately before its first successor projection.
      Node *branch = block->get_node(branch_idx-1);

      // Handle no-flip branches which have implicit checks and which require
      // special block ordering and individual semantics of the 'fall through
      // case'.
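      // (Note, added for clarity: trap-based checks, e.g. with
      // TrapBasedNullChecks on platforms that support them, encode the
      // 'never' path as a trapping instruction, so the PROB_ALWAYS
      // successor must be laid out as the fall through; see
      // fixup_trap_based_check() above.)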
      if ((TrapBasedNullChecks || TrapBasedRangeChecks) &&
          branch->is_Mach() && branch->as_Mach()->is_TrapBasedCheckNode()) {
        bnext = fixup_trap_based_check(branch, block, i, bnext);
      } else {
        // Else, default handling for no-flip branches
        for (uint j2 = 0; j2 < block->_num_succs; j2++) {
          const ProjNode* p = block->get_node(branch_idx + j2)->as_Proj();
          if (p->_con == 0) {
            // successor j2 is the fall through case
            if (block->non_connector_successor(j2) != bnext) {
              // but it is not the next block => insert a goto
              insert_goto_at(i, j2);
            }
            // Put taken branch in slot 0
            if (j2 == 0 && block->_num_succs == 2) {
              // Flip targets in succs map
              Block *tbs0 = block->_succs[0];
              Block *tbs1 = block->_succs[1];
              block->_succs.map(0, tbs1);
              block->_succs.map(1, tbs0);
            }
            break;
          }
        }
      }

      // Remove all CatchProjs
      for (uint j = 0; j < block->_num_succs; j++) {
        block->pop_node();
      }

    } else if (block->_num_succs == 1) {
      // Block ends in a Goto?
      if (bnext == bs0) {
        // We fall into next block; remove the Goto
        block->pop_node();
      }

    } else if(block->_num_succs == 2) { // Block ends in an If?
      // Get opcode of 1st projection (matches _succs[0])
      // Note: Since this basic block has 2 exits, the last 2 nodes must
      //       be projections (in any order), the 3rd last node must be
      //       the IfNode (we have excluded other 2-way exits such as
      //       CatchNodes already).
      MachNode* iff   = block->get_node(block->number_of_nodes() - 3)->as_Mach();
      ProjNode* proj0 = block->get_node(block->number_of_nodes() - 2)->as_Proj();
      ProjNode* proj1 = block->get_node(block->number_of_nodes() - 1)->as_Proj();

      // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
      assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0");
      assert(proj1->raw_out(0) == block->_succs[1]->head(), "Mismatch successor 1");

      Block* bs1 = block->non_connector_successor(1);

      // Check for neither successor block following the current
      // block ending in a conditional. If so, move one of the
      // successors after the current one, provided that the
      // successor was previously unscheduled, but moveable
      // (i.e., all paths to it involve a branch).
      if (!C->do_freq_based_layout() && bnext != bs0 && bnext != bs1) {
        // Choose the more common successor based on the probability
        // of the conditional branch.
        Block* bx = bs0;
        Block* by = bs1;

        // _prob is the probability of taking the true path. Make
        // p the probability of taking successor #1.
        float p = iff->as_MachIf()->_prob;
        if (proj0->Opcode() == Op_IfTrue) {
          p = 1.0 - p;
        }

        // Prefer successor #1 if p > 0.5
        if (p > PROB_FAIR) {
          bx = bs1;
          by = bs0;
        }

        // Attempt the more common successor first
        if (move_to_next(bx, i)) {
          bnext = bx;
        } else if (move_to_next(by, i)) {
          bnext = by;
        }
      }

      // Check for conditional branching the wrong way. Negate
      // the conditional, if needed, so it falls into the following block
      // and branches to the not-following block.

      // Check for the next block being in succs[0]. We are going to branch
      // to succs[0], so we want the fall-thru case as the next block in
      // succs[1].
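      // (Note, added for clarity: this maintains the invariant stated at the
      // top of fixup_flow() that the TRUE/taken target ends up in succs[0];
      // when the next block is succs[0], the projections and successors are
      // swapped and the condition is negated below.)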
      if (bnext == bs0) {
        // Fall-thru case in succs[0], so flip targets in succs map
        Block* tbs0 = block->_succs[0];
        Block* tbs1 = block->_succs[1];
        block->_succs.map(0, tbs1);
        block->_succs.map(1, tbs0);
        // Flip projection for each target
        ProjNode* tmp = proj0;
        proj0 = proj1;
        proj1 = tmp;

      } else if(bnext != bs1) {
        // Need a double-branch
        // The existing conditional branch need not change.
        // Add an unconditional branch to the false target.
        // Alas, it must appear in its own block and adding a
        // block this late in the game is complicated. Sigh.
        insert_goto_at(i, 1);
      }

      // Make sure we TRUE branch to the target
      if (proj0->Opcode() == Op_IfFalse) {
        iff->as_MachIf()->negate();
      }

      block->pop_node();          // Remove IfFalse & IfTrue projections
      block->pop_node();

    } else {
      // Multi-exit block, e.g. a switch statement
      // But we don't need to do anything here
    }
  } // End of for all blocks
}


// postalloc_expand: Expand nodes after register allocation.
//
// postalloc_expand has to be called after register allocation, just
// before output (i.e. scheduling). It only gets called if
// Matcher::require_postalloc_expand is true.
//
// Background:
//
// Nodes that are expanded (one compound node requiring several
// assembler instructions to be implemented split into two or more
// non-compound nodes) after register allocation are not as nice as
// the ones expanded before register allocation - they don't
// participate in optimizations such as global code motion. But after
// register allocation we can expand nodes that use registers which
// are not spillable or registers that are not allocated, because the
// old compound node is simply replaced (in its location in the basic
// block) by a new subgraph which does not contain compound nodes any
// more. The scheduler called during output can later process these
// non-compound nodes.
//
// Implementation:
//
// Nodes requiring postalloc expand are specified in the ad file by using
// a postalloc_expand statement instead of ins_encode. A postalloc_expand
// contains a single call to an encoding, as does an ins_encode
// statement. Instead of an emit() function a postalloc_expand() function
// is generated that doesn't emit assembler but creates a new
// subgraph. The code below calls this postalloc_expand function for each
// node with the appropriate attribute. This function returns the new
// nodes generated in an array passed in the call. The old node,
// potential MachTemps before and potential Projs after it then get
// disconnected and replaced by the new nodes. The instruction
// generating the result has to be the last one in the array. In
// general it is assumed that Projs after the node expanded are
// kills. These kills are not required any more after expanding as
// there are now explicitly visible def-use chains and the Projs are
// removed. This does not hold for calls: They do not only have
// kill-Projs but also Projs defining values. Therefore Projs after
// the node expanded are removed for all but for calls. If a node is
// to be reused, it must be added to the nodes list returned, and it
// will be added again.
//
// Implementing the postalloc_expand function for a node in an enc_class
// is rather tedious.
// It requires knowledge about many node details, as
// the nodes and the subgraph must be hand crafted. To simplify this,
// adlc generates some utility variables into the postalloc_expand function,
// e.g., holding the operands as specified by the postalloc_expand encoding
// specification:
//   * unsigned idx_<par_name>  holding the index of the node in the ins
//   * Node *n_<par_name>       holding the node loaded from the ins
//   * MachOpnd *op_<par_name>  holding the corresponding operand
//
// The ordering of operands cannot be determined by looking at a
// rule. Especially if a match rule matches several different trees,
// several nodes are generated from one instruct specification with
// different operand orderings. In this case the adlc generated
// variables are the only way to access the ins and operands
// deterministically.
//
// If assigning a register to a node that contains an oop, don't
// forget to call ra_->set_oop() for the node.
void PhaseCFG::postalloc_expand(PhaseRegAlloc* _ra) {
  GrowableArray <Node *> new_nodes(32); // Array with new nodes filled by postalloc_expand function of node.
  GrowableArray <Node *> remove(32);
  GrowableArray <Node *> succs(32);
  unsigned int max_idx = C->unique();   // Remember to distinguish new from old nodes.
  DEBUG_ONLY(bool foundNode = false);

  // for all blocks
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block *b = _blocks[i];
    // For all instructions in the current block.
    for (uint j = 0; j < b->number_of_nodes(); j++) {
      Node *n = b->get_node(j);
      if (n->is_Mach() && n->as_Mach()->requires_postalloc_expand()) {
#ifdef ASSERT
        if (TracePostallocExpand) {
          if (!foundNode) {
            foundNode = true;
            tty->print("POSTALLOC EXPANDING %d %s\n", C->compile_id(),
                       C->method() ? C->method()->name()->as_utf8() : C->stub_name());
          }
          tty->print(" postalloc expanding "); n->dump();
          if (Verbose) {
            tty->print(" with ins:\n");
            for (uint k = 0; k < n->len(); ++k) {
              if (n->in(k)) { tty->print(" "); n->in(k)->dump(); }
            }
          }
        }
#endif
        new_nodes.clear();
        // Collect nodes that have to be removed from the block later on.
        uint req = n->req();
        remove.clear();
        for (uint k = 0; k < req; ++k) {
          if (n->in(k) && n->in(k)->is_MachTemp()) {
            remove.push(n->in(k)); // MachTemps which are inputs to the old node have to be removed.
            n->in(k)->del_req(0);
            j--;
          }
        }

        // Check whether we can allocate enough nodes. We set a fixed limit
        // for the size of postalloc expands with this.
        uint unique_limit = C->unique() + 40;
        if (unique_limit >= _ra->node_regs_max_index()) {
          Compile::current()->record_failure("out of nodes in postalloc expand");
          return;
        }

        // Emit (i.e. generate new nodes).
        n->as_Mach()->postalloc_expand(&new_nodes, _ra);

        assert(C->unique() < unique_limit, "You allocated too many nodes in your postalloc expand.");

        // Disconnect the inputs of the old node.
        //
        // We reuse MachSpillCopy nodes. If we need to expand them, there
        // are many, so reusing pays off. If reused, the node already
        // has the new ins. n must be the last node on new_nodes list.
        if (!n->is_MachSpillCopy()) {
          for (int k = req - 1; k >= 0; --k) {
            n->del_req(k);
          }
        }

#ifdef ASSERT
        // Check that all nodes have proper operands.
        for (int k = 0; k < new_nodes.length(); ++k) {
          if (new_nodes.at(k)->_idx < max_idx || !new_nodes.at(k)->is_Mach()) continue; // old node, Proj ...
          MachNode *m = new_nodes.at(k)->as_Mach();
          for (unsigned int l = 0; l < m->num_opnds(); ++l) {
            if (MachOper::notAnOper(m->_opnds[l])) {
              outputStream *os = tty;
              os->print("Node %s ", m->Name());
              os->print("has invalid opnd %d: %p\n", l, m->_opnds[l]);
              assert(0, "Invalid operands, see inline trace in hs_err_pid file.");
            }
          }
        }
#endif

        // Collect succs of old node in remove (for projections) and in succs
        // (for all other nodes); do _not_ collect projections in remove (but
        // in succs) in case the node is a call. We need the projections for
        // calls as they are associated with registers (i.e. they are defs).
        succs.clear();
        for (DUIterator k = n->outs(); n->has_out(k); k++) {
          if (n->out(k)->is_Proj() && !n->is_MachCall() && !n->is_MachBranch()) {
            remove.push(n->out(k));
          } else {
            succs.push(n->out(k));
          }
        }
        // Replace old node n as input of its succs by last of the new nodes.
        for (int k = 0; k < succs.length(); ++k) {
          Node *succ = succs.at(k);
          for (uint l = 0; l < succ->req(); ++l) {
            if (succ->in(l) == n) {
              succ->set_req(l, new_nodes.at(new_nodes.length() - 1));
            }
          }
          for (uint l = succ->req(); l < succ->len(); ++l) {
            if (succ->in(l) == n) {
              succ->set_prec(l, new_nodes.at(new_nodes.length() - 1));
            }
          }
        }

        // Index of old node in block.
        uint index = b->find_node(n);
        // Insert new nodes into block and map them in nodes->blocks array
        // and remember last node in n2.
        Node *n2 = NULL;
        for (int k = 0; k < new_nodes.length(); ++k) {
          n2 = new_nodes.at(k);
          b->insert_node(n2, ++index);
          map_node_to_block(n2, b);
        }

        // Add old node n to remove and remove them all from block.
        remove.push(n);
        j--;
#ifdef ASSERT
        if (TracePostallocExpand && Verbose) {
          tty->print(" removing:\n");
          for (int k = 0; k < remove.length(); ++k) {
            tty->print(" "); remove.at(k)->dump();
          }
          tty->print(" inserting:\n");
          for (int k = 0; k < new_nodes.length(); ++k) {
            tty->print(" "); new_nodes.at(k)->dump();
          }
        }
#endif
        for (int k = 0; k < remove.length(); ++k) {
          if (b->contains(remove.at(k))) {
            b->find_remove(remove.at(k));
          } else {
            assert(remove.at(k)->is_Proj() && (remove.at(k)->in(0)->is_MachBranch()), "");
          }
        }
        // If anything has been inserted (n2 != NULL), continue after last node inserted.
        // This does not always work. Some postalloc expands don't insert any nodes, if they
        // do optimizations (e.g., max(x,x)). In this case we decrement j accordingly.
        j = n2 ? b->find_node(n2) : j;
      }
    }
  }

#ifdef ASSERT
  if (foundNode) {
    tty->print("FINISHED %d %s\n", C->compile_id(), C->method() ?
               C->method()->name()->as_utf8() : C->stub_name());
    tty->flush();
  }
#endif
}


//------------------------------dump-------------------------------------------
#ifndef PRODUCT
void PhaseCFG::_dump_cfg( const Node *end, VectorSet &visited ) const {
  const Node *x = end->is_block_proj();
  assert( x, "not a CFG" );

  // Do not visit this block again
  if( visited.test_set(x->_idx) ) return;

  // Skip through this block
  const Node *p = x;
  do {
    p = p->in(0);               // Move control forward
    assert( !p->is_block_proj() || p->is_Root(), "not a CFG" );
  } while( !p->is_block_start() );

  // Recursively visit
  for (uint i = 1; i < p->req(); i++) {
    _dump_cfg(p->in(i), visited);
  }

  // Dump the block
  get_block_for_node(p)->dump(this);
}

void PhaseCFG::dump( ) const {
  tty->print("\n--- CFG --- %d BBs\n", number_of_blocks());
  if (_blocks.size()) {         // Did we do basic-block layout?
    for (uint i = 0; i < number_of_blocks(); i++) {
      const Block* block = get_block(i);
      block->dump(this);
    }
  } else {                      // Else do it with a DFS
    VectorSet visited(_block_arena);
    _dump_cfg(_root, visited);
  }
}

void PhaseCFG::dump_headers() {
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    if (block != NULL) {
      block->dump_head(this);
    }
  }
}

void PhaseCFG::verify() const {
#ifdef ASSERT
  // Verify sane CFG
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    uint cnt = block->number_of_nodes();
    uint j;
    for (j = 0; j < cnt; j++) {
      Node *n = block->get_node(j);
      assert(get_block_for_node(n) == block, "");
      if (j >= 1 && n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
        assert(j == 1 || block->get_node(j-1)->is_Phi(), "CreateEx must be first instruction in block");
      }
      if (n->needs_anti_dependence_check()) {
        verify_anti_dependences(block, n);
      }
      for (uint k = 0; k < n->req(); k++) {
        Node *def = n->in(k);
        if (def && def != n) {
          assert(get_block_for_node(def) || def->is_Con(), "must have block; constants for debug info ok");
          // Verify that all instructions in the block are in correct order.
          // Uses must follow their definition if they are in the same block.
          // Mostly done to check that MachSpillCopy nodes are placed correctly
          // when CreateEx node is moved in build_ifg_physical().
          if (get_block_for_node(def) == block && !(block->head()->is_Loop() && n->is_Phi()) &&
              // See (+++) comment in reg_split.cpp
              !(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) {
            bool is_loop = false;
            if (n->is_Phi()) {
              for (uint l = 1; l < def->req(); l++) {
                if (n == def->in(l)) {
                  is_loop = true;
                  break; // Some kind of loop
                }
              }
            }
            assert(is_loop || block->find_node(def) < j, "uses must follow definitions");
          }
        }
      }
    }

    j = block->end_idx();
    Node* bp = (Node*)block->get_node(block->number_of_nodes() - 1)->is_block_proj();
    assert(bp, "last instruction must be a block proj");
    assert(bp == block->get_node(j), "wrong number of successors for this block");
    if (bp->is_Catch()) {
      while (block->get_node(--j)->is_MachProj()) {
        ;
      }
      assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
    } else if (bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If) {
      assert(block->_num_succs == 2, "Conditional branch must have two targets");
    }
  }
#endif
}
#endif

UnionFind::UnionFind( uint max ) : _cnt(max), _max(max), _indices(NEW_RESOURCE_ARRAY(uint,max)) {
  Copy::zero_to_bytes( _indices, sizeof(uint)*max );
}

void UnionFind::extend( uint from_idx, uint to_idx ) {
  _nesting.check();
  if( from_idx >= _max ) {
    uint size = 16;
    while( size <= from_idx ) size <<=1;
    _indices = REALLOC_RESOURCE_ARRAY( uint, _indices, _max, size );
    _max = size;
  }
  while( _cnt <= from_idx ) _indices[_cnt++] = 0;
  _indices[from_idx] = to_idx;
}

void UnionFind::reset( uint max ) {
  // Force the Union-Find mapping to be at least this large
  extend(max, 0);
  // Initialize to be the ID mapping.
  for( uint i=0; i<max; i++ ) map(i,i);
}

// Straight out of Tarjan's union-find algorithm
uint UnionFind::Find_compress( uint idx ) {
  uint cur  = idx;
  uint next = lookup(cur);
  while( next != cur ) {        // Scan chain of equivalences
    assert( next < cur, "always union smaller" );
    cur = next;                 // until find a fixed-point
    next = lookup(cur);
  }
  // Core of union-find algorithm: update chain of
  // equivalences to be equal to the root.
  while( idx != next ) {
    uint tmp = lookup(idx);
    map(idx, next);
    idx = tmp;
  }
  return idx;
}

// Like Find above, but no path compress, so bad asymptotic behavior
uint UnionFind::Find_const( uint idx ) const {
  if( idx == 0 ) return idx;    // Ignore the zero idx
  // Off the end?  This can happen during debugging dumps
  // when data structures have not finished being updated.
  if( idx >= _max ) return idx;
  uint next = lookup(idx);
  while( next != idx ) {        // Scan chain of equivalences
    idx = next;                 // until find a fixed-point
    next = lookup(idx);
  }
  return next;
}

// Union 2 sets together.
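// (Note, added for clarity: the asserts below keep the invariant that the
// smaller index becomes the root of the merged set, matching the
// "always union smaller" assumption in Find_compress() above and the id
// fixup in PhaseBlockLayout::union_traces() below.)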
void UnionFind::Union( uint idx1, uint idx2 ) {
  uint src = Find(idx1);
  uint dst = Find(idx2);
  assert( src, "" );
  assert( dst, "" );
  assert( src < _max, "oob" );
  assert( dst < _max, "oob" );
  assert( src < dst, "always union smaller" );
  map(dst, src);
}

#ifndef PRODUCT
void Trace::dump( ) const {
  tty->print_cr("Trace (freq %f)", first_block()->_freq);
  for (Block *b = first_block(); b != NULL; b = next(b)) {
    tty->print(" B%d", b->_pre_order);
    if (b->head()->is_Loop()) {
      tty->print(" (L%d)", b->compute_loop_alignment());
    }
    if (b->has_loop_alignment()) {
      tty->print(" (T%d)", b->code_alignment());
    }
  }
  tty->cr();
}

void CFGEdge::dump( ) const {
  tty->print(" B%d --> B%d Freq: %f out:%3d%% in:%3d%% State: ",
             from()->_pre_order, to()->_pre_order, freq(), _from_pct, _to_pct);
  switch(state()) {
  case connected:
    tty->print("connected");
    break;
  case open:
    tty->print("open");
    break;
  case interior:
    tty->print("interior");
    break;
  }
  if (infrequent()) {
    tty->print(" infrequent");
  }
  tty->cr();
}
#endif

// Comparison function for edges
static int edge_order(CFGEdge **e0, CFGEdge **e1) {
  float freq0 = (*e0)->freq();
  float freq1 = (*e1)->freq();
  if (freq0 != freq1) {
    return freq0 > freq1 ? -1 : 1;
  }

  int dist0 = (*e0)->to()->_rpo - (*e0)->from()->_rpo;
  int dist1 = (*e1)->to()->_rpo - (*e1)->from()->_rpo;

  return dist1 - dist0;
}

// Comparison function for traces
extern "C" int trace_frequency_order(const void *p0, const void *p1) {
  Trace *tr0 = *(Trace **) p0;
  Trace *tr1 = *(Trace **) p1;
  Block *b0 = tr0->first_block();
  Block *b1 = tr1->first_block();

  // The trace of connector blocks goes at the end;
  // we only expect one such trace
  if (b0->is_connector() != b1->is_connector()) {
    return b1->is_connector() ? -1 : 1;
  }

  // Pull more frequently executed blocks to the beginning
  float freq0 = b0->_freq;
  float freq1 = b1->_freq;
  if (freq0 != freq1) {
    return freq0 > freq1 ? -1 : 1;
  }

  int diff = tr0->first_block()->_rpo - tr1->first_block()->_rpo;

  return diff;
}

// Find edges of interest, i.e., those which can fall through. Presumes that
// edges which don't fall through are of low frequency and can be generally
// ignored. Initialize the list of traces.
void PhaseBlockLayout::find_edges() {
  // Walk the blocks, creating edges and Traces
  uint i;
  Trace *tr = NULL;
  for (i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* b = _cfg.get_block(i);
    tr = new Trace(b, next, prev);
    traces[tr->id()] = tr;

    // All connector blocks should be at the end of the list
    if (b->is_connector()) break;

    // If this block and the next one have a one-to-one successor
    // predecessor relationship, simply append the next block
    int nfallthru = b->num_fall_throughs();
    while (nfallthru == 1 &&
           b->succ_fall_through(0)) {
      Block *n = b->_succs[0];

      // Skip over single-entry connector blocks, we don't want to
      // add them to the trace.
      while (n->is_connector() && n->num_preds() == 1) {
        n = n->_succs[0];
      }

      // We see a merge point, so stop the search for the next block
      if (n->num_preds() != 1) break;

      i++;
      assert(n == _cfg.get_block(i), "expecting next block");
      tr->append(n);
      uf->map(n->_pre_order, tr->id());
      traces[n->_pre_order] = NULL;
      nfallthru = b->num_fall_throughs();
      b = n;
    }

    if (nfallthru > 0) {
      // Create a CFGEdge for each outgoing
      // edge that could be a fall-through.
      for (uint j = 0; j < b->_num_succs; j++ ) {
        if (b->succ_fall_through(j)) {
          Block *target = b->non_connector_successor(j);
          float freq = b->_freq * b->succ_prob(j);
          int from_pct = (int) ((100 * freq) / b->_freq);
          int to_pct = (int) ((100 * freq) / target->_freq);
          edges->append(new CFGEdge(b, target, freq, from_pct, to_pct));
        }
      }
    }
  }

  // Group connector blocks into one trace
  for (i++; i < _cfg.number_of_blocks(); i++) {
    Block *b = _cfg.get_block(i);
    assert(b->is_connector(), "connector blocks at the end");
    tr->append(b);
    uf->map(b->_pre_order, tr->id());
    traces[b->_pre_order] = NULL;
  }
}

// Union two traces together in uf, and null out the trace in the list
void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace) {
  uint old_id = old_trace->id();
  uint updated_id = updated_trace->id();

  uint lo_id = updated_id;
  uint hi_id = old_id;

  // If from is greater than to, swap values to meet
  // UnionFind guarantee.
  if (updated_id > old_id) {
    lo_id = old_id;
    hi_id = updated_id;

    // Fix up the trace ids
    traces[lo_id] = traces[updated_id];
    updated_trace->set_id(lo_id);
  }

  // Union the lower with the higher and remove the pointer
  // to the higher.
  uf->Union(lo_id, hi_id);
  traces[hi_id] = NULL;
}

// Append traces together via the most frequently executed edges
void PhaseBlockLayout::grow_traces() {
  // Order the edges, and drive the growth of Traces via the most
  // frequently executed edges.
  edges->sort(edge_order);
  for (int i = 0; i < edges->length(); i++) {
    CFGEdge *e = edges->at(i);

    if (e->state() != CFGEdge::open) continue;

    Block *src_block = e->from();
    Block *targ_block = e->to();

    // Don't grow traces along backedges?
    if (!BlockLayoutRotateLoops) {
      if (targ_block->_rpo <= src_block->_rpo) {
        targ_block->set_loop_alignment(targ_block);
        continue;
      }
    }

    Trace *src_trace = trace(src_block);
    Trace *targ_trace = trace(targ_block);

    // If the edge in question can join two traces at their ends,
    // append one trace to the other.
    if (src_trace->last_block() == src_block) {
      if (src_trace == targ_trace) {
        e->set_state(CFGEdge::interior);
        if (targ_trace->backedge(e)) {
          // Reset i to catch any newly eligible edge
          // (Or we could remember the first "open" edge, and reset there)
          i = 0;
        }
      } else if (targ_trace->first_block() == targ_block) {
        e->set_state(CFGEdge::connected);
        src_trace->append(targ_trace);
        union_traces(src_trace, targ_trace);
      }
    }
  }
}

// Embed one trace into another, if the fork or join points are sufficiently
// balanced.
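// (Note, added for clarity: this is called twice from the PhaseBlockLayout
// constructor below, first with fall_thru_only == true to fold diamonds at
// fall-through points, then with fall_thru_only == false to catenate any
// remaining traces.)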
void PhaseBlockLayout::merge_traces(bool fall_thru_only) {
  // Walk the edge list another time, looking at unprocessed edges.
  // Fold in diamonds.
  for (int i = 0; i < edges->length(); i++) {
    CFGEdge *e = edges->at(i);

    if (e->state() != CFGEdge::open) continue;
    if (fall_thru_only) {
      if (e->infrequent()) continue;
    }

    Block *src_block = e->from();
    Trace *src_trace = trace(src_block);
    bool src_at_tail = src_trace->last_block() == src_block;

    Block *targ_block = e->to();
    Trace *targ_trace = trace(targ_block);
    bool targ_at_start = targ_trace->first_block() == targ_block;

    if (src_trace == targ_trace) {
      // This may be a loop, but we can't do much about it.
      e->set_state(CFGEdge::interior);
      continue;
    }

    if (fall_thru_only) {
      // If the edge links the middle of two traces, we can't do anything.
      // Mark the edge and continue.
      if (!src_at_tail & !targ_at_start) {
        continue;
      }

      // Don't grow traces along backedges?
      if (!BlockLayoutRotateLoops && (targ_block->_rpo <= src_block->_rpo)) {
        continue;
      }

      // If both ends of the edge are available, why didn't we handle it earlier?
      assert(src_at_tail ^ targ_at_start, "Should have caught this edge earlier.");

      if (targ_at_start) {
        // Insert the "targ" trace in the "src" trace if the insertion point
        // is a two way branch.
        // Better profitability check possible, but may not be worth it.
        // Someday, see if this "fork" has an associated "join";
        // then make a policy on merging this trace at the fork or join.
        // For example, other things being equal, it may be better to place this
        // trace at the join point if the "src" trace ends in a two-way, but
        // the insertion point is one-way.
        assert(src_block->num_fall_throughs() == 2, "unexpected diamond");
        e->set_state(CFGEdge::connected);
        src_trace->insert_after(src_block, targ_trace);
        union_traces(src_trace, targ_trace);
      } else if (src_at_tail) {
        if (src_trace != trace(_cfg.get_root_block())) {
          e->set_state(CFGEdge::connected);
          targ_trace->insert_before(targ_block, src_trace);
          union_traces(targ_trace, src_trace);
        }
      }
    } else if (e->state() == CFGEdge::open) {
      // Append traces, even without a fall-thru connection.
      // But leave root entry at the beginning of the block list.
      if (targ_trace != trace(_cfg.get_root_block())) {
        e->set_state(CFGEdge::connected);
        src_trace->append(targ_trace);
        union_traces(src_trace, targ_trace);
      }
    }
  }
}

// Order the sequence of the traces in some desirable way, and fixup the
// jumps at the end of each block.
void PhaseBlockLayout::reorder_traces(int count) {
  ResourceArea *area = Thread::current()->resource_area();
  Trace ** new_traces = NEW_ARENA_ARRAY(area, Trace *, count);
  Block_List worklist;
  int new_count = 0;

  // Compact the traces.
  for (int i = 0; i < count; i++) {
    Trace *tr = traces[i];
    if (tr != NULL) {
      new_traces[new_count++] = tr;
    }
  }

  // The entry block should be first on the new trace list.
  Trace *tr = trace(_cfg.get_root_block());
  assert(tr == new_traces[0], "entry trace misplaced");

  // Sort the new trace list by frequency
  qsort(new_traces + 1, new_count - 1, sizeof(new_traces[0]), trace_frequency_order);

  // Patch up the successor blocks
  _cfg.clear_blocks();
  for (int i = 0; i < new_count; i++) {
    Trace *tr = new_traces[i];
    if (tr != NULL) {
      tr->fixup_blocks(_cfg);
    }
  }
}

// Order basic blocks based on frequency
PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg)
: Phase(BlockLayout)
, _cfg(cfg) {
  ResourceMark rm;
  ResourceArea *area = Thread::current()->resource_area();

  // List of traces
  int size = _cfg.number_of_blocks() + 1;
  traces = NEW_ARENA_ARRAY(area, Trace *, size);
  memset(traces, 0, size*sizeof(Trace*));
  next = NEW_ARENA_ARRAY(area, Block *, size);
  memset(next, 0, size*sizeof(Block *));
  prev = NEW_ARENA_ARRAY(area, Block *, size);
  memset(prev , 0, size*sizeof(Block *));

  // List of edges
  edges = new GrowableArray<CFGEdge*>;

  // Mapping block index --> block_trace
  uf = new UnionFind(size);
  uf->reset(size);

  // Find edges and create traces.
  find_edges();

  // Grow traces at their ends via most frequent edges.
  grow_traces();

  // Merge one trace into another, but only at fall-through points.
  // This may make diamonds and other related shapes in a trace.
  merge_traces(true);

  // Run merge again, allowing two traces to be catenated, even if
  // one does not fall through into the other. This places loosely
  // related traces near each other.
  merge_traces(false);

  // Re-order all the remaining traces by frequency
  reorder_traces(size);

  assert(_cfg.number_of_blocks() >= (uint) (size - 1), "number of blocks can not shrink");
}


// Edge e completes a loop in a trace. If the target block is head of the
// loop, rotate the loop block so that the loop ends in a conditional branch.
bool Trace::backedge(CFGEdge *e) {
  bool loop_rotated = false;
  Block *src_block  = e->from();
  Block *targ_block = e->to();

  assert(last_block() == src_block, "loop discovery at back branch");
  if (first_block() == targ_block) {
    if (BlockLayoutRotateLoops && last_block()->num_fall_throughs() < 2) {
      // Find the last block in the trace that has a conditional
      // branch.
      Block *b;
      for (b = last_block(); b != NULL; b = prev(b)) {
        if (b->num_fall_throughs() == 2) {
          break;
        }
      }

      if (b != last_block() && b != NULL) {
        loop_rotated = true;

        // Rotate the loop by doing two-part linked-list surgery.
        append(first_block());
        break_loop_after(b);
      }
    }

    // Backbranch to the top of a trace
    // Scroll forward through the trace from the targ_block. If we find
    // a loop head before another loop top, use the loop head alignment.
    for (Block *b = targ_block; b != NULL; b = next(b)) {
      if (b->has_loop_alignment()) {
        break;
      }
      if (b->head()->is_Loop()) {
        targ_block = b;
        break;
      }
    }

    first_block()->set_loop_alignment(targ_block);

  } else {
    // That loop may already have a loop top (we're reaching it again
    // through the backedge of an outer loop)
    Block* b = prev(targ_block);
    bool has_top = targ_block->head()->is_Loop() && b->has_loop_alignment() && !b->head()->is_Loop();
    if (!has_top) {
      // Backbranch into the middle of a trace
      targ_block->set_loop_alignment(targ_block);
    }
  }

  return loop_rotated;
}

// Push blocks onto the CFG list.
// Ensure that blocks have the correct two-way branch sense.
void Trace::fixup_blocks(PhaseCFG &cfg) {
  Block *last = last_block();
  for (Block *b = first_block(); b != NULL; b = next(b)) {
    cfg.add_block(b);
    if (!b->is_connector()) {
      int nfallthru = b->num_fall_throughs();
      if (b != last) {
        if (nfallthru == 2) {
          // Ensure that the sense of the branch is correct
          Block *bnext = next(b);
          Block *bs0 = b->non_connector_successor(0);

          MachNode *iff   = b->get_node(b->number_of_nodes() - 3)->as_Mach();
          ProjNode *proj0 = b->get_node(b->number_of_nodes() - 2)->as_Proj();
          ProjNode *proj1 = b->get_node(b->number_of_nodes() - 1)->as_Proj();

          if (bnext == bs0) {
            // Fall-thru case in succs[0], should be in succs[1]

            // Flip targets in _succs map
            Block *tbs0 = b->_succs[0];
            Block *tbs1 = b->_succs[1];
            b->_succs.map( 0, tbs1 );
            b->_succs.map( 1, tbs0 );

            // Flip projections to match targets
            b->map_node(proj1, b->number_of_nodes() - 2);
            b->map_node(proj0, b->number_of_nodes() - 1);
          }
        }
      }
    }
  }
}
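// (Summary note, added for clarity: block layout is driven from the
// PhaseBlockLayout constructor above. find_edges() builds the initial traces,
// grow_traces() extends them along the most frequent edges, merge_traces()
// runs twice as described there, and reorder_traces() emits the final block
// order back into the CFG via Trace::fixup_blocks().)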