src/share/vm/opto/block.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File JDK-8022284 Sdiff src/share/vm/opto

src/share/vm/opto/block.cpp

Print this page




 204   if (en->is_Catch())
 205     en = en->in(0);
 206   if (en->is_MachProj() && en->in(0)->is_MachCall()) {
 207     MachCallNode* call = en->in(0)->as_MachCall();
 208     if (call->cnt() != COUNT_UNKNOWN && call->cnt() <= PROB_UNLIKELY_MAG(4)) {
 209       // This is true for slow-path stubs like new_{instance,array},
 210       // slow_arraycopy, complete_monitor_locking, uncommon_trap.
 211       // The magic number corresponds to the probability of an uncommon_trap,
 212       // even though it is a count not a probability.
 213       return true;
 214     }
 215   }
 216 
 217   int op = en->is_Mach() ? en->as_Mach()->ideal_Opcode() : en->Opcode();
 218   return op == Op_Halt;
 219 }
 220 
 221 //------------------------------is_uncommon------------------------------------
 222 // True if block is low enough frequency or guarded by a test which
 223 // mostly does not go here.
 224 bool Block::is_uncommon( Block_Array &bbs ) const {
 225   // Initial blocks must never be moved, so are never uncommon.
 226   if (head()->is_Root() || head()->is_Start())  return false;
 227 
 228   // Check for way-low freq
 229   if( _freq < BLOCK_FREQUENCY(0.00001f) ) return true;
 230 
 231   // Look for code shape indicating uncommon_trap or slow path
 232   if (has_uncommon_code()) return true;
 233 
 234   const float epsilon = 0.05f;
 235   const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon);
 236   uint uncommon_preds = 0;
 237   uint freq_preds = 0;
 238   uint uncommon_for_freq_preds = 0;
 239 
 240   for( uint i=1; i<num_preds(); i++ ) {
 241     Block* guard = bbs[pred(i)->_idx];
 242     // Check to see if this block follows its guard 1 time out of 10000
 243     // or less.
 244     //
 245     // See list of magnitude-4 unlikely probabilities in cfgnode.hpp which
 246     // we intend to be "uncommon", such as slow-path TLE allocation,
 247     // predicted call failure, and uncommon trap triggers.
 248     //
 249     // Use an epsilon value of 5% to allow for variability in frequency
 250     // predictions and floating point calculations. The net effect is
 251     // that guard_factor is set to 9500.
 252     //
 253     // Ignore low-frequency blocks.
 254     // The next check is (guard->_freq < 1.e-5 * 9500.).
 255     if(guard->_freq*BLOCK_FREQUENCY(guard_factor) < BLOCK_FREQUENCY(0.00001f)) {
 256       uncommon_preds++;
 257     } else {
 258       freq_preds++;
 259       if( _freq < guard->_freq * guard_factor ) {
 260         uncommon_for_freq_preds++;
 261       }


 268        uncommon_for_freq_preds == freq_preds) ) {
 269     return true;
 270   }
 271   return false;
 272 }
 273 
 274 //------------------------------dump-------------------------------------------
 275 #ifndef PRODUCT
 276 void Block::dump_bidx(const Block* orig, outputStream* st) const {
 277   if (_pre_order) st->print("B%d",_pre_order);
 278   else st->print("N%d", head()->_idx);
 279 
 280   if (Verbose && orig != this) {
 281     // Dump the original block's idx
 282     st->print(" (");
 283     orig->dump_bidx(orig, st);
 284     st->print(")");
 285   }
 286 }
 287 
 288 void Block::dump_pred(const Block_Array *bbs, Block* orig, outputStream* st) const {
 289   if (is_connector()) {
 290     for (uint i=1; i<num_preds(); i++) {
 291       Block *p = ((*bbs)[pred(i)->_idx]);
 292       p->dump_pred(bbs, orig, st);
 293     }
 294   } else {
 295     dump_bidx(orig, st);
 296     st->print(" ");
 297   }
 298 }
 299 
// Dump a one-line summary of this block: its id, successor ids, predecessor
// ids, any enclosing loop, its frequency and, in Verbose/Wizard mode,
// dominator and register-pressure statistics.  'bbs' maps node indices to
// blocks and may be NULL, in which case predecessors are identified by
// walking control edges back to a block-start node.
void Block::dump_head( const Block_Array *bbs, outputStream* st ) const {
  // Print the basic block
  dump_bidx(this, st);
  st->print(": #\t");

  // Print the incoming CFG edges and the outgoing CFG edges
  for( uint i=0; i<_num_succs; i++ ) {
    non_connector_successor(i)->dump_bidx(_succs[i], st);
    st->print(" ");
  }
  st->print("<- ");
  if( head()->is_block_start() ) {
    for (uint i=1; i<num_preds(); i++) {
      Node *s = pred(i);
      if (bbs) {
        Block *p = (*bbs)[s->_idx];
        p->dump_pred(bbs, p, st);
      } else {
        // No node-to-block mapping available: walk control inputs back to
        // the predecessor's block-start node and print its node id instead.
        while (!s->is_block_start())
          s = s->in(0);
        st->print("N%d ", s->_idx );
      }
    }
  } else
    st->print("BLOCK HEAD IS JUNK  ");

  // Print loop, if any
  const Block *bhead = this;    // Head of self-loop
  Node *bh = bhead->head();
  if( bbs && bh->is_Loop() && !head()->is_Root() ) {
    LoopNode *loop = bh->as_Loop();
    const Block *bx = (*bbs)[loop->in(LoopNode::LoopBackControl)->_idx];
    // Skip connector blocks to find the real block on the loop back-edge.
    while (bx->is_connector()) {
      bx = (*bbs)[bx->pred(1)->_idx];
    }
    st->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order);
    // Dump any loop-specific bits, especially for CountedLoops.
    loop->dump_spec(st);
  } else if (has_loop_alignment()) {
    st->print(" top-of-loop");
  }
  st->print(" Freq: %g",_freq);
  if( Verbose || WizardMode ) {
    st->print(" IDom: %d/#%d", _idom ? _idom->_pre_order : 0, _dom_depth);
    st->print(" RegPressure: %d",_reg_pressure);
    st->print(" IHRP Index: %d",_ihrp_index);
    st->print(" FRegPressure: %d",_freg_pressure);
    st->print(" FHRP Index: %d",_fhrp_index);
  }
  st->print_cr("");
}
 351 
 352 void Block::dump() const { dump(NULL); }


 353 
 354 void Block::dump( const Block_Array *bbs ) const {
 355   dump_head(bbs);
 356   uint cnt = _nodes.size();
 357   for( uint i=0; i<cnt; i++ )
 358     _nodes[i]->dump();

 359   tty->print("\n");
 360 }
 361 #endif
 362 
//=============================================================================
//------------------------------PhaseCFG---------------------------------------
// Build the control-flow graph for the matched IR rooted at 'r'.  Blocks and
// the node-to-block map are allocated in arena 'a'.  Matcher 'm' is used once
// to produce a machine-specific Goto node, which is then cloned on demand
// wherever a block must end in an unconditional branch.
PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) :
  Phase(CFG),
  _bbs(a),
  _root(r),
  _node_latency(NULL)
#ifndef PRODUCT
  , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
#endif
#ifdef ASSERT
  , _raw_oops(a)
#endif
{
  ResourceMark rm;
  // I'll need a few machine-specific GotoNodes.  Make an Ideal GotoNode,
  // then Match it into a machine-specific Node.  Then clone the machine
  // Node on demand.
  Node *x = new (C) GotoNode(NULL);
  x->init_req(0, x);   // self control edge keeps the template node alive
  _goto = m.match_tree(x);
  assert(_goto != NULL, "");
  _goto->set_req(0,_goto);

  // Build the CFG in Reverse Post Order
  _num_blocks = build_cfg();
  _broot = _bbs[_root->_idx];   // cache the block that contains the root
}
 391 
 392 //------------------------------build_cfg--------------------------------------
 393 // Build a proper looking CFG.  Make every block begin with either a StartNode
 394 // or a RegionNode.  Make every block end with either a Goto, If or Return.
 395 // The RootNode both starts and ends it's own block.  Do this with a recursive
 396 // backwards walk over the control edges.
 397 uint PhaseCFG::build_cfg() {
 398   Arena *a = Thread::current()->resource_area();
 399   VectorSet visited(a);
 400 
 401   // Allocate stack with enough space to avoid frequent realloc
 402   Node_Stack nstack(a, C->unique() >> 1);
 403   nstack.push(_root, 0);
 404   uint sum = 0;                 // Counter for blocks
 405 
 406   while (nstack.is_nonempty()) {
 407     // node and in's index from stack's top
 408     // 'np' is _root (see above) or RegionNode, StartNode: we push on stack
 409     // only nodes which point to the start of basic block (see below).


 423       x = proj = g;
 424     }
 425     if (!visited.test_set(x->_idx)) { // Visit this block once
 426       // Skip any control-pinned middle'in stuff
 427       Node *p = proj;
 428       do {
 429         proj = p;                   // Update pointer to last Control
 430         p = p->in(0);               // Move control forward
 431       } while( !p->is_block_proj() &&
 432                !p->is_block_start() );
 433       // Make the block begin with one of Region or StartNode.
 434       if( !p->is_block_start() ) {
 435         RegionNode *r = new (C) RegionNode( 2 );
 436         r->init_req(1, p);         // Insert RegionNode in the way
 437         proj->set_req(0, r);        // Insert RegionNode in the way
 438         p = r;
 439       }
 440       // 'p' now points to the start of this basic block
 441 
 442       // Put self in array of basic blocks
 443       Block *bb = new (_bbs._arena) Block(_bbs._arena,p);
 444       _bbs.map(p->_idx,bb);
 445       _bbs.map(x->_idx,bb);
 446       if( x != p ) {                // Only for root is x == p
 447         bb->_nodes.push((Node*)x);
 448       }
 449       // Now handle predecessors
 450       ++sum;                        // Count 1 for self block
 451       uint cnt = bb->num_preds();
 452       for (int i = (cnt - 1); i > 0; i-- ) { // For all predecessors
 453         Node *prevproj = p->in(i);  // Get prior input
 454         assert( !prevproj->is_Con(), "dead input not removed" );
 455         // Check to see if p->in(i) is a "control-dependent" CFG edge -
 456         // i.e., it splits at the source (via an IF or SWITCH) and merges
 457         // at the destination (via a many-input Region).
 458         // This breaks critical edges.  The RegionNode to start the block
 459         // will be added when <p,i> is pulled off the node stack
 460         if ( cnt > 2 ) {             // Merging many things?
 461           assert( prevproj== bb->pred(i),"");
 462           if(prevproj->is_block_proj() != prevproj) { // Control-dependent edge?
 463             // Force a block on the control-dependent edge
 464             Node *g = _goto->clone();       // Force it to end in a Goto
 465             g->set_req(0,prevproj);
 466             p->set_req(i,g);
 467           }
 468         }
 469         nstack.push(p, i);  // 'p' is RegionNode or StartNode
 470       }
 471     } else { // Post-processing visited nodes
 472       nstack.pop();                 // remove node from stack
 473       // Check if it the fist node pushed on stack at the beginning.
 474       if (idx == 0) break;          // end of the build
 475       // Find predecessor basic block
 476       Block *pb = _bbs[x->_idx];
 477       // Insert into nodes array, if not already there
 478       if( !_bbs.lookup(proj->_idx) ) {
 479         assert( x != proj, "" );
 480         // Map basic block of projection
 481         _bbs.map(proj->_idx,pb);
 482         pb->_nodes.push(proj);
 483       }
 484       // Insert self as a child of my predecessor block
 485       pb->_succs.map(pb->_num_succs++, _bbs[np->_idx]);
 486       assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(),
 487               "too many control users, not a CFG?" );
 488     }
 489   }
 490   // Return number of basic blocks for all children and self
 491   return sum;
 492 }
 493 
//------------------------------insert_goto_at---------------------------------
// Inserts a goto & corresponding basic block between
// block[block_no] and its succ_no'th successor block.
// The new block consists of a single RegionNode head and a cloned Goto,
// splitting the CFG edge in->succs[succ_no].
void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
  // get block with block_no
  assert(block_no < _num_blocks, "illegal block number");
  Block* in  = _blocks[block_no];
  // get successor block succ_no
  assert(succ_no < in->_num_succs, "illegal successor number");
  Block* out = in->_succs[succ_no];
  // Compute frequency of the new block. Do this before inserting
  // new block in case succ_prob() needs to infer the probability from
  // surrounding blocks.
  float freq = in->_freq * in->succ_prob(succ_no);
  // get ProjNode corresponding to the succ_no'th successor of the in block
  // (block-ending projections sit at the tail of the node array)
  ProjNode* proj = in->_nodes[in->_nodes.size() - in->_num_succs + succ_no]->as_Proj();
  // create region for basic block
  RegionNode* region = new (C) RegionNode(2);
  region->init_req(1, proj);
  // setup corresponding basic block
  Block* block = new (_bbs._arena) Block(_bbs._arena, region);
  _bbs.map(region->_idx, block);
  // NOTE(review): set_bad appears to clear register info for the freshly
  // created node in the allocator -- confirm against regalloc.
  C->regalloc()->set_bad(region->_idx);
  // add a goto node
  Node* gto = _goto->clone(); // get a new goto node
  gto->set_req(0, region);
  // add it to the basic block
  block->_nodes.push(gto);
  _bbs.map(gto->_idx, block);
  C->regalloc()->set_bad(gto->_idx);
  // hook up successor block
  block->_succs.map(block->_num_succs++, out);
  // remap successor's predecessors if necessary
  for (uint i = 1; i < out->num_preds(); i++) {
    if (out->pred(i) == proj) out->head()->set_req(i, gto);
  }
  // remap predecessor's successor to new block
  in->_succs.map(succ_no, block);
  // Set the frequency of the new block
  block->_freq = freq;
  // add new basic block to basic block list
  _blocks.insert(block_no + 1, block);
  _num_blocks++;
}
 538 
 539 //------------------------------no_flip_branch---------------------------------
 540 // Does this block end in a multiway branch that cannot have the default case
 541 // flipped for another case?
 542 static bool no_flip_branch( Block *b ) {


 553       return true;
 554   }
 555   return false;
 556 }
 557 
 558 //------------------------------convert_NeverBranch_to_Goto--------------------
 559 // Check for NeverBranch at block end.  This needs to become a GOTO to the
 560 // true target.  NeverBranch are treated as a conditional branch that always
 561 // goes the same direction for most of the optimizer and are used to give a
 562 // fake exit path to infinite loops.  At this late stage they need to turn
 563 // into Goto's so that when you enter the infinite loop you indeed hang.
 564 void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
 565   // Find true target
 566   int end_idx = b->end_idx();
 567   int idx = b->_nodes[end_idx+1]->as_Proj()->_con;
 568   Block *succ = b->_succs[idx];
 569   Node* gto = _goto->clone(); // get a new goto node
 570   gto->set_req(0, b->head());
 571   Node *bp = b->_nodes[end_idx];
 572   b->_nodes.map(end_idx,gto); // Slam over NeverBranch
 573   _bbs.map(gto->_idx, b);
 574   C->regalloc()->set_bad(gto->_idx);
 575   b->_nodes.pop();              // Yank projections
 576   b->_nodes.pop();              // Yank projections
 577   b->_succs.map(0,succ);        // Map only successor
 578   b->_num_succs = 1;
 579   // remap successor's predecessors if necessary
 580   uint j;
 581   for( j = 1; j < succ->num_preds(); j++)
 582     if( succ->pred(j)->in(0) == bp )
 583       succ->head()->set_req(j, gto);
 584   // Kill alternate exit path
 585   Block *dead = b->_succs[1-idx];
 586   for( j = 1; j < dead->num_preds(); j++)
 587     if( dead->pred(j)->in(0) == bp )
 588       break;
 589   // Scan through block, yanking dead path from
 590   // all regions and phis.
 591   dead->head()->del_req(j);
 592   for( int k = 1; dead->_nodes[k]->is_Phi(); k++ )
 593     dead->_nodes[k]->del_req(j);


//------------------------------move_to_next-----------------------------------
// Helper function to move block bx to the slot following b_index. Return
// true if the move is successful, otherwise false
bool PhaseCFG::move_to_next(Block* bx, uint b_index) {
  if (bx == NULL) return false;

  // Return false if bx is already scheduled.
  // (_pre_order is a valid list index only once the block is placed.)
  uint bx_index = bx->_pre_order;
  if ((bx_index <= b_index) && (_blocks[bx_index] == bx)) {
    return false;
  }

  // Find the current index of block bx on the block list
  bx_index = b_index + 1;
  while( bx_index < _num_blocks && _blocks[bx_index] != bx ) bx_index++;
  // NOTE(review): if bx is absent, bx_index == _num_blocks here and the
  // assert indexes one past the end -- verify Block_List bounds-checks this.
  assert(_blocks[bx_index] == bx, "block not found");

  // If the previous block conditionally falls into bx, return false,
  // because moving bx will create an extra jump.
  for(uint k = 1; k < bx->num_preds(); k++ ) {
    Block* pred = _bbs[bx->pred(k)->_idx];
    if (pred == _blocks[bx_index-1]) {
      // Predecessor immediately precedes bx in layout order; moving bx is
      // only safe when that predecessor falls through unconditionally.
      if (pred->_num_succs != 1) {
        return false;
      }
    }
  }

  // Reinsert bx just past block 'b'
  _blocks.remove(bx_index);
  _blocks.insert(b_index + 1, bx);
  return true;
}
 629 
 630 //------------------------------move_to_end------------------------------------
 631 // Move empty and uncommon blocks to the end.
 632 void PhaseCFG::move_to_end(Block *b, uint i) {
 633   int e = b->is_Empty();
 634   if (e != Block::not_empty) {
 635     if (e == Block::empty_with_goto) {
 636       // Remove the goto, but leave the block.


 665 void PhaseCFG::remove_empty() {
 666   // Move uncommon blocks to the end
 667   uint last = _num_blocks;
 668   assert( _blocks[0] == _broot, "" );
 669 
 670   for (uint i = 1; i < last; i++) {
 671     Block *b = _blocks[i];
 672     if (b->is_connector()) break;
 673 
 674     // Check for NeverBranch at block end.  This needs to become a GOTO to the
 675     // true target.  NeverBranch are treated as a conditional branch that
 676     // always goes the same direction for most of the optimizer and are used
 677     // to give a fake exit path to infinite loops.  At this late stage they
 678     // need to turn into Goto's so that when you enter the infinite loop you
 679     // indeed hang.
 680     if( b->_nodes[b->end_idx()]->Opcode() == Op_NeverBranch )
 681       convert_NeverBranch_to_Goto(b);
 682 
 683     // Look for uncommon blocks and move to end.
 684     if (!C->do_freq_based_layout()) {
 685       if( b->is_uncommon(_bbs) ) {
 686         move_to_end(b, i);
 687         last--;                   // No longer check for being uncommon!
 688         if( no_flip_branch(b) ) { // Fall-thru case must follow?
 689           b = _blocks[i];         // Find the fall-thru block
 690           move_to_end(b, i);
 691           last--;
 692         }
 693         i--;                      // backup block counter post-increment
 694       }
 695     }
 696   }
 697 
 698   // Move empty blocks to the end
 699   last = _num_blocks;
 700   for (uint i = 1; i < last; i++) {
 701     Block *b = _blocks[i];
 702     if (b->is_Empty() != Block::not_empty) {
 703       move_to_end(b, i);
 704       last--;
 705       i--;


 853 }
 854 
 855 
 856 //------------------------------dump-------------------------------------------
 857 #ifndef PRODUCT
 858 void PhaseCFG::_dump_cfg( const Node *end, VectorSet &visited  ) const {
 859   const Node *x = end->is_block_proj();
 860   assert( x, "not a CFG" );
 861 
 862   // Do not visit this block again
 863   if( visited.test_set(x->_idx) ) return;
 864 
 865   // Skip through this block
 866   const Node *p = x;
 867   do {
 868     p = p->in(0);               // Move control forward
 869     assert( !p->is_block_proj() || p->is_Root(), "not a CFG" );
 870   } while( !p->is_block_start() );
 871 
 872   // Recursively visit
 873   for( uint i=1; i<p->req(); i++ )
 874     _dump_cfg(p->in(i),visited);

 875 
 876   // Dump the block
 877   _bbs[p->_idx]->dump(&_bbs);
 878 }
 879 
 880 void PhaseCFG::dump( ) const {
 881   tty->print("\n--- CFG --- %d BBs\n",_num_blocks);
 882   if( _blocks.size() ) {        // Did we do basic-block layout?
 883     for( uint i=0; i<_num_blocks; i++ )
 884       _blocks[i]->dump(&_bbs);

 885   } else {                      // Else do it with a DFS
 886     VectorSet visited(_bbs._arena);
 887     _dump_cfg(_root,visited);
 888   }
 889 }
 890 
 891 void PhaseCFG::dump_headers() {
 892   for( uint i = 0; i < _num_blocks; i++ ) {
 893     if( _blocks[i] == NULL ) continue;
 894     _blocks[i]->dump_head(&_bbs);

 895   }
 896 }
 897 
 898 void PhaseCFG::verify( ) const {
 899 #ifdef ASSERT
 900   // Verify sane CFG
 901   for (uint i = 0; i < _num_blocks; i++) {
 902     Block *b = _blocks[i];
 903     uint cnt = b->_nodes.size();
 904     uint j;
 905     for (j = 0; j < cnt; j++)  {
 906       Node *n = b->_nodes[j];
 907       assert( _bbs[n->_idx] == b, "" );
 908       if (j >= 1 && n->is_Mach() &&
 909           n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
 910         assert(j == 1 || b->_nodes[j-1]->is_Phi(),
 911                "CreateEx must be first instruction in block");
 912       }
 913       for (uint k = 0; k < n->req(); k++) {
 914         Node *def = n->in(k);
 915         if (def && def != n) {
 916           assert(_bbs[def->_idx] || def->is_Con(),
 917                  "must have block; constants for debug info ok");
 918           // Verify that instructions in the block is in correct order.
 919           // Uses must follow their definition if they are at the same block.
 920           // Mostly done to check that MachSpillCopy nodes are placed correctly
 921           // when CreateEx node is moved in build_ifg_physical().
 922           if (_bbs[def->_idx] == b &&
 923               !(b->head()->is_Loop() && n->is_Phi()) &&
 924               // See (+++) comment in reg_split.cpp
 925               !(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) {
 926             bool is_loop = false;
 927             if (n->is_Phi()) {
 928               for (uint l = 1; l < def->req(); l++) {
 929                 if (n == def->in(l)) {
 930                   is_loop = true;
 931                   break; // Some kind of loop
 932                 }
 933               }
 934             }
 935             assert(is_loop || b->find_node(def) < j, "uses must follow definitions");
 936           }
 937         }
 938       }
 939     }
 940 
 941     j = b->end_idx();
 942     Node *bp = (Node*)b->_nodes[b->_nodes.size()-1]->is_block_proj();




 204   if (en->is_Catch())
 205     en = en->in(0);
 206   if (en->is_MachProj() && en->in(0)->is_MachCall()) {
 207     MachCallNode* call = en->in(0)->as_MachCall();
 208     if (call->cnt() != COUNT_UNKNOWN && call->cnt() <= PROB_UNLIKELY_MAG(4)) {
 209       // This is true for slow-path stubs like new_{instance,array},
 210       // slow_arraycopy, complete_monitor_locking, uncommon_trap.
 211       // The magic number corresponds to the probability of an uncommon_trap,
 212       // even though it is a count not a probability.
 213       return true;
 214     }
 215   }
 216 
 217   int op = en->is_Mach() ? en->as_Mach()->ideal_Opcode() : en->Opcode();
 218   return op == Op_Halt;
 219 }
 220 
 221 //------------------------------is_uncommon------------------------------------
 222 // True if block is low enough frequency or guarded by a test which
 223 // mostly does not go here.
 224 bool Block::is_uncommon(PhaseCFG* cfg) const {
 225   // Initial blocks must never be moved, so are never uncommon.
 226   if (head()->is_Root() || head()->is_Start())  return false;
 227 
 228   // Check for way-low freq
 229   if( _freq < BLOCK_FREQUENCY(0.00001f) ) return true;
 230 
 231   // Look for code shape indicating uncommon_trap or slow path
 232   if (has_uncommon_code()) return true;
 233 
 234   const float epsilon = 0.05f;
 235   const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon);
 236   uint uncommon_preds = 0;
 237   uint freq_preds = 0;
 238   uint uncommon_for_freq_preds = 0;
 239 
 240   for( uint i=1; i<num_preds(); i++ ) {
 241     Block* guard = cfg->get_block_for_node(pred(i));
 242     // Check to see if this block follows its guard 1 time out of 10000
 243     // or less.
 244     //
 245     // See list of magnitude-4 unlikely probabilities in cfgnode.hpp which
 246     // we intend to be "uncommon", such as slow-path TLE allocation,
 247     // predicted call failure, and uncommon trap triggers.
 248     //
 249     // Use an epsilon value of 5% to allow for variability in frequency
 250     // predictions and floating point calculations. The net effect is
 251     // that guard_factor is set to 9500.
 252     //
 253     // Ignore low-frequency blocks.
 254     // The next check is (guard->_freq < 1.e-5 * 9500.).
 255     if(guard->_freq*BLOCK_FREQUENCY(guard_factor) < BLOCK_FREQUENCY(0.00001f)) {
 256       uncommon_preds++;
 257     } else {
 258       freq_preds++;
 259       if( _freq < guard->_freq * guard_factor ) {
 260         uncommon_for_freq_preds++;
 261       }


 268        uncommon_for_freq_preds == freq_preds) ) {
 269     return true;
 270   }
 271   return false;
 272 }
 273 
 274 //------------------------------dump-------------------------------------------
 275 #ifndef PRODUCT
 276 void Block::dump_bidx(const Block* orig, outputStream* st) const {
 277   if (_pre_order) st->print("B%d",_pre_order);
 278   else st->print("N%d", head()->_idx);
 279 
 280   if (Verbose && orig != this) {
 281     // Dump the original block's idx
 282     st->print(" (");
 283     orig->dump_bidx(orig, st);
 284     st->print(")");
 285   }
 286 }
 287 
 288 void Block::dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st) const {
 289   if (is_connector()) {
 290     for (uint i=1; i<num_preds(); i++) {
 291       Block *p = cfg->get_block_for_node(pred(i));
 292       p->dump_pred(cfg, orig, st);
 293     }
 294   } else {
 295     dump_bidx(orig, st);
 296     st->print(" ");
 297   }
 298 }
 299 
// Dump a one-line summary of this block: its id, successor ids, predecessor
// ids, any enclosing loop, its frequency and, in Verbose/Wizard mode,
// dominator and register-pressure statistics.  'cfg' supplies the
// node-to-block mapping and may be NULL, in which case predecessors are
// identified by walking control edges back to a block-start node.
void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const {
  // Print the basic block
  dump_bidx(this, st);
  st->print(": #\t");

  // Print the incoming CFG edges and the outgoing CFG edges
  for( uint i=0; i<_num_succs; i++ ) {
    non_connector_successor(i)->dump_bidx(_succs[i], st);
    st->print(" ");
  }
  st->print("<- ");
  if( head()->is_block_start() ) {
    for (uint i=1; i<num_preds(); i++) {
      Node *s = pred(i);
      if (cfg != NULL) {
        Block *p = cfg->get_block_for_node(s);
        p->dump_pred(cfg, p, st);
      } else {
        // No mapping available: walk control inputs back to the
        // predecessor's block-start node and print its node id instead.
        while (!s->is_block_start())
          s = s->in(0);
        st->print("N%d ", s->_idx );
      }
    }
  } else {
    st->print("BLOCK HEAD IS JUNK  ");
  }

  // Print loop, if any
  const Block *bhead = this;    // Head of self-loop
  Node *bh = bhead->head();

  if ((cfg != NULL) && bh->is_Loop() && !head()->is_Root()) {
    LoopNode *loop = bh->as_Loop();
    const Block *bx = cfg->get_block_for_node(loop->in(LoopNode::LoopBackControl));
    // Skip connector blocks to find the real block on the loop back-edge.
    while (bx->is_connector()) {
      bx = cfg->get_block_for_node(bx->pred(1));
    }
    st->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order);
    // Dump any loop-specific bits, especially for CountedLoops.
    loop->dump_spec(st);
  } else if (has_loop_alignment()) {
    st->print(" top-of-loop");
  }
  st->print(" Freq: %g",_freq);
  if( Verbose || WizardMode ) {
    st->print(" IDom: %d/#%d", _idom ? _idom->_pre_order : 0, _dom_depth);
    st->print(" RegPressure: %d",_reg_pressure);
    st->print(" IHRP Index: %d",_ihrp_index);
    st->print(" FRegPressure: %d",_freg_pressure);
    st->print(" FHRP Index: %d",_fhrp_index);
  }
  st->print_cr("");
}
 353 
 354 void Block::dump() const {
 355   dump(NULL);
 356 }
 357 
 358 void Block::dump(const PhaseCFG* cfg) const {
 359   dump_head(cfg);
 360   for (uint i=0; i< _nodes.size(); i++) {

 361     _nodes[i]->dump();
 362   }
 363   tty->print("\n");
 364 }
 365 #endif
 366 
//=============================================================================
//------------------------------PhaseCFG---------------------------------------
// Build the control-flow graph for the matched IR rooted at 'root'.  Blocks
// and the node-to-block mapping are allocated in 'arena'.  'matcher' is used
// once to produce a machine-specific Goto node, which is then cloned on
// demand wherever a block must end in an unconditional branch.
PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher)
: Phase(CFG)
, _block_arena(arena)
, _node_to_block_mapping(arena)
, _root(root)
, _node_latency(NULL)
#ifndef PRODUCT
, _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
#endif
#ifdef ASSERT
, _raw_oops(arena)
#endif
{
  ResourceMark rm;
  // I'll need a few machine-specific GotoNodes.  Make an Ideal GotoNode,
  // then Match it into a machine-specific Node.  Then clone the machine
  // Node on demand.
  Node *x = new (C) GotoNode(NULL);
  x->init_req(0, x);   // self control edge keeps the template node alive
  _goto = matcher.match_tree(x);
  assert(_goto != NULL, "");
  _goto->set_req(0,_goto);

  // Build the CFG in Reverse Post Order
  _num_blocks = build_cfg();
  _broot = get_block_for_node(_root);   // cache the root's block
}
 396 
 397 //------------------------------build_cfg--------------------------------------
 398 // Build a proper looking CFG.  Make every block begin with either a StartNode
 399 // or a RegionNode.  Make every block end with either a Goto, If or Return.
 400 // The RootNode both starts and ends it's own block.  Do this with a recursive
 401 // backwards walk over the control edges.
 402 uint PhaseCFG::build_cfg() {
 403   Arena *a = Thread::current()->resource_area();
 404   VectorSet visited(a);
 405 
 406   // Allocate stack with enough space to avoid frequent realloc
 407   Node_Stack nstack(a, C->unique() >> 1);
 408   nstack.push(_root, 0);
 409   uint sum = 0;                 // Counter for blocks
 410 
 411   while (nstack.is_nonempty()) {
 412     // node and in's index from stack's top
 413     // 'np' is _root (see above) or RegionNode, StartNode: we push on stack
 414     // only nodes which point to the start of basic block (see below).


 428       x = proj = g;
 429     }
 430     if (!visited.test_set(x->_idx)) { // Visit this block once
 431       // Skip any control-pinned middle'in stuff
 432       Node *p = proj;
 433       do {
 434         proj = p;                   // Update pointer to last Control
 435         p = p->in(0);               // Move control forward
 436       } while( !p->is_block_proj() &&
 437                !p->is_block_start() );
 438       // Make the block begin with one of Region or StartNode.
 439       if( !p->is_block_start() ) {
 440         RegionNode *r = new (C) RegionNode( 2 );
 441         r->init_req(1, p);         // Insert RegionNode in the way
 442         proj->set_req(0, r);        // Insert RegionNode in the way
 443         p = r;
 444       }
 445       // 'p' now points to the start of this basic block
 446 
 447       // Put self in array of basic blocks
 448       Block *bb = new (_block_arena) Block(_block_arena, p);
 449       map_node_to_block(p, bb);
 450       map_node_to_block(x, bb);
 451       if( x != p ) {                // Only for root is x == p
 452         bb->_nodes.push((Node*)x);
 453       }
 454       // Now handle predecessors
 455       ++sum;                        // Count 1 for self block
 456       uint cnt = bb->num_preds();
 457       for (int i = (cnt - 1); i > 0; i-- ) { // For all predecessors
 458         Node *prevproj = p->in(i);  // Get prior input
 459         assert( !prevproj->is_Con(), "dead input not removed" );
 460         // Check to see if p->in(i) is a "control-dependent" CFG edge -
 461         // i.e., it splits at the source (via an IF or SWITCH) and merges
 462         // at the destination (via a many-input Region).
 463         // This breaks critical edges.  The RegionNode to start the block
 464         // will be added when <p,i> is pulled off the node stack
 465         if ( cnt > 2 ) {             // Merging many things?
 466           assert( prevproj== bb->pred(i),"");
 467           if(prevproj->is_block_proj() != prevproj) { // Control-dependent edge?
 468             // Force a block on the control-dependent edge
 469             Node *g = _goto->clone();       // Force it to end in a Goto
 470             g->set_req(0,prevproj);
 471             p->set_req(i,g);
 472           }
 473         }
 474         nstack.push(p, i);  // 'p' is RegionNode or StartNode
 475       }
 476     } else { // Post-processing visited nodes
 477       nstack.pop();                 // remove node from stack
 478       // Check if it the fist node pushed on stack at the beginning.
 479       if (idx == 0) break;          // end of the build
 480       // Find predecessor basic block
 481       Block *pb = get_block_for_node(x);
 482       // Insert into nodes array, if not already there
 483       if (!has_block(proj)) {
 484         assert( x != proj, "" );
 485         // Map basic block of projection
 486         map_node_to_block(proj, pb);
 487         pb->_nodes.push(proj);
 488       }
 489       // Insert self as a child of my predecessor block
 490       pb->_succs.map(pb->_num_succs++, get_block_for_node(np));
 491       assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(),
 492               "too many control users, not a CFG?" );
 493     }
 494   }
 495   // Return number of basic blocks for all children and self
 496   return sum;
 497 }
 498 
//------------------------------insert_goto_at---------------------------------
// Inserts a goto & corresponding basic block between
// block[block_no] and its succ_no'th successor block, splitting that edge.
// The new block initially contains only the goto.
void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
  // get block with block_no
  assert(block_no < _num_blocks, "illegal block number");
  Block* in  = _blocks[block_no];
  // get successor block succ_no
  assert(succ_no < in->_num_succs, "illegal successor number");
  Block* out = in->_succs[succ_no];
  // Compute frequency of the new block. Do this before inserting
  // new block in case succ_prob() needs to infer the probability from
  // surrounding blocks.
  float freq = in->_freq * in->succ_prob(succ_no);
  // get ProjNode corresponding to the succ_no'th successor of the in block
  // (a block's outgoing control projections occupy the last _num_succs
  // slots of its _nodes array)
  ProjNode* proj = in->_nodes[in->_nodes.size() - in->_num_succs + succ_no]->as_Proj();
  // create region for basic block; req 1 is the incoming control edge
  RegionNode* region = new (C) RegionNode(2);
  region->init_req(1, proj);
  // setup corresponding basic block
  Block* block = new (_block_arena) Block(_block_arena, region);
  map_node_to_block(region, block);
  // mark the new region as having no register assigned
  C->regalloc()->set_bad(region->_idx);
  // add a goto node
  Node* gto = _goto->clone(); // get a new goto node
  gto->set_req(0, region);
  // add it to the basic block
  block->_nodes.push(gto);
  map_node_to_block(gto, block);
  C->regalloc()->set_bad(gto->_idx);
  // hook up successor block
  block->_succs.map(block->_num_succs++, out);
  // remap successor's predecessors if necessary: any edge into 'out' that
  // came from 'proj' must now come from the new goto
  for (uint i = 1; i < out->num_preds(); i++) {
    if (out->pred(i) == proj) out->head()->set_req(i, gto);
  }
  // remap predecessor's successor to new block
  in->_succs.map(succ_no, block);
  // Set the frequency of the new block
  block->_freq = freq;
  // add new basic block to basic block list, immediately after 'in'
  _blocks.insert(block_no + 1, block);
  _num_blocks++;
}
 543 
 544 //------------------------------no_flip_branch---------------------------------
 545 // Does this block end in a multiway branch that cannot have the default case
 546 // flipped for another case?
 547 static bool no_flip_branch( Block *b ) {


 558       return true;
 559   }
 560   return false;
 561 }
 562 
 563 //------------------------------convert_NeverBranch_to_Goto--------------------
 564 // Check for NeverBranch at block end.  This needs to become a GOTO to the
 565 // true target.  NeverBranch are treated as a conditional branch that always
 566 // goes the same direction for most of the optimizer and are used to give a
 567 // fake exit path to infinite loops.  At this late stage they need to turn
 568 // into Goto's so that when you enter the infinite loop you indeed hang.
 569 void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
 570   // Find true target
 571   int end_idx = b->end_idx();
 572   int idx = b->_nodes[end_idx+1]->as_Proj()->_con;
 573   Block *succ = b->_succs[idx];
 574   Node* gto = _goto->clone(); // get a new goto node
 575   gto->set_req(0, b->head());
 576   Node *bp = b->_nodes[end_idx];
 577   b->_nodes.map(end_idx,gto); // Slam over NeverBranch
 578   map_node_to_block(gto, b);
 579   C->regalloc()->set_bad(gto->_idx);
 580   b->_nodes.pop();              // Yank projections
 581   b->_nodes.pop();              // Yank projections
 582   b->_succs.map(0,succ);        // Map only successor
 583   b->_num_succs = 1;
 584   // remap successor's predecessors if necessary
 585   uint j;
 586   for( j = 1; j < succ->num_preds(); j++)
 587     if( succ->pred(j)->in(0) == bp )
 588       succ->head()->set_req(j, gto);
 589   // Kill alternate exit path
 590   Block *dead = b->_succs[1-idx];
 591   for( j = 1; j < dead->num_preds(); j++)
 592     if( dead->pred(j)->in(0) == bp )
 593       break;
 594   // Scan through block, yanking dead path from
 595   // all regions and phis.
 596   dead->head()->del_req(j);
 597   for( int k = 1; dead->_nodes[k]->is_Phi(); k++ )
 598     dead->_nodes[k]->del_req(j);


 601 //------------------------------move_to_next-----------------------------------
 602 // Helper function to move block bx to the slot following b_index. Return
 603 // true if the move is successful, otherwise false
 604 bool PhaseCFG::move_to_next(Block* bx, uint b_index) {
 605   if (bx == NULL) return false;
 606 
 607   // Return false if bx is already scheduled.
 608   uint bx_index = bx->_pre_order;
 609   if ((bx_index <= b_index) && (_blocks[bx_index] == bx)) {
 610     return false;
 611   }
 612 
 613   // Find the current index of block bx on the block list
 614   bx_index = b_index + 1;
 615   while( bx_index < _num_blocks && _blocks[bx_index] != bx ) bx_index++;
 616   assert(_blocks[bx_index] == bx, "block not found");
 617 
 618   // If the previous block conditionally falls into bx, return false,
 619   // because moving bx will create an extra jump.
 620   for(uint k = 1; k < bx->num_preds(); k++ ) {
 621     Block* pred = get_block_for_node(bx->pred(k));
 622     if (pred == _blocks[bx_index-1]) {
 623       if (pred->_num_succs != 1) {
 624         return false;
 625       }
 626     }
 627   }
 628 
 629   // Reinsert bx just past block 'b'
 630   _blocks.remove(bx_index);
 631   _blocks.insert(b_index + 1, bx);
 632   return true;
 633 }
 634 
 635 //------------------------------move_to_end------------------------------------
 636 // Move empty and uncommon blocks to the end.
 637 void PhaseCFG::move_to_end(Block *b, uint i) {
 638   int e = b->is_Empty();
 639   if (e != Block::not_empty) {
 640     if (e == Block::empty_with_goto) {
 641       // Remove the goto, but leave the block.


 670 void PhaseCFG::remove_empty() {
 671   // Move uncommon blocks to the end
 672   uint last = _num_blocks;
 673   assert( _blocks[0] == _broot, "" );
 674 
 675   for (uint i = 1; i < last; i++) {
 676     Block *b = _blocks[i];
 677     if (b->is_connector()) break;
 678 
 679     // Check for NeverBranch at block end.  This needs to become a GOTO to the
 680     // true target.  NeverBranch are treated as a conditional branch that
 681     // always goes the same direction for most of the optimizer and are used
 682     // to give a fake exit path to infinite loops.  At this late stage they
 683     // need to turn into Goto's so that when you enter the infinite loop you
 684     // indeed hang.
 685     if( b->_nodes[b->end_idx()]->Opcode() == Op_NeverBranch )
 686       convert_NeverBranch_to_Goto(b);
 687 
 688     // Look for uncommon blocks and move to end.
 689     if (!C->do_freq_based_layout()) {
 690       if (b->is_uncommon(this)) {
 691         move_to_end(b, i);
 692         last--;                   // No longer check for being uncommon!
 693         if( no_flip_branch(b) ) { // Fall-thru case must follow?
 694           b = _blocks[i];         // Find the fall-thru block
 695           move_to_end(b, i);
 696           last--;
 697         }
 698         i--;                      // backup block counter post-increment
 699       }
 700     }
 701   }
 702 
 703   // Move empty blocks to the end
 704   last = _num_blocks;
 705   for (uint i = 1; i < last; i++) {
 706     Block *b = _blocks[i];
 707     if (b->is_Empty() != Block::not_empty) {
 708       move_to_end(b, i);
 709       last--;
 710       i--;


 858 }
 859 
 860 
 861 //------------------------------dump-------------------------------------------
 862 #ifndef PRODUCT
 863 void PhaseCFG::_dump_cfg( const Node *end, VectorSet &visited  ) const {
 864   const Node *x = end->is_block_proj();
 865   assert( x, "not a CFG" );
 866 
 867   // Do not visit this block again
 868   if( visited.test_set(x->_idx) ) return;
 869 
 870   // Skip through this block
 871   const Node *p = x;
 872   do {
 873     p = p->in(0);               // Move control forward
 874     assert( !p->is_block_proj() || p->is_Root(), "not a CFG" );
 875   } while( !p->is_block_start() );
 876 
 877   // Recursively visit
 878   for (uint i = 1; i < p->req(); i++) {
 879     _dump_cfg(p->in(i), visited);
 880   }
 881 
 882   // Dump the block
 883   get_block_for_node(p)->dump(this);
 884 }
 885 
 886 void PhaseCFG::dump( ) const {
 887   tty->print("\n--- CFG --- %d BBs\n",_num_blocks);
 888   if (_blocks.size()) {        // Did we do basic-block layout?
 889     for (uint i = 0; i < _num_blocks; i++) {
 890       _blocks[i]->dump(this);
 891     }
 892   } else {                      // Else do it with a DFS
 893     VectorSet visited(_block_arena);
 894     _dump_cfg(_root,visited);
 895   }
 896 }
 897 
 898 void PhaseCFG::dump_headers() {
 899   for( uint i = 0; i < _num_blocks; i++ ) {
 900     if (_blocks[i]) {
 901       _blocks[i]->dump_head(this);
 902     }
 903   }
 904 }
 905 
 906 void PhaseCFG::verify( ) const {
 907 #ifdef ASSERT
 908   // Verify sane CFG
 909   for (uint i = 0; i < _num_blocks; i++) {
 910     Block *b = _blocks[i];
 911     uint cnt = b->_nodes.size();
 912     uint j;
 913     for (j = 0; j < cnt; j++)  {
 914       Node *n = b->_nodes[j];
 915       assert(get_block_for_node(n) == b, "");
 916       if (j >= 1 && n->is_Mach() &&
 917           n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
 918         assert(j == 1 || b->_nodes[j-1]->is_Phi(),
 919                "CreateEx must be first instruction in block");
 920       }
 921       for (uint k = 0; k < n->req(); k++) {
 922         Node *def = n->in(k);
 923         if (def && def != n) {
 924           assert(get_block_for_node(def) || def->is_Con(), "must have block; constants for debug info ok");

 925           // Verify that instructions in the block is in correct order.
 926           // Uses must follow their definition if they are at the same block.
 927           // Mostly done to check that MachSpillCopy nodes are placed correctly
 928           // when CreateEx node is moved in build_ifg_physical().
 929           if (get_block_for_node(def) == b &&
 930               !(b->head()->is_Loop() && n->is_Phi()) &&
 931               // See (+++) comment in reg_split.cpp
 932               !(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) {
 933             bool is_loop = false;
 934             if (n->is_Phi()) {
 935               for (uint l = 1; l < def->req(); l++) {
 936                 if (n == def->in(l)) {
 937                   is_loop = true;
 938                   break; // Some kind of loop
 939                 }
 940               }
 941             }
 942             assert(is_loop || b->find_node(def) < j, "uses must follow definitions");
 943           }
 944         }
 945       }
 946     }
 947 
 948     j = b->end_idx();
 949     Node *bp = (Node*)b->_nodes[b->_nodes.size()-1]->is_block_proj();


src/share/vm/opto/block.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File