src/share/vm/opto/output.cpp
7079317 Sdiff



 403             reloc_size += reloc_java_to_interp();
 404           }
 405         } else if (mach->is_MachSafePoint()) {
 406           // If call/safepoint are adjacent, account for possible
 407           // nop to disambiguate the two safepoints.
 408           // ScheduleAndBundle() can rearrange nodes in a block,
 409           // check for all offsets inside this block.
 410           if (last_call_adr >= blk_starts[i]) {
 411             blk_size += nop_size;
 412           }
 413         }
 414         if (mach->avoid_back_to_back()) {
 415           // Nop is inserted between "avoid back to back" instructions.
 416           // ScheduleAndBundle() can rearrange nodes in a block,
 417           // check for all offsets inside this block.
 418           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 419             blk_size += nop_size;
 420           }
 421         }
 422         if (mach->may_be_short_branch()) {
 423           if (!nj->is_Branch()) {
 424 #ifndef PRODUCT
 425             nj->dump(3);
 426 #endif
 427             Unimplemented();
 428           }
 429           assert(jmp_nidx[i] == -1, "block should have only one branch");
 430           jmp_offset[i] = blk_size;
 431           jmp_size[i]   = inst_size;
 432           jmp_nidx[i]   = j;
 433           has_short_branch_candidate = true;
 434         }
 435       }
 436       blk_size += inst_size;
 437       // Remember end of call offset
 438       if (nj->is_MachCall() && !nj->is_MachCallLeaf()) {
 439         last_call_adr = blk_starts[i]+blk_size;
 440       }
 441       // Remember end of avoid_back_to_back offset
 442       if (nj->is_Mach() && nj->as_Mach()->avoid_back_to_back()) {
 443         last_avoid_back_to_back_adr = blk_starts[i]+blk_size;


 456       }
 457     }
 458 
 459     // Save block size; update total method size
 460     blk_starts[i+1] = blk_starts[i]+blk_size;
 461   }
 462 
 463   // Step two, replace eligible long jumps.
 464   bool progress = true;
 465   uint last_may_be_short_branch_adr = max_uint;
 466   while (has_short_branch_candidate && progress) {
 467     progress = false;
 468     has_short_branch_candidate = false;
 469     int adjust_block_start = 0;
 470     for (uint i = 0; i < nblocks; i++) {
 471       Block *b = _cfg->_blocks[i];
 472       int idx = jmp_nidx[i];
 473       MachNode* mach = (idx == -1) ? NULL: b->_nodes[idx]->as_Mach();
 474       if (mach != NULL && mach->may_be_short_branch()) {
 475 #ifdef ASSERT
 476         assert(jmp_size[i] > 0 && mach->is_Branch(), "sanity");
 477         int j;
 478         // Find the branch; ignore trailing NOPs.
 479         for (j = b->_nodes.size()-1; j>=0; j--) {
 480           Node* n = b->_nodes[j];
 481           if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
 482             break;
 483         }
 484         assert(j >= 0 && j == idx && b->_nodes[j] == (Node*)mach, "sanity");
 485 #endif
 486         int br_size = jmp_size[i];
 487         int br_offs = blk_starts[i] + jmp_offset[i];
 488 
 489         // This requires the TRUE branch target be in succs[0]
 490         uint bnum = b->non_connector_successor(0)->_pre_order;
 491         int offset = blk_starts[bnum] - br_offs;
 492         if (bnum > i) { // adjust following block's offset
 493           offset -= adjust_block_start;
 494         }
 495         // In the following code a nop could be inserted before
 496         // the branch which will increase the backward distance.
 497         bool needs_padding = ((uint)br_offs == last_may_be_short_branch_adr);
 498         if (needs_padding && offset <= 0)
 499           offset -= nop_size;
 500 
 501         if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
 502           // We've got a winner.  Replace this branch.
 503           MachNode* replacement = mach->short_branch_version(this);
 504 
 505           // Update the jmp_size.
 506           int new_size = replacement->size(_regalloc);
 507           int diff     = br_size - new_size;
 508           assert(diff >= (int)nop_size, "short_branch size should be smaller");
 509           // Conservatively take into account padding between
 510           // avoid_back_to_back branches. Previous branch could be
 511           // converted into avoid_back_to_back branch during next
 512           // rounds.
 513           if (needs_padding && replacement->avoid_back_to_back()) {
 514             jmp_offset[i] += nop_size;
 515             diff -= nop_size;
 516           }
 517           adjust_block_start += diff;
 518           b->_nodes.map(idx, replacement);
 519           mach->subsume_by(replacement);
 520           mach = replacement;
 521           progress = true;
 522 
 523           jmp_size[i] = new_size;


 653                                   node_bundling(nj)->use_unconditional_delay();
 654         if (!delay_slot_is_used && mach->may_be_short_branch()) {
 655           int br_size = inst_size;
 656 
 657           // This requires the TRUE branch target be in succs[0]
 658           uint bnum = b->non_connector_successor(0)->_pre_order;
 659           int offset = blk_starts[bnum] - current_offset;
 660           if (bnum >= i) {
 661             // Current and following block offsets are not
 662             // finalized yet, adjust distance.
 663             offset -= (blk_starts[i] - blk_offset);
 664           }
 665           // In the following code a nop could be inserted before
 666           // the branch which will increase the backward distance.
 667           bool needs_padding = (current_offset == last_avoid_back_to_back_adr);
 668           if (needs_padding && offset <= 0)
 669             offset -= nop_size;
 670 
 671           if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
 672             // We've got a winner.  Replace this branch.
 673             MachNode* replacement = mach->short_branch_version(this);
 674 
 675             // Update the jmp_size.
 676             int new_size = replacement->size(_regalloc);
 677             assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
 678             // Conservatively take into account padding between
 679             // avoid_back_to_back branches. Previous branch could be
 680             // converted into avoid_back_to_back branch during next
 681             // rounds.
 682             if (needs_padding && replacement->avoid_back_to_back()) {
 683               MachNode *nop = new (this) MachNopNode();
 684               b->_nodes.insert(j++, nop);
 685               _cfg->_bbs.map(nop->_idx, b);
 686               last_inst++;
 687               current_offset += nop_size;
 688             }
 689             inst_size = new_size;
 690             b->_nodes.map(j, replacement);
 691             mach->subsume_by(replacement);
 692             nj = replacement;
 693 #ifdef ASSERT


1508             if (sfn->jvms()->method() == NULL) {
1509               // Write the oopmap directly to the code blob??!!
1510 #             ifdef ENABLE_ZAP_DEAD_LOCALS
1511               assert( !is_node_getting_a_safepoint(sfn),  "logic does not match; false positive");
1512 #             endif
1513               continue;
1514             }
1515           } // End synchronization
1516 
1517           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
1518                                            current_offset);
1519           Process_OopMap_Node(mach, current_offset);
1520         } // End if safepoint
1521 
1522         // If this is a null check, then add the start of the previous instruction to the list
1523         else if( mach->is_MachNullCheck() ) {
1524           inct_starts[inct_cnt++] = previous_offset;
1525         }
1526 
1527         // If this is a branch, then fill in the label with the target BB's label
1528         else if (mach->is_Branch()) {
1529 
1530           if (mach->ideal_Opcode() == Op_Jump) {
1531             for (uint h = 0; h < b->_num_succs; h++) {
1532               Block* succs_block = b->_succs[h];
1533               for (uint j = 1; j < succs_block->num_preds(); j++) {
1534                 Node* jpn = succs_block->pred(j);
1535                 if (jpn->is_JumpProj() && jpn->in(0) == mach) {
1536                   uint block_num = succs_block->non_connector()->_pre_order;
1537                   Label *blkLabel = &blk_labels[block_num];
1538                   mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
1539                 }
1540               }
1541             }
1542           } else {
1543             // For Branches
1544             // This requires the TRUE branch target be in succs[0]
1545             uint block_num = b->non_connector_successor(0)->_pre_order;
1546             mach->label_set( &blk_labels[block_num], block_num );
1547           }
1548         }
1549 
1550 #ifdef ASSERT
1551         // Check that oop-store precedes the card-mark
1552         else if (mach->ideal_Opcode() == Op_StoreCM) {
1553           uint storeCM_idx = j;
1554           int count = 0;
1555           for (uint prec = mach->req(); prec < mach->len(); prec++) {
1556             Node *oop_store = mach->in(prec);  // Precedence edge
1557             if (oop_store == NULL) continue;
1558             count++;
1559             uint i4;
1560             for( i4 = 0; i4 < last_inst; ++i4 ) {
1561               if( b->_nodes[i4] == oop_store ) break;
1562             }
1563             // Note: This test can provide a false failure if other precedence
1564             // edges have been added to the storeCMNode.
1565             assert( i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
1566           }
1567           assert(count > 0, "storeCM expects at least one precedence edge");
1568         }


2212   // See if this fits in the current bundle
2213   const Pipeline *node_pipeline = n->pipeline();
2214   const Pipeline_Use& node_usage = node_pipeline->resourceUse();
2215 
2216   // Check for instructions to be placed in the delay slot. We
2217   // do this before we actually schedule the current instruction,
2218   // because the delay slot follows the current instruction.
2219   if (Pipeline::_branch_has_delay_slot &&
2220       node_pipeline->hasBranchDelay() &&
2221       !_unconditional_delay_slot) {
2222 
2223     uint siz = _available.size();
2224 
2225     // Conditional branches can support an instruction that
2226     // is unconditionally executed and not dependent on the
2227     // branch, OR a conditionally executed instruction if
2228     // the branch is taken.  In practice, this means that
2229     // the first instruction at the branch target is
2230     // copied to the delay slot, and the branch goes to
2231     // the instruction after that at the branch target
2232     if ( n->is_Mach() && n->is_Branch() ) {
2233 
2234       assert( !n->is_MachNullCheck(), "should not look for delay slot for Null Check" );
2235       assert( !n->is_Catch(),         "should not look for delay slot for Catch" );
2236 
2237 #ifndef PRODUCT
2238       _branches++;
2239 #endif
2240 
2241       // At least 1 instruction is on the available list
2242       // that is not dependent on the branch
2243       for (uint i = 0; i < siz; i++) {
2244         Node *d = _available[i];
2245         const Pipeline *avail_pipeline = d->pipeline();
2246 
2247         // Don't allow safepoints in the branch shadow, as that will
2248         // cause a number of difficulties
2249         if ( avail_pipeline->instructionCount() == 1 &&
2250             !avail_pipeline->hasMultipleBundles() &&
2251             !avail_pipeline->hasBranchDelay() &&
2252             Pipeline::instr_has_unit_size() &&


2873     if( n->is_MachProj() && n->ideal_reg() == MachProjNode::fat_proj ) {
2874       // Fat-proj kills a slew of registers
2875       // This can add edges to 'n' and obscure whether or not it was a def,
2876       // hence the is_def flag.
2877       fat_proj_seen = true;
2878       RegMask rm = n->out_RegMask();// Make local copy
2879       while( rm.is_NotEmpty() ) {
2880         OptoReg::Name kill = rm.find_first_elem();
2881         rm.Remove(kill);
2882         anti_do_def( b, n, kill, is_def );
2883       }
2884     } else {
2885       // Get DEF'd registers the normal way
2886       anti_do_def( b, n, _regalloc->get_reg_first(n), is_def );
2887       anti_do_def( b, n, _regalloc->get_reg_second(n), is_def );
2888     }
2889 
2890     // Kill projections on a branch should appear to occur on the
2891     // branch, not afterwards, so grab the masks from the projections
2892     // and process them.
2893     if (n->is_Branch()) {
2894       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2895         Node* use = n->fast_out(i);
2896         if (use->is_Proj()) {
2897           RegMask rm = use->out_RegMask();// Make local copy
2898           while( rm.is_NotEmpty() ) {
2899             OptoReg::Name kill = rm.find_first_elem();
2900             rm.Remove(kill);
2901             anti_do_def( b, n, kill, false );
2902           }
2903         }
2904       }
2905     }
2906 
2907     // Check each register used by this instruction for a following DEF/KILL
2908     // that must occur afterward and requires an anti-dependence edge.
2909     for( uint j=0; j<n->req(); j++ ) {
2910       Node *def = n->in(j);
2911       if( def ) {
2912         assert( !def->is_MachProj() || def->ideal_reg() != MachProjNode::fat_proj, "" );
2913         anti_do_use( b, n, _regalloc->get_reg_first(def) );




 403             reloc_size += reloc_java_to_interp();
 404           }
 405         } else if (mach->is_MachSafePoint()) {
 406           // If call/safepoint are adjacent, account for possible
 407           // nop to disambiguate the two safepoints.
 408           // ScheduleAndBundle() can rearrange nodes in a block,
 409           // check for all offsets inside this block.
 410           if (last_call_adr >= blk_starts[i]) {
 411             blk_size += nop_size;
 412           }
 413         }
 414         if (mach->avoid_back_to_back()) {
 415           // Nop is inserted between "avoid back to back" instructions.
 416           // ScheduleAndBundle() can rearrange nodes in a block,
 417           // check for all offsets inside this block.
 418           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 419             blk_size += nop_size;
 420           }
 421         }
 422         if (mach->may_be_short_branch()) {
 423           if (!nj->is_MachBranch()) {
 424 #ifndef PRODUCT
 425             nj->dump(3);
 426 #endif
 427             Unimplemented();
 428           }
 429           assert(jmp_nidx[i] == -1, "block should have only one branch");
 430           jmp_offset[i] = blk_size;
 431           jmp_size[i]   = inst_size;
 432           jmp_nidx[i]   = j;
 433           has_short_branch_candidate = true;
 434         }
 435       }
 436       blk_size += inst_size;
 437       // Remember end of call offset
 438       if (nj->is_MachCall() && !nj->is_MachCallLeaf()) {
 439         last_call_adr = blk_starts[i]+blk_size;
 440       }
 441       // Remember end of avoid_back_to_back offset
 442       if (nj->is_Mach() && nj->as_Mach()->avoid_back_to_back()) {
 443         last_avoid_back_to_back_adr = blk_starts[i]+blk_size;
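
The sizing pass above computes a conservative upper bound on each block's size: whenever a call/safepoint or an "avoid back to back" instruction might end up adjacent to an earlier one after ScheduleAndBundle() reorders the block, an extra nop is budgeted. A simplified standalone sketch of that idea (the Inst struct and estimate_block_size are illustrative names, not HotSpot code):

#include <vector>

struct Inst {
  int  size;                // worst-case encoded size in bytes
  bool is_safepoint_site;   // call/safepoint that must not abut another one
  bool avoid_back_to_back;  // needs a nop if it would follow another such inst
};

// Upper bound on the block size: whenever two safepoint sites or two
// "avoid back to back" instructions could become adjacent after scheduling,
// budget one extra nop.  Sticky flags model "check all offsets in this block".
static int estimate_block_size(const std::vector<Inst>& insts, int nop_size) {
  int  blk_size = 0;
  bool saw_safepoint = false;
  bool saw_back_to_back = false;
  for (const Inst& in : insts) {
    if (in.is_safepoint_site && saw_safepoint)     blk_size += nop_size;
    if (in.avoid_back_to_back && saw_back_to_back) blk_size += nop_size;
    blk_size += in.size;
    saw_safepoint    = saw_safepoint    || in.is_safepoint_site;
    saw_back_to_back = saw_back_to_back || in.avoid_back_to_back;
  }
  return blk_size;
}

Over-estimating here is safe because later passes only shrink code (long branches become short ones); they never need to grow a block past this bound.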


 456       }
 457     }
 458 
 459     // Save block size; update total method size
 460     blk_starts[i+1] = blk_starts[i]+blk_size;
 461   }
 462 
 463   // Step two, replace eligible long jumps.
 464   bool progress = true;
 465   uint last_may_be_short_branch_adr = max_uint;
 466   while (has_short_branch_candidate && progress) {
 467     progress = false;
 468     has_short_branch_candidate = false;
 469     int adjust_block_start = 0;
 470     for (uint i = 0; i < nblocks; i++) {
 471       Block *b = _cfg->_blocks[i];
 472       int idx = jmp_nidx[i];
 473       MachNode* mach = (idx == -1) ? NULL: b->_nodes[idx]->as_Mach();
 474       if (mach != NULL && mach->may_be_short_branch()) {
 475 #ifdef ASSERT
 476         assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
 477         int j;
 478         // Find the branch; ignore trailing NOPs.
 479         for (j = b->_nodes.size()-1; j>=0; j--) {
 480           Node* n = b->_nodes[j];
 481           if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
 482             break;
 483         }
 484         assert(j >= 0 && j == idx && b->_nodes[j] == (Node*)mach, "sanity");
 485 #endif
 486         int br_size = jmp_size[i];
 487         int br_offs = blk_starts[i] + jmp_offset[i];
 488 
 489         // This requires the TRUE branch target be in succs[0]
 490         uint bnum = b->non_connector_successor(0)->_pre_order;
 491         int offset = blk_starts[bnum] - br_offs;
 492         if (bnum > i) { // adjust following block's offset
 493           offset -= adjust_block_start;
 494         }
 495         // In the following code a nop could be inserted before
 496         // the branch which will increase the backward distance.
 497         bool needs_padding = ((uint)br_offs == last_may_be_short_branch_adr);
 498         if (needs_padding && offset <= 0)
 499           offset -= nop_size;
 500 
 501         if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
 502           // We've got a winner.  Replace this branch.
 503           MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
 504 
 505           // Update the jmp_size.
 506           int new_size = replacement->size(_regalloc);
 507           int diff     = br_size - new_size;
 508           assert(diff >= (int)nop_size, "short_branch size should be smaller");
 509           // Conservatively take into account padding between
 510           // avoid_back_to_back branches. Previous branch could be
 511           // converted into avoid_back_to_back branch during next
 512           // rounds.
 513           if (needs_padding && replacement->avoid_back_to_back()) {
 514             jmp_offset[i] += nop_size;
 515             diff -= nop_size;
 516           }
 517           adjust_block_start += diff;
 518           b->_nodes.map(idx, replacement);
 519           mach->subsume_by(replacement);
 520           mach = replacement;
 521           progress = true;
 522 
 523           jmp_size[i] = new_size;


 653                                   node_bundling(nj)->use_unconditional_delay();
 654         if (!delay_slot_is_used && mach->may_be_short_branch()) {
 655           int br_size = inst_size;
 656 
 657           // This requires the TRUE branch target be in succs[0]
 658           uint bnum = b->non_connector_successor(0)->_pre_order;
 659           int offset = blk_starts[bnum] - current_offset;
 660           if (bnum >= i) {
 661             // Current and following block offsets are not
 662             // finalized yet, adjust distance.
 663             offset -= (blk_starts[i] - blk_offset);
 664           }
 665           // In the following code a nop could be inserted before
 666           // the branch which will increase the backward distance.
 667           bool needs_padding = (current_offset == last_avoid_back_to_back_adr);
 668           if (needs_padding && offset <= 0)
 669             offset -= nop_size;
 670 
 671           if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
 672             // We've got a winner.  Replace this branch.
 673             MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
 674 
 675             // Update the jmp_size.
 676             int new_size = replacement->size(_regalloc);
 677             assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
 678             // Conservatively take into account padding between
 679             // avoid_back_to_back branches. Previous branch could be
 680             // converted into avoid_back_to_back branch during next
 681             // rounds.
 682             if (needs_padding && replacement->avoid_back_to_back()) {
 683               MachNode *nop = new (this) MachNopNode();
 684               b->_nodes.insert(j++, nop);
 685               _cfg->_bbs.map(nop->_idx, b);
 686               last_inst++;
 687               current_offset += nop_size;
 688             }
 689             inst_size = new_size;
 690             b->_nodes.map(j, replacement);
 691             mach->subsume_by(replacement);
 692             nj = replacement;
 693 #ifdef ASSERT
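
In this third pass the shrunken branch may also need a nop in front of it: when the previous "avoid back to back" instruction ends exactly at the current offset, a MachNopNode is inserted before the replacement, as shown above. A small standalone sketch of that padding decision (Emitted and maybe_pad_before_short_branch are illustrative names, not HotSpot code):

#include <vector>

// One emitted instruction in this toy model: start offset, size, and whether
// it must not be placed back to back with another such instruction.
struct Emitted { int start; int size; bool avoid_back_to_back; };

// If the short-branch replacement itself avoids back-to-back placement and the
// previous such instruction ends exactly where the branch would start, emit a
// nop first and advance the offset.
static int maybe_pad_before_short_branch(std::vector<Emitted>& code,
                                         int current_offset,
                                         int last_avoid_back_to_back_end,
                                         int nop_size,
                                         bool replacement_avoids_back_to_back) {
  if (replacement_avoids_back_to_back &&
      current_offset == last_avoid_back_to_back_end) {
    code.push_back({current_offset, nop_size, false});  // the padding nop
    current_offset += nop_size;
  }
  return current_offset;
}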


1508             if (sfn->jvms()->method() == NULL) {
1509               // Write the oopmap directly to the code blob??!!
1510 #             ifdef ENABLE_ZAP_DEAD_LOCALS
1511               assert( !is_node_getting_a_safepoint(sfn),  "logic does not match; false positive");
1512 #             endif
1513               continue;
1514             }
1515           } // End synchronization
1516 
1517           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
1518                                            current_offset);
1519           Process_OopMap_Node(mach, current_offset);
1520         } // End if safepoint
1521 
1522         // If this is a null check, then add the start of the previous instruction to the list
1523         else if( mach->is_MachNullCheck() ) {
1524           inct_starts[inct_cnt++] = previous_offset;
1525         }
1526 
1527         // If this is a branch, then fill in the label with the target BB's label
1528         else if (mach->is_MachBranch()) {
1529           // This requires the TRUE branch target be in succs[0]
1530           uint block_num = b->non_connector_successor(0)->_pre_order;
1531           mach->as_MachBranch()->label_set( &blk_labels[block_num], block_num );
1532         } else if (mach->ideal_Opcode() == Op_Jump) {
1533           for (uint h = 0; h < b->_num_succs; h++) {
1534             Block* succs_block = b->_succs[h];
1535             for (uint j = 1; j < succs_block->num_preds(); j++) {
1536               Node* jpn = succs_block->pred(j);
1537               if (jpn->is_JumpProj() && jpn->in(0) == mach) {
1538                 uint block_num = succs_block->non_connector()->_pre_order;
1539                 Label *blkLabel = &blk_labels[block_num];
1540                 mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
1541               }
1542             }
1543           }
1544         }
1545 
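
For a multi-way jump (Op_Jump), each successor block is reached through a JumpProj whose case number selects a jump-table slot; the loop above binds that slot to the successor block's label. A toy model of the same bookkeeping once block offsets are known (CaseProj and bind_case_labels are illustrative names, not HotSpot code):

#include <vector>

// Each "projection" names a case number and the block it jumps to; the jump
// table slot for that case is pointed at the target block's start offset.
struct CaseProj { int case_no; int target_block; };

static std::vector<int> bind_case_labels(const std::vector<CaseProj>& projs,
                                         const std::vector<int>& blk_starts,
                                         int num_cases) {
  std::vector<int> table(num_cases, -1);            // -1 == unbound case
  for (const CaseProj& p : projs) {
    table[p.case_no] = blk_starts[p.target_block];  // resolve once offsets are known
  }
  return table;
}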
1546 #ifdef ASSERT
1547         // Check that oop-store precedes the card-mark
1548         else if (mach->ideal_Opcode() == Op_StoreCM) {
1549           uint storeCM_idx = j;
1550           int count = 0;
1551           for (uint prec = mach->req(); prec < mach->len(); prec++) {
1552             Node *oop_store = mach->in(prec);  // Precedence edge
1553             if (oop_store == NULL) continue;
1554             count++;
1555             uint i4;
1556             for( i4 = 0; i4 < last_inst; ++i4 ) {
1557               if( b->_nodes[i4] == oop_store ) break;
1558             }
1559             // Note: This test can provide a false failure if other precedence
1560             // edges have been added to the storeCMNode.
1561             assert( i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
1562           }
1563           assert(count > 0, "storeCM expects at least one precedence edge");
1564         }
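
The ASSERT block checks that every oop store reachable through a precedence edge of the StoreCM node is scheduled before the card mark itself. A simplified standalone version of that check (names are illustrative; the real assert also tolerates a store that lives outside the current block):

#include <cassert>
#include <vector>

// schedule_pos[n] is the position of node n in the block's final order;
// oop_store_ids are the nodes reachable through the StoreCM precedence edges.
static void verify_card_mark_order(const std::vector<int>& schedule_pos,
                                   const std::vector<int>& oop_store_ids,
                                   int card_mark_id) {
  assert(!oop_store_ids.empty() && "StoreCM expects at least one precedence edge");
  for (int store : oop_store_ids) {
    assert(schedule_pos[store] < schedule_pos[card_mark_id] &&
           "oop store must be scheduled before its card mark");
  }
}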


2208   // See if this fits in the current bundle
2209   const Pipeline *node_pipeline = n->pipeline();
2210   const Pipeline_Use& node_usage = node_pipeline->resourceUse();
2211 
2212   // Check for instructions to be placed in the delay slot. We
2213   // do this before we actually schedule the current instruction,
2214   // because the delay slot follows the current instruction.
2215   if (Pipeline::_branch_has_delay_slot &&
2216       node_pipeline->hasBranchDelay() &&
2217       !_unconditional_delay_slot) {
2218 
2219     uint siz = _available.size();
2220 
2221     // Conditional branches can support an instruction that
2222     // is unconditionally executed and not dependent on the
2223     // branch, OR a conditionally executed instruction if
2224     // the branch is taken.  In practice, this means that
2225     // the first instruction at the branch target is
2226     // copied to the delay slot, and the branch goes to
2227     // the instruction after that at the branch target
2228     if ( n->is_MachBranch() ) {
2229 
2230       assert( !n->is_MachNullCheck(), "should not look for delay slot for Null Check" );
2231       assert( !n->is_Catch(),         "should not look for delay slot for Catch" );
2232 
2233 #ifndef PRODUCT
2234       _branches++;
2235 #endif
2236 
2237       // At least 1 instruction is on the available list
2238       // that is not dependent on the branch
2239       for (uint i = 0; i < siz; i++) {
2240         Node *d = _available[i];
2241         const Pipeline *avail_pipeline = d->pipeline();
2242 
2243         // Don't allow safepoints in the branch shadow, as that will
2244         // cause a number of difficulties
2245         if ( avail_pipeline->instructionCount() == 1 &&
2246             !avail_pipeline->hasMultipleBundles() &&
2247             !avail_pipeline->hasBranchDelay() &&
2248             Pipeline::instr_has_unit_size() &&
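
When the target architecture has branch delay slots, the scheduler scans the available list for an instruction that can legally be hoisted into the slot: a single-issue instruction that is not itself a delayed branch, not a safepoint, and not dependent on the branch. A minimal standalone sketch of that selection (Candidate and pick_delay_slot_filler are illustrative names, not HotSpot code):

#include <vector>

struct Candidate {
  bool single_instruction;   // occupies exactly one issue slot
  bool has_branch_delay;     // is itself a delayed branch
  bool is_safepoint;         // must not sit in a branch shadow
  bool depends_on_branch;    // uses a value the branch defines
};

// Return the index of the first instruction that may be hoisted into the
// branch delay slot, or -1 if the slot must be filled with a nop instead.
static int pick_delay_slot_filler(const std::vector<Candidate>& available) {
  for (size_t i = 0; i < available.size(); i++) {
    const Candidate& c = available[i];
    if (c.single_instruction && !c.has_branch_delay &&
        !c.is_safepoint && !c.depends_on_branch) {
      return (int)i;
    }
  }
  return -1;  // no filler found
}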


2869     if( n->is_MachProj() && n->ideal_reg() == MachProjNode::fat_proj ) {
2870       // Fat-proj kills a slew of registers
2871       // This can add edges to 'n' and obscure whether or not it was a def,
2872       // hence the is_def flag.
2873       fat_proj_seen = true;
2874       RegMask rm = n->out_RegMask();// Make local copy
2875       while( rm.is_NotEmpty() ) {
2876         OptoReg::Name kill = rm.find_first_elem();
2877         rm.Remove(kill);
2878         anti_do_def( b, n, kill, is_def );
2879       }
2880     } else {
2881       // Get DEF'd registers the normal way
2882       anti_do_def( b, n, _regalloc->get_reg_first(n), is_def );
2883       anti_do_def( b, n, _regalloc->get_reg_second(n), is_def );
2884     }
2885 
2886     // Kill projections on a branch should appear to occur on the
2887     // branch, not afterwards, so grab the masks from the projections
2888     // and process them.
2889     if (n->is_MachBranch() || n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_Jump) {
2890       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2891         Node* use = n->fast_out(i);
2892         if (use->is_Proj()) {
2893           RegMask rm = use->out_RegMask();// Make local copy
2894           while( rm.is_NotEmpty() ) {
2895             OptoReg::Name kill = rm.find_first_elem();
2896             rm.Remove(kill);
2897             anti_do_def( b, n, kill, false );
2898           }
2899         }
2900       }
2901     }
2902 
2903     // Check each register used by this instruction for a following DEF/KILL
2904     // that must occur afterward and requires an anti-dependence edge.
2905     for( uint j=0; j<n->req(); j++ ) {
2906       Node *def = n->in(j);
2907       if( def ) {
2908         assert( !def->is_MachProj() || def->ideal_reg() != MachProjNode::fat_proj, "" );
2909         anti_do_use( b, n, _regalloc->get_reg_first(def) );
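
The anti-dependence pass makes sure an instruction that reads a register stays scheduled before any later instruction that defines or kills it, including kills expressed as projections hanging off a branch or jump. A toy model of the bookkeeping (add_anti_deps and the parallel uses/kills vectors are illustrative; in C2 this is done by anti_do_def/anti_do_use over OptoReg names):

#include <map>
#include <vector>

struct Edge { int before; int after; };  // 'before' must be scheduled before 'after'

// Scan the block top-down, remembering which instruction last read each
// register; when a later instruction defines or kills that register, add an
// anti-dependence edge so the reader cannot be moved below the kill.
// uses[i] and kills[i] list the registers read / clobbered by instruction i.
static std::vector<Edge> add_anti_deps(const std::vector<std::vector<int> >& uses,
                                       const std::vector<std::vector<int> >& kills) {
  std::vector<Edge> edges;
  std::map<int, int> last_reader;                 // reg -> latest instruction reading it
  for (size_t i = 0; i < uses.size(); i++) {
    for (int r : kills[i]) {
      std::map<int, int>::iterator it = last_reader.find(r);
      if (it != last_reader.end()) {
        edges.push_back({it->second, (int)i});    // reader stays above the killer
      }
    }
    for (int r : uses[i]) last_reader[r] = (int)i;
  }
  return edges;
}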

