src/share/vm/opto/reg_split.cpp
JDK-8022284

Old file:

 115   while( i < b->_nodes.size() &&
 116          (b->_nodes[i]->is_Proj() ||
 117           b->_nodes[i]->is_Phi() ) )
 118     i++;
 119 
 120   // Do not insert between a call and its Catch
 121   if( b->_nodes[i]->is_Catch() ) {
 122     // Put the instruction at the top of the fall-thru block.
 123     // Find the fall-thru projection
 124     while( 1 ) {
 125       const CatchProjNode *cp = b->_nodes[++i]->as_CatchProj();
 126       if( cp->_con == CatchProjNode::fall_through_index )
 127         break;
 128     }
 129     int sidx = i - b->end_idx()-1;
 130     b = b->_succs[sidx];        // Switch to successor block
 131     i = 1;                      // Right at start of block
 132   }
 133 
 134   b->_nodes.insert(i,spill);    // Insert node in block
 135   _cfg._bbs.map(spill->_idx,b); // Update node->block mapping to reflect
 136   // Adjust the point where we go hi-pressure
 137   if( i <= b->_ihrp_index ) b->_ihrp_index++;
 138   if( i <= b->_fhrp_index ) b->_fhrp_index++;
 139 
 140   // Assign a new Live Range Number to the SpillCopy and grow
 141   // the node->live range mapping.
 142   new_lrg(spill,maxlrg);
 143 }
 144 
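The successor index computed above (sidx = i - b->end_idx() - 1) leans on the block layout the surrounding code already assumes: the outgoing CatchProj nodes sit immediately after end_idx(), in the same order as the _succs array. A worked example with made-up indices (illustration only, not part of reg_split.cpp):

    // Hypothetical call block:
    //   _nodes[end_idx()]     -- the Catch
    //   _nodes[end_idx() + 1] -- CatchProj paired with _succs[0]
    //   _nodes[end_idx() + 2] -- CatchProj paired with _succs[1]
    // If end_idx() == 12 and the scan stops at the fall-through CatchProj at
    // i == 14, then sidx = 14 - 12 - 1 = 1, so the spill is inserted at the
    // top of _succs[1] (index 1, just after the block head).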
 145 //------------------------------split_DEF--------------------------------------
 146 // There are four categories of Split; UP/DOWN x DEF/USE
 147 // Only three of these really occur as DOWN/USE will always color
 148 // Any Split with a DEF cannot CISC-Spill now.  Thus we need
 149 // two helper routines, one for Split DEFS (insert after instruction),
 150 // one for Split USES (insert before instruction).  DEF insertion
 151 // happens inside Split, where the Leaveblock array is updated.
 152 uint PhaseChaitin::split_DEF( Node *def, Block *b, int loc, uint maxlrg, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx ) {
 153 #ifdef ASSERT
 154   // Increment the counter for this lrg
 155   splits.at_put(slidx, splits.at(slidx)+1);


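The UP/DOWN x DEF/USE taxonomy named in the comment above governs the rest of this file, so it is worth spelling out. A standalone sketch (the enum and its names are illustrative only, not HotSpot code; UP means the value lives in a register, DOWN means it lives on the stack):

    // Illustrative only -- not part of reg_split.cpp.
    enum SplitKind {
      DEF_UP,    // def-side split (inserted after the instruction) whose new live range stays UP
      DEF_DOWN,  // def-side split whose new live range is spilled DOWN to the stack
      USE_UP,    // use-side split (inserted before the instruction) that brings a DOWN value back UP
      USE_DOWN   // never emitted: a use of a DOWN value "will always color" to its stack slot
    };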
 202 
 203   //-------------------------------------------
 204   // Check for use of debug info
 205   if (useidx >= debug_start && useidx < debug_end) {
 206     // Actually it's perfectly legal for constant debug info to appear,
 207     // just unlikely.  In this case the optimizer left a ConI of a 4
 208     // as both inputs to a Phi with only a debug use.  It's a single-def
 209     // live range of a rematerializable value.  The live range spills,
 210     // rematerializes and now the ConI directly feeds into the debug info.
 211     // assert(!def->is_Con(), "constant debug info already constructed directly");
 212 
 213     // Special split handling for Debug Info
 214     // If DEF is DOWN, just hook the edge and return
 215     // If DEF is UP, Split it DOWN for this USE.
 216     if( def->is_Mach() ) {
 217       if( def_down ) {
 218         // DEF is DOWN, so connect USE directly to the DEF
 219         use->set_req(useidx, def);
 220       } else {
 221         // Block and index where the use occurs.
 222         Block *b = _cfg._bbs[use->_idx];
 223         // Put the clone just prior to use
 224         int bindex = b->find_node(use);
 225         // DEF is UP, so must copy it DOWN and hook in USE
 226         // Insert SpillCopy before the USE, which uses DEF as its input,
 227         // and defs a new live range, which is used by this node.
 228         Node *spill = get_spillcopy_wide(def,use,useidx);
 229         // did we fail to split?
 230         if (!spill) {
 231           // Bail
 232           return 0;
 233         }
 234         // insert into basic block
 235         insert_proj( b, bindex, spill, maxlrg++ );
 236         // Use the new split
 237         use->set_req(useidx,spill);
 238       }
 239       // No further split handling needed for this use
 240       return maxlrg;
 241     }  // End special splitting for debug info live range
 242   }  // If debug info


 253       inp = use->as_Mach()->operand_index(inp);
 254     if( inp == (int)useidx ) {
 255       use->set_req(useidx, def);
 256 #ifndef PRODUCT
 257       if( TraceCISCSpill ) {
 258         tty->print("  set_split: ");
 259         use->dump();
 260       }
 261 #endif
 262       return maxlrg;
 263     }
 264   }
 265 
 266   //-------------------------------------------
 267   // Insert a Copy before the use
 268 
 269   // Block and index where the use occurs.
 270   int bindex;
 271   // Phi input spill-copies belong at the end of the prior block
 272   if( use->is_Phi() ) {
 273     b = _cfg._bbs[b->pred(useidx)->_idx];
 274     bindex = b->end_idx();
 275   } else {
 276     // Put the clone just prior to use
 277     bindex = b->find_node(use);
 278   }
 279 
 280   Node *spill = get_spillcopy_wide( def, use, useidx );
 281   if( !spill ) return 0;        // Bailed out
 282   // Insert SpillCopy before the USE, which uses the reaching DEF as
 283   // its input, and defs a new live range, which is used by this node.
 284   insert_proj( b, bindex, spill, maxlrg++ );
 285   // Use the spill/clone
 286   use->set_req(useidx,spill);
 287 
 288   // return updated live range count
 289   return maxlrg;
 290 }
 291 
 292 //------------------------------clone_node----------------------------
 293 // Clone node with anti dependence check.


 318 //------------------------------split_Rematerialize----------------------------
 319 // Clone a local copy of the def.
 320 Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits, int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru ) {
 321   // The input live ranges will be stretched to the site of the new
 322   // instruction.  They might be stretched past a def and will thus
 323   // have the old and new values of the same live range alive at the
 324   // same time - a definite no-no.  Split out private copies of
 325   // the inputs.
 326   if( def->req() > 1 ) {
 327     for( uint i = 1; i < def->req(); i++ ) {
 328       Node *in = def->in(i);
 329       // Check for single-def (LRG cannot be redefined)
 330       uint lidx = _lrg_map.live_range_id(in);
 331       if (lidx >= _lrg_map.max_lrg_id()) {
 332         continue; // Value is a recent spill-copy
 333       }
 334       if (lrgs(lidx).is_singledef()) {
 335         continue;
 336       }
 337 
 338       Block *b_def = _cfg._bbs[def->_idx];
 339       int idx_def = b_def->find_node(def);
 340       Node *in_spill = get_spillcopy_wide( in, def, i );
 341       if( !in_spill ) return 0; // Bailed out
 342       insert_proj(b_def,idx_def,in_spill,maxlrg++);
 343       if( b_def == b )
 344         insidx++;
 345       def->set_req(i,in_spill);
 346     }
 347   }
 348 
 349   Node *spill = clone_node(def, b, C);
 350   if (spill == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
 351     // Check when generating nodes
 352     return 0;
 353   }
 354 
 355   // See if any inputs are currently being spilled, and take the
 356   // latest copy of spilled inputs.
 357   if( spill->req() > 1 ) {
 358     for( uint i = 1; i < spill->req(); i++ ) {

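split_Rematerialize exists so that cheap, single-def values (typically constants) never pay for a store/load pair at all: the def is simply cloned at the point that needs it, after the clone is first given private copies of any inputs whose live ranges could otherwise be stretched across a redefinition. A before/after illustration of the payoff (hypothetical pseudo-code, not HotSpot output):

    // Spill/reload flavor:                    Rematerialize flavor:
    //   t = 4           // cheap def            t = 4          // original def
    //   [sp+8] = t      // store DOWN           ...            // t need not stay live
    //   ... high register pressure ...          ... high register pressure ...
    //   r = [sp+8]      // reload UP            r = 4          // def cloned at the use
    //   use(r)                                  use(r)
    //
    // The reload becomes a re-issued cheap instruction, and the value's live
    // range no longer spans the high-pressure region.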

 572     // Cycle through this block's predecessors, collecting Reaches
 573     // info for each spilled LRG.  If they are identical, no phi is
 574     // needed.  If they differ, check for a phi, and insert if missing,
 575     // or update edges if present.  Set current block's Reaches set to
 576     // be either the phi's or the reaching def, as appropriate.
 577     // If no Phi is needed, check if the LRG needs to spill on entry
 578     // to the block due to HRP.
 579     for( slidx = 0; slidx < spill_cnt; slidx++ ) {
 580       // Grab the live range number
 581       uint lidx = lidxs.at(slidx);
 582       // Do not bother splitting or putting in Phis for single-def
 583       // rematerialized live ranges.  This happens a lot to constants
 584       // with long live ranges.
 585       if( lrgs(lidx).is_singledef() &&
 586           lrgs(lidx)._def->rematerialize() ) {
 587         // reset the Reaches & UP entries
 588         Reachblock[slidx] = lrgs(lidx)._def;
 589         UPblock[slidx] = true;
 590         // Record following instruction in case 'n' rematerializes and
 591         // kills flags
 592         Block *pred1 = _cfg._bbs[b->pred(1)->_idx];
 593         continue;
 594       }
 595 
 596       // Initialize needs_phi and needs_split
 597       bool needs_phi = false;
 598       bool needs_split = false;
 599       bool has_phi = false;
 600       // Walk the predecessor blocks to check inputs for that live range
 601       // Grab predecessor block header
 602       n1 = b->pred(1);
 603       // Grab the appropriate reaching def info for inpidx
 604       pred = _cfg._bbs[n1->_idx];
 605       pidx = pred->_pre_order;
 606       Node **Ltmp = Reaches[pidx];
 607       bool  *Utmp = UP[pidx];
 608       n1 = Ltmp[slidx];
 609       u1 = Utmp[slidx];
 610       // Initialize node for saving type info
 611       n3 = n1;
 612       u3 = u1;
 613 
 614       // Compare inputs to see if a Phi is needed
 615       for( inpidx = 2; inpidx < b->num_preds(); inpidx++ ) {
 616         // Grab predecessor block headers
 617         n2 = b->pred(inpidx);
 618         // Grab the appropriate reaching def info for inpidx
 619         pred = _cfg._bbs[n2->_idx];
 620         pidx = pred->_pre_order;
 621         Ltmp = Reaches[pidx];
 622         Utmp = UP[pidx];
 623         n2 = Ltmp[slidx];
 624         u2 = Utmp[slidx];
 625         // For each LRG, decide if a phi is necessary
 626         if( n1 != n2 ) {
 627           needs_phi = true;
 628         }
 629         // See if the phi has mismatched inputs, UP vs. DOWN
 630         if( n1 && n2 && (u1 != u2) ) {
 631           needs_split = true;
 632         }
 633         // Move n2/u2 to n1/u1 for next iteration
 634         n1 = n2;
 635         u1 = u2;
 636         // Preserve a non-NULL predecessor for later type referencing
 637         if( (n3 == NULL) && (n2 != NULL) ){
 638           n3 = n2;
 639           u3 = u2;

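Condensed, the predecessor walk above implements a small decision procedure: a Phi is needed as soon as two predecessors hand the block different reaching defs, and a split is needed when two non-NULL inputs of the same live range disagree on UP versus DOWN. A minimal standalone sketch of just that decision (hypothetical names, not HotSpot code):

    #include <cstddef>

    struct ReachInfo {
      const void *def;  // reaching definition coming out of one predecessor block
      bool        up;   // true: value is UP (in a register); false: DOWN (on the stack)
    };

    struct Decision { bool needs_phi; bool needs_split; };

    // Mirrors the loop above: compare each predecessor's reaching def (and its
    // UP/DOWN flag) against the previous one, exactly as n1/u1 are shifted along.
    static Decision decide(const ReachInfo *preds, size_t npreds) {
      Decision d = { false, false };
      for (size_t i = 1; i < npreds; i++) {
        if (preds[i].def != preds[i - 1].def)
          d.needs_phi = true;      // different defs reach this block: merge them with a Phi
        if (preds[i].def != NULL && preds[i - 1].def != NULL && preds[i].up != preds[i - 1].up)
          d.needs_split = true;    // same live range arrives both UP and DOWN: insert a split copy
      }
      return d;
    }

The real loop also remembers the first non-NULL def it sees (n3/u3) for later type information; that bookkeeping is omitted here.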

 684         assert(phi != NULL,"Must have a Phi Node here");
 685         phis->push(phi);
 686         // PhiNodes should either force the LRG UP or DOWN depending
 687         // on its inputs and the register pressure in the Phi's block.
 688         UPblock[slidx] = true;  // Assume new DEF is UP
 689         // If entering a high-pressure area with no immediate use,
 690         // assume Phi is DOWN
 691         if( is_high_pressure( b, &lrgs(lidx), b->end_idx()) && !prompt_use(b,lidx) )
 692           UPblock[slidx] = false;
 693         // If we are not split up/down and all inputs are down, then we
 694         // are down
 695         if( !needs_split && !u3 )
 696           UPblock[slidx] = false;
 697       }  // end if phi is needed
 698 
 699       // Do not need a phi, so grab the reaching DEF
 700       else {
 701         // Grab predecessor block header
 702         n1 = b->pred(1);
 703         // Grab the appropriate reaching def info for k
 704         pred = _cfg._bbs[n1->_idx];
 705         pidx = pred->_pre_order;
 706         Node **Ltmp = Reaches[pidx];
 707         bool  *Utmp = UP[pidx];
 708         // reset the Reaches & UP entries
 709         Reachblock[slidx] = Ltmp[slidx];
 710         UPblock[slidx] = Utmp[slidx];
 711       }  // end else no Phi is needed
 712     }  // end for all spilling live ranges
 713     // DEBUG
 714 #ifndef PRODUCT
 715     if(trace_spilling()) {
 716       tty->print("\nBlock %d: ", b->_pre_order);
 717       tty->print("Reaching Definitions after Phi handling\n");
 718       for( uint x = 0; x < spill_cnt; x++ ) {
 719         tty->print("Spill Idx %d: UP %d: Node\n",x,UPblock[x]);
 720         if( Reachblock[x] )
 721           Reachblock[x]->dump();
 722         else
 723           tty->print("Undefined\n");
 724       }


 902           // Get lidx of input
 903           uint useidx = _lrg_map.find_id(n->in(inpidx));
 904           // Not a brand-new split, and it is a spill use
 905           if (useidx < _lrg_map.max_lrg_id() && lrgs(useidx).reg() >= LRG::SPILL_REG) {
 906             // Check for valid reaching DEF
 907             slidx = lrg2reach[useidx];
 908             Node *def = Reachblock[slidx];
 909             assert( def != NULL, "Using Undefined Value in Split()\n");
 910 
 911             // (+++) %%%% remove this in favor of pre-pass in matcher.cpp
 912             // monitor references do not care where they live, so just hook
 913             if ( jvms && jvms->is_monitor_use(inpidx) ) {
 914               // The effect of this clone is to drop the node out of the block,
 915               // so that the allocator does not see it anymore, and therefore
 916               // does not attempt to assign it a register.
 917               def = clone_node(def, b, C);
 918               if (def == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
 919                 return 0;
 920               }
 921               _lrg_map.extend(def->_idx, 0);
 922               _cfg._bbs.map(def->_idx,b);
 923               n->set_req(inpidx, def);
 924               continue;
 925             }
 926 
 927             // Rematerializable?  Then clone def at use site instead
 928             // of store/load
 929             if( def->rematerialize() ) {
 930               int old_size = b->_nodes.size();
 931               def = split_Rematerialize( def, b, insidx, maxlrg, splits, slidx, lrg2reach, Reachblock, true );
 932               if( !def ) return 0; // Bail out
 933               insidx += b->_nodes.size()-old_size;
 934             }
 935 
 936             MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL;
 937             // Base pointers and oopmap references do not care where they live.
 938             if ((inpidx >= oopoff) ||
 939                 (mach && mach->ideal_Opcode() == Op_AddP && inpidx == AddPNode::Base)) {
 940               if (def->rematerialize() && lrgs(useidx)._was_spilled2) {
 941                 // This def has been rematerialized a couple of times without
 942                 // progress. It doesn't care if it lives UP or DOWN, so


1274 #endif
1275   }  // End For All Blocks
1276 
1277   //----------PASS 2----------
1278   // Reset all DEF live range numbers here
1279   for( insidx = 0; insidx < defs->size(); insidx++ ) {
1280     // Grab the def
1281     n1 = defs->at(insidx);
1282     // Set new lidx for DEF
1283     new_lrg(n1, maxlrg++);
1284   }
1285   //----------Phi Node Splitting----------
1286   // Clean up a phi here, and assign a new live range number
1287   // Cycle through this block's predecessors, collecting Reaches
1288   // info for each spilled LRG and update edges.
1289   // Walk the phis list to patch inputs, split phis, and name phis
1290   uint lrgs_before_phi_split = maxlrg;
1291   for( insidx = 0; insidx < phis->size(); insidx++ ) {
1292     Node *phi = phis->at(insidx);
1293     assert(phi->is_Phi(),"This list must only contain Phi Nodes");
1294     Block *b = _cfg._bbs[phi->_idx];
1295     // Grab the live range number
1296     uint lidx = _lrg_map.find_id(phi);
1297     uint slidx = lrg2reach[lidx];
1298     // Update node to lidx map
1299     new_lrg(phi, maxlrg++);
1300     // Get PASS1's up/down decision for the block.
1301     int phi_up = !!UP_entry[slidx]->test(b->_pre_order);
1302 
1303     // Force down if double-spilling live range
1304     if( lrgs(lidx)._was_spilled1 )
1305       phi_up = false;
1306 
 1307     // When splitting a Phi we can split it normally or "inverted".
1308     // An inverted split makes the splits target the Phi's UP/DOWN
1309     // sense inverted; then the Phi is followed by a final def-side
1310     // split to invert back.  It changes which blocks the spill code
1311     // goes in.
1312 
1313     // Walk the predecessor blocks and assign the reaching def to the Phi.
1314     // Split Phi nodes by placing USE side splits wherever the reaching
1315     // DEF has the wrong UP/DOWN value.
1316     for( uint i = 1; i < b->num_preds(); i++ ) {
1317       // Get predecessor block pre-order number
1318       Block *pred = _cfg._bbs[b->pred(i)->_idx];
1319       pidx = pred->_pre_order;
1320       // Grab reaching def
1321       Node *def = Reaches[pidx][slidx];
1322       assert( def, "must have reaching def" );
1323       // If input up/down sense and reg-pressure DISagree
1324       if (def->rematerialize() && contains_no_live_range_input(def)) {
1325         // Place the rematerialized node above any MSCs created during
1326         // phi node splitting.  end_idx points at the insertion point
1327         // so look at the node before it.
1328         int insert = pred->end_idx();
1329         while (insert >= 1 &&
1330                pred->_nodes[insert - 1]->is_SpillCopy() &&
1331                _lrg_map.find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) {
1332           insert--;
1333         }
1334         def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false);
1335         if (!def) {
1336           return 0;    // Bail out
1337         }
1338       }


New file:

 115   while( i < b->_nodes.size() &&
 116          (b->_nodes[i]->is_Proj() ||
 117           b->_nodes[i]->is_Phi() ) )
 118     i++;
 119 
 120   // Do not insert between a call and its Catch
 121   if( b->_nodes[i]->is_Catch() ) {
 122     // Put the instruction at the top of the fall-thru block.
 123     // Find the fall-thru projection
 124     while( 1 ) {
 125       const CatchProjNode *cp = b->_nodes[++i]->as_CatchProj();
 126       if( cp->_con == CatchProjNode::fall_through_index )
 127         break;
 128     }
 129     int sidx = i - b->end_idx()-1;
 130     b = b->_succs[sidx];        // Switch to successor block
 131     i = 1;                      // Right at start of block
 132   }
 133 
 134   b->_nodes.insert(i,spill);    // Insert node in block
 135   _cfg.map_node_to_block(spill,  b); // Update node->block mapping to reflect
 136   // Adjust the point where we go hi-pressure
 137   if( i <= b->_ihrp_index ) b->_ihrp_index++;
 138   if( i <= b->_fhrp_index ) b->_fhrp_index++;
 139 
 140   // Assign a new Live Range Number to the SpillCopy and grow
 141   // the node->live range mapping.
 142   new_lrg(spill,maxlrg);
 143 }
 144 
 145 //------------------------------split_DEF--------------------------------------
 146 // There are four categories of Split; UP/DOWN x DEF/USE
 147 // Only three of these really occur as DOWN/USE will always color
 148 // Any Split with a DEF cannot CISC-Spill now.  Thus we need
 149 // two helper routines, one for Split DEFS (insert after instruction),
 150 // one for Split USES (insert before instruction).  DEF insertion
 151 // happens inside Split, where the Leaveblock array is updated.
 152 uint PhaseChaitin::split_DEF( Node *def, Block *b, int loc, uint maxlrg, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx ) {
 153 #ifdef ASSERT
 154   // Increment the counter for this lrg
 155   splits.at_put(slidx, splits.at(slidx)+1);


 202 
 203   //-------------------------------------------
 204   // Check for use of debug info
 205   if (useidx >= debug_start && useidx < debug_end) {
 206     // Actually it's perfectly legal for constant debug info to appear,
 207     // just unlikely.  In this case the optimizer left a ConI of a 4
 208     // as both inputs to a Phi with only a debug use.  It's a single-def
 209     // live range of a rematerializable value.  The live range spills,
 210     // rematerializes and now the ConI directly feeds into the debug info.
 211     // assert(!def->is_Con(), "constant debug info already constructed directly");
 212 
 213     // Special split handling for Debug Info
 214     // If DEF is DOWN, just hook the edge and return
 215     // If DEF is UP, Split it DOWN for this USE.
 216     if( def->is_Mach() ) {
 217       if( def_down ) {
 218         // DEF is DOWN, so connect USE directly to the DEF
 219         use->set_req(useidx, def);
 220       } else {
 221         // Block and index where the use occurs.
 222         Block *b = _cfg.get_block_for_node(use);
 223         // Put the clone just prior to use
 224         int bindex = b->find_node(use);
 225         // DEF is UP, so must copy it DOWN and hook in USE
 226         // Insert SpillCopy before the USE, which uses DEF as its input,
 227         // and defs a new live range, which is used by this node.
 228         Node *spill = get_spillcopy_wide(def,use,useidx);
 229         // did we fail to split?
 230         if (!spill) {
 231           // Bail
 232           return 0;
 233         }
 234         // insert into basic block
 235         insert_proj( b, bindex, spill, maxlrg++ );
 236         // Use the new split
 237         use->set_req(useidx,spill);
 238       }
 239       // No further split handling needed for this use
 240       return maxlrg;
 241     }  // End special splitting for debug info live range
 242   }  // If debug info


 253       inp = use->as_Mach()->operand_index(inp);
 254     if( inp == (int)useidx ) {
 255       use->set_req(useidx, def);
 256 #ifndef PRODUCT
 257       if( TraceCISCSpill ) {
 258         tty->print("  set_split: ");
 259         use->dump();
 260       }
 261 #endif
 262       return maxlrg;
 263     }
 264   }
 265 
 266   //-------------------------------------------
 267   // Insert a Copy before the use
 268 
 269   // Block and index where the use occurs.
 270   int bindex;
 271   // Phi input spill-copies belong at the end of the prior block
 272   if( use->is_Phi() ) {
 273     b = _cfg.get_block_for_node(b->pred(useidx));
 274     bindex = b->end_idx();
 275   } else {
 276     // Put the clone just prior to use
 277     bindex = b->find_node(use);
 278   }
 279 
 280   Node *spill = get_spillcopy_wide( def, use, useidx );
 281   if( !spill ) return 0;        // Bailed out
 282   // Insert SpillCopy before the USE, which uses the reaching DEF as
 283   // its input, and defs a new live range, which is used by this node.
 284   insert_proj( b, bindex, spill, maxlrg++ );
 285   // Use the spill/clone
 286   use->set_req(useidx,spill);
 287 
 288   // return updated live range count
 289   return maxlrg;
 290 }
 291 
 292 //------------------------------clone_node----------------------------
 293 // Clone node with anti dependence check.


 318 //------------------------------split_Rematerialize----------------------------
 319 // Clone a local copy of the def.
 320 Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits, int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru ) {
 321   // The input live ranges will be stretched to the site of the new
 322   // instruction.  They might be stretched past a def and will thus
 323   // have the old and new values of the same live range alive at the
 324   // same time - a definite no-no.  Split out private copies of
 325   // the inputs.
 326   if( def->req() > 1 ) {
 327     for( uint i = 1; i < def->req(); i++ ) {
 328       Node *in = def->in(i);
 329       // Check for single-def (LRG cannot be redefined)
 330       uint lidx = _lrg_map.live_range_id(in);
 331       if (lidx >= _lrg_map.max_lrg_id()) {
 332         continue; // Value is a recent spill-copy
 333       }
 334       if (lrgs(lidx).is_singledef()) {
 335         continue;
 336       }
 337 
 338       Block *b_def = _cfg.get_block_for_node(def);
 339       int idx_def = b_def->find_node(def);
 340       Node *in_spill = get_spillcopy_wide( in, def, i );
 341       if( !in_spill ) return 0; // Bailed out
 342       insert_proj(b_def,idx_def,in_spill,maxlrg++);
 343       if( b_def == b )
 344         insidx++;
 345       def->set_req(i,in_spill);
 346     }
 347   }
 348 
 349   Node *spill = clone_node(def, b, C);
 350   if (spill == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
 351     // Check when generating nodes
 352     return 0;
 353   }
 354 
 355   // See if any inputs are currently being spilled, and take the
 356   // latest copy of spilled inputs.
 357   if( spill->req() > 1 ) {
 358     for( uint i = 1; i < spill->req(); i++ ) {


 572     // Cycle through this block's predecessors, collecting Reaches
 573     // info for each spilled LRG.  If they are identical, no phi is
 574     // needed.  If they differ, check for a phi, and insert if missing,
 575     // or update edges if present.  Set current block's Reaches set to
 576     // be either the phi's or the reaching def, as appropriate.
 577     // If no Phi is needed, check if the LRG needs to spill on entry
 578     // to the block due to HRP.
 579     for( slidx = 0; slidx < spill_cnt; slidx++ ) {
 580       // Grab the live range number
 581       uint lidx = lidxs.at(slidx);
 582       // Do not bother splitting or putting in Phis for single-def
 583       // rematerialized live ranges.  This happens a lot to constants
 584       // with long live ranges.
 585       if( lrgs(lidx).is_singledef() &&
 586           lrgs(lidx)._def->rematerialize() ) {
 587         // reset the Reaches & UP entries
 588         Reachblock[slidx] = lrgs(lidx)._def;
 589         UPblock[slidx] = true;
 590         // Record following instruction in case 'n' rematerializes and
 591         // kills flags
 592         Block *pred1 = _cfg.get_block_for_node(b->pred(1));
 593         continue;
 594       }
 595 
 596       // Initialize needs_phi and needs_split
 597       bool needs_phi = false;
 598       bool needs_split = false;
 599       bool has_phi = false;
 600       // Walk the predecessor blocks to check inputs for that live range
 601       // Grab predecessor block header
 602       n1 = b->pred(1);
 603       // Grab the appropriate reaching def info for inpidx
 604       pred = _cfg.get_block_for_node(n1);
 605       pidx = pred->_pre_order;
 606       Node **Ltmp = Reaches[pidx];
 607       bool  *Utmp = UP[pidx];
 608       n1 = Ltmp[slidx];
 609       u1 = Utmp[slidx];
 610       // Initialize node for saving type info
 611       n3 = n1;
 612       u3 = u1;
 613 
 614       // Compare inputs to see if a Phi is needed
 615       for( inpidx = 2; inpidx < b->num_preds(); inpidx++ ) {
 616         // Grab predecessor block headers
 617         n2 = b->pred(inpidx);
 618         // Grab the appropriate reaching def info for inpidx
 619         pred = _cfg.get_block_for_node(n2);
 620         pidx = pred->_pre_order;
 621         Ltmp = Reaches[pidx];
 622         Utmp = UP[pidx];
 623         n2 = Ltmp[slidx];
 624         u2 = Utmp[slidx];
 625         // For each LRG, decide if a phi is necessary
 626         if( n1 != n2 ) {
 627           needs_phi = true;
 628         }
 629         // See if the phi has mismatched inputs, UP vs. DOWN
 630         if( n1 && n2 && (u1 != u2) ) {
 631           needs_split = true;
 632         }
 633         // Move n2/u2 to n1/u1 for next iteration
 634         n1 = n2;
 635         u1 = u2;
 636         // Preserve a non-NULL predecessor for later type referencing
 637         if( (n3 == NULL) && (n2 != NULL) ){
 638           n3 = n2;
 639           u3 = u2;


 684         assert(phi != NULL,"Must have a Phi Node here");
 685         phis->push(phi);
 686         // PhiNodes should either force the LRG UP or DOWN depending
 687         // on its inputs and the register pressure in the Phi's block.
 688         UPblock[slidx] = true;  // Assume new DEF is UP
 689         // If entering a high-pressure area with no immediate use,
 690         // assume Phi is DOWN
 691         if( is_high_pressure( b, &lrgs(lidx), b->end_idx()) && !prompt_use(b,lidx) )
 692           UPblock[slidx] = false;
 693         // If we are not split up/down and all inputs are down, then we
 694         // are down
 695         if( !needs_split && !u3 )
 696           UPblock[slidx] = false;
 697       }  // end if phi is needed
 698 
 699       // Do not need a phi, so grab the reaching DEF
 700       else {
 701         // Grab predecessor block header
 702         n1 = b->pred(1);
 703         // Grab the appropriate reaching def info for k
 704         pred = _cfg.get_block_for_node(n1);
 705         pidx = pred->_pre_order;
 706         Node **Ltmp = Reaches[pidx];
 707         bool  *Utmp = UP[pidx];
 708         // reset the Reaches & UP entries
 709         Reachblock[slidx] = Ltmp[slidx];
 710         UPblock[slidx] = Utmp[slidx];
 711       }  // end else no Phi is needed
 712     }  // end for all spilling live ranges
 713     // DEBUG
 714 #ifndef PRODUCT
 715     if(trace_spilling()) {
 716       tty->print("\nBlock %d: ", b->_pre_order);
 717       tty->print("Reaching Definitions after Phi handling\n");
 718       for( uint x = 0; x < spill_cnt; x++ ) {
 719         tty->print("Spill Idx %d: UP %d: Node\n",x,UPblock[x]);
 720         if( Reachblock[x] )
 721           Reachblock[x]->dump();
 722         else
 723           tty->print("Undefined\n");
 724       }


 902           // Get lidx of input
 903           uint useidx = _lrg_map.find_id(n->in(inpidx));
 904           // Not a brand-new split, and it is a spill use
 905           if (useidx < _lrg_map.max_lrg_id() && lrgs(useidx).reg() >= LRG::SPILL_REG) {
 906             // Check for valid reaching DEF
 907             slidx = lrg2reach[useidx];
 908             Node *def = Reachblock[slidx];
 909             assert( def != NULL, "Using Undefined Value in Split()\n");
 910 
 911             // (+++) %%%% remove this in favor of pre-pass in matcher.cpp
 912             // monitor references do not care where they live, so just hook
 913             if ( jvms && jvms->is_monitor_use(inpidx) ) {
 914               // The effect of this clone is to drop the node out of the block,
 915               // so that the allocator does not see it anymore, and therefore
 916               // does not attempt to assign it a register.
 917               def = clone_node(def, b, C);
 918               if (def == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
 919                 return 0;
 920               }
 921               _lrg_map.extend(def->_idx, 0);
 922               _cfg.map_node_to_block(def, b);
 923               n->set_req(inpidx, def);
 924               continue;
 925             }
 926 
 927             // Rematerializable?  Then clone def at use site instead
 928             // of store/load
 929             if( def->rematerialize() ) {
 930               int old_size = b->_nodes.size();
 931               def = split_Rematerialize( def, b, insidx, maxlrg, splits, slidx, lrg2reach, Reachblock, true );
 932               if( !def ) return 0; // Bail out
 933               insidx += b->_nodes.size()-old_size;
 934             }
 935 
 936             MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL;
 937             // Base pointers and oopmap references do not care where they live.
 938             if ((inpidx >= oopoff) ||
 939                 (mach && mach->ideal_Opcode() == Op_AddP && inpidx == AddPNode::Base)) {
 940               if (def->rematerialize() && lrgs(useidx)._was_spilled2) {
 941                 // This def has been rematerialized a couple of times without
 942                 // progress. It doesn't care if it lives UP or DOWN, so


1274 #endif
1275   }  // End For All Blocks
1276 
1277   //----------PASS 2----------
1278   // Reset all DEF live range numbers here
1279   for( insidx = 0; insidx < defs->size(); insidx++ ) {
1280     // Grab the def
1281     n1 = defs->at(insidx);
1282     // Set new lidx for DEF
1283     new_lrg(n1, maxlrg++);
1284   }
1285   //----------Phi Node Splitting----------
1286   // Clean up a phi here, and assign a new live range number
1287   // Cycle through this block's predecessors, collecting Reaches
1288   // info for each spilled LRG and update edges.
1289   // Walk the phis list to patch inputs, split phis, and name phis
1290   uint lrgs_before_phi_split = maxlrg;
1291   for( insidx = 0; insidx < phis->size(); insidx++ ) {
1292     Node *phi = phis->at(insidx);
1293     assert(phi->is_Phi(),"This list must only contain Phi Nodes");
1294     Block *b = _cfg.get_block_for_node(phi);
1295     // Grab the live range number
1296     uint lidx = _lrg_map.find_id(phi);
1297     uint slidx = lrg2reach[lidx];
1298     // Update node to lidx map
1299     new_lrg(phi, maxlrg++);
1300     // Get PASS1's up/down decision for the block.
1301     int phi_up = !!UP_entry[slidx]->test(b->_pre_order);
1302 
1303     // Force down if double-spilling live range
1304     if( lrgs(lidx)._was_spilled1 )
1305       phi_up = false;
1306 
 1307     // When splitting a Phi we can split it normally or "inverted".
1308     // An inverted split makes the splits target the Phi's UP/DOWN
1309     // sense inverted; then the Phi is followed by a final def-side
1310     // split to invert back.  It changes which blocks the spill code
1311     // goes in.
1312 
1313     // Walk the predecessor blocks and assign the reaching def to the Phi.
1314     // Split Phi nodes by placing USE side splits wherever the reaching
1315     // DEF has the wrong UP/DOWN value.
1316     for( uint i = 1; i < b->num_preds(); i++ ) {
1317       // Get predecessor block pre-order number
1318       Block *pred = _cfg.get_block_for_node(b->pred(i));
1319       pidx = pred->_pre_order;
1320       // Grab reaching def
1321       Node *def = Reaches[pidx][slidx];
1322       assert( def, "must have reaching def" );
1323       // If input up/down sense and reg-pressure DISagree
1324       if (def->rematerialize() && contains_no_live_range_input(def)) {
1325         // Place the rematerialized node above any MSCs created during
1326         // phi node splitting.  end_idx points at the insertion point
1327         // so look at the node before it.
1328         int insert = pred->end_idx();
1329         while (insert >= 1 &&
1330                pred->_nodes[insert - 1]->is_SpillCopy() &&
1331                _lrg_map.find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) {
1332           insert--;
1333         }
1334         def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false);
1335         if (!def) {
1336           return 0;    // Bail out
1337         }
1338       }

