
src/share/vm/opto/reg_split.cpp

  43 // point between LRP and HRP.  Uses in the HRP region will use the spilled
  44 // Def.  The first Use outside the HRP region will generate a SpillCopy to
  45 // hoist the live range back up into a register, and all subsequent uses
  46 // will use that new Def until another HRP region is encountered.  Defs in
  47 // HRP regions will get trailing SpillCopies to push the LRG down into the
  48 // stack immediately.
  49 //
  50 // As a side effect, coalesced copies are unlinked and hence made dead.
  51 //
  52 
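
A minimal, self-contained model (not HotSpot code; every name below is
hypothetical) of the up/down policy the comment above describes: inside a
high-register-pressure (HRP) region, uses read the spilled Def from the
stack, and the first use past the region hoists the value back UP with a
single SpillCopy that all later uses share.

    #include <cstdio>
    #include <vector>

    struct Inst { bool in_hrp; };   // does this use sit in an HRP region?

    int main() {
      std::vector<Inst> block = {{true}, {true}, {false}, {false}, {true}};
      bool value_on_stack = true;   // the Def was spilled before the region
      for (size_t i = 0; i < block.size(); i++) {
        if (block[i].in_hrp) {
          value_on_stack = true;    // HRP uses keep reading the stack copy
          std::printf("inst %zu: use stack Def\n", i);
        } else {
          if (value_on_stack) {
            // First Use outside the HRP region: one SpillCopy hoists the
            // live range back into a register; later uses share that Def.
            std::printf("inst %zu: insert SpillCopy (stack -> reg)\n", i);
            value_on_stack = false;
          }
          std::printf("inst %zu: use reg Def\n", i);
        }
      }
      return 0;
    }
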
  53 static const char out_of_nodes[] = "out of nodes during split";
  54 
  55 //------------------------------get_spillcopy_wide-----------------------------
  56 // Get a SpillCopy node with wide-enough masks.  Use the 'wide-mask', the
  57 // wide ideal-register spill-mask if possible.  If the 'wide-mask' does
  58 // not cover the input (or output), use the input (or output) mask instead.
  59 Node *PhaseChaitin::get_spillcopy_wide(MachSpillCopyNode::SpillType spill_type, Node *def, Node *use, uint uidx) {
  60   // If ideal reg doesn't exist we've got a bad schedule happening
  61   // that is forcing us to spill something that isn't spillable.
  62   // Bail rather than abort
  63   int ireg = def->ideal_reg();
  64   if (ireg == 0 || ireg == Op_RegFlags) {
  65     assert(false, "attempted to spill a non-spillable item: %d: %s <- %d: %s, ireg = %d, spill_type: %s",
  66            def->_idx, def->Name(), use ? use->_idx : 0, use ? use->Name() : "NULL", ireg,
  67            MachSpillCopyNode::spill_type(spill_type));
  68     C->record_method_not_compilable("attempted to spill a non-spillable item");
  69     return NULL;
  70   }
  71   if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
  72     return NULL;
  73   }
  74   const RegMask *i_mask = &def->out_RegMask();
  75   const RegMask *w_mask = C->matcher()->idealreg2spillmask[ireg];
  76   const RegMask *o_mask = use ? &use->in_RegMask(uidx) : w_mask;
  77   const RegMask *w_i_mask = w_mask->overlap( *i_mask ) ? w_mask : i_mask;
  78   const RegMask *w_o_mask;
  79 
  80   int num_regs = RegMask::num_registers(ireg);
  81   bool is_vect = RegMask::is_vector(ireg);
  82   if( w_mask->overlap( *o_mask ) && // Overlap AND
  83       ((num_regs == 1) // Single use or aligned
  84         ||  is_vect    // or vector
  85         || (!is_vect && o_mask->is_aligned_pairs())) ) {
  86     assert(!is_vect || o_mask->is_aligned_sets(num_regs), "vectors are aligned");
  87     // Don't come here for mis-aligned doubles
  88     w_o_mask = w_mask;
  89   } else {                      // wide ideal mask does not overlap with o_mask
  90     // Mis-aligned doubles come here and XMM->FPR moves on x86.
  91     w_o_mask = o_mask;          // Must target desired registers
  92     // Does the ideal-reg-mask overlap with o_mask?  I.e., can I use
  93     // a reg-reg move or do I need a trip across register classes
  94     // (and thus through memory)?
  95     if( !C->matcher()->idealreg2regmask[ireg]->overlap( *o_mask) && o_mask->is_UP() )
  96       // Here we assume a trip through memory is required.
  97       w_i_mask = &C->FIRST_STACK_mask();
  98   }
  99   return new MachSpillCopyNode(spill_type, def, *w_i_mask, *w_o_mask );
 100 }
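
The mask plumbing above reduces to overlap tests: keep the wide spill mask
on a side only while it still covers that side's own mask, and force the
input to a stack-only mask when no reg-reg move can bridge the classes. A
toy sketch with plain bitmasks (illustrative only; the real RegMask also
tracks alignment, vectors, and UP/DOWN, which this ignores):

    #include <cstdint>
    #include <cstdio>

    static bool overlap(uint32_t a, uint32_t b) { return (a & b) != 0; }

    int main() {
      uint32_t i_mask = 0x0F;   // registers the def can write
      uint32_t o_mask = 0xF0;   // registers the use can read
      uint32_t w_mask = 0xFF;   // wide ideal-register spill mask
      uint32_t reg_rm = 0x0F;   // reg-reg mask for this ideal register

      uint32_t w_i_mask = overlap(w_mask, i_mask) ? w_mask : i_mask;
      uint32_t w_o_mask = overlap(w_mask, o_mask) ? w_mask : o_mask;
      if (!overlap(reg_rm, o_mask)) {
        // No reg-reg move reaches the use's class: trip through memory,
        // modeled here by a stand-in bit for FIRST_STACK_mask().
        w_i_mask = 0x100;
      }
      std::printf("in=%#x out=%#x\n", w_i_mask, w_o_mask);
      return 0;
    }
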
 101 
 102 //------------------------------insert_proj------------------------------------
 103 // Insert the spill at chosen location.  Skip over any intervening Proj's or
 104 // Phis.  Skip over a CatchNode and projs, inserting in the fall-through block
 105 // instead.  Update high-pressure indices.  Create a new live range.
 106 void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
 107   // Skip intervening ProjNodes.  Do not insert between a ProjNode and
 108   // its definer.
 109   while( i < b->number_of_nodes() &&
 110          (b->get_node(i)->is_Proj() ||
 111           b->get_node(i)->is_Phi() ) )
 112     i++;
 113 
 114   // Do not insert between a call and its Catch
 115   if( b->get_node(i)->is_Catch() ) {


 139 //------------------------------split_DEF--------------------------------------
 140 // There are four categories of Split: UP/DOWN x DEF/USE.
 141 // Only three of these really occur, as a DOWN/USE will always color.
 142 // Any Split with a DEF cannot CISC-Spill now.  Thus we need
 143 // two helper routines, one for Split DEFS (insert after the instruction),
 144 // one for Split USES (insert before the instruction).  DEF insertion
 145 // happens inside Split, where the Leaveblock array is updated.
 146 uint PhaseChaitin::split_DEF( Node *def, Block *b, int loc, uint maxlrg, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx ) {
 147 #ifdef ASSERT
 148   // Increment the counter for this lrg
 149   splits.at_put(slidx, splits.at(slidx)+1);
 150 #endif
 151   // If we are spilling the memory op for an implicit null check, at the
 152   // null check location (i.e., the null check is in the HRP block) we need to do
 153   // the null-check first, then spill-down in the following block.
 154   // (The implicit_null_check function ensures the use is also dominated
 155   // by the branch-not-taken block.)
 156   Node *be = b->end();
 157   if( be->is_MachNullCheck() && be->in(1) == def && def == b->get_node(loc)) {
 158     // Spill goes in the branch-not-taken block
 159     b = b->_succs[b->get_node(b->end_idx()+1)->Opcode() == Op_IfTrue];
 160     loc = 0;                    // Just past the Region
 161   }
 162   assert( loc >= 0, "must insert past block head" );
 163 
 164   // Get a def-side SpillCopy
 165   Node *spill = get_spillcopy_wide(MachSpillCopyNode::Definition, def, NULL, 0);
 166   // Did we fail to split? Then bail.
 167   if (!spill) {
 168     return 0;
 169   }
 170 
 171   // Insert the spill at chosen location
 172   insert_proj( b, loc+1, spill, maxlrg++);
 173 
 174   // Insert new node into Reaches array
 175   Reachblock[slidx] = spill;
 176   // Update debug list of reaching down definitions by adding this one
 177   debug_defs[slidx] = spill;
 178 
 179   // return updated count of live ranges


 317   // The input live ranges will be stretched to the site of the new
 318   // instruction.  They might be stretched past a def and will thus
 319   // have the old and new values of the same live range alive at the
 320   // same time - a definite no-no.  Split out private copies of
 321   // the inputs.
 322   if (def->req() > 1) {
 323     for (uint i = 1; i < def->req(); i++) {
 324       Node *in = def->in(i);
 325       uint lidx = _lrg_map.live_range_id(in);
 326       // We do not need this for live ranges that are only defined once.
 327       // However, this is not true for spill copies that are added in this
 328       // Split() pass, since they might get coalesced later on in this pass.
 329       if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).is_singledef()) {
 330         continue;
 331       }
 332 
 333       Block *b_def = _cfg.get_block_for_node(def);
 334       int idx_def = b_def->find_node(def);
 335       // Cannot spill Op_RegFlags.
 336       Node *in_spill;
 337       if (in->ideal_reg() != Op_RegFlags) {
 338         in_spill = get_spillcopy_wide(MachSpillCopyNode::InputToRematerialization, in, def, i);
 339         if (!in_spill) { return 0; } // Bailed out
 340         insert_proj(b_def, idx_def, in_spill, maxlrg++);
 341         if (b_def == b) {
 342           insidx++;
 343         }
 344         def->set_req(i, in_spill);
 345       } else {
 346         // The 'in' defines a flag register. Flag registers cannot be spilled.
 347         // Register allocation handles live ranges with flag registers
 348         // by rematerializing the def (in this case 'in'). Thus, this is not
 349         // critical if the input can be rematerialized, too.
 350         if (!in->rematerialize()) {
 351           assert(false, "Cannot rematerialize %d: %s. Prolongs RegFlags live"
 352                  " range and defining node %d: %s may not be rematerialized.",
 353                  def->_idx, def->Name(), in->_idx, in->Name());
 354           C->record_method_not_compilable("attempted to spill a non-spillable item with RegFlags input");
 355           return 0; // Bailed out
 356         }
 357       }


 934                 return 0;
 935               }
 936               _lrg_map.extend(def->_idx, 0);
 937               _cfg.map_node_to_block(def, b);
 938               n->set_req(inpidx, def);
 939               continue;
 940             }
 941 
 942             // Rematerializable?  Then clone def at use site instead
 943             // of store/load
 944             if( def->rematerialize() ) {
 945               int old_size = b->number_of_nodes();
 946               def = split_Rematerialize( def, b, insidx, maxlrg, splits, slidx, lrg2reach, Reachblock, true );
 947               if( !def ) return 0; // Bail out
 948               insidx += b->number_of_nodes()-old_size;
 949             }
 950 
 951             MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL;
 952             // Base pointers and oopmap references do not care where they live.
 953             if ((inpidx >= oopoff) ||
 954                 (mach && mach->ideal_Opcode() == Op_AddP && inpidx == AddPNode::Base)) {
 955               if (def->rematerialize() && lrgs(useidx)._was_spilled2) {
 956                 // This def has been rematerialized a couple of times without
 957                 // progress. It doesn't care if it lives UP or DOWN, so
 958                 // spill it down now.
 959                 maxlrg = split_USE(MachSpillCopyNode::BasePointerToMem, def,b,n,inpidx,maxlrg,false,false,splits,slidx);
 960                 // If it wasn't split, bail.
 961                 if (!maxlrg) {
 962                   return 0;
 963                 }
 964                 insidx++;  // Reset iterator to skip USE side split
 965               } else {
 966                 // Just hook the def edge
 967                 n->set_req(inpidx, def);
 968               }
 969 
 970               if (inpidx >= oopoff) {
 971                 // After oopoff, we have derived/base pairs.  We must mention all
 972                 // derived pointers here as derived/base pairs for GC.  If the
 973                 // derived value is spilling and we have a copy both in Reachblock
 974                 // (called here 'def') and debug_defs[slidx] we need to mention
 975                 // both in derived/base pairs or kill one.
 976                 Node *derived_debug = debug_defs[slidx];
 977                 if( ((inpidx - oopoff) & 1) == DERIVED && // derived vs base?
 978                     mach && mach->ideal_Opcode() != Op_Halt &&
 979                     derived_debug != NULL &&
 980                     derived_debug != def ) { // Actual 2nd value appears
 981                   // We have already set 'def' as a derived value.
 982                   // Also set debug_defs[slidx] as a derived value.
 983                   uint k;
 984                   for( k = oopoff; k < cnt; k += 2 )
 985                     if( n->in(k) == derived_debug )
 986                       break;      // Found an instance of debug derived
 987                   if( k == cnt ) {// No instance of debug_defs[slidx]
 988                     // Add a derived/base pair to cover the debug info.
 989                     // We have to process the added base later, since it is not
 990                     // handled yet at this point, but skip the derived part.
 991                     assert(((n->req() - oopoff) & 1) == DERIVED,
 992                            "must match skip condition above");
 993                     n->add_req( derived_debug );   // this will be skipped above
 994                     n->add_req( n->in(inpidx+1) ); // this will be processed
 995                     // Increment cnt to handle added input edges on
 996                     // subsequent iterations.
 997                     cnt += 2;
 998                   }


1077             // resetting DEF. Finally if DEF is UP, Split-DOWN, with
1078             // special handling for Debug Info.
1079             if( dup == uup ) {
1080               if( dmask.overlap(umask) ) {
1081                 // Both are either up or down, and there is overlap, No Split
1082                 n->set_req(inpidx, def);
1083               }
1084               else {  // Both are either up or down, and there is no overlap
1085                 if( dup ) {  // If UP, reg->reg copy
1086                   // COPY ACROSS HERE - NO DEF - NO CISC SPILL
1087                   maxlrg = split_USE(MachSpillCopyNode::RegToReg, def,b,n,inpidx,maxlrg,false,false, splits,slidx);
1088                   // If it wasn't split, bail.
1089                   if (!maxlrg) {
1090                     return 0;
1091                   }
1092                   insidx++;  // Reset iterator to skip USE side split
1093                 }
1094                 else {       // DOWN, mem->mem copy
1095                   // COPY UP & DOWN HERE - NO DEF - NO CISC SPILL
1096                   // First Split-UP to move value into Register
1097                   uint def_ideal = def->ideal_reg();
1098                   const RegMask* tmp_rm = Matcher::idealreg2regmask[def_ideal];
1099                   Node *spill = new MachSpillCopyNode(MachSpillCopyNode::MemToReg, def, dmask, *tmp_rm);
1100                   insert_proj( b, insidx, spill, maxlrg );
1101                   // Then Split-DOWN as if previous Split was DEF
1102                   maxlrg = split_USE(MachSpillCopyNode::RegToMem, spill,b,n,inpidx,maxlrg,false,false, splits,slidx);
1103                   // If it wasn't split, bail.
1104                   if (!maxlrg) {
1105                     return 0;
1106                   }
1107                   insidx += 2;  // Reset iterator to skip USE side splits
1108                 }
1109               }  // End else no overlap
1110             }  // End if dup == uup
1111             // dup != uup, so check dup for direction of Split
1112             else {
1113               if( dup ) {  // If UP, Split-DOWN and check Debug Info
1114                 // If this node is already a SpillCopy, just patch the edge
1115                 // except the case of spilling to stack.
1116                 if( n->is_SpillCopy() ) {
1117                   RegMask tmp_rm(umask);
1118                   tmp_rm.SUBTRACT(Matcher::STACK_ONLY_mask);


1167           }  // End if Spill USE
1168         }  // End For All Inputs
1169       }  // End If not nullcheck
1170 
1171       // ********** Handle DEFS **********
1172       // DEFS either Split DOWN in HRP regions or when the LRG is bound, or
1173       // just reset the Reaches info in LRP regions.  DEFS must always update
1174       // UP info.
1175       if( deflrg.reg() >= LRG::SPILL_REG ) {    // Spilled?
1176         uint slidx = lrg2reach[defidx];
1177         // Add to defs list for later assignment of new live range number
1178         defs->push(n);
1179         // Set a flag on the Node indicating it has already spilled.
1180         // Only do it for capacity spills not conflict spills.
1181         if( !deflrg._direct_conflict )
1182           set_was_spilled(n);
1183         assert(!n->is_Phi(),"Cannot insert Phi into DEFS list");
1184         // Grab UP info for DEF
1185         const RegMask &dmask = n->out_RegMask();
1186         bool defup = dmask.is_UP();
1187         int ireg = n->ideal_reg();
1188         bool is_vect = RegMask::is_vector(ireg);
1189         // Only split at Def if this is an HRP block or bound (and spilled once)
1190         if( !n->rematerialize() &&
1191             (((dmask.is_bound(ireg) || (!is_vect && dmask.is_misaligned_pair())) &&
1192               (deflrg._direct_conflict || deflrg._must_spill)) ||
1193              // Check for LRG being up in a register and we are inside a high
1194              // pressure area.  Spill it down immediately.
1195              (defup && is_high_pressure(b,&deflrg,insidx))) ) {
1196           assert( !n->rematerialize(), "" );
1197           assert( !n->is_SpillCopy(), "" );
1198           // Do a split at the def site.
1199           maxlrg = split_DEF( n, b, insidx, maxlrg, Reachblock, debug_defs, splits, slidx );
1200           // If it wasn't split, bail.
1201           if (!maxlrg) {
1202             return 0;
1203           }
1204           // Split DEF's Down
1205           UPblock[slidx] = 0;
1206 #ifndef PRODUCT
1207           // DEBUG


1226             tty->print("%d, UP %d:\n",slidx,defup);
1227             n->dump();
1228           }
1229 #endif
1230         }  // End else LRP
1231       }  // End if spill def
1232 
1233       // ********** Split Left Over Mem-Mem Moves **********
1234       // Check for mem-mem copies and split them now.  Do not do this
1235       // to copies about to be spilled; they will be Split shortly.
1236       if (copyidx) {
1237         Node *use = n->in(copyidx);
1238         uint useidx = _lrg_map.find_id(use);
1239         if (useidx < _lrg_map.max_lrg_id() &&       // This is not a new split
1240             OptoReg::is_stack(deflrg.reg()) &&
1241             deflrg.reg() < LRG::SPILL_REG ) { // And DEF is from stack
1242           LRG &uselrg = lrgs(useidx);
1243           if( OptoReg::is_stack(uselrg.reg()) &&
1244               uselrg.reg() < LRG::SPILL_REG && // USE is from stack
1245               deflrg.reg() != uselrg.reg() ) { // Not trivially removed
1246             uint def_ideal_reg = n->bottom_type()->ideal_reg();
1247             const RegMask &def_rm = *Matcher::idealreg2regmask[def_ideal_reg];
1248             const RegMask &use_rm = n->in_RegMask(copyidx);
1249             if( def_rm.overlap(use_rm) && n->is_SpillCopy() ) {  // Bug 4707800, 'n' may be a storeSSL
1250               if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {  // Check when generating nodes
1251                 return 0;
1252               }
1253               Node *spill = new MachSpillCopyNode(MachSpillCopyNode::MemToReg, use,use_rm,def_rm);
1254               n->set_req(copyidx,spill);
1255               n->as_MachSpillCopy()->set_in_RegMask(def_rm);
1256               // Put the spill just before the copy
1257               insert_proj( b, insidx++, spill, maxlrg++ );
1258             }
1259           }
1260         }
1261       }
1262     }  // End For All Instructions in Block - Non-PHI Pass
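
The left-over mem-mem case above exists because a stack-slot-to-stack-slot
move cannot be encoded directly; inserting a MemToReg SpillCopy in front
of the copy turns mem->mem into mem->reg->mem. A minimal standalone model
(names hypothetical):

    #include <cstdio>

    struct Move { const char *src, *dst; };  // toy: one side must be a reg

    int main() {
      Move copy = {"stack_A", "stack_B"};    // illegal mem->mem copy
      Move fill = {copy.src, "reg_tmp"};     // inserted MemToReg SpillCopy
      copy.src = fill.dst;                   // original copy is now reg->mem
      std::printf("%s -> %s, then %s -> %s\n",
                  fill.src, fill.dst, copy.src, copy.dst);
      return 0;
    }
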
1263 
1264     // Check if each LRG is live out of this block so as not to propagate
1265     // beyond the last use of a LRG.
1266     for( slidx = 0; slidx < spill_cnt; slidx++ ) {
1267       uint defidx = lidxs.at(slidx);




  43 // point between LRP and HRP.  Uses in the HRP region will use the spilled
  44 // Def.  The first Use outside the HRP region will generate a SpillCopy to
  45 // hoist the live range back up into a register, and all subsequent uses
  46 // will use that new Def until another HRP region is encountered.  Defs in
  47 // HRP regions will get trailing SpillCopies to push the LRG down into the
  48 // stack immediately.
  49 //
  50 // As a side effect, coalesced copies are unlinked and hence made dead.
  51 //
  52 
  53 static const char out_of_nodes[] = "out of nodes during split";
  54 
  55 //------------------------------get_spillcopy_wide-----------------------------
  56 // Get a SpillCopy node with wide-enough masks.  Use the 'wide-mask', the
  57 // wide ideal-register spill-mask if possible.  If the 'wide-mask' does
  58 // not cover the input (or output), use the input (or output) mask instead.
  59 Node *PhaseChaitin::get_spillcopy_wide(MachSpillCopyNode::SpillType spill_type, Node *def, Node *use, uint uidx) {
  60   // If ideal reg doesn't exist we've got a bad schedule happening
  61   // that is forcing us to spill something that isn't spillable.
  62   // Bail rather than abort
  63   Opcodes ireg = def->ideal_reg();
  64   if (ireg == Opcodes::Op_Node || ireg == Opcodes::Op_RegFlags) {
  65     assert(false, "attempted to spill a non-spillable item: %d: %s <- %d: %s, ireg = %u, spill_type: %s",
  66            def->_idx, def->Name(), use ? use->_idx : 0, use ? use->Name() : "NULL", static_cast<uint>(ireg),
  67            MachSpillCopyNode::spill_type(spill_type));
  68     C->record_method_not_compilable("attempted to spill a non-spillable item");
  69     return NULL;
  70   }
  71   if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
  72     return NULL;
  73   }
  74   const RegMask *i_mask = &def->out_RegMask();
  75   const RegMask *w_mask = C->matcher()->idealreg2spillmask[static_cast<uint>(ireg)];
  76   const RegMask *o_mask = use ? &use->in_RegMask(uidx) : w_mask;
  77   const RegMask *w_i_mask = w_mask->overlap( *i_mask ) ? w_mask : i_mask;
  78   const RegMask *w_o_mask;
  79 
  80   int num_regs = RegMask::num_registers(ireg);
  81   bool is_vect = RegMask::is_vector(ireg);
  82   if( w_mask->overlap( *o_mask ) && // Overlap AND
  83       ((num_regs == 1) // Single use or aligned
  84         ||  is_vect    // or vector
  85         || (!is_vect && o_mask->is_aligned_pairs())) ) {
  86     assert(!is_vect || o_mask->is_aligned_sets(num_regs), "vectors are aligned");
  87     // Don't come here for mis-aligned doubles
  88     w_o_mask = w_mask;
  89   } else {                      // wide ideal mask does not overlap with o_mask
  90     // Mis-aligned doubles come here and XMM->FPR moves on x86.
  91     w_o_mask = o_mask;          // Must target desired registers
  92     // Does the ideal-reg-mask overlap with o_mask?  I.e., can I use
  93     // a reg-reg move or do I need a trip across register classes
  94     // (and thus through memory)?
  95     if( !C->matcher()->idealreg2regmask[static_cast<uint>(ireg)]->overlap( *o_mask) && o_mask->is_UP() )
  96       // Here we assume a trip through memory is required.
  97       w_i_mask = &C->FIRST_STACK_mask();
  98   }
  99   return new MachSpillCopyNode(spill_type, def, *w_i_mask, *w_o_mask );
 100 }
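
This second listing (apparently the patched side of the webrev) replaces
the plain-int ideal-register codes with a scoped Opcodes enum, which is
why every table subscript above gained a static_cast<uint>(...). A minimal
sketch of the pattern and its trade-off (types simplified, names
hypothetical):

    #include <cstdio>

    enum class Opcodes : unsigned { Op_Node = 0, Op_RegFlags = 1, Op_RegI = 2 };

    static const char *idealreg2name[] = {"node", "flags", "int"};

    int main() {
      Opcodes ireg = Opcodes::Op_RegI;
      // A scoped enum no longer converts implicitly, so accidental
      // int/opcode mixups fail to compile, but indexing must be explicit:
      std::printf("%s\n", idealreg2name[static_cast<unsigned>(ireg)]);
      return 0;
    }
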
 101 
 102 //------------------------------insert_proj------------------------------------
 103 // Insert the spill at chosen location.  Skip over any intervening Proj's or
 104 // Phis.  Skip over a CatchNode and projs, inserting in the fall-through block
 105 // instead.  Update high-pressure indices.  Create a new live range.
 106 void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
 107   // Skip intervening ProjNodes.  Do not insert between a ProjNode and
 108   // its definer.
 109   while( i < b->number_of_nodes() &&
 110          (b->get_node(i)->is_Proj() ||
 111           b->get_node(i)->is_Phi() ) )
 112     i++;
 113 
 114   // Do not insert between a call and its Catch
 115   if( b->get_node(i)->is_Catch() ) {


 139 //------------------------------split_DEF--------------------------------------
 140 // There are four categories of Split: UP/DOWN x DEF/USE.
 141 // Only three of these really occur, as a DOWN/USE will always color.
 142 // Any Split with a DEF cannot CISC-Spill now.  Thus we need
 143 // two helper routines, one for Split DEFS (insert after the instruction),
 144 // one for Split USES (insert before the instruction).  DEF insertion
 145 // happens inside Split, where the Leaveblock array is updated.
 146 uint PhaseChaitin::split_DEF( Node *def, Block *b, int loc, uint maxlrg, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx ) {
 147 #ifdef ASSERT
 148   // Increment the counter for this lrg
 149   splits.at_put(slidx, splits.at(slidx)+1);
 150 #endif
 151   // If we are spilling the memory op for an implicit null check, at the
 152   // null check location (i.e., the null check is in the HRP block) we need to do
 153   // the null-check first, then spill-down in the following block.
 154   // (The implicit_null_check function ensures the use is also dominated
 155   // by the branch-not-taken block.)
 156   Node *be = b->end();
 157   if( be->is_MachNullCheck() && be->in(1) == def && def == b->get_node(loc)) {
 158     // Spill goes in the branch-not-taken block
 159     b = b->_succs[b->get_node(b->end_idx()+1)->Opcode() == Opcodes::Op_IfTrue];
 160     loc = 0;                    // Just past the Region
 161   }
 162   assert( loc >= 0, "must insert past block head" );
 163 
 164   // Get a def-side SpillCopy
 165   Node *spill = get_spillcopy_wide(MachSpillCopyNode::Definition, def, NULL, 0);
 166   // Did we fail to split? Then bail.
 167   if (!spill) {
 168     return 0;
 169   }
 170 
 171   // Insert the spill at chosen location
 172   insert_proj( b, loc+1, spill, maxlrg++);
 173 
 174   // Insert new node into Reaches array
 175   Reachblock[slidx] = spill;
 176   // Update debug list of reaching down definitions by adding this one
 177   debug_defs[slidx] = spill;
 178 
 179   // return updated count of live ranges


 317   // The input live ranges will be stretched to the site of the new
 318   // instruction.  They might be stretched past a def and will thus
 319   // have the old and new values of the same live range alive at the
 320   // same time - a definite no-no.  Split out private copies of
 321   // the inputs.
 322   if (def->req() > 1) {
 323     for (uint i = 1; i < def->req(); i++) {
 324       Node *in = def->in(i);
 325       uint lidx = _lrg_map.live_range_id(in);
 326       // We do not need this for live ranges that are only defined once.
 327       // However, this is not true for spill copies that are added in this
 328       // Split() pass, since they might get coalesced later on in this pass.
 329       if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).is_singledef()) {
 330         continue;
 331       }
 332 
 333       Block *b_def = _cfg.get_block_for_node(def);
 334       int idx_def = b_def->find_node(def);
 335       // Cannot spill Op_RegFlags.
 336       Node *in_spill;
 337       if (in->ideal_reg() != Opcodes::Op_RegFlags) {
 338         in_spill = get_spillcopy_wide(MachSpillCopyNode::InputToRematerialization, in, def, i);
 339         if (!in_spill) { return 0; } // Bailed out
 340         insert_proj(b_def, idx_def, in_spill, maxlrg++);
 341         if (b_def == b) {
 342           insidx++;
 343         }
 344         def->set_req(i, in_spill);
 345       } else {
 346         // The 'in' defines a flag register. Flag registers cannot be spilled.
 347         // Register allocation handles live ranges with flag registers
 348         // by rematerializing the def (in this case 'in'). Thus, this is not
 349         // critical if the input can be rematerialized, too.
 350         if (!in->rematerialize()) {
 351           assert(false, "Cannot rematerialize %d: %s. Prolongs RegFlags live"
 352                  " range and defining node %d: %s may not be rematerialized.",
 353                  def->_idx, def->Name(), in->_idx, in->Name());
 354           C->record_method_not_compilable("attempted to spill a non-spillable item with RegFlags input");
 355           return 0; // Bailed out
 356         }
 357       }
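
Flag registers have no stack home, so the only legal way to "split" such a
live range is rematerialization: re-execute the (cheap, side-effect-free)
defining node at the distant use instead of storing and reloading it. A
scalar sketch of the idea (a hypothetical model, not the allocator's API):

    #include <cstdio>

    static int compare(int a, int b) { return a < b; }  // cheap, pure def

    int main() {
      int a = 1, b = 2;
      int flags = compare(a, b);        // original def of the "flags" value
      // Much later the value is needed again; instead of a store/load
      // through memory (impossible for flags), clone the def at the use:
      int flags_again = compare(a, b);  // rematerialized, not reloaded
      std::printf("%d %d\n", flags, flags_again);
      return 0;
    }
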


 934                 return 0;
 935               }
 936               _lrg_map.extend(def->_idx, 0);
 937               _cfg.map_node_to_block(def, b);
 938               n->set_req(inpidx, def);
 939               continue;
 940             }
 941 
 942             // Rematerializable?  Then clone def at use site instead
 943             // of store/load
 944             if( def->rematerialize() ) {
 945               int old_size = b->number_of_nodes();
 946               def = split_Rematerialize( def, b, insidx, maxlrg, splits, slidx, lrg2reach, Reachblock, true );
 947               if( !def ) return 0; // Bail out
 948               insidx += b->number_of_nodes()-old_size;
 949             }
 950 
 951             MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL;
 952             // Base pointers and oopmap references do not care where they live.
 953             if ((inpidx >= oopoff) ||
 954                 (mach && mach->ideal_Opcode() == Opcodes::Op_AddP && inpidx == AddPNode::Base)) {
 955               if (def->rematerialize() && lrgs(useidx)._was_spilled2) {
 956                 // This def has been rematerialized a couple of times without
 957                 // progress. It doesn't care if it lives UP or DOWN, so
 958                 // spill it down now.
 959                 maxlrg = split_USE(MachSpillCopyNode::BasePointerToMem, def,b,n,inpidx,maxlrg,false,false,splits,slidx);
 960                 // If it wasn't split, bail.
 961                 if (!maxlrg) {
 962                   return 0;
 963                 }
 964                 insidx++;  // Reset iterator to skip USE side split
 965               } else {
 966                 // Just hook the def edge
 967                 n->set_req(inpidx, def);
 968               }
 969 
 970               if (inpidx >= oopoff) {
 971                 // After oopoff, we have derived/base pairs.  We must mention all
 972                 // derived pointers here as derived/base pairs for GC.  If the
 973                 // derived value is spilling and we have a copy both in Reachblock
 974                 // (called here 'def') and debug_defs[slidx] we need to mention
 975                 // both in derived/base pairs or kill one.
 976                 Node *derived_debug = debug_defs[slidx];
 977                 if( ((inpidx - oopoff) & 1) == DERIVED && // derived vs base?
 978                     mach && mach->ideal_Opcode() != Opcodes::Op_Halt &&
 979                     derived_debug != NULL &&
 980                     derived_debug != def ) { // Actual 2nd value appears
 981                   // We have already set 'def' as a derived value.
 982                   // Also set debug_defs[slidx] as a derived value.
 983                   uint k;
 984                   for( k = oopoff; k < cnt; k += 2 )
 985                     if( n->in(k) == derived_debug )
 986                       break;      // Found an instance of debug derived
 987                   if( k == cnt ) {// No instance of debug_defs[slidx]
 988                     // Add a derived/base pair to cover the debug info.
 989                     // We have to process the added base later, since it is not
 990                     // handled yet at this point, but skip the derived part.
 991                     assert(((n->req() - oopoff) & 1) == DERIVED,
 992                            "must match skip condition above");
 993                     n->add_req( derived_debug );   // this will be skipped above
 994                     n->add_req( n->in(inpidx+1) ); // this will be processed
 995                     // Increment cnt to handle added input edges on
 996                     // subsequent iterations.
 997                     cnt += 2;
 998                   }


1077             // resetting DEF. Finally if DEF is UP, Split-DOWN, with
1078             // special handling for Debug Info.
1079             if( dup == uup ) {
1080               if( dmask.overlap(umask) ) {
1081                 // Both are either up or down, and there is overlap, No Split
1082                 n->set_req(inpidx, def);
1083               }
1084               else {  // Both are either up or down, and there is no overlap
1085                 if( dup ) {  // If UP, reg->reg copy
1086                   // COPY ACROSS HERE - NO DEF - NO CISC SPILL
1087                   maxlrg = split_USE(MachSpillCopyNode::RegToReg, def,b,n,inpidx,maxlrg,false,false, splits,slidx);
1088                   // If it wasn't split, bail.
1089                   if (!maxlrg) {
1090                     return 0;
1091                   }
1092                   insidx++;  // Reset iterator to skip USE side split
1093                 }
1094                 else {       // DOWN, mem->mem copy
1095                   // COPY UP & DOWN HERE - NO DEF - NO CISC SPILL
1096                   // First Split-UP to move value into Register
1097                   Opcodes def_ideal = def->ideal_reg();
1098                   const RegMask* tmp_rm = Matcher::idealreg2regmask[static_cast<uint>(def_ideal)];
1099                   Node *spill = new MachSpillCopyNode(MachSpillCopyNode::MemToReg, def, dmask, *tmp_rm);
1100                   insert_proj( b, insidx, spill, maxlrg );
1101                   // Then Split-DOWN as if previous Split was DEF
1102                   maxlrg = split_USE(MachSpillCopyNode::RegToMem, spill,b,n,inpidx,maxlrg,false,false, splits,slidx);
1103                   // If it wasn't split, bail.
1104                   if (!maxlrg) {
1105                     return 0;
1106                   }
1107                   insidx += 2;  // Reset iterator to skip USE side splits
1108                 }
1109               }  // End else no overlap
1110             }  // End if dup == uup
1111             // dup != uup, so check dup for direction of Split
1112             else {
1113               if( dup ) {  // If UP, Split-DOWN and check Debug Info
1114                 // If this node is already a SpillCopy, just patch the edge
1115                 // except the case of spilling to stack.
1116                 if( n->is_SpillCopy() ) {
1117                   RegMask tmp_rm(umask);
1118                   tmp_rm.SUBTRACT(Matcher::STACK_ONLY_mask);


1167           }  // End if Spill USE
1168         }  // End For All Inputs
1169       }  // End If not nullcheck
1170 
1171       // ********** Handle DEFS **********
1172       // DEFS either Split DOWN in HRP regions or when the LRG is bound, or
1173       // just reset the Reaches info in LRP regions.  DEFS must always update
1174       // UP info.
1175       if( deflrg.reg() >= LRG::SPILL_REG ) {    // Spilled?
1176         uint slidx = lrg2reach[defidx];
1177         // Add to defs list for later assignment of new live range number
1178         defs->push(n);
1179         // Set a flag on the Node indicating it has already spilled.
1180         // Only do it for capacity spills not conflict spills.
1181         if( !deflrg._direct_conflict )
1182           set_was_spilled(n);
1183         assert(!n->is_Phi(),"Cannot insert Phi into DEFS list");
1184         // Grab UP info for DEF
1185         const RegMask &dmask = n->out_RegMask();
1186         bool defup = dmask.is_UP();
1187         Opcodes ireg = n->ideal_reg();
1188         bool is_vect = RegMask::is_vector(ireg);
1189         // Only split at Def if this is an HRP block or bound (and spilled once)
1190         if( !n->rematerialize() &&
1191             (((dmask.is_bound(ireg) || (!is_vect && dmask.is_misaligned_pair())) &&
1192               (deflrg._direct_conflict || deflrg._must_spill)) ||
1193              // Check for LRG being up in a register and we are inside a high
1194              // pressure area.  Spill it down immediately.
1195              (defup && is_high_pressure(b,&deflrg,insidx))) ) {
1196           assert( !n->rematerialize(), "" );
1197           assert( !n->is_SpillCopy(), "" );
1198           // Do a split at the def site.
1199           maxlrg = split_DEF( n, b, insidx, maxlrg, Reachblock, debug_defs, splits, slidx );
1200           // If it wasn't split, bail.
1201           if (!maxlrg) {
1202             return 0;
1203           }
1204           // Split DEF's Down
1205           UPblock[slidx] = 0;
1206 #ifndef PRODUCT
1207           // DEBUG


1226             tty->print("%d, UP %d:\n",slidx,defup);
1227             n->dump();
1228           }
1229 #endif
1230         }  // End else LRP
1231       }  // End if spill def
1232 
1233       // ********** Split Left Over Mem-Mem Moves **********
1234       // Check for mem-mem copies and split them now.  Do not do this
1235       // to copies about to be spilled; they will be Split shortly.
1236       if (copyidx) {
1237         Node *use = n->in(copyidx);
1238         uint useidx = _lrg_map.find_id(use);
1239         if (useidx < _lrg_map.max_lrg_id() &&       // This is not a new split
1240             OptoReg::is_stack(deflrg.reg()) &&
1241             deflrg.reg() < LRG::SPILL_REG ) { // And DEF is from stack
1242           LRG &uselrg = lrgs(useidx);
1243           if( OptoReg::is_stack(uselrg.reg()) &&
1244               uselrg.reg() < LRG::SPILL_REG && // USE is from stack
1245               deflrg.reg() != uselrg.reg() ) { // Not trivially removed
1246             Opcodes def_ideal_reg = n->bottom_type()->ideal_reg();
1247             const RegMask &def_rm = *Matcher::idealreg2regmask[static_cast<uint>(def_ideal_reg)];
1248             const RegMask &use_rm = n->in_RegMask(copyidx);
1249             if( def_rm.overlap(use_rm) && n->is_SpillCopy() ) {  // Bug 4707800, 'n' may be a storeSSL
1250               if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {  // Check when generating nodes
1251                 return 0;
1252               }
1253               Node *spill = new MachSpillCopyNode(MachSpillCopyNode::MemToReg, use,use_rm,def_rm);
1254               n->set_req(copyidx,spill);
1255               n->as_MachSpillCopy()->set_in_RegMask(def_rm);
1256               // Put the spill just before the copy
1257               insert_proj( b, insidx++, spill, maxlrg++ );
1258             }
1259           }
1260         }
1261       }
1262     }  // End For All Instructions in Block - Non-PHI Pass
1263 
1264     // Check if each LRG is live out of this block so as not to propagate
1265     // beyond the last use of a LRG.
1266     for( slidx = 0; slidx < spill_cnt; slidx++ ) {
1267       uint defidx = lidxs.at(slidx);

