1 /*
   2  * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/oopMap.hpp"
  27 #include "memory/allocation.inline.hpp"
  28 #include "opto/addnode.hpp"
  29 #include "opto/block.hpp"
  30 #include "opto/callnode.hpp"
  31 #include "opto/cfgnode.hpp"
  32 #include "opto/chaitin.hpp"
  33 #include "opto/coalesce.hpp"
  34 #include "opto/connode.hpp"
  35 #include "opto/indexSet.hpp"
  36 #include "opto/machnode.hpp"
  37 #include "opto/memnode.hpp"
  38 #include "opto/opcodes.hpp"
  39 
// Construct an empty interference graph whose storage is drawn from 'arena'.
// The adjacency lists and live range structures are allocated later by init().
PhaseIFG::PhaseIFG( Arena *arena ) : Phase(Interference_Graph), _arena(arena) {
}
  42 
  43 void PhaseIFG::init( uint maxlrg ) {
  44   _maxlrg = maxlrg;
  45   _yanked = new (_arena) VectorSet(_arena);
  46   _is_square = false;
  47   // Make uninitialized adjacency lists
  48   _adjs = (IndexSet*)_arena->Amalloc(sizeof(IndexSet)*maxlrg);
  49   // Also make empty live range structures
  50   _lrgs = (LRG *)_arena->Amalloc( maxlrg * sizeof(LRG) );
  51   memset(_lrgs,0,sizeof(LRG)*maxlrg);
  52   // Init all to empty
  53   for( uint i = 0; i < maxlrg; i++ ) {
  54     _adjs[i].initialize(maxlrg);
  55     _lrgs[i].Set_All();
  56   }
  57 }
  58 
  59 // Add edge between vertices a & b.  These are sorted (triangular matrix),
  60 // then the smaller number is inserted in the larger numbered array.
  61 int PhaseIFG::add_edge( uint a, uint b ) {
  62   lrgs(a).invalid_degree();
  63   lrgs(b).invalid_degree();
  64   // Sort a and b, so that a is bigger
  65   assert( !_is_square, "only on triangular" );
  66   if( a < b ) { uint tmp = a; a = b; b = tmp; }
  67   return _adjs[a].insert( b );
  68 }
  69 
  70 // Add an edge between 'a' and everything in the vector.
  71 void PhaseIFG::add_vector( uint a, IndexSet *vec ) {
  72   // IFG is triangular, so do the inserts where 'a' < 'b'.
  73   assert( !_is_square, "only on triangular" );
  74   IndexSet *adjs_a = &_adjs[a];
  75   if( !vec->count() ) return;
  76 
  77   IndexSetIterator elements(vec);
  78   uint neighbor;
  79   while ((neighbor = elements.next()) != 0) {
  80     add_edge( a, neighbor );
  81   }
  82 }
  83 
  84 // Is there an edge between a and b?
  85 int PhaseIFG::test_edge( uint a, uint b ) const {
  86   // Sort a and b, so that a is larger
  87   assert( !_is_square, "only on triangular" );
  88   if( a < b ) { uint tmp = a; a = b; b = tmp; }
  89   return _adjs[a].member(b);
  90 }
  91 
  92 // Convert triangular matrix to square matrix
  93 void PhaseIFG::SquareUp() {
  94   assert( !_is_square, "only on triangular" );
  95 
  96   // Simple transpose
  97   for( uint i = 0; i < _maxlrg; i++ ) {
  98     IndexSetIterator elements(&_adjs[i]);
  99     uint datum;
 100     while ((datum = elements.next()) != 0) {
 101       _adjs[datum].insert( i );
 102     }
 103   }
 104   _is_square = true;
 105 }
 106 
 107 // Compute effective degree in bulk
 108 void PhaseIFG::Compute_Effective_Degree() {
 109   assert( _is_square, "only on square" );
 110 
 111   for( uint i = 0; i < _maxlrg; i++ )
 112     lrgs(i).set_degree(effective_degree(i));
 113 }
 114 
 115 int PhaseIFG::test_edge_sq( uint a, uint b ) const {
 116   assert( _is_square, "only on square" );
 117   // Swap, so that 'a' has the lesser count.  Then binary search is on
 118   // the smaller of a's list and b's list.
 119   if( neighbor_cnt(a) > neighbor_cnt(b) ) { uint tmp = a; a = b; b = tmp; }
 120   //return _adjs[a].unordered_member(b);
 121   return _adjs[a].member(b);
 122 }
 123 
 124 // Union edges of B into A
 125 void PhaseIFG::Union( uint a, uint b ) {
 126   assert( _is_square, "only on square" );
 127   IndexSet *A = &_adjs[a];
 128   IndexSetIterator b_elements(&_adjs[b]);
 129   uint datum;
 130   while ((datum = b_elements.next()) != 0) {
 131     if(A->insert(datum)) {
 132       _adjs[datum].insert(a);
 133       lrgs(a).invalid_degree();
 134       lrgs(datum).invalid_degree();
 135     }
 136   }
 137 }
 138 
// Yank a Node and all connected edges from the IFG.  Return a
// list of neighbors (edges) yanked.
IndexSet *PhaseIFG::remove_node( uint a ) {
  assert( _is_square, "only on square" );
  assert( !_yanked->test(a), "" );   // Must not be yanked twice
  _yanked->set(a);                   // Mark 'a' as removed

  // I remove the LRG from all neighbors.
  IndexSetIterator elements(&_adjs[a]);
  LRG &lrg_a = lrgs(a);
  uint datum;
  while ((datum = elements.next()) != 0) {
    // Drop the back-edge neighbor->a, then keep the neighbor's cached
    // effective degree consistent by subtracting this edge's contribution.
    _adjs[datum].remove(a);
    lrgs(datum).inc_degree( -lrg_a.compute_degree(lrgs(datum)) );
  }
  // 'a's own adjacency list is left intact so re_insert() can restore
  // the edges later.
  return neighbors(a);
}
 156 
 157 // Re-insert a yanked Node.
 158 void PhaseIFG::re_insert( uint a ) {
 159   assert( _is_square, "only on square" );
 160   assert( _yanked->test(a), "" );
 161   (*_yanked) >>= a;
 162 
 163   IndexSetIterator elements(&_adjs[a]);
 164   uint datum;
 165   while ((datum = elements.next()) != 0) {
 166     _adjs[datum].insert(a);
 167     lrgs(datum).invalid_degree();
 168   }
 169 }
 170 
 171 // Compute the degree between 2 live ranges.  If both live ranges are
 172 // aligned-adjacent powers-of-2 then we use the MAX size.  If either is
 173 // mis-aligned (or for Fat-Projections, not-adjacent) then we have to
 174 // MULTIPLY the sizes.  Inspect Brigg's thesis on register pairs to see why
 175 // this is so.
 176 int LRG::compute_degree( LRG &l ) const {
 177   int tmp;
 178   int num_regs = _num_regs;
 179   int nregs = l.num_regs();
 180   tmp =  (_fat_proj || l._fat_proj)     // either is a fat-proj?
 181     ? (num_regs * nregs)                // then use product
 182     : MAX2(num_regs,nregs);             // else use max
 183   return tmp;
 184 }
 185 
 186 // Compute effective degree for this live range.  If both live ranges are
 187 // aligned-adjacent powers-of-2 then we use the MAX size.  If either is
 188 // mis-aligned (or for Fat-Projections, not-adjacent) then we have to
 189 // MULTIPLY the sizes.  Inspect Brigg's thesis on register pairs to see why
 190 // this is so.
 191 int PhaseIFG::effective_degree( uint lidx ) const {
 192   int eff = 0;
 193   int num_regs = lrgs(lidx).num_regs();
 194   int fat_proj = lrgs(lidx)._fat_proj;
 195   IndexSet *s = neighbors(lidx);
 196   IndexSetIterator elements(s);
 197   uint nidx;
 198   while((nidx = elements.next()) != 0) {
 199     LRG &lrgn = lrgs(nidx);
 200     int nregs = lrgn.num_regs();
 201     eff += (fat_proj || lrgn._fat_proj) // either is a fat-proj?
 202       ? (num_regs * nregs)              // then use product
 203       : MAX2(num_regs,nregs);           // else use max
 204   }
 205   return eff;
 206 }
 207 
 208 
 209 #ifndef PRODUCT
// Print the interference graph to the tty.  Square graphs list each node's
// full neighbor set.  Triangular graphs list, for each node, the implied
// edges found in larger-numbered lists left of a '|' and the node's own
// stored list right of it.  Yanked nodes are flagged with "XX".
void PhaseIFG::dump() const {
  tty->print_cr("-- Interference Graph --%s--",
                _is_square ? "square" : "triangular" );
  if( _is_square ) {
    for( uint i = 0; i < _maxlrg; i++ ) {
      tty->print( (*_yanked)[i] ? "XX " : "  ");  // Mark removed nodes
      tty->print("L%d: { ",i);
      IndexSetIterator elements(&_adjs[i]);
      uint datum;
      while ((datum = elements.next()) != 0) {
        tty->print("L%d ", datum);
      }
      tty->print_cr("}");

    }
    return;
  }

  // Triangular
  for( uint i = 0; i < _maxlrg; i++ ) {
    uint j;
    tty->print( (*_yanked)[i] ? "XX " : "  ");
    tty->print("L%d: { ",i);
    // Edges stored under larger-numbered vertices that point back to 'i'.
    for( j = _maxlrg; j > i; j-- )
      if( test_edge(j - 1,i) ) {
        tty->print("L%d ",j - 1);
      }
    tty->print("| ");
    // Edges stored directly in 'i's own adjacency list.
    IndexSetIterator elements(&_adjs[i]);
    uint datum;
    while ((datum = elements.next()) != 0) {
      tty->print("L%d ", datum);
    }
    tty->print("}\n");
  }
  tty->print("\n");
}
 247 
 248 void PhaseIFG::stats() const {
 249   ResourceMark rm;
 250   int *h_cnt = NEW_RESOURCE_ARRAY(int,_maxlrg*2);
 251   memset( h_cnt, 0, sizeof(int)*_maxlrg*2 );
 252   uint i;
 253   for( i = 0; i < _maxlrg; i++ ) {
 254     h_cnt[neighbor_cnt(i)]++;
 255   }
 256   tty->print_cr("--Histogram of counts--");
 257   for( i = 0; i < _maxlrg*2; i++ )
 258     if( h_cnt[i] )
 259       tty->print("%d/%d ",i,h_cnt[i]);
 260   tty->cr();
 261 }
 262 
// Assert the IFG is internally consistent: symmetric (square), with sorted
// adjacency lists, indices already union-find compressed, yanked nodes
// fully disconnected, and cached degrees matching a recomputation.
void PhaseIFG::verify( const PhaseChaitin *pc ) const {
  // IFG is square, sorted and no need for Find
  for( uint i = 0; i < _maxlrg; i++ ) {
    // A yanked node must have an empty neighbor list.
    assert(!((*_yanked)[i]) || !neighbor_cnt(i), "Is removed completely" );
    IndexSet *set = &_adjs[i];
    IndexSetIterator elements(set);
    uint idx;
    uint last = 0;
    while ((idx = elements.next()) != 0) {
      assert(idx != i, "Must have empty diagonal");
      // Live range indices must already be union-find roots.
      assert(pc->_lrg_map.find_const(idx) == idx, "Must not need Find");
      // Every edge must have its mirror image.
      assert(_adjs[idx].member(i), "IFG not square");
      assert(!(*_yanked)[idx], "No yanked neighbors");
      assert(last < idx, "not sorted increasing");
      last = idx;
    }
    // If a degree is cached, it must agree with a fresh recomputation.
    assert(!lrgs(i)._degree_valid || effective_degree(i) == lrgs(i).degree(), "degree is valid but wrong");
  }
}
 282 #endif
 283 
 284 // Interfere this register with everything currently live.  Use the RegMasks
 285 // to trim the set of possible interferences. Return a count of register-only
 286 // interferences as an estimate of register pressure.
 287 void PhaseChaitin::interfere_with_live( uint r, IndexSet *liveout ) {
 288   uint retval = 0;
 289   // Interfere with everything live.
 290   const RegMask &rm = lrgs(r).mask();
 291   // Check for interference by checking overlap of regmasks.
 292   // Only interfere if acceptable register masks overlap.
 293   IndexSetIterator elements(liveout);
 294   uint l;
 295   while( (l = elements.next()) != 0 )
 296     if( rm.overlap( lrgs(l).mask() ) )
 297       _ifg->add_edge( r, l );
 298 }
 299 
// Actually build the interference graph.  Uses virtual registers only, no
// physical register masks.  This allows me to be very aggressive when
// coalescing copies.  Some of this aggressiveness will have to be undone
// later, but I'd rather get all the copies I can now (since unremoved copies
// at this point can end up in bad places).  Copies I re-insert later I have
// more opportunity to insert them in low-frequency locations.
void PhaseChaitin::build_ifg_virtual( ) {

  // For all blocks (in any order) do...
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);
    IndexSet* liveout = _live->live(block);

    // The IFG is built by a single reverse pass over each basic block.
    // Starting with the known live-out set, we remove things that get
    // defined and add things that become live (essentially executing one
    // pass of a standard LIVE analysis). Just before a Node defines a value
    // (and removes it from the live-ness set) that value is certainly live.
    // The defined value interferes with everything currently live.  The
    // value is then removed from the live-ness set and it's inputs are
    // added to the live-ness set.
    for (uint j = block->end_idx() + 1; j > 1; j--) {
      Node* n = block->get_node(j - 1);

      // Get value being defined
      uint r = _lrg_map.live_range_id(n);

      // Some special values do not allocate
      if (r) {

        // Remove from live-out set
        liveout->remove(r);

        // Copies do not define a new value and so do not interfere.
        // Remove the copies source from the liveout set before interfering.
        // 'idx' is the input index of the copy's source, or 0 if 'n' is
        // not a copy.
        uint idx = n->is_Copy();
        if (idx) {
          liveout->remove(_lrg_map.live_range_id(n->in(idx)));
        }

        // Interfere with everything live
        interfere_with_live(r, liveout);
      }

      // Make all inputs live
      if (!n->is_Phi()) {      // Phi function uses come from prior block
        for(uint k = 1; k < n->req(); k++) {
          liveout->insert(_lrg_map.live_range_id(n->in(k)));
        }
      }

      // 2-address instructions always have the defined value live
      // on entry to the instruction, even though it is being defined
      // by the instruction.  We pretend a virtual copy sits just prior
      // to the instruction and kills the src-def'd register.
      // In other words, for 2-address instructions the defined value
      // interferes with all inputs.
      uint idx;
      if( n->is_Mach() && (idx = n->as_Mach()->two_adr()) ) {
        const MachNode *mach = n->as_Mach();
        // Sometimes my 2-address ADDs are commuted in a bad way.
        // We generally want the USE-DEF register to refer to the
        // loop-varying quantity, to avoid a copy.
        uint op = mach->ideal_Opcode();
        // Check that mach->num_opnds() == 3 to ensure instruction is
        // not subsuming constants, effectively excludes addI_cin_imm
        // Can NOT swap for instructions like addI_cin_imm since it
        // is adding zero to yhi + carry and the second ideal-input
        // points to the result of adding low-halves.
        // Checking req() and num_opnds() does NOT distinguish addI_cout from addI_cout_imm
        if( (op == Op_AddI && mach->req() == 3 && mach->num_opnds() == 3) &&
            n->in(1)->bottom_type()->base() == Type::Int &&
            // See if the ADD is involved in a tight data loop the wrong way
            n->in(2)->is_Phi() &&
            n->in(2)->in(2) == n ) {
          // Commute the two inputs so the loop-carried Phi value occupies
          // the USE-DEF slot.
          Node *tmp = n->in(1);
          n->set_req( 1, n->in(2) );
          n->set_req( 2, tmp );
        }
        // Defined value interferes with all inputs except the one it is
        // two-address-paired with (live range 'lidx').
        uint lidx = _lrg_map.live_range_id(n->in(idx));
        for (uint k = 1; k < n->req(); k++) {
          uint kidx = _lrg_map.live_range_id(n->in(k));
          if (kidx != lidx) {
            _ifg->add_edge(r, kidx);
          }
        }
      }
    } // End of forall instructions in block
  } // End of forall blocks
}
 391 
 392 uint PhaseChaitin::count_int_pressure( IndexSet *liveout ) {
 393   IndexSetIterator elements(liveout);
 394   uint lidx;
 395   uint cnt = 0;
 396   while ((lidx = elements.next()) != 0) {
 397     if( lrgs(lidx).mask().is_UP() &&
 398         lrgs(lidx).mask_size() &&
 399         !lrgs(lidx)._is_float &&
 400         !lrgs(lidx)._is_vector &&
 401         lrgs(lidx).mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) )
 402       cnt += lrgs(lidx).reg_pressure();
 403   }
 404   return cnt;
 405 }
 406 
 407 uint PhaseChaitin::count_float_pressure( IndexSet *liveout ) {
 408   IndexSetIterator elements(liveout);
 409   uint lidx;
 410   uint cnt = 0;
 411   while ((lidx = elements.next()) != 0) {
 412     if( lrgs(lidx).mask().is_UP() &&
 413         lrgs(lidx).mask_size() &&
 414         (lrgs(lidx)._is_float || lrgs(lidx)._is_vector))
 415       cnt += lrgs(lidx).reg_pressure();
 416   }
 417   return cnt;
 418 }
 419 
// Adjust register pressure down by 1.  Capture last hi-to-low transition,
// i.e. record in hrp_index the instruction index at which pressure fell
// back exactly to the register-file limit (INTPRESSURE / FLOATPRESSURE).
// pressure[0]/hrp_index[0] track the integer file, [1] the float file.
static void lower_pressure( LRG *lrg, uint where, Block *b, uint *pressure, uint *hrp_index ) {
  // Only UP live ranges with a non-empty mask contribute to pressure.
  if (lrg->mask().is_UP() && lrg->mask_size()) {
    if (lrg->_is_float || lrg->_is_vector) {
      pressure[1] -= lrg->reg_pressure();
      // Exact equality after the decrement marks a hi-to-lo transition.
      if( pressure[1] == (uint)FLOATPRESSURE ) {
        hrp_index[1] = where;
        if( pressure[1] > b->_freg_pressure )
          b->_freg_pressure = pressure[1]+1;
      }
    } else if( lrg->mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
      pressure[0] -= lrg->reg_pressure();
      if( pressure[0] == (uint)INTPRESSURE   ) {
        hrp_index[0] = where;
        if( pressure[0] > b->_reg_pressure )
          b->_reg_pressure = pressure[0]+1;
      }
    }
  }
}
 440 
 441 // Build the interference graph using physical registers when available.
 442 // That is, if 2 live ranges are simultaneously alive but in their acceptable
 443 // register sets do not overlap, then they do not interfere.
 444 uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
 445   NOT_PRODUCT( Compile::TracePhase t3("buildIFG", &_t_buildIFGphysical, TimeCompiler); )
 446 
 447   uint must_spill = 0;
 448 
 449   // For all blocks (in any order) do...
 450   for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
 451     Block* block = _cfg.get_block(i);
 452     // Clone (rather than smash in place) the liveout info, so it is alive
 453     // for the "collect_gc_info" phase later.
 454     IndexSet liveout(_live->live(block));
 455     uint last_inst = block->end_idx();
 456     // Compute first nonphi node index
 457     uint first_inst;
 458     for (first_inst = 1; first_inst < last_inst; first_inst++) {
 459       if (!block->get_node(first_inst)->is_Phi()) {
 460         break;
 461       }
 462     }
 463 
 464     // Spills could be inserted before CreateEx node which should be
 465     // first instruction in block after Phis. Move CreateEx up.
 466     for (uint insidx = first_inst; insidx < last_inst; insidx++) {
 467       Node *ex = block->get_node(insidx);
 468       if (ex->is_SpillCopy()) {
 469         continue;
 470       }
 471       if (insidx > first_inst && ex->is_Mach() && ex->as_Mach()->ideal_Opcode() == Op_CreateEx) {
 472         // If the CreateEx isn't above all the MachSpillCopies
 473         // then move it to the top.
 474         block->remove_node(insidx);
 475         block->insert_node(ex, first_inst);
 476       }
 477       // Stop once a CreateEx or any other node is found
 478       break;
 479     }
 480 
 481     // Reset block's register pressure values for each ifg construction
 482     uint pressure[2], hrp_index[2];
 483     pressure[0] = pressure[1] = 0;
 484     hrp_index[0] = hrp_index[1] = last_inst+1;
 485     block->_reg_pressure = block->_freg_pressure = 0;
 486     // Liveout things are presumed live for the whole block.  We accumulate
 487     // 'area' accordingly.  If they get killed in the block, we'll subtract
 488     // the unused part of the block from the area.
 489     int inst_count = last_inst - first_inst;
 490     double cost = (inst_count <= 0) ? 0.0 : block->_freq * double(inst_count);
 491     assert(!(cost < 0.0), "negative spill cost" );
 492     IndexSetIterator elements(&liveout);
 493     uint lidx;
 494     while ((lidx = elements.next()) != 0) {
 495       LRG &lrg = lrgs(lidx);
 496       lrg._area += cost;
 497       // Compute initial register pressure
 498       if (lrg.mask().is_UP() && lrg.mask_size()) {
 499         if (lrg._is_float || lrg._is_vector) {   // Count float pressure
 500           pressure[1] += lrg.reg_pressure();
 501           if (pressure[1] > block->_freg_pressure) {
 502             block->_freg_pressure = pressure[1];
 503           }
 504           // Count int pressure, but do not count the SP, flags
 505         } else if(lrgs(lidx).mask().overlap(*Matcher::idealreg2regmask[Op_RegI])) {
 506           pressure[0] += lrg.reg_pressure();
 507           if (pressure[0] > block->_reg_pressure) {
 508             block->_reg_pressure = pressure[0];
 509           }
 510         }
 511       }
 512     }
 513     assert( pressure[0] == count_int_pressure  (&liveout), "" );
 514     assert( pressure[1] == count_float_pressure(&liveout), "" );
 515 
 516     // The IFG is built by a single reverse pass over each basic block.
 517     // Starting with the known live-out set, we remove things that get
 518     // defined and add things that become live (essentially executing one
 519     // pass of a standard LIVE analysis).  Just before a Node defines a value
 520     // (and removes it from the live-ness set) that value is certainly live.
 521     // The defined value interferes with everything currently live.  The
 522     // value is then removed from the live-ness set and it's inputs are added
 523     // to the live-ness set.
 524     uint j;
 525     for (j = last_inst + 1; j > 1; j--) {
 526       Node* n = block->get_node(j - 1);
 527 
 528       // Get value being defined
 529       uint r = _lrg_map.live_range_id(n);
 530 
 531       // Some special values do not allocate
 532       if(r) {
 533         // A DEF normally costs block frequency; rematerialized values are
 534         // removed from the DEF sight, so LOWER costs here.
 535         lrgs(r)._cost += n->rematerialize() ? 0 : block->_freq;
 536 
 537         // If it is not live, then this instruction is dead.  Probably caused
 538         // by spilling and rematerialization.  Who cares why, yank this baby.
 539         if( !liveout.member(r) && n->Opcode() != Op_SafePoint ) {
 540           Node *def = n->in(0);
 541           if( !n->is_Proj() ||
 542               // Could also be a flags-projection of a dead ADD or such.
 543               (_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) {
 544             bool remove = true;
 545             if (n->is_MachProj()) {
 546               // Don't remove KILL projections if their "defining" nodes have
 547               // memory effects (have SCMemProj projection node) -
 548               // they are not dead even when their result is not used.
 549               // For example, compareAndSwapL (and other CAS) and EncodeISOArray nodes.
 550               // The method add_input_to_liveout() keeps such nodes alive (put them on liveout list)
 551               // when it sees SCMemProj node in a block. Unfortunately SCMemProj node could be placed
 552               // in block in such order that KILL MachProj nodes are processed first.
 553               uint cnt = def->outcnt();
 554               for (uint i = 0; i < cnt; i++) {
 555                 Node* proj = def->raw_out(i);
 556                 if (proj->Opcode() == Op_SCMemProj) {
 557                   remove = false;
 558                   break;
 559                 }
 560               }
 561             }
 562             if (remove) {
 563               block->remove_node(j - 1);
 564               if (lrgs(r)._def == n) {
 565                 lrgs(r)._def = 0;
 566               }
 567               n->disconnect_inputs(NULL, C);
 568               _cfg.unmap_node_from_block(n);
 569               n->replace_by(C->top());
 570               // Since yanking a Node from block, high pressure moves up one
 571               hrp_index[0]--;
 572               hrp_index[1]--;
 573               continue;
 574             }
 575           }
 576 
 577           // Fat-projections kill many registers which cannot be used to
 578           // hold live ranges.
 579           if (lrgs(r)._fat_proj) {
 580             // Count the int-only registers
 581             RegMask itmp = lrgs(r).mask();
 582             itmp.AND(*Matcher::idealreg2regmask[Op_RegI]);
 583             int iregs = itmp.Size();
 584             if (pressure[0]+iregs > block->_reg_pressure) {
 585               block->_reg_pressure = pressure[0] + iregs;
 586             }
 587             if (pressure[0] <= (uint)INTPRESSURE && pressure[0] + iregs > (uint)INTPRESSURE) {
 588               hrp_index[0] = j - 1;
 589             }
 590             // Count the float-only registers
 591             RegMask ftmp = lrgs(r).mask();
 592             ftmp.AND(*Matcher::idealreg2regmask[Op_RegD]);
 593             int fregs = ftmp.Size();
 594             if (pressure[1] + fregs > block->_freg_pressure) {
 595               block->_freg_pressure = pressure[1] + fregs;
 596             }
 597             if(pressure[1] <= (uint)FLOATPRESSURE && pressure[1]+fregs > (uint)FLOATPRESSURE) {
 598               hrp_index[1] = j - 1;
 599             }
 600           }
 601 
 602         } else {                // Else it is live
 603           // A DEF also ends 'area' partway through the block.
 604           lrgs(r)._area -= cost;
 605           assert(!(lrgs(r)._area < 0.0), "negative spill area" );
 606 
 607           // Insure high score for immediate-use spill copies so they get a color
 608           if( n->is_SpillCopy()
 609               && lrgs(r).is_singledef()        // MultiDef live range can still split
 610               && n->outcnt() == 1              // and use must be in this block
 611               && _cfg.get_block_for_node(n->unique_out()) == block) {
 612             // All single-use MachSpillCopy(s) that immediately precede their
 613             // use must color early.  If a longer live range steals their
 614             // color, the spill copy will split and may push another spill copy
 615             // further away resulting in an infinite spill-split-retry cycle.
 616             // Assigning a zero area results in a high score() and a good
 617             // location in the simplify list.
 618             //
 619 
 620             Node *single_use = n->unique_out();
 621             assert(block->find_node(single_use) >= j, "Use must be later in block");
 622             // Use can be earlier in block if it is a Phi, but then I should be a MultiDef
 623 
 624             // Find first non SpillCopy 'm' that follows the current instruction
 625             // (j - 1) is index for current instruction 'n'
 626             Node *m = n;
 627             for (uint i = j; i <= last_inst && m->is_SpillCopy(); ++i) {
 628               m = block->get_node(i);
 629             }
 630             if (m == single_use) {
 631               lrgs(r)._area = 0.0;
 632             }
 633           }
 634 
 635           // Remove from live-out set
 636           if( liveout.remove(r) ) {
 637             // Adjust register pressure.
 638             // Capture last hi-to-lo pressure transition
 639             lower_pressure(&lrgs(r), j - 1, block, pressure, hrp_index);
 640             assert( pressure[0] == count_int_pressure  (&liveout), "" );
 641             assert( pressure[1] == count_float_pressure(&liveout), "" );
 642           }
 643 
 644           // Copies do not define a new value and so do not interfere.
 645           // Remove the copies source from the liveout set before interfering.
 646           uint idx = n->is_Copy();
 647           if (idx) {
 648             uint x = _lrg_map.live_range_id(n->in(idx));
 649             if (liveout.remove(x)) {
 650               lrgs(x)._area -= cost;
 651               // Adjust register pressure.
 652               lower_pressure(&lrgs(x), j - 1, block, pressure, hrp_index);
 653               assert( pressure[0] == count_int_pressure  (&liveout), "" );
 654               assert( pressure[1] == count_float_pressure(&liveout), "" );
 655             }
 656           }
 657         } // End of if live or not
 658 
 659         // Interfere with everything live.  If the defined value must
 660         // go in a particular register, just remove that register from
 661         // all conflicting parties and avoid the interference.
 662 
 663         // Make exclusions for rematerializable defs.  Since rematerializable
 664         // DEFs are not bound but the live range is, some uses must be bound.
 665         // If we spill live range 'r', it can rematerialize at each use site
 666         // according to its bindings.
 667         const RegMask &rmask = lrgs(r).mask();
 668         if( lrgs(r).is_bound() && !(n->rematerialize()) && rmask.is_NotEmpty() ) {
 669           // Check for common case
 670           int r_size = lrgs(r).num_regs();
 671           OptoReg::Name r_reg = (r_size == 1) ? rmask.find_first_elem() : OptoReg::Physical;
 672           // Smear odd bits
 673           IndexSetIterator elements(&liveout);
 674           uint l;
 675           while ((l = elements.next()) != 0) {
 676             LRG &lrg = lrgs(l);
 677             // If 'l' must spill already, do not further hack his bits.
 678             // He'll get some interferences and be forced to spill later.
 679             if( lrg._must_spill ) continue;
 680             // Remove bound register(s) from 'l's choices
 681             RegMask old = lrg.mask();
 682             uint old_size = lrg.mask_size();
 683             // Remove the bits from LRG 'r' from LRG 'l' so 'l' no
 684             // longer interferes with 'r'.  If 'l' requires aligned
 685             // adjacent pairs, subtract out bit pairs.
 686             assert(!lrg._is_vector || !lrg._fat_proj, "sanity");
 687             if (lrg.num_regs() > 1 && !lrg._fat_proj) {
 688               RegMask r2mask = rmask;
 689               // Leave only aligned set of bits.
 690               r2mask.smear_to_sets(lrg.num_regs());
 691               // It includes vector case.
 692               lrg.SUBTRACT( r2mask );
 693               lrg.compute_set_mask_size();
 694             } else if( r_size != 1 ) { // fat proj
 695               lrg.SUBTRACT( rmask );
 696               lrg.compute_set_mask_size();
 697             } else {            // Common case: size 1 bound removal
 698               if( lrg.mask().Member(r_reg) ) {
 699                 lrg.Remove(r_reg);
                    // An AllStack mask has no finite cardinality, so keep the
                    // sentinel size rather than decrementing the old size.
 700                 lrg.set_mask_size(lrg.mask().is_AllStack() ? LRG::AllStack_size : old_size - 1);
 701               }
 702             }
 703             // If 'l' goes completely dry, it must spill.
 704             if( lrg.not_free() ) {
 705               // Give 'l' some kind of reasonable mask, so he picks up
 706               // interferences (and will spill later).
 707               lrg.set_mask( old );
 708               lrg.set_mask_size(old_size);
                  // Record the forced spill: bump the counter returned to the
                  // caller and pin this live range to the spill pseudo-register.
 709               must_spill++;
 710               lrg._must_spill = 1;
 711               lrg.set_reg(OptoReg::Name(LRG::SPILL_REG));
 712             }
 713           }
 714         } // End of if bound
 715 
 716         // Now interference with everything that is live and has
 717         // compatible register sets.
 718         interfere_with_live(r,&liveout);
 719 
 720       } // End of if normal register-allocated value
 721 
 722       // Area remaining in the block
 723       inst_count--;
      // Spill-area cost for things made live here: block frequency scaled
      // by how many instructions remain above this point in the block.
 724       cost = (inst_count <= 0) ? 0.0 : block->_freq * double(inst_count);
 725 
 726       // Make all inputs live
 727       if( !n->is_Phi() ) {      // Phi function uses come from prior block
 728         JVMState* jvms = n->jvms();
        // Inputs at index >= debug_start are debug info (no real use cost);
        // 999999 means "no JVM state, so every input is a real use".
 729         uint debug_start = jvms ? jvms->debug_start() : 999999;
 730         // Start loop at 1 (skip control edge) for most Nodes.
 731         // SCMemProj's might be the sole use of a StoreLConditional.
 732         // While StoreLConditionals set memory (the SCMemProj use)
 733         // they also def flags; if that flag def is unused the
 734         // allocator sees a flag-setting instruction with no use of
 735         // the flags and assumes it's dead.  This keeps the (useless)
 736         // flag-setting behavior alive while also keeping the (useful)
 737         // memory update effect.
 738         for (uint k = ((n->Opcode() == Op_SCMemProj) ? 0:1); k < n->req(); k++) {
 739           Node *def = n->in(k);
 740           uint x = _lrg_map.live_range_id(def);
            // Skip inputs with no live range id (not register-allocated).
 741           if (!x) {
 742             continue;
 743           }
 744           LRG &lrg = lrgs(x);
 745           // No use-side cost for spilling debug info
 746           if (k < debug_start) {
 747             // A USE costs twice block frequency (once for the Load, once
 748             // for a Load-delay).  Rematerialized uses only cost once.
 749             lrg._cost += (def->rematerialize() ? block->_freq : (block->_freq + block->_freq));
 750           }
 751           // It is live now
            // insert() reports true only the first time x enters liveout, so
            // area and pressure are charged once per live range, not per use.
 752           if (liveout.insert(x)) {
 753             // Newly live things assumed live from here to top of block
 754             lrg._area += cost;
 755             // Adjust register pressure
 756             if (lrg.mask().is_UP() && lrg.mask_size()) {
              // pressure[1] tracks float/vector registers, pressure[0] ints;
              // block high-water marks are updated as pressure rises.
 757               if (lrg._is_float || lrg._is_vector) {
 758                 pressure[1] += lrg.reg_pressure();
 759                 if (pressure[1] > block->_freg_pressure)  {
 760                   block->_freg_pressure = pressure[1];
 761                 }
 762               } else if( lrg.mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
 763                 pressure[0] += lrg.reg_pressure();
 764                 if (pressure[0] > block->_reg_pressure) {
 765                   block->_reg_pressure = pressure[0];
 766                 }
 767               }
 768             }
              // Debug-only cross-check of the incrementally maintained counts
              // against a from-scratch recount of the liveout set.
 769             assert( pressure[0] == count_int_pressure  (&liveout), "" );
 770             assert( pressure[1] == count_float_pressure(&liveout), "" );
 771           }
 772           assert(!(lrg._area < 0.0), "negative spill area" );
 773         }
 774       }
 775     } // End of reverse pass over all instructions in block
 776 
 777     // If we run off the top of the block with high pressure and
 778     // never see a hi-to-low pressure transition, just record that
 779     // the whole block is high pressure.
 780     if (pressure[0] > (uint)INTPRESSURE) {
      // Index 0 marks the high-pressure point as the very top of the block.
 781       hrp_index[0] = 0;
 782       if (pressure[0] > block->_reg_pressure) {
 783         block->_reg_pressure = pressure[0];
 784       }
 785     }
 786     if (pressure[1] > (uint)FLOATPRESSURE) {
 787       hrp_index[1] = 0;
 788       if (pressure[1] > block->_freg_pressure) {
 789         block->_freg_pressure = pressure[1];
 790       }
 791     }
 792 
 793     // Compute high pressure indices; avoid landing in the middle of projnodes
 794     j = hrp_index[0];
 795     if (j < block->number_of_nodes() && j < block->end_idx() + 1) {
 796       Node* cur = block->get_node(j);
      // Back up over projections, null checks and catches so the recorded
      // index does not split a multi-node instruction group.
 797       while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
 798         j--;
 799         cur = block->get_node(j);
 800       }
 801     }
 802     block->_ihrp_index = j;
      // Same adjustment for the float high-pressure index.
 803     j = hrp_index[1];
 804     if (j < block->number_of_nodes() && j < block->end_idx() + 1) {
 805       Node* cur = block->get_node(j);
 806       while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
 807         j--;
 808         cur = block->get_node(j);
 809       }
 810     }
 811     block->_fhrp_index = j;
 812 
 813 #ifndef PRODUCT
 814     // Gather Register Pressure Statistics
 815     if( PrintOptoStatistics ) {
 816       if (block->_reg_pressure > (uint)INTPRESSURE || block->_freg_pressure > (uint)FLOATPRESSURE) {
 817         _high_pressure++;
 818       } else {
 819         _low_pressure++;
 820       }
 821     }
 822 #endif
 823   } // End of for all blocks
 824 
      // Count of live ranges whose masks went dry; presumably a non-zero
      // value makes the caller run a spill pass — confirm at the call site.
 825   return must_spill;
 826 }