/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/coalesce.hpp"
#include "opto/connode.hpp"
#include "opto/indexSet.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/regmask.hpp"

#ifndef PRODUCT
void PhaseCoalesce::dump(Node *n) const {
  // Being a const function means I cannot use 'Find'
  uint r = _phc._lrg_map.find(n);
  tty->print("L%d/N%d ", r, n->_idx);
}

void PhaseCoalesce::dump() const {
  // I know I have a block layout now, so I can print blocks in a loop
  for( uint i=0; i<_phc._cfg.number_of_blocks(); i++ ) {
    uint j;
    Block* b = _phc._cfg.get_block(i);
    // Print a nice block header
    tty->print("B%d: ", b->_pre_order);
    for( j=1; j<b->num_preds(); j++ )
      tty->print("B%d ", _phc._cfg.get_block_for_node(b->pred(j))->_pre_order);
    tty->print("-> ");
    for( j=0; j<b->_num_succs; j++ )
      tty->print("B%d ", b->_succs[j]->_pre_order);
    tty->print(" IDom: B%d/#%d\n", b->_idom ? b->_idom->_pre_order : 0, b->_dom_depth);
    uint cnt = b->number_of_nodes();
    for( j=0; j<cnt; j++ ) {
      Node *n = b->get_node(j);
      dump(n);
      tty->print("\t%s\t", n->Name());

      // Dump the inputs
      uint k;                   // Exit value of loop
      for( k=0; k<n->req(); k++ ) // For all required inputs
        if( n->in(k) ) dump( n->in(k) );
        else tty->print("_ ");
      int any_prec = 0;
      for( ; k<n->len(); k++ )          // For all precedence inputs
        if( n->in(k) ) {
          if( !any_prec++ ) tty->print(" |");
          dump( n->in(k) );
        }

      // Dump node-specific info
      n->dump_spec(tty);
      tty->print("\n");
    }
    tty->print("\n");
  }
}
#endif

// Combine the live ranges def'd by these 2 Nodes.  N2 is an input to N1.
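// For example, with "n1 = MachSpillCopy(n2)" where n1 carries live range L12
// and n2 carries L7, a successful combine leaves both nodes answering to the
// smaller name (L7), making the copy redundant so it can later be removed.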
void PhaseCoalesce::combine_these_two(Node *n1, Node *n2) {
  uint lr1 = _phc._lrg_map.find(n1);
  uint lr2 = _phc._lrg_map.find(n2);
  if( lr1 != lr2 &&             // Different live ranges already AND
      !_phc._ifg->test_edge_sq( lr1, lr2 ) ) {  // Do not interfere
    LRG *lrg1 = &_phc.lrgs(lr1);
    LRG *lrg2 = &_phc.lrgs(lr2);
    // Not an oop->int cast; oop->oop, int->int, AND int->oop are OK.

    // Now, why is int->oop OK?  We end up declaring a raw-pointer as an oop
    // and in general that's a bad thing.  However, int->oop conversions only
    // happen at GC points, so the lifetime of the misclassified raw-pointer
    // is from the CheckCastPP (that converts it to an oop) backwards up
    // through a merge point and into the slow-path call, and around the
    // diamond up to the heap-top check and back down into the slow-path call.
    // The misclassified raw pointer is NOT live across the slow-path call,
    // and so does not appear in any GC info, so the fact that it is
    // misclassified is OK.

    if( (lrg1->_is_oop || !lrg2->_is_oop) && // not an oop->int cast AND
        // Compatible final mask
        lrg1->mask().overlap( lrg2->mask() ) ) {
      // Merge larger into smaller.
      if( lr1 > lr2 ) {
        uint  tmp =  lr1;  lr1 =  lr2;  lr2 =  tmp;
        Node   *n =   n1;   n1 =   n2;   n2 =    n;
        LRG *ltmp = lrg1; lrg1 = lrg2; lrg2 = ltmp;
      }
      // Union lr2 into lr1
      _phc.Union( n1, n2 );
      if (lrg1->_maxfreq < lrg2->_maxfreq)
        lrg1->_maxfreq = lrg2->_maxfreq;
      // Merge in the IFG
      _phc._ifg->Union( lr1, lr2 );
      // Combine register restrictions
      lrg1->AND(lrg2->mask());
    }
  }
}

// Copy coalescing
void PhaseCoalesce::coalesce_driver() {
  verify();
  // Coalesce from high frequency to low
  for (uint i = 0; i < _phc._cfg.number_of_blocks(); i++) {
    coalesce(_phc._blks[i]);
  }
}

// I am inserting copies to come out of SSA form.  In the general case, I am
// doing a parallel renaming.  I'm in the Named world now, so I can't do a
// general parallel renaming.  All the copies now use "names" (live-ranges)
// to carry values instead of the explicit use-def chains.  Suppose I need to
// insert 2 copies into the same block.  They copy L161->L128 and L128->L132.
// If I insert them in the wrong order then L128 will get clobbered before it
// can get used by the second copy.  This cannot happen in the SSA model;
// direct use-def chains get me the right value.  It DOES happen in the named
// model, so I have to handle the reordering of copies.
//
// In general, I need to topo-sort the placed copies to avoid conflicts.
// It's possible to have a closed cycle of copies (e.g., recirculating the same
// values around a loop).  In this case I need a temp to break the cycle.
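// For example, given the two copies above, L128<-L161 must be emitted after
// L132<-L128, or the old value of L128 is clobbered before its last use.
// For a true cycle such as L128<-L132 plus L132<-L128, no ordering works;
// a temp breaks it:
//   tmp<-L132;  L132<-L128;  L128<-tmp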
void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, uint dst_name, uint src_name ) {

  // Scan backwards for the location of the last use of the dst_name.
  // I am about to clobber the dst_name, so the copy must be inserted
  // after the last use.  Last use is really first-use on a backwards scan.
  uint i = b->end_idx()-1;
  while(1) {
    Node *n = b->get_node(i);
    // Check for end of virtual copies; this is also the end of the
    // parallel renaming effort.
    if (n->_idx < _unique) {
      break;
    }
    uint idx = n->is_Copy();
    assert( idx || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" );
    if (idx && _phc._lrg_map.find(n->in(idx)) == dst_name) {
      break;
    }
    i--;
  }
  uint last_use_idx = i;

  // Also search for any kill of src_name that exits the block.
  // Since the copy uses src_name, it has to come before any kill.
  uint kill_src_idx = b->end_idx();
  // There can be only 1 kill that exits any block and that is
  // the last kill.  Thus it is the first kill on a backwards scan.
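  // (Any earlier kill of src_name is itself overwritten by the last kill
  // before the block exits, so only that last kill constrains where this
  // copy may be placed.)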
  i = b->end_idx()-1;
  while (1) {
    Node *n = b->get_node(i);
    // Check for end of virtual copies; this is also the end of the
    // parallel renaming effort.
    if (n->_idx < _unique) {
      break;
    }
    assert( n->is_Copy() || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" );
    if (_phc._lrg_map.find(n) == src_name) {
      kill_src_idx = i;
      break;
    }
    i--;
  }
  // Need a temp?  Last use of dst comes after the kill of src?
  if (last_use_idx >= kill_src_idx) {
    // Need to break a cycle with a temp
    uint idx = copy->is_Copy();
    Node *tmp = copy->clone();
    uint max_lrg_id = _phc._lrg_map.max_lrg_id();
    _phc.new_lrg(tmp, max_lrg_id);
    _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1);

    // Insert new temp between copy and source
    tmp ->set_req(idx,copy->in(idx));
    copy->set_req(idx,tmp);
    // Save source in temp early, before source is killed
    b->insert_node(tmp, kill_src_idx);
    _phc._cfg.map_node_to_block(tmp, b);
    last_use_idx++;
  }

  // Insert just after last use
  b->insert_node(copy, last_use_idx + 1);
}

void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
  // We compress the LRG map and fix up the liveout data only here, since the
  // other place, in Split(), is guarded by an assert which we never hit.
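  // (find() maps a possibly-stale live range name to its current union-find
  // representative, so stale names in the liveout sets get replaced by their
  // compressed equivalents below.)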
  _phc._lrg_map.compress_uf_map_for_nodes();
  // Fix block's liveout data for compressed live ranges.
  for (uint lrg = 1; lrg < _phc._lrg_map.max_lrg_id(); lrg++) {
    uint compressed_lrg = _phc._lrg_map.find(lrg);
    if (lrg != compressed_lrg) {
      for (uint bidx = 0; bidx < _phc._cfg.number_of_blocks(); bidx++) {
        IndexSet *liveout = _phc._live->live(_phc._cfg.get_block(bidx));
        if (liveout->member(lrg)) {
          liveout->remove(lrg);
          liveout->insert(compressed_lrg);
        }
      }
    }
  }

  // All new nodes added are actual copies to replace virtual copies.
  // Nodes with index less than '_unique' are original, non-virtual Nodes.
  _unique = C->unique();

  for (uint i = 0; i < _phc._cfg.number_of_blocks(); i++) {
    C->check_node_count(NodeLimitFudgeFactor, "out of nodes in coalesce");
    if (C->failing()) return;
    Block *b = _phc._cfg.get_block(i);
    uint cnt = b->num_preds();  // Number of inputs to the Phi

    for( uint l = 1; l<b->number_of_nodes(); l++ ) {
      Node *n = b->get_node(l);

      // Do not use removed-copies, use copied value instead
      uint ncnt = n->req();
      for( uint k = 1; k<ncnt; k++ ) {
        Node *copy = n->in(k);
        uint cidx = copy->is_Copy();
        if( cidx ) {
          Node *def = copy->in(cidx);
          if (_phc._lrg_map.find(copy) == _phc._lrg_map.find(def)) {
            n->set_req(k, def);
          }
        }
      }

      // Remove any explicit copies that get coalesced.
      uint cidx = n->is_Copy();
      if( cidx ) {
        Node *def = n->in(cidx);
        if (_phc._lrg_map.find(n) == _phc._lrg_map.find(def)) {
          n->replace_by(def);
          n->set_req(cidx, NULL);
          b->remove_node(l);
          l--;
          continue;
        }
      }

      if (n->is_Phi()) {
        // Get the chosen name for the Phi
        uint phi_name = _phc._lrg_map.find(n);
        // Ignore the pre-allocated specials
        if (!phi_name) {
          continue;
        }
        // Check for mismatched inputs to the Phi
        for (uint j = 1; j < cnt; j++) {
          Node *m = n->in(j);
          uint src_name = _phc._lrg_map.find(m);
          if (src_name != phi_name) {
            Block *pred = _phc._cfg.get_block_for_node(b->pred(j));
            Node *copy;
            assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
            // Rematerialize constants instead of copying them.
            // We do this only for immediate constants; we avoid constant table loads
            // because they would unsafely extend the live range of the constant table base.
            if (m->is_Mach() && m->as_Mach()->is_Con() && !m->as_Mach()->is_MachConstant() &&
                m->as_Mach()->rematerialize()) {
              copy = m->clone();
              // Insert the copy in the predecessor basic block
              pred->add_inst(copy);
              // Copy any flags as well
              _phc.clone_projs(pred, pred->end_idx(), m, copy, _phc._lrg_map);
            } else {
              const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
              copy = new MachSpillCopyNode(MachSpillCopyNode::PhiInput, m, *rm, *rm);
              // Find a good place to insert.  Kinda tricky; use a subroutine.
              insert_copy_with_overlap(pred, copy, phi_name, src_name);
            }
            // Insert the copy in the use-def chain
            n->set_req(j, copy);
            _phc._cfg.map_node_to_block(copy, pred);
            // Extend ("register allocate") the names array for the copy.
            _phc._lrg_map.extend(copy->_idx, phi_name);
          } // End of if Phi names do not match
        } // End of for all inputs to Phi
      } else { // End of if Phi

        // Now check for 2-address instructions
        uint idx;
        if( n->is_Mach() && (idx = n->as_Mach()->two_adr()) ) {
          // Get the chosen name for the Node
          uint name = _phc._lrg_map.find(n);
          assert(name, "no 2-address specials");
          // Check for name mis-match on the 2-address input
          Node *m = n->in(idx);
          if (_phc._lrg_map.find(m) != name) {
            Node *copy;
            assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
            // At this point it is unsafe to extend live ranges (6550579).
            // Rematerialize only constants as we do for Phi above.
            if (m->is_Mach() && m->as_Mach()->is_Con() && !m->as_Mach()->is_MachConstant() &&
                m->as_Mach()->rematerialize()) {
              copy = m->clone();
              // Insert the copy in the basic block, just before us
              b->insert_node(copy, l++);
              l += _phc.clone_projs(b, l, m, copy, _phc._lrg_map);
            } else {
              const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
              copy = new MachSpillCopyNode(MachSpillCopyNode::TwoAddress, m, *rm, *rm);
              // Insert the copy in the basic block, just before us
              b->insert_node(copy, l++);
            }
            // Insert the copy in the use-def chain
            n->set_req(idx, copy);
            // Extend ("register allocate") the names array for the copy.
            _phc._lrg_map.extend(copy->_idx, name);
            _phc._cfg.map_node_to_block(copy, b);
          }

        } // End of is two-adr

        // Insert a copy at a debug use for a lrg which has high frequency
        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || _phc._cfg.is_uncommon(b)) {
          // Walk the debug inputs to the node and check for lrg freq
          JVMState* jvms = n->jvms();
          uint debug_start = jvms ? jvms->debug_start() : 999999;
          uint debug_end   = jvms ? jvms->debug_end()   : 999999;
          for(uint inpidx = debug_start; inpidx < debug_end; inpidx++) {
            // Do not split monitors; they are only needed for debug table
            // entries and need no code.
            if (jvms->is_monitor_use(inpidx)) {
              continue;
            }
            Node *inp = n->in(inpidx);
            uint nidx = _phc._lrg_map.live_range_id(inp);
            LRG &lrg = lrgs(nidx);

            // If this lrg has a high frequency use/def
            if( lrg._maxfreq >= _phc.high_frequency_lrg() ) {
              // If the live range is also live out of this block (like it
              // would be for a fast/slow idiom), the normal spill mechanism
              // does an excellent job.  If it is not live out of this block
              // (like it would be for debug info to uncommon trap) splitting
              // the live range now allows a better allocation in the high
              // frequency blocks.
              //   Build_IFG_virtual has converted the live sets to
              // live-IN info, not live-OUT info.
              uint k;
              for( k=0; k < b->_num_succs; k++ )
                if( _phc._live->live(b->_succs[k])->member( nidx ) )
                  break;      // Live in to some successor block?
              if( k < b->_num_succs )
                continue;     // Live out; do not pre-split
              // Split the lrg at this use
              const RegMask *rm = C->matcher()->idealreg2spillmask[inp->ideal_reg()];
              Node* copy = new MachSpillCopyNode(MachSpillCopyNode::DebugUse, inp, *rm, *rm);
              // Insert the copy in the use-def chain
              n->set_req(inpidx, copy);
              // Insert the copy in the basic block, just before us
              b->insert_node(copy, l++);
              // Extend ("register allocate") the names array for the copy.
              uint max_lrg_id = _phc._lrg_map.max_lrg_id();
              _phc.new_lrg(copy, max_lrg_id);
              _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1);
              _phc._cfg.map_node_to_block(copy, b);
              //tty->print_cr("Split a debug use in Aggressive Coalesce");
            }  // End of if high frequency use/def
          }  // End of for all debug inputs
        }  // End of if low frequency safepoint

      } // End of if Phi

    } // End of for all instructions
  } // End of for all blocks
}

// Aggressive (but pessimistic) copy coalescing of a single block

// The following coalesce pass represents a single round of aggressive
// pessimistic coalescing.  "Aggressive" means no attempt to preserve
// colorability when coalescing.  This occasionally means more spills, but
// it also means fewer rounds of coalescing for better code - and that means
// faster compiles.

// "Pessimistic" means we do not hit the fixed point in one pass (and we are
// reaching for the least fixed point to boot).  This is typically solved
// with a few more rounds of coalescing, but the compiler must run fast.  We
// could optimistically coalesce everything touching PhiNodes together
// into one big live range, then check for self-interference.  Everywhere
// the live range interferes with itself it would have to be split.  Finding
// the right split points can be done with some heuristics (based on
// expected frequency of edges in the live range).  In short, it's a real
// research problem and the timeline is too short to allow such research.
// Further thoughts: (1) build the LR in a pass, (2) find self-interference
// in another pass, (3) per each self-conflict, split, (4) split by finding
// the low-cost cut (min-cut) of the LR, (5) edges in the LR are weighted
// according to the GCM algorithm (or just exec freq on CFG edges).

void PhaseAggressiveCoalesce::coalesce( Block *b ) {
  // Copies are still "virtual" - meaning we have not yet made them explicit
  // copies.  Instead, Phi functions of successor blocks have mis-matched
  // live-ranges.  If I fail to coalesce, I'll have to insert a copy to line
  // up the live-ranges.  Check for Phis in successor blocks.
  uint i;
  for( i=0; i<b->_num_succs; i++ ) {
    Block *bs = b->_succs[i];
    // Find index of 'b' in 'bs' predecessors
    uint j=1;
    while (_phc._cfg.get_block_for_node(bs->pred(j)) != b) {
      j++;
    }

    // Visit all the Phis in successor block
    for( uint k = 1; k<bs->number_of_nodes(); k++ ) {
      Node *n = bs->get_node(k);
      if( !n->is_Phi() ) break;
      combine_these_two( n, n->in(j) );
    }
  } // End of for all successor blocks

  // Check _this_ block for 2-address instructions and copies.
  uint cnt = b->end_idx();
  for( i = 1; i<cnt; i++ ) {
    Node *n = b->get_node(i);
    uint idx;
    // 2-address instructions have a virtual Copy matching their input
    // to their output
    if (n->is_Mach() && (idx = n->as_Mach()->two_adr())) {
      MachNode *mach = n->as_Mach();
      combine_these_two(mach, mach->in(idx));
    }
  } // End of for all instructions in block
}

PhaseConservativeCoalesce::PhaseConservativeCoalesce(PhaseChaitin &chaitin) : PhaseCoalesce(chaitin) {
  _ulr.initialize(_phc._lrg_map.max_lrg_id());
}

void PhaseConservativeCoalesce::verify() {
#ifdef ASSERT
  _phc.set_was_low();
#endif
}

void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, uint lr1, uint lr2, Node *src_def, Node *dst_copy, Node *src_copy, Block *b, uint bindex ) {
  // Join live ranges.  Merge larger into smaller.  Union lr2 into lr1 in the
  // union-find tree
  _phc.Union( lr1_node, lr2_node );

  // Single-def live range ONLY if both live ranges are single-def.
  // If both are single def, then src_def powers one live range
  // and dst_copy powers the other.  After merging, src_def powers
  // the combined live range.
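  // E.g., if one range's only def was src_def and the other's only def was
  // dst_copy, then once dst_copy is removed below, the merged range is again
  // single-def, powered by src_def alone.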
  lrgs(lr1)._def = (lrgs(lr1).is_multidef() ||
                        lrgs(lr2).is_multidef() )
    ? NodeSentinel : src_def;
  lrgs(lr2)._def = NULL;    // No def for lrg 2
  lrgs(lr2).Clear();        // Force empty mask for LRG 2
  //lrgs(lr2)._size = 0;      // Live-range 2 goes dead
  lrgs(lr1)._is_oop |= lrgs(lr2)._is_oop;
  lrgs(lr2)._is_oop = 0;    // In particular, not an oop for GC info

  if (lrgs(lr1)._maxfreq < lrgs(lr2)._maxfreq)
    lrgs(lr1)._maxfreq = lrgs(lr2)._maxfreq;

  // Copy original value instead.  Intermediate copies go dead, and
  // the dst_copy becomes useless.
  int didx = dst_copy->is_Copy();
  dst_copy->set_req( didx, src_def );
  // Add copy to free list
  // _phc.free_spillcopy(b->_nodes[bindex]);
  assert( b->get_node(bindex) == dst_copy, "" );
  dst_copy->replace_by( dst_copy->in(didx) );
  dst_copy->set_req( didx, NULL);
  b->remove_node(bindex);
  if( bindex < b->_ihrp_index ) b->_ihrp_index--;
  if( bindex < b->_fhrp_index ) b->_fhrp_index--;

  // Stretched lr1; add it to liveness of intermediate blocks
  Block *b2 = _phc._cfg.get_block_for_node(src_copy);
  while( b != b2 ) {
    b = _phc._cfg.get_block_for_node(b->pred(1));
    _phc._live->live(b)->insert(lr1);
  }
}

// Factored code from copy_copy that computes extra interferences from
// lengthening a live range by double-coalescing.
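// The chain under examination looks like, e.g.:
//   src_def -> src_copy -> (intermediate copies) -> dst_copy
// Walking backwards from dst_copy toward src_copy, every node that is not
// part of the copy chain is a potential new interference for the union.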
uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy, Node *src_copy, Block *b, uint bindex, RegMask &rm, uint rm_size, uint reg_degree, uint lr1, uint lr2 ) {

  assert(!lrgs(lr1)._fat_proj, "cannot coalesce fat_proj");
  assert(!lrgs(lr2)._fat_proj, "cannot coalesce fat_proj");
  Node *prev_copy = dst_copy->in(dst_copy->is_Copy());
  Block *b2 = b;
  uint bindex2 = bindex;
  while( 1 ) {
    // Find previous instruction
    bindex2--;                  // Chain backwards 1 instruction
    while( bindex2 == 0 ) {     // At block start, find prior block
      assert( b2->num_preds() == 2, "cannot double coalesce across c-flow" );
      b2 = _phc._cfg.get_block_for_node(b2->pred(1));
      bindex2 = b2->end_idx()-1;
    }
    // Get prior instruction
    assert(bindex2 < b2->number_of_nodes(), "index out of bounds");
    Node *x = b2->get_node(bindex2);
    if( x == prev_copy ) {      // Previous copy in copy chain?
      if( prev_copy == src_copy)// Found end of chain and all interferences
        break;                  // So break out of loop
      // Else work back one in copy chain
      prev_copy = prev_copy->in(prev_copy->is_Copy());
    } else {                    // Else collect interferences
      uint lidx = _phc._lrg_map.find(x);
      // Found another def of live-range being stretched?
      if(lidx == lr1) {
        return max_juint;
      }
      if(lidx == lr2) {
        return max_juint;
      }

      // If we attempt to coalesce across a bound def
      if( lrgs(lidx).is_bound() ) {
        // Do not let the coalesced LRG expect to get the bound color
        rm.SUBTRACT( lrgs(lidx).mask() );
        // Recompute rm_size
        rm_size = rm.Size();
        //if( rm._flags ) rm_size += 1000000;
        if( reg_degree >= rm_size ) return max_juint;
      }
      if( rm.overlap(lrgs(lidx).mask()) ) {
        // Insert lidx into union LRG; returns TRUE if actually inserted
        if( _ulr.insert(lidx) ) {
          // Infinite-stack neighbors do not alter colorability, as they
          // can always color to some other color.
          if( !lrgs(lidx).mask().is_AllStack() ) {
            // If this coalesce will make any new neighbor uncolorable,
            // do not coalesce.
            if( lrgs(lidx).just_lo_degree() )
              return max_juint;
            // Bump our degree
            if( ++reg_degree >= rm_size )
              return max_juint;
          } // End of if not infinite-stack neighbor
        } // End of if actually inserted
      } // End of if live range overlaps
    } // End of else collect interferences for 1 node
  } // End of while forever, scan back for interferences
  return reg_degree;
}

void PhaseConservativeCoalesce::update_ifg(uint lr1, uint lr2, IndexSet *n_lr1, IndexSet *n_lr2) {
  // Some original neighbors of lr1 might have gone away
  // because the constrained register mask prevented them.
  // Remove lr1 from such neighbors.
  IndexSetIterator one(n_lr1);
  uint neighbor;
  LRG &lrg1 = lrgs(lr1);
  while ((neighbor = one.next()) != 0)
    if( !_ulr.member(neighbor) )
      if( _phc._ifg->neighbors(neighbor)->remove(lr1) )
        lrgs(neighbor).inc_degree( -lrg1.compute_degree(lrgs(neighbor)) );

  // lr2 is now called (coalesced into) lr1.
  // Remove lr2 from the IFG.
  IndexSetIterator two(n_lr2);
  LRG &lrg2 = lrgs(lr2);
  while ((neighbor = two.next()) != 0)
    if( _phc._ifg->neighbors(neighbor)->remove(lr2) )
      lrgs(neighbor).inc_degree( -lrg2.compute_degree(lrgs(neighbor)) );

  // Some neighbors of intermediate copies now interfere with the
  // combined live range.
  IndexSetIterator three(&_ulr);
  while ((neighbor = three.next()) != 0)
    if( _phc._ifg->neighbors(neighbor)->insert(lr1) )
      lrgs(neighbor).inc_degree( lrg1.compute_degree(lrgs(neighbor)) );
}

static void record_bias( const PhaseIFG *ifg, int lr1, int lr2 ) {
  // Tag copy bias here
  if( !ifg->lrgs(lr1)._copy_bias )
    ifg->lrgs(lr1)._copy_bias = lr2;
  if( !ifg->lrgs(lr2)._copy_bias )
    ifg->lrgs(lr2)._copy_bias = lr1;
}

// See if I can coalesce a series of multiple copies together.  I need the
// final dest copy and the original src copy.  They can be the same Node.
// Compute the compatible register masks.
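// (Within this file the driver always calls copy_copy with dst_copy ==
// src_copy; passing distinct nodes "double coalesces" a whole chain of
// copies at once.)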
bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block *b, uint bindex) {

  if (!dst_copy->is_SpillCopy()) {
    return false;
  }
  if (!src_copy->is_SpillCopy()) {
    return false;
  }
  Node *src_def = src_copy->in(src_copy->is_Copy());
  uint lr1 = _phc._lrg_map.find(dst_copy);
  uint lr2 = _phc._lrg_map.find(src_def);

  // Same live ranges already?
  if (lr1 == lr2) {
    return false;
  }

  // Interfere?
  if (_phc._ifg->test_edge_sq(lr1, lr2)) {
    return false;
  }

  // Not an oop->int cast; oop->oop, int->int, AND int->oop are OK.
  if (!lrgs(lr1)._is_oop && lrgs(lr2)._is_oop) { // not an oop->int cast
    return false;
  }

  // Coalescing between an aligned live range and a mis-aligned live range?
  // No, no!  Alignment changes how we count degree.
  if (lrgs(lr1)._fat_proj != lrgs(lr2)._fat_proj) {
    return false;
  }

  // Sort; use smaller live-range number
  Node *lr1_node = dst_copy;
  Node *lr2_node = src_def;
  if (lr1 > lr2) {
    uint tmp = lr1; lr1 = lr2; lr2 = tmp;
    lr1_node = src_def;  lr2_node = dst_copy;
  }

  // Check for compatibility of the 2 live ranges by
  // intersecting their allowed register sets.
  RegMask rm = lrgs(lr1).mask();
  rm.AND(lrgs(lr2).mask());
  // Number of bits free
  uint rm_size = rm.Size();

  if (UseFPUForSpilling && rm.is_AllStack() ) {
    // Don't coalesce when frequency difference is large
    Block *dst_b = _phc._cfg.get_block_for_node(dst_copy);
    Block *src_def_b = _phc._cfg.get_block_for_node(src_def);
    if (src_def_b->_freq > 10*dst_b->_freq )
      return false;
  }

  // If we can use any stack slot, then effective size is infinite
  if( rm.is_AllStack() ) rm_size += 1000000;
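  // (The 1000000 is just "effectively infinite" next to any real register
  // count, so an AllStack mask never looks over-constrained below.)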
  // Incompatible masks, no way to coalesce
  if( rm_size == 0 ) return false;

  // Another early bail-out test is when we are double-coalescing and the
  // 2 copies are separated by some control flow.
  if( dst_copy != src_copy ) {
    Block *src_b = _phc._cfg.get_block_for_node(src_copy);
    Block *b2 = b;
    while( b2 != src_b ) {
      if( b2->num_preds() > 2 ){// Found merge-point
        _phc._lost_opp_cflow_coalesce++;
        // extra record_bias commented out because Chris believes it is not
        // productive.  Since we can record only 1 bias, we want to choose one
        // that stands a chance of working and this one probably does not.
        //record_bias( _phc._lrgs, lr1, lr2 );
        return false;           // Too hard to find all interferences
      }
      b2 = _phc._cfg.get_block_for_node(b2->pred(1));
    }
  }

  // Union the two interference sets together into '_ulr'
  uint reg_degree = _ulr.lrg_union( lr1, lr2, rm_size, _phc._ifg, rm );

  if( reg_degree >= rm_size ) {
    record_bias( _phc._ifg, lr1, lr2 );
    return false;
  }

  // Now I need to compute all the interferences between dst_copy and
  // src_copy.  I'm not willing to visit the entire interference graph, so
  // I limit my search to things in dst_copy's block or in a straight
  // line of previous blocks.  I give up at merge points or when I get
  // more interferences than my degree.  I can stop when I find src_copy.
  if( dst_copy != src_copy ) {
    reg_degree = compute_separating_interferences(dst_copy, src_copy, b, bindex, rm, rm_size, reg_degree, lr1, lr2 );
    if( reg_degree == max_juint ) {
      record_bias( _phc._ifg, lr1, lr2 );
      return false;
    }
  } // End of if dst_copy & src_copy are different

  // ---- THE COMBINED LRG IS COLORABLE ----

  // YEAH - Now coalesce this copy away
  assert( lrgs(lr1).num_regs() == lrgs(lr2).num_regs(), "" );

  IndexSet *n_lr1 = _phc._ifg->neighbors(lr1);
  IndexSet *n_lr2 = _phc._ifg->neighbors(lr2);

  // Update the interference graph
  update_ifg(lr1, lr2, n_lr1, n_lr2);

  _ulr.remove(lr1);

  // Uncomment the following code to trace Coalescing in great detail.
  //
  //if (false) {
  //  tty->cr();
  //  tty->print_cr("#######################################");
  //  tty->print_cr("union %d and %d", lr1, lr2);
  //  n_lr1->dump();
  //  n_lr2->dump();
  //  tty->print_cr("resulting set is");
  //  _ulr.dump();
  //}

  // Replace n_lr1 with the new combined live range.  _ulr will use
  // n_lr1's old memory on the next iteration.  n_lr2 is cleared to
  // send its internal memory to the free list.
  _ulr.swap(n_lr1);
  _ulr.clear();
  n_lr2->clear();

  lrgs(lr1).set_degree( _phc._ifg->effective_degree(lr1) );
  lrgs(lr2).set_degree( 0 );

  // Join live ranges.  Merge larger into smaller.  Union lr2 into lr1 in the
  // union-find tree.
  union_helper( lr1_node, lr2_node, lr1, lr2, src_def, dst_copy, src_copy, b, bindex );
  // Combine register restrictions
  lrgs(lr1).set_mask(rm);
  lrgs(lr1).compute_set_mask_size();
  lrgs(lr1)._cost += lrgs(lr2)._cost;
  lrgs(lr1)._area += lrgs(lr2)._area;

  // While it's uncommon to successfully coalesce live ranges that started out
  // being not-lo-degree, it can happen.  In any case the combined coalesced
  // live range had better Simplify nicely.
  lrgs(lr1)._was_lo = 1;

  // kinda expensive to do all the time
  //tty->print_cr("warning: slow verify happening");
  //_phc._ifg->verify( &_phc );
  return true;
}

// Conservative (but pessimistic) copy coalescing of a single block
void PhaseConservativeCoalesce::coalesce( Block *b ) {
  // Bail out on infrequent blocks
  if (_phc._cfg.is_uncommon(b)) {
    return;
  }
  // Check this block for copies.
  for( uint i = 1; i<b->end_idx(); i++ ) {
    // Check for actual copies on inputs.  Coalesce a copy into its
    // input if use and copy's input are compatible.
    Node *copy1 = b->get_node(i);
    uint idx1 = copy1->is_Copy();
    if( !idx1 ) continue;       // Not a copy

    if( copy_copy(copy1,copy1,b,i) ) {
      i--;                      // Retry, same location in block
      PhaseChaitin::_conserv_coalesce++;  // Collect stats on success
      continue;
    }
  }
}