/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/oopMap.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/block.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/coalesce.hpp"
#include "opto/connode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/indexSet.hpp"
#include "opto/machnode.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opcodes.hpp"
#include "opto/rootnode.hpp"

#ifndef PRODUCT
void LRG::dump() const {
  ttyLocker ttyl;
  tty->print("%d ",num_regs());
  _mask.dump();
  if( _msize_valid ) {
    if( mask_size() == compute_mask_size() ) tty->print(", #%d ",_mask_size);
    else tty->print(", #!!!_%d_vs_%d ",_mask_size,_mask.Size());
  } else {
    tty->print(", #?(%d) ",_mask.Size());
  }

  tty->print("EffDeg: ");
  if( _degree_valid ) tty->print( "%d ", _eff_degree );
  else tty->print("? ");

  if( is_multidef() ) {
    tty->print("MultiDef ");
    if (_defs != NULL) {
      tty->print("(");
      for (int i = 0; i < _defs->length(); i++) {
        tty->print("N%d ", _defs->at(i)->_idx);
      }
      tty->print(") ");
    }
  }
  else if( _def == 0 ) tty->print("Dead ");
  else tty->print("Def: N%d ",_def->_idx);

  tty->print("Cost:%4.2g Area:%4.2g Score:%4.2g ",_cost,_area, score());
  // Flags
  if( _is_oop ) tty->print("Oop ");
  if( _is_float ) tty->print("Float ");
  if( _is_vector ) tty->print("Vector ");
  if( _was_spilled1 ) tty->print("Spilled ");
  if( _was_spilled2 ) tty->print("Spilled2 ");
  if( _direct_conflict ) tty->print("Direct_conflict ");
  if( _fat_proj ) tty->print("Fat ");
  if( _was_lo ) tty->print("Lo ");
  if( _has_copy ) tty->print("Copy ");
  if( _at_risk ) tty->print("Risk ");

  if( _must_spill ) tty->print("Must_spill ");
  if( _is_bound ) tty->print("Bound ");
  if( _msize_valid ) {
    if( _degree_valid && lo_degree() ) tty->print("Trivial ");
  }

  tty->cr();
}
#endif

// Compute score from cost and area.  Low score is best to spill.
static double raw_score( double cost, double area ) {
  return cost - (area*RegisterCostAreaRatio) * 1.52588e-5;
}

double LRG::score() const {
  // Scale _area by RegisterCostAreaRatio/64K then subtract from cost.
  // Bigger area lowers score, encouraging spilling this live range.
  // Bigger cost raises score, preventing spilling this live range.
  // (Note: 1/65536 is the magic constant below; I don't trust the C optimizer
  // to turn a divide by a constant into a multiply by the reciprocal).
  double score = raw_score( _cost, _area);

  // Account for area.  Basically, LRGs covering large areas are better
  // to spill because more other LRGs get freed up.
  if( _area == 0.0 )            // No area?  Then no progress to spill
    return 1e35;

  if( _was_spilled2 )           // If spilled once before, we are unlikely
    return score + 1e30;        // to make progress again.

  if( _cost >= _area*3.0 )      // Tiny area relative to cost
    return score + 1e17;        // Probably no progress to spill

  if( (_cost+_cost) >= _area*3.0 ) // Small area relative to cost
    return score + 1e10;        // Likely no progress to spill

  return score;
}
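
// A worked example of the scoring heuristic above.  The numbers are
// hypothetical; in particular, RegisterCostAreaRatio == 16384 is chosen
// purely so that 16384/65536 == 0.25 gives round arithmetic:
//
//   raw_score(100.0, 1000.0) = 100.0 - (1000.0 * 16384) * 1.52588e-5
//                            ~= 100.0 - 250.0 = -150.0
//
// The large area drives the score negative, making this live range an
// attractive spill candidate.  The special cases above then override the
// raw score: zero area returns 1e35 (spilling makes no progress), and a
// prior second spill adds 1e30, all but excluding the range from spilling.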

#define NUMBUCKS 3

// Straight out of Tarjan's union-find algorithm
uint LiveRangeMap::find_compress(uint lrg) {
  uint cur = lrg;
  uint next = _uf_map.at(cur);
  while (next != cur) { // Scan chain of equivalences
    assert( next < cur, "always union smaller");
    cur = next; // until we find a fixed-point
    next = _uf_map.at(cur);
  }

  // Core of union-find algorithm: update chain of
  // equivalences to be equal to the root.
  while (lrg != next) {
    uint tmp = _uf_map.at(lrg);
    _uf_map.at_put(lrg, next);
    lrg = tmp;
  }
  return lrg;
}
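
// A minimal trace of find_compress(), assuming the hypothetical chain
// _uf_map: 9 -> 7, 7 -> 3, 3 -> 3 (3 is the self-cycled root):
//
//   First loop:  cur/next walk 9/7, 7/3, 3/3 and stop with next == 3.
//   Second loop: _uf_map[9] = 3, then _uf_map[7] = 3.
//
// The chain is now fully compressed, so the next find_compress(9) (or
// find_const(9)) terminates after a single lookup.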

// Reset the Union-Find map to identity
void LiveRangeMap::reset_uf_map(uint max_lrg_id) {
  _max_lrg_id = max_lrg_id;
  // Force the Union-Find mapping to be at least this large
  _uf_map.at_put_grow(_max_lrg_id, 0);
  // Initialize it to be the ID mapping.
  for (uint i = 0; i < _max_lrg_id; ++i) {
    _uf_map.at_put(i, i);
  }
}

// Make all Nodes map directly to their final live range; no need for
// the Union-Find mapping after this call.
void LiveRangeMap::compress_uf_map_for_nodes() {
  // For all Nodes, compress mapping
  uint unique = _names.length();
  for (uint i = 0; i < unique; ++i) {
    uint lrg = _names.at(i);
    uint compressed_lrg = find(lrg);
    if (lrg != compressed_lrg) {
      _names.at_put(i, compressed_lrg);
    }
  }
}

// Like Find above, but no path compress, so bad asymptotic behavior
uint LiveRangeMap::find_const(uint lrg) const {
  if (!lrg) {
    return lrg; // Ignore the zero LRG
  }

  // Off the end?  This happens during debugging dumps when you have
  // brand-new live ranges but have not yet told the allocator about them.
  if (lrg >= _max_lrg_id) {
    return lrg;
  }

  uint next = _uf_map.at(lrg);
  while (next != lrg) { // Scan chain of equivalences
    assert(next < lrg, "always union smaller");
    lrg = next; // until we find a fixed-point
    next = _uf_map.at(lrg);
  }
  return next;
}

PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher)
  : PhaseRegAlloc(unique, cfg, matcher,
#ifndef PRODUCT
       print_chaitin_statistics
#else
       NULL
#endif
       )
  , _lrg_map(Thread::current()->resource_area(), unique)
  , _live(0)
  , _spilled_once(Thread::current()->resource_area())
  , _spilled_twice(Thread::current()->resource_area())
  , _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0)
  , _oldphi(unique)
#ifndef PRODUCT
  , _trace_spilling(TraceSpilling || C->method_has_option("TraceSpilling"))
#endif
{
  Compile::TracePhase tp("ctorChaitin", &timers[_t_ctorChaitin]);

  _high_frequency_lrg = MIN2(double(OPTO_LRG_HIGH_FREQ), _cfg.get_outer_loop_frequency());

  // Build a list of basic blocks, sorted by frequency
  _blks = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks());
  // Experiment with sorting strategies to speed compilation
  double  cutoff = BLOCK_FREQUENCY(1.0); // Cutoff for high frequency bucket
  Block **buckets[NUMBUCKS];             // Array of buckets
  uint    buckcnt[NUMBUCKS];             // Array of bucket counters
  double  buckval[NUMBUCKS];             // Array of bucket value cutoffs
  for (uint i = 0; i < NUMBUCKS; i++) {
    buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks());
    buckcnt[i] = 0;
    // Bump by three orders of magnitude each time
    cutoff *= 0.001;
    buckval[i] = cutoff;
    for (uint j = 0; j < _cfg.number_of_blocks(); j++) {
      buckets[i][j] = NULL;
    }
  }
  // Sort blocks into buckets
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    for (uint j = 0; j < NUMBUCKS; j++) {
      if ((j == NUMBUCKS - 1) || (_cfg.get_block(i)->_freq > buckval[j])) {
        // Assign block to end of list for appropriate bucket
        buckets[j][buckcnt[j]++] = _cfg.get_block(i);
        break; // kick out of inner loop
      }
    }
  }
  // Dump buckets into final block array
  uint blkcnt = 0;
  for (uint i = 0; i < NUMBUCKS; i++) {
    for (uint j = 0; j < buckcnt[i]; j++) {
      _blks[blkcnt++] = buckets[i][j];
    }
  }

  assert(blkcnt == _cfg.number_of_blocks(), "Block array not totally filled");
}
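
// Sketch of the bucket sort above with NUMBUCKS == 3, assuming for
// illustration that BLOCK_FREQUENCY(1.0) == 1.0:
//
//   buckval[0] = 1e-3   bucket 0: blocks with _freq > 1e-3  (hot)
//   buckval[1] = 1e-6   bucket 1: blocks with _freq > 1e-6  (warm)
//   buckval[2] = 1e-9   bucket 2: everything else, via the j == NUMBUCKS-1
//                       catch-all, so buckval[2] is never actually consulted
//
// Concatenating the buckets leaves _blks coarsely sorted from most to
// least frequent, so later passes visit hot blocks first.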

// union 2 sets together.
void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) {
  uint src = _lrg_map.find(src_n);
  uint dst = _lrg_map.find(dst_n);
  assert(src, "");
  assert(dst, "");
  assert(src < _lrg_map.max_lrg_id(), "oob");
  assert(dst < _lrg_map.max_lrg_id(), "oob");
  assert(src < dst, "always union smaller");
  _lrg_map.uf_map(dst, src);
}

void PhaseChaitin::new_lrg(const Node *x, uint lrg) {
  // Make the Node->LRG mapping
  _lrg_map.extend(x->_idx,lrg);
  // Make the Union-Find mapping an identity function
  _lrg_map.uf_extend(lrg, lrg);
}


int PhaseChaitin::clone_projs(Block* b, uint idx, Node* orig, Node* copy, uint& max_lrg_id) {
  assert(b->find_node(copy) == (idx - 1), "incorrect insert index for copy kill projections");
  DEBUG_ONLY( Block* borig = _cfg.get_block_for_node(orig); )
  int found_projs = 0;
  uint cnt = orig->outcnt();
  for (uint i = 0; i < cnt; i++) {
    Node* proj = orig->raw_out(i);
    if (proj->is_MachProj()) {
      assert(proj->outcnt() == 0, "only kill projections are expected here");
      assert(_cfg.get_block_for_node(proj) == borig, "incorrect block for kill projections");
      found_projs++;
      // Copy kill projections after the cloned node
      Node* kills = proj->clone();
      kills->set_req(0, copy);
      b->insert_node(kills, idx++);
      _cfg.map_node_to_block(kills, b);
      new_lrg(kills, max_lrg_id++);
    }
  }
  return found_projs;
}

// Renumber the live ranges to compact them.  Makes the IFG smaller.
void PhaseChaitin::compact() {
  Compile::TracePhase tp("chaitinCompact", &timers[_t_chaitinCompact]);

  // Currently the _uf_map contains a series of short chains which are headed
  // by a self-cycle.  All the chains run from big numbers to little numbers.
  // The Find() call chases the chains & shortens them for the next Find call.
  // We are going to change this structure slightly.  Numbers above a moving
  // wave 'i' are unchanged.  Numbers below 'j' point directly to their
  // compacted live range with no further chaining.  There are no chains or
  // cycles below 'i', so the Find call no longer works.
  uint j=1;
  uint i;
  for (i = 1; i < _lrg_map.max_lrg_id(); i++) {
    uint lr = _lrg_map.uf_live_range_id(i);
    // Ignore unallocated live ranges
    if (!lr) {
      continue;
    }
    assert(lr <= i, "");
    _lrg_map.uf_map(i, ( lr == i ) ? j++ : _lrg_map.uf_live_range_id(lr));
  }
  // Now change the Node->LR mapping to reflect the compacted names
  uint unique = _lrg_map.size();
  for (i = 0; i < unique; i++) {
    uint lrg_id = _lrg_map.live_range_id(i);
    _lrg_map.map(i, _lrg_map.uf_live_range_id(lrg_id));
  }

  // Reset the Union-Find mapping
  _lrg_map.reset_uf_map(j);
}
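
// A tiny hypothetical example of the renumbering above.  Suppose live
// ranges 1..5 exist and the union-find holds 1->1, 2->2, 3->1, 4->4, 5->4
// (1, 2 and 4 are roots).  The first loop rewrites the map in place:
//
//   i=1: root, maps to j=1     i=2: root, maps to j=2
//   i=3: non-root, maps to uf[1] = 1
//   i=4: root, maps to j=3     i=5: non-root, maps to uf[4] = 3
//
// After the Node->LR remap only the compact names 1..3 remain, and
// reset_uf_map(j) re-identities the (now smaller) union-find table.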

void PhaseChaitin::Register_Allocate() {

  // Above the OLD FP (and in registers) are the incoming arguments.  Stack
  // slots in this area are called "arg_slots".  Above the NEW FP (and in
  // registers) is the outgoing argument area; above that is the spill/temp
  // area.  These are all "frame_slots".  Arg_slots start at the zero
  // stack_slots and count up to the known arg_size.  Frame_slots start at
  // the stack_slot #arg_size and go up.  After allocation I map stack
  // slots to actual offsets.  Stack-slots in the arg_slot area are biased
  // by the frame_size; stack-slots in the frame_slot area are biased by 0.

  _trip_cnt = 0;
  _alternate = 0;
  _matcher._allocation_started = true;

  ResourceArea split_arena;     // Arena for Split local resources
  ResourceArea live_arena;      // Arena for liveness & IFG info
  ResourceMark rm(&live_arena);

  // Need live-ness for the IFG; need the IFG for coalescing.  If the
  // liveness is JUST for coalescing, then I can get some mileage by renaming
  // all copy-related live ranges low and then using the max copy-related
  // live range as a cut-off for LIVE and the IFG.  In other words, I can
  // build a subset of LIVE and IFG just for copies.
  PhaseLive live(_cfg, _lrg_map.names(), &live_arena);

  // Need IFG for coalescing and coloring
  PhaseIFG ifg(&live_arena);
  _ifg = &ifg;

  // Come out of SSA world to the Named world.  Assign (virtual) registers to
  // Nodes.  Use the same register for all inputs and the output of PhiNodes
  // - effectively ending SSA form.  This requires either coalescing live
  // ranges or inserting copies.  For the moment, we insert "virtual copies"
  // - we pretend there is a copy prior to each Phi in predecessor blocks.
  // We will attempt to coalesce such "virtual copies" before we manifest
  // them for real.
  de_ssa();

#ifdef ASSERT
  // Verify the graph before RA.
  verify(&live_arena);
#endif

  {
    Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
    _live = NULL;                 // Mark live as being not available
    rm.reset_to_mark();           // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    ifg.init(_lrg_map.max_lrg_id()); // Empty IFG
    gather_lrg_masks( false );    // Collect LRG masks
    live.compute(_lrg_map.max_lrg_id()); // Compute liveness
    _live = &live;                // Mark LIVE as being available
  }

  // Base pointers are currently "used" by instructions which define new
  // derived pointers.  This makes base pointers live up to where the
  // derived pointer is made, but not beyond.  Really, they need to be live
  // across any GC point where the derived value is live.  So this code looks
  // at all the GC points, and "stretches" the live range of any base pointer
  // to the GC point.
  if (stretch_base_pointer_live_ranges(&live_arena)) {
    Compile::TracePhase tp("computeLive (sbplr)", &timers[_t_computeLive]);
    // Since some live range stretched, I need to recompute live
    _live = NULL;
    rm.reset_to_mark();         // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    ifg.init(_lrg_map.max_lrg_id());
    gather_lrg_masks(false);
    live.compute(_lrg_map.max_lrg_id());
    _live = &live;
  }
  // Create the interference graph using virtual copies
  build_ifg_virtual();  // Include stack slots this time

  // The IFG is/was triangular.  I am 'squaring it up' so Union can run
  // faster.  Union requires a 'for all' operation which is slow on the
  // triangular adjacency matrix (quick reminder: the IFG is 'sparse' -
  // meaning I can visit all of a Node's neighbors less than that Node in
  // time O(# of neighbors), but I have to visit all the Nodes greater than
  // a given Node and search them for an instance, i.e., time O(#MaxLRG)).
  _ifg->SquareUp();

  // Aggressive (but pessimistic) copy coalescing.
  // This pass works on virtual copies.  Any virtual copies which are not
  // coalesced get manifested as actual copies
  {
    Compile::TracePhase tp("chaitinCoalesce1", &timers[_t_chaitinCoalesce1]);

    PhaseAggressiveCoalesce coalesce(*this);
    coalesce.coalesce_driver();
    // Insert un-coalesced copies.  Visit all Phis.  Where inputs to a Phi do
    // not match the Phi itself, insert a copy.
    coalesce.insert_copies(_matcher);
    if (C->failing()) {
      return;
    }
  }

  // After aggressive coalesce, attempt a first cut at coloring.
  // To color, we need the IFG and for that we need LIVE.
  {
    Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
    _live = NULL;
    rm.reset_to_mark();           // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    ifg.init(_lrg_map.max_lrg_id());
    gather_lrg_masks( true );
    live.compute(_lrg_map.max_lrg_id());
    _live = &live;
  }

  // Build physical interference graph
  uint must_spill = 0;
  must_spill = build_ifg_physical(&live_arena);
  // If we have a guaranteed spill, might as well spill now
  if (must_spill) {
    if(!_lrg_map.max_lrg_id()) {
      return;
    }
    // Bail out if unique gets too large (ie - unique > MaxNodeLimit)
    C->check_node_count(10*must_spill, "out of nodes before split");
    if (C->failing()) {
      return;
    }

    uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena);  // Split spilling LRG everywhere
    _lrg_map.set_max_lrg_id(new_max_lrg_id);
    // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
    // or we failed to split
    C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after physical split");
    if (C->failing()) {
      return;
    }

    NOT_PRODUCT(C->verify_graph_edges();)

    compact();                  // Compact LRGs; return new lower max lrg

    {
      Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
      _live = NULL;
      rm.reset_to_mark();         // Reclaim working storage
      IndexSet::reset_memory(C, &live_arena);
      ifg.init(_lrg_map.max_lrg_id()); // Build a new interference graph
      gather_lrg_masks( true );   // Collect intersect mask
      live.compute(_lrg_map.max_lrg_id()); // Compute LIVE
      _live = &live;
    }
    build_ifg_physical(&live_arena);
    _ifg->SquareUp();
    _ifg->Compute_Effective_Degree();
    // Only do conservative coalescing if requested
    if (OptoCoalesce) {
      Compile::TracePhase tp("chaitinCoalesce2", &timers[_t_chaitinCoalesce2]);
      // Conservative (and pessimistic) copy coalescing of those spills
      PhaseConservativeCoalesce coalesce(*this);
      // If max live ranges greater than cutoff, don't color the stack.
      // This cutoff can be larger than below since it is only done once.
      coalesce.coalesce_driver();
    }
    _lrg_map.compress_uf_map_for_nodes();

#ifdef ASSERT
    verify(&live_arena, true);
#endif
  } else {
    ifg.SquareUp();
    ifg.Compute_Effective_Degree();
#ifdef ASSERT
    set_was_low();
#endif
  }

  // Prepare for Simplify & Select
  cache_lrg_info();           // Count degree of LRGs

  // Simplify the InterFerence Graph by removing LRGs of low degree.
  // LRGs of low degree are trivially colorable.
  Simplify();

  // Select colors by re-inserting LRGs back into the IFG in reverse order.
  // Return whether or not something spills.
  uint spills = Select( );

  // If we spill, split and recycle the entire thing
  while( spills ) {
    if( _trip_cnt++ > 24 ) {
      DEBUG_ONLY( dump_for_spill_split_recycle(); )
      if( _trip_cnt > 27 ) {
        C->record_method_not_compilable("failed spill-split-recycle sanity check");
        return;
      }
    }

    if (!_lrg_map.max_lrg_id()) {
      return;
    }
    uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena);  // Split spilling LRG everywhere
    _lrg_map.set_max_lrg_id(new_max_lrg_id);
    // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
    C->check_node_count(2 * NodeLimitFudgeFactor, "out of nodes after split");
    if (C->failing()) {
      return;
    }

    compact(); // Compact LRGs; return new lower max lrg

    // Nuke the live-ness and interference graph and LiveRanGe info
    {
      Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
      _live = NULL;
      rm.reset_to_mark();         // Reclaim working storage
      IndexSet::reset_memory(C, &live_arena);
      ifg.init(_lrg_map.max_lrg_id());

      // Create LiveRanGe array.
      // Intersect register masks for all USEs and DEFs
      gather_lrg_masks(true);
      live.compute(_lrg_map.max_lrg_id());
      _live = &live;
    }
    must_spill = build_ifg_physical(&live_arena);
    _ifg->SquareUp();
    _ifg->Compute_Effective_Degree();

    // Only do conservative coalescing if requested
    if (OptoCoalesce) {
      Compile::TracePhase tp("chaitinCoalesce3", &timers[_t_chaitinCoalesce3]);
      // Conservative (and pessimistic) copy coalescing
      PhaseConservativeCoalesce coalesce(*this);
      // A check for few live ranges determines how aggressive the coalescing is.
      coalesce.coalesce_driver();
    }
    _lrg_map.compress_uf_map_for_nodes();
#ifdef ASSERT
    verify(&live_arena, true);
#endif
    cache_lrg_info();           // Count degree of LRGs

    // Simplify the InterFerence Graph by removing LRGs of low degree.
    // LRGs of low degree are trivially colorable.
    Simplify();

    // Select colors by re-inserting LRGs back into the IFG in reverse order.
    // Return whether or not something spills.
    spills = Select();
  }

  // Count number of Simplify-Select trips per coloring success.
  _allocator_attempts += _trip_cnt + 1;
  _allocator_successes += 1;

  // Peephole remove copies
  post_allocate_copy_removal();

  // Merge multidefs if multiple defs representing the same value are used in a single block.
  merge_multidefs();

#ifdef ASSERT
  // Verify the graph after RA.
  verify(&live_arena);
#endif

  // max_reg is past the largest *register* used.
  // Convert that to a frame_slot number.
  if (_max_reg <= _matcher._new_SP) {
    _framesize = C->out_preserve_stack_slots();
  }
  else {
    _framesize = _max_reg - _matcher._new_SP;
  }
  assert((int)(_matcher._new_SP+_framesize) >= (int)_matcher._out_arg_limit, "framesize must be large enough");

  // This frame must preserve the required fp alignment
  _framesize = round_to(_framesize, Matcher::stack_alignment_in_slots());
  assert(_framesize <= 1000000, "sanity check");
#ifndef PRODUCT
  _total_framesize += _framesize;
  if ((int)_framesize > _max_framesize) {
    _max_framesize = _framesize;
  }
#endif

  // Convert CISC spills
  fixup_spills();

  // Log regalloc results
  CompileLog* log = Compile::current()->log();
  if (log != NULL) {
    log->elem("regalloc attempts='%d' success='%d'", _trip_cnt, !C->failing());
  }

  if (C->failing()) {
    return;
  }

  NOT_PRODUCT(C->verify_graph_edges();)

  // Move important info out of the live_arena to longer lasting storage.
  alloc_node_regs(_lrg_map.size());
  for (uint i=0; i < _lrg_map.size(); i++) {
    if (_lrg_map.live_range_id(i)) { // Live range associated with Node?
      LRG &lrg = lrgs(_lrg_map.live_range_id(i));
      if (!lrg.alive()) {
        set_bad(i);
      } else if (lrg.num_regs() == 1) {
        set1(i, lrg.reg());
      } else {                  // Must be a register-set
        if (!lrg._fat_proj) {   // Must be aligned adjacent register set
          // Live ranges record the highest register in their mask.
          // We want the low register for the AD file writer's convenience.
          OptoReg::Name hi = lrg.reg(); // Get hi register
          OptoReg::Name lo = OptoReg::add(hi, (1-lrg.num_regs())); // Find lo
          // We have to use pair [lo,lo+1] even for wide vectors because
          // the rest of code generation works only with pairs. It is safe
          // since for registers encoding only 'lo' is used.
          // The second reg from the pair is used in ScheduleAndBundle on
          // SPARC, where the vector max size is 8, which corresponds to a
          // register pair.  It is also used in BuildOopMaps, but oop
          // operations are not vectorized.
          set2(i, lo);
        } else {                // Misaligned; extract 2 bits
          OptoReg::Name hi = lrg.reg(); // Get hi register
          lrg.Remove(hi);       // Yank from mask
          int lo = lrg.mask().find_first_elem(); // Find lo
          set_pair(i, hi, lo);
        }
      }
      if( lrg._is_oop ) _node_oops.set(i);
    } else {
      set_bad(i);
    }
  }

  // Done!
  _live = NULL;
  _ifg = NULL;
  C->set_indexSet_arena(NULL);  // ResourceArea is at end of scope
}

void PhaseChaitin::de_ssa() {
  // Set initial Names for all Nodes.  Most Nodes get the virtual register
  // number.  A few get the ZERO live range number.  These do not
  // get allocated, but instead rely on correct scheduling to ensure that
  // only one instance is simultaneously live at a time.
  uint lr_counter = 1;
  for( uint i = 0; i < _cfg.number_of_blocks(); i++ ) {
    Block* block = _cfg.get_block(i);
    uint cnt = block->number_of_nodes();

    // Handle all the normal Nodes in the block
    for( uint j = 0; j < cnt; j++ ) {
      Node *n = block->get_node(j);
      // Pre-color to the zero live range, or pick virtual register
      const RegMask &rm = n->out_RegMask();
      _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
    }
  }

  // Reset the Union-Find mapping to be identity
  _lrg_map.reset_uf_map(lr_counter);
}
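
// Sketch of the mapping de_ssa() produces, with hypothetical node indices:
//
//   N12  defines a value (out_RegMask non-empty)  -> LRG 1
//   N13  store (empty out_RegMask)                -> LRG 0 (un-allocable)
//   N14  defines a value                          -> LRG 2
//
// Virtual register numbers are handed out in block/instruction order, so
// lr_counter ends up one past the last name handed out, which is exactly
// the max_lrg_id that reset_uf_map() needs.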


// Gather LiveRanGe information, including register masks.  Modification of
// cisc spillable in_RegMasks should not be done before AggressiveCoalesce.
void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {

  // Nail down the frame pointer live range
  uint fp_lrg = _lrg_map.live_range_id(_cfg.get_root_node()->in(1)->in(TypeFunc::FramePtr));
  lrgs(fp_lrg)._cost += 1e12;   // Cost is infinite

  // For all blocks
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);

    // For all instructions
    for (uint j = 1; j < block->number_of_nodes(); j++) {
      Node* n = block->get_node(j);
      uint input_edge_start = 1; // Skip the control input on most nodes
      if (n->is_Mach()) {
        input_edge_start = n->as_Mach()->oper_input_base();
      }
      uint idx = n->is_Copy();

      // Get virtual register number, same as LiveRanGe index
      uint vreg = _lrg_map.live_range_id(n);
      LRG& lrg = lrgs(vreg);
      if (vreg) {              // No vreg means un-allocable (e.g. memory)

        // Collect has-copy bit
        if (idx) {
          lrg._has_copy = 1;
          uint clidx = _lrg_map.live_range_id(n->in(idx));
          LRG& copy_src = lrgs(clidx);
          copy_src._has_copy = 1;
        }

        // Check for float-vs-int live range (used in register-pressure
        // calculations)
        const Type *n_type = n->bottom_type();
        if (n_type->is_floatingpoint()) {
          lrg._is_float = 1;
        }

        // Check for twice prior spilling.  Once prior spilling might have
        // spilled 'soft', 2nd prior spill should have spilled 'hard' and
        // further spilling is unlikely to make progress.
        if (_spilled_once.test(n->_idx)) {
          lrg._was_spilled1 = 1;
          if (_spilled_twice.test(n->_idx)) {
            lrg._was_spilled2 = 1;
          }
        }

#ifndef PRODUCT
        if (trace_spilling() && lrg._def != NULL) {
          // collect defs for MultiDef printing
          if (lrg._defs == NULL) {
            lrg._defs = new (_ifg->_arena) GrowableArray<Node*>(_ifg->_arena, 2, 0, NULL);
            lrg._defs->append(lrg._def);
          }
          lrg._defs->append(n);
        }
#endif

        // Check for a single def LRG; these can spill nicely
        // via rematerialization.  Flag as NULL for no def found
        // yet, 'n' for a single def, or NodeSentinel for many defs.
        lrg._def = lrg._def ? NodeSentinel : n;

        // Limit result register mask to acceptable registers
        const RegMask &rm = n->out_RegMask();
        lrg.AND( rm );

        int ireg = n->ideal_reg();
        assert( !n->bottom_type()->isa_oop_ptr() || ireg == Op_RegP,
                "oops must be in Op_RegP's" );

        // Check for vector live range (only if vector register is used).
        // On SPARC a vector uses RegD, which could be misaligned, so it is
        // not processed as a vector in RA.
        if (RegMask::is_vector(ireg))
          lrg._is_vector = 1;
        assert(n_type->isa_vect() == NULL || lrg._is_vector || ireg == Op_RegD || ireg == Op_RegL,
               "vector must be in vector registers");

        // Check for bound register masks
        const RegMask &lrgmask = lrg.mask();
        if (lrgmask.is_bound(ireg)) {
          lrg._is_bound = 1;
        }

        // Check for maximum frequency value
        if (lrg._maxfreq < block->_freq) {
          lrg._maxfreq = block->_freq;
        }

        // Check for oop-iness, or long/double
        // Check for multi-kill projection
        switch (ireg) {
        case MachProjNode::fat_proj:
          // Fat projections have size equal to number of registers killed
          lrg.set_num_regs(rm.Size());
          lrg.set_reg_pressure(lrg.num_regs());
          lrg._fat_proj = 1;
          lrg._is_bound = 1;
          break;
        case Op_RegP:
#ifdef _LP64
          lrg.set_num_regs(2);  // Size is 2 stack words
#else
          lrg.set_num_regs(1);  // Size is 1 stack word
#endif
          // Register pressure is tracked relative to the maximum values
          // suggested for that platform, INTPRESSURE and FLOATPRESSURE,
          // and relative to other types which compete for the same regs.
          //
          // The following table contains suggested values based on the
          // architectures as defined in each .ad file.
          // INTPRESSURE and FLOATPRESSURE may be tuned differently for
          // compile-speed or performance.
          // Note1:
          // SPARC and SPARCV9 reg_pressures are at 2 instead of 1
          // since .ad registers are defined as high and low halves.
          // These reg_pressure values remain compatible with the code
          // in is_high_pressure() which relates get_invalid_mask_size(),
          // Block::_reg_pressure and INTPRESSURE, FLOATPRESSURE.
          // Note2:
          // SPARC -d32 has 24 registers available for integral values,
          // but only 10 of these are safe for 64-bit longs.
          // Using set_reg_pressure(2) for both int and long means
          // the allocator will believe it can fit 26 longs into
          // registers.  Using 2 for longs and 1 for ints means the
          // allocator will attempt to put 52 integers into registers.
          // The settings below limit this problem to methods with
          // many long values which are being run on 32-bit SPARC.
          //
          // ------------------- reg_pressure --------------------
          // Each entry is reg_pressure_per_value,number_of_regs
          //         RegL  RegI  RegFlags   RegF RegD    INTPRESSURE  FLOATPRESSURE
          // IA32     2     1     1          1    1          6           6
          // IA64     1     1     1          1    1         50          41
          // SPARC    2     2     2          2    2         48 (24)     52 (26)
          // SPARCV9  2     2     2          2    2         48 (24)     52 (26)
          // AMD64    1     1     1          1    1         14          15
          // -----------------------------------------------------
#if defined(SPARC)
          lrg.set_reg_pressure(2);  // use for v9 as well
#else
          lrg.set_reg_pressure(1);  // normally one value per register
#endif
          if( n_type->isa_oop_ptr() ) {
            lrg._is_oop = 1;
          }
          break;
        case Op_RegL:           // Check for long or double
        case Op_RegD:
          lrg.set_num_regs(2);
          // Define platform specific register pressure
#if defined(SPARC) || defined(ARM32)
          lrg.set_reg_pressure(2);
#elif defined(IA32)
          if( ireg == Op_RegL ) {
            lrg.set_reg_pressure(2);
          } else {
            lrg.set_reg_pressure(1);
          }
#else
          lrg.set_reg_pressure(1);  // normally one value per register
#endif
          // If this def of a double forces a mis-aligned double,
          // flag as '_fat_proj' - really flag as allowing misalignment
          // AND changes how we count interferences.  A mis-aligned
          // double can interfere with TWO aligned pairs, or effectively
          // FOUR registers!
          if (rm.is_misaligned_pair()) {
            lrg._fat_proj = 1;
            lrg._is_bound = 1;
          }
          break;
        case Op_RegF:
        case Op_RegI:
        case Op_RegN:
        case Op_RegFlags:
        case 0:                 // not an ideal register
          lrg.set_num_regs(1);
#ifdef SPARC
          lrg.set_reg_pressure(2);
#else
          lrg.set_reg_pressure(1);
#endif
          break;
        case Op_VecS:
          assert(Matcher::vector_size_supported(T_BYTE,4), "sanity");
          assert(RegMask::num_registers(Op_VecS) == RegMask::SlotsPerVecS, "sanity");
          lrg.set_num_regs(RegMask::SlotsPerVecS);
          lrg.set_reg_pressure(1);
          break;
        case Op_VecD:
          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecD), "sanity");
          assert(RegMask::num_registers(Op_VecD) == RegMask::SlotsPerVecD, "sanity");
          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecD), "vector should be aligned");
          lrg.set_num_regs(RegMask::SlotsPerVecD);
          lrg.set_reg_pressure(1);
          break;
        case Op_VecX:
          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecX), "sanity");
          assert(RegMask::num_registers(Op_VecX) == RegMask::SlotsPerVecX, "sanity");
          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecX), "vector should be aligned");
          lrg.set_num_regs(RegMask::SlotsPerVecX);
          lrg.set_reg_pressure(1);
          break;
        case Op_VecY:
          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecY), "sanity");
          assert(RegMask::num_registers(Op_VecY) == RegMask::SlotsPerVecY, "sanity");
          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecY), "vector should be aligned");
          lrg.set_num_regs(RegMask::SlotsPerVecY);
          lrg.set_reg_pressure(1);
          break;
        case Op_VecZ:
          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecZ), "sanity");
          assert(RegMask::num_registers(Op_VecZ) == RegMask::SlotsPerVecZ, "sanity");
          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecZ), "vector should be aligned");
          lrg.set_num_regs(RegMask::SlotsPerVecZ);
          lrg.set_reg_pressure(1);
          break;
        default:
          ShouldNotReachHere();
        }
      }

      // Now do the same for inputs
      uint cnt = n->req();
      // Setup for CISC SPILLING
      uint inp = (uint)AdlcVMDeps::Not_cisc_spillable;
      if( UseCISCSpill && after_aggressive ) {
        inp = n->cisc_operand();
        if( inp != (uint)AdlcVMDeps::Not_cisc_spillable )
          // Convert operand number to edge index number
          inp = n->as_Mach()->operand_index(inp);
      }
      // Prepare register mask for each input
      for( uint k = input_edge_start; k < cnt; k++ ) {
        uint vreg = _lrg_map.live_range_id(n->in(k));
        if (!vreg) {
          continue;
        }

        // If this instruction is CISC Spillable, add the flags
        // bit to its appropriate input
        if( UseCISCSpill && after_aggressive && inp == k ) {
#ifndef PRODUCT
          if( TraceCISCSpill ) {
            tty->print("  use_cisc_RegMask: ");
            n->dump();
          }
#endif
          n->as_Mach()->use_cisc_RegMask();
        }

        LRG &lrg = lrgs(vreg);
        // // Testing for floating point code shape
        // Node *test = n->in(k);
        // if( test->is_Mach() ) {
        //   MachNode *m = test->as_Mach();
        //   int  op = m->ideal_Opcode();
        //   if (n->is_Call() && (op == Op_AddF || op == Op_MulF) ) {
        //     int zzz = 1;
        //   }
        // }

        // Limit result register mask to acceptable registers.
        // Do not limit registers from uncommon uses before
        // AggressiveCoalesce.  This effectively pre-virtual-splits
        // around uncommon uses of common defs.
        const RegMask &rm = n->in_RegMask(k);
        if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * block->_freq) {
          // Since we are BEFORE aggressive coalesce, leave the register
          // mask untrimmed by the call.  This encourages more coalescing.
          // Later, AFTER aggressive, this live range will have to spill
          // but the spiller handles slow-path calls very nicely.
        } else {
          lrg.AND( rm );
        }

        // Check for bound register masks
        const RegMask &lrgmask = lrg.mask();
        int kreg = n->in(k)->ideal_reg();
        bool is_vect = RegMask::is_vector(kreg);
        assert(n->in(k)->bottom_type()->isa_vect() == NULL ||
               is_vect || kreg == Op_RegD || kreg == Op_RegL,
               "vector must be in vector registers");
        if (lrgmask.is_bound(kreg))
          lrg._is_bound = 1;

        // If this use of a double forces a mis-aligned double,
        // flag as '_fat_proj' - really flag as allowing misalignment
        // AND changes how we count interferences.  A mis-aligned
        // double can interfere with TWO aligned pairs, or effectively
        // FOUR registers!
#ifdef ASSERT
        if (is_vect) {
          if (lrg.num_regs() != 0) {
            assert(lrgmask.is_aligned_sets(lrg.num_regs()), "vector should be aligned");
            assert(!lrg._fat_proj, "sanity");
            assert(RegMask::num_registers(kreg) == lrg.num_regs(), "sanity");
          } else {
            assert(n->is_Phi(), "not all inputs processed only if Phi");
          }
        }
#endif
        if (!is_vect && lrg.num_regs() == 2 && !lrg._fat_proj && rm.is_misaligned_pair()) {
          lrg._fat_proj = 1;
          lrg._is_bound = 1;
        }
        // if the LRG is an unaligned pair, we will have to spill
        // so clear the LRG's register mask if it is not already spilled
        if (!is_vect && !n->is_SpillCopy() &&
            (lrg._def == NULL || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
            lrgmask.is_misaligned_pair()) {
          lrg.Clear();
        }

        // Check for maximum frequency value
        if (lrg._maxfreq < block->_freq) {
          lrg._maxfreq = block->_freq;
        }

      } // End for all allocated inputs
    } // end for all instructions
  } // end for all blocks

  // Final per-liverange setup
  for (uint i2 = 0; i2 < _lrg_map.max_lrg_id(); i2++) {
    LRG &lrg = lrgs(i2);
    assert(!lrg._is_vector || !lrg._fat_proj, "sanity");
    if (lrg.num_regs() > 1 && !lrg._fat_proj) {
      lrg.clear_to_sets();
    }
    lrg.compute_set_mask_size();
    if (lrg.not_free()) {      // Handle case where we lose from the start
      lrg.set_reg(OptoReg::Name(LRG::SPILL_REG));
      lrg._direct_conflict = 1;
    }
    lrg.set_degree(0);          // no neighbors in IFG yet
  }
}

// Set the was-lo-degree bit.  Conservative coalescing should not change the
// colorability of the graph.  If any live range was of low-degree before
// coalescing, it should Simplify.  This call sets the was-lo-degree bit.
// The bit is checked in Simplify.
void PhaseChaitin::set_was_low() {
#ifdef ASSERT
  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
    int size = lrgs(i).num_regs();
    uint old_was_lo = lrgs(i)._was_lo;
    lrgs(i)._was_lo = 0;
    if( lrgs(i).lo_degree() ) {
      lrgs(i)._was_lo = 1;      // Trivially of low degree
    } else {                    // Else check the Briggs assertion
      // Briggs' observation is that the lo-degree neighbors of a
      // hi-degree live range will not interfere with the color choices
      // of said hi-degree live range.  The Simplify reverse-stack-coloring
      // order takes care of the details.  Hence you do not have to count
      // low-degree neighbors when determining if this guy colors.
      int briggs_degree = 0;
      IndexSet *s = _ifg->neighbors(i);
      IndexSetIterator elements(s);
      uint lidx;
      while((lidx = elements.next()) != 0) {
        if( !lrgs(lidx).lo_degree() )
          briggs_degree += MAX2(size,lrgs(lidx).num_regs());
      }
      if( briggs_degree < lrgs(i).degrees_of_freedom() )
        lrgs(i)._was_lo = 1;    // Low degree via the Briggs assertion
    }
    assert(old_was_lo <= lrgs(i)._was_lo, "_was_lo may not decrease");
  }
#endif
}
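
// Hypothetical numbers for the Briggs test above: a one-register live
// range (size == 1) with degrees_of_freedom() == 6 (say, six registers
// left in its mask) and three hi-degree neighbors needing 1, 2 and 2
// registers gets
//
//   briggs_degree = MAX2(1,1) + MAX2(1,2) + MAX2(1,2) = 5 < 6
//
// so it is guaranteed a color and _was_lo is set, even though its raw
// neighbor count might look high.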

#define REGISTER_CONSTRAINED 16

// Compute cost/area ratio, in case we spill.  Build the lo-degree list.
void PhaseChaitin::cache_lrg_info( ) {
  Compile::TracePhase tp("chaitinCacheLRG", &timers[_t_chaitinCacheLRG]);

  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
    LRG &lrg = lrgs(i);

    // Check for being of low degree: means we can be trivially colored.
    // Low degree, dead or must-spill guys just get to simplify right away
    if( lrg.lo_degree() ||
       !lrg.alive() ||
        lrg._must_spill ) {
      // Split low degree list into those guys that must get a
      // register and those that can go to register or stack.
      // The idea is LRGs that can go register or stack color first when
      // they have a good chance of getting a register.  The register-only
      // lo-degree live ranges always get a register.
      OptoReg::Name hi_reg = lrg.mask().find_last_elem();
      if( OptoReg::is_stack(hi_reg)) { // Can go to stack?
        lrg._next = _lo_stk_degree;
        _lo_stk_degree = i;
      } else {
        lrg._next = _lo_degree;
        _lo_degree = i;
      }
    } else {                    // Else high degree
      lrgs(_hi_degree)._prev = i;
      lrg._next = _hi_degree;
      lrg._prev = 0;
      _hi_degree = i;
    }
  }
}

// Simplify the IFG by removing LRGs of low degree that have NO copies
void PhaseChaitin::Pre_Simplify( ) {

  // Warm up the lo-degree no-copy list
  int lo_no_copy = 0;
  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
    if ((lrgs(i).lo_degree() && !lrgs(i)._has_copy) ||
        !lrgs(i).alive() ||
        lrgs(i)._must_spill) {
      lrgs(i)._next = lo_no_copy;
      lo_no_copy = i;
    }
  }

  while( lo_no_copy ) {
    uint lo = lo_no_copy;
    lo_no_copy = lrgs(lo)._next;
    int size = lrgs(lo).num_regs();

    // Put the simplified guy on the simplified list.
    lrgs(lo)._next = _simplified;
    _simplified = lo;

    // Yank this guy from the IFG.
    IndexSet *adj = _ifg->remove_node( lo );

    // If any neighbors' degrees fall below their number of
    // allowed registers, then put that neighbor on the low degree
    // list.  Note that 'degree' can only fall and 'numregs' is
    // unchanged by this action.  Thus the two are equal at most once,
    // so LRGs hit the lo-degree worklists at most once.
    IndexSetIterator elements(adj);
    uint neighbor;
    while ((neighbor = elements.next()) != 0) {
      LRG *n = &lrgs(neighbor);
      assert( _ifg->effective_degree(neighbor) == n->degree(), "" );

      // Check for just becoming of-low-degree
      if( n->just_lo_degree() && !n->_has_copy ) {
        assert(!(*_ifg->_yanked)[neighbor],"Cannot move to lo degree twice");
        // Put on lo-degree list
        n->_next = lo_no_copy;
        lo_no_copy = neighbor;
      }
    }
  } // End of while lo-degree no_copy worklist not empty

  // No more lo-degree no-copy live ranges to simplify
}

// Simplify the IFG by removing LRGs of low degree.
void PhaseChaitin::Simplify( ) {
  Compile::TracePhase tp("chaitinSimplify", &timers[_t_chaitinSimplify]);

  while( 1 ) {                  // Repeat till simplified it all
    // May want to explore simplifying lo_degree before _lo_stk_degree.
    // This might result in more spills coloring into registers during
    // Select().
    while( _lo_degree || _lo_stk_degree ) {
      // If possible, pull from lo_stk first
      uint lo;
      if( _lo_degree ) {
        lo = _lo_degree;
        _lo_degree = lrgs(lo)._next;
      } else {
        lo = _lo_stk_degree;
        _lo_stk_degree = lrgs(lo)._next;
      }

      // Put the simplified guy on the simplified list.
      lrgs(lo)._next = _simplified;
      _simplified = lo;
      // If this guy is "at risk" then mark his current neighbors
      if( lrgs(lo)._at_risk ) {
        IndexSetIterator elements(_ifg->neighbors(lo));
        uint datum;
        while ((datum = elements.next()) != 0) {
          lrgs(datum)._risk_bias = lo;
        }
      }

      // Yank this guy from the IFG.
      IndexSet *adj = _ifg->remove_node( lo );

      // If any neighbors' degrees fall below their number of
      // allowed registers, then put that neighbor on the low degree
      // list.  Note that 'degree' can only fall and 'numregs' is
      // unchanged by this action.  Thus the two are equal at most once,
      // so LRGs hit the lo-degree worklist at most once.
      IndexSetIterator elements(adj);
      uint neighbor;
      while ((neighbor = elements.next()) != 0) {
        LRG *n = &lrgs(neighbor);
#ifdef ASSERT
        if( VerifyOpto || VerifyRegisterAllocator ) {
          assert( _ifg->effective_degree(neighbor) == n->degree(), "" );
        }
#endif

        // Check for just becoming of-low-degree just counting registers.
        // _must_spill live ranges are already on the low degree list.
        if( n->just_lo_degree() && !n->_must_spill ) {
          assert(!(*_ifg->_yanked)[neighbor],"Cannot move to lo degree twice");
          // Pull from hi-degree list
          uint prev = n->_prev;
          uint next = n->_next;
          if( prev ) lrgs(prev)._next = next;
          else _hi_degree = next;
          lrgs(next)._prev = prev;
          n->_next = _lo_degree;
          _lo_degree = neighbor;
        }
      }
    } // End of while lo-degree/lo_stk_degree worklist not empty

    // Check for got everything: is hi-degree list empty?
    if( !_hi_degree ) break;

    // Time to pick a potential spill guy
    uint lo_score = _hi_degree;
    double score = lrgs(lo_score).score();
    double area = lrgs(lo_score)._area;
    double cost = lrgs(lo_score)._cost;
    bool bound = lrgs(lo_score)._is_bound;

    // Find cheapest guy
    debug_only( int lo_no_simplify=0; );
    for( uint i = _hi_degree; i; i = lrgs(i)._next ) {
      assert( !(*_ifg->_yanked)[i], "" );
      // It's just vaguely possible to move hi-degree to lo-degree without
      // going through a just-lo-degree stage: If you remove a double from
      // a float live range its degree will drop by 2 and you can skip the
      // just-lo-degree stage.  It's very rare (shows up after 5000+ methods
      // in -Xcomp of Java2Demo).  So just choose this guy to simplify next.
      if( lrgs(i).lo_degree() ) {
        lo_score = i;
        break;
      }
      debug_only( if( lrgs(i)._was_lo ) lo_no_simplify=i; );
      double iscore = lrgs(i).score();
      double iarea = lrgs(i)._area;
      double icost = lrgs(i)._cost;
      bool ibound = lrgs(i)._is_bound;

      // Compare cost/area of i vs cost/area of lo_score.  Smaller cost/area
      // wins.  Ties happen because all live ranges in question have spilled
      // a few times before and the spill-score adds a huge number which
      // washes out the low order bits.  We are choosing the lesser of 2
      // evils; in this case pick largest area to spill.
      // Ties also happen when live ranges are defined and used only inside
      // one block, in which case their area is 0 and the score is set to max.
      // In such a case choose a bound live range over an unbound one to free
      // up registers, or the one with the smaller cost to spill.
      if( iscore < score ||
          (iscore == score && iarea > area && lrgs(lo_score)._was_spilled2) ||
          (iscore == score && iarea == area &&
           ( (ibound && !bound) || (ibound == bound && (icost < cost)) )) ) {
        lo_score = i;
        score = iscore;
        area = iarea;
        cost = icost;
        bound = ibound;
      }
    }
    LRG *lo_lrg = &lrgs(lo_score);
    // The live range we choose for spilling is either hi-degree, or very
    // rarely it can be low-degree.  If we choose a hi-degree live range
    // there better not be any lo-degree choices.
    assert( lo_lrg->lo_degree() || !lo_no_simplify, "Live range was lo-degree before coalesce; should simplify" );

    // Pull from hi-degree list
    uint prev = lo_lrg->_prev;
    uint next = lo_lrg->_next;
    if( prev ) lrgs(prev)._next = next;
    else _hi_degree = next;
    lrgs(next)._prev = prev;
    // Jam him on the lo-degree list, despite his high degree.
    // Maybe he'll get a color, and maybe he'll spill.
    // Only Select() will know.
    lrgs(lo_score)._at_risk = true;
    _lo_degree = lo_score;
    lo_lrg->_next = 0;

  } // End of while not simplified everything

}

// Is 'reg' register legal for 'lrg'?
static bool is_legal_reg(LRG &lrg, OptoReg::Name reg, int chunk) {
  if (reg >= chunk && reg < (chunk + RegMask::CHUNK_SIZE) &&
      lrg.mask().Member(OptoReg::add(reg,-chunk))) {
    // RA uses OptoReg names, which represent the highest element of a
    // register set.  For example, vectorX (128bit) on x86 uses the
    // [XMM,XMMb,XMMc,XMMd] set, in which XMMd is used by RA to represent
    // such vectors.  A double value uses [XMM,XMMb] pairs and XMMb is used
    // by RA for it.
    // The register mask uses the largest bits set of overlapping register
    // sets.  On x86 with AVX it uses 8 bits for each XMM register set.
    //
    // The 'lrg' already has a cleared-to-set register mask (done in
    // Select() before calling choose_color()).  Passing the mask.Member(reg)
    // check above indicates that the size (num_regs) of the 'reg' set is
    // less than or equal to the 'lrg' set size.
    // For set size 1 any register which is a member of the 'lrg' mask is legal.
    if (lrg.num_regs()==1)
      return true;
    // For larger sets only an aligned register with the same set size is legal.
    int mask = lrg.num_regs()-1;
    if ((reg&mask) == mask)
      return true;
  }
  return false;
}
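
// A short illustration of the alignment test above, assuming chunk == 0
// and a 4-slot live range (num_regs() == 4, e.g. a vectorX):
//
//   mask = 4 - 1 = 3, so only regs with (reg & 3) == 3 pass -- reg 3, 7,
//   11, ... -- i.e. the highest element of each aligned 4-register set,
//   matching the "highest element" encoding described in the comment.
//
// A 1-slot live range skips the alignment test entirely, since any member
// of its mask is legal.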

// Choose a color using the biasing heuristic
OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) {

  // Check for "at_risk" LRG's
  uint risk_lrg = _lrg_map.find(lrg._risk_bias);
  if( risk_lrg != 0 ) {
    // Walk the colored neighbors of the "at_risk" candidate.
    // Choose a color which is both legal and already taken by a neighbor
    // of the "at_risk" candidate, to improve the chance that the
    // "at_risk" candidate itself colors.
1332     IndexSetIterator elements(_ifg->neighbors(risk_lrg));
1333     uint datum;
1334     while ((datum = elements.next()) != 0) {
1335       OptoReg::Name reg = lrgs(datum).reg();
1336       // If this LRG's register is legal for us, choose it
1337       if (is_legal_reg(lrg, reg, chunk))
1338         return reg;
1339     }
1340   }
1341 
1342   uint copy_lrg = _lrg_map.find(lrg._copy_bias);
1343   if( copy_lrg != 0 ) {
1344     // If he has a color,
1345     if( !(*(_ifg->_yanked))[copy_lrg] ) {
1346       OptoReg::Name reg = lrgs(copy_lrg).reg();
1347       //  And it is legal for you,
1348       if (is_legal_reg(lrg, reg, chunk))
1349         return reg;
1350     } else if( chunk == 0 ) {
1351       // Choose a color which is legal for him
1352       RegMask tempmask = lrg.mask();
1353       tempmask.AND(lrgs(copy_lrg).mask());
1354       tempmask.clear_to_sets(lrg.num_regs());
1355       OptoReg::Name reg = tempmask.find_first_set(lrg.num_regs());
1356       if (OptoReg::is_valid(reg))
1357         return reg;
1358     }
1359   }
1360 
1361   // If no bias info exists, just go with the register selection ordering
1362   if (lrg._is_vector || lrg.num_regs() == 2) {
1363     // Find an aligned set
1364     return OptoReg::add(lrg.mask().find_first_set(lrg.num_regs()),chunk);
1365   }
1366 
1367   // CNC - Fun hack.  Alternate 1st and 2nd selection.  Enables post-allocate
1368   // copy removal to remove many more copies, by preventing a just-assigned
1369   // register from being repeatedly assigned.
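  // (Illustrative: with candidates {EAX,ECX}, consecutive unbiased choices
  // alternate EAX, ECX, EAX, ... instead of always returning EAX.)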
1370   OptoReg::Name reg = lrg.mask().find_first_elem();
1371   if( (++_alternate & 1) && OptoReg::is_valid(reg) ) {
1372     // This 'Remove; find; Insert' idiom is an expensive way to find the
1373     // SECOND element in the mask.
1374     lrg.Remove(reg);
1375     OptoReg::Name reg2 = lrg.mask().find_first_elem();
1376     lrg.Insert(reg);
1377     if( OptoReg::is_reg(reg2))
1378       reg = reg2;
1379   }
1380   return OptoReg::add( reg, chunk );
1381 }
1382 
1383 // Choose a color in the current chunk
1384 OptoReg::Name PhaseChaitin::choose_color( LRG &lrg, int chunk ) {
1385   assert( C->in_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP-1)), "must not allocate stack0 (inside preserve area)");
1386   assert(C->out_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP+0)), "must not allocate stack0 (inside preserve area)");
1387 
1388   if( lrg.num_regs() == 1 ||    // Common Case
1389       !lrg._fat_proj )          // Aligned+adjacent pairs ok
1390     // Use a heuristic to "bias" the color choice
1391     return bias_color(lrg, chunk);
1392 
  assert(!lrg._is_vector, "should not be a vector here" );
1394   assert( lrg.num_regs() >= 2, "dead live ranges do not color" );
1395 
1396   // Fat-proj case or misaligned double argument.
1397   assert(lrg.compute_mask_size() == lrg.num_regs() ||
1398          lrg.num_regs() == 2,"fat projs exactly color" );
1399   assert( !chunk, "always color in 1st chunk" );
1400   // Return the highest element in the set.
1401   return lrg.mask().find_last_elem();
1402 }
1403 
1404 // Select colors by re-inserting LRGs back into the IFG.  LRGs are re-inserted
1405 // in reverse order of removal.  As long as nothing of hi-degree was yanked,
1406 // everything going back is guaranteed a color.  Select that color.  If some
1407 // hi-degree LRG cannot get a color then we record that we must spill.
1408 uint PhaseChaitin::Select( ) {
1409   Compile::TracePhase tp("chaitinSelect", &timers[_t_chaitinSelect]);
1410 
1411   uint spill_reg = LRG::SPILL_REG;
1412   _max_reg = OptoReg::Name(0);  // Past max register used
1413   while( _simplified ) {
1414     // Pull next LRG from the simplified list - in reverse order of removal
1415     uint lidx = _simplified;
1416     LRG *lrg = &lrgs(lidx);
    _simplified = lrg->_next;

1420 #ifndef PRODUCT
1421     if (trace_spilling()) {
1422       ttyLocker ttyl;
1423       tty->print_cr("L%d selecting degree %d degrees_of_freedom %d", lidx, lrg->degree(),
1424                     lrg->degrees_of_freedom());
1425       lrg->dump();
1426     }
1427 #endif
1428 
1429     // Re-insert into the IFG
1430     _ifg->re_insert(lidx);
1431     if( !lrg->alive() ) continue;
1432     // capture allstackedness flag before mask is hacked
1433     const int is_allstack = lrg->mask().is_AllStack();
1434 
1435     // Yeah, yeah, yeah, I know, I know.  I can refactor this
1436     // to avoid the GOTO, although the refactored code will not
1437     // be much clearer.  We arrive here IFF we have a stack-based
1438     // live range that cannot color in the current chunk, and it
1439     // has to move into the next free stack chunk.
1440     int chunk = 0;              // Current chunk is first chunk
1441     retry_next_chunk:
1442 
1443     // Remove neighbor colors
1444     IndexSet *s = _ifg->neighbors(lidx);
1445 
1446     debug_only(RegMask orig_mask = lrg->mask();)
1447     IndexSetIterator elements(s);
1448     uint neighbor;
1449     while ((neighbor = elements.next()) != 0) {
1450       // Note that neighbor might be a spill_reg.  In this case, exclusion
1451       // of its color will be a no-op, since the spill_reg chunk is in outer
1452       // space.  Also, if neighbor is in a different chunk, this exclusion
1453       // will be a no-op.  (Later on, if lrg runs out of possible colors in
1454       // its chunk, a new chunk of color may be tried, in which case
1455       // examination of neighbors is started again, at retry_next_chunk.)
1456       LRG &nlrg = lrgs(neighbor);
1457       OptoReg::Name nreg = nlrg.reg();
1458       // Only subtract masks in the same chunk
1459       if( nreg >= chunk && nreg < chunk + RegMask::CHUNK_SIZE ) {
1460 #ifndef PRODUCT
1461         uint size = lrg->mask().Size();
1462         RegMask rm = lrg->mask();
1463 #endif
1464         lrg->SUBTRACT(nlrg.mask());
1465 #ifndef PRODUCT
1466         if (trace_spilling() && lrg->mask().Size() != size) {
1467           ttyLocker ttyl;
1468           tty->print("L%d ", lidx);
1469           rm.dump();
1470           tty->print(" intersected L%d ", neighbor);
1471           nlrg.mask().dump();
1472           tty->print(" removed ");
1473           rm.SUBTRACT(lrg->mask());
1474           rm.dump();
1475           tty->print(" leaving ");
1476           lrg->mask().dump();
1477           tty->cr();
1478         }
1479 #endif
1480       }
1481     }
1482     //assert(is_allstack == lrg->mask().is_AllStack(), "nbrs must not change AllStackedness");
1483     // Aligned pairs need aligned masks
1484     assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
1485     if (lrg->num_regs() > 1 && !lrg->_fat_proj) {
1486       lrg->clear_to_sets();
1487     }
1488 
1489     // Check if a color is available and if so pick the color
1490     OptoReg::Name reg = choose_color( *lrg, chunk );
1491 #ifdef SPARC
1492     debug_only(lrg->compute_set_mask_size());
1493     assert(lrg->num_regs() < 2 || lrg->is_bound() || is_even(reg-1), "allocate all doubles aligned");
1494 #endif
1495 
1496     //---------------
1497     // If we fail to color and the AllStack flag is set, trigger
1498     // a chunk-rollover event
1499     if(!OptoReg::is_valid(OptoReg::add(reg,-chunk)) && is_allstack) {
1500       // Bump register mask up to next stack chunk
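      // Sketch of the chunk arithmetic: each chunk spans RegMask::CHUNK_SIZE
      // OptoRegs, and chunks past the first are purely stack slots.  Set_All()
      // re-opens the full mask so the neighbor subtraction above can be
      // redone against the new chunk (via retry_next_chunk).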
1501       chunk += RegMask::CHUNK_SIZE;
1502       lrg->Set_All();
1503 
1504       goto retry_next_chunk;
1505     }
1506 
1507     //---------------
1508     // Did we get a color?
1509     else if( OptoReg::is_valid(reg)) {
1510 #ifndef PRODUCT
1511       RegMask avail_rm = lrg->mask();
1512 #endif
1513 
1514       // Record selected register
1515       lrg->set_reg(reg);
1516 
1517       if( reg >= _max_reg )     // Compute max register limit
1518         _max_reg = OptoReg::add(reg,1);
1519       // Fold reg back into normal space
1520       reg = OptoReg::add(reg,-chunk);
1521 
1522       // If the live range is not bound, then we actually had some choices
1523       // to make.  In this case, the mask has more bits in it than the colors
1524       // chosen.  Restrict the mask to just what was picked.
1525       int n_regs = lrg->num_regs();
1526       assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
1527       if (n_regs == 1 || !lrg->_fat_proj) {
1528         assert(!lrg->_is_vector || n_regs <= RegMask::SlotsPerVecZ, "sanity");
1529         lrg->Clear();           // Clear the mask
1530         lrg->Insert(reg);       // Set regmask to match selected reg
        // For vectors and pairs, also insert the remaining low members of the set
1532         for (int i = 1; i < n_regs; i++)
1533           lrg->Insert(OptoReg::add(reg,-i));
1534         lrg->set_mask_size(n_regs);
1535       } else {                  // Else fatproj
1536         // mask must be equal to fatproj bits, by definition
1537       }
1538 #ifndef PRODUCT
1539       if (trace_spilling()) {
1540         ttyLocker ttyl;
1541         tty->print("L%d selected ", lidx);
1542         lrg->mask().dump();
1543         tty->print(" from ");
1544         avail_rm.dump();
1545         tty->cr();
1546       }
1547 #endif
1548       // Note that reg is the highest-numbered register in the newly-bound mask.
1549     } // end color available case
1550 
1551     //---------------
1552     // Live range is live and no colors available
1553     else {
      assert( lrg->alive(), "must be alive to be spilled" );
1555       assert( !lrg->_fat_proj || lrg->is_multidef() ||
1556               lrg->_def->outcnt() > 0, "fat_proj cannot spill");
1557       assert( !orig_mask.is_AllStack(), "All Stack does not spill" );
1558 
      // Assign the special spill register
1560       lrg->set_reg(OptoReg::Name(spill_reg++));
1561       // Do not empty the regmask; leave mask_size lying around
1562       // for use during Spilling
1563 #ifndef PRODUCT
1564       if( trace_spilling() ) {
1565         ttyLocker ttyl;
1566         tty->print("L%d spilling with neighbors: ", lidx);
1567         s->dump();
1568         debug_only(tty->print(" original mask: "));
1569         debug_only(orig_mask.dump());
1570         dump_lrg(lidx);
1571       }
1572 #endif
1573     } // end spill case
1574 
1575   }
1576 
1577   return spill_reg-LRG::SPILL_REG;      // Return number of spills
1578 }
1579 
1580 // Copy 'was_spilled'-edness from the source Node to the dst Node.
1581 void PhaseChaitin::copy_was_spilled( Node *src, Node *dst ) {
1582   if( _spilled_once.test(src->_idx) ) {
1583     _spilled_once.set(dst->_idx);
1584     lrgs(_lrg_map.find(dst))._was_spilled1 = 1;
1585     if( _spilled_twice.test(src->_idx) ) {
1586       _spilled_twice.set(dst->_idx);
1587       lrgs(_lrg_map.find(dst))._was_spilled2 = 1;
1588     }
1589   }
1590 }
1591 
1592 // Set the 'spilled_once' or 'spilled_twice' flag on a node.
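// Note: test_set() returns the prior value of the bit, so the first call
// marks the node spilled-once and a second call on the same node promotes
// it to spilled-twice.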
1593 void PhaseChaitin::set_was_spilled( Node *n ) {
1594   if( _spilled_once.test_set(n->_idx) )
1595     _spilled_twice.set(n->_idx);
1596 }
1597 
1598 // Convert Ideal spill instructions into proper FramePtr + offset Loads and
1599 // Stores.  Use-def chains are NOT preserved, but Node->LRG->reg maps are.
1600 void PhaseChaitin::fixup_spills() {
  // This function only does CISC spill work.
1602   if( !UseCISCSpill ) return;
1603 
1604   Compile::TracePhase tp("fixupSpills", &timers[_t_fixupSpills]);
1605 
1606   // Grab the Frame Pointer
1607   Node *fp = _cfg.get_root_block()->head()->in(1)->in(TypeFunc::FramePtr);
1608 
1609   // For all blocks
1610   for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
1611     Block* block = _cfg.get_block(i);
1612 
1613     // For all instructions in block
1614     uint last_inst = block->end_idx();
1615     for (uint j = 1; j <= last_inst; j++) {
1616       Node* n = block->get_node(j);
1617 
1618       // Dead instruction???
1619       assert( n->outcnt() != 0 ||// Nothing dead after post alloc
1620               C->top() == n ||  // Or the random TOP node
1621               n->is_Proj(),     // Or a fat-proj kill node
1622               "No dead instructions after post-alloc" );
1623 
1624       int inp = n->cisc_operand();
1625       if( inp != AdlcVMDeps::Not_cisc_spillable ) {
1626         // Convert operand number to edge index number
1627         MachNode *mach = n->as_Mach();
1628         inp = mach->operand_index(inp);
1629         Node *src = n->in(inp);   // Value to load or store
1630         LRG &lrg_cisc = lrgs(_lrg_map.find_const(src));
1631         OptoReg::Name src_reg = lrg_cisc.reg();
1632         // Doubles record the HIGH register of an adjacent pair.
1633         src_reg = OptoReg::add(src_reg,1-lrg_cisc.num_regs());
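        // Illustrative: a spilled double occupies two stack slots and reg()
        // names the HIGH slot; adding 1-num_regs (== -1) yields the low slot,
        // whose offset is the true base address of the value.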
1634         if( OptoReg::is_stack(src_reg) ) { // If input is on stack
1635           // This is a CISC Spill, get stack offset and construct new node
1636 #ifndef PRODUCT
1637           if( TraceCISCSpill ) {
1638             tty->print("    reg-instr:  ");
1639             n->dump();
1640           }
1641 #endif
1642           int stk_offset = reg2offset(src_reg);
1643           // Bailout if we might exceed node limit when spilling this instruction
1644           C->check_node_count(0, "out of nodes fixing spills");
1645           if (C->failing())  return;
1646           // Transform node
1647           MachNode *cisc = mach->cisc_version(stk_offset)->as_Mach();
1648           cisc->set_req(inp,fp);          // Base register is frame pointer
1649           if( cisc->oper_input_base() > 1 && mach->oper_input_base() <= 1 ) {
1650             assert( cisc->oper_input_base() == 2, "Only adding one edge");
1651             cisc->ins_req(1,src);         // Requires a memory edge
1652           }
1653           block->map_node(cisc, j);          // Insert into basic block
1654           n->subsume_by(cisc, C); // Correct graph
1655           //
1656           ++_used_cisc_instructions;
1657 #ifndef PRODUCT
1658           if( TraceCISCSpill ) {
1659             tty->print("    cisc-instr: ");
1660             cisc->dump();
1661           }
1662 #endif
1663         } else {
1664 #ifndef PRODUCT
1665           if( TraceCISCSpill ) {
1666             tty->print("    using reg-instr: ");
1667             n->dump();
1668           }
1669 #endif
          ++_unused_cisc_instructions;    // input stayed in a register
1671         }
1672       }
1673 
1674     } // End of for all instructions
1675 
1676   } // End of for all blocks
1677 }
1678 
// Helper for stretch_base_pointer_live_ranges(), below; recursively discover
// the base Node for a given derived Node.  Easy for AddP-related machine
// nodes, but needs to be recursive for derived Phis.
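// Illustrative: for a machine AddP 'p = base + #12', the base is simply the
// node's Base input; for 'p = Phi(p1, p2)' the bases of p1 and p2 must be
// found recursively and, if they differ, merged with a base Phi.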
1682 Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derived, uint &maxlrg ) {
1683   // See if already computed; if so return it
1684   if( derived_base_map[derived->_idx] )
1685     return derived_base_map[derived->_idx];
1686 
1687   // See if this happens to be a base.
1688   // NOTE: we use TypePtr instead of TypeOopPtr because we can have
1689   // pointers derived from NULL!  These are always along paths that
1690   // can't happen at run-time but the optimizer cannot deduce it so
1691   // we have to handle it gracefully.
1692   assert(!derived->bottom_type()->isa_narrowoop() ||
1693           derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
1694   const TypePtr *tj = derived->bottom_type()->isa_ptr();
  // If it's an OOP with a non-zero offset, then it is derived.
1696   if( tj == NULL || tj->_offset == 0 ) {
1697     derived_base_map[derived->_idx] = derived;
1698     return derived;
1699   }
1700   // Derived is NULL+offset?  Base is NULL!
1701   if( derived->is_Con() ) {
1702     Node *base = _matcher.mach_null();
1703     assert(base != NULL, "sanity");
1704     if (base->in(0) == NULL) {
1705       // Initialize it once and make it shared:
1706       // set control to _root and place it into Start block
1707       // (where top() node is placed).
1708       base->init_req(0, _cfg.get_root_node());
1709       Block *startb = _cfg.get_block_for_node(C->top());
1710       uint node_pos = startb->find_node(C->top());
1711       startb->insert_node(base, node_pos);
1712       _cfg.map_node_to_block(base, startb);
1713       assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
1714 
1715       // The loadConP0 might have projection nodes depending on architecture
1716       // Add the projection nodes to the CFG
1717       for (DUIterator_Fast imax, i = base->fast_outs(imax); i < imax; i++) {
1718         Node* use = base->fast_out(i);
1719         if (use->is_MachProj()) {
1720           startb->insert_node(use, ++node_pos);
1721           _cfg.map_node_to_block(use, startb);
1722           new_lrg(use, maxlrg++);
1723         }
1724       }
1725     }
1726     if (_lrg_map.live_range_id(base) == 0) {
1727       new_lrg(base, maxlrg++);
1728     }
1729     assert(base->in(0) == _cfg.get_root_node() && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base NULL should be shared");
1730     derived_base_map[derived->_idx] = base;
1731     return base;
1732   }
1733 
1734   // Check for AddP-related opcodes
1735   if (!derived->is_Phi()) {
1736     assert(derived->as_Mach()->ideal_Opcode() == Op_AddP, "but is: %s", derived->Name());
1737     Node *base = derived->in(AddPNode::Base);
1738     derived_base_map[derived->_idx] = base;
1739     return base;
1740   }
1741 
1742   // Recursively find bases for Phis.
1743   // First check to see if we can avoid a base Phi here.
1744   Node *base = find_base_for_derived( derived_base_map, derived->in(1),maxlrg);
1745   uint i;
1746   for( i = 2; i < derived->req(); i++ )
1747     if( base != find_base_for_derived( derived_base_map,derived->in(i),maxlrg))
1748       break;
1749   // Went to the end without finding any different bases?
1750   if( i == derived->req() ) {   // No need for a base Phi here
1751     derived_base_map[derived->_idx] = base;
1752     return base;
1753   }
1754 
1755   // Now we see we need a base-Phi here to merge the bases
1756   const Type *t = base->bottom_type();
1757   base = new PhiNode( derived->in(0), t );
1758   for( i = 1; i < derived->req(); i++ ) {
1759     base->init_req(i, find_base_for_derived(derived_base_map, derived->in(i), maxlrg));
1760     t = t->meet(base->in(i)->bottom_type());
1761   }
1762   base->as_Phi()->set_type(t);
1763 
1764   // Search the current block for an existing base-Phi
1765   Block *b = _cfg.get_block_for_node(derived);
1766   for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
1767     Node *phi = b->get_node(i);
1768     if( !phi->is_Phi() ) {      // Found end of Phis with no match?
1769       b->insert_node(base,  i); // Must insert created Phi here as base
1770       _cfg.map_node_to_block(base, b);
1771       new_lrg(base,maxlrg++);
1772       break;
1773     }
1774     // See if Phi matches.
1775     uint j;
1776     for( j = 1; j < base->req(); j++ )
1777       if( phi->in(j) != base->in(j) &&
1778           !(phi->in(j)->is_Con() && base->in(j)->is_Con()) ) // allow different NULLs
1779         break;
1780     if( j == base->req() ) {    // All inputs match?
1781       base = phi;               // Then use existing 'phi' and drop 'base'
1782       break;
1783     }
1784   }
1785 
1786 
1787   // Cache info for later passes
1788   derived_base_map[derived->_idx] = base;
1789   return base;
1790 }
1791 
1792 // At each Safepoint, insert extra debug edges for each pair of derived value/
1793 // base pointer that is live across the Safepoint for oopmap building.  The
1794 // edge pairs get added in after sfpt->jvmtail()->oopoff(), but are in the
1795 // required edge set.
1796 bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
  bool must_recompute_live = false;
1798   uint maxlrg = _lrg_map.max_lrg_id();
1799   Node **derived_base_map = (Node**)a->Amalloc(sizeof(Node*)*C->unique());
1800   memset( derived_base_map, 0, sizeof(Node*)*C->unique() );
1801 
1802   // For all blocks in RPO do...
1803   for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
1804     Block* block = _cfg.get_block(i);
1805     // Note use of deep-copy constructor.  I cannot hammer the original
1806     // liveout bits, because they are needed by the following coalesce pass.
1807     IndexSet liveout(_live->live(block));
1808 
1809     for (uint j = block->end_idx() + 1; j > 1; j--) {
1810       Node* n = block->get_node(j - 1);
1811 
1812       // Pre-split compares of loop-phis.  Loop-phis form a cycle we would
1813       // like to see in the same register.  Compare uses the loop-phi and so
1814       // extends its live range BUT cannot be part of the cycle.  If this
1815       // extended live range overlaps with the update of the loop-phi value
1816       // we need both alive at the same time -- which requires at least 1
      // copy.  But because Intel has only 2-address instructions we end up with
1818       // at least 2 copies, one before the loop-phi update instruction and
1819       // one after.  Instead we split the input to the compare just after the
1820       // phi.
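      // Illustrative transform (sketch):
      //   before:  phi = Phi(...);  ...  CmpI(phi, x)
      //   after:   phi = Phi(...);  spill = SpillCopy(phi);  ...  CmpI(spill, x)
      // The compare now extends the copy's live range, not the phi's.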
1821       if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) {
1822         Node *phi = n->in(1);
1823         if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
1824           Block *phi_block = _cfg.get_block_for_node(phi);
1825           if (_cfg.get_block_for_node(phi_block->pred(2)) == block) {
1826             const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
1827             Node *spill = new MachSpillCopyNode(MachSpillCopyNode::LoopPhiInput, phi, *mask, *mask);
1828             insert_proj( phi_block, 1, spill, maxlrg++ );
1829             n->set_req(1,spill);
1830             must_recompute_live = true;
1831           }
1832         }
1833       }
1834 
1835       // Get value being defined
1836       uint lidx = _lrg_map.live_range_id(n);
1837       // Ignore the occasional brand-new live range
1838       if (lidx && lidx < _lrg_map.max_lrg_id()) {
1839         // Remove from live-out set
1840         liveout.remove(lidx);
1841 
1842         // Copies do not define a new value and so do not interfere.
        // Remove the copy's source from the liveout set before interfering.
1844         uint idx = n->is_Copy();
1845         if (idx) {
1846           liveout.remove(_lrg_map.live_range_id(n->in(idx)));
1847         }
1848       }
1849 
1850       // Found a safepoint?
1851       JVMState *jvms = n->jvms();
1852       if( jvms ) {
1853         // Now scan for a live derived pointer
1854         IndexSetIterator elements(&liveout);
1855         uint neighbor;
1856         while ((neighbor = elements.next()) != 0) {
1857           // Find reaching DEF for base and derived values
1858           // This works because we are still in SSA during this call.
1859           Node *derived = lrgs(neighbor)._def;
1860           const TypePtr *tj = derived->bottom_type()->isa_ptr();
1861           assert(!derived->bottom_type()->isa_narrowoop() ||
1862                   derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
          // If it's an OOP with a non-zero offset, then it is derived.
1864           if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) {
1865             Node *base = find_base_for_derived(derived_base_map, derived, maxlrg);
1866             assert(base->_idx < _lrg_map.size(), "");
1867             // Add reaching DEFs of derived pointer and base pointer as a
1868             // pair of inputs
1869             n->add_req(derived);
1870             n->add_req(base);
1871 
1872             // See if the base pointer is already live to this point.
1873             // Since I'm working on the SSA form, live-ness amounts to
1874             // reaching def's.  So if I find the base's live range then
1875             // I know the base's def reaches here.
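            // The condition below reads: the base is not already live here
            // (either brand new, hence unmapped, or absent from liveout),
            // it is a real (non-constant) live range, and its def is in a
            // different block -- only then must liveness be recomputed.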
1876             if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
1877                  !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
1878                  (_lrg_map.live_range_id(base) > 0) && // not a constant
1879                  _cfg.get_block_for_node(base) != block) { // base not def'd in blk)
1880               // Base pointer is not currently live.  Since I stretched
1881               // the base pointer to here and it crosses basic-block
1882               // boundaries, the global live info is now incorrect.
1883               // Recompute live.
1884               must_recompute_live = true;
1885             } // End of if base pointer is not live to debug info
1886           }
1887         } // End of scan all live data for derived ptrs crossing GC point
1888       } // End of if found a GC point
1889 
1890       // Make all inputs live
1891       if (!n->is_Phi()) {      // Phi function uses come from prior block
1892         for (uint k = 1; k < n->req(); k++) {
1893           uint lidx = _lrg_map.live_range_id(n->in(k));
1894           if (lidx < _lrg_map.max_lrg_id()) {
1895             liveout.insert(lidx);
1896           }
1897         }
1898       }
1899 
1900     } // End of forall instructions in block
1901     liveout.clear();  // Free the memory used by liveout.
1902 
1903   } // End of forall blocks
1904   _lrg_map.set_max_lrg_id(maxlrg);
1905 
1906   // If I created a new live range I need to recompute live
1907   if (maxlrg != _ifg->_maxlrg) {
1908     must_recompute_live = true;
1909   }
1910 
  return must_recompute_live;
1912 }
1913 
// Extend the node to LRG mapping
1916 void PhaseChaitin::add_reference(const Node *node, const Node *old_node) {
1917   _lrg_map.extend(node->_idx, _lrg_map.live_range_id(old_node));
1918 }
1919 
1920 #ifndef PRODUCT
1921 void PhaseChaitin::dump(const Node *n) const {
1922   uint r = (n->_idx < _lrg_map.size()) ? _lrg_map.find_const(n) : 0;
1923   tty->print("L%d",r);
1924   if (r && n->Opcode() != Op_Phi) {
1925     if( _node_regs ) {          // Got a post-allocation copy of allocation?
1926       tty->print("[");
1927       OptoReg::Name second = get_reg_second(n);
1928       if( OptoReg::is_valid(second) ) {
1929         if( OptoReg::is_reg(second) )
1930           tty->print("%s:",Matcher::regName[second]);
1931         else
1932           tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(second));
1933       }
1934       OptoReg::Name first = get_reg_first(n);
1935       if( OptoReg::is_reg(first) )
1936         tty->print("%s]",Matcher::regName[first]);
      else
        tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(first));
    } else {
      n->out_RegMask().dump();
    }
1941   }
1942   tty->print("/N%d\t",n->_idx);
1943   tty->print("%s === ", n->Name());
1944   uint k;
1945   for (k = 0; k < n->req(); k++) {
1946     Node *m = n->in(k);
1947     if (!m) {
1948       tty->print("_ ");
1949     }
1950     else {
1951       uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
1952       tty->print("L%d",r);
      // Data MultiNodes can have projections with no real registers.
1954       // Don't die while dumping them.
1955       int op = n->Opcode();
1956       if( r && op != Op_Phi && op != Op_Proj && op != Op_SCMemProj) {
1957         if( _node_regs ) {
1958           tty->print("[");
1959           OptoReg::Name second = get_reg_second(n->in(k));
1960           if( OptoReg::is_valid(second) ) {
1961             if( OptoReg::is_reg(second) )
1962               tty->print("%s:",Matcher::regName[second]);
1963             else
1964               tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer),
1965                          reg2offset_unchecked(second));
1966           }
1967           OptoReg::Name first = get_reg_first(n->in(k));
1968           if( OptoReg::is_reg(first) )
1969             tty->print("%s]",Matcher::regName[first]);
1970           else
1971             tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer),
1972                        reg2offset_unchecked(first));
1973         } else
1974           n->in_RegMask(k).dump();
1975       }
1976       tty->print("/N%d ",m->_idx);
1977     }
1978   }
1979   if( k < n->len() && n->in(k) ) tty->print("| ");
1980   for( ; k < n->len(); k++ ) {
1981     Node *m = n->in(k);
1982     if(!m) {
1983       break;
1984     }
1985     uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
1986     tty->print("L%d",r);
1987     tty->print("/N%d ",m->_idx);
1988   }
1989   if( n->is_Mach() ) n->as_Mach()->dump_spec(tty);
1990   else n->dump_spec(tty);
1991   if( _spilled_once.test(n->_idx ) ) {
1992     tty->print(" Spill_1");
1993     if( _spilled_twice.test(n->_idx ) )
1994       tty->print(" Spill_2");
1995   }
1996   tty->print("\n");
1997 }
1998 
1999 void PhaseChaitin::dump(const Block *b) const {
2000   b->dump_head(&_cfg);
2001 
2002   // For all instructions
2003   for( uint j = 0; j < b->number_of_nodes(); j++ )
2004     dump(b->get_node(j));
2005   // Print live-out info at end of block
2006   if( _live ) {
2007     tty->print("Liveout: ");
2008     IndexSet *live = _live->live(b);
2009     IndexSetIterator elements(live);
2010     tty->print("{");
2011     uint i;
2012     while ((i = elements.next()) != 0) {
2013       tty->print("L%d ", _lrg_map.find_const(i));
2014     }
2015     tty->print_cr("}");
2016   }
2017   tty->print("\n");
2018 }
2019 
2020 void PhaseChaitin::dump() const {
2021   tty->print( "--- Chaitin -- argsize: %d  framesize: %d ---\n",
2022               _matcher._new_SP, _framesize );
2023 
2024   // For all blocks
2025   for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
2026     dump(_cfg.get_block(i));
2027   }
2028   // End of per-block dump
2029   tty->print("\n");
2030 
2031   if (!_ifg) {
2032     tty->print("(No IFG.)\n");
2033     return;
2034   }
2035 
2036   // Dump LRG array
2037   tty->print("--- Live RanGe Array ---\n");
2038   for (uint i2 = 1; i2 < _lrg_map.max_lrg_id(); i2++) {
2039     tty->print("L%d: ",i2);
2040     if (i2 < _ifg->_maxlrg) {
2041       lrgs(i2).dump();
2042     }
2043     else {
2044       tty->print_cr("new LRG");
2045     }
2046   }
2047   tty->cr();
2048 
2049   // Dump lo-degree list
2050   tty->print("Lo degree: ");
2051   for(uint i3 = _lo_degree; i3; i3 = lrgs(i3)._next )
2052     tty->print("L%d ",i3);
2053   tty->cr();
2054 
2055   // Dump lo-stk-degree list
2056   tty->print("Lo stk degree: ");
2057   for(uint i4 = _lo_stk_degree; i4; i4 = lrgs(i4)._next )
2058     tty->print("L%d ",i4);
2059   tty->cr();
2060 
  // Dump hi-degree list
2062   tty->print("Hi degree: ");
2063   for(uint i5 = _hi_degree; i5; i5 = lrgs(i5)._next )
2064     tty->print("L%d ",i5);
2065   tty->cr();
2066 }
2067 
2068 void PhaseChaitin::dump_degree_lists() const {
2069   // Dump lo-degree list
2070   tty->print("Lo degree: ");
2071   for( uint i = _lo_degree; i; i = lrgs(i)._next )
2072     tty->print("L%d ",i);
2073   tty->cr();
2074 
2075   // Dump lo-stk-degree list
2076   tty->print("Lo stk degree: ");
2077   for(uint i2 = _lo_stk_degree; i2; i2 = lrgs(i2)._next )
2078     tty->print("L%d ",i2);
2079   tty->cr();
2080 
  // Dump hi-degree list
2082   tty->print("Hi degree: ");
2083   for(uint i3 = _hi_degree; i3; i3 = lrgs(i3)._next )
2084     tty->print("L%d ",i3);
2085   tty->cr();
2086 }
2087 
2088 void PhaseChaitin::dump_simplified() const {
2089   tty->print("Simplified: ");
2090   for( uint i = _simplified; i; i = lrgs(i)._next )
2091     tty->print("L%d ",i);
2092   tty->cr();
2093 }
2094 
2095 static char *print_reg( OptoReg::Name reg, const PhaseChaitin *pc, char *buf ) {
2096   if ((int)reg < 0)
2097     sprintf(buf, "<OptoReg::%d>", (int)reg);
2098   else if (OptoReg::is_reg(reg))
2099     strcpy(buf, Matcher::regName[reg]);
2100   else
2101     sprintf(buf,"%s + #%d",OptoReg::regname(OptoReg::c_frame_pointer),
2102             pc->reg2offset(reg));
2103   return buf+strlen(buf);
2104 }
2105 
2106 // Dump a register name into a buffer.  Be intelligent if we get called
2107 // before allocation is complete.
2108 char *PhaseChaitin::dump_register( const Node *n, char *buf  ) const {
2109   if( this == NULL ) {          // Not got anything?
2110     sprintf(buf,"N%d",n->_idx); // Then use Node index
2111   } else if( _node_regs ) {
2112     // Post allocation, use direct mappings, no LRG info available
2113     print_reg( get_reg_first(n), this, buf );
2114   } else {
2115     uint lidx = _lrg_map.find_const(n); // Grab LRG number
2116     if( !_ifg ) {
2117       sprintf(buf,"L%d",lidx);  // No register binding yet
2118     } else if( !lidx ) {        // Special, not allocated value
2119       strcpy(buf,"Special");
2120     } else {
2121       if (lrgs(lidx)._is_vector) {
2122         if (lrgs(lidx).mask().is_bound_set(lrgs(lidx).num_regs()))
2123           print_reg( lrgs(lidx).reg(), this, buf ); // a bound machine register
2124         else
2125           sprintf(buf,"L%d",lidx); // No register binding yet
2126       } else if( (lrgs(lidx).num_regs() == 1)
2127                  ? lrgs(lidx).mask().is_bound1()
2128                  : lrgs(lidx).mask().is_bound_pair() ) {
2129         // Hah!  We have a bound machine register
2130         print_reg( lrgs(lidx).reg(), this, buf );
2131       } else {
2132         sprintf(buf,"L%d",lidx); // No register binding yet
2133       }
2134     }
2135   }
2136   return buf+strlen(buf);
2137 }
2138 
2139 void PhaseChaitin::dump_for_spill_split_recycle() const {
2140   if( WizardMode && (PrintCompilation || PrintOpto) ) {
2141     // Display which live ranges need to be split and the allocator's state
2142     tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt);
2143     for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) {
2144       if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
2145         tty->print("L%d: ", bidx);
2146         lrgs(bidx).dump();
2147       }
2148     }
2149     tty->cr();
2150     dump();
2151   }
2152 }
2153 
2154 void PhaseChaitin::dump_frame() const {
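  // Sketch of the frame layout this dumper walks, from the highest OptoReg
  // down (illustrative; exact contents are platform-dependent):
  //   incoming args | pad0 | old out-preserve | -- Old SP --
  //   | in-preserve (ret addr, saved fp) | fixed slots | pad2
  //   | spill area | outgoing args | new out-preserve | -- New SP --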
2155   const char *fp = OptoReg::regname(OptoReg::c_frame_pointer);
2156   const TypeTuple *domain = C->tf()->domain();
2157   const int        argcnt = domain->cnt() - TypeFunc::Parms;
2158 
2159   // Incoming arguments in registers dump
2160   for( int k = 0; k < argcnt; k++ ) {
2161     OptoReg::Name parmreg = _matcher._parm_regs[k].first();
2162     if( OptoReg::is_reg(parmreg))  {
2163       const char *reg_name = OptoReg::regname(parmreg);
2164       tty->print("#r%3.3d %s", parmreg, reg_name);
2165       parmreg = _matcher._parm_regs[k].second();
2166       if( OptoReg::is_reg(parmreg))  {
2167         tty->print(":%s", OptoReg::regname(parmreg));
2168       }
2169       tty->print("   : parm %d: ", k);
2170       domain->field_at(k + TypeFunc::Parms)->dump();
2171       tty->cr();
2172     }
2173   }
2174 
2175   // Check for un-owned padding above incoming args
2176   OptoReg::Name reg = _matcher._new_SP;
2177   if( reg > _matcher._in_arg_limit ) {
2178     reg = OptoReg::add(reg, -1);
2179     tty->print_cr("#r%3.3d %s+%2d: pad0, owned by CALLER", reg, fp, reg2offset_unchecked(reg));
2180   }
2181 
2182   // Incoming argument area dump
2183   OptoReg::Name begin_in_arg = OptoReg::add(_matcher._old_SP,C->out_preserve_stack_slots());
2184   while( reg > begin_in_arg ) {
2185     reg = OptoReg::add(reg, -1);
2186     tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
2187     int j;
2188     for( j = 0; j < argcnt; j++) {
2189       if( _matcher._parm_regs[j].first() == reg ||
2190           _matcher._parm_regs[j].second() == reg ) {
2191         tty->print("parm %d: ",j);
2192         domain->field_at(j + TypeFunc::Parms)->dump();
2193         tty->cr();
2194         break;
2195       }
2196     }
2197     if( j >= argcnt )
2198       tty->print_cr("HOLE, owned by SELF");
2199   }
2200 
2201   // Old outgoing preserve area
2202   while( reg > _matcher._old_SP ) {
2203     reg = OptoReg::add(reg, -1);
2204     tty->print_cr("#r%3.3d %s+%2d: old out preserve",reg,fp,reg2offset_unchecked(reg));
2205   }
2206 
2207   // Old SP
2208   tty->print_cr("# -- Old %s -- Framesize: %d --",fp,
2209     reg2offset_unchecked(OptoReg::add(_matcher._old_SP,-1)) - reg2offset_unchecked(_matcher._new_SP)+jintSize);
2210 
2211   // Preserve area dump
2212   int fixed_slots = C->fixed_slots();
2213   OptoReg::Name begin_in_preserve = OptoReg::add(_matcher._old_SP, -(int)C->in_preserve_stack_slots());
2214   OptoReg::Name return_addr = _matcher.return_addr();
2215 
2216   reg = OptoReg::add(reg, -1);
2217   while (OptoReg::is_stack(reg)) {
2218     tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
2219     if (return_addr == reg) {
2220       tty->print_cr("return address");
2221     } else if (reg >= begin_in_preserve) {
2222       // Preserved slots are present on x86
2223       if (return_addr == OptoReg::add(reg, VMRegImpl::slots_per_word))
2224         tty->print_cr("saved fp register");
2225       else if (return_addr == OptoReg::add(reg, 2*VMRegImpl::slots_per_word) &&
2226                VerifyStackAtCalls)
2227         tty->print_cr("0xBADB100D   +VerifyStackAtCalls");
2228       else
2229         tty->print_cr("in_preserve");
2230     } else if ((int)OptoReg::reg2stack(reg) < fixed_slots) {
2231       tty->print_cr("Fixed slot %d", OptoReg::reg2stack(reg));
2232     } else {
2233       tty->print_cr("pad2, stack alignment");
2234     }
2235     reg = OptoReg::add(reg, -1);
2236   }
2237 
2238   // Spill area dump
2239   reg = OptoReg::add(_matcher._new_SP, _framesize );
2240   while( reg > _matcher._out_arg_limit ) {
2241     reg = OptoReg::add(reg, -1);
2242     tty->print_cr("#r%3.3d %s+%2d: spill",reg,fp,reg2offset_unchecked(reg));
2243   }
2244 
2245   // Outgoing argument area dump
2246   while( reg > OptoReg::add(_matcher._new_SP, C->out_preserve_stack_slots()) ) {
2247     reg = OptoReg::add(reg, -1);
2248     tty->print_cr("#r%3.3d %s+%2d: outgoing argument",reg,fp,reg2offset_unchecked(reg));
2249   }
2250 
2251   // Outgoing new preserve area
2252   while( reg > _matcher._new_SP ) {
2253     reg = OptoReg::add(reg, -1);
2254     tty->print_cr("#r%3.3d %s+%2d: new out preserve",reg,fp,reg2offset_unchecked(reg));
2255   }
2256   tty->print_cr("#");
2257 }
2258 
2259 void PhaseChaitin::dump_bb( uint pre_order ) const {
2260   tty->print_cr("---dump of B%d---",pre_order);
2261   for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
2262     Block* block = _cfg.get_block(i);
2263     if (block->_pre_order == pre_order) {
2264       dump(block);
2265     }
2266   }
2267 }
2268 
2269 void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
2270   tty->print_cr("---dump of L%d---",lidx);
2271 
2272   if (_ifg) {
2273     if (lidx >= _lrg_map.max_lrg_id()) {
2274       tty->print("Attempt to print live range index beyond max live range.\n");
2275       return;
2276     }
2277     tty->print("L%d: ",lidx);
2278     if (lidx < _ifg->_maxlrg) {
2279       lrgs(lidx).dump();
2280     } else {
2281       tty->print_cr("new LRG");
2282     }
2283   }
2284   if( _ifg && lidx < _ifg->_maxlrg) {
2285     tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx));
2286     _ifg->neighbors(lidx)->dump();
2287     tty->cr();
2288   }
2289   // For all blocks
2290   for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
2291     Block* block = _cfg.get_block(i);
2292     int dump_once = 0;
2293 
2294     // For all instructions
2295     for( uint j = 0; j < block->number_of_nodes(); j++ ) {
2296       Node *n = block->get_node(j);
2297       if (_lrg_map.find_const(n) == lidx) {
2298         if (!dump_once++) {
2299           tty->cr();
2300           block->dump_head(&_cfg);
2301         }
2302         dump(n);
2303         continue;
2304       }
2305       if (!defs_only) {
2306         uint cnt = n->req();
2307         for( uint k = 1; k < cnt; k++ ) {
2308           Node *m = n->in(k);
2309           if (!m)  {
2310             continue;  // be robust in the dumper
2311           }
2312           if (_lrg_map.find_const(m) == lidx) {
2313             if (!dump_once++) {
2314               tty->cr();
2315               block->dump_head(&_cfg);
2316             }
2317             dump(n);
2318           }
2319         }
2320       }
2321     }
2322   } // End of per-block dump
2323   tty->cr();
2324 }
2325 #endif // not PRODUCT
2326 
2327 int PhaseChaitin::_final_loads  = 0;
2328 int PhaseChaitin::_final_stores = 0;
int PhaseChaitin::_final_memoves = 0;
2330 int PhaseChaitin::_final_copies = 0;
2331 double PhaseChaitin::_final_load_cost  = 0;
2332 double PhaseChaitin::_final_store_cost = 0;
double PhaseChaitin::_final_memove_cost = 0;
2334 double PhaseChaitin::_final_copy_cost  = 0;
2335 int PhaseChaitin::_conserv_coalesce = 0;
2336 int PhaseChaitin::_conserv_coalesce_pair = 0;
2337 int PhaseChaitin::_conserv_coalesce_trie = 0;
2338 int PhaseChaitin::_conserv_coalesce_quad = 0;
2339 int PhaseChaitin::_post_alloc = 0;
2340 int PhaseChaitin::_lost_opp_pp_coalesce = 0;
2341 int PhaseChaitin::_lost_opp_cflow_coalesce = 0;
2342 int PhaseChaitin::_used_cisc_instructions   = 0;
2343 int PhaseChaitin::_unused_cisc_instructions = 0;
2344 int PhaseChaitin::_allocator_attempts       = 0;
2345 int PhaseChaitin::_allocator_successes      = 0;
2346 
2347 #ifndef PRODUCT
2348 uint PhaseChaitin::_high_pressure           = 0;
2349 uint PhaseChaitin::_low_pressure            = 0;
2350 
2351 void PhaseChaitin::print_chaitin_statistics() {
2352   tty->print_cr("Inserted %d spill loads, %d spill stores, %d mem-mem moves and %d copies.", _final_loads, _final_stores, _final_memoves, _final_copies);
2353   tty->print_cr("Total load cost= %6.0f, store cost = %6.0f, mem-mem cost = %5.2f, copy cost = %5.0f.", _final_load_cost, _final_store_cost, _final_memove_cost, _final_copy_cost);
2354   tty->print_cr("Adjusted spill cost = %7.0f.",
2355                 _final_load_cost*4.0 + _final_store_cost  * 2.0 +
2356                 _final_copy_cost*1.0 + _final_memove_cost*12.0);
2357   tty->print("Conservatively coalesced %d copies, %d pairs",
2358                 _conserv_coalesce, _conserv_coalesce_pair);
2359   if( _conserv_coalesce_trie || _conserv_coalesce_quad )
2360     tty->print(", %d tries, %d quads", _conserv_coalesce_trie, _conserv_coalesce_quad);
2361   tty->print_cr(", %d post alloc.", _post_alloc);
2362   if( _lost_opp_pp_coalesce || _lost_opp_cflow_coalesce )
2363     tty->print_cr("Lost coalesce opportunity, %d private-private, and %d cflow interfered.",
2364                   _lost_opp_pp_coalesce, _lost_opp_cflow_coalesce );
2365   if( _used_cisc_instructions || _unused_cisc_instructions )
2366     tty->print_cr("Used cisc instruction  %d,  remained in register %d",
2367                    _used_cisc_instructions, _unused_cisc_instructions);
2368   if( _allocator_successes != 0 )
2369     tty->print_cr("Average allocation trips %f", (float)_allocator_attempts/(float)_allocator_successes);
2370   tty->print_cr("High Pressure Blocks = %d, Low Pressure Blocks = %d", _high_pressure, _low_pressure);
2371 }
2372 #endif // not PRODUCT