1 /*
   2  * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "compiler/oopMap.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "opto/addnode.hpp"
  30 #include "opto/block.hpp"
  31 #include "opto/callnode.hpp"
  32 #include "opto/cfgnode.hpp"
  33 #include "opto/chaitin.hpp"
  34 #include "opto/coalesce.hpp"
  35 #include "opto/connode.hpp"
  36 #include "opto/idealGraphPrinter.hpp"
  37 #include "opto/indexSet.hpp"
  38 #include "opto/machnode.hpp"
  39 #include "opto/memnode.hpp"
  40 #include "opto/opcodes.hpp"
  41 #include "opto/rootnode.hpp"
  42 
  43 #ifndef PRODUCT
  44 void LRG::dump() const {
  45   ttyLocker ttyl;
  46   tty->print("%d ",num_regs());
  47   _mask.dump();
  48   if( _msize_valid ) {
  49     if( mask_size() == compute_mask_size() ) tty->print(", #%d ",_mask_size);
  50     else tty->print(", #!!!_%d_vs_%d ",_mask_size,_mask.Size());
  51   } else {
  52     tty->print(", #?(%d) ",_mask.Size());
  53   }
  54 
  55   tty->print("EffDeg: ");
  56   if( _degree_valid ) tty->print( "%d ", _eff_degree );
  57   else tty->print("? ");
  58 
  59   if( is_multidef() ) {
  60     tty->print("MultiDef ");
  61     if (_defs != NULL) {
  62       tty->print("(");
  63       for (int i = 0; i < _defs->length(); i++) {
  64         tty->print("N%d ", _defs->at(i)->_idx);
  65       }
  66       tty->print(") ");
  67     }
  68   }
  69   else if( _def == 0 ) tty->print("Dead ");
  70   else tty->print("Def: N%d ",_def->_idx);
  71 
  72   tty->print("Cost:%4.2g Area:%4.2g Score:%4.2g ",_cost,_area, score());
  73   // Flags
  74   if( _is_oop ) tty->print("Oop ");
  75   if( _is_float ) tty->print("Float ");
  76   if( _is_vector ) tty->print("Vector ");
  77   if( _was_spilled1 ) tty->print("Spilled ");
  78   if( _was_spilled2 ) tty->print("Spilled2 ");
  79   if( _direct_conflict ) tty->print("Direct_conflict ");
  80   if( _fat_proj ) tty->print("Fat ");
  81   if( _was_lo ) tty->print("Lo ");
  82   if( _has_copy ) tty->print("Copy ");
  83   if( _at_risk ) tty->print("Risk ");
  84 
  85   if( _must_spill ) tty->print("Must_spill ");
  86   if( _is_bound ) tty->print("Bound ");
  87   if( _msize_valid ) {
  88     if( _degree_valid && lo_degree() ) tty->print("Trivial ");
  89   }
  90 
  91   tty->cr();
  92 }
  93 #endif
  94 
  95 // Compute score from cost and area.  Low score is best to spill.
  96 static double raw_score( double cost, double area ) {
  97   return cost - (area*RegisterCostAreaRatio) * 1.52588e-5;
  98 }
  99 
 100 double LRG::score() const {
 101   // Scale _area by RegisterCostAreaRatio/64K then subtract from cost.
 102   // Bigger area lowers the score, encouraging spilling of this live range.
 103   // Bigger cost raises the score, discouraging spilling of this live range.
 104   // (Note: 1/65536 is the magic constant below; I don't trust the C optimizer
 105   // to turn a divide by a constant into a multiply by the reciprocal).
 106   double score = raw_score( _cost, _area);
 107 
 108   // Account for area.  Basically, LRGs covering large areas are better
 109   // to spill because more other LRGs get freed up.
 110   if( _area == 0.0 )            // No area?  Then no progress to spill
 111     return 1e35;
 112 
 113   if( _was_spilled2 )           // If spilled once before, we are unlikely
 114     return score + 1e30;        // to make progress again.
 115 
 116   if( _cost >= _area*3.0 )      // Tiny area relative to cost
 117     return score + 1e17;        // Probably no progress to spill
 118 
 119   if( (_cost+_cost) >= _area*3.0 ) // Small area relative to cost
 120     return score + 1e10;        // Likely no progress to spill
 121 
 122   return score;
 123 }
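// A worked instance of the score formula above -- an illustrative sketch only,
// assuming RegisterCostAreaRatio is at its usual default of 16000:
//
//   raw_score(cost, area) = cost - area * 16000 * 1.52588e-5
//                         = cost - area * 0.244
//
//   cost = 10, area = 100   ->  10 - 24.4 = -14.4   (cheap spill candidate)
//   cost = 10, area = 0     ->  returns 1e35        (spilling makes no progress)
//   already spilled twice   ->  score + 1e30        (chosen only as a last resort)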
 124 
 125 #define NUMBUCKS 3
 126 
 127 // Straight out of Tarjan's union-find algorithm
 128 uint LiveRangeMap::find_compress(uint lrg) {
 129   uint cur = lrg;
 130   uint next = _uf_map.at(cur);
 131   while (next != cur) { // Scan chain of equivalences
 132     assert( next < cur, "always union smaller");
 133     cur = next; // until find a fixed-point
 134     next = _uf_map.at(cur);
 135   }
 136 
 137   // Core of union-find algorithm: update chain of
 138   // equivalences to be equal to the root.
 139   while (lrg != next) {
 140     uint tmp = _uf_map.at(lrg);
 141     _uf_map.at_put(lrg, next);
 142     lrg = tmp;
 143   }
 144   return lrg;
 145 }
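// Illustrative sketch of find_compress() on a tiny map (not allocator code).
// Suppose the union-find chain is 5 -> 3 -> 2, with 2 being the self-cycled root:
//
//   _uf_map before: { 2->2, 3->2, 5->3 }
//   find_compress(5);            // walks 5 -> 3 -> 2, then rewrites the path
//   _uf_map after:  { 2->2, 3->2, 5->2 }
//
// A later find(5) now terminates after a single lookup.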
 146 
 147 // Reset the Union-Find map to identity
 148 void LiveRangeMap::reset_uf_map(uint max_lrg_id) {
 149   _max_lrg_id = max_lrg_id;
 150   // Force the Union-Find mapping to be at least this large
 151   _uf_map.at_put_grow(_max_lrg_id, 0);
 152   // Initialize it to be the ID mapping.
 153   for (uint i = 0; i < _max_lrg_id; ++i) {
 154     _uf_map.at_put(i, i);
 155   }
 156 }
 157 
 158 // Make all Nodes map directly to their final live range; no need for
 159 // the Union-Find mapping after this call.
 160 void LiveRangeMap::compress_uf_map_for_nodes() {
 161   // For all Nodes, compress mapping
 162   uint unique = _names.length();
 163   for (uint i = 0; i < unique; ++i) {
 164     uint lrg = _names.at(i);
 165     uint compressed_lrg = find(lrg);
 166     if (lrg != compressed_lrg) {
 167       _names.at_put(i, compressed_lrg);
 168     }
 169   }
 170 }
 171 
 172 // Like Find above, but with no path compression, so bad asymptotic behavior
 173 uint LiveRangeMap::find_const(uint lrg) const {
 174   if (!lrg) {
 175     return lrg; // Ignore the zero LRG
 176   }
 177 
 178   // Off the end?  This happens during debugging dumps when you have
 179   // brand new live ranges but have not yet told the allocator about them.
 180   if (lrg >= _max_lrg_id) {
 181     return lrg;
 182   }
 183 
 184   uint next = _uf_map.at(lrg);
 185   while (next != lrg) { // Scan chain of equivalences
 186     assert(next < lrg, "always union smaller");
 187     lrg = next; // until find a fixed-point
 188     next = _uf_map.at(lrg);
 189   }
 190   return next;
 191 }
 192 
 193 PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher)
 194   : PhaseRegAlloc(unique, cfg, matcher,
 195 #ifndef PRODUCT
 196        print_chaitin_statistics
 197 #else
 198        NULL
 199 #endif
 200        )
 201   , _lrg_map(Thread::current()->resource_area(), unique)
 202   , _live(0)
 203   , _spilled_once(Thread::current()->resource_area())
 204   , _spilled_twice(Thread::current()->resource_area())
 205   , _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0)
 206   , _oldphi(unique)
 207 #ifndef PRODUCT
 208   , _trace_spilling(TraceSpilling || C->method_has_option("TraceSpilling"))
 209 #endif
 210 {
 211   NOT_PRODUCT( Compile::TracePhase t3("ctorChaitin", &_t_ctorChaitin, TimeCompiler); )
 212 
 213   _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg.get_outer_loop_frequency());
 214 
 215   // Build a list of basic blocks, sorted by frequency
 216   _blks = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks());
 217   // Experiment with sorting strategies to speed compilation
 218   double  cutoff = BLOCK_FREQUENCY(1.0); // Cutoff for high frequency bucket
 219   Block **buckets[NUMBUCKS];             // Array of buckets
 220   uint    buckcnt[NUMBUCKS];             // Array of bucket counters
 221   double  buckval[NUMBUCKS];             // Array of bucket value cutoffs
 222   for (uint i = 0; i < NUMBUCKS; i++) {
 223     buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks());
 224     buckcnt[i] = 0;
 225     // Lower the cutoff by three orders of magnitude each time
 226     cutoff *= 0.001;
 227     buckval[i] = cutoff;
 228     for (uint j = 0; j < _cfg.number_of_blocks(); j++) {
 229       buckets[i][j] = NULL;
 230     }
 231   }
 232   // Sort blocks into buckets
 233   for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
 234     for (uint j = 0; j < NUMBUCKS; j++) {
 235       if ((j == NUMBUCKS - 1) || (_cfg.get_block(i)->_freq > buckval[j])) {
 236         // Assign block to end of list for appropriate bucket
 237         buckets[j][buckcnt[j]++] = _cfg.get_block(i);
 238         break; // kick out of inner loop
 239       }
 240     }
 241   }
 242   // Dump buckets into final block array
 243   uint blkcnt = 0;
 244   for (uint i = 0; i < NUMBUCKS; i++) {
 245     for (uint j = 0; j < buckcnt[i]; j++) {
 246       _blks[blkcnt++] = buckets[i][j];
 247     }
 248   }
 249 
 250   assert(blkcnt == _cfg.number_of_blocks(), "Block array not totally filled");
 251 }
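// Illustrative sketch of the bucket sort above (not allocator code).  With
// NUMBUCKS == 3 the cutoffs are BLOCK_FREQUENCY(1.0) scaled by 1e-3 and 1e-6,
// with the last bucket acting as a catch-all:
//
//   buckets[0]: blocks with _freq above BLOCK_FREQUENCY(1.0) * 1e-3   (hot)
//   buckets[1]: blocks with _freq above BLOCK_FREQUENCY(1.0) * 1e-6   (warm)
//   buckets[2]: everything else                                       (cold)
//
// so _blks[] ends up ordered from the hottest bucket to the coldest.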
 252 
 253 // union 2 sets together.
 254 void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) {
 255   uint src = _lrg_map.find(src_n);
 256   uint dst = _lrg_map.find(dst_n);
 257   assert(src, "");
 258   assert(dst, "");
 259   assert(src < _lrg_map.max_lrg_id(), "oob");
 260   assert(dst < _lrg_map.max_lrg_id(), "oob");
 261   assert(src < dst, "always union smaller");
 262   _lrg_map.uf_map(dst, src);
 263 }
 264 
 265 void PhaseChaitin::new_lrg(const Node *x, uint lrg) {
 266   // Make the Node->LRG mapping
 267   _lrg_map.extend(x->_idx,lrg);
 268   // Make the Union-Find mapping an identity function
 269   _lrg_map.uf_extend(lrg, lrg);
 270 }
 271 
 272 
 273 int PhaseChaitin::clone_projs(Block* b, uint idx, Node* orig, Node* copy, uint& max_lrg_id) {
 274   assert(b->find_node(copy) == (idx - 1), "incorrect insert index for copy kill projections");
 275   DEBUG_ONLY( Block* borig = _cfg.get_block_for_node(orig); )
 276   int found_projs = 0;
 277   uint cnt = orig->outcnt();
 278   for (uint i = 0; i < cnt; i++) {
 279     Node* proj = orig->raw_out(i);
 280     if (proj->is_MachProj()) {
 281       assert(proj->outcnt() == 0, "only kill projections are expected here");
 282       assert(_cfg.get_block_for_node(proj) == borig, "incorrect block for kill projections");
 283       found_projs++;
 284       // Copy kill projections after the cloned node
 285       Node* kills = proj->clone();
 286       kills->set_req(0, copy);
 287       b->insert_node(kills, idx++);
 288       _cfg.map_node_to_block(kills, b);
 289       new_lrg(kills, max_lrg_id++);
 290     }
 291   }
 292   return found_projs;
 293 }
 294 
 295 // Renumber the live ranges to compact them.  Makes the IFG smaller.
 296 void PhaseChaitin::compact() {
 297   // Currently the _uf_map contains a series of short chains which are headed
 298   // by a self-cycle.  All the chains run from big numbers to little numbers.
 299   // The Find() call chases the chains & shortens them for the next Find call.
 300   // We are going to change this structure slightly.  Numbers above a moving
 301   // wave 'i' are unchanged.  Numbers below 'j' point directly to their
 302   // compacted live range with no further chaining.  There are no chains or
 303   // cycles below 'i', so the Find call no longer works.
 304   uint j=1;
 305   uint i;
 306   for (i = 1; i < _lrg_map.max_lrg_id(); i++) {
 307     uint lr = _lrg_map.uf_live_range_id(i);
 308     // Ignore unallocated live ranges
 309     if (!lr) {
 310       continue;
 311     }
 312     assert(lr <= i, "");
 313     _lrg_map.uf_map(i, ( lr == i ) ? j++ : _lrg_map.uf_live_range_id(lr));
 314   }
 315   // Now change the Node->LR mapping to reflect the compacted names
 316   uint unique = _lrg_map.size();
 317   for (i = 0; i < unique; i++) {
 318     uint lrg_id = _lrg_map.live_range_id(i);
 319     _lrg_map.map(i, _lrg_map.uf_live_range_id(lrg_id));
 320   }
 321 
 322   // Reset the Union-Find mapping
 323   _lrg_map.reset_uf_map(j);
 324 }
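// Illustrative sketch of compact() (not allocator code).  Ignoring unallocated
// slots, suppose live ranges 1, 4 and 7 are roots and 5 was unioned into 4:
//
//   _uf_map before: { 1->1, 4->4, 5->4, 7->7 }
//   first loop:     { 1->1, 4->2, 5->2, 7->3 }   // roots renamed in order, j ends at 4
//   second loop:    every Node's name is rewritten through that map
//   reset_uf_map(4) then makes the union-find an identity again.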
 325 
 326 void PhaseChaitin::Register_Allocate() {
 327 
 328   // Above the OLD FP (and in registers) are the incoming arguments.  Stack
 329   // slots in this area are called "arg_slots".  Above the NEW FP (and in
 330   // registers) is the outgoing argument area; above that is the spill/temp
 331   // area.  These are all "frame_slots".  Arg_slots start at the zero
 332   // stack_slots and count up to the known arg_size.  Frame_slots start at
 333   // the stack_slot #arg_size and go up.  After allocation I map stack
 334   // slots to actual offsets.  Stack-slots in the arg_slot area are biased
 335   // by the frame_size; stack-slots in the frame_slot area are biased by 0.
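  // Illustrative sketch of the slot numbering above (hypothetical numbers):
  // with arg_size == 2, the incoming arguments occupy arg_slots 0 and 1 and
  // are biased by frame_size when mapped to offsets, while spill/outgoing
  // values occupy frame_slots 2 and up and are biased by 0.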
 336 
 337   _trip_cnt = 0;
 338   _alternate = 0;
 339   _matcher._allocation_started = true;
 340 
 341   ResourceArea split_arena;     // Arena for Split local resources
 342   ResourceArea live_arena;      // Arena for liveness & IFG info
 343   ResourceMark rm(&live_arena);
 344 
 345   // Need live-ness for the IFG; need the IFG for coalescing.  If the
 346   // liveness is JUST for coalescing, then I can get some mileage by renaming
 347   // all copy-related live ranges low and then using the max copy-related
 348   // live range as a cut-off for LIVE and the IFG.  In other words, I can
 349   // build a subset of LIVE and IFG just for copies.
 350   PhaseLive live(_cfg, _lrg_map.names(), &live_arena);
 351 
 352   // Need IFG for coalescing and coloring
 353   PhaseIFG ifg(&live_arena);
 354   _ifg = &ifg;
 355 
 356   // Come out of SSA world to the Named world.  Assign (virtual) registers to
 357   // Nodes.  Use the same register for all inputs and the output of PhiNodes
 358   // - effectively ending SSA form.  This requires either coalescing live
 359   // ranges or inserting copies.  For the moment, we insert "virtual copies"
 360   // - we pretend there is a copy prior to each Phi in predecessor blocks.
 361   // We will attempt to coalesce such "virtual copies" before we manifest
 362   // them for real.
 363   de_ssa();
 364 
 365 #ifdef ASSERT
 366   // Verify the graph before RA.
 367   verify(&live_arena);
 368 #endif
 369 
 370   {
 371     NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
 372     _live = NULL;                 // Mark live as being not available
 373     rm.reset_to_mark();           // Reclaim working storage
 374     IndexSet::reset_memory(C, &live_arena);
 375     ifg.init(_lrg_map.max_lrg_id()); // Empty IFG
 376     gather_lrg_masks( false );    // Collect LRG masks
 377     live.compute(_lrg_map.max_lrg_id()); // Compute liveness
 378     _live = &live;                // Mark LIVE as being available
 379   }
 380 
 381   // Base pointers are currently "used" by instructions which define new
 382   // derived pointers.  This makes base pointers live up to the point where the
 383   // derived pointer is made, but not beyond.  Really, they need to be live
 384   // across any GC point where the derived value is live.  So this code looks
 385   // at all the GC points, and "stretches" the live range of any base pointer
 386   // to the GC point.
 387   if (stretch_base_pointer_live_ranges(&live_arena)) {
 388     NOT_PRODUCT(Compile::TracePhase t3("computeLive (sbplr)", &_t_computeLive, TimeCompiler);)
 389     // Since some live range stretched, I need to recompute live
 390     _live = NULL;
 391     rm.reset_to_mark();         // Reclaim working storage
 392     IndexSet::reset_memory(C, &live_arena);
 393     ifg.init(_lrg_map.max_lrg_id());
 394     gather_lrg_masks(false);
 395     live.compute(_lrg_map.max_lrg_id());
 396     _live = &live;
 397   }
 398   // Create the interference graph using virtual copies
 399   build_ifg_virtual();  // Include stack slots this time
 400 
 401   // Aggressive (but pessimistic) copy coalescing.
 402   // This pass works on virtual copies.  Any virtual copies which are not
 403   // coalesced get manifested as actual copies
 404   {
 405     // The IFG is/was triangular.  I am 'squaring it up' so Union can run
 406     // faster.  Union requires a 'for all' operation which is slow on the
 407     // triangular adjacency matrix (quick reminder: the IFG is 'sparse' -
 408     // meaning I can visit all of a Node's neighbors numbered lower than it in
 409     // time O(# of neighbors), but I have to visit all the Nodes numbered greater
 410     // than a given Node and search them for an instance, i.e., time O(#MaxLRG)).
 411     _ifg->SquareUp();
 412 
 413     PhaseAggressiveCoalesce coalesce(*this);
 414     coalesce.coalesce_driver();
 415     // Insert un-coalesced copies.  Visit all Phis.  Where inputs to a Phi do
 416     // not match the Phi itself, insert a copy.
 417     coalesce.insert_copies(_matcher);
 418     if (C->failing()) {
 419       return;
 420     }
 421   }
 422 
 423   // After aggressive coalesce, attempt a first cut at coloring.
 424   // To color, we need the IFG and for that we need LIVE.
 425   {
 426     NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
 427     _live = NULL;
 428     rm.reset_to_mark();           // Reclaim working storage
 429     IndexSet::reset_memory(C, &live_arena);
 430     ifg.init(_lrg_map.max_lrg_id());
 431     gather_lrg_masks( true );
 432     live.compute(_lrg_map.max_lrg_id());
 433     _live = &live;
 434   }
 435 
 436   // Build physical interference graph
 437   uint must_spill = 0;
 438   must_spill = build_ifg_physical(&live_arena);
 439   // If we have a guaranteed spill, might as well spill now
 440   if (must_spill) {
 441     if(!_lrg_map.max_lrg_id()) {
 442       return;
 443     }
 444     // Bail out if unique gets too large (ie - unique > MaxNodeLimit)
 445     C->check_node_count(10*must_spill, "out of nodes before split");
 446     if (C->failing()) {
 447       return;
 448     }
 449 
 450     uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena);  // Split spilling LRG everywhere
 451     _lrg_map.set_max_lrg_id(new_max_lrg_id);
 452     // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
 453     // or we failed to split
 454     C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after physical split");
 455     if (C->failing()) {
 456       return;
 457     }
 458 
 459     NOT_PRODUCT(C->verify_graph_edges();)
 460 
 461     compact();                  // Compact LRGs; return new lower max lrg
 462 
 463     {
 464       NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
 465       _live = NULL;
 466       rm.reset_to_mark();         // Reclaim working storage
 467       IndexSet::reset_memory(C, &live_arena);
 468       ifg.init(_lrg_map.max_lrg_id()); // Build a new interference graph
 469       gather_lrg_masks( true );   // Collect intersect mask
 470       live.compute(_lrg_map.max_lrg_id()); // Compute LIVE
 471       _live = &live;
 472     }
 473     build_ifg_physical(&live_arena);
 474     _ifg->SquareUp();
 475     _ifg->Compute_Effective_Degree();
 476     // Only do conservative coalescing if requested
 477     if (OptoCoalesce) {
 478       // Conservative (and pessimistic) copy coalescing of those spills
 479       PhaseConservativeCoalesce coalesce(*this);
 480       // If max live ranges greater than cutoff, don't color the stack.
 481       // This cutoff can be larger than below since it is only done once.
 482       coalesce.coalesce_driver();
 483     }
 484     _lrg_map.compress_uf_map_for_nodes();
 485 
 486 #ifdef ASSERT
 487     verify(&live_arena, true);
 488 #endif
 489   } else {
 490     ifg.SquareUp();
 491     ifg.Compute_Effective_Degree();
 492 #ifdef ASSERT
 493     set_was_low();
 494 #endif
 495   }
 496 
 497   // Prepare for Simplify & Select
 498   cache_lrg_info();           // Count degree of LRGs
 499 
 500   // Simplify the InterFerence Graph by removing LRGs of low degree.
 501   // LRGs of low degree are trivially colorable.
 502   Simplify();
 503 
 504   // Select colors by re-inserting LRGs back into the IFG in reverse order.
 505   // Return whether or not something spills.
 506   uint spills = Select( );
 507 
 508   // If we spill, split and recycle the entire thing
 509   while( spills ) {
 510     if( _trip_cnt++ > 24 ) {
 511       DEBUG_ONLY( dump_for_spill_split_recycle(); )
 512       if( _trip_cnt > 27 ) {
 513         C->record_method_not_compilable("failed spill-split-recycle sanity check");
 514         return;
 515       }
 516     }
 517 
 518     if (!_lrg_map.max_lrg_id()) {
 519       return;
 520     }
 521     uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena);  // Split spilling LRG everywhere
 522     _lrg_map.set_max_lrg_id(new_max_lrg_id);
 523     // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
 524     C->check_node_count(2 * NodeLimitFudgeFactor, "out of nodes after split");
 525     if (C->failing()) {
 526       return;
 527     }
 528 
 529     compact(); // Compact LRGs; return new lower max lrg
 530 
 531     // Nuke the live-ness and interference graph and LiveRanGe info
 532     {
 533       NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
 534       _live = NULL;
 535       rm.reset_to_mark();         // Reclaim working storage
 536       IndexSet::reset_memory(C, &live_arena);
 537       ifg.init(_lrg_map.max_lrg_id());
 538 
 539       // Create LiveRanGe array.
 540       // Intersect register masks for all USEs and DEFs
 541       gather_lrg_masks(true);
 542       live.compute(_lrg_map.max_lrg_id());
 543       _live = &live;
 544     }
 545     must_spill = build_ifg_physical(&live_arena);
 546     _ifg->SquareUp();
 547     _ifg->Compute_Effective_Degree();
 548 
 549     // Only do conservative coalescing if requested
 550     if (OptoCoalesce) {
 551       // Conservative (and pessimistic) copy coalescing
 552       PhaseConservativeCoalesce coalesce(*this);
 553       // Checking for few live ranges determines how aggressive the coalescing is.
 554       coalesce.coalesce_driver();
 555     }
 556     _lrg_map.compress_uf_map_for_nodes();
 557 #ifdef ASSERT
 558     verify(&live_arena, true);
 559 #endif
 560     cache_lrg_info();           // Count degree of LRGs
 561 
 562     // Simplify the InterFerence Graph by removing LRGs of low degree.
 563     // LRGs of low degree are trivially colorable.
 564     Simplify();
 565 
 566     // Select colors by re-inserting LRGs back into the IFG in reverse order.
 567     // Return whether or not something spills.
 568     spills = Select();
 569   }
 570 
 571   // Count number of Simplify-Select trips per coloring success.
 572   _allocator_attempts += _trip_cnt + 1;
 573   _allocator_successes += 1;
 574 
 575   // Peephole remove copies
 576   post_allocate_copy_removal();
 577 
 578 #ifdef ASSERT
 579   // Verify the graph after RA.
 580   verify(&live_arena);
 581 #endif
 582 
 583   // max_reg is past the largest *register* used.
 584   // Convert that to a frame_slot number.
 585   if (_max_reg <= _matcher._new_SP) {
 586     _framesize = C->out_preserve_stack_slots();
 587   }
 588   else {
 589     _framesize = _max_reg -_matcher._new_SP;
 590   }
 591   assert((int)(_matcher._new_SP+_framesize) >= (int)_matcher._out_arg_limit, "framesize must be large enough");
 592 
 593   // This frame must preserve the required fp alignment
 594   _framesize = round_to(_framesize, Matcher::stack_alignment_in_slots());
 595   assert( _framesize >= 0 && _framesize <= 1000000, "sanity check" );
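  // Illustrative sketch of the framesize computation above (hypothetical
  // numbers): if _matcher._new_SP is slot 8 and _max_reg is slot 21, then
  // _framesize = 21 - 8 = 13; with a stack alignment of 2 slots, round_to()
  // pads that to 14, which must still cover _out_arg_limit.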
 596 #ifndef PRODUCT
 597   _total_framesize += _framesize;
 598   if ((int)_framesize > _max_framesize) {
 599     _max_framesize = _framesize;
 600   }
 601 #endif
 602 
 603   // Convert CISC spills
 604   fixup_spills();
 605 
 606   // Log regalloc results
 607   CompileLog* log = Compile::current()->log();
 608   if (log != NULL) {
 609     log->elem("regalloc attempts='%d' success='%d'", _trip_cnt, !C->failing());
 610   }
 611 
 612   if (C->failing()) {
 613     return;
 614   }
 615 
 616   NOT_PRODUCT(C->verify_graph_edges();)
 617 
 618   // Move important info out of the live_arena to longer lasting storage.
 619   alloc_node_regs(_lrg_map.size());
 620   for (uint i=0; i < _lrg_map.size(); i++) {
 621     if (_lrg_map.live_range_id(i)) { // Live range associated with Node?
 622       LRG &lrg = lrgs(_lrg_map.live_range_id(i));
 623       if (!lrg.alive()) {
 624         set_bad(i);
 625       } else if (lrg.num_regs() == 1) {
 626         set1(i, lrg.reg());
 627       } else {                  // Must be a register-set
 628         if (!lrg._fat_proj) {   // Must be aligned adjacent register set
 629           // Live ranges record the highest register in their mask.
 630           // We want the low register for the AD file writer's convenience.
 631           OptoReg::Name hi = lrg.reg(); // Get hi register
 632           OptoReg::Name lo = OptoReg::add(hi, (1-lrg.num_regs())); // Find lo
 633           // We have to use pair [lo,lo+1] even for wide vectors because
 634           // the rest of code generation works only with pairs. It is safe
 635           // since for registers encoding only 'lo' is used.
 636           // Second reg from pair is used in ScheduleAndBundle on SPARC where
 637           // vector max size is 8 which corresponds to registers pair.
 638           // It is also used in BuildOopMaps but oop operations are not
 639           // vectorized.
 640           set2(i, lo);
 641         } else {                // Misaligned; extract 2 bits
 642           OptoReg::Name hi = lrg.reg(); // Get hi register
 643           lrg.Remove(hi);       // Yank from mask
 644           int lo = lrg.mask().find_first_elem(); // Find lo
 645           set_pair(i, hi, lo);
 646         }
 647       }
 648       if( lrg._is_oop ) _node_oops.set(i);
 649     } else {
 650       set_bad(i);
 651     }
 652   }
 653 
 654   // Done!
 655   _live = NULL;
 656   _ifg = NULL;
 657   C->set_indexSet_arena(NULL);  // ResourceArea is at end of scope
 658 }
 659 
 660 void PhaseChaitin::de_ssa() {
 661   // Set initial Names for all Nodes.  Most Nodes get the virtual register
 662   // number.  A few get the ZERO live range number.  These do not
 663   // get allocated, but instead rely on correct scheduling to ensure that
 664   // only one instance is live at a time.
 665   uint lr_counter = 1;
 666   for( uint i = 0; i < _cfg.number_of_blocks(); i++ ) {
 667     Block* block = _cfg.get_block(i);
 668     uint cnt = block->number_of_nodes();
 669 
 670     // Handle all the normal Nodes in the block
 671     for( uint j = 0; j < cnt; j++ ) {
 672       Node *n = block->get_node(j);
 673       // Pre-color to the zero live range, or pick virtual register
 674       const RegMask &rm = n->out_RegMask();
 675       _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
 676     }
 677   }
 678 
 679   // Reset the Union-Find mapping to be identity
 680   _lrg_map.reset_uf_map(lr_counter);
 681 }
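// Illustrative sketch of the de_ssa() mapping (hypothetical node numbers):
//
//   N12 AddI    -> live range 1     (non-empty out_RegMask, gets a fresh vreg)
//   N15 Phi     -> live range 2
//   N20 StoreI  -> live range 0     (empty out_RegMask, never allocated)
//
// The Phi's inputs keep their own live ranges for now; the aggressive
// coalescing pass later tries to fold them into live range 2.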
 682 
 683 
 684 // Gather LiveRanGe information, including register masks.  Modification of
 685 // cisc spillable in_RegMasks should not be done before AggressiveCoalesce.
 686 void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
 687 
 688   // Nail down the frame pointer live range
 689   uint fp_lrg = _lrg_map.live_range_id(_cfg.get_root_node()->in(1)->in(TypeFunc::FramePtr));
 690   lrgs(fp_lrg)._cost += 1e12;   // Cost is infinite
 691 
 692   // For all blocks
 693   for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
 694     Block* block = _cfg.get_block(i);
 695 
 696     // For all instructions
 697     for (uint j = 1; j < block->number_of_nodes(); j++) {
 698       Node* n = block->get_node(j);
 699       uint input_edge_start = 1; // Skip the control edge for most nodes
 700       if (n->is_Mach()) {
 701         input_edge_start = n->as_Mach()->oper_input_base();
 702       }
 703       uint idx = n->is_Copy();
 704 
 705       // Get virtual register number, same as LiveRanGe index
 706       uint vreg = _lrg_map.live_range_id(n);
 707       LRG& lrg = lrgs(vreg);
 708       if (vreg) {              // No vreg means un-allocable (e.g. memory)
 709 
 710         // Collect has-copy bit
 711         if (idx) {
 712           lrg._has_copy = 1;
 713           uint clidx = _lrg_map.live_range_id(n->in(idx));
 714           LRG& copy_src = lrgs(clidx);
 715           copy_src._has_copy = 1;
 716         }
 717 
 718         // Check for float-vs-int live range (used in register-pressure
 719         // calculations)
 720         const Type *n_type = n->bottom_type();
 721         if (n_type->is_floatingpoint()) {
 722           lrg._is_float = 1;
 723         }
 724 
 725         // Check for twice prior spilling.  Once prior spilling might have
 726         // spilled 'soft', 2nd prior spill should have spilled 'hard' and
 727         // further spilling is unlikely to make progress.
 728         if (_spilled_once.test(n->_idx)) {
 729           lrg._was_spilled1 = 1;
 730           if (_spilled_twice.test(n->_idx)) {
 731             lrg._was_spilled2 = 1;
 732           }
 733         }
 734 
 735 #ifndef PRODUCT
 736         if (trace_spilling() && lrg._def != NULL) {
 737           // collect defs for MultiDef printing
 738           if (lrg._defs == NULL) {
 739             lrg._defs = new (_ifg->_arena) GrowableArray<Node*>(_ifg->_arena, 2, 0, NULL);
 740             lrg._defs->append(lrg._def);
 741           }
 742           lrg._defs->append(n);
 743         }
 744 #endif
 745 
 746         // Check for a single def LRG; these can spill nicely
 747         // via rematerialization.  Flag as NULL for no def found
 748         // yet, 'n' for a single def, or NodeSentinel for many defs.
 749         lrg._def = lrg._def ? NodeSentinel : n;
 750 
 751         // Limit result register mask to acceptable registers
 752         const RegMask &rm = n->out_RegMask();
 753         lrg.AND( rm );
 754 
 755         int ireg = n->ideal_reg();
 756         assert( !n->bottom_type()->isa_oop_ptr() || ireg == Op_RegP,
 757                 "oops must be in Op_RegP's" );
 758 
 759         // Check for vector live range (only if vector register is used).
 760         // On SPARC a vector uses RegD, which could be misaligned, so it is not
 761         // processed as a vector in RA.
 762         if (RegMask::is_vector(ireg))
 763           lrg._is_vector = 1;
 764         assert(n_type->isa_vect() == NULL || lrg._is_vector || ireg == Op_RegD,
 765                "vector must be in vector registers");
 766 
 767         // Check for bound register masks
 768         const RegMask &lrgmask = lrg.mask();
 769         if (lrgmask.is_bound(ireg)) {
 770           lrg._is_bound = 1;
 771         }
 772 
 773         // Check for maximum frequency value
 774         if (lrg._maxfreq < block->_freq) {
 775           lrg._maxfreq = block->_freq;
 776         }
 777 
 778         // Check for oop-iness, or long/double
 779         // Check for multi-kill projection
 780         switch (ireg) {
 781         case MachProjNode::fat_proj:
 782           // Fat projections have size equal to number of registers killed
 783           lrg.set_num_regs(rm.Size());
 784           lrg.set_reg_pressure(lrg.num_regs());
 785           lrg._fat_proj = 1;
 786           lrg._is_bound = 1;
 787           break;
 788         case Op_RegP:
 789 #ifdef _LP64
 790           lrg.set_num_regs(2);  // Size is 2 stack words
 791 #else
 792           lrg.set_num_regs(1);  // Size is 1 stack word
 793 #endif
 794           // Register pressure is tracked relative to the maximum values
 795           // suggested for that platform, INTPRESSURE and FLOATPRESSURE,
 796           // and relative to other types which compete for the same regs.
 797           //
 798           // The following table contains suggested values based on the
 799           // architectures as defined in each .ad file.
 800           // INTPRESSURE and FLOATPRESSURE may be tuned differently for
 801           // compile-speed or performance.
 802           // Note1:
 803           // SPARC and SPARCV9 reg_pressures are at 2 instead of 1
 804           // since .ad registers are defined as high and low halves.
 805           // These reg_pressure values remain compatible with the code
 806           // in is_high_pressure() which relates get_invalid_mask_size(),
 807           // Block::_reg_pressure and INTPRESSURE, FLOATPRESSURE.
 808           // Note2:
 809           // SPARC -d32 has 24 registers available for integral values,
 810           // but only 10 of these are safe for 64-bit longs.
 811           // Using set_reg_pressure(2) for both int and long means
 812           // the allocator will believe it can fit 26 longs into
 813           // registers.  Using 2 for longs and 1 for ints means the
 814           // allocator will attempt to put 52 integers into registers.
 815           // The settings below limit this problem to methods with
 816           // many long values which are being run on 32-bit SPARC.
 817           //
 818           // ------------------- reg_pressure --------------------
 819           // Each entry is reg_pressure_per_value,number_of_regs
 820           //         RegL  RegI  RegFlags   RegF RegD    INTPRESSURE  FLOATPRESSURE
 821           // IA32     2     1     1          1    1          6           6
 822           // IA64     1     1     1          1    1         50          41
 823           // SPARC    2     2     2          2    2         48 (24)     52 (26)
 824           // SPARCV9  2     2     2          2    2         48 (24)     52 (26)
 825           // AMD64    1     1     1          1    1         14          15
 826           // -----------------------------------------------------
 827 #if defined(SPARC)
 828           lrg.set_reg_pressure(2);  // use for v9 as well
 829 #else
 830           lrg.set_reg_pressure(1);  // normally one value per register
 831 #endif
 832           if( n_type->isa_oop_ptr() ) {
 833             lrg._is_oop = 1;
 834           }
 835           break;
 836         case Op_RegL:           // Check for long or double
 837         case Op_RegD:
 838           lrg.set_num_regs(2);
 839           // Define platform specific register pressure
 840 #if defined(SPARC) || defined(ARM)
 841           lrg.set_reg_pressure(2);
 842 #elif defined(IA32)
 843           if( ireg == Op_RegL ) {
 844             lrg.set_reg_pressure(2);
 845           } else {
 846             lrg.set_reg_pressure(1);
 847           }
 848 #else
 849           lrg.set_reg_pressure(1);  // normally one value per register
 850 #endif
 851           // If this def of a double forces a mis-aligned double,
 852           // flag as '_fat_proj' - really flag as allowing misalignment
 853           // AND changes how we count interferences.  A mis-aligned
 854           // double can interfere with TWO aligned pairs, or effectively
 855           // FOUR registers!
 856           if (rm.is_misaligned_pair()) {
 857             lrg._fat_proj = 1;
 858             lrg._is_bound = 1;
 859           }
 860           break;
 861         case Op_RegF:
 862         case Op_RegI:
 863         case Op_RegN:
 864         case Op_RegFlags:
 865         case 0:                 // not an ideal register
 866           lrg.set_num_regs(1);
 867 #ifdef SPARC
 868           lrg.set_reg_pressure(2);
 869 #else
 870           lrg.set_reg_pressure(1);
 871 #endif
 872           break;
 873         case Op_VecS:
 874           assert(Matcher::vector_size_supported(T_BYTE,4), "sanity");
 875           assert(RegMask::num_registers(Op_VecS) == RegMask::SlotsPerVecS, "sanity");
 876           lrg.set_num_regs(RegMask::SlotsPerVecS);
 877           lrg.set_reg_pressure(1);
 878           break;
 879         case Op_VecD:
 880           assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecD), "sanity");
 881           assert(RegMask::num_registers(Op_VecD) == RegMask::SlotsPerVecD, "sanity");
 882           assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecD), "vector should be aligned");
 883           lrg.set_num_regs(RegMask::SlotsPerVecD);
 884           lrg.set_reg_pressure(1);
 885           break;
 886         case Op_VecX:
 887           assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecX), "sanity");
 888           assert(RegMask::num_registers(Op_VecX) == RegMask::SlotsPerVecX, "sanity");
 889           assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecX), "vector should be aligned");
 890           lrg.set_num_regs(RegMask::SlotsPerVecX);
 891           lrg.set_reg_pressure(1);
 892           break;
 893         case Op_VecY:
 894           assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecY), "sanity");
 895           assert(RegMask::num_registers(Op_VecY) == RegMask::SlotsPerVecY, "sanity");
 896           assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecY), "vector should be aligned");
 897           lrg.set_num_regs(RegMask::SlotsPerVecY);
 898           lrg.set_reg_pressure(1);
 899           break;
 900         default:
 901           ShouldNotReachHere();
 902         }
 903       }
 904 
 905       // Now do the same for inputs
 906       uint cnt = n->req();
 907       // Setup for CISC SPILLING
 908       uint inp = (uint)AdlcVMDeps::Not_cisc_spillable;
 909       if( UseCISCSpill && after_aggressive ) {
 910         inp = n->cisc_operand();
 911         if( inp != (uint)AdlcVMDeps::Not_cisc_spillable )
 912           // Convert operand number to edge index number
 913           inp = n->as_Mach()->operand_index(inp);
 914       }
 915       // Prepare register mask for each input
 916       for( uint k = input_edge_start; k < cnt; k++ ) {
 917         uint vreg = _lrg_map.live_range_id(n->in(k));
 918         if (!vreg) {
 919           continue;
 920         }
 921 
 922         // If this instruction is CISC Spillable, add the flags
 923         // bit to its appropriate input
 924         if( UseCISCSpill && after_aggressive && inp == k ) {
 925 #ifndef PRODUCT
 926           if( TraceCISCSpill ) {
 927             tty->print("  use_cisc_RegMask: ");
 928             n->dump();
 929           }
 930 #endif
 931           n->as_Mach()->use_cisc_RegMask();
 932         }
 933 
 934         LRG &lrg = lrgs(vreg);
 935         // // Testing for floating point code shape
 936         // Node *test = n->in(k);
 937         // if( test->is_Mach() ) {
 938         //   MachNode *m = test->as_Mach();
 939         //   int  op = m->ideal_Opcode();
 940         //   if (n->is_Call() && (op == Op_AddF || op == Op_MulF) ) {
 941         //     int zzz = 1;
 942         //   }
 943         // }
 944 
 945         // Limit result register mask to acceptable registers.
 946         // Do not limit registers from uncommon uses before
 947         // AggressiveCoalesce.  This effectively pre-virtual-splits
 948         // around uncommon uses of common defs.
 949         const RegMask &rm = n->in_RegMask(k);
 950         if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * block->_freq) {
 951           // Since we are BEFORE aggressive coalesce, leave the register
 952           // mask untrimmed by the call.  This encourages more coalescing.
 953           // Later, AFTER aggressive, this live range will have to spill
 954           // but the spiller handles slow-path calls very nicely.
 955         } else {
 956           lrg.AND( rm );
 957         }
 958 
 959         // Check for bound register masks
 960         const RegMask &lrgmask = lrg.mask();
 961         int kreg = n->in(k)->ideal_reg();
 962         bool is_vect = RegMask::is_vector(kreg);
 963         assert(n->in(k)->bottom_type()->isa_vect() == NULL ||
 964                is_vect || kreg == Op_RegD,
 965                "vector must be in vector registers");
 966         if (lrgmask.is_bound(kreg))
 967           lrg._is_bound = 1;
 968 
 969         // If this use of a double forces a mis-aligned double,
 970         // flag as '_fat_proj' - really flag as allowing misalignment
 971         // AND changes how we count interferences.  A mis-aligned
 972         // double can interfere with TWO aligned pairs, or effectively
 973         // FOUR registers!
 974 #ifdef ASSERT
 975         if (is_vect) {
 976           assert(lrgmask.is_aligned_sets(lrg.num_regs()), "vector should be aligned");
 977           assert(!lrg._fat_proj, "sanity");
 978           assert(RegMask::num_registers(kreg) == lrg.num_regs(), "sanity");
 979         }
 980 #endif
 981         if (!is_vect && lrg.num_regs() == 2 && !lrg._fat_proj && rm.is_misaligned_pair()) {
 982           lrg._fat_proj = 1;
 983           lrg._is_bound = 1;
 984         }
 985         // if the LRG is an unaligned pair, we will have to spill
 986         // so clear the LRG's register mask if it is not already spilled
 987         if (!is_vect && !n->is_SpillCopy() &&
 988             (lrg._def == NULL || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
 989             lrgmask.is_misaligned_pair()) {
 990           lrg.Clear();
 991         }
 992 
 993         // Check for maximum frequency value
 994         if (lrg._maxfreq < block->_freq) {
 995           lrg._maxfreq = block->_freq;
 996         }
 997 
 998       } // End for all allocated inputs
 999     } // end for all instructions
1000   } // end for all blocks
1001 
1002   // Final per-liverange setup
1003   for (uint i2 = 0; i2 < _lrg_map.max_lrg_id(); i2++) {
1004     LRG &lrg = lrgs(i2);
1005     assert(!lrg._is_vector || !lrg._fat_proj, "sanity");
1006     if (lrg.num_regs() > 1 && !lrg._fat_proj) {
1007       lrg.clear_to_sets();
1008     }
1009     lrg.compute_set_mask_size();
1010     if (lrg.not_free()) {      // Handle case where we lose from the start
1011       lrg.set_reg(OptoReg::Name(LRG::SPILL_REG));
1012       lrg._direct_conflict = 1;
1013     }
1014     lrg.set_degree(0);          // no neighbors in IFG yet
1015   }
1016 }
1017 
1018 // Set the was-lo-degree bit.  Conservative coalescing should not change the
1019 // colorability of the graph.  If any live range was of low-degree before
1020 // coalescing, it should Simplify.  This call sets the was-lo-degree bit.
1021 // The bit is checked in Simplify.
1022 void PhaseChaitin::set_was_low() {
1023 #ifdef ASSERT
1024   for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
1025     int size = lrgs(i).num_regs();
1026     uint old_was_lo = lrgs(i)._was_lo;
1027     lrgs(i)._was_lo = 0;
1028     if( lrgs(i).lo_degree() ) {
1029       lrgs(i)._was_lo = 1;      // Trivially of low degree
1030     } else {                    // Else check the Briggs assertion
1031       // Briggs' observation is that the lo-degree neighbors of a
1032       // hi-degree live range will not interfere with the color choices
1033       // of said hi-degree live range.  The Simplify reverse-stack-coloring
1034       // order takes care of the details.  Hence you do not have to count
1035       // low-degree neighbors when determining if this guy colors.
1036       int briggs_degree = 0;
1037       IndexSet *s = _ifg->neighbors(i);
1038       IndexSetIterator elements(s);
1039       uint lidx;
1040       while((lidx = elements.next()) != 0) {
1041         if( !lrgs(lidx).lo_degree() )
1042           briggs_degree += MAX2(size,lrgs(lidx).num_regs());
1043       }
1044       if( briggs_degree < lrgs(i).degrees_of_freedom() )
1045         lrgs(i)._was_lo = 1;    // Low degree via the briggs assertion
1046     }
1047     assert(old_was_lo <= lrgs(i)._was_lo, "_was_lo may not decrease");
1048   }
1049 #endif
1050 }
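// Illustrative sketch of the Briggs test above (hypothetical numbers).  Suppose
// live range L needs 1 register, has degrees_of_freedom() == 10 and an
// effective degree of 12, so it is not trivially lo-degree.  If only 4 of its
// neighbors are themselves hi-degree, each needing 2 registers, then
//
//   briggs_degree = 4 * MAX2(1, 2) = 8 < 10
//
// so L is still guaranteed a color once the lo-degree neighbors simplify away,
// and _was_lo is set.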
1051 
1052 #define REGISTER_CONSTRAINED 16
1053 
1054 // Compute cost/area ratio, in case we spill.  Build the lo-degree list.
1055 void PhaseChaitin::cache_lrg_info( ) {
1056 
1057   for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
1058     LRG &lrg = lrgs(i);
1059 
1060     // Check for being of low degree: means we can be trivially colored.
1061     // Low degree, dead or must-spill guys just get to simplify right away
1062     if( lrg.lo_degree() ||
1063        !lrg.alive() ||
1064         lrg._must_spill ) {
1065       // Split low degree list into those guys that must get a
1066       // register and those that can go to register or stack.
1067       // The idea is LRGs that can go register or stack color first when
1068       // they have a good chance of getting a register.  The register-only
1069       // lo-degree live ranges always get a register.
1070       OptoReg::Name hi_reg = lrg.mask().find_last_elem();
1071       if( OptoReg::is_stack(hi_reg)) { // Can go to stack?
1072         lrg._next = _lo_stk_degree;
1073         _lo_stk_degree = i;
1074       } else {
1075         lrg._next = _lo_degree;
1076         _lo_degree = i;
1077       }
1078     } else {                    // Else high degree
1079       lrgs(_hi_degree)._prev = i;
1080       lrg._next = _hi_degree;
1081       lrg._prev = 0;
1082       _hi_degree = i;
1083     }
1084   }
1085 }
1086 
1087 // Simplify the IFG by removing LRGs of low degree that have NO copies
1088 void PhaseChaitin::Pre_Simplify( ) {
1089 
1090   // Warm up the lo-degree no-copy list
1091   int lo_no_copy = 0;
1092   for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
1093     if ((lrgs(i).lo_degree() && !lrgs(i)._has_copy) ||
1094         !lrgs(i).alive() ||
1095         lrgs(i)._must_spill) {
1096       lrgs(i)._next = lo_no_copy;
1097       lo_no_copy = i;
1098     }
1099   }
1100 
1101   while( lo_no_copy ) {
1102     uint lo = lo_no_copy;
1103     lo_no_copy = lrgs(lo)._next;
1104     int size = lrgs(lo).num_regs();
1105 
1106     // Put the simplified guy on the simplified list.
1107     lrgs(lo)._next = _simplified;
1108     _simplified = lo;
1109 
1110     // Yank this guy from the IFG.
1111     IndexSet *adj = _ifg->remove_node( lo );
1112 
1113     // If any neighbors' degrees fall below their number of
1114     // allowed registers, then put that neighbor on the low degree
1115     // list.  Note that 'degree' can only fall and 'numregs' is
1116     // unchanged by this action.  Thus the two are equal at most once,
1117     // so LRGs hit the lo-degree worklists at most once.
1118     IndexSetIterator elements(adj);
1119     uint neighbor;
1120     while ((neighbor = elements.next()) != 0) {
1121       LRG *n = &lrgs(neighbor);
1122       assert( _ifg->effective_degree(neighbor) == n->degree(), "" );
1123 
1124       // Check for just becoming of-low-degree
1125       if( n->just_lo_degree() && !n->_has_copy ) {
1126         assert(!(*_ifg->_yanked)[neighbor],"Cannot move to lo degree twice");
1127         // Put on lo-degree list
1128         n->_next = lo_no_copy;
1129         lo_no_copy = neighbor;
1130       }
1131     }
1132   } // End of while lo-degree no_copy worklist not empty
1133 
1134   // No more lo-degree no-copy live ranges to simplify
1135 }
1136 
1137 // Simplify the IFG by removing LRGs of low degree.
1138 void PhaseChaitin::Simplify( ) {
1139 
1140   while( 1 ) {                  // Repeat till simplified it all
1141     // May want to explore simplifying lo_degree before _lo_stk_degree.
1142     // This might result in more spills coloring into registers during
1143     // Select().
1144     while( _lo_degree || _lo_stk_degree ) {
1145       // If possible, pull from lo_stk first
1146       uint lo;
1147       if( _lo_degree ) {
1148         lo = _lo_degree;
1149         _lo_degree = lrgs(lo)._next;
1150       } else {
1151         lo = _lo_stk_degree;
1152         _lo_stk_degree = lrgs(lo)._next;
1153       }
1154 
1155       // Put the simplified guy on the simplified list.
1156       lrgs(lo)._next = _simplified;
1157       _simplified = lo;
1158       // If this guy is "at risk" then mark his current neighbors
1159       if( lrgs(lo)._at_risk ) {
1160         IndexSetIterator elements(_ifg->neighbors(lo));
1161         uint datum;
1162         while ((datum = elements.next()) != 0) {
1163           lrgs(datum)._risk_bias = lo;
1164         }
1165       }
1166 
1167       // Yank this guy from the IFG.
1168       IndexSet *adj = _ifg->remove_node( lo );
1169 
1170       // If any neighbors' degrees fall below their number of
1171       // allowed registers, then put that neighbor on the low degree
1172       // list.  Note that 'degree' can only fall and 'numregs' is
1173       // unchanged by this action.  Thus the two are equal at most once,
1174       // so LRGs hit the lo-degree worklist at most once.
1175       IndexSetIterator elements(adj);
1176       uint neighbor;
1177       while ((neighbor = elements.next()) != 0) {
1178         LRG *n = &lrgs(neighbor);
1179 #ifdef ASSERT
1180         if( VerifyOpto || VerifyRegisterAllocator ) {
1181           assert( _ifg->effective_degree(neighbor) == n->degree(), "" );
1182         }
1183 #endif
1184 
1185         // Check for just becoming of-low-degree just counting registers.
1186         // _must_spill live ranges are already on the low degree list.
1187         if( n->just_lo_degree() && !n->_must_spill ) {
1188           assert(!(*_ifg->_yanked)[neighbor],"Cannot move to lo degree twice");
1189           // Pull from hi-degree list
1190           uint prev = n->_prev;
1191           uint next = n->_next;
1192           if( prev ) lrgs(prev)._next = next;
1193           else _hi_degree = next;
1194           lrgs(next)._prev = prev;
1195           n->_next = _lo_degree;
1196           _lo_degree = neighbor;
1197         }
1198       }
1199     } // End of while lo-degree/lo_stk_degree worklist not empty
1200 
1201     // Check for got everything: is hi-degree list empty?
1202     if( !_hi_degree ) break;
1203 
1204     // Time to pick a potential spill guy
1205     uint lo_score = _hi_degree;
1206     double score = lrgs(lo_score).score();
1207     double area = lrgs(lo_score)._area;
1208     double cost = lrgs(lo_score)._cost;
1209     bool bound = lrgs(lo_score)._is_bound;
1210 
1211     // Find cheapest guy
1212     debug_only( int lo_no_simplify=0; );
1213     for( uint i = _hi_degree; i; i = lrgs(i)._next ) {
1214       assert( !(*_ifg->_yanked)[i], "" );
1215       // It's just vaguely possible to move hi-degree to lo-degree without
1216       // going through a just-lo-degree stage: If you remove a double from
1217       // a float live range its degree will drop by 2 and you can skip the
1218       // just-lo-degree stage.  It's very rare (shows up after 5000+ methods
1219       // in -Xcomp of Java2Demo).  So just choose this guy to simplify next.
1220       if( lrgs(i).lo_degree() ) {
1221         lo_score = i;
1222         break;
1223       }
1224       debug_only( if( lrgs(i)._was_lo ) lo_no_simplify=i; );
1225       double iscore = lrgs(i).score();
1226       double iarea = lrgs(i)._area;
1227       double icost = lrgs(i)._cost;
1228       bool ibound = lrgs(i)._is_bound;
1229 
1230       // Compare cost/area of i vs cost/area of lo_score.  Smaller cost/area
1231       // wins.  Ties happen because all live ranges in question have spilled
1232       // a few times before and the spill-score adds a huge number which
1233       // washes out the low order bits.  We are choosing the lesser of 2
1234       // evils; in this case pick largest area to spill.
1235       // Ties also happen when live ranges are defined and used only inside
1236       // one block, in which case their area is 0 and the score is set to max.
1237       // In such case choose bound live range over unbound to free registers
1238       // or with smaller cost to spill.
1239       if( iscore < score ||
1240           (iscore == score && iarea > area && lrgs(lo_score)._was_spilled2) ||
1241           (iscore == score && iarea == area &&
1242            ( (ibound && !bound) || (ibound == bound && (icost < cost)) )) ) {
1243         lo_score = i;
1244         score = iscore;
1245         area = iarea;
1246         cost = icost;
1247         bound = ibound;
1248       }
1249     }
1250     LRG *lo_lrg = &lrgs(lo_score);
1251     // The live range we choose for spilling is either hi-degree, or very
1252     // rarely it can be low-degree.  If we choose a hi-degree live range
1253     // there better not be any lo-degree choices.
1254     assert( lo_lrg->lo_degree() || !lo_no_simplify, "Live range was lo-degree before coalesce; should simplify" );
1255 
1256     // Pull from hi-degree list
1257     uint prev = lo_lrg->_prev;
1258     uint next = lo_lrg->_next;
1259     if( prev ) lrgs(prev)._next = next;
1260     else _hi_degree = next;
1261     lrgs(next)._prev = prev;
1262     // Jam him on the lo-degree list, despite his high degree.
1263     // Maybe he'll get a color, and maybe he'll spill.
1264     // Only Select() will know.
1265     lrgs(lo_score)._at_risk = true;
1266     _lo_degree = lo_score;
1267     lo_lrg->_next = 0;
1268 
1269   } // End of while not simplified everything
1270 
1271 }
1272 
1273 // Is 'reg' register legal for 'lrg'?
1274 static bool is_legal_reg(LRG &lrg, OptoReg::Name reg, int chunk) {
1275   if (reg >= chunk && reg < (chunk + RegMask::CHUNK_SIZE) &&
1276       lrg.mask().Member(OptoReg::add(reg,-chunk))) {
1277     // RA uses OptoReg, which represents the highest element of a register set.
1278     // For example, vectorX (128bit) on x86 uses [XMM,XMMb,XMMc,XMMd] set
1279     // in which XMMd is used by RA to represent such vectors. A double value
1280     // uses [XMM,XMMb] pairs and XMMb is used by RA for it.
1281     // The register mask uses largest bits set of overlapping register sets.
1282     // On x86 with AVX it uses 8 bits for each XMM register set.
1283     //
1284     // The 'lrg' already has cleared-to-set register mask (done in Select()
1285     // before calling choose_color()). Passing mask.Member(reg) check above
1286     // indicates that the size (num_regs) of the 'reg' set is less than or
1287     // equal to the 'lrg' set size.
1288     // For set size 1 any register which is member of 'lrg' mask is legal.
1289     if (lrg.num_regs()==1)
1290       return true;
1291     // For larger sets only an aligned register with the same set size is legal.
1292     int mask = lrg.num_regs()-1;
1293     if ((reg&mask) == mask)
1294       return true;
1295   }
1296   return false;
1297 }
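// Illustrative sketch of the alignment test above (not allocator code), for a
// 4-slot vector live range: num_regs() == 4, so mask == 3.  Since RA names a
// register set by its highest element,
//
//   reg 3 names the aligned set [0..3]    ->  (3 & 3) == 3, legal
//   reg 7 names the aligned set [4..7]    ->  (7 & 3) == 3, legal
//   reg 5 names the misaligned set [2..5] ->  (5 & 3) == 1, rejected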
1298 
1299 // Choose a color using the biasing heuristic
1300 OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) {
1301 
1302   // Check for "at_risk" LRG's
1303   uint risk_lrg = _lrg_map.find(lrg._risk_bias);
1304   if( risk_lrg != 0 ) {
1305     // Walk the colored neighbors of the "at_risk" candidate
1306     // Choose a color which is both legal and already taken by a neighbor
1307     // of the "at_risk" candidate in order to improve the chances of the
1308     // "at_risk" candidate of coloring
1309     IndexSetIterator elements(_ifg->neighbors(risk_lrg));
1310     uint datum;
1311     while ((datum = elements.next()) != 0) {
1312       OptoReg::Name reg = lrgs(datum).reg();
1313       // If this LRG's register is legal for us, choose it
1314       if (is_legal_reg(lrg, reg, chunk))
1315         return reg;
1316     }
1317   }
1318 
1319   uint copy_lrg = _lrg_map.find(lrg._copy_bias);
1320   if( copy_lrg != 0 ) {
1321     // If he has a color,
1322     if( !(*(_ifg->_yanked))[copy_lrg] ) {
1323       OptoReg::Name reg = lrgs(copy_lrg).reg();
1324       //  And it is legal for you,
1325       if (is_legal_reg(lrg, reg, chunk))
1326         return reg;
1327     } else if( chunk == 0 ) {
1328       // Choose a color which is legal for him
1329       RegMask tempmask = lrg.mask();
1330       tempmask.AND(lrgs(copy_lrg).mask());
1331       tempmask.clear_to_sets(lrg.num_regs());
1332       OptoReg::Name reg = tempmask.find_first_set(lrg.num_regs());
1333       if (OptoReg::is_valid(reg))
1334         return reg;
1335     }
1336   }
1337 
1338   // If no bias info exists, just go with the register selection ordering
1339   if (lrg._is_vector || lrg.num_regs() == 2) {
1340     // Find an aligned set
1341     return OptoReg::add(lrg.mask().find_first_set(lrg.num_regs()),chunk);
1342   }
1343 
1344   // CNC - Fun hack.  Alternate 1st and 2nd selection.  Enables post-allocate
1345   // copy removal to remove many more copies, by preventing a just-assigned
1346   // register from being repeatedly assigned.
1347   OptoReg::Name reg = lrg.mask().find_first_elem();
1348   if( (++_alternate & 1) && OptoReg::is_valid(reg) ) {
1349     // This 'Remove; find; Insert' idiom is an expensive way to find the
1350     // SECOND element in the mask.
1351     lrg.Remove(reg);
1352     OptoReg::Name reg2 = lrg.mask().find_first_elem();
1353     lrg.Insert(reg);
1354     if( OptoReg::is_reg(reg2))
1355       reg = reg2;
1356   }
1357   return OptoReg::add( reg, chunk );
1358 }
1359 
1360 // Choose a color in the current chunk
1361 OptoReg::Name PhaseChaitin::choose_color( LRG &lrg, int chunk ) {
1362   assert( C->in_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP-1)), "must not allocate stack0 (inside preserve area)");
1363   assert(C->out_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP+0)), "must not allocate stack0 (inside preserve area)");
1364 
1365   if( lrg.num_regs() == 1 ||    // Common Case
1366       !lrg._fat_proj )          // Aligned+adjacent pairs ok
1367     // Use a heuristic to "bias" the color choice
1368     return bias_color(lrg, chunk);
1369 
1370   assert(!lrg._is_vector, "should not be a vector here" );
1371   assert( lrg.num_regs() >= 2, "dead live ranges do not color" );
1372 
1373   // Fat-proj case or misaligned double argument.
1374   assert(lrg.compute_mask_size() == lrg.num_regs() ||
1375          lrg.num_regs() == 2,"fat projs exactly color" );
1376   assert( !chunk, "always color in 1st chunk" );
1377   // Return the highest element in the set.
1378   return lrg.mask().find_last_elem();
1379 }
1380 
1381 // Select colors by re-inserting LRGs back into the IFG.  LRGs are re-inserted
1382 // in reverse order of removal.  As long as nothing of hi-degree was yanked,
1383 // everything going back is guaranteed a color.  Select that color.  If some
1384 // hi-degree LRG cannot get a color then we record that we must spill.
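     // Returns the number of live ranges that failed to color (the spill count).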
1385 uint PhaseChaitin::Select( ) {
1386   uint spill_reg = LRG::SPILL_REG;
1387   _max_reg = OptoReg::Name(0);  // Past max register used
1388   while( _simplified ) {
1389     // Pull next LRG from the simplified list - in reverse order of removal
1390     uint lidx = _simplified;
1391     LRG *lrg = &lrgs(lidx);
1392     _simplified = lrg->_next;
1393 
1394 
1395 #ifndef PRODUCT
1396     if (trace_spilling()) {
1397       ttyLocker ttyl;
1398       tty->print_cr("L%d selecting degree %d degrees_of_freedom %d", lidx, lrg->degree(),
1399                     lrg->degrees_of_freedom());
1400       lrg->dump();
1401     }
1402 #endif
1403 
1404     // Re-insert into the IFG
1405     _ifg->re_insert(lidx);
1406     if( !lrg->alive() ) continue;
1407     // capture allstackedness flag before mask is hacked
1408     const int is_allstack = lrg->mask().is_AllStack();
1409 
1410     // Yeah, yeah, yeah, I know, I know.  I can refactor this
1411     // to avoid the GOTO, although the refactored code will not
1412     // be much clearer.  We arrive here IFF we have a stack-based
1413     // live range that cannot color in the current chunk, and it
1414     // has to move into the next free stack chunk.
1415     int chunk = 0;              // Current chunk is first chunk
1416     retry_next_chunk:
1417 
1418     // Remove neighbor colors
1419     IndexSet *s = _ifg->neighbors(lidx);
1420 
1421     debug_only(RegMask orig_mask = lrg->mask();)
1422     IndexSetIterator elements(s);
1423     uint neighbor;
1424     while ((neighbor = elements.next()) != 0) {
1425       // Note that neighbor might be a spill_reg.  In this case, exclusion
1426       // of its color will be a no-op, since the spill_reg chunk is in outer
1427       // space.  Also, if neighbor is in a different chunk, this exclusion
1428       // will be a no-op.  (Later on, if lrg runs out of possible colors in
1429       // its chunk, a new chunk of color may be tried, in which case
1430       // examination of neighbors is started again, at retry_next_chunk.)
1431       LRG &nlrg = lrgs(neighbor);
1432       OptoReg::Name nreg = nlrg.reg();
1433       // Only subtract masks in the same chunk
1434       if( nreg >= chunk && nreg < chunk + RegMask::CHUNK_SIZE ) {
1435 #ifndef PRODUCT
1436         uint size = lrg->mask().Size();
1437         RegMask rm = lrg->mask();
1438 #endif
1439         lrg->SUBTRACT(nlrg.mask());
1440 #ifndef PRODUCT
1441         if (trace_spilling() && lrg->mask().Size() != size) {
1442           ttyLocker ttyl;
1443           tty->print("L%d ", lidx);
1444           rm.dump();
1445           tty->print(" intersected L%d ", neighbor);
1446           nlrg.mask().dump();
1447           tty->print(" removed ");
1448           rm.SUBTRACT(lrg->mask());
1449           rm.dump();
1450           tty->print(" leaving ");
1451           lrg->mask().dump();
1452           tty->cr();
1453         }
1454 #endif
1455       }
1456     }
1457     //assert(is_allstack == lrg->mask().is_AllStack(), "nbrs must not change AllStackedness");
1458     // Aligned pairs need aligned masks
1459     assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
1460     if (lrg->num_regs() > 1 && !lrg->_fat_proj) {
1461       lrg->clear_to_sets();
1462     }
1463 
1464     // Check if a color is available and if so pick the color
1465     OptoReg::Name reg = choose_color( *lrg, chunk );
1466 #ifdef SPARC
1467     debug_only(lrg->compute_set_mask_size());
1468     assert(lrg->num_regs() < 2 || lrg->is_bound() || is_even(reg-1), "allocate all doubles aligned");
1469 #endif
1470 
1471     //---------------
1472     // If we fail to color and the AllStack flag is set, trigger
1473     // a chunk-rollover event
1474     if(!OptoReg::is_valid(OptoReg::add(reg,-chunk)) && is_allstack) {
1475       // Bump register mask up to next stack chunk
1476       chunk += RegMask::CHUNK_SIZE;
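           // Restore the full AllStack mask; neighbor colors in the new chunk
           // will be subtracted again after the jump to retry_next_chunk.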
1477       lrg->Set_All();
1478 
1479       goto retry_next_chunk;
1480     }
1481 
1482     //---------------
1483     // Did we get a color?
1484     else if( OptoReg::is_valid(reg)) {
1485 #ifndef PRODUCT
1486       RegMask avail_rm = lrg->mask();
1487 #endif
1488 
1489       // Record selected register
1490       lrg->set_reg(reg);
1491 
1492       if( reg >= _max_reg )     // Compute max register limit
1493         _max_reg = OptoReg::add(reg,1);
1494       // Fold reg back into normal space
1495       reg = OptoReg::add(reg,-chunk);
1496 
1497       // If the live range is not bound, then we actually had some choices
1498       // to make.  In this case, the mask has more bits in it than the colors
1499       // chosen.  Restrict the mask to just what was picked.
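           // e.g. a register pair colored at 'reg' keeps only {reg, reg-1}
           // in its mask after the loop below.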
1500       int n_regs = lrg->num_regs();
1501       assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
1502       if (n_regs == 1 || !lrg->_fat_proj) {
1503         assert(!lrg->_is_vector || n_regs <= RegMask::SlotsPerVecY, "sanity");
1504         lrg->Clear();           // Clear the mask
1505         lrg->Insert(reg);       // Set regmask to match selected reg
1506         // For vectors and pairs, also insert the low bit of the pair
1507         for (int i = 1; i < n_regs; i++)
1508           lrg->Insert(OptoReg::add(reg,-i));
1509         lrg->set_mask_size(n_regs);
1510       } else {                  // Else fatproj
1511         // mask must be equal to fatproj bits, by definition
1512       }
1513 #ifndef PRODUCT
1514       if (trace_spilling()) {
1515         ttyLocker ttyl;
1516         tty->print("L%d selected ", lidx);
1517         lrg->mask().dump();
1518         tty->print(" from ");
1519         avail_rm.dump();
1520         tty->cr();
1521       }
1522 #endif
1523       // Note that reg is the highest-numbered register in the newly-bound mask.
1524     } // end color available case
1525 
1526     //---------------
1527     // Live range is live and no colors available
1528     else {
1529       assert( lrg->alive(), "" );
1530       assert( !lrg->_fat_proj || lrg->is_multidef() ||
1531               lrg->_def->outcnt() > 0, "fat_proj cannot spill");
1532       assert( !orig_mask.is_AllStack(), "All Stack does not spill" );
1533 
1534       // Assign the next special spill register
1535       lrg->set_reg(OptoReg::Name(spill_reg++));
1536       // Do not empty the regmask; leave mask_size lying around
1537       // for use during Spilling
1538 #ifndef PRODUCT
1539       if( trace_spilling() ) {
1540         ttyLocker ttyl;
1541         tty->print("L%d spilling with neighbors: ", lidx);
1542         s->dump();
1543         debug_only(tty->print(" original mask: "));
1544         debug_only(orig_mask.dump());
1545         dump_lrg(lidx);
1546       }
1547 #endif
1548     } // end spill case
1549 
1550   }
1551 
1552   return spill_reg-LRG::SPILL_REG;      // Return number of spills
1553 }
1554 
1555 // Copy 'was_spilled'-edness from the source Node to the dst Node.
1556 void PhaseChaitin::copy_was_spilled( Node *src, Node *dst ) {
1557   if( _spilled_once.test(src->_idx) ) {
1558     _spilled_once.set(dst->_idx);
1559     lrgs(_lrg_map.find(dst))._was_spilled1 = 1;
1560     if( _spilled_twice.test(src->_idx) ) {
1561       _spilled_twice.set(dst->_idx);
1562       lrgs(_lrg_map.find(dst))._was_spilled2 = 1;
1563     }
1564   }
1565 }
1566 
1567 // Set the 'spilled_once' or 'spilled_twice' flag on a node.
1568 void PhaseChaitin::set_was_spilled( Node *n ) {
1569   if( _spilled_once.test_set(n->_idx) )
1570     _spilled_twice.set(n->_idx);
1571 }
1572 
1573 // Convert Ideal spill instructions into proper FramePtr + offset Loads and
1574 // Stores.  Use-def chains are NOT preserved, but Node->LRG->reg maps are.
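     // Illustrative sketch (the instruction names are hypothetical; the real
     // names come from the platform AD file): if the 'src' input of
     //   addI_rReg dst, src
     // was spilled to [FP + #16], the node is replaced by its cisc_version,
     // roughly  addI_rReg_mem dst, [FP + #16],  with the frame pointer wired
     // in as the base register of the memory operand.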
1575 void PhaseChaitin::fixup_spills() {
1576   // This function does only cisc spill work.
1577   if( !UseCISCSpill ) return;
1578 
1579   NOT_PRODUCT( Compile::TracePhase t3("fixupSpills", &_t_fixupSpills, TimeCompiler); )
1580 
1581   // Grab the Frame Pointer
1582   Node *fp = _cfg.get_root_block()->head()->in(1)->in(TypeFunc::FramePtr);
1583 
1584   // For all blocks
1585   for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
1586     Block* block = _cfg.get_block(i);
1587 
1588     // For all instructions in block
1589     uint last_inst = block->end_idx();
1590     for (uint j = 1; j <= last_inst; j++) {
1591       Node* n = block->get_node(j);
1592 
1593       // Dead instruction???
1594       assert( n->outcnt() != 0 ||// Nothing dead after post alloc
1595               C->top() == n ||  // Or the random TOP node
1596               n->is_Proj(),     // Or a fat-proj kill node
1597               "No dead instructions after post-alloc" );
1598 
1599       int inp = n->cisc_operand();
1600       if( inp != AdlcVMDeps::Not_cisc_spillable ) {
1601         // Convert operand number to edge index number
1602         MachNode *mach = n->as_Mach();
1603         inp = mach->operand_index(inp);
1604         Node *src = n->in(inp);   // Value to load or store
1605         LRG &lrg_cisc = lrgs(_lrg_map.find_const(src));
1606         OptoReg::Name src_reg = lrg_cisc.reg();
1607         // Doubles record the HIGH register of an adjacent pair.
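             // e.g. (illustrative) a spilled double occupying stack slots [S4,S5]
             // records S5; stepping back num_regs()-1 slots yields S4, the low
             // slot, whose offset is the one used for the memory operand.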
1608         src_reg = OptoReg::add(src_reg,1-lrg_cisc.num_regs());
1609         if( OptoReg::is_stack(src_reg) ) { // If input is on stack
1610           // This is a CISC Spill, get stack offset and construct new node
1611 #ifndef PRODUCT
1612           if( TraceCISCSpill ) {
1613             tty->print("    reg-instr:  ");
1614             n->dump();
1615           }
1616 #endif
1617           int stk_offset = reg2offset(src_reg);
1618           // Bailout if we might exceed node limit when spilling this instruction
1619           C->check_node_count(0, "out of nodes fixing spills");
1620           if (C->failing())  return;
1621           // Transform node
1622           MachNode *cisc = mach->cisc_version(stk_offset, C)->as_Mach();
1623           cisc->set_req(inp,fp);          // Base register is frame pointer
1624           if( cisc->oper_input_base() > 1 && mach->oper_input_base() <= 1 ) {
1625             assert( cisc->oper_input_base() == 2, "Only adding one edge");
1626             cisc->ins_req(1,src);         // Requires a memory edge
1627           }
1628           block->map_node(cisc, j);          // Insert into basic block
1629           n->subsume_by(cisc, C); // Correct graph
1630           //
1631           ++_used_cisc_instructions;
1632 #ifndef PRODUCT
1633           if( TraceCISCSpill ) {
1634             tty->print("    cisc-instr: ");
1635             cisc->dump();
1636           }
1637 #endif
1638         } else {
1639 #ifndef PRODUCT
1640           if( TraceCISCSpill ) {
1641             tty->print("    using reg-instr: ");
1642             n->dump();
1643           }
1644 #endif
1645           ++_unused_cisc_instructions;    // input could have used the stack, but stayed in a register
1646         }
1647       }
1648 
1649     } // End of for all instructions
1650 
1651   } // End of for all blocks
1652 }
1653 
1654 // Helper for stretch_base_pointer_live_ranges(), below; recursively discover
1655 // the base Node for a given derived Node.  Easy for AddP-related machine
1656 // nodes, but needs to be recursive for derived Phis.
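     // For example, an AddP-style machine node computing  p + #16  from an oop
     // 'p' yields base 'p' directly; a Phi that merges several derived pointers
     // may require a matching base Phi, which is constructed further below.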
1657 Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derived, uint &maxlrg ) {
1658   // See if already computed; if so return it
1659   if( derived_base_map[derived->_idx] )
1660     return derived_base_map[derived->_idx];
1661 
1662   // See if this happens to be a base.
1663   // NOTE: we use TypePtr instead of TypeOopPtr because we can have
1664   // pointers derived from NULL!  These are always along paths that
1665   // can't happen at run-time but the optimizer cannot deduce it so
1666   // we have to handle it gracefully.
1667   assert(!derived->bottom_type()->isa_narrowoop() ||
1668           derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
1669   const TypePtr *tj = derived->bottom_type()->isa_ptr();
1670   // If it's an OOP with a non-zero offset, then it is derived.
1671   if( tj == NULL || tj->_offset == 0 ) {
1672     derived_base_map[derived->_idx] = derived;
1673     return derived;
1674   }
1675   // Derived is NULL+offset?  Base is NULL!
1676   if( derived->is_Con() ) {
1677     Node *base = _matcher.mach_null();
1678     assert(base != NULL, "sanity");
1679     if (base->in(0) == NULL) {
1680       // Initialize it once and make it shared:
1681       // set control to _root and place it into Start block
1682       // (where top() node is placed).
1683       base->init_req(0, _cfg.get_root_node());
1684       Block *startb = _cfg.get_block_for_node(C->top());
1685       uint node_pos = startb->find_node(C->top());
1686       startb->insert_node(base, node_pos);
1687       _cfg.map_node_to_block(base, startb);
1688       assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
1689 
1690       // The loadConP0 might have projection nodes depending on architecture
1691       // Add the projection nodes to the CFG
1692       for (DUIterator_Fast imax, i = base->fast_outs(imax); i < imax; i++) {
1693         Node* use = base->fast_out(i);
1694         if (use->is_MachProj()) {
1695           startb->insert_node(use, ++node_pos);
1696           _cfg.map_node_to_block(use, startb);
1697           new_lrg(use, maxlrg++);
1698         }
1699       }
1700     }
1701     if (_lrg_map.live_range_id(base) == 0) {
1702       new_lrg(base, maxlrg++);
1703     }
1704     assert(base->in(0) == _cfg.get_root_node() && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base NULL should be shared");
1705     derived_base_map[derived->_idx] = base;
1706     return base;
1707   }
1708 
1709   // Check for AddP-related opcodes
1710   if (!derived->is_Phi()) {
1711     assert(derived->as_Mach()->ideal_Opcode() == Op_AddP, err_msg_res("but is: %s", derived->Name()));
1712     Node *base = derived->in(AddPNode::Base);
1713     derived_base_map[derived->_idx] = base;
1714     return base;
1715   }
1716 
1717   // Recursively find bases for Phis.
1718   // First check to see if we can avoid a base Phi here.
1719   Node *base = find_base_for_derived( derived_base_map, derived->in(1),maxlrg);
1720   uint i;
1721   for( i = 2; i < derived->req(); i++ )
1722     if( base != find_base_for_derived( derived_base_map,derived->in(i),maxlrg))
1723       break;
1724   // Went to the end without finding any different bases?
1725   if( i == derived->req() ) {   // No need for a base Phi here
1726     derived_base_map[derived->_idx] = base;
1727     return base;
1728   }
1729 
1730   // Now we see we need a base-Phi here to merge the bases
1731   const Type *t = base->bottom_type();
1732   base = new (C) PhiNode( derived->in(0), t );
1733   for( i = 1; i < derived->req(); i++ ) {
1734     base->init_req(i, find_base_for_derived(derived_base_map, derived->in(i), maxlrg));
1735     t = t->meet(base->in(i)->bottom_type());
1736   }
1737   base->as_Phi()->set_type(t);
1738 
1739   // Search the current block for an existing base-Phi
1740   Block *b = _cfg.get_block_for_node(derived);
1741   for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
1742     Node *phi = b->get_node(i);
1743     if( !phi->is_Phi() ) {      // Found end of Phis with no match?
1744       b->insert_node(base,  i); // Must insert created Phi here as base
1745       _cfg.map_node_to_block(base, b);
1746       new_lrg(base,maxlrg++);
1747       break;
1748     }
1749     // See if Phi matches.
1750     uint j;
1751     for( j = 1; j < base->req(); j++ )
1752       if( phi->in(j) != base->in(j) &&
1753           !(phi->in(j)->is_Con() && base->in(j)->is_Con()) ) // allow different NULLs
1754         break;
1755     if( j == base->req() ) {    // All inputs match?
1756       base = phi;               // Then use existing 'phi' and drop 'base'
1757       break;
1758     }
1759   }
1760 
1761 
1762   // Cache info for later passes
1763   derived_base_map[derived->_idx] = base;
1764   return base;
1765 }
1766 
1767 // At each Safepoint, insert extra debug edges for each derived value / base
1768 // pointer pair that is live across the Safepoint, for oopmap building.  The
1769 // edge pairs are added after sfpt->jvmtail()->oopoff(), but are part of the
1770 // required edge set.
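     // For example, if derived pointer 'd' with base 'b' is live across a
     // safepoint, the safepoint node gains the extra input pair (d, b) so the
     // oopmap builder can record and relocate them together.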
1771 bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
1772   bool must_recompute_live = false;
1773   uint maxlrg = _lrg_map.max_lrg_id();
1774   Node **derived_base_map = (Node**)a->Amalloc(sizeof(Node*)*C->unique());
1775   memset( derived_base_map, 0, sizeof(Node*)*C->unique() );
1776 
1777   // For all blocks in RPO do...
1778   for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
1779     Block* block = _cfg.get_block(i);
1780     // Note use of deep-copy constructor.  I cannot hammer the original
1781     // liveout bits, because they are needed by the following coalesce pass.
1782     IndexSet liveout(_live->live(block));
1783 
1784     for (uint j = block->end_idx() + 1; j > 1; j--) {
1785       Node* n = block->get_node(j - 1);
1786 
1787       // Pre-split compares of loop-phis.  Loop-phis form a cycle we would
1788       // like to see in the same register.  Compare uses the loop-phi and so
1789       // extends its live range BUT cannot be part of the cycle.  If this
1790       // extended live range overlaps with the update of the loop-phi value
1791       // we need both alive at the same time -- which requires at least 1
1792       // copy.  But because Intel has only 2-address instructions we end up with
1793       // at least 2 copies, one before the loop-phi update instruction and
1794       // one after.  Instead we split the input to the compare just after the
1795       // phi.
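           // Sketch:  i = Phi(init, i+1); ... CmpI(i, limit)  -- the spill copy
           // inserted just after the Phi feeds the compare, so the old and the
           // updated Phi values need not both stay live across the increment.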
1796       if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) {
1797         Node *phi = n->in(1);
1798         if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
1799           Block *phi_block = _cfg.get_block_for_node(phi);
1800           if (_cfg.get_block_for_node(phi_block->pred(2)) == block) {
1801             const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
1802             Node *spill = new (C) LoopPhiInputSpillNode(phi, *mask, *mask);
1803             insert_proj( phi_block, 1, spill, maxlrg++ );
1804             n->set_req(1,spill);
1805             must_recompute_live = true;
1806           }
1807         }
1808       }
1809 
1810       // Get value being defined
1811       uint lidx = _lrg_map.live_range_id(n);
1812       // Ignore the occasional brand-new live range
1813       if (lidx && lidx < _lrg_map.max_lrg_id()) {
1814         // Remove from live-out set
1815         liveout.remove(lidx);
1816 
1817         // Copies do not define a new value and so do not interfere.
1818         // Remove the copy's source from the liveout set before interfering.
1819         uint idx = n->is_Copy();
1820         if (idx) {
1821           liveout.remove(_lrg_map.live_range_id(n->in(idx)));
1822         }
1823       }
1824 
1825       // Found a safepoint?
1826       JVMState *jvms = n->jvms();
1827       if( jvms ) {
1828         // Now scan for a live derived pointer
1829         IndexSetIterator elements(&liveout);
1830         uint neighbor;
1831         while ((neighbor = elements.next()) != 0) {
1832           // Find reaching DEF for base and derived values
1833           // This works because we are still in SSA during this call.
1834           Node *derived = lrgs(neighbor)._def;
1835           const TypePtr *tj = derived->bottom_type()->isa_ptr();
1836           assert(!derived->bottom_type()->isa_narrowoop() ||
1837                   derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
1838           // If it's an OOP with a non-zero offset, then it is derived.
1839           if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) {
1840             Node *base = find_base_for_derived(derived_base_map, derived, maxlrg);
1841             assert(base->_idx < _lrg_map.size(), "");
1842             // Add reaching DEFs of derived pointer and base pointer as a
1843             // pair of inputs
1844             n->add_req(derived);
1845             n->add_req(base);
1846 
1847             // See if the base pointer is already live to this point.
1848             // Since I'm working on the SSA form, liveness amounts to
1849             // reaching defs.  So if I find the base's live range then
1850             // I know the base's def reaches here.
1851             if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
1852                  !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
1853                  (_lrg_map.live_range_id(base) > 0) && // not a constant
1854                  _cfg.get_block_for_node(base) != block) { // base not def'd in blk)
1855               // Base pointer is not currently live.  Since I stretched
1856               // the base pointer to here and it crosses basic-block
1857               // boundaries, the global live info is now incorrect.
1858               // Recompute live.
1859               must_recompute_live = true;
1860             } // End of if base pointer is not live to debug info
1861           }
1862         } // End of scan all live data for derived ptrs crossing GC point
1863       } // End of if found a GC point
1864 
1865       // Make all inputs live
1866       if (!n->is_Phi()) {      // Phi function uses come from prior block
1867         for (uint k = 1; k < n->req(); k++) {
1868           uint lidx = _lrg_map.live_range_id(n->in(k));
1869           if (lidx < _lrg_map.max_lrg_id()) {
1870             liveout.insert(lidx);
1871           }
1872         }
1873       }
1874 
1875     } // End of forall instructions in block
1876     liveout.clear();  // Free the memory used by liveout.
1877 
1878   } // End of forall blocks
1879   _lrg_map.set_max_lrg_id(maxlrg);
1880 
1881   // If I created a new live range I need to recompute live
1882   if (maxlrg != _ifg->_maxlrg) {
1883     must_recompute_live = true;
1884   }
1885 
1886   return must_recompute_live;
1887 }
1888 
1889 // Extend the node to LRG mapping
1890 
1891 void PhaseChaitin::add_reference(const Node *node, const Node *old_node) {
1892   _lrg_map.extend(node->_idx, _lrg_map.live_range_id(old_node));
1893 }
1894 
1895 #ifndef PRODUCT
1896 void PhaseChaitin::dump(const Node *n) const {
1897   uint r = (n->_idx < _lrg_map.size()) ? _lrg_map.find_const(n) : 0;
1898   tty->print("L%d",r);
1899   if (r && n->Opcode() != Op_Phi) {
1900     if( _node_regs ) {          // Got a post-allocation copy of allocation?
1901       tty->print("[");
1902       OptoReg::Name second = get_reg_second(n);
1903       if( OptoReg::is_valid(second) ) {
1904         if( OptoReg::is_reg(second) )
1905           tty->print("%s:",Matcher::regName[second]);
1906         else
1907           tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(second));
1908       }
1909       OptoReg::Name first = get_reg_first(n);
1910       if( OptoReg::is_reg(first) )
1911         tty->print("%s]",Matcher::regName[first]);
1912       else
1913          tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(first));
1914     } else
1915     n->out_RegMask().dump();
1916   }
1917   tty->print("/N%d\t",n->_idx);
1918   tty->print("%s === ", n->Name());
1919   uint k;
1920   for (k = 0; k < n->req(); k++) {
1921     Node *m = n->in(k);
1922     if (!m) {
1923       tty->print("_ ");
1924     }
1925     else {
1926       uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
1927       tty->print("L%d",r);
1928       // Data MultiNodes can have projections with no real registers.
1929       // Don't die while dumping them.
1930       int op = n->Opcode();
1931       if( r && op != Op_Phi && op != Op_Proj && op != Op_SCMemProj) {
1932         if( _node_regs ) {
1933           tty->print("[");
1934           OptoReg::Name second = get_reg_second(n->in(k));
1935           if( OptoReg::is_valid(second) ) {
1936             if( OptoReg::is_reg(second) )
1937               tty->print("%s:",Matcher::regName[second]);
1938             else
1939               tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer),
1940                          reg2offset_unchecked(second));
1941           }
1942           OptoReg::Name first = get_reg_first(n->in(k));
1943           if( OptoReg::is_reg(first) )
1944             tty->print("%s]",Matcher::regName[first]);
1945           else
1946             tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer),
1947                        reg2offset_unchecked(first));
1948         } else
1949           n->in_RegMask(k).dump();
1950       }
1951       tty->print("/N%d ",m->_idx);
1952     }
1953   }
1954   if( k < n->len() && n->in(k) ) tty->print("| ");
1955   for( ; k < n->len(); k++ ) {
1956     Node *m = n->in(k);
1957     if(!m) {
1958       break;
1959     }
1960     uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
1961     tty->print("L%d",r);
1962     tty->print("/N%d ",m->_idx);
1963   }
1964   if( n->is_Mach() ) n->as_Mach()->dump_spec(tty);
1965   else n->dump_spec(tty);
1966   if( _spilled_once.test(n->_idx ) ) {
1967     tty->print(" Spill_1");
1968     if( _spilled_twice.test(n->_idx ) )
1969       tty->print(" Spill_2");
1970   }
1971   tty->print("\n");
1972 }
1973 
1974 void PhaseChaitin::dump(const Block *b) const {
1975   b->dump_head(&_cfg);
1976 
1977   // For all instructions
1978   for( uint j = 0; j < b->number_of_nodes(); j++ )
1979     dump(b->get_node(j));
1980   // Print live-out info at end of block
1981   if( _live ) {
1982     tty->print("Liveout: ");
1983     IndexSet *live = _live->live(b);
1984     IndexSetIterator elements(live);
1985     tty->print("{");
1986     uint i;
1987     while ((i = elements.next()) != 0) {
1988       tty->print("L%d ", _lrg_map.find_const(i));
1989     }
1990     tty->print_cr("}");
1991   }
1992   tty->print("\n");
1993 }
1994 
1995 void PhaseChaitin::dump() const {
1996   tty->print( "--- Chaitin -- argsize: %d  framesize: %d ---\n",
1997               _matcher._new_SP, _framesize );
1998 
1999   // For all blocks
2000   for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
2001     dump(_cfg.get_block(i));
2002   }
2003   // End of per-block dump
2004   tty->print("\n");
2005 
2006   if (!_ifg) {
2007     tty->print("(No IFG.)\n");
2008     return;
2009   }
2010 
2011   // Dump LRG array
2012   tty->print("--- Live RanGe Array ---\n");
2013   for (uint i2 = 1; i2 < _lrg_map.max_lrg_id(); i2++) {
2014     tty->print("L%d: ",i2);
2015     if (i2 < _ifg->_maxlrg) {
2016       lrgs(i2).dump();
2017     }
2018     else {
2019       tty->print_cr("new LRG");
2020     }
2021   }
2022   tty->print_cr("");
2023 
2024   // Dump lo-degree list
2025   tty->print("Lo degree: ");
2026   for(uint i3 = _lo_degree; i3; i3 = lrgs(i3)._next )
2027     tty->print("L%d ",i3);
2028   tty->print_cr("");
2029 
2030   // Dump lo-stk-degree list
2031   tty->print("Lo stk degree: ");
2032   for(uint i4 = _lo_stk_degree; i4; i4 = lrgs(i4)._next )
2033     tty->print("L%d ",i4);
2034   tty->print_cr("");
2035 
2036   // Dump hi-degree list
2037   tty->print("Hi degree: ");
2038   for(uint i5 = _hi_degree; i5; i5 = lrgs(i5)._next )
2039     tty->print("L%d ",i5);
2040   tty->print_cr("");
2041 }
2042 
2043 void PhaseChaitin::dump_degree_lists() const {
2044   // Dump lo-degree list
2045   tty->print("Lo degree: ");
2046   for( uint i = _lo_degree; i; i = lrgs(i)._next )
2047     tty->print("L%d ",i);
2048   tty->print_cr("");
2049 
2050   // Dump lo-stk-degree list
2051   tty->print("Lo stk degree: ");
2052   for(uint i2 = _lo_stk_degree; i2; i2 = lrgs(i2)._next )
2053     tty->print("L%d ",i2);
2054   tty->print_cr("");
2055 
2056   // Dump hi-degree list
2057   tty->print("Hi degree: ");
2058   for(uint i3 = _hi_degree; i3; i3 = lrgs(i3)._next )
2059     tty->print("L%d ",i3);
2060   tty->print_cr("");
2061 }
2062 
2063 void PhaseChaitin::dump_simplified() const {
2064   tty->print("Simplified: ");
2065   for( uint i = _simplified; i; i = lrgs(i)._next )
2066     tty->print("L%d ",i);
2067   tty->print_cr("");
2068 }
2069 
2070 static char *print_reg( OptoReg::Name reg, const PhaseChaitin *pc, char *buf ) {
2071   if ((int)reg < 0)
2072     sprintf(buf, "<OptoReg::%d>", (int)reg);
2073   else if (OptoReg::is_reg(reg))
2074     strcpy(buf, Matcher::regName[reg]);
2075   else
2076     sprintf(buf,"%s + #%d",OptoReg::regname(OptoReg::c_frame_pointer),
2077             pc->reg2offset(reg));
2078   return buf+strlen(buf);
2079 }
2080 
2081 // Dump a register name into a buffer.  Be intelligent if we get called
2082 // before allocation is complete.
2083 char *PhaseChaitin::dump_register( const Node *n, char *buf  ) const {
2084   if( !this ) {                 // Not got anything?
2085     sprintf(buf,"N%d",n->_idx); // Then use Node index
2086   } else if( _node_regs ) {
2087     // Post allocation, use direct mappings, no LRG info available
2088     print_reg( get_reg_first(n), this, buf );
2089   } else {
2090     uint lidx = _lrg_map.find_const(n); // Grab LRG number
2091     if( !_ifg ) {
2092       sprintf(buf,"L%d",lidx);  // No register binding yet
2093     } else if( !lidx ) {        // Special, not allocated value
2094       strcpy(buf,"Special");
2095     } else {
2096       if (lrgs(lidx)._is_vector) {
2097         if (lrgs(lidx).mask().is_bound_set(lrgs(lidx).num_regs()))
2098           print_reg( lrgs(lidx).reg(), this, buf ); // a bound machine register
2099         else
2100           sprintf(buf,"L%d",lidx); // No register binding yet
2101       } else if( (lrgs(lidx).num_regs() == 1)
2102                  ? lrgs(lidx).mask().is_bound1()
2103                  : lrgs(lidx).mask().is_bound_pair() ) {
2104         // Hah!  We have a bound machine register
2105         print_reg( lrgs(lidx).reg(), this, buf );
2106       } else {
2107         sprintf(buf,"L%d",lidx); // No register binding yet
2108       }
2109     }
2110   }
2111   return buf+strlen(buf);
2112 }
2113 
2114 void PhaseChaitin::dump_for_spill_split_recycle() const {
2115   if( WizardMode && (PrintCompilation || PrintOpto) ) {
2116     // Display which live ranges need to be split and the allocator's state
2117     tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt);
2118     for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) {
2119       if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
2120         tty->print("L%d: ", bidx);
2121         lrgs(bidx).dump();
2122       }
2123     }
2124     tty->cr();
2125     dump();
2126   }
2127 }
2128 
2129 void PhaseChaitin::dump_frame() const {
2130   const char *fp = OptoReg::regname(OptoReg::c_frame_pointer);
2131   const TypeTuple *domain = C->tf()->domain();
2132   const int        argcnt = domain->cnt() - TypeFunc::Parms;
2133 
2134   // Incoming arguments in registers dump
2135   for( int k = 0; k < argcnt; k++ ) {
2136     OptoReg::Name parmreg = _matcher._parm_regs[k].first();
2137     if( OptoReg::is_reg(parmreg))  {
2138       const char *reg_name = OptoReg::regname(parmreg);
2139       tty->print("#r%3.3d %s", parmreg, reg_name);
2140       parmreg = _matcher._parm_regs[k].second();
2141       if( OptoReg::is_reg(parmreg))  {
2142         tty->print(":%s", OptoReg::regname(parmreg));
2143       }
2144       tty->print("   : parm %d: ", k);
2145       domain->field_at(k + TypeFunc::Parms)->dump();
2146       tty->print_cr("");
2147     }
2148   }
2149 
2150   // Check for un-owned padding above incoming args
2151   OptoReg::Name reg = _matcher._new_SP;
2152   if( reg > _matcher._in_arg_limit ) {
2153     reg = OptoReg::add(reg, -1);
2154     tty->print_cr("#r%3.3d %s+%2d: pad0, owned by CALLER", reg, fp, reg2offset_unchecked(reg));
2155   }
2156 
2157   // Incoming argument area dump
2158   OptoReg::Name begin_in_arg = OptoReg::add(_matcher._old_SP,C->out_preserve_stack_slots());
2159   while( reg > begin_in_arg ) {
2160     reg = OptoReg::add(reg, -1);
2161     tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
2162     int j;
2163     for( j = 0; j < argcnt; j++) {
2164       if( _matcher._parm_regs[j].first() == reg ||
2165           _matcher._parm_regs[j].second() == reg ) {
2166         tty->print("parm %d: ",j);
2167         domain->field_at(j + TypeFunc::Parms)->dump();
2168         tty->print_cr("");
2169         break;
2170       }
2171     }
2172     if( j >= argcnt )
2173       tty->print_cr("HOLE, owned by SELF");
2174   }
2175 
2176   // Old outgoing preserve area
2177   while( reg > _matcher._old_SP ) {
2178     reg = OptoReg::add(reg, -1);
2179     tty->print_cr("#r%3.3d %s+%2d: old out preserve",reg,fp,reg2offset_unchecked(reg));
2180   }
2181 
2182   // Old SP
2183   tty->print_cr("# -- Old %s -- Framesize: %d --",fp,
2184     reg2offset_unchecked(OptoReg::add(_matcher._old_SP,-1)) - reg2offset_unchecked(_matcher._new_SP)+jintSize);
2185 
2186   // Preserve area dump
2187   int fixed_slots = C->fixed_slots();
2188   OptoReg::Name begin_in_preserve = OptoReg::add(_matcher._old_SP, -(int)C->in_preserve_stack_slots());
2189   OptoReg::Name return_addr = _matcher.return_addr();
2190 
2191   reg = OptoReg::add(reg, -1);
2192   while (OptoReg::is_stack(reg)) {
2193     tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
2194     if (return_addr == reg) {
2195       tty->print_cr("return address");
2196     } else if (reg >= begin_in_preserve) {
2197       // Preserved slots are present on x86
2198       if (return_addr == OptoReg::add(reg, VMRegImpl::slots_per_word))
2199         tty->print_cr("saved fp register");
2200       else if (return_addr == OptoReg::add(reg, 2*VMRegImpl::slots_per_word) &&
2201                VerifyStackAtCalls)
2202         tty->print_cr("0xBADB100D   +VerifyStackAtCalls");
2203       else
2204         tty->print_cr("in_preserve");
2205     } else if ((int)OptoReg::reg2stack(reg) < fixed_slots) {
2206       tty->print_cr("Fixed slot %d", OptoReg::reg2stack(reg));
2207     } else {
2208       tty->print_cr("pad2, stack alignment");
2209     }
2210     reg = OptoReg::add(reg, -1);
2211   }
2212 
2213   // Spill area dump
2214   reg = OptoReg::add(_matcher._new_SP, _framesize );
2215   while( reg > _matcher._out_arg_limit ) {
2216     reg = OptoReg::add(reg, -1);
2217     tty->print_cr("#r%3.3d %s+%2d: spill",reg,fp,reg2offset_unchecked(reg));
2218   }
2219 
2220   // Outgoing argument area dump
2221   while( reg > OptoReg::add(_matcher._new_SP, C->out_preserve_stack_slots()) ) {
2222     reg = OptoReg::add(reg, -1);
2223     tty->print_cr("#r%3.3d %s+%2d: outgoing argument",reg,fp,reg2offset_unchecked(reg));
2224   }
2225 
2226   // Outgoing new preserve area
2227   while( reg > _matcher._new_SP ) {
2228     reg = OptoReg::add(reg, -1);
2229     tty->print_cr("#r%3.3d %s+%2d: new out preserve",reg,fp,reg2offset_unchecked(reg));
2230   }
2231   tty->print_cr("#");
2232 }
2233 
2234 void PhaseChaitin::dump_bb( uint pre_order ) const {
2235   tty->print_cr("---dump of B%d---",pre_order);
2236   for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
2237     Block* block = _cfg.get_block(i);
2238     if (block->_pre_order == pre_order) {
2239       dump(block);
2240     }
2241   }
2242 }
2243 
2244 void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
2245   tty->print_cr("---dump of L%d---",lidx);
2246 
2247   if (_ifg) {
2248     if (lidx >= _lrg_map.max_lrg_id()) {
2249       tty->print("Attempt to print live range index beyond max live range.\n");
2250       return;
2251     }
2252     tty->print("L%d: ",lidx);
2253     if (lidx < _ifg->_maxlrg) {
2254       lrgs(lidx).dump();
2255     } else {
2256       tty->print_cr("new LRG");
2257     }
2258   }
2259   if( _ifg && lidx < _ifg->_maxlrg) {
2260     tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx));
2261     _ifg->neighbors(lidx)->dump();
2262     tty->cr();
2263   }
2264   // For all blocks
2265   for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
2266     Block* block = _cfg.get_block(i);
2267     int dump_once = 0;
2268 
2269     // For all instructions
2270     for( uint j = 0; j < block->number_of_nodes(); j++ ) {
2271       Node *n = block->get_node(j);
2272       if (_lrg_map.find_const(n) == lidx) {
2273         if (!dump_once++) {
2274           tty->cr();
2275           block->dump_head(&_cfg);
2276         }
2277         dump(n);
2278         continue;
2279       }
2280       if (!defs_only) {
2281         uint cnt = n->req();
2282         for( uint k = 1; k < cnt; k++ ) {
2283           Node *m = n->in(k);
2284           if (!m)  {
2285             continue;  // be robust in the dumper
2286           }
2287           if (_lrg_map.find_const(m) == lidx) {
2288             if (!dump_once++) {
2289               tty->cr();
2290               block->dump_head(&_cfg);
2291             }
2292             dump(n);
2293           }
2294         }
2295       }
2296     }
2297   } // End of per-block dump
2298   tty->cr();
2299 }
2300 #endif // not PRODUCT
2301 
2302 int PhaseChaitin::_final_loads  = 0;
2303 int PhaseChaitin::_final_stores = 0;
2304 int PhaseChaitin::_final_memoves= 0;
2305 int PhaseChaitin::_final_copies = 0;
2306 double PhaseChaitin::_final_load_cost  = 0;
2307 double PhaseChaitin::_final_store_cost = 0;
2308 double PhaseChaitin::_final_memove_cost= 0;
2309 double PhaseChaitin::_final_copy_cost  = 0;
2310 int PhaseChaitin::_conserv_coalesce = 0;
2311 int PhaseChaitin::_conserv_coalesce_pair = 0;
2312 int PhaseChaitin::_conserv_coalesce_trie = 0;
2313 int PhaseChaitin::_conserv_coalesce_quad = 0;
2314 int PhaseChaitin::_post_alloc = 0;
2315 int PhaseChaitin::_lost_opp_pp_coalesce = 0;
2316 int PhaseChaitin::_lost_opp_cflow_coalesce = 0;
2317 int PhaseChaitin::_used_cisc_instructions   = 0;
2318 int PhaseChaitin::_unused_cisc_instructions = 0;
2319 int PhaseChaitin::_allocator_attempts       = 0;
2320 int PhaseChaitin::_allocator_successes      = 0;
2321 
2322 #ifndef PRODUCT
2323 uint PhaseChaitin::_high_pressure           = 0;
2324 uint PhaseChaitin::_low_pressure            = 0;
2325 
2326 void PhaseChaitin::print_chaitin_statistics() {
2327   tty->print_cr("Inserted %d spill loads, %d spill stores, %d mem-mem moves and %d copies.", _final_loads, _final_stores, _final_memoves, _final_copies);
2328   tty->print_cr("Total load cost= %6.0f, store cost = %6.0f, mem-mem cost = %5.2f, copy cost = %5.0f.", _final_load_cost, _final_store_cost, _final_memove_cost, _final_copy_cost);
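       // Weighted sum of the costs above: load cost x4, store cost x2,
       // copy cost x1, and mem-mem move cost x12 (treated as the most expensive).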
2329   tty->print_cr("Adjusted spill cost = %7.0f.",
2330                 _final_load_cost*4.0 + _final_store_cost  * 2.0 +
2331                 _final_copy_cost*1.0 + _final_memove_cost*12.0);
2332   tty->print("Conservatively coalesced %d copies, %d pairs",
2333                 _conserv_coalesce, _conserv_coalesce_pair);
2334   if( _conserv_coalesce_trie || _conserv_coalesce_quad )
2335     tty->print(", %d tries, %d quads", _conserv_coalesce_trie, _conserv_coalesce_quad);
2336   tty->print_cr(", %d post alloc.", _post_alloc);
2337   if( _lost_opp_pp_coalesce || _lost_opp_cflow_coalesce )
2338     tty->print_cr("Lost coalesce opportunity, %d private-private, and %d cflow interfered.",
2339                   _lost_opp_pp_coalesce, _lost_opp_cflow_coalesce );
2340   if( _used_cisc_instructions || _unused_cisc_instructions )
2341     tty->print_cr("Used cisc instruction  %d,  remained in register %d",
2342                    _used_cisc_instructions, _unused_cisc_instructions);
2343   if( _allocator_successes != 0 )
2344     tty->print_cr("Average allocation trips %f", (float)_allocator_attempts/(float)_allocator_successes);
2345   tty->print_cr("High Pressure Blocks = %d, Low Pressure Blocks = %d", _high_pressure, _low_pressure);
2346 }
2347 #endif // not PRODUCT