/*
 * Copyright 2002-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_buildOopMap.cpp.incl"

// The functions in this file build OopMaps after all scheduling is done.
//
// OopMaps contain a list of all registers and stack-slots containing oops (so
// they can be updated by GC).  OopMaps also contain a list of derived-pointer
// base-pointer pairs.  When the base is moved, the derived pointer moves to
// follow it.  Finally, any registers holding callee-save values are also
// recorded.  These might contain oops, but only the caller knows.
//
// BuildOopMaps implements a simple forward reaching-defs solution.  At each
// GC point we'll have the reaching-def Nodes.  If the reaching Nodes are
// typed as pointers (no offset), then they are oops.  Pointers+offsets are
// derived pointers, and bases can be found from them.  Finally, we'll also
// track reaching callee-save values.  Note that a copy of a callee-save value
// "kills" its source, so that only 1 copy of a callee-save value is alive at
// a time.
//
// We run a simple bitvector liveness pass to help trim out dead oops.  Due to
// irreducible loops, we can have a reaching def of an oop that only reaches
// along one path, with no way to know whether it's valid on the other path.
// The bitvectors are quite dense and the liveness pass is fast.
//
// At GC points, we consult this information to build OopMaps.  All reaching
// defs typed as oops are added to the OopMap.  Only 1 instance of a
// callee-save register can be recorded.  For derived pointers, we'll have to
// find and record the register holding the base.
//
// The reaching-defs computation is a simple 1-pass worklist approach.  I tried
// a clever breadth-first approach but it was worse (showed O(n^2) in the
// pick-next-block code).
//
// The relevant data is kept in a struct of arrays (it could just as well be
// an array of structs, but the struct-of-arrays is generally a little more
// efficient).  The arrays are indexed by register number (including
// stack-slots as registers) and so are bounded by 200 to 300 elements in
// practice.  One array maps each register to its reaching def Node (or NULL
// for conflict/dead).  The other array maps each register to a callee-saved
// register name, or OptoReg::Bad for not-callee-saved.
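//
// As a rough illustration (register numbers below are arbitrary): after
// compute_reach processes a copy node 'c' that moves a save-on-entry value
// from register r1 into register r2, we will have
//   _defs[r2]    == c                 (the copy is the new reaching def of r2)
//   _callees[r2] == old _callees[r1]  (callee-save info follows the copy)
//   _callees[r1] == OptoReg::Bad      (the source is killed; only 1 live copy)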


//------------------------------OopFlow----------------------------------------
// Structure to pass around
struct OopFlow : public ResourceObj {
  short *_callees;              // Array mapping register to callee-saved
  Node **_defs;                 // array mapping register to reaching def
                                // or NULL if dead/conflict
  // OopFlow structs, when not being actively modified, describe the _end_ of
  // this block.
  Block *_b;                    // Block for this struct
  OopFlow *_next;               // Next free OopFlow

  OopFlow( short *callees, Node **defs ) : _callees(callees), _defs(defs),
    _b(NULL), _next(NULL) { }

  // Given reaching-defs for this block start, compute it for this block end
  void compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash );

  // Merge these two OopFlows into the 'this' pointer.
  void merge( OopFlow *flow, int max_reg );

  // Copy a 'flow' over an existing flow
  void clone( OopFlow *flow, int max_size);

  // Make a new OopFlow from scratch
  static OopFlow *make( Arena *A, int max_size );

  // Build an oopmap from the current flow info
  OopMap *build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, int* live );
};

//------------------------------compute_reach----------------------------------
// Given reaching-defs for this block start, compute it for this block end
void OopFlow::compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash ) {

  for( uint i=0; i<_b->_nodes.size(); i++ ) {
    Node *n = _b->_nodes[i];

    if( n->jvms() ) {           // Build an OopMap here?
      JVMState *jvms = n->jvms();
      // no map needed for leaf calls
      if( n->is_MachSafePoint() && !n->is_MachCallLeaf() ) {
        int *live = (int*) (*safehash)[n];
        assert( live, "must find live" );
        n->as_MachSafePoint()->set_oop_map( build_oop_map(n,max_reg,regalloc, live) );
      }
    }

    // Assign new reaching def's.
    // Note that I padded the _defs and _callees arrays so it's legal
    // to index at _defs[OptoReg::Bad].
    OptoReg::Name first = regalloc->get_reg_first(n);
    OptoReg::Name second = regalloc->get_reg_second(n);
    _defs[first] = n;
    _defs[second] = n;

    // Pass callee-save info around copies
    int idx = n->is_Copy();
    if( idx ) {                 // Copies move callee-save info
      OptoReg::Name old_first = regalloc->get_reg_first(n->in(idx));
      OptoReg::Name old_second = regalloc->get_reg_second(n->in(idx));
      int tmp_first = _callees[old_first];
      int tmp_second = _callees[old_second];
      _callees[old_first] = OptoReg::Bad; // callee-save is moved, dead in old location
      _callees[old_second] = OptoReg::Bad;
      _callees[first] = tmp_first;
      _callees[second] = tmp_second;
    } else if( n->is_Phi() ) {  // Phis do not mod callee-saves
      assert( _callees[first] == _callees[regalloc->get_reg_first(n->in(1))], "" );
      assert( _callees[second] == _callees[regalloc->get_reg_second(n->in(1))], "" );
      assert( _callees[first] == _callees[regalloc->get_reg_first(n->in(n->req()-1))], "" );
      assert( _callees[second] == _callees[regalloc->get_reg_second(n->in(n->req()-1))], "" );
    } else {
      _callees[first] = OptoReg::Bad; // No longer holding a callee-save value
      _callees[second] = OptoReg::Bad;

      // Find base case for callee saves
      if( n->is_Proj() && n->in(0)->is_Start() ) {
        if( OptoReg::is_reg(first) &&
            regalloc->_matcher.is_save_on_entry(first) )
          _callees[first] = first;
        if( OptoReg::is_reg(second) &&
            regalloc->_matcher.is_save_on_entry(second) )
          _callees[second] = second;
      }
    }
  }
}

//------------------------------merge------------------------------------------
// Merge the given flow into the 'this' flow
void OopFlow::merge( OopFlow *flow, int max_reg ) {
  assert( _b == NULL, "merging into a happy flow" );
  assert( flow->_b, "this flow is still alive" );
  assert( flow != this, "no self flow" );

  // Do the merge.  If there are any differences, drop to 'bottom', which
  // is OptoReg::Bad for callee-saves and NULL for reaching defs.
  for( int i=0; i<max_reg; i++ ) {
    // Merge the callee-saves
    if( _callees[i] != flow->_callees[i] )
      _callees[i] = OptoReg::Bad;
    // Merge the reaching defs
    if( _defs[i] != flow->_defs[i] )
      _defs[i] = NULL;
  }

}

//------------------------------clone------------------------------------------
void OopFlow::clone( OopFlow *flow, int max_size ) {
  _b = flow->_b;
  memcpy( _callees, flow->_callees, sizeof(short)*max_size);
  memcpy( _defs   , flow->_defs   , sizeof(Node*)*max_size);
}

//------------------------------make-------------------------------------------
OopFlow *OopFlow::make( Arena *A, int max_size ) {
  short *callees = NEW_ARENA_ARRAY(A,short,max_size+1);
  Node **defs    = NEW_ARENA_ARRAY(A,Node*,max_size+1);
  debug_only( memset(defs,0,(max_size+1)*sizeof(Node*)) );
  OopFlow *flow = new (A) OopFlow(callees+1, defs+1);
  assert( &flow->_callees[OptoReg::Bad] == callees, "Ok to index at OptoReg::Bad" );
  assert( &flow->_defs   [OptoReg::Bad] == defs   , "Ok to index at OptoReg::Bad" );
  return flow;
}

//------------------------------bit twiddlers----------------------------------
static int get_live_bit( int *live, int reg ) {
  return live[reg>>LogBitsPerInt] &   (1<<(reg&(BitsPerInt-1))); }
static void set_live_bit( int *live, int reg ) {
         live[reg>>LogBitsPerInt] |=  (1<<(reg&(BitsPerInt-1))); }
static void clr_live_bit( int *live, int reg ) {
         live[reg>>LogBitsPerInt] &= ~(1<<(reg&(BitsPerInt-1))); }
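// Example (assuming 32-bit ints, i.e. BitsPerInt == 32 and LogBitsPerInt == 5):
// register 37 maps to word live[37>>5] == live[1], bit 37&31 == 5.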

//------------------------------build_oop_map----------------------------------
// Build an oopmap from the current flow info
OopMap *OopFlow::build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, int* live ) {
  int framesize = regalloc->_framesize;
  int max_inarg_slot = OptoReg::reg2stack(regalloc->_matcher._new_SP);
  debug_only( char *dup_check = NEW_RESOURCE_ARRAY(char,OptoReg::stack0());
              memset(dup_check,0,OptoReg::stack0()) );

  OopMap *omap = new OopMap( framesize,  max_inarg_slot );
  MachCallNode *mcall = n->is_MachCall() ? n->as_MachCall() : NULL;
  JVMState* jvms = n->jvms();

  // For all registers do...
  for( int reg=0; reg<max_reg; reg++ ) {
    if( get_live_bit(live,reg) == 0 )
      continue;                 // Ignore if not live

    // %%% C2 can use 2 OptoRegs when the physical register is only one 64-bit
    // register; in that case we'll get a non-concrete register for the second
    // half.  We only need to tell the map the register once!
    //
    // However for the moment we disable this change and leave things as they
    // were.

    VMReg r = OptoReg::as_VMReg(OptoReg::Name(reg), framesize, max_inarg_slot);

    if (false && r->is_reg() && !r->is_concrete()) {
      continue;
    }

    // See if dead (no reaching def).
    Node *def = _defs[reg];     // Get reaching def
    assert( def, "since live better have reaching def" );

    // Classify the reaching def as oop, derived, callee-save, dead, or other
    const Type *t = def->bottom_type();
    if( t->isa_oop_ptr() ) {    // Oop or derived?
      assert( !OptoReg::is_valid(_callees[reg]), "oop can't be callee save" );
#ifdef _LP64
      // 64-bit pointers record oop-ishness on 2 aligned adjacent registers.
      // Make sure both halves come from the same reaching def, but do not
      // put both into the oopmap.
      if( (reg&1) == 1 ) {      // High half of oop-pair?
        assert( _defs[reg-1] == _defs[reg], "both halves from same reaching def" );
        continue;               // Do not record high parts in oopmap
      }
#endif

      // Check for a legal reg name in the oopMap and bail out if it is not.
      if (!omap->legal_vm_reg_name(r)) {
        regalloc->C->record_method_not_compilable("illegal oopMap register name");
        continue;
      }
      if( t->is_ptr()->_offset == 0 ) { // Not derived?
        if( mcall ) {
          // Outgoing argument GC mask responsibility belongs to the callee,
          // not the caller.  Inspect the inputs to the call, to see if
          // this live-range is one of them.
          uint cnt = mcall->tf()->domain()->cnt();
          uint j;
          for( j = TypeFunc::Parms; j < cnt; j++)
            if( mcall->in(j) == def )
              break;            // reaching def is an argument oop
          if( j < cnt )         // arg oops don't go in GC map
            continue;           // Continue on to the next register
        }
        omap->set_oop(r);
      } else {                  // Else it's derived.
        // Find the base of the derived value.
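        // The safepoint's inputs from jvms->oopoff() onward come in
        // (derived, base) pairs: input i is a derived pointer and input
        // i+1 is its base, which is why both scans below advance by 2.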
        uint i;
        // Fast, common case, scan
        for( i = jvms->oopoff(); i < n->req(); i+=2 )
          if( n->in(i) == def ) break; // Common case
        if( i == n->req() ) {   // Missed, try a more generous scan
          // Scan again, but this time peek through copies
          for( i = jvms->oopoff(); i < n->req(); i+=2 ) {
            Node *m = n->in(i); // Get initial derived value
            while( 1 ) {
              Node *d = def;    // Get initial reaching def
              while( 1 ) {      // Follow copies of reaching def to end
                if( m == d ) goto found; // breaks 3 loops
                int idx = d->is_Copy();
                if( !idx ) break;
                d = d->in(idx);     // Link through copy
              }
              int idx = m->is_Copy();
              if( !idx ) break;
              m = m->in(idx);
            }
          }
          guarantee( 0, "must find derived/base pair" );
        }
      found: ;
        Node *base = n->in(i+1); // Base is other half of pair
        int breg = regalloc->get_reg_first(base);
        VMReg b = OptoReg::as_VMReg(OptoReg::Name(breg), framesize, max_inarg_slot);

        // I record liveness at safepoints BEFORE I make the inputs
        // live.  This is because argument oops are NOT live at a
        // safepoint (or at least they cannot appear in the oopmap).
        // Thus bases of base/derived pairs might not be in the
        // liveness data, but they need to appear in the oopmap.
        if( get_live_bit(live,breg) == 0 ) {// Not live?
          // Flag it, so the next derived pointer won't re-insert into the oopmap
          set_live_bit(live,breg);
          // Already missed our turn?
          if( breg < reg ) {
            if (b->is_stack() || b->is_concrete() || true ) {
              omap->set_oop( b);
            }
          }
        }
        if (b->is_stack() || b->is_concrete() || true ) {
          omap->set_derived_oop( r, b);
        }
      }

    } else if( t->isa_narrowoop() ) {
      assert( !OptoReg::is_valid(_callees[reg]), "oop can't be callee save" );
      // Check for a legal reg name in the oopMap and bail out if it is not.
      if (!omap->legal_vm_reg_name(r)) {
        regalloc->C->record_method_not_compilable("illegal oopMap register name");
        continue;
      }
      if( mcall ) {
        // Outgoing argument GC mask responsibility belongs to the callee,
        // not the caller.  Inspect the inputs to the call, to see if
        // this live-range is one of them.
        uint cnt = mcall->tf()->domain()->cnt();
        uint j;
        for( j = TypeFunc::Parms; j < cnt; j++)
          if( mcall->in(j) == def )
            break;            // reaching def is an argument oop
        if( j < cnt )         // arg oops don't go in GC map
          continue;           // Continue on to the next register
      }
      omap->set_narrowoop(r);
    } else if( OptoReg::is_valid(_callees[reg])) { // callee-save?
      // It's a callee-save value
      assert( dup_check[_callees[reg]]==0, "trying to callee save same reg twice" );
      debug_only( dup_check[_callees[reg]]=1; )
      VMReg callee = OptoReg::as_VMReg(OptoReg::Name(_callees[reg]));
      if ( callee->is_concrete() || true ) {
        omap->set_callee_saved( r, callee);
      }

    } else {
      // Other - some reaching non-oop value
      omap->set_value( r);
    }

  }

#ifdef ASSERT
  /* Nice, Intel-only assert
  int cnt_callee_saves=0;
  int reg2 = 0;
  while (OptoReg::is_reg(reg2)) {
    if( dup_check[reg2] != 0) cnt_callee_saves++;
    assert( cnt_callee_saves==3 || cnt_callee_saves==5, "missed some callee-save" );
    reg2++;
  }
  */
#endif

  return omap;
}

//------------------------------do_liveness------------------------------------
// Compute backwards liveness on registers
static void do_liveness( PhaseRegAlloc *regalloc, PhaseCFG *cfg, Block_List *worklist, int max_reg_ints, Arena *A, Dict *safehash ) {
  int *live = NEW_ARENA_ARRAY(A, int, (cfg->_num_blocks+1) * max_reg_ints);
  int *tmp_live = &live[cfg->_num_blocks * max_reg_ints];
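  // 'live' is laid out as a 2-D bit array: row b->_pre_order holds the
  // live-in set for block b, max_reg_ints words per row; tmp_live is the
  // one extra row, used as scratch space while walking a block.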
  Node *root = cfg->C->root();
  // On CISC platforms, get the node representing the stack pointer that
  // regalloc used for spills
  Node *fp = NodeSentinel;
  if (UseCISCSpill && root->req() > 1) {
    fp = root->in(1)->in(TypeFunc::FramePtr);
  }
  memset( live, 0, cfg->_num_blocks * (max_reg_ints<<LogBytesPerInt) );
  // Push preds onto worklist
  for( uint i=1; i<root->req(); i++ )
    worklist->push(cfg->_bbs[root->in(i)->_idx]);

  // ZKM.jar includes tiny infinite loops which are unreached from below.
  // If we missed any blocks, we'll retry here after pushing all missed
  // blocks on the worklist.  Normally this outer loop never trips more
  // than once.
  while( 1 ) {

    while( worklist->size() ) { // Standard worklist algorithm
      Block *b = worklist->rpop();

      // Copy first successor into my tmp_live space
      int s0num = b->_succs[0]->_pre_order;
      int *t = &live[s0num*max_reg_ints];
      for( int i=0; i<max_reg_ints; i++ )
        tmp_live[i] = t[i];

      // OR in the remaining live registers
      for( uint j=1; j<b->_num_succs; j++ ) {
        uint sjnum = b->_succs[j]->_pre_order;
        int *t = &live[sjnum*max_reg_ints];
        for( int i=0; i<max_reg_ints; i++ )
          tmp_live[i] |= t[i];
      }

      // Now walk tmp_live up the block backwards, computing live
      for( int k=b->_nodes.size()-1; k>=0; k-- ) {
        Node *n = b->_nodes[k];
        // KILL def'd bits
        int first = regalloc->get_reg_first(n);
        int second = regalloc->get_reg_second(n);
        if( OptoReg::is_valid(first) ) clr_live_bit(tmp_live,first);
        if( OptoReg::is_valid(second) ) clr_live_bit(tmp_live,second);

        MachNode *m = n->is_Mach() ? n->as_Mach() : NULL;

        // Check if m is potentially a CISC alternate instruction (i.e., possibly
        // synthesized by RegAlloc from a conventional instruction and a
        // spilled input)
        bool is_cisc_alternate = false;
        if (UseCISCSpill && m) {
          is_cisc_alternate = m->is_cisc_alternate();
        }

        // GEN use'd bits
        for( uint l=1; l<n->req(); l++ ) {
          Node *def = n->in(l);
          assert(def != 0, "input edge required");
          int first = regalloc->get_reg_first(def);
          int second = regalloc->get_reg_second(def);
          if( OptoReg::is_valid(first) ) set_live_bit(tmp_live,first);
          if( OptoReg::is_valid(second) ) set_live_bit(tmp_live,second);
          // If we use the stack pointer in a cisc-alternate instruction,
          // check for use as a memory operand.  Then reconstruct the RegName
          // for this stack location, and set the appropriate bit in the
          // live vector (see bug 4987749).
          if (is_cisc_alternate && def == fp) {
            const TypePtr *adr_type = NULL;
            intptr_t offset;
            const Node* base = m->get_base_and_disp(offset, adr_type);
            if (base == NodeSentinel) {
              // MachNode has multiple memory inputs.  We are unable to reason
              // about these, but are presuming (with trepidation) that none of
              // them are oops.  This can be fixed by making get_base_and_disp()
              // look at a specific input instead of all inputs.
              assert(!def->bottom_type()->isa_oop_ptr(), "expecting non-oop mem input");
            } else if (base != fp || offset == Type::OffsetBot) {
              // Do nothing: the fp operand is either not from a memory use
              // (base == NULL) OR the fp is used in a non-memory context
              // (base is some other register) OR the offset is not constant,
              // so it is not a stack slot.
            } else {
              assert(offset >= 0, "unexpected negative offset");
              offset -= (offset % jintSize);  // count the whole word
              int stack_reg = regalloc->offset2reg(offset);
              if (OptoReg::is_stack(stack_reg)) {
                set_live_bit(tmp_live, stack_reg);
              } else {
                assert(false, "stack_reg not on stack?");
              }
            }
          }
        }

        if( n->jvms() ) {       // Record liveness at safepoint

          // The placement of this stanza means inputs to calls are
          // considered live at the callsite's OopMap.  Argument oops are
          // hence live, but NOT included in the oopmap.  See the cutout in
          // build_oop_map.  Debug oops are live (and in the OopMap).
          int *n_live = NEW_ARENA_ARRAY(A, int, max_reg_ints);
          for( int l=0; l<max_reg_ints; l++ )
            n_live[l] = tmp_live[l];
          safehash->Insert(n,n_live);
        }

      }

      // Now at block top, see if we have any changes.  If so, propagate
      // to prior blocks.
      int *old_live = &live[b->_pre_order*max_reg_ints];
      int l;
      for( l=0; l<max_reg_ints; l++ )
        if( tmp_live[l] != old_live[l] )
          break;
      if( l<max_reg_ints ) {     // Change!
        // Copy in new value
        for( l=0; l<max_reg_ints; l++ )
          old_live[l] = tmp_live[l];
        // Push preds onto worklist
        for( l=1; l<(int)b->num_preds(); l++ )
          worklist->push(cfg->_bbs[b->pred(l)->_idx]);
      }
    }

    // Scan for any missing safepoints.  Happens in infinite loops
    // a la ZKM.jar
    uint i;
    for( i=1; i<cfg->_num_blocks; i++ ) {
      Block *b = cfg->_blocks[i];
      uint j;
      for( j=1; j<b->_nodes.size(); j++ )
        if( b->_nodes[j]->jvms() &&
            (*safehash)[b->_nodes[j]] == NULL )
           break;
      if( j<b->_nodes.size() ) break;
    }
    if( i == cfg->_num_blocks )
      break;                    // Got 'em all
#ifndef PRODUCT
    if( PrintOpto && Verbose )
      tty->print_cr("retripping live calc");
#endif
    // Force the issue (expensively): recheck everybody
    for( i=1; i<cfg->_num_blocks; i++ )
      worklist->push(cfg->_blocks[i]);
  }

}

//------------------------------BuildOopMaps-----------------------------------
// Collect GC mask info - where are all the OOPs?
void Compile::BuildOopMaps() {
  NOT_PRODUCT( TracePhase t3("bldOopMaps", &_t_buildOopMaps, TimeCompiler); )
  // Can't resource-mark because I need to leave all those OopMaps around,
  // or else I need to resource-mark some arena other than the default.
  // ResourceMark rm;              // Reclaim all OopFlows when done
  int max_reg = _regalloc->_max_reg; // Current array extent

  Arena *A = Thread::current()->resource_area();
  Block_List worklist;          // Worklist of pending blocks

  int max_reg_ints = round_to(max_reg, BitsPerInt)>>LogBitsPerInt;
  Dict *safehash = NULL;        // Used for assert only
  // Compute a backwards liveness per register.  Needs a bitarray of
  // #blocks x (#registers, rounded up to ints)
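  // (For example, with max_reg in the typical 200-300 range and 32-bit ints,
  // that is roughly 7 to 10 ints per block.)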
  safehash = new Dict(cmpkey,hashkey,A);
  do_liveness( _regalloc, _cfg, &worklist, max_reg_ints, A, safehash );
  OopFlow *free_list = NULL;    // Free, unused

  // Array mapping blocks to completed oopflows
  OopFlow **flows = NEW_ARENA_ARRAY(A, OopFlow*, _cfg->_num_blocks);
  memset( flows, 0, _cfg->_num_blocks*sizeof(OopFlow*) );


  // Do the first block 'by hand' to prime the worklist
  Block *entry = _cfg->_blocks[1];
  OopFlow *rootflow = OopFlow::make(A,max_reg);
  // Initialize to 'bottom' (not 'top')
  memset( rootflow->_callees, OptoReg::Bad, max_reg*sizeof(short) );
  memset( rootflow->_defs   ,            0, max_reg*sizeof(Node*) );
  flows[entry->_pre_order] = rootflow;

  // Flow the entry block and push its successors onto the worklist
  rootflow->_b = entry;
  rootflow->compute_reach( _regalloc, max_reg, safehash );
  for( uint i=0; i<entry->_num_succs; i++ )
    worklist.push(entry->_succs[i]);

  // Now worklist contains blocks which have some, but perhaps not all,
  // predecessors visited.
  while( worklist.size() ) {
    // Scan for a block with all predecessors visited, or any random slob
    // otherwise.  All-preds-visited order allows me to recycle OopFlow
    // structures rapidly and cut down on the memory footprint.
    // Note: not all predecessors might be visited yet (must happen for
    // irreducible loops).  This is OK, since every live value must have the
    // SAME reaching def for the block, so any reaching def is OK.
    uint i;

    Block *b = worklist.pop();
    // Ignore root block
    if( b == _cfg->_broot ) continue;
    // Block is already done?  Happens if a block has several predecessors;
    // it can get on the worklist more than once.
    if( flows[b->_pre_order] ) continue;

    // If this block has a visited predecessor AND that predecessor has this
    // block as its only undone child, we can move the OopFlow from the
    // pred to this block.  Otherwise we have to grab a new OopFlow.
    OopFlow *flow = NULL;       // Flag for finding optimized flow
    Block *pred = (Block*)0xdeadbeef;
    uint j;
    // Scan this block's preds to find a done predecessor
    for( j=1; j<b->num_preds(); j++ ) {
      Block *p = _cfg->_bbs[b->pred(j)->_idx];
      OopFlow *p_flow = flows[p->_pre_order];
      if( p_flow ) {            // Predecessor is done
        assert( p_flow->_b == p, "cross check" );
        pred = p;               // Record some predecessor
        // If all successors of p are done except for 'b', then we can carry
        // p_flow forward to 'b' without copying, otherwise we have to draw
        // from the free_list and clone data.
        uint k;
        for( k=0; k<p->_num_succs; k++ )
          if( !flows[p->_succs[k]->_pre_order] &&
              p->_succs[k] != b )
            break;

        // Either carry-forward the now-unused OopFlow for b's use
        // or draw a new one from the free list
        if( k==p->_num_succs ) {
          flow = p_flow;
          break;                // Found an ideal pred, use it
        }
      }
    }

    if( flow ) {
      // We have an OopFlow that's the last-use of a predecessor.
      // Carry it forward.
    } else {                    // Draw a new OopFlow from the freelist
      if( !free_list )
        free_list = OopFlow::make(A,max_reg);
      flow = free_list;
      assert( flow->_b == NULL, "oopFlow is not free" );
      free_list = flow->_next;
      flow->_next = NULL;

      // Copy/clone over the data
      flow->clone(flows[pred->_pre_order], max_reg);
    }

    // Mark flow for block.  Blocks can only be flowed over once,
    // because after the first time they are guarded from entering
    // this code again.
    assert( flow->_b == pred, "have some prior flow" );
    flow->_b = NULL;

    // Now push flow forward
    flows[b->_pre_order] = flow; // Mark flow for this block
    flow->_b = b;
    flow->compute_reach( _regalloc, max_reg, safehash );

    // Now push children onto worklist
    for( i=0; i<b->_num_succs; i++ )
      worklist.push(b->_succs[i]);

  }
}