/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/callnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/live.hpp"
#include "opto/machnode.hpp"


// Compute live-in/live-out.  We use a totally incremental algorithm.  The LIVE
// problem is monotonic.  The steady-state solution looks like this: pull a
// block from the worklist.  It has a set of deltas - values which are newly
// live-in from the block.  Push these to the live-out sets of all predecessor
// blocks.  At each predecessor, the new values are merged into the existing
// live-out set; anything already live-out there is dropped.  The surviving
// values are then checked against what the predecessor defines locally, and
// locally defined values are dropped as well.  Leftover bits become the new
// live-in (the delta) for the predecessor block, and the pred block is put on
// the worklist.
//   The locally live-in stuff is computed once and added to predecessor
// live-out sets.  This separate computation is done in the outer loop below.
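//
//   A small illustrative example (not taken from any particular compile):
//   suppose block B2 uses live range L5 before defining it, and predecessor
//   B1 neither defines L5 nor has it live-out yet.  Pass 1 over B2 computes
//   {L5} as B2's local live-in and pushes it into B1's live-out set.  Since
//   L5 is new there and not defined in B1, it becomes B1's delta, and the bit
//   keeps propagating up the CFG (via the worklist once B1 has had its first
//   pass) until a block that defines L5 is reached.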
PhaseLive::PhaseLive(const PhaseCFG &cfg, const LRG_List &names, Arena *arena, bool keep_deltas)
  : Phase(LIVE),
  _cfg(cfg),
  _names(names),
  _arena(arena),
  _live(0),
  _livein(0),
  _keep_deltas(keep_deltas) {
}

void PhaseLive::compute(uint maxlrg) {
  _maxlrg   = maxlrg;
  _worklist = new (_arena) Block_List();

  // Init the sparse live arrays.  This data is live on exit from here!
  // The _live info is the live-out info.
  _live = (IndexSet*)_arena->Amalloc(sizeof(IndexSet) * _cfg.number_of_blocks());
  uint i;
  for (i = 0; i < _cfg.number_of_blocks(); i++) {
    _live[i].initialize(_maxlrg);
  }

  if (_keep_deltas) {
    _livein = (IndexSet*)_arena->Amalloc(sizeof(IndexSet) * _cfg.number_of_blocks());
    for (i = 0; i < _cfg.number_of_blocks(); i++) {
      _livein[i].initialize(_maxlrg);
    }
  }

  // Init the sparse arrays for delta-sets.
  ResourceMark rm;              // Nuke temp storage on exit

  // Does the memory used by _defs and _deltas get reclaimed?  Does it matter?  TT

  // Array of values defined locally in blocks
  _defs = NEW_RESOURCE_ARRAY(IndexSet,_cfg.number_of_blocks());
  for (i = 0; i < _cfg.number_of_blocks(); i++) {
    _defs[i].initialize(_maxlrg);
  }

  // Array of delta-set pointers, indexed by block pre_order-1.
  _deltas = NEW_RESOURCE_ARRAY(IndexSet*,_cfg.number_of_blocks());
  memset( _deltas, 0, sizeof(IndexSet*)* _cfg.number_of_blocks());

  _free_IndexSet = NULL;

  // Blocks having done pass-1
  VectorSet first_pass(Thread::current()->resource_area());

  // Outer loop: must compute local live-in sets and push into predecessors.
  for (uint j = _cfg.number_of_blocks(); j > 0; j--) {
    Block* block = _cfg.get_block(j - 1);

    // Compute the local live-in set.  Start with any new live-out bits.
    IndexSet* use = getset(block);
    IndexSet* def = &_defs[block->_pre_order-1];
    DEBUG_ONLY(IndexSet *def_outside = getfreeset();)
    uint i;
    for (i = block->number_of_nodes(); i > 1; i--) {
      Node* n = block->get_node(i-1);
      if (n->is_Phi()) {
        break;
      }

      uint r = _names.at(n->_idx);
      assert(!def_outside->member(r), "Use of external LRG overlaps the same LRG defined in this block");
      def->insert( r );
      use->remove( r );
      uint cnt = n->req();
      for (uint k = 1; k < cnt; k++) {
        Node *nk = n->in(k);
        uint nkidx = nk->_idx;
        if (_cfg.get_block_for_node(nk) != block) {
          uint u = _names.at(nkidx);
          use->insert(u);
          DEBUG_ONLY(def_outside->insert(u);)
        }
      }
    }
#ifdef ASSERT
    def_outside->set_next(_free_IndexSet);
    _free_IndexSet = def_outside;     // Drop onto free list
#endif
    // Remove anything defined by Phis and the block start instruction
    for (uint k = i; k > 0; k--) {
      uint r = _names.at(block->get_node(k - 1)->_idx);
      def->insert(r);
      use->remove(r);
    }

    // Push these live-in things to predecessors
    for (uint l = 1; l < block->num_preds(); l++) {
      Block* p = _cfg.get_block_for_node(block->pred(l));
      add_liveout(p, use, first_pass);

      // PhiNode uses go in the live-out set of prior blocks.
      for (uint k = i; k > 0; k--) {
        Node *phi = block->get_node(k - 1);
        if (l < phi->req()) {
          add_liveout(p, _names.at(phi->in(l)->_idx), first_pass);
        }
      }
    }
    freeset(block);
    first_pass.set(block->_pre_order);

    // Inner loop: blocks that picked up new live-out values to be propagated
    while (_worklist->size()) {
      Block* block = _worklist->pop();
      IndexSet *delta = getset(block);
      assert( delta->count(), "missing delta set" );

      // Add new-live-in to predecessors' live-out sets
      for (uint l = 1; l < block->num_preds(); l++) {
        Block* predecessor = _cfg.get_block_for_node(block->pred(l));
        add_liveout(predecessor, delta, first_pass);
      }

      freeset(block);
    } // End of while-worklist-not-empty

  } // End of for-all-blocks-outer-loop

  // We explicitly clear all of the IndexSets which we are about to release.
  // This allows us to recycle their internal memory into IndexSet's free list.

  for (i = 0; i < _cfg.number_of_blocks(); i++) {
    _defs[i].clear();
    if (_deltas[i]) {
      // Is this always true?
      _deltas[i]->clear();
    }
  }
  IndexSet *free = _free_IndexSet;
  while (free != NULL) {
    IndexSet *temp = free;
    free = free->next();
    temp->clear();
  }

}

#ifndef PRODUCT
void PhaseLive::stats(uint iters) const {
}
#endif

// Get an IndexSet for a block.  Return existing one, if any.  Make a new
// empty one if a prior one does not exist.
IndexSet *PhaseLive::getset( Block *p ) {
  IndexSet *delta = _deltas[p->_pre_order-1];
  if( !delta )                  // Not on worklist?
    // Get a free set; flag as being on worklist
    delta = _deltas[p->_pre_order-1] = getfreeset();
  return delta;                 // Return set of new live-out items
}

// Pull from free list, or allocate.  Internal allocation on the returned set
// is always from thread local storage.
IndexSet *PhaseLive::getfreeset( ) {
  IndexSet *f = _free_IndexSet;
  if( !f ) {
    f = new IndexSet;
//    f->set_arena(Thread::current()->resource_area());
    f->initialize(_maxlrg, Thread::current()->resource_area());
  } else {
    // Pull from free list
    _free_IndexSet = f->next();
  //f->_cnt = 0;                        // Reset to empty
//    f->set_arena(Thread::current()->resource_area());
    f->initialize(_maxlrg, Thread::current()->resource_area());
  }
  return f;
}

// Free an IndexSet from a block.
void PhaseLive::freeset( Block *p ) {
  IndexSet *f = _deltas[p->_pre_order-1];
  if ( _keep_deltas ) {
    add_livein(p, f);
  }
  f->set_next(_free_IndexSet);
  _free_IndexSet = f;           // Drop onto free list
  _deltas[p->_pre_order-1] = NULL;
}

// Add a live-out value to a given block's live-out set.  If it is new, then
// also add it to the delta set and stick the block on the worklist.
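//   Note: a block that has not yet finished pass 1 is not pushed onto the
// worklist here; its accumulated delta is instead folded in as the starting
// live-out bits when the outer loop reaches that block.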
void PhaseLive::add_liveout( Block *p, uint r, VectorSet &first_pass ) {
  IndexSet *live = &_live[p->_pre_order-1];
  if( live->insert(r) ) {       // If actually inserted...
    // We extended the live-out set.  See if the value is generated locally.
    // If it is not, then we must extend the live-in set.
    if( !_defs[p->_pre_order-1].member( r ) ) {
      if( !_deltas[p->_pre_order-1] && // Not on worklist?
          first_pass.test(p->_pre_order) )
        _worklist->push(p);     // Actually go on worklist if already 1st pass
      getset(p)->insert(r);
    }
  }
}

// Add a vector of live-out values to a given block's live-out set.
void PhaseLive::add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass ) {
  IndexSet *live = &_live[p->_pre_order-1];
  IndexSet *defs = &_defs[p->_pre_order-1];
  IndexSet *on_worklist = _deltas[p->_pre_order-1];
  IndexSet *delta = on_worklist ? on_worklist : getfreeset();

  IndexSetIterator elements(lo);
  uint r;
  while ((r = elements.next()) != 0) {
    if( live->insert(r) &&      // If actually inserted...
        !defs->member( r ) )    // and not defined locally
      delta->insert(r);         // Then add to live-in set
  }

  if( delta->count() ) {                // If actually added things
    _deltas[p->_pre_order-1] = delta; // Flag as on worklist now
    if( !on_worklist &&         // Not on worklist?
        first_pass.test(p->_pre_order) )
      _worklist->push(p);       // Actually go on worklist if already 1st pass
  } else {                      // Nothing there; just free it
    delta->set_next(_free_IndexSet);
    _free_IndexSet = delta;     // Drop onto free list
  }
}

// Add a vector of live-in values to a given block's live-in set.
void PhaseLive::add_livein(Block *p, IndexSet *lo) {
  IndexSet *livein = &_livein[p->_pre_order-1];
  IndexSetIterator elements(lo);
  uint r;
  while ((r = elements.next()) != 0) {
    livein->insert(r);         // Then add to live-in set
  }
}

#ifndef PRODUCT
// Dump the live-out set for a block
void PhaseLive::dump( const Block *b ) const {
  tty->print("Block %d: ",b->_pre_order);
  if ( _keep_deltas ) {
    tty->print("LiveIn: ");  _livein[b->_pre_order-1].dump();
  }
  tty->print("LiveOut: ");  _live[b->_pre_order-1].dump();
  uint cnt = b->number_of_nodes();
  for( uint i=0; i<cnt; i++ ) {
    tty->print("L%d/", _names.at(b->get_node(i)->_idx));
    b->get_node(i)->dump();
  }
  tty->print("\n");
}

// Verify that base pointers and derived pointers are still sane.
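// At each safepoint the debug-info inputs from jvms->oopoff() onward are
// expected to come in (derived, base) pairs; each pointer is traced back
// through copies, spill phis and similar nodes to a node that may
// legitimately produce it.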
void PhaseChaitin::verify_base_ptrs( ResourceArea *a ) const {
#ifdef ASSERT
  Unique_Node_List worklist(a);
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);
    for (uint j = block->end_idx() + 1; j > 1; j--) {
      Node* n = block->get_node(j-1);
      if (n->is_Phi()) {
        break;
      }
      // Found a safepoint?
      if (n->is_MachSafePoint()) {
        MachSafePointNode *sfpt = n->as_MachSafePoint();
        JVMState* jvms = sfpt->jvms();
        if (jvms != NULL) {
          // Now scan for a live derived pointer
          if (jvms->oopoff() < sfpt->req()) {
            // Check each derived/base pair
            for (uint idx = jvms->oopoff(); idx < sfpt->req(); idx++) {
              Node *check = sfpt->in(idx);
              bool is_derived = ((idx - jvms->oopoff()) & 1) == 0;
              // search upwards through spills and spill phis for AddP
              worklist.clear();
              worklist.push(check);
              uint k = 0;
              while( k < worklist.size() ) {
                check = worklist.at(k);
                assert(check,"Bad base or derived pointer");
                // See PhaseChaitin::find_base_for_derived() for all cases.
                int isc = check->is_Copy();
                if( isc ) {
                  worklist.push(check->in(isc));
                } else if( check->is_Phi() ) {
                  for (uint m = 1; m < check->req(); m++)
                    worklist.push(check->in(m));
                } else if( check->is_Con() ) {
                  if (is_derived) {
                    // Derived is NULL+offset
                    assert(!is_derived || check->bottom_type()->is_ptr()->ptr() == TypePtr::Null,"Bad derived pointer");
                  } else {
                    assert(check->bottom_type()->is_ptr()->_offset == 0,"Bad base pointer");
                    // Base either ConP(NULL) or loadConP
                    if (check->is_Mach()) {
                      assert(check->as_Mach()->ideal_Opcode() == Op_ConP,"Bad base pointer");
                    } else {
                      assert(check->Opcode() == Op_ConP &&
                             check->bottom_type()->is_ptr()->ptr() == TypePtr::Null,"Bad base pointer");
                    }
                  }
                } else if( check->bottom_type()->is_ptr()->_offset == 0 ) {
                  if(check->is_Proj() || check->is_Mach() &&
                     (check->as_Mach()->ideal_Opcode() == Op_CreateEx ||
                      check->as_Mach()->ideal_Opcode() == Op_ThreadLocal ||
                      check->as_Mach()->ideal_Opcode() == Op_CMoveP ||
                      check->as_Mach()->ideal_Opcode() == Op_CheckCastPP ||
#ifdef _LP64
                      UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP ||
                      UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN ||
                      UseCompressedClassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass ||
#endif
                      check->as_Mach()->ideal_Opcode() == Op_LoadP ||
                      check->as_Mach()->ideal_Opcode() == Op_LoadKlass)) {
                    // Valid nodes
                  } else {
                    check->dump();
                    assert(false,"Bad base or derived pointer");
                  }
                } else {
                  assert(is_derived,"Bad base pointer");
                  assert(check->is_Mach() && check->as_Mach()->ideal_Opcode() == Op_AddP,"Bad derived pointer");
                }
                k++;
                assert(k < 100000,"Derived pointer checking in infinite loop");
              } // End while
            }
          } // End of check for derived pointers
        } // End of check for debug info
      } // End of if found a safepoint
    } // End of forall instructions in block
  } // End of forall blocks
#endif
}

// Verify that graphs and base pointers are still sane.
void PhaseChaitin::verify( ResourceArea *a, bool verify_ifg ) const {
#ifdef ASSERT
  if( VerifyOpto || VerifyRegisterAllocator ) {
    _cfg.verify();
    verify_base_ptrs(a);
    if(verify_ifg)
      _ifg->verify(this);
  }
#endif
}

#endif