1 /*
   2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "memory/allocation.inline.hpp"
  27 #include "opto/block.hpp"
  28 #include "opto/callnode.hpp"
  29 #include "opto/cfgnode.hpp"
  30 #include "opto/idealGraphPrinter.hpp"
  31 #include "opto/loopnode.hpp"
  32 #include "opto/machnode.hpp"
  33 #include "opto/opcodes.hpp"
  34 #include "opto/phaseX.hpp"
  35 #include "opto/regalloc.hpp"
  36 #include "opto/rootnode.hpp"
  37 
  38 //=============================================================================
  39 #define NODE_HASH_MINIMUM_SIZE    255
  40 //------------------------------NodeHash---------------------------------------
  41 NodeHash::NodeHash(uint est_max_size) :
  42   _max( round_up(est_max_size < NODE_HASH_MINIMUM_SIZE ? NODE_HASH_MINIMUM_SIZE : est_max_size) ),
  43   _a(Thread::current()->resource_area()),
  44   _table( NEW_ARENA_ARRAY( _a , Node* , _max ) ), // (Node**)_a->Amalloc(_max * sizeof(Node*)) ),
  45   _inserts(0), _insert_limit( insert_limit() ),
  46   _look_probes(0), _lookup_hits(0), _lookup_misses(0),
  47   _total_insert_probes(0), _total_inserts(0),
  48   _insert_probes(0), _grows(0) {
  49   // _sentinel must be in the current node space
  50   _sentinel = new (Compile::current()) ProjNode(NULL, TypeFunc::Control);
  51   memset(_table,0,sizeof(Node*)*_max);
  52 }
  53 
  54 //------------------------------NodeHash---------------------------------------
// Construct a NodeHash whose table lives in the supplied arena.
// The table size is rounded up to a power of 2 (at least
// NODE_HASH_MINIMUM_SIZE) so that masking can replace modulo.
NodeHash::NodeHash(Arena *arena, uint est_max_size) :
  _max( round_up(est_max_size < NODE_HASH_MINIMUM_SIZE ? NODE_HASH_MINIMUM_SIZE : est_max_size) ),
  _a(arena),
  _table( NEW_ARENA_ARRAY( _a , Node* , _max ) ),
  _inserts(0), _insert_limit( insert_limit() ),
  _look_probes(0), _lookup_hits(0), _lookup_misses(0),
  _delete_probes(0), _delete_hits(0), _delete_misses(0),
  _total_insert_probes(0), _total_inserts(0),
  _insert_probes(0), _grows(0) {
  // _sentinel must be in the current node space
  _sentinel = new (Compile::current()) ProjNode(NULL, TypeFunc::Control);
  memset(_table,0,sizeof(Node*)*_max);
}
  68 
  69 //------------------------------NodeHash---------------------------------------
// Copy-construct from another NodeHash by delegating to operator=,
// which transfers ownership of the table (see operator= below).
NodeHash::NodeHash(NodeHash *nh) {
  debug_only(_table = (Node**)badAddress);   // interact correctly w/ operator=
  // just copy in all the fields
  *this = *nh;
  // nh->_sentinel must be in the current node space
}
  76 
// Replace this table's contents with those of nh; ownership of nh's
// table transfers to this object (see operator= below).
void NodeHash::replace_with(NodeHash *nh) {
  debug_only(_table = (Node**)badAddress);   // interact correctly w/ operator=
  // just copy in all the fields
  *this = *nh;
  // nh->_sentinel must be in the current node space
}
  83 
  84 //------------------------------hash_find--------------------------------------
  85 // Find in hash table
// Open-addressed lookup: probe with an odd stride until a node equal to n
// (same opcode, same inputs, matching cmp() bits) or an empty slot is found.
// Returns the matching node, or NULL on a miss.
Node *NodeHash::hash_find( const Node *n ) {
  // ((Node*)n)->set_hash( n->hash() );
  uint hash = n->hash();
  if (hash == Node::NO_HASH) {
    // Nodes that opt out of value-numbering report NO_HASH; never in table.
    debug_only( _lookup_misses++ );
    return NULL;
  }
  uint key = hash & (_max-1);   // _max is a power of 2; mask is cheap modulo
  uint stride = key | 0x01;     // Odd stride is relatively prime to _max
  debug_only( _look_probes++ );
  Node *k = _table[key];        // Get hashed value
  if( !k ) {                    // ?Miss?
    debug_only( _lookup_misses++ );
    return NULL;                // Miss!
  }

  int op = n->Opcode();
  uint req = n->req();
  while( 1 ) {                  // While probing hash table
    if( k->req() == req &&      // Same count of inputs
        k->Opcode() == op ) {   // Same Opcode
      for( uint i=0; i<req; i++ )
        if( n->in(i)!=k->in(i)) // Different inputs?
          goto collision;       // "goto" is a speed hack...
      if( n->cmp(*k) ) {        // Check for any special bits
        debug_only( _lookup_hits++ );
        return k;               // Hit!
      }
    }
  collision:
    debug_only( _look_probes++ );
    key = (key + stride/*7*/) & (_max-1); // Stride through table with relative prime
    k = _table[key];            // Get hashed value
    if( !k ) {                  // ?Miss?
      debug_only( _lookup_misses++ );
      return NULL;              // Miss!
    }
  }
  ShouldNotReachHere();
  return NULL;
}
 127 
 128 //------------------------------hash_find_insert-------------------------------
 129 // Find in hash table, insert if not already present
 130 // Used to preserve unique entries in hash table
// Combined lookup/insert: returns the pre-existing equal node on a hit,
// or inserts n (reusing the first sentinel slot seen, if any) and returns
// NULL on a miss.  Used to preserve unique entries in the hash table.
Node *NodeHash::hash_find_insert( Node *n ) {
  // n->set_hash( );
  uint hash = n->hash();
  if (hash == Node::NO_HASH) {
    // Nodes that opt out of value-numbering are never stored.
    debug_only( _lookup_misses++ );
    return NULL;
  }
  uint key = hash & (_max-1);
  uint stride = key | 0x01;     // stride must be relatively prime to table size
  uint first_sentinel = 0;      // replace a sentinel if seen.
  debug_only( _look_probes++ );
  Node *k = _table[key];        // Get hashed value
  if( !k ) {                    // ?Miss?
    debug_only( _lookup_misses++ );
    _table[key] = n;            // Insert into table!
    debug_only(n->enter_hash_lock()); // Lock down the node while in the table.
    check_grow();               // Grow table if insert hit limit
    return NULL;                // Miss!
  }
  else if( k == _sentinel ) {
    first_sentinel = key;      // Can insert here
  }

  int op = n->Opcode();
  uint req = n->req();
  while( 1 ) {                  // While probing hash table
    if( k->req() == req &&      // Same count of inputs
        k->Opcode() == op ) {   // Same Opcode
      for( uint i=0; i<req; i++ )
        if( n->in(i)!=k->in(i)) // Different inputs?
          goto collision;       // "goto" is a speed hack...
      if( n->cmp(*k) ) {        // Check for any special bits
        debug_only( _lookup_hits++ );
        return k;               // Hit!
      }
    }
  collision:
    debug_only( _look_probes++ );
    key = (key + stride) & (_max-1); // Stride through table w/ relative prime
    k = _table[key];            // Get hashed value
    if( !k ) {                  // ?Miss?
      debug_only( _lookup_misses++ );
      // NOTE: first_sentinel == 0 means "no sentinel seen"; a sentinel
      // sitting exactly in slot 0 is therefore not reused.  Harmless, as
      // the entry is still inserted at the current (empty) slot.
      key = (first_sentinel == 0) ? key : first_sentinel; // ?saw sentinel?
      _table[key] = n;          // Insert into table!
      debug_only(n->enter_hash_lock()); // Lock down the node while in the table.
      check_grow();             // Grow table if insert hit limit
      return NULL;              // Miss!
    }
    else if( first_sentinel == 0 && k == _sentinel ) {
      first_sentinel = key;    // Can insert here
    }

  }
  ShouldNotReachHere();
  return NULL;
}
 187 
 188 //------------------------------hash_insert------------------------------------
 189 // Insert into hash table
// Unconditionally insert n into the table (no equality search); n must not
// already be present.  Empty and sentinel slots are both usable.
void NodeHash::hash_insert( Node *n ) {
  // // "conflict" comments -- print nodes that conflict
  // bool conflict = false;
  // n->set_hash();
  uint hash = n->hash();
  if (hash == Node::NO_HASH) {
    // Node opts out of value-numbering; nothing to insert.
    return;
  }
  check_grow();
  uint key = hash & (_max-1);
  uint stride = key | 0x01;     // Odd stride is relatively prime to _max

  while( 1 ) {                  // While probing hash table
    debug_only( _insert_probes++ );
    Node *k = _table[key];      // Get hashed value
    if( !k || (k == _sentinel) ) break;       // Found a slot
    assert( k != n, "already inserted" );
    // if( PrintCompilation && PrintOptoStatistics && Verbose ) { tty->print("  conflict: "); k->dump(); conflict = true; }
    key = (key + stride) & (_max-1); // Stride through table w/ relative prime
  }
  _table[key] = n;              // Insert into table!
  debug_only(n->enter_hash_lock()); // Lock down the node while in the table.
  // if( conflict ) { n->dump(); }
}
 214 
 215 //------------------------------hash_delete------------------------------------
 216 // Replace in hash table with sentinel
// Remove n from the table by overwriting its slot with the sentinel (a real
// deletion would break other entries' probe chains).  Returns true if n was
// found and removed, false if it was not in the table.
bool NodeHash::hash_delete( const Node *n ) {
  Node *k;
  uint hash = n->hash();
  if (hash == Node::NO_HASH) {
    // Node opts out of value-numbering; cannot be in the table.
    debug_only( _delete_misses++ );
    return false;
  }
  uint key = hash & (_max-1);
  uint stride = key | 0x01;     // Odd stride is relatively prime to _max
  debug_only( uint counter = 0; );
  for( ; /* (k != NULL) && (k != _sentinel) */; ) {
    debug_only( counter++ );
    debug_only( _delete_probes++ );
    k = _table[key];            // Get hashed value
    if( !k ) {                  // Miss?
      debug_only( _delete_misses++ );
#ifdef ASSERT
      if( VerifyOpto ) {
        // If n is anywhere in the table, its hash must have changed since
        // insertion (edges mutated while hashed) -- that is a bug.
        for( uint i=0; i < _max; i++ )
          assert( _table[i] != n, "changed edges with rehashing" );
      }
#endif
      return false;             // Miss! Not in chain
    }
    else if( n == k ) {
      debug_only( _delete_hits++ );
      _table[key] = _sentinel;  // Hit! Label as deleted entry
      debug_only(((Node*)n)->exit_hash_lock()); // Unlock the node upon removal from table.
      return true;
    }
    else {
      // collision: move through table with prime offset
      key = (key + stride/*7*/) & (_max-1);
      assert( counter <= _insert_limit, "Cycle in hash-table");
    }
  }
  ShouldNotReachHere();
  return false;
}
 256 
 257 //------------------------------round_up---------------------------------------
 258 // Round up to nearest power of 2
 259 uint NodeHash::round_up( uint x ) {
 260   x += (x>>2);                  // Add 25% slop
 261   if( x <16 ) return 16;        // Small stuff
 262   uint i=16;
 263   while( i < x ) i <<= 1;       // Double to fit
 264   return i;                     // Return hash table size
 265 }
 266 
 267 //------------------------------grow-------------------------------------------
 268 // Grow _table to next power of 2 and insert old entries
// Double the table size and rehash all live entries into the new table.
// Sentinel (deleted) slots are dropped, compacting probe chains.
void  NodeHash::grow() {
  // Record old state
  uint   old_max   = _max;
  Node **old_table = _table;
  // Construct new table with twice the space
  _grows++;
  // Roll per-table insert statistics into the running totals.
  _total_inserts       += _inserts;
  _total_insert_probes += _insert_probes;
  _inserts         = 0;
  _insert_probes   = 0;
  _max     = _max << 1;
  _table   = NEW_ARENA_ARRAY( _a , Node* , _max ); // (Node**)_a->Amalloc( _max * sizeof(Node*) );
  memset(_table,0,sizeof(Node*)*_max);
  _insert_limit = insert_limit();
  // Insert old entries into the new table
  for( uint i = 0; i < old_max; i++ ) {
    Node *m = *old_table++;     // Walk old table with a moving pointer
    if( !m || m == _sentinel ) continue; // Skip empty and deleted slots
    debug_only(m->exit_hash_lock()); // Unlock the node upon removal from old table.
    hash_insert(m);             // Re-locks the node in the new table
  }
}
 291 
 292 //------------------------------clear------------------------------------------
 293 // Clear all entries in _table to NULL but keep storage
 294 void  NodeHash::clear() {
 295 #ifdef ASSERT
 296   // Unlock all nodes upon removal from table.
 297   for (uint i = 0; i < _max; i++) {
 298     Node* n = _table[i];
 299     if (!n || n == _sentinel)  continue;
 300     n->exit_hash_lock();
 301   }
 302 #endif
 303 
 304   memset( _table, 0, _max * sizeof(Node*) );
 305 }
 306 
 307 //-----------------------remove_useless_nodes----------------------------------
 308 // Remove useless nodes from value table,
 309 // implementation does not depend on hash function
 310 void NodeHash::remove_useless_nodes(VectorSet &useful) {
 311 
 312   // Dead nodes in the hash table inherited from GVN should not replace
 313   // existing nodes, remove dead nodes.
 314   uint max = size();
 315   Node *sentinel_node = sentinel();
 316   for( uint i = 0; i < max; ++i ) {
 317     Node *n = at(i);
 318     if(n != NULL && n != sentinel_node && !useful.test(n->_idx)) {
 319       debug_only(n->exit_hash_lock()); // Unlock the node when removed
 320       _table[i] = sentinel_node;       // Replace with placeholder
 321     }
 322   }
 323 }
 324 
 325 
 326 void NodeHash::check_no_speculative_types() {
 327 #ifdef ASSERT
 328   uint max = size();
 329   Node *sentinel_node = sentinel();
 330   for (uint i = 0; i < max; ++i) {
 331     Node *n = at(i);
 332     if(n != NULL && n != sentinel_node && n->is_Type() && n->outcnt() > 0) {
 333       TypeNode* tn = n->as_Type();
 334       const Type* t = tn->type();
 335       const Type* t_no_spec = t->remove_speculative();
 336       assert(t == t_no_spec, "dead node in hash table or missed node during speculative cleanup");
 337     }
 338   }
 339 #endif
 340 }
 341 
 342 #ifndef PRODUCT
 343 //------------------------------dump-------------------------------------------
 344 // Dump statistics for the hash table
// Dump statistics for the hash table; prints only under
// PrintCompilation+PrintOptoStatistics+Verbose and when inserts occurred.
void NodeHash::dump() {
  // Fold the current table's counters into the running totals first.
  _total_inserts       += _inserts;
  _total_insert_probes += _insert_probes;
  if (PrintCompilation && PrintOptoStatistics && Verbose && (_inserts > 0)) {
    if (WizardMode) {
      // Per-slot dump: slot index / home slot of the entry / node index.
      for (uint i=0; i<_max; i++) {
        if (_table[i])
          tty->print("%d/%d/%d ",i,_table[i]->hash()&(_max-1),_table[i]->_idx);
      }
    }
    tty->print("\nGVN Hash stats:  %d grows to %d max_size\n", _grows, _max);
    tty->print("  %d/%d (%8.1f%% full)\n", _inserts, _max, (double)_inserts/_max*100.0);
    tty->print("  %dp/(%dh+%dm) (%8.2f probes/lookup)\n", _look_probes, _lookup_hits, _lookup_misses, (double)_look_probes/(_lookup_hits+_lookup_misses));
    tty->print("  %dp/%di (%8.2f probes/insert)\n", _total_insert_probes, _total_inserts, (double)_total_insert_probes/_total_inserts);
    // sentinels increase lookup cost, but not insert cost
    assert((_lookup_misses+_lookup_hits)*4+100 >= _look_probes, "bad hash function");
    assert( _inserts+(_inserts>>3) < _max, "table too full" );
    assert( _inserts*3+100 >= _insert_probes, "bad hash function" );
  }
}
 365 
 366 Node *NodeHash::find_index(uint idx) { // For debugging
 367   // Find an entry by its index value
 368   for( uint i = 0; i < _max; i++ ) {
 369     Node *m = _table[i];
 370     if( !m || m == _sentinel ) continue;
 371     if( m->_idx == (uint)idx ) return m;
 372   }
 373   return NULL;
 374 }
 375 #endif
 376 
 377 #ifdef ASSERT
 378 NodeHash::~NodeHash() {
 379   // Unlock all nodes upon destruction of table.
 380   if (_table != (Node**)badAddress)  clear();
 381 }
 382 
// Debug-only assignment: takes over nh's table wholesale via memcpy and
// poisons nh's table pointer so the source can never be used again.  This
// keeps the per-node hash_lock counts balanced (they are not re-entered).
void NodeHash::operator=(const NodeHash& nh) {
  // Unlock all nodes upon replacement of table.
  if (&nh == this)  return;
  if (_table != (Node**)badAddress)  clear();
  // Shallow bitwise copy of all fields, including the table pointer.
  memcpy(this, &nh, sizeof(*this));
  // Do not increment hash_lock counts again.
  // Instead, be sure we never again use the source table.
  ((NodeHash*)&nh)->_table = (Node**)badAddress;
}
 392 
 393 
 394 #endif
 395 
 396 
 397 //=============================================================================
 398 //------------------------------PhaseRemoveUseless-----------------------------
 399 // 1) Use a breadthfirst walk to collect useful nodes reachable from root.
// Collect the set of nodes reachable from root (breadth-first) and strip
// everything else: dead entries in the GVN value table, dead worklist
// entries, and dead edges adjacent to useful nodes.
PhaseRemoveUseless::PhaseRemoveUseless( PhaseGVN *gvn, Unique_Node_List *worklist ) : Phase(Remove_Useless),
  _useful(Thread::current()->resource_area()) {

  // Implementation requires 'UseLoopSafepoints == true' and an edge from root
  // to each SafePointNode at a backward branch.  Inserted in add_safepoint().
  if( !UseLoopSafepoints || !OptoRemoveUseless ) return;

  // Identify nodes that are reachable from below, useful.
  C->identify_useful_nodes(_useful);
  // Update dead node list
  C->update_dead_node_list(_useful);

  // Remove all useless nodes from PhaseValues' recorded types
  // Must be done before disconnecting nodes to preserve hash-table-invariant
  gvn->remove_useless_nodes(_useful.member_set());

  // Remove all useless nodes from future worklist
  worklist->remove_useless_nodes(_useful.member_set());

  // Disconnect 'useless' nodes that are adjacent to useful nodes
  C->remove_useless_nodes(_useful);

  // Remove edges from "root" to each SafePoint at a backward branch.
  // They were inserted during parsing (see add_safepoint()) to make infinite
  // loops without calls or exceptions visible to root, i.e., useful.
  Node *root = C->root();
  if( root != NULL ) {
    // Precedence edges live in the slots from req() to len().
    for( uint i = root->req(); i < root->len(); ++i ) {
      Node *n = root->in(i);
      if( n != NULL && n->is_SafePoint() ) {
        root->rm_prec(i);
        // rm_prec shifts the remaining edges down; revisit this slot.
        --i;
      }
    }
  }
}
 436 
 437 
 438 //=============================================================================
 439 //------------------------------PhaseTransform---------------------------------
// Construct a PhaseTransform using the current thread's resource area for
// the node map and type array.
PhaseTransform::PhaseTransform( PhaseNumber pnum ) : Phase(pnum),
  _arena(Thread::current()->resource_area()),
  _nodes(_arena),
  _types(_arena)
{
  init_con_caches();
#ifndef PRODUCT
  clear_progress();
  clear_transforms();
  set_allow_progress(true);
#endif
  // Force allocation for currently existing nodes
  _types.map(C->unique(), NULL);
}
 454 
 455 //------------------------------PhaseTransform---------------------------------
// Construct a PhaseTransform whose node map and type array live in the
// supplied arena.
PhaseTransform::PhaseTransform( Arena *arena, PhaseNumber pnum ) : Phase(pnum),
  _arena(arena),
  _nodes(arena),
  _types(arena)
{
  init_con_caches();
#ifndef PRODUCT
  clear_progress();
  clear_transforms();
  set_allow_progress(true);
#endif
  // Force allocation for currently existing nodes
  _types.map(C->unique(), NULL);
}
 470 
 471 //------------------------------PhaseTransform---------------------------------
 472 // Initialize with previously generated type information
// Initialize with previously generated type information: shares the prior
// phase's arena, node map and type array.
PhaseTransform::PhaseTransform( PhaseTransform *pt, PhaseNumber pnum ) : Phase(pnum),
  _arena(pt->_arena),
  _nodes(pt->_nodes),
  _types(pt->_types)
{
  init_con_caches();
#ifndef PRODUCT
  clear_progress();
  clear_transforms();
  set_allow_progress(true);
#endif
}
 485 
// Reset the small-constant caches (int, long, zero-of-type) to empty.
void PhaseTransform::init_con_caches() {
  memset(_icons,0,sizeof(_icons));
  memset(_lcons,0,sizeof(_lcons));
  memset(_zcons,0,sizeof(_zcons));
}
 491 
 492 
 493 //--------------------------------find_int_type--------------------------------
 494 const TypeInt* PhaseTransform::find_int_type(Node* n) {
 495   if (n == NULL)  return NULL;
 496   // Call type_or_null(n) to determine node's type since we might be in
 497   // parse phase and call n->Value() may return wrong type.
 498   // (For example, a phi node at the beginning of loop parsing is not ready.)
 499   const Type* t = type_or_null(n);
 500   if (t == NULL)  return NULL;
 501   return t->isa_int();
 502 }
 503 
 504 
 505 //-------------------------------find_long_type--------------------------------
 506 const TypeLong* PhaseTransform::find_long_type(Node* n) {
 507   if (n == NULL)  return NULL;
 508   // (See comment above on type_or_null.)
 509   const Type* t = type_or_null(n);
 510   if (t == NULL)  return NULL;
 511   return t->isa_long();
 512 }
 513 
 514 
 515 #ifndef PRODUCT
// Debug helper: dump the old-node -> new-node mapping.
void PhaseTransform::dump_old2new_map() const {
  _nodes.dump();
}
 519 
 520 void PhaseTransform::dump_new( uint nidx ) const {
 521   for( uint i=0; i<_nodes.Size(); i++ )
 522     if( _nodes[i] && _nodes[i]->_idx == nidx ) {
 523       _nodes[i]->dump();
 524       tty->cr();
 525       tty->print_cr("Old index= %d",i);
 526       return;
 527     }
 528   tty->print_cr("Node %d not found in the new indices", nidx);
 529 }
 530 
 531 //------------------------------dump_types-------------------------------------
// Debug helper: dump the node-index -> Type mapping.
void PhaseTransform::dump_types( ) const {
  _types.dump();
}
 535 
 536 //------------------------------dump_nodes_and_types---------------------------
// Dump root and its inputs to the given depth, with each node's cached
// type; only_ctrl restricts the walk to control inputs (except Regions).
void PhaseTransform::dump_nodes_and_types(const Node *root, uint depth, bool only_ctrl) {
  VectorSet visited(Thread::current()->resource_area());
  dump_nodes_and_types_recur( root, depth, only_ctrl, visited );
}
 541 
 542 //------------------------------dump_nodes_and_types_recur---------------------
// Recursive body of dump_nodes_and_types: post-order dump so inputs print
// before their users; 'visited' prevents revisiting shared nodes.
void PhaseTransform::dump_nodes_and_types_recur( const Node *n, uint depth, bool only_ctrl, VectorSet &visited) {
  if( !n ) return;
  if( depth == 0 ) return;
  if( visited.test_set(n->_idx) ) return;
  for( uint i=0; i<n->len(); i++ ) {
    // Under only_ctrl, follow just the control input -- except for Regions,
    // whose inputs are all control.
    if( only_ctrl && !(n->is_Region()) && i != TypeFunc::Control ) continue;
    dump_nodes_and_types_recur( n->in(i), depth-1, only_ctrl, visited );
  }
  n->dump();
  if (type_or_null(n) != NULL) {
    tty->print("      "); type(n)->dump(); tty->cr();
  }
}
 556 
 557 #endif
 558 
 559 
 560 //=============================================================================
 561 //------------------------------PhaseValues------------------------------------
 562 // Set minimum table size to "255"
// Construct a fresh GVN value table in the given arena (minimum size 255).
PhaseValues::PhaseValues( Arena *arena, uint est_max_size ) : PhaseTransform(arena, GVN), _table(arena, est_max_size) {
  NOT_PRODUCT( clear_new_values(); )
}
 566 
 567 //------------------------------PhaseValues------------------------------------
 568 // Set minimum table size to "255"
// Construct by taking over an existing PhaseValues' hash table and types.
PhaseValues::PhaseValues( PhaseValues *ptv ) : PhaseTransform( ptv, GVN ),
  _table(&ptv->_table) {
  NOT_PRODUCT( clear_new_values(); )
}
 573 
 574 //------------------------------PhaseValues------------------------------------
 575 // Used by +VerifyOpto.  Clear out hash table but copy _types array.
// Used by +VerifyOpto.  Clear out hash table (fresh empty table of the same
// size) but copy the _types array from the prior phase.
PhaseValues::PhaseValues( PhaseValues *ptv, const char *dummy ) : PhaseTransform( ptv, GVN ),
  _table(ptv->arena(),ptv->_table.size()) {
  NOT_PRODUCT( clear_new_values(); )
}
 580 
 581 //------------------------------~PhaseValues-----------------------------------
 582 #ifndef PRODUCT
// Debug-only destructor: dump hash-table statistics and, under verbose
// wizard flags, a summary of transformation progress and efficiency.
PhaseValues::~PhaseValues() {
  _table.dump();

  // Statistics for value progress and efficiency
  if( PrintCompilation && Verbose && WizardMode ) {
    tty->print("\n%sValues: %d nodes ---> %d/%d (%d)",
      is_IterGVN() ? "Iter" : "    ", C->unique(), made_progress(), made_transforms(), made_new_values());
    if( made_transforms() != 0 ) {
      tty->print_cr("  ratio %f", made_progress()/(float)made_transforms() );
    } else {
      tty->cr();
    }
  }
}
 597 #endif
 598 
 599 //------------------------------makecon----------------------------------------
 600 ConNode* PhaseTransform::makecon(const Type *t) {
 601   assert(t->singleton(), "must be a constant");
 602   assert(!t->empty() || t == Type::TOP, "must not be vacuous range");
 603   switch (t->base()) {  // fast paths
 604   case Type::Half:
 605   case Type::Top:  return (ConNode*) C->top();
 606   case Type::Int:  return intcon( t->is_int()->get_con() );
 607   case Type::Long: return longcon( t->is_long()->get_con() );
 608   }
 609   if (t->is_zero_type())
 610     return zerocon(t->basic_type());
 611   return uncached_makecon(t);
 612 }
 613 
 614 //--------------------------uncached_makecon-----------------------------------
 615 // Make an idealized constant - one of ConINode, ConPNode, etc.
// Make an idealized constant - one of ConINode, ConPNode, etc. - and value-
// number it, returning the pre-existing node if an equal one already exists.
ConNode* PhaseValues::uncached_makecon(const Type *t) {
  assert(t->singleton(), "must be a constant");
  ConNode* x = ConNode::make(C, t);
  ConNode* k = (ConNode*)hash_find_insert(x); // Value numbering
  if (k == NULL) {
    set_type(x, t);             // Missed, provide type mapping
    GrowableArray<Node_Notes*>* nna = C->node_note_array();
    if (nna != NULL) {
      Node_Notes* loc = C->locate_node_notes(nna, x->_idx, true);
      loc->clear(); // do not put debug info on constants
    }
  } else {
    x->destruct();              // Hit, destroy duplicate constant
    x = k;                      // use existing constant
  }
  return x;
}
 633 
 634 //------------------------------intcon-----------------------------------------
 635 // Fast integer constant.  Same as "transform(new ConINode(TypeInt::make(i)))"
 636 ConINode* PhaseTransform::intcon(int i) {
 637   // Small integer?  Check cache! Check that cached node is not dead
 638   if (i >= _icon_min && i <= _icon_max) {
 639     ConINode* icon = _icons[i-_icon_min];
 640     if (icon != NULL && icon->in(TypeFunc::Control) != NULL)
 641       return icon;
 642   }
 643   ConINode* icon = (ConINode*) uncached_makecon(TypeInt::make(i));
 644   assert(icon->is_Con(), "");
 645   if (i >= _icon_min && i <= _icon_max)
 646     _icons[i-_icon_min] = icon;   // Cache small integers
 647   return icon;
 648 }
 649 
 650 //------------------------------longcon----------------------------------------
 651 // Fast long constant.
 652 ConLNode* PhaseTransform::longcon(jlong l) {
 653   // Small integer?  Check cache! Check that cached node is not dead
 654   if (l >= _lcon_min && l <= _lcon_max) {
 655     ConLNode* lcon = _lcons[l-_lcon_min];
 656     if (lcon != NULL && lcon->in(TypeFunc::Control) != NULL)
 657       return lcon;
 658   }
 659   ConLNode* lcon = (ConLNode*) uncached_makecon(TypeLong::make(l));
 660   assert(lcon->is_Con(), "");
 661   if (l >= _lcon_min && l <= _lcon_max)
 662     _lcons[l-_lcon_min] = lcon;      // Cache small integers
 663   return lcon;
 664 }
 665 
 666 //------------------------------zerocon-----------------------------------------
 667 // Fast zero or null constant. Same as "transform(ConNode::make(Type::get_zero_type(bt)))"
 668 ConNode* PhaseTransform::zerocon(BasicType bt) {
 669   assert((uint)bt <= _zcon_max, "domain check");
 670   ConNode* zcon = _zcons[bt];
 671   if (zcon != NULL && zcon->in(TypeFunc::Control) != NULL)
 672     return zcon;
 673   zcon = (ConNode*) uncached_makecon(Type::get_zero_type(bt));
 674   _zcons[bt] = zcon;
 675   return zcon;
 676 }
 677 
 678 
 679 
 680 //=============================================================================
 681 //------------------------------transform--------------------------------------
 682 // Return a node which computes the same function as this node, but in a
 683 // faster or cheaper fashion.
// Return a node which computes the same function as this node, but in a
// faster or cheaper fashion.  Thin wrapper over transform_no_reclaim.
Node *PhaseGVN::transform( Node *n ) {
  return transform_no_reclaim(n);
}
 687 
 688 //------------------------------transform--------------------------------------
 689 // Return a node which computes the same function as this node, but
 690 // in a faster or cheaper fashion.
// Core GVN transform: idealize n to a fixed point, cache its computed
// Value, fold singleton types to constants, apply Identity, then value-
// number the result.  Returns n itself or a replacement node.
Node *PhaseGVN::transform_no_reclaim( Node *n ) {
  NOT_PRODUCT( set_transforms(); )

  // Apply the Ideal call in a loop until it no longer applies
  Node *k = n;
  NOT_PRODUCT( uint loop_count = 0; )
  while( 1 ) {
    Node *i = k->Ideal(this, /*can_reshape=*/false);
    if( !i ) break;
    assert( i->_idx >= k->_idx, "Idealize should return new nodes, use Identity to return old nodes" );
    k = i;
    assert(loop_count++ < K, "infinite loop in PhaseGVN::transform");
  }
  NOT_PRODUCT( if( loop_count != 0 ) { set_progress(); } )


  // If brand new node, make space in type array.
  ensure_type_or_null(k);

  // Since I just called 'Value' to compute the set of run-time values
  // for this Node, and 'Value' is non-local (and therefore expensive) I'll
  // cache Value.  Later requests for the local phase->type of this Node can
  // use the cached Value instead of suffering with 'bottom_type'.
  const Type *t = k->Value(this); // Get runtime Value set
  assert(t != NULL, "value sanity");
  if (type_or_null(k) != t) {
#ifndef PRODUCT
    // Do not count initial visit to node as a transformation
    if (type_or_null(k) == NULL) {
      inc_new_values();
      set_progress();
    }
#endif
    set_type(k, t);
    // If k is a TypeNode, capture any more-precise type permanently into Node
    k->raise_bottom_type(t);
  }

  // A singleton type that is not already a constant folds to one.
  if( t->singleton() && !k->is_Con() ) {
    NOT_PRODUCT( set_progress(); )
    return makecon(t);          // Turn into a constant
  }

  // Now check for Identities
  Node *i = k->Identity(this);  // Look for a nearby replacement
  if( i != k ) {                // Found? Return replacement!
    NOT_PRODUCT( set_progress(); )
    return i;
  }

  // Global Value Numbering
  i = hash_find_insert(k);      // Insert if new
  if( i && (i != k) ) {
    // Return the pre-existing node
    NOT_PRODUCT( set_progress(); )
    return i;
  }

  // Return Idealized original
  return k;
}
 752 
 753 #ifdef ASSERT
 754 //------------------------------dead_loop_check--------------------------------
 755 // Check for a simple dead loop when a data node references itself directly
 756 // or through an other data node excluding cons and phis.
// Check for a simple dead loop when a data node references itself directly
// or through an other data node excluding cons and phis.  Asserts (after
// dumping the offender) when such a cycle is detected.
void PhaseGVN::dead_loop_check( Node *n ) {
  // Phi may reference itself in a loop
  if (n != NULL && !n->is_dead_loop_safe() && !n->is_CFG()) {
    // Do 2 levels check and only data inputs.
    bool no_dead_loop = true;
    uint cnt = n->req();
    for (uint i = 1; i < cnt && no_dead_loop; i++) {
      Node *in = n->in(i);
      if (in == n) {
        // Direct self-reference.
        no_dead_loop = false;
      } else if (in != NULL && !in->is_dead_loop_safe()) {
        // One level deeper: look for a cycle back to n (or in to itself).
        uint icnt = in->req();
        for (uint j = 1; j < icnt && no_dead_loop; j++) {
          if (in->in(j) == n || in->in(j) == in)
            no_dead_loop = false;
        }
      }
    }
    if (!no_dead_loop) n->dump(3);
    assert(no_dead_loop, "dead loop detected");
  }
}
 779 #endif
 780 
 781 //=============================================================================
 782 //------------------------------PhaseIterGVN-----------------------------------
 783 // Initialize hash table to fresh and clean for +VerifyOpto
// Initialize hash table to fresh and clean for +VerifyOpto; the worklist
// starts empty and the stack is sized from the node count.
PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn, const char *dummy ) : PhaseGVN(igvn,dummy), _worklist( ),
                                                                      _stack(C->unique() >> 1),
                                                                      _delay_transform(false) {
}
 788 
//------------------------------PhaseIterGVN-----------------------------------
// Initialize with previous PhaseIterGVN info; used by PhaseCCP.  Copies the
// prior phase's worklist, node stack, and delay-transform flag instead of
// building fresh ones.
PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn ) : PhaseGVN(igvn),
                                                   _worklist( igvn->_worklist ),
                                                   _stack( igvn->_stack ),
                                                   _delay_transform(igvn->_delay_transform)
{
}
 797 
//------------------------------PhaseIterGVN-----------------------------------
// Initialize with previous PhaseGVN info from Parser.  Adopts the compile's
// for_igvn worklist, then repairs two artifacts of parse-time GVN: dead
// nodes left in the hash table, and users of half-built Phis/Regions.
PhaseIterGVN::PhaseIterGVN( PhaseGVN *gvn ) : PhaseGVN(gvn),
                                              _worklist(*C->for_igvn()),
                                              _stack(C->unique() >> 1),
                                              _delay_transform(false)
{
  uint max;

  // Dead nodes in the hash table inherited from GVN were not treated as
  // roots during def-use info creation; hence they represent an invisible
  // use.  Clear them out.
  max = _table.size();
  for( uint i = 0; i < max; ++i ) {
    Node *n = _table.at(i);
    if(n != NULL && n != _table.sentinel() && n->outcnt() == 0) {
      if( n->is_top() ) continue;  // TOP legitimately has no uses
      // Anything else with zero uses should already have been removed.
      assert( false, "Parse::remove_useless_nodes missed this node");
      hash_delete(n);
    }
  }

  // Any Phis or Regions on the worklist probably had uses that could not
  // make more progress because the uses were made while the Phis and Regions
  // were in half-built states.  Put all uses of Phis and Regions on worklist.
  max = _worklist.size();
  for( uint j = 0; j < max; j++ ) {
    Node *n = _worklist.at(j);
    uint uop = n->Opcode();
    if( uop == Op_Phi || uop == Op_Region ||
        n->is_Type() ||
        n->is_Mem() )
      add_users_to_worklist(n);
  }
}
 833 
 834 /**
 835  * Initialize worklist for each node.
 836  */
 837 void PhaseIterGVN::init_worklist(Node* first) {
 838   Unique_Node_List to_process;
 839   to_process.push(first);
 840 
 841   while (to_process.size() > 0) {
 842     Node* n = to_process.pop();
 843     if (!_worklist.member(n)) {
 844       _worklist.push(n);
 845 
 846       uint cnt = n->req();
 847       for(uint i = 0; i < cnt; i++) {
 848         Node* m = n->in(i);
 849         if (m != NULL) {
 850           to_process.push(m);
 851         }
 852       }
 853     }
 854   }
 855 }
 856 
 857 #ifndef PRODUCT
 858 void PhaseIterGVN::verify_step(Node* n) {
 859   if (VerifyIterativeGVN) {
 860     _verify_window[_verify_counter % _verify_window_size] = n;
 861     ++_verify_counter;
 862     ResourceMark rm;
 863     ResourceArea* area = Thread::current()->resource_area();
 864     VectorSet old_space(area), new_space(area);
 865     if (C->unique() < 1000 ||
 866         0 == _verify_counter % (C->unique() < 10000 ? 10 : 100)) {
 867       ++_verify_full_passes;
 868       Node::verify_recur(C->root(), -1, old_space, new_space);
 869     }
 870     const int verify_depth = 4;
 871     for ( int i = 0; i < _verify_window_size; i++ ) {
 872       Node* n = _verify_window[i];
 873       if ( n == NULL )  continue;
 874       if( n->in(0) == NodeSentinel ) {  // xform_idom
 875         _verify_window[i] = n->in(1);
 876         --i; continue;
 877       }
 878       // Typical fanout is 1-2, so this call visits about 6 nodes.
 879       Node::verify_recur(n, verify_depth, old_space, new_space);
 880     }
 881   }
 882 }
 883 
// Trace one IGVN transform under -XX:+TraceIterativeGVN: prints the old
// node and type (prefixed '<'), then the new node and/or type ('*' for a
// node with no prior type, '>' for a replacement node, '=' for a type-only
// change).  With -XX:+Verbose, also lists the _idx of every node pushed
// onto the worklist during this transform.
void PhaseIterGVN::trace_PhaseIterGVN(Node* n, Node* nn, const Type* oldtype) {
  if (TraceIterativeGVN) {
    uint wlsize = _worklist.size();   // snapshot to detect pushes below
    const Type* newtype = type_or_null(n);
    if (nn != n) {
      // print old node
      tty->print("< ");
      if (oldtype != newtype && oldtype != NULL) {
        oldtype->dump();
      }
      do { tty->print("\t"); } while (tty->position() < 16);  // align columns
      tty->print("<");
      n->dump();
    }
    if (oldtype != newtype || nn != n) {
      // print new node and/or new type
      if (oldtype == NULL) {
        tty->print("* ");
      } else if (nn != n) {
        tty->print("> ");
      } else {
        tty->print("= ");
      }
      if (newtype == NULL) {
        tty->print("null");
      } else {
        newtype->dump();
      }
      do { tty->print("\t"); } while (tty->position() < 16);
      nn->dump();
    }
    if (Verbose && wlsize < _worklist.size()) {
      tty->print("  Push {");
      while (wlsize != _worklist.size()) {
        Node* pushed = _worklist.at(wlsize++);
        tty->print(" %d", pushed->_idx);
      }
      tty->print_cr(" }");
    }
    if (nn != n) {
      // ignore n, it might be subsumed
      verify_step((Node*) NULL);
    }
  }
}
 929 
 930 void PhaseIterGVN::init_verifyPhaseIterGVN() {
 931   _verify_counter = 0;
 932   _verify_full_passes = 0;
 933   for (int i = 0; i < _verify_window_size; i++) {
 934     _verify_window[i] = NULL;
 935   }
 936 }
 937 
// End-of-optimize verification (non-product builds).  Checks graph edges,
// optionally re-runs a fresh no-progress IGVN pass (+VerifyOpto) to catch
// missed transforms, and reports VerifyIterativeGVN statistics (+PrintOpto).
void PhaseIterGVN::verify_PhaseIterGVN() {
  C->verify_graph_edges();
  if( VerifyOpto && allow_progress() ) {
    // Must turn off allow_progress to enable assert and break recursion
    C->root()->verify();
    { // Check if any progress was missed using IterGVN
      // Def-Use info enables transformations not attempted in wash-pass
      // e.g. Region/Phi cleanup, ...
      // Null-check elision -- may not have reached fixpoint
      //                       do not propagate to dominated nodes
      ResourceMark rm;
      PhaseIterGVN igvn2(this,"Verify"); // Fresh and clean!
      // Fill worklist completely
      igvn2.init_worklist(C->root());

      igvn2.set_allow_progress(false);
      igvn2.optimize();
      igvn2.set_allow_progress(true);
    }
  }
  if (VerifyIterativeGVN && PrintOpto) {
    // Counters are maintained by verify_step(); equal counts mean every
    // transform was followed by a full-graph verify pass.
    if (_verify_counter == _verify_full_passes) {
      tty->print_cr("VerifyIterativeGVN: %d transforms and verify passes",
                    (int) _verify_full_passes);
    } else {
      tty->print_cr("VerifyIterativeGVN: %d transforms, %d full verify passes",
                  (int) _verify_counter, (int) _verify_full_passes);
    }
  }
}
 968 #endif /* PRODUCT */
 969 
 970 #ifdef ASSERT
/**
 * Dumps information that can help to debug an IGVN infinite loop: the node
 * being processed (with a 4-level neighborhood) and the remaining worklist.
 * Only built in ASSERT builds; always fails with an assert afterwards.
 */
void PhaseIterGVN::dump_infinite_loop_info(Node* n) {
  n->dump(4);
  _worklist.dump();
  assert(false, "infinite loop in PhaseIterGVN::optimize");
}
 980 
 981 /**
 982  * Prints out information about IGVN if the 'verbose' option is used.
 983  */
 984 void PhaseIterGVN::trace_PhaseIterGVN_verbose(Node* n, int num_processed) {
 985   if (TraceIterativeGVN && Verbose) {
 986     tty->print("  Pop ");
 987     n->dump();
 988     if ((num_processed % 100) == 0) {
 989       _worklist.print_set();
 990     }
 991   }
 992 }
 993 #endif /* ASSERT */
 994 
// Main IGVN driver: repeatedly pop a node from the worklist and transform
// it until the worklist is empty (fixpoint) or a bail-out triggers.
void PhaseIterGVN::optimize() {
  DEBUG_ONLY(uint num_processed  = 0;)
  NOT_PRODUCT(init_verifyPhaseIterGVN();)

  uint loop_count = 0;
  // Pull from worklist and transform the node. If the node has changed,
  // update edge info and put uses on worklist.
  while(_worklist.size()) {
    // Bail out if the graph is too close to the node-count limit.
    if (C->check_node_count(NodeLimitFudgeFactor * 2, "Out of nodes")) {
      return;
    }
    Node* n  = _worklist.pop();
    // Safety valve: far more iterations than live nodes means IGVN is
    // cycling instead of converging; mark the method not compilable.
    if (++loop_count >= K * C->live_nodes()) {
      DEBUG_ONLY(dump_infinite_loop_info(n);)
      C->record_method_not_compilable("infinite loop in PhaseIterGVN::optimize");
      return;
    }
    DEBUG_ONLY(trace_PhaseIterGVN_verbose(n, num_processed++);)
    if (n->outcnt() != 0) {
      NOT_PRODUCT(const Type* oldtype = type_or_null(n));
      // Do the transformation
      Node* nn = transform_old(n);
      NOT_PRODUCT(trace_PhaseIterGVN(n, nn, oldtype);)
    } else if (!n->is_top()) {
      // No remaining uses: reclaim the node (and any inputs it kills).
      remove_dead_node(n);
    }
  }
  NOT_PRODUCT(verify_PhaseIterGVN();)
}
1024 
1025 
1026 /**
1027  * Register a new node with the optimizer.  Update the types array, the def-use
1028  * info.  Put on worklist.
1029  */
1030 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1031   set_type_bottom(n);
1032   _worklist.push(n);
1033   if (orig != NULL)  C->copy_node_notes_to(n, orig);
1034   return n;
1035 }
1036 
1037 //------------------------------transform--------------------------------------
1038 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1039 Node *PhaseIterGVN::transform( Node *n ) {
1040   if (_delay_transform) {
1041     // Register the node but don't optimize for now
1042     register_new_node_with_optimizer(n);
1043     return n;
1044   }
1045 
1046   // If brand new node, make space in type array, and give it a type.
1047   ensure_type_or_null(n);
1048   if (type_or_null(n) == NULL) {
1049     set_type_bottom(n);
1050   }
1051 
1052   return transform_old(n);
1053 }
1054 
// Transform a node already known to the optimizer: loop Ideal() to a
// fixpoint, then refine its type via Value(), fold to a constant if the
// type is a singleton, try Identity(), and finally global-value-number it.
// The node may be subsumed; callers must use the returned node.
Node *PhaseIterGVN::transform_old(Node* n) {
  DEBUG_ONLY(uint loop_count = 0;);
  NOT_PRODUCT(set_transforms());

  // Remove 'n' from hash table in case it gets modified
  _table.hash_delete(n);
  if (VerifyIterativeGVN) {
   assert(!_table.find_index(n->_idx), "found duplicate entry in table");
  }

  // Apply the Ideal call in a loop until it no longer applies
  Node* k = n;
  DEBUG_ONLY(dead_loop_check(k);)
  DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
  Node* i = k->Ideal(this, /*can_reshape=*/true);
  assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
#ifndef PRODUCT
  verify_step(k);
  if (i && VerifyOpto ) {
    if (!allow_progress()) {
      if (i->is_Add() && (i->outcnt() == 1)) {
        // Switched input to left side because this is the only use
      } else if (i->is_If() && (i->in(0) == NULL)) {
        // This IF is dead because it is dominated by an equivalent IF When
        // dominating if changed, info is not propagated sparsely to 'this'
        // Propagating this info further will spuriously identify other
        // progress.
        return i;
      } else
        set_progress();
    } else {
      set_progress();
    }
  }
#endif

  // Ideal() returning non-NULL means it made a change; keep re-idealizing
  // until it returns NULL (no further progress).
  while (i != NULL) {
#ifndef PRODUCT
    if (loop_count >= K) {
      // Too many Ideal iterations on one node: diagnose the cycle.
      dump_infinite_loop_info(i);
    }
    loop_count++;
#endif
    assert((i->_idx >= k->_idx) || i->is_top(), "Idealize should return new nodes, use Identity to return old nodes");
    // Made a change; put users of original Node on worklist
    add_users_to_worklist(k);
    // Replacing root of transform tree?
    if (k != i) {
      // Make users of old Node now use new.
      subsume_node(k, i);
      k = i;
    }
    DEBUG_ONLY(dead_loop_check(k);)
    // Try idealizing again
    DEBUG_ONLY(is_new = (k->outcnt() == 0);)
    i = k->Ideal(this, /*can_reshape=*/true);
    assert(i != k || is_new || (i->outcnt() > 0), "don't return dead nodes");
#ifndef PRODUCT
    verify_step(k);
    if (i && VerifyOpto) {
      set_progress();
    }
#endif
  }

  // If brand new node, make space in type array.
  ensure_type_or_null(k);

  // See what kind of values 'k' takes on at runtime
  const Type* t = k->Value(this);
  assert(t != NULL, "value sanity");

  // Since I just called 'Value' to compute the set of run-time values
  // for this Node, and 'Value' is non-local (and therefore expensive) I'll
  // cache Value.  Later requests for the local phase->type of this Node can
  // use the cached Value instead of suffering with 'bottom_type'.
  if (type_or_null(k) != t) {
#ifndef PRODUCT
    inc_new_values();
    set_progress();
#endif
    set_type(k, t);
    // If k is a TypeNode, capture any more-precise type permanently into Node
    k->raise_bottom_type(t);
    // Move users of node to worklist
    add_users_to_worklist(k);
  }
  // If 'k' computes a constant, replace it with a constant
  if (t->singleton() && !k->is_Con()) {
    NOT_PRODUCT(set_progress();)
    Node* con = makecon(t);     // Make a constant
    add_users_to_worklist(k);
    subsume_node(k, con);       // Everybody using k now uses con
    return con;
  }

  // Now check for Identities
  i = k->Identity(this);      // Look for a nearby replacement
  if (i != k) {                // Found? Return replacement!
    NOT_PRODUCT(set_progress();)
    add_users_to_worklist(k);
    subsume_node(k, i);       // Everybody using k now uses i
    return i;
  }

  // Global Value Numbering
  i = hash_find_insert(k);      // Check for pre-existing node
  if (i && (i != k)) {
    // Return the pre-existing node if it isn't dead
    NOT_PRODUCT(set_progress();)
    add_users_to_worklist(k);
    subsume_node(k, i);       // Everybody using k now uses i
    return i;
  }

  // Return Idealized original
  return k;
}
1173 
1174 //---------------------------------saturate------------------------------------
1175 const Type* PhaseIterGVN::saturate(const Type* new_type, const Type* old_type,
1176                                    const Type* limit_type) const {
1177   return new_type->narrow(old_type);
1178 }
1179 
//------------------------------remove_globally_dead_node----------------------
// Kill a globally dead Node.  All uses are also globally dead and are
// aggressively trimmed.  Implemented as an explicit two-state stack machine
// (PROCESS_INPUTS, then PROCESS_OUTPUTS) to avoid deep recursion.
void PhaseIterGVN::remove_globally_dead_node( Node *dead ) {
  enum DeleteProgress {
    PROCESS_INPUTS,   // first visit: sever input edges
    PROCESS_OUTPUTS   // second visit: recurse into dead outputs
  };
  assert(_stack.is_empty(), "not empty");
  _stack.push(dead, PROCESS_INPUTS);

  while (_stack.is_nonempty()) {
    dead = _stack.node();
    uint progress_state = _stack.index();
    assert(dead != C->root(), "killing root, eh?");
    assert(!dead->is_top(), "add check for top when pushing");
    NOT_PRODUCT( set_progress(); )
    if (progress_state == PROCESS_INPUTS) {
      // After following inputs, continue to outputs
      _stack.set_index(PROCESS_OUTPUTS);
      if (!dead->is_Con()) { // Don't kill cons but uses
        bool recurse = false;
        // Remove from hash table
        _table.hash_delete( dead );
        // Smash all inputs to 'dead', isolating him completely
        for (uint i = 0; i < dead->req(); i++) {
          Node *in = dead->in(i);
          if (in != NULL && in != C->top()) {  // Points to something?
            int nrep = dead->replace_edge(in, NULL);  // Kill edges
            assert((nrep > 0), "sanity");
            if (in->outcnt() == 0) { // Made input go dead?
              _stack.push(in, PROCESS_INPUTS); // Recursively remove
              recurse = true;
            } else if (in->outcnt() == 1 &&
                       in->has_special_unique_user()) {
              // Sole remaining user may now be optimizable; revisit it.
              _worklist.push(in->unique_out());
            } else if (in->outcnt() <= 2 && dead->is_Phi()) {
              if (in->Opcode() == Op_Region) {
                _worklist.push(in);
              } else if (in->is_Store()) {
                // Revisit the Store's one or two remaining users.
                DUIterator_Fast imax, i = in->fast_outs(imax);
                _worklist.push(in->fast_out(i));
                i++;
                if (in->outcnt() == 2) {
                  _worklist.push(in->fast_out(i));
                  i++;
                }
                assert(!(i < imax), "sanity");
              }
            }
            if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory &&
                in->is_Proj() && in->in(0) != NULL && in->in(0)->is_Initialize()) {
              // A Load that directly follows an InitializeNode is
              // going away. The Stores that follow are candidates
              // again to be captured by the InitializeNode.
              for (DUIterator_Fast jmax, j = in->fast_outs(jmax); j < jmax; j++) {
                Node *n = in->fast_out(j);
                if (n->is_Store()) {
                  _worklist.push(n);
                }
              }
            }
          } // if (in != NULL && in != C->top())
        } // for (uint i = 0; i < dead->req(); i++)
        if (recurse) {
          // Newly dead inputs were pushed; process them before our outputs.
          continue;
        }
      } // if (!dead->is_Con())
    } // if (progress_state == PROCESS_INPUTS)

    // Aggressively kill globally dead uses
    // (Rather than pushing all the outs at once, we push one at a time,
    // plus the parent to resume later, because of the indefinite number
    // of edge deletions per loop trip.)
    if (dead->outcnt() > 0) {
      // Recursively remove output edges
      _stack.push(dead->raw_out(0), PROCESS_INPUTS);
    } else {
      // Finished disconnecting all input and output edges.
      _stack.pop();
      // Remove dead node from iterative worklist
      _worklist.remove(dead);
      // Constant node that has no out-edges and has only one in-edge from
      // root is usually dead. However, sometimes reshaping walk makes
      // it reachable by adding use edges. So, we will NOT count Con nodes
      // as dead to be conservative about the dead node count at any
      // given time.
      if (!dead->is_Con()) {
        C->record_dead_node(dead->_idx);
      }
      if (dead->is_macro()) {
        C->remove_macro_node(dead);
      }
      if (dead->is_expensive()) {
        C->remove_expensive_node(dead);
      }
    }
  } // while (_stack.is_nonempty())
}
1279 
//------------------------------subsume_node-----------------------------------
// Remove users from node 'old' and add them to node 'nn'.  Every in-edge of
// every user that pointed at 'old' is redirected to 'nn'; 'old' is then
// removed as a dead node.  A temporary node keeps 'nn' alive meanwhile.
void PhaseIterGVN::subsume_node( Node *old, Node *nn ) {
  assert( old != hash_find(old), "should already been removed" );
  assert( old != C->top(), "cannot subsume top node");
  // Copy debug or profile information to the new version:
  C->copy_node_notes_to(nn, old);
  // Move users of node 'old' to node 'nn'
  for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin; ) {
    Node* use = old->last_out(i);  // for each use...
    // use might need re-hashing (but it won't if it's a new node)
    bool is_in_table = _table.hash_delete( use );
    // Update use-def info as well
    // We remove all occurrences of old within use->in,
    // so as to avoid rehashing any node more than once.
    // The hash table probe swamps any outer loop overhead.
    uint num_edges = 0;
    for (uint jmax = use->len(), j = 0; j < jmax; j++) {
      if (use->in(j) == old) {
        use->set_req(j, nn);
        ++num_edges;
      }
    }
    // Insert into GVN hash table if unique
    // If a duplicate, 'use' will be cleaned up when pulled off worklist
    if( is_in_table ) {
      hash_find_insert(use);
    }
    i -= num_edges;    // we deleted 1 or more copies of this edge
  }

  // Smash all inputs to 'old', isolating him completely
  Node *temp = new (C) Node(1);
  temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
  remove_dead_node( old );
  temp->del_req(0);         // Yank bogus edge
#ifndef PRODUCT
  if( VerifyIterativeGVN ) {
    // Keep the verify window pointing at the replacement, not the corpse.
    for ( int i = 0; i < _verify_window_size; i++ ) {
      if ( _verify_window[i] == old )
        _verify_window[i] = nn;
    }
  }
#endif
  _worklist.remove(temp);   // this can be necessary
  temp->destruct();         // reuse the _idx of this little guy
}
1327 
1328 //------------------------------add_users_to_worklist--------------------------
1329 void PhaseIterGVN::add_users_to_worklist0( Node *n ) {
1330   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1331     _worklist.push(n->fast_out(i));  // Push on worklist
1332   }
1333 }
1334 
// Push the users of 'n' onto the worklist, plus selected second-level users
// whose optimizations are enabled by a change to 'n' (projections, CMP/BOOL
// chains, casts feeding Phis, shift pairs, AddP memory users, and stores
// dependent on allocation/initialization).
void PhaseIterGVN::add_users_to_worklist( Node *n ) {
  add_users_to_worklist0(n);

  // Move users of node to worklist
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i); // Get use

    if( use->is_Multi() ||      // Multi-definer?  Push projs on worklist
        use->is_Store() )       // Enable store/load same address
      add_users_to_worklist0(use);

    // If we changed the receiver type to a call, we need to revisit
    // the Catch following the call.  It's looking for a non-NULL
    // receiver to know when to enable the regular fall-through path
    // in addition to the NullPtrException path.
    if (use->is_CallDynamicJava() && n == use->in(TypeFunc::Parms)) {
      Node* p = use->as_CallDynamicJava()->proj_out(TypeFunc::Control);
      if (p != NULL) {
        add_users_to_worklist0(p);
      }
    }

    if( use->is_Cmp() ) {       // Enable CMP/BOOL optimization
      add_users_to_worklist(use); // Put Bool on worklist
      // Look for the 'is_x2logic' pattern: "x ? : 0 : 1" and put the
      // phi merging either 0 or 1 onto the worklist
      if (use->outcnt() > 0) {
        Node* bol = use->raw_out(0);
        if (bol->outcnt() > 0) {
          Node* iff = bol->raw_out(0);
          if (iff->outcnt() == 2) {
            // Walk Cmp -> Bool -> If -> both projections to the merge point.
            Node* ifproj0 = iff->raw_out(0);
            Node* ifproj1 = iff->raw_out(1);
            if (ifproj0->outcnt() > 0 && ifproj1->outcnt() > 0) {
              Node* region0 = ifproj0->raw_out(0);
              Node* region1 = ifproj1->raw_out(0);
              if( region0 == region1 )
                add_users_to_worklist0(region0);
            }
          }
        }
      }
    }

    uint use_op = use->Opcode();
    // If changed Cast input, check Phi users for simple cycles
    if( use->is_ConstraintCast() || use->is_CheckCastPP() ) {
      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
        Node* u = use->fast_out(i2);
        if (u->is_Phi())
          _worklist.push(u);
      }
    }
    // If changed LShift inputs, check RShift users for useless sign-ext
    if( use_op == Op_LShiftI ) {
      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
        Node* u = use->fast_out(i2);
        if (u->Opcode() == Op_RShiftI)
          _worklist.push(u);
      }
    }
    // If changed AddP inputs, check Stores for loop invariant
    if( use_op == Op_AddP ) {
      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
        Node* u = use->fast_out(i2);
        if (u->is_Mem())
          _worklist.push(u);
      }
    }
    // If changed initialization activity, check dependent Stores
    if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
      InitializeNode* init = use->as_Allocate()->initialization();
      if (init != NULL) {
        Node* imem = init->proj_out(TypeFunc::Memory);
        if (imem != NULL)  add_users_to_worklist0(imem);
      }
    }
    if (use_op == Op_Initialize) {
      Node* imem = use->as_Initialize()->proj_out(TypeFunc::Memory);
      if (imem != NULL)  add_users_to_worklist0(imem);
    }
  }
}
1418 
1419 /**
1420  * Remove the speculative part of all types that we know of
1421  */
1422 void PhaseIterGVN::remove_speculative_types()  {
1423   assert(UseTypeSpeculation, "speculation is off");
1424   for (uint i = 0; i < _types.Size(); i++)  {
1425     const Type* t = _types.fast_lookup(i);
1426     if (t != NULL) {
1427       _types.map(i, t->remove_speculative());
1428     }
1429   }
1430   _table.check_no_speculative_types();
1431 }
1432 
1433 //=============================================================================
#ifndef PRODUCT
// Cumulative CCP statistics across compilations: number of PhaseCCP runs
// and total constants discovered (updated in ~PhaseCCP).
uint PhaseCCP::_total_invokes   = 0;
uint PhaseCCP::_total_constants = 0;
#endif
//------------------------------PhaseCCP---------------------------------------
// Conditional Constant Propagation, ala Wegman & Zadeck.  Inherits the
// worklist/stack from the given IGVN phase and immediately runs the
// optimistic analysis over the whole graph.
PhaseCCP::PhaseCCP( PhaseIterGVN *igvn ) : PhaseIterGVN(igvn) {
  NOT_PRODUCT( clear_constants(); )
  assert( _worklist.size() == 0, "" );
  // Clear out _nodes from IterGVN.  Must be clear to transform call.
  _nodes.clear();               // Clear out from IterGVN
  analyze();
}
1447 
#ifndef PRODUCT
//------------------------------~PhaseCCP--------------------------------------
// Fold this pass's statistics into the global CCP counters.
PhaseCCP::~PhaseCCP() {
  inc_invokes();
  _total_constants += count_constants();
}
#endif
1455 
1456 
#ifdef ASSERT
// Debug helper: verify a CCP type update is monotonic (t must be at least
// as wide as t0), and that for matching Int/Long bases the _widen counter
// never decreases.  Always returns true; failures assert.
static bool ccp_type_widens(const Type* t, const Type* t0) {
  assert(t->meet(t0) == t, "Not monotonic");
  if (t->base() == t0->base()) {
    if (t->base() == Type::Int) {
      assert(t0->isa_int()->_widen <= t->isa_int()->_widen, "widen increases");
    } else if (t->base() == Type::Long) {
      assert(t0->isa_long()->_widen <= t->isa_long()->_widen, "widen increases");
    }
  }
  return true;
}
#endif //ASSERT
1471 
//------------------------------analyze----------------------------------------
// Optimistic fixpoint analysis: start every node at TOP, then repeatedly
// recompute Value() and propagate any type change to users until the
// worklist empties.  Types only move down the lattice (see ccp_type_widens).
void PhaseCCP::analyze() {
  // Initialize all types to TOP, optimistic analysis
  for (int i = C->unique() - 1; i >= 0; i--)  {
    _types.map(i,Type::TOP);
  }

  // Push root onto worklist
  Unique_Node_List worklist;
  worklist.push(C->root());

  // Pull from worklist; compute new value; push changes out.
  // This loop is the meat of CCP.
  while( worklist.size() ) {
    Node *n = worklist.pop();
    const Type *t = n->Value(this);
    if (t != type(n)) {
      assert(ccp_type_widens(t, type(n)), "ccp type must widen");
#ifndef PRODUCT
      if( TracePhaseCCP ) {
        t->dump();
        do { tty->print("\t"); } while (tty->position() < 16);
        n->dump();
      }
#endif
      set_type(n, t);
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node* m = n->fast_out(i);   // Get user
        if( m->is_Region() ) {  // New path to Region?  Must recheck Phis too
          for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
            Node* p = m->fast_out(i2); // Propagate changes to uses
            if( p->bottom_type() != type(p) ) // If not already bottomed out
              worklist.push(p); // Propagate change to user
          }
        }
        // If we changed the receiver type to a call, we need to revisit
        // the Catch following the call.  It's looking for a non-NULL
        // receiver to know when to enable the regular fall-through path
        // in addition to the NullPtrException path
        if (m->is_Call()) {
          for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
            Node* p = m->fast_out(i2);  // Propagate changes to uses
            if (p->is_Proj() && p->as_Proj()->_con == TypeFunc::Control && p->outcnt() == 1)
              worklist.push(p->unique_out());
          }
        }
        if( m->bottom_type() != type(m) ) // If not already bottomed out
          worklist.push(m);     // Propagate change to user
      }
    }
  }
}
1524 
//------------------------------do_transform-----------------------------------
// Top level driver for the recursive transformer
void PhaseCCP::do_transform() {
  // Correct leaves of new-space Nodes; they point to old-space.
  C->set_root( transform(C->root())->as_Root() );
  assert( C->top(),  "missing TOP node" );
  assert( C->root(), "missing root" );

  // Eagerly remove castPP nodes here. CastPP nodes might not be
  // removed in the subsequent IGVN phase if a node that changes
  // in(1) of a castPP is processed prior to the castPP node.
  for (uint i = 0; i < _worklist.size(); i++) {
    Node* n = _worklist.at(i);

    if (n->is_ConstraintCast()) {
      Node* nn = n->Identity(this);
      if (nn != n) {
        replace_node(n, nn);
        // NOTE(review): --i presumably compensates for replace_node
        // removing 'n' from the worklist, shifting later entries into
        // slot i — confirm against Unique_Node_List::remove semantics.
        --i;
      }
    }
  }
}
1548 
//------------------------------transform--------------------------------------
// Given a Node in old-space, clone him into new-space.
// Convert any of his old-space children into new-space children.
// Iterative (explicit stack) walk over all inputs; _nodes doubles as the
// visited map so each node is transformed exactly once.
Node *PhaseCCP::transform( Node *n ) {
  Node *new_node = _nodes[n->_idx]; // Check for transformed node
  if( new_node != NULL )
    return new_node;                // Been there, done that, return old answer
  new_node = transform_once(n);     // Check for constant
  _nodes.map( n->_idx, new_node );  // Flag as having been cloned

  // Allocate stack of size _nodes.Size()/2 to avoid frequent realloc
  GrowableArray <Node *> trstack(C->unique() >> 1);

  trstack.push(new_node);           // Process children of cloned node
  while ( trstack.is_nonempty() ) {
    Node *clone = trstack.pop();
    uint cnt = clone->req();
    for( uint i = 0; i < cnt; i++ ) {          // For all inputs do
      Node *input = clone->in(i);
      if( input != NULL ) {                    // Ignore NULLs
        Node *new_input = _nodes[input->_idx]; // Check for cloned input node
        if( new_input == NULL ) {
          new_input = transform_once(input);   // Check for constant
          _nodes.map( input->_idx, new_input );// Flag as having been cloned
          trstack.push(new_input);
        }
        // transform_once replaces in place, so the edge should already
        // point at the transformed input.
        assert( new_input == clone->in(i), "insanity check");
      }
    }
  }
  return new_node;
}
1581 
1582 
//------------------------------transform_once---------------------------------
// For PhaseCCP, transformation is IDENTITY unless Node computed a constant.
// When the lattice type is a singleton, the node is replaced by the matching
// constant node (TOP included); otherwise any sharpened type is captured into
// the node and candidates for the follow-up IGVN pass are pushed on _worklist.
Node *PhaseCCP::transform_once( Node *n ) {
  const Type *t = type(n);
  // Constant?  Use constant Node instead
  if( t->singleton() ) {
    Node *nn = n;               // Default is to return the original constant
    if( t == Type::TOP ) {
      // cache my top node on the Compile instance
      // (rebuild it if absent or if its control input was clipped)
      if( C->cached_top_node() == NULL || C->cached_top_node()->in(0) == NULL ) {
        C->set_cached_top_node( ConNode::make(C, Type::TOP) );
        set_type(C->top(), Type::TOP);
      }
      nn = C->top();
    }
    if( !n->is_Con() ) {
      if( t != Type::TOP ) {
        nn = makecon(t);        // ConNode::make(t);
        NOT_PRODUCT( inc_constants(); )   // debug-only statistics counter
      } else if( n->is_Region() ) { // Unreachable region
        // Note: nn == C->top()
        n->set_req(0, NULL);        // Cut selfreference
        // Eagerly remove dead phis to avoid phis copies creation.
        for (DUIterator i = n->outs(); n->has_out(i); i++) {
          Node* m = n->out(i);
          if( m->is_Phi() ) {
            assert(type(m) == Type::TOP, "Unreachable region should not have live phis.");
            replace_node(m, nn);
            --i; // deleted this phi; rescan starting with next position
          }
        }
      }
      replace_node(n,nn);       // Update DefUse edges for new constant
    }
    return nn;
  }

  // If x is a TypeNode, capture any more-precise type permanently into Node
  if (t != n->bottom_type()) {
    hash_delete(n);             // changing bottom type may force a rehash
    n->raise_bottom_type(t);
    _worklist.push(n);          // n re-enters the hash table via the worklist
  }

  // Idealize graph using DU info.  Must clone() into new-space.
  // DU info is generally used to show profitability, progress or safety
  // (but generally not needed for correctness).
  Node *nn = n->Ideal_DU_postCCP(this);

  // TEMPORARY fix to ensure that 2nd GVN pass eliminates NULL checks
  // (these opcodes are revisited unconditionally by the follow-up IGVN)
  switch( n->Opcode() ) {
  case Op_FastLock:      // Revisit FastLocks for lock coarsening
  case Op_If:
  case Op_CountedLoopEnd:
  case Op_Region:
  case Op_Loop:
  case Op_CountedLoop:
  case Op_Conv2B:
  case Op_Opaque1:
  case Op_Opaque2:
    _worklist.push(n);
    break;
  default:
    break;
  }
  if( nn ) {
    // Ideal_DU_postCCP produced a replacement; revisit n and its users.
    _worklist.push(n);
    // Put users of 'n' onto worklist for second igvn transform
    add_users_to_worklist(n);
    return nn;
  }

  return  n;
}
1657 
1658 //---------------------------------saturate------------------------------------
1659 const Type* PhaseCCP::saturate(const Type* new_type, const Type* old_type,
1660                                const Type* limit_type) const {
1661   const Type* wide_type = new_type->widen(old_type, limit_type);
1662   if (wide_type != new_type) {          // did we widen?
1663     // If so, we may have widened beyond the limit type.  Clip it back down.
1664     new_type = wide_type->filter(limit_type);
1665   }
1666   return new_type;
1667 }
1668 
//------------------------------print_statistics-------------------------------
#ifndef PRODUCT
// Debug-only summary: number of CCP invocations and constants found.
// NOTE(review): %d assumes the counters are signed — confirm against their
// declarations (they are not visible in this chunk).
void PhaseCCP::print_statistics() {
  tty->print_cr("CCP: %d  constants found: %d", _total_invokes, _total_constants);
}
#endif
1675 
1676 
1677 //=============================================================================
1678 #ifndef PRODUCT
1679 uint PhasePeephole::_total_peepholes = 0;
1680 #endif
//------------------------------PhasePeephole----------------------------------
// Peephole optimization pass over machine nodes, run after register
// allocation (see do_transform below).  The previous comment here —
// "Conditional Constant Propagation, ala Wegman & Zadeck" — was a
// copy/paste leftover from PhaseCCP and did not describe this phase.
PhasePeephole::PhasePeephole( PhaseRegAlloc *regalloc, PhaseCFG &cfg )
  : PhaseTransform(Peephole), _regalloc(regalloc), _cfg(cfg) {
  NOT_PRODUCT( clear_peepholes(); )  // Debug-only: zero this phase's counter
}
1687 
#ifndef PRODUCT
//------------------------------~PhasePeephole---------------------------------
// Debug-only destructor: fold this phase's peephole count into the global
// total reported by PhasePeephole::print_statistics().
PhasePeephole::~PhasePeephole() {
  _total_peepholes += count_peepholes();
}
#endif
1694 
1695 //------------------------------transform--------------------------------------
1696 Node *PhasePeephole::transform( Node *n ) {
1697   ShouldNotCallThis();
1698   return NULL;
1699 }
1700 
//------------------------------do_transform-----------------------------------
// Walk every instruction of every basic block, offering each MachNode to its
// machine-dependent peephole() hook.  When a rule fires, the matched
// instruction(s) are removed from the block and the combined replacement is
// inserted; the removed nodes stay allocated because the register allocator
// keeps live-range info indexed by node idx (see comment below).
void PhasePeephole::do_transform() {
  bool method_name_not_printed = true;   // print method name at most once

  // Examine each basic block
  for (uint block_number = 1; block_number < _cfg.number_of_blocks(); ++block_number) {
    Block* block = _cfg.get_block(block_number);
    bool block_not_printed = true;       // print each block at most once

    // and each instruction within a block
    uint end_index = block->number_of_nodes();
    // block->end_idx() not valid after PhaseRegAlloc
    for( uint instruction_index = 1; instruction_index < end_index; ++instruction_index ) {
      Node     *n = block->get_node(instruction_index);
      if( n->is_Mach() ) {
        MachNode *m = n->as_Mach();
        int deleted_count = 0;
        // check for peephole opportunities
        MachNode *m2 = m->peephole( block, instruction_index, _regalloc, deleted_count, C );
        if( m2 != NULL ) {
#ifndef PRODUCT
          if( PrintOptoPeephole ) {
            // Print method, first time only
            if( C->method() && method_name_not_printed ) {
              C->method()->print_short_name(); tty->cr();
              method_name_not_printed = false;
            }
            // Print this block
            if( Verbose && block_not_printed) {
              tty->print_cr("in block");
              block->dump();
              block_not_printed = false;
            }
            // Print instructions being deleted
            for( int i = (deleted_count - 1); i >= 0; --i ) {
              block->get_node(instruction_index-i)->as_Mach()->format(_regalloc); tty->cr();
            }
            tty->print_cr("replaced with");
            // Print new instruction
            m2->format(_regalloc);
            tty->print("\n\n");
          }
#endif
          // Remove old nodes from basic block and update instruction_index
          // (old nodes still exist and may have edges pointing to them
          //  as register allocation info is stored in the allocator using
          //  the node index to live range mappings.)
          uint safe_instruction_index = (instruction_index - deleted_count);
          for( ; (instruction_index > safe_instruction_index); --instruction_index ) {
            block->remove_node( instruction_index );
          }
          // install new node after safe_instruction_index
          block->insert_node(m2, safe_instruction_index + 1);
          end_index = block->number_of_nodes() - 1; // Recompute new block size
          NOT_PRODUCT( inc_peepholes(); )           // debug-only counter
        }
      }
    }
  }
}
1761 
1762 //------------------------------print_statistics-------------------------------
1763 #ifndef PRODUCT
1764 void PhasePeephole::print_statistics() {
1765   tty->print_cr("Peephole: peephole rules applied: %d",  _total_peepholes);
1766 }
1767 #endif
1768 
1769 
1770 //=============================================================================
1771 //------------------------------set_req_X--------------------------------------
1772 void Node::set_req_X( uint i, Node *n, PhaseIterGVN *igvn ) {
1773   assert( is_not_dead(n), "can not use dead node");
1774   assert( igvn->hash_find(this) != this, "Need to remove from hash before changing edges" );
1775   Node *old = in(i);
1776   set_req(i, n);
1777 
1778   // old goes dead?
1779   if( old ) {
1780     switch (old->outcnt()) {
1781     case 0:
1782       // Put into the worklist to kill later. We do not kill it now because the
1783       // recursive kill will delete the current node (this) if dead-loop exists
1784       if (!old->is_top())
1785         igvn->_worklist.push( old );
1786       break;
1787     case 1:
1788       if( old->is_Store() || old->has_special_unique_user() )
1789         igvn->add_users_to_worklist( old );
1790       break;
1791     case 2:
1792       if( old->is_Store() )
1793         igvn->add_users_to_worklist( old );
1794       if( old->Opcode() == Op_Region )
1795         igvn->_worklist.push(old);
1796       break;
1797     case 3:
1798       if( old->Opcode() == Op_Region ) {
1799         igvn->_worklist.push(old);
1800         igvn->add_users_to_worklist( old );
1801       }
1802       break;
1803     default:
1804       break;
1805     }
1806   }
1807 
1808 }
1809 
//-------------------------------replace_by-----------------------------------
// Using def-use info, replace one node for another.  Follow the def-use info
// to all users of the OLD node.  Then make all uses point to the NEW node.
// Iterates the out-edges from the last one backward; since rewriting an edge
// removes it from this node's out-list, the index is decremented by the
// number of edges rewritten for each user.
void Node::replace_by(Node *new_node) {
  assert(!is_top(), "top node has no DU info");
  for (DUIterator_Last imin, i = last_outs(imin); i >= imin; ) {
    Node* use = last_out(i);
    uint uses_found = 0;
    // A single user may reference 'this' on several slots: scan both the
    // required inputs (j < req()) and the precedence edges (j >= req()).
    for (uint j = 0; j < use->len(); j++) {
      if (use->in(j) == this) {
        if (j < use->req())
              use->set_req(j, new_node);
        else  use->set_prec(j, new_node);
        uses_found++;
      }
    }
    i -= uses_found;    // we deleted 1 or more copies of this edge
  }
}
1829 
1830 //=============================================================================
1831 //-----------------------------------------------------------------------------
1832 void Type_Array::grow( uint i ) {
1833   if( !_max ) {
1834     _max = 1;
1835     _types = (const Type**)_a->Amalloc( _max * sizeof(Type*) );
1836     _types[0] = NULL;
1837   }
1838   uint old = _max;
1839   while( i >= _max ) _max <<= 1;        // Double to fit
1840   _types = (const Type**)_a->Arealloc( _types, old*sizeof(Type*),_max*sizeof(Type*));
1841   memset( &_types[old], 0, (_max-old)*sizeof(Type*) );
1842 }
1843 
1844 //------------------------------dump-------------------------------------------
1845 #ifndef PRODUCT
1846 void Type_Array::dump() const {
1847   uint max = Size();
1848   for( uint i = 0; i < max; i++ ) {
1849     if( _types[i] != NULL ) {
1850       tty->print("  %d\t== ", i); _types[i]->dump(); tty->cr();
1851     }
1852   }
1853 }
1854 #endif