src/share/vm/opto/phaseX.cpp

  91 }
  92 
  93 //------------------------------hash_find--------------------------------------
  94 // Find in hash table
  95 Node *NodeHash::hash_find( const Node *n ) {
  96   // ((Node*)n)->set_hash( n->hash() );
  97   uint hash = n->hash();
  98   if (hash == Node::NO_HASH) {
  99     NOT_PRODUCT( _lookup_misses++ );
 100     return NULL;
 101   }
 102   uint key = hash & (_max-1);
 103   uint stride = key | 0x01;
 104   NOT_PRODUCT( _look_probes++ );
 105   Node *k = _table[key];        // Get hashed value
 106   if( !k ) {                    // ?Miss?
 107     NOT_PRODUCT( _lookup_misses++ );
 108     return NULL;                // Miss!
 109   }
 110 
 111   int op = n->Opcode();
 112   uint req = n->req();
 113   while( 1 ) {                  // While probing hash table
 114     if( k->req() == req &&      // Same count of inputs
 115         k->Opcode() == op ) {   // Same Opcode
 116       for( uint i=0; i<req; i++ )
 117         if( n->in(i)!=k->in(i)) // Different inputs?
 118           goto collision;       // "goto" is a speed hack...
 119       if( n->cmp(*k) ) {        // Check for any special bits
 120         NOT_PRODUCT( _lookup_hits++ );
 121         return k;               // Hit!
 122       }
 123     }
 124   collision:
 125     NOT_PRODUCT( _look_probes++ );
 126     key = (key + stride/*7*/) & (_max-1); // Stride through table with relatively prime step
 127     k = _table[key];            // Get hashed value
 128     if( !k ) {                  // ?Miss?
 129       NOT_PRODUCT( _lookup_misses++ );
 130       return NULL;              // Miss!
 131     }
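
A note on the probe arithmetic above: _max is a power of two, so hash & (_max-1) masks the hash down to a slot, and stride = key | 0x01 forces an odd step. Any odd number is relatively prime to a power of two, so the sequence key, key+stride, key+2*stride, ... (mod _max) visits every slot before repeating, which is what makes the collision loop safe. A minimal standalone sketch of that property (kSize is an illustrative stand-in for _max, not HotSpot code):

    #include <cassert>
    #include <vector>

    // An odd stride is relatively prime to a power-of-two table size, so the
    // probe sequence visits every slot exactly once per full cycle.
    int main() {
      const unsigned kSize = 16;            // power of two, standing in for _max
      unsigned key = 5;                     // hash & (kSize - 1)
      unsigned stride = key | 0x01;         // forced odd, hence coprime to kSize
      std::vector<bool> seen(kSize, false);
      for (unsigned probe = 0; probe < kSize; probe++) {
        assert(!seen[key]);                 // no slot is revisited early
        seen[key] = true;
        key = (key + stride) & (kSize - 1); // same step as the collision loop
      }
      for (bool s : seen) assert(s);        // full cycle: every slot was probed
      return 0;
    }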


 143   if (hash == Node::NO_HASH) {
 144     NOT_PRODUCT( _lookup_misses++ );
 145     return NULL;
 146   }
 147   uint key = hash & (_max-1);
 148   uint stride = key | 0x01;     // stride must be relatively prime to table size
 149   uint first_sentinel = 0;      // replace a sentinel if seen.
 150   NOT_PRODUCT( _look_probes++ );
 151   Node *k = _table[key];        // Get hashed value
 152   if( !k ) {                    // ?Miss?
 153     NOT_PRODUCT( _lookup_misses++ );
 154     _table[key] = n;            // Insert into table!
 155     debug_only(n->enter_hash_lock()); // Lock down the node while in the table.
 156     check_grow();               // Grow table if insert hit limit
 157     return NULL;                // Miss!
 158   }
 159   else if( k == _sentinel ) {
 160     first_sentinel = key;      // Can insert here
 161   }
 162 
 163   int op = n->Opcode();
 164   uint req = n->req();
 165   while( 1 ) {                  // While probing hash table
 166     if( k->req() == req &&      // Same count of inputs
 167         k->Opcode() == op ) {   // Same Opcode
 168       for( uint i=0; i<req; i++ )
 169         if( n->in(i)!=k->in(i)) // Different inputs?
 170           goto collision;       // "goto" is a speed hack...
 171       if( n->cmp(*k) ) {        // Check for any special bits
 172         NOT_PRODUCT( _lookup_hits++ );
 173         return k;               // Hit!
 174       }
 175     }
 176   collision:
 177     NOT_PRODUCT( _look_probes++ );
 178     key = (key + stride) & (_max-1); // Stride through table w/ relatively prime step
 179     k = _table[key];            // Get hashed value
 180     if( !k ) {                  // ?Miss?
 181       NOT_PRODUCT( _lookup_misses++ );
 182       key = (first_sentinel == 0) ? key : first_sentinel; // ?saw sentinel?
 183       _table[key] = n;          // Insert into table!
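
The insert variant differs from hash_find in two ways: a miss inserts n on the spot (pinning it with enter_hash_lock in debug builds), and the probe remembers the first _sentinel slot it passes, i.e. a tombstone left behind by a deletion, so the final miss can fill that hole instead of the empty slot at the end of the chain. Note that first_sentinel == 0 doubles as the "none seen" flag, so a tombstone sitting in slot 0 is never reused; that is harmless, just a missed reuse. A hedged sketch of the tombstone-reuse shape, with illustrative names (Slot, kEmpty, kTombstone), not HotSpot's:

    // Returns the slot where a new element should land, preferring the first
    // tombstone passed on the way to an empty slot. Assumes at least one empty
    // slot exists (in HotSpot, check_grow() maintains that invariant).
    enum Slot { kEmpty, kTombstone, kOccupied };

    unsigned probe_insert(const Slot* table, unsigned size, unsigned hash) {
      unsigned key    = hash & (size - 1);
      unsigned stride = key | 0x01;
      unsigned first_tombstone = size;      // size means "none seen", avoiding
                                            // the slot-0 ambiguity noted above
      while (table[key] != kEmpty) {
        if (table[key] == kTombstone && first_tombstone == size)
          first_tombstone = key;            // remember the earliest reusable hole
        key = (key + stride) & (size - 1);
      }
      return (first_tombstone != size) ? first_tombstone : key;
    }

    int main() {
      Slot table[8] = { kOccupied, kEmpty,    kTombstone, kOccupied,
                        kEmpty,    kOccupied, kTombstone, kEmpty };
      return probe_insert(table, 8, 2) == 2 ? 0 : 1;  // reuses the tombstone at slot 2
    }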


 913 
 914   // Dead nodes in the hash table inherited from GVN were not treated as
 915   // roots during def-use info creation; hence they represent an invisible
 916   // use.  Clear them out.
 917   max = _table.size();
 918   for( uint i = 0; i < max; ++i ) {
 919     Node *n = _table.at(i);
 920     if(n != NULL && n != _table.sentinel() && n->outcnt() == 0) {
 921       if( n->is_top() ) continue;
 922       assert( false, "Parse::remove_useless_nodes missed this node");
 923       hash_delete(n);
 924     }
 925   }
 926 
 927   // Any Phis or Regions on the worklist probably had uses that could not
 928   // make more progress because the uses were made while the Phis and Regions
 929   // were in half-built states.  Put all uses of Phis and Regions on worklist.
 930   max = _worklist.size();
 931   for( uint j = 0; j < max; j++ ) {
 932     Node *n = _worklist.at(j);
 933     uint uop = n->Opcode();
 934     if( uop == Op_Phi || uop == Op_Region ||
 935         n->is_Type() ||
 936         n->is_Mem() )
 937       add_users_to_worklist(n);
 938   }
 939 }
 940 
 941 /**
 942  * Initialize worklist for each node.
 943  */
 944 void PhaseIterGVN::init_worklist(Node* first) {
 945   Unique_Node_List to_process;
 946   to_process.push(first);
 947 
 948   while (to_process.size() > 0) {
 949     Node* n = to_process.pop();
 950     if (!_worklist.member(n)) {
 951       _worklist.push(n);
 952 
 953       uint cnt = n->req();
 954       for(uint i = 0; i < cnt; i++) {
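
init_worklist replaces recursion with an explicit Unique_Node_List, so arbitrarily deep input chains cannot overflow the native stack, and the membership test keeps any node from being enqueued twice. The same shape in a self-contained sketch (GraphNode and collect_reachable are illustrative stand-ins, not HotSpot types):

    #include <unordered_set>
    #include <vector>

    struct GraphNode {
      std::vector<GraphNode*> inputs;       // def edges, like n->in(i)
    };

    // Push every node reachable through input edges exactly once.
    void collect_reachable(GraphNode* first, std::vector<GraphNode*>& worklist) {
      std::unordered_set<GraphNode*> on_list;  // plays the role of _worklist.member
      std::vector<GraphNode*> to_process{first};
      while (!to_process.empty()) {
        GraphNode* n = to_process.back();
        to_process.pop_back();
        if (on_list.insert(n).second) {     // first visit only
          worklist.push_back(n);
          for (GraphNode* in : n->inputs)
            if (in != nullptr) to_process.push_back(in);
        }
      }
    }

    int main() {
      GraphNode a, b, c;
      a.inputs = {&b, &c};
      b.inputs = {&c};                      // c is reachable along two paths
      std::vector<GraphNode*> wl;
      collect_reachable(&a, wl);
      return (int)wl.size() - 3;            // all three nodes, each pushed once
    }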


1337     if (progress_state == PROCESS_INPUTS) {
1338       // After following inputs, continue to outputs
1339       _stack.set_index(PROCESS_OUTPUTS);
1340       if (!dead->is_Con()) { // Don't kill constants, just their uses
1341         bool recurse = false;
1342         // Remove from hash table
1343         _table.hash_delete( dead );
1344         // Smash all inputs to 'dead', isolating him completely
1345         for (uint i = 0; i < dead->req(); i++) {
1346           Node *in = dead->in(i);
1347           if (in != NULL && in != C->top()) {  // Points to something?
1348             int nrep = dead->replace_edge(in, NULL);  // Kill edges
1349             assert((nrep > 0), "sanity");
1350             if (in->outcnt() == 0) { // Made input go dead?
1351               _stack.push(in, PROCESS_INPUTS); // Recursively remove
1352               recurse = true;
1353             } else if (in->outcnt() == 1 &&
1354                        in->has_special_unique_user()) {
1355               _worklist.push(in->unique_out());
1356             } else if (in->outcnt() <= 2 && dead->is_Phi()) {
1357               if (in->Opcode() == Op_Region) {
1358                 _worklist.push(in);
1359               } else if (in->is_Store()) {
1360                 DUIterator_Fast imax, i = in->fast_outs(imax);
1361                 _worklist.push(in->fast_out(i));
1362                 i++;
1363                 if (in->outcnt() == 2) {
1364                   _worklist.push(in->fast_out(i));
1365                   i++;
1366                 }
1367                 assert(!(i < imax), "sanity");
1368               }
1369             }
1370             if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory &&
1371                 in->is_Proj() && in->in(0) != NULL && in->in(0)->is_Initialize()) {
1372               // A Load that directly follows an InitializeNode is
1373               // going away. The Stores that follow are candidates
1374               // again to be captured by the InitializeNode.
1375               for (DUIterator_Fast jmax, j = in->fast_outs(jmax); j < jmax; j++) {
1376                 Node *n = in->fast_out(j);
1377                 if (n->is_Store()) {
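
The PROCESS_INPUTS phase above is a cascading kill: severing a dead node's input edges may drop an input's use count to zero, in which case that input is pushed on _stack and dismantled in turn, again without native recursion. A stripped-down sketch of just the cascade (RefNode and outcnt are stand-ins; the real code also re-enqueues special users, as the Phi/Store branches above show):

    #include <vector>

    struct RefNode {
      std::vector<RefNode*> inputs;
      int outcnt = 0;                       // number of uses, like Node::outcnt()
    };

    void kill_cascade(RefNode* dead, std::vector<RefNode*>& killed) {
      std::vector<RefNode*> stack{dead};
      while (!stack.empty()) {
        RefNode* d = stack.back();
        stack.pop_back();
        for (RefNode*& in : d->inputs) {    // smash all inputs, isolating d
          if (in != nullptr) {
            if (--in->outcnt == 0)          // input just lost its last use?
              stack.push_back(in);          // recursively remove it as well
            in = nullptr;
          }
        }
        killed.push_back(d);
      }
    }

    int main() {
      RefNode a, b, c;                      // chain a -> b -> c
      a.inputs = {&b};  b.outcnt = 1;
      b.inputs = {&c};  c.outcnt = 1;
      std::vector<RefNode*> killed;
      kill_cascade(&a, killed);
      return (int)killed.size() - 3;        // killing a takes b and c with it
    }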


1498 
1499   // Move users of node to worklist
1500   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1501     Node* use = n->fast_out(i); // Get use
1502 
1503     if( use->is_Multi() ||      // Multi-definer?  Push projs on worklist
1504         use->is_Store() )       // Enable store/load same address
1505       add_users_to_worklist0(use);
1506 
1507     // If we changed the receiver type of a call, we need to revisit
1508     // the Catch following the call.  It's looking for a non-NULL
1509     // receiver to know when to enable the regular fall-through path
1510     // in addition to the NullPtrException path.
1511     if (use->is_CallDynamicJava() && n == use->in(TypeFunc::Parms)) {
1512       Node* p = use->as_CallDynamicJava()->proj_out(TypeFunc::Control);
1513       if (p != NULL) {
1514         add_users_to_worklist0(p);
1515       }
1516     }
1517 
1518     uint use_op = use->Opcode();
1519     if(use->is_Cmp()) {       // Enable CMP/BOOL optimization
1520       add_users_to_worklist(use); // Put Bool on worklist
1521       if (use->outcnt() > 0) {
1522         Node* bol = use->raw_out(0);
1523         if (bol->outcnt() > 0) {
1524           Node* iff = bol->raw_out(0);
1525           if (iff->outcnt() == 2) {
1526             // Look for the 'is_x2logic' pattern: "x ? 0 : 1" and put the
1527             // phi merging either 0 or 1 onto the worklist
1528             Node* ifproj0 = iff->raw_out(0);
1529             Node* ifproj1 = iff->raw_out(1);
1530             if (ifproj0->outcnt() > 0 && ifproj1->outcnt() > 0) {
1531               Node* region0 = ifproj0->raw_out(0);
1532               Node* region1 = ifproj1->raw_out(0);
1533               if( region0 == region1 )
1534                 add_users_to_worklist0(region0);
1535             }
1536           }
1537         }
1538       }
1539       if (use_op == Op_CmpI) {
1540         Node* phi = countedloop_phi_from_cmp((CmpINode*)use, n);
1541         if (phi != NULL) {
1542           // If an opaque node feeds into the limit condition of a
1543           // CountedLoop, we need to process the Phi node for the
1544           // induction variable when the opaque node is removed:
1545           // the range of values taken by the Phi is now known and
1546           // so its type is also known.
1547           _worklist.push(phi);
1548         }
1549         Node* in1 = use->in(1);
1550         for (uint i = 0; i < in1->outcnt(); i++) {
1551           if (in1->raw_out(i)->Opcode() == Op_CastII) {
1552             Node* castii = in1->raw_out(i);
1553             if (castii->in(0) != NULL && castii->in(0)->in(0) != NULL && castii->in(0)->in(0)->is_If()) {
1554               Node* ifnode = castii->in(0)->in(0);
1555               if (ifnode->in(1) != NULL && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == use) {
1556                 // Reprocess a CastII node that may depend on an
1557                 // opaque node value when the opaque node is
1558                 // removed. In case it carries a dependency we can do
1559                 // a better job of computing its type.
1560                 _worklist.push(castii);
1561               }
1562             }
1563           }
1564         }
1565       }
1566     }
1567 
1568     // If changed Cast input, check Phi users for simple cycles
1569     if (use->is_ConstraintCast()) {
1570       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1571         Node* u = use->fast_out(i2);
1572         if (u->is_Phi())
1573           _worklist.push(u);
1574       }
1575     }
1576     // If changed LShift inputs, check RShift users for useless sign-ext
1577     if( use_op == Op_LShiftI ) {
1578       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1579         Node* u = use->fast_out(i2);
1580         if (u->Opcode() == Op_RShiftI)
1581           _worklist.push(u);
1582       }
1583     }
1584     // If changed AddI/SubI inputs, check CmpU for range check optimization.
1585     if (use_op == Op_AddI || use_op == Op_SubI) {
1586       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1587         Node* u = use->fast_out(i2);
1588         if (u->is_Cmp() && (u->Opcode() == Op_CmpU)) {
1589           _worklist.push(u);
1590         }
1591       }
1592     }
1593     // If changed AddP inputs, check Stores for loop invariant
1594     if( use_op == Op_AddP ) {
1595       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1596         Node* u = use->fast_out(i2);
1597         if (u->is_Mem())
1598           _worklist.push(u);
1599       }
1600     }
1601     // If changed initialization activity, check dependent Stores
1602     if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
1603       InitializeNode* init = use->as_Allocate()->initialization();
1604       if (init != NULL) {
1605         Node* imem = init->proj_out(TypeFunc::Memory);
1606         if (imem != NULL)  add_users_to_worklist0(imem);
1607       }
1608     }
1609     if (use_op == Op_Initialize) {
1610       Node* imem = use->as_Initialize()->proj_out(TypeFunc::Memory);
1611       if (imem != NULL)  add_users_to_worklist0(imem);
1612     }
1613   }
1614 }
1615 
1616 /**
1617  * Remove the speculative part of all types that we know of
1618  */
1619 void PhaseIterGVN::remove_speculative_types()  {
1620   assert(UseTypeSpeculation, "speculation is off");
1621   for (uint i = 0; i < _types.Size(); i++)  {
1622     const Type* t = _types.fast_lookup(i);
1623     if (t != NULL) {
1624       _types.map(i, t->remove_speculative());
1625     }
1626   }
1627   _table.check_no_speculative_types();
1628 }
1629 
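
Much of add_users_to_worklist above is pattern chasing: from a changed Cmp it walks Cmp -> Bool -> If -> the two projections -> a common Region, i.e. the diamond behind the 'is_x2logic' "x ? 0 : 1" shape, and re-enqueues the merge point. The walk in isolation (DNode and out are illustrative stand-ins for Node and raw_out):

    #include <vector>

    struct DNode {
      std::vector<DNode*> out;              // users, like raw_out(i)
    };

    // Returns the common Region below the If's two projections, or nullptr.
    DNode* diamond_region(DNode* cmp) {
      if (cmp->out.empty()) return nullptr;
      DNode* bol = cmp->out[0];             // Cmp -> Bool
      if (bol->out.empty()) return nullptr;
      DNode* iff = bol->out[0];             // Bool -> If
      if (iff->out.size() != 2) return nullptr;
      DNode* p0 = iff->out[0];              // the two If projections
      DNode* p1 = iff->out[1];
      if (p0->out.empty() || p1->out.empty()) return nullptr;
      return (p0->out[0] == p1->out[0]) ? p0->out[0] : nullptr;
    }

    int main() {
      DNode cmp, bol, iff, p0, p1, region;
      cmp.out = {&bol};  bol.out = {&iff};
      iff.out = {&p0, &p1};
      p0.out = {&region};  p1.out = {&region};
      return diamond_region(&cmp) == &region ? 0 : 1;
    }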


1705         // If we changed the receiver type of a call, we need to revisit
1706         // the Catch following the call.  It's looking for a non-NULL
1707         // receiver to know when to enable the regular fall-through path
1708         // in addition to the NullPtrException path.
1709         if (m->is_Call()) {
1710           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1711             Node* p = m->fast_out(i2);  // Propagate changes to uses
1712             if (p->is_Proj() && p->as_Proj()->_con == TypeFunc::Control && p->outcnt() == 1) {
1713               worklist.push(p->unique_out());
1714             }
1715           }
1716         }
1717         if (m->bottom_type() != type(m)) { // If not already bottomed out
1718           worklist.push(m);     // Propagate change to user
1719         }
1720 
1721         // CmpU nodes can get their type information from two nodes up in the
1722         // graph (instead of from the nodes immediately above). Make sure they
1723         // are added to the worklist if nodes they depend on are updated, since
1724         // they could be missed and get wrong types otherwise.
1725         uint m_op = m->Opcode();
1726         if (m_op == Op_AddI || m_op == Op_SubI) {
1727           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1728             Node* p = m->fast_out(i2); // Propagate changes to uses
1729             if (p->Opcode() == Op_CmpU) {
1730               // Got a CmpU which might need the new type information from node n.
1731               if(p->bottom_type() != type(p)) { // If not already bottomed out
1732                 worklist.push(p); // Propagate change to user
1733               }
1734             }
1735           }
1736         }
1737         // If n is used in a counted loop exit condition then the type
1738         // of the counted loop's Phi depends on the type of n. See
1739         // PhiNode::Value().
1740         if (m_op == Op_CmpI) {
1741           PhiNode* phi = countedloop_phi_from_cmp((CmpINode*)m, n);
1742           if (phi != NULL) {
1743             worklist.push(phi);
1744           }
1745         }
1746       }
1747     }
1748   }
1749 }
1750 
1751 //------------------------------do_transform-----------------------------------
1752 // Top level driver for the recursive transformer
1753 void PhaseCCP::do_transform() {
1754   // Correct leaves of new-space Nodes; they point to old-space.
1755   C->set_root( transform(C->root())->as_Root() );
1756   assert( C->top(),  "missing TOP node" );
1757   assert( C->root(), "missing root" );
1758 }
1759 
1760 //------------------------------transform--------------------------------------


1820             assert(type(m) == Type::TOP, "Unreachable region should not have live phis.");
1821             replace_node(m, nn);
1822             --i; // deleted this phi; rescan starting with next position
1823           }
1824         }
1825       }
1826       replace_node(n,nn);       // Update DefUse edges for new constant
1827     }
1828     return nn;
1829   }
1830 
1831   // If n is a TypeNode, capture any more-precise type permanently into Node
1832   if (t != n->bottom_type()) {
1833     hash_delete(n);             // changing bottom type may force a rehash
1834     n->raise_bottom_type(t);
1835     _worklist.push(n);          // n re-enters the hash table via the worklist
1836   }
1837 
1838   // TEMPORARY fix to ensure that 2nd GVN pass eliminates NULL checks
1839   switch( n->Opcode() ) {
1840   case Op_FastLock:      // Revisit FastLocks for lock coarsening
1841   case Op_If:
1842   case Op_CountedLoopEnd:
1843   case Op_Region:
1844   case Op_Loop:
1845   case Op_CountedLoop:
1846   case Op_Conv2B:
1847   case Op_Opaque1:
1848   case Op_Opaque2:
1849     _worklist.push(n);
1850     break;
1851   default:
1852     break;
1853   }
1854 
1855   return  n;
1856 }
1857 
1858 //---------------------------------saturate------------------------------------
1859 const Type* PhaseCCP::saturate(const Type* new_type, const Type* old_type,
1860                                const Type* limit_type) const {
1861   const Type* wide_type = new_type->widen(old_type, limit_type);
1862   if (wide_type != new_type) {          // did we widen?
1863     // If so, we may have widened beyond the limit type.  Clip it back down.
1864     new_type = wide_type->filter(limit_type);
1865   }
1866   return new_type;
1867 }
1868 
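
saturate() is the CCP widening step: if the freshly computed type is still moving, widen() jumps it toward limit_type so the fixpoint iteration terminates quickly, and filter() clips any overshoot back inside the limit. A worked sketch on plain integer ranges (Range, and the deliberately crude widen(), are illustrative; the real Type::widen is staged and consults old_type so ranges cannot creep one unit per pass):

    #include <algorithm>
    #include <cstdio>

    struct Range { int lo, hi; };

    // Crude widening: overshoot on both sides in one jump.
    Range widen(Range t) { return Range{t.lo - 100, t.hi + 100}; }

    // Intersection with the limit, like Type::filter.
    Range filter(Range t, Range limit) {
      return Range{std::max(t.lo, limit.lo), std::min(t.hi, limit.hi)};
    }

    Range saturate(Range new_t, Range limit) {
      Range wide = widen(new_t);
      if (wide.lo != new_t.lo || wide.hi != new_t.hi)  // did we widen?
        new_t = filter(wide, limit);                   // clip back down
      return new_t;
    }

    int main() {
      Range r = saturate(Range{0, 10}, Range{0, 50});
      std::printf("[%d, %d]\n", r.lo, r.hi);           // prints [0, 50]
      return 0;
    }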


1974   assert( igvn->hash_find(this) != this, "Need to remove from hash before changing edges" );
1975   Node *old = in(i);
1976   set_req(i, n);
1977 
1978   // old goes dead?
1979   if( old ) {
1980     switch (old->outcnt()) {
1981     case 0:
1982       // Put into the worklist to kill later. We do not kill it now because the
 1983       // recursive kill will delete the current node (this) if a dead loop exists
1984       if (!old->is_top())
1985         igvn->_worklist.push( old );
1986       break;
1987     case 1:
1988       if( old->is_Store() || old->has_special_unique_user() )
1989         igvn->add_users_to_worklist( old );
1990       break;
1991     case 2:
1992       if( old->is_Store() )
1993         igvn->add_users_to_worklist( old );
1994       if( old->Opcode() == Op_Region )
1995         igvn->_worklist.push(old);
1996       break;
1997     case 3:
1998       if( old->Opcode() == Op_Region ) {
1999         igvn->_worklist.push(old);
2000         igvn->add_users_to_worklist( old );
2001       }
2002       break;
2003     default:
2004       break;
2005     }
2006   }
2007 
2008 }
2009 
2010 //-------------------------------replace_by-----------------------------------
2011 // Using def-use info, replace one node with another.  Follow the def-use info
2012 // to all users of the OLD node.  Then make all uses point to the NEW node.
2013 void Node::replace_by(Node *new_node) {
2014   assert(!is_top(), "top node has no DU info");
2015   for (DUIterator_Last imin, i = last_outs(imin); i >= imin; ) {
2016     Node* use = last_out(i);
2017     uint uses_found = 0;
2018     for (uint j = 0; j < use->len(); j++) {
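
replace_by, whose body continues past this hunk, is the DU-driven rewrite primitive: walk every user of the OLD node and redirect each matching input edge to the NEW node. Its core, reduced to a sketch (UseNode with explicit uses/inputs vectors stands in for Node's DU info and in()/set_req()):

    #include <vector>

    struct UseNode {
      std::vector<UseNode*> inputs;         // def edges
      std::vector<UseNode*> uses;           // use edges, the DU info
    };

    void replace_by(UseNode* old_node, UseNode* new_node) {
      while (!old_node->uses.empty()) {
        UseNode* use = old_node->uses.back();
        old_node->uses.pop_back();
        for (UseNode*& in : use->inputs)    // redirect every matching edge
          if (in == old_node) {
            in = new_node;
            new_node->uses.push_back(use);
          }
      }
    }

    int main() {
      UseNode oldn, newn, user;
      user.inputs = {&oldn};
      oldn.uses = {&user};
      replace_by(&oldn, &newn);
      return (user.inputs[0] == &newn && newn.uses.size() == 1) ? 0 : 1;
    }

The same hunks now repeat in their patched form, where the plain int opcode becomes the scoped Opcodes enum.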




  91 }
  92 
  93 //------------------------------hash_find--------------------------------------
  94 // Find in hash table
  95 Node *NodeHash::hash_find( const Node *n ) {
  96   // ((Node*)n)->set_hash( n->hash() );
  97   uint hash = n->hash();
  98   if (hash == Node::NO_HASH) {
  99     NOT_PRODUCT( _lookup_misses++ );
 100     return NULL;
 101   }
 102   uint key = hash & (_max-1);
 103   uint stride = key | 0x01;
 104   NOT_PRODUCT( _look_probes++ );
 105   Node *k = _table[key];        // Get hashed value
 106   if( !k ) {                    // ?Miss?
 107     NOT_PRODUCT( _lookup_misses++ );
 108     return NULL;                // Miss!
 109   }
 110 
 111   Opcodes op = n->Opcode();
 112   uint req = n->req();
 113   while( 1 ) {                  // While probing hash table
 114     if( k->req() == req &&      // Same count of inputs
 115         k->Opcode() == op ) {   // Same Opcode
 116       for( uint i=0; i<req; i++ )
 117         if( n->in(i)!=k->in(i)) // Different inputs?
 118           goto collision;       // "goto" is a speed hack...
 119       if( n->cmp(*k) ) {        // Check for any special bits
 120         NOT_PRODUCT( _lookup_hits++ );
 121         return k;               // Hit!
 122       }
 123     }
 124   collision:
 125     NOT_PRODUCT( _look_probes++ );
 126     key = (key + stride/*7*/) & (_max-1); // Stride through table with relatively prime step
 127     k = _table[key];            // Get hashed value
 128     if( !k ) {                  // ?Miss?
 129       NOT_PRODUCT( _lookup_misses++ );
 130       return NULL;              // Miss!
 131     }
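
This is the heart of the change: "int op = n->Opcode()" becomes "Opcodes op = n->Opcode()", and bare constants like Op_Phi become Opcodes::Op_Phi throughout. A minimal sketch of what the scoped enum buys (the cut-down Opcodes here is hypothetical; the real one lists every node opcode):

    // A scoped enum does not implicitly convert to int, so accidental
    // arithmetic and cross-enum comparisons no longer compile, and every
    // use site must qualify the constant, as the patched code below does.
    enum class Opcodes { Op_Phi, Op_Region, Op_CmpI, Op_CmpU };

    bool is_merge_point(Opcodes op) {
      return op == Opcodes::Op_Phi || op == Opcodes::Op_Region;
    }

    int main() {
      Opcodes op = Opcodes::Op_Phi;   // was: int op = Op_Phi;
      // int raw = op;                // error: no implicit conversion to int
      // if (op == 0) { ... }         // error: no comparison with a plain int
      return is_merge_point(op) ? 0 : 1;
    }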


 143   if (hash == Node::NO_HASH) {
 144     NOT_PRODUCT( _lookup_misses++ );
 145     return NULL;
 146   }
 147   uint key = hash & (_max-1);
 148   uint stride = key | 0x01;     // stride must be relatively prime to table size
 149   uint first_sentinel = 0;      // replace a sentinel if seen.
 150   NOT_PRODUCT( _look_probes++ );
 151   Node *k = _table[key];        // Get hashed value
 152   if( !k ) {                    // ?Miss?
 153     NOT_PRODUCT( _lookup_misses++ );
 154     _table[key] = n;            // Insert into table!
 155     debug_only(n->enter_hash_lock()); // Lock down the node while in the table.
 156     check_grow();               // Grow table if insert hit limit
 157     return NULL;                // Miss!
 158   }
 159   else if( k == _sentinel ) {
 160     first_sentinel = key;      // Can insert here
 161   }
 162 
 163   Opcodes op = n->Opcode();
 164   uint req = n->req();
 165   while( 1 ) {                  // While probing hash table
 166     if( k->req() == req &&      // Same count of inputs
 167         k->Opcode() == op ) {   // Same Opcode
 168       for( uint i=0; i<req; i++ )
 169         if( n->in(i)!=k->in(i)) // Different inputs?
 170           goto collision;       // "goto" is a speed hack...
 171       if( n->cmp(*k) ) {        // Check for any special bits
 172         NOT_PRODUCT( _lookup_hits++ );
 173         return k;               // Hit!
 174       }
 175     }
 176   collision:
 177     NOT_PRODUCT( _look_probes++ );
 178     key = (key + stride) & (_max-1); // Stride through table w/ relatively prime step
 179     k = _table[key];            // Get hashed value
 180     if( !k ) {                  // ?Miss?
 181       NOT_PRODUCT( _lookup_misses++ );
 182       key = (first_sentinel == 0) ? key : first_sentinel; // ?saw sentinel?
 183       _table[key] = n;          // Insert into table!


 913 
 914   // Dead nodes in the hash table inherited from GVN were not treated as
 915   // roots during def-use info creation; hence they represent an invisible
 916   // use.  Clear them out.
 917   max = _table.size();
 918   for( uint i = 0; i < max; ++i ) {
 919     Node *n = _table.at(i);
 920     if(n != NULL && n != _table.sentinel() && n->outcnt() == 0) {
 921       if( n->is_top() ) continue;
 922       assert( false, "Parse::remove_useless_nodes missed this node");
 923       hash_delete(n);
 924     }
 925   }
 926 
 927   // Any Phis or Regions on the worklist probably had uses that could not
 928   // make more progress because the uses were made while the Phis and Regions
 929   // were in half-built states.  Put all uses of Phis and Regions on worklist.
 930   max = _worklist.size();
 931   for( uint j = 0; j < max; j++ ) {
 932     Node *n = _worklist.at(j);
 933     Opcodes uop = n->Opcode();
 934     if( uop == Opcodes::Op_Phi || uop == Opcodes::Op_Region ||
 935         n->is_Type() ||
 936         n->is_Mem() )
 937       add_users_to_worklist(n);
 938   }
 939 }
 940 
 941 /**
 942  * Initialize worklist for each node.
 943  */
 944 void PhaseIterGVN::init_worklist(Node* first) {
 945   Unique_Node_List to_process;
 946   to_process.push(first);
 947 
 948   while (to_process.size() > 0) {
 949     Node* n = to_process.pop();
 950     if (!_worklist.member(n)) {
 951       _worklist.push(n);
 952 
 953       uint cnt = n->req();
 954       for(uint i = 0; i < cnt; i++) {


1337     if (progress_state == PROCESS_INPUTS) {
1338       // After following inputs, continue to outputs
1339       _stack.set_index(PROCESS_OUTPUTS);
1340       if (!dead->is_Con()) { // Don't kill constants, just their uses
1341         bool recurse = false;
1342         // Remove from hash table
1343         _table.hash_delete( dead );
1344         // Smash all inputs to 'dead', isolating him completely
1345         for (uint i = 0; i < dead->req(); i++) {
1346           Node *in = dead->in(i);
1347           if (in != NULL && in != C->top()) {  // Points to something?
1348             int nrep = dead->replace_edge(in, NULL);  // Kill edges
1349             assert((nrep > 0), "sanity");
1350             if (in->outcnt() == 0) { // Made input go dead?
1351               _stack.push(in, PROCESS_INPUTS); // Recursively remove
1352               recurse = true;
1353             } else if (in->outcnt() == 1 &&
1354                        in->has_special_unique_user()) {
1355               _worklist.push(in->unique_out());
1356             } else if (in->outcnt() <= 2 && dead->is_Phi()) {
1357               if (in->Opcode() == Opcodes::Op_Region) {
1358                 _worklist.push(in);
1359               } else if (in->is_Store()) {
1360                 DUIterator_Fast imax, i = in->fast_outs(imax);
1361                 _worklist.push(in->fast_out(i));
1362                 i++;
1363                 if (in->outcnt() == 2) {
1364                   _worklist.push(in->fast_out(i));
1365                   i++;
1366                 }
1367                 assert(!(i < imax), "sanity");
1368               }
1369             }
1370             if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory &&
1371                 in->is_Proj() && in->in(0) != NULL && in->in(0)->is_Initialize()) {
1372               // A Load that directly follows an InitializeNode is
1373               // going away. The Stores that follow are candidates
1374               // again to be captured by the InitializeNode.
1375               for (DUIterator_Fast jmax, j = in->fast_outs(jmax); j < jmax; j++) {
1376                 Node *n = in->fast_out(j);
1377                 if (n->is_Store()) {


1498 
1499   // Move users of node to worklist
1500   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1501     Node* use = n->fast_out(i); // Get use
1502 
1503     if( use->is_Multi() ||      // Multi-definer?  Push projs on worklist
1504         use->is_Store() )       // Enable store/load same address
1505       add_users_to_worklist0(use);
1506 
1507     // If we changed the receiver type of a call, we need to revisit
1508     // the Catch following the call.  It's looking for a non-NULL
1509     // receiver to know when to enable the regular fall-through path
1510     // in addition to the NullPtrException path.
1511     if (use->is_CallDynamicJava() && n == use->in(TypeFunc::Parms)) {
1512       Node* p = use->as_CallDynamicJava()->proj_out(TypeFunc::Control);
1513       if (p != NULL) {
1514         add_users_to_worklist0(p);
1515       }
1516     }
1517 
1518     Opcodes use_op = use->Opcode();
1519     if(use->is_Cmp()) {       // Enable CMP/BOOL optimization
1520       add_users_to_worklist(use); // Put Bool on worklist
1521       if (use->outcnt() > 0) {
1522         Node* bol = use->raw_out(0);
1523         if (bol->outcnt() > 0) {
1524           Node* iff = bol->raw_out(0);
1525           if (iff->outcnt() == 2) {
1526             // Look for the 'is_x2logic' pattern: "x ? 0 : 1" and put the
1527             // phi merging either 0 or 1 onto the worklist
1528             Node* ifproj0 = iff->raw_out(0);
1529             Node* ifproj1 = iff->raw_out(1);
1530             if (ifproj0->outcnt() > 0 && ifproj1->outcnt() > 0) {
1531               Node* region0 = ifproj0->raw_out(0);
1532               Node* region1 = ifproj1->raw_out(0);
1533               if( region0 == region1 )
1534                 add_users_to_worklist0(region0);
1535             }
1536           }
1537         }
1538       }
1539       if (use_op == Opcodes::Op_CmpI) {
1540         Node* phi = countedloop_phi_from_cmp((CmpINode*)use, n);
1541         if (phi != NULL) {
1542           // If an opaque node feeds into the limit condition of a
1543           // CountedLoop, we need to process the Phi node for the
1544           // induction variable when the opaque node is removed:
1545           // the range of values taken by the Phi is now known and
1546           // so its type is also known.
1547           _worklist.push(phi);
1548         }
1549         Node* in1 = use->in(1);
1550         for (uint i = 0; i < in1->outcnt(); i++) {
1551           if (in1->raw_out(i)->Opcode() == Opcodes::Op_CastII) {
1552             Node* castii = in1->raw_out(i);
1553             if (castii->in(0) != NULL && castii->in(0)->in(0) != NULL && castii->in(0)->in(0)->is_If()) {
1554               Node* ifnode = castii->in(0)->in(0);
1555               if (ifnode->in(1) != NULL && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == use) {
1556                 // Reprocess a CastII node that may depend on an
1557                 // opaque node value when the opaque node is
1558                 // removed. In case it carries a dependency we can do
1559                 // a better job of computing its type.
1560                 _worklist.push(castii);
1561               }
1562             }
1563           }
1564         }
1565       }
1566     }
1567 
1568     // If changed Cast input, check Phi users for simple cycles
1569     if (use->is_ConstraintCast()) {
1570       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1571         Node* u = use->fast_out(i2);
1572         if (u->is_Phi())
1573           _worklist.push(u);
1574       }
1575     }
1576     // If changed LShift inputs, check RShift users for useless sign-ext
1577     if( use_op == Opcodes::Op_LShiftI ) {
1578       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1579         Node* u = use->fast_out(i2);
1580         if (u->Opcode() == Opcodes::Op_RShiftI)
1581           _worklist.push(u);
1582       }
1583     }
1584     // If changed AddI/SubI inputs, check CmpU for range check optimization.
1585     if (use_op == Opcodes::Op_AddI || use_op == Opcodes::Op_SubI) {
1586       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1587         Node* u = use->fast_out(i2);
1588         if (u->is_Cmp() && (u->Opcode() == Opcodes::Op_CmpU)) {
1589           _worklist.push(u);
1590         }
1591       }
1592     }
1593     // If changed AddP inputs, check Stores for loop invariant
1594     if( use_op == Opcodes::Op_AddP ) {
1595       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1596         Node* u = use->fast_out(i2);
1597         if (u->is_Mem())
1598           _worklist.push(u);
1599       }
1600     }
1601     // If changed initialization activity, check dependent Stores
1602     if (use_op == Opcodes::Op_Allocate || use_op == Opcodes::Op_AllocateArray) {
1603       InitializeNode* init = use->as_Allocate()->initialization();
1604       if (init != NULL) {
1605         Node* imem = init->proj_out(TypeFunc::Memory);
1606         if (imem != NULL)  add_users_to_worklist0(imem);
1607       }
1608     }
1609     if (use_op == Opcodes::Op_Initialize) {
1610       Node* imem = use->as_Initialize()->proj_out(TypeFunc::Memory);
1611       if (imem != NULL)  add_users_to_worklist0(imem);
1612     }
1613   }
1614 }
1615 
1616 /**
1617  * Remove the speculative part of all types that we know of
1618  */
1619 void PhaseIterGVN::remove_speculative_types()  {
1620   assert(UseTypeSpeculation, "speculation is off");
1621   for (uint i = 0; i < _types.Size(); i++)  {
1622     const Type* t = _types.fast_lookup(i);
1623     if (t != NULL) {
1624       _types.map(i, t->remove_speculative());
1625     }
1626   }
1627   _table.check_no_speculative_types();
1628 }
1629 


1705         // If we changed the receiver type of a call, we need to revisit
1706         // the Catch following the call.  It's looking for a non-NULL
1707         // receiver to know when to enable the regular fall-through path
1708         // in addition to the NullPtrException path.
1709         if (m->is_Call()) {
1710           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1711             Node* p = m->fast_out(i2);  // Propagate changes to uses
1712             if (p->is_Proj() && p->as_Proj()->_con == TypeFunc::Control && p->outcnt() == 1) {
1713               worklist.push(p->unique_out());
1714             }
1715           }
1716         }
1717         if (m->bottom_type() != type(m)) { // If not already bottomed out
1718           worklist.push(m);     // Propagate change to user
1719         }
1720 
1721         // CmpU nodes can get their type information from two nodes up in the
1722         // graph (instead of from the nodes immediately above). Make sure they
1723         // are added to the worklist if nodes they depend on are updated, since
1724         // they could be missed and get wrong types otherwise.
1725         Opcodes m_op = m->Opcode();
1726         if (m_op == Opcodes::Op_AddI || m_op == Opcodes::Op_SubI) {
1727           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1728             Node* p = m->fast_out(i2); // Propagate changes to uses
1729             if (p->Opcode() == Opcodes::Op_CmpU) {
1730               // Got a CmpU which might need the new type information from node n.
1731               if(p->bottom_type() != type(p)) { // If not already bottomed out
1732                 worklist.push(p); // Propagate change to user
1733               }
1734             }
1735           }
1736         }
1737         // If n is used in a counted loop exit condition then the type
1738         // of the counted loop's Phi depends on the type of n. See
1739         // PhiNode::Value().
1740         if (m_op == Opcodes::Op_CmpI) {
1741           PhiNode* phi = countedloop_phi_from_cmp((CmpINode*)m, n);
1742           if (phi != NULL) {
1743             worklist.push(phi);
1744           }
1745         }
1746       }
1747     }
1748   }
1749 }
1750 
1751 //------------------------------do_transform-----------------------------------
1752 // Top level driver for the recursive transformer
1753 void PhaseCCP::do_transform() {
1754   // Correct leaves of new-space Nodes; they point to old-space.
1755   C->set_root( transform(C->root())->as_Root() );
1756   assert( C->top(),  "missing TOP node" );
1757   assert( C->root(), "missing root" );
1758 }
1759 
1760 //------------------------------transform--------------------------------------


1820             assert(type(m) == Type::TOP, "Unreachable region should not have live phis.");
1821             replace_node(m, nn);
1822             --i; // deleted this phi; rescan starting with next position
1823           }
1824         }
1825       }
1826       replace_node(n,nn);       // Update DefUse edges for new constant
1827     }
1828     return nn;
1829   }
1830 
1831   // If n is a TypeNode, capture any more-precise type permanently into Node
1832   if (t != n->bottom_type()) {
1833     hash_delete(n);             // changing bottom type may force a rehash
1834     n->raise_bottom_type(t);
1835     _worklist.push(n);          // n re-enters the hash table via the worklist
1836   }
1837 
1838   // TEMPORARY fix to ensure that 2nd GVN pass eliminates NULL checks
1839   switch( n->Opcode() ) {
1840   case Opcodes::Op_FastLock:      // Revisit FastLocks for lock coarsening
1841   case Opcodes::Op_If:
1842   case Opcodes::Op_CountedLoopEnd:
1843   case Opcodes::Op_Region:
1844   case Opcodes::Op_Loop:
1845   case Opcodes::Op_CountedLoop:
1846   case Opcodes::Op_Conv2B:
1847   case Opcodes::Op_Opaque1:
1848   case Opcodes::Op_Opaque2:
1849     _worklist.push(n);
1850     break;
1851   default:
1852     break;
1853   }
1854 
1855   return  n;
1856 }
1857 
1858 //---------------------------------saturate------------------------------------
1859 const Type* PhaseCCP::saturate(const Type* new_type, const Type* old_type,
1860                                const Type* limit_type) const {
1861   const Type* wide_type = new_type->widen(old_type, limit_type);
1862   if (wide_type != new_type) {          // did we widen?
1863     // If so, we may have widened beyond the limit type.  Clip it back down.
1864     new_type = wide_type->filter(limit_type);
1865   }
1866   return new_type;
1867 }
1868 


1974   assert( igvn->hash_find(this) != this, "Need to remove from hash before changing edges" );
1975   Node *old = in(i);
1976   set_req(i, n);
1977 
1978   // old goes dead?
1979   if( old ) {
1980     switch (old->outcnt()) {
1981     case 0:
1982       // Put into the worklist to kill later. We do not kill it now because the
 1983       // recursive kill will delete the current node (this) if a dead loop exists
1984       if (!old->is_top())
1985         igvn->_worklist.push( old );
1986       break;
1987     case 1:
1988       if( old->is_Store() || old->has_special_unique_user() )
1989         igvn->add_users_to_worklist( old );
1990       break;
1991     case 2:
1992       if( old->is_Store() )
1993         igvn->add_users_to_worklist( old );
1994       if( old->Opcode() == Opcodes::Op_Region )
1995         igvn->_worklist.push(old);
1996       break;
1997     case 3:
1998       if( old->Opcode() == Opcodes::Op_Region ) {
1999         igvn->_worklist.push(old);
2000         igvn->add_users_to_worklist( old );
2001       }
2002       break;
2003     default:
2004       break;
2005     }
2006   }
2007 
2008 }
2009 
2010 //-------------------------------replace_by-----------------------------------
2011 // Using def-use info, replace one node with another.  Follow the def-use info
2012 // to all users of the OLD node.  Then make all uses point to the NEW node.
2013 void Node::replace_by(Node *new_node) {
2014   assert(!is_top(), "top node has no DU info");
2015   for (DUIterator_Last imin, i = last_outs(imin); i >= imin; ) {
2016     Node* use = last_out(i);
2017     uint uses_found = 0;
2018     for (uint j = 0; j < use->len(); j++) {