< prev index next >

src/hotspot/share/opto/phaseX.cpp

Print this page




1608     // If changed AddP inputs, check Stores for loop invariant
1609     if( use_op == Op_AddP ) {
1610       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1611         Node* u = use->fast_out(i2);
1612         if (u->is_Mem())
1613           _worklist.push(u);
1614       }
1615     }
1616     // If changed initialization activity, check dependent Stores
1617     if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
1618       InitializeNode* init = use->as_Allocate()->initialization();
1619       if (init != NULL) {
1620         Node* imem = init->proj_out(TypeFunc::Memory);
1621         if (imem != NULL)  add_users_to_worklist0(imem);
1622       }
1623     }
1624     if (use_op == Op_Initialize) {
1625       Node* imem = use->as_Initialize()->proj_out(TypeFunc::Memory);
1626       if (imem != NULL)  add_users_to_worklist0(imem);
1627     }











1628   }
1629 }
1630 
1631 /**
1632  * Remove the speculative part of all types that we know of
1633  */
1634 void PhaseIterGVN::remove_speculative_types()  {
1635   assert(UseTypeSpeculation, "speculation is off");
1636   for (uint i = 0; i < _types.Size(); i++)  { // walk every type slot recorded so far
1637     const Type* t = _types.fast_lookup(i);
1638     if (t != NULL) {
1639       _types.map(i, t->remove_speculative()); // replace slot with its non-speculative form
1640     }
1641   }
1642   _table.check_no_speculative_types(); // sanity check: table should now contain no speculative types
1643 }
1644 
1645 //=============================================================================
1646 #ifndef PRODUCT
1647 uint PhaseCCP::_total_invokes   = 0;


1741         // they could be missed and get wrong types otherwise.
1742         uint m_op = m->Opcode();
1743         if (m_op == Op_AddI || m_op == Op_SubI) {
1744           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1745             Node* p = m->fast_out(i2); // Propagate changes to uses
1746             if (p->Opcode() == Op_CmpU) {
1747               // Got a CmpU which might need the new type information from node n.
1748               if(p->bottom_type() != type(p)) { // If not already bottomed out
1749                 worklist.push(p); // Propagate change to user
1750               }
1751             }
1752           }
1753         }
1754         // If n is used in a counted loop exit condition then the type
1755         // of the counted loop's Phi depends on the type of n. See
1756         // PhiNode::Value().
1757         if (m_op == Op_CmpI) {
1758           PhiNode* phi = countedloop_phi_from_cmp((CmpINode*)m, n);
1759           if (phi != NULL) {
1760             worklist.push(phi);











1761           }
1762         }
1763       }
1764     }
1765   }
1766 }
1767 
1768 //------------------------------do_transform-----------------------------------
1769 // Top level driver for the recursive transformer
1770 void PhaseCCP::do_transform() {
1771   // Correct leaves of new-space Nodes; they point to old-space.
1772   C->set_root( transform(C->root())->as_Root() ); // recursively transform from the root and reinstall it
1773   assert( C->top(),  "missing TOP node" );
1774   assert( C->root(), "missing root" );
1775 }
1776 
1777 //------------------------------transform--------------------------------------
1778 // Given a Node in old-space, clone him into new-space.
1779 // Convert any of his old-space children into new-space children.
1780 Node *PhaseCCP::transform( Node *n ) {




1608     // If changed AddP inputs, check Stores for loop invariant
1609     if( use_op == Op_AddP ) {
1610       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1611         Node* u = use->fast_out(i2);
1612         if (u->is_Mem())
1613           _worklist.push(u);
1614       }
1615     }
1616     // If changed initialization activity, check dependent Stores
1617     if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
1618       InitializeNode* init = use->as_Allocate()->initialization();
1619       if (init != NULL) {
1620         Node* imem = init->proj_out(TypeFunc::Memory);
1621         if (imem != NULL)  add_users_to_worklist0(imem);
1622       }
1623     }
1624     if (use_op == Op_Initialize) {
1625       Node* imem = use->as_Initialize()->proj_out(TypeFunc::Memory);
1626       if (imem != NULL)  add_users_to_worklist0(imem);
1627     }
1628     // Loading the java mirror from a klass oop requires two loads and the type
1629     // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1630     // Also handle other raw memory users that depend on the type of the AddP.
1631     if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1632       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1633         Node* u = use->fast_out(i2);
1634         if (u->is_Mem()) {
1635           _worklist.push(u);
1636         }
1637       }
1638     }
1639   }
1640 }
1641 
1642 /**
1643  * Remove the speculative part of all types that we know of
1644  */
1645 void PhaseIterGVN::remove_speculative_types()  {
1646   assert(UseTypeSpeculation, "speculation is off");
1647   for (uint i = 0; i < _types.Size(); i++)  { // walk every type slot recorded so far
1648     const Type* t = _types.fast_lookup(i);
1649     if (t != NULL) {
1650       _types.map(i, t->remove_speculative()); // replace slot with its non-speculative form
1651     }
1652   }
1653   _table.check_no_speculative_types(); // sanity check: table should now contain no speculative types
1654 }
1655 
1656 //=============================================================================
1657 #ifndef PRODUCT
1658 uint PhaseCCP::_total_invokes   = 0;


1752         // they could be missed and get wrong types otherwise.
1753         uint m_op = m->Opcode();
1754         if (m_op == Op_AddI || m_op == Op_SubI) {
1755           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1756             Node* p = m->fast_out(i2); // Propagate changes to uses
1757             if (p->Opcode() == Op_CmpU) {
1758               // Got a CmpU which might need the new type information from node n.
1759               if(p->bottom_type() != type(p)) { // If not already bottomed out
1760                 worklist.push(p); // Propagate change to user
1761               }
1762             }
1763           }
1764         }
1765         // If n is used in a counted loop exit condition then the type
1766         // of the counted loop's Phi depends on the type of n. See
1767         // PhiNode::Value().
1768         if (m_op == Op_CmpI) {
1769           PhiNode* phi = countedloop_phi_from_cmp((CmpINode*)m, n);
1770           if (phi != NULL) {
1771             worklist.push(phi);
1772           }
1773         }
1774         // Loading the java mirror from a klass oop requires two loads and the type
1775         // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1776         // Also handle other raw memory users that depend on the type of the AddP.
1777         if (m_op == Op_LoadP && m->bottom_type()->isa_rawptr()) {
1778           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1779             Node* u = m->fast_out(i2);
1780             if (u->is_Mem() && u->bottom_type() != type(u)) {
1781               worklist.push(u);
1782             }
1783           }
1784         }
1785       }
1786     }
1787   }
1788 }
1789 
1790 //------------------------------do_transform-----------------------------------
1791 // Top level driver for the recursive transformer
1792 void PhaseCCP::do_transform() {
1793   // Correct leaves of new-space Nodes; they point to old-space.
1794   C->set_root( transform(C->root())->as_Root() ); // recursively transform from the root and reinstall it
1795   assert( C->top(),  "missing TOP node" );
1796   assert( C->root(), "missing root" );
1797 }
1798 
1799 //------------------------------transform--------------------------------------
1800 // Given a Node in old-space, clone him into new-space.
1801 // Convert any of his old-space children into new-space children.
1802 Node *PhaseCCP::transform( Node *n ) {


< prev index next >