src/hotspot/share/opto/phaseX.cpp

rev 52634 : 8214055: GC/C2 abstraction for phaseX


1638       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1639         Node* u = use->fast_out(i2);
1640         if (u->is_Mem())
1641           _worklist.push(u);
1642       }
1643     }
1644     // If changed initialization activity, check dependent Stores
1645     if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
1646       InitializeNode* init = use->as_Allocate()->initialization();
1647       if (init != NULL) {
1648         Node* imem = init->proj_out_or_null(TypeFunc::Memory);
1649         if (imem != NULL)  add_users_to_worklist0(imem);
1650       }
1651     }
1652     if (use_op == Op_Initialize) {
1653       Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1654       if (imem != NULL)  add_users_to_worklist0(imem);
1655     }
1656     // Loading the java mirror from a Klass requires two loads and the type
1657     // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1658     //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1659     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1660     bool has_load_barriers = bs->has_load_barriers();
1661 
1662     if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1663       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1664         Node* u = use->fast_out(i2);
1665         const Type* ut = u->bottom_type();
1666         if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1667           if (has_load_barriers) {
1668             // Search for load barriers behind the load
1669             for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1670               Node* b = u->fast_out(i3);
1671               if (bs->is_gc_barrier_node(b)) {
1672                 _worklist.push(b);
1673               }
1674             }
1675           }
1676           _worklist.push(u);
1677         }
1678       }
1679     }
1680   }
1681 }
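To make the one-line shape in the comment above easier to follow, here is a sketch of the chain the two nested loops walk. Only 'use', 'u' and 'b' are the loop variables from the code; the other names are illustrative, not taken from the source.

  //   klass  -- some Klass-typed node, e.g. a LoadKlass
  //   adr    =  AddP(klass, #java_mirror)  // address of Klass::_java_mirror (an OopHandle)
  //   use    =  LoadP(adr)                 // raw-pointer load of the handle      ('use' above)
  //   u      =  LoadP(use)                 // the mirror oop itself               ('u'   above)
  //   b      =  <GC barrier node>(u)       // present only if has_load_barriers() ('b'   above)
  //
  // LoadNode::Value() types 'u' by walking back through 'use' and 'adr' to 'klass'. So when a
  // node feeding 'use' gets a better type, 'u' and any barrier behind it must be revisited,
  // even though they are more than one def-use edge away from the node IGVN just improved.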
1682 
1683 /**
1684  * Remove the speculative part of all types that we know of
1685  */
1686 void PhaseIterGVN::remove_speculative_types()  {
1687   assert(UseTypeSpeculation, "speculation is off");
1688   for (uint i = 0; i < _types.Size(); i++)  {
1689     const Type* t = _types.fast_lookup(i);
1690     if (t != NULL) {
1691       _types.map(i, t->remove_speculative());
1692     }
1693   }
1694   _table.check_no_speculative_types();
1695 }
1696 
1697 //=============================================================================
1698 #ifndef PRODUCT
1699 uint PhaseCCP::_total_invokes   = 0;


1797             Node* p = m->fast_out(i2); // Propagate changes to uses
1798             if (p->Opcode() == Op_CmpU) {
1799               // Got a CmpU which might need the new type information from node n.
1800               if(p->bottom_type() != type(p)) { // If not already bottomed out
1801                 worklist.push(p); // Propagate change to user
1802               }
1803             }
1804           }
1805         }
1806         // If n is used in a counted loop exit condition then the type
1807         // of the counted loop's Phi depends on the type of n. See
1808         // PhiNode::Value().
1809         if (m_op == Op_CmpI) {
1810           PhiNode* phi = countedloop_phi_from_cmp((CmpINode*)m, n);
1811           if (phi != NULL) {
1812             worklist.push(phi);
1813           }
1814         }
1815         // Loading the java mirror from a Klass requires two loads and the type
1816         // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1817         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1818         bool has_load_barriers = bs->has_load_barriers();
1819 
1820         if (m_op == Op_LoadP && m->bottom_type()->isa_rawptr()) {
1821           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1822             Node* u = m->fast_out(i2);
1823             const Type* ut = u->bottom_type();
1824             if (u->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(u)) {
1825               if (has_load_barriers) {
1826                 // Search for load barriers behind the load
1827                 for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1828                   Node* b = u->fast_out(i3);
1829                   if (bs->is_gc_barrier_node(b)) {
1830                     _worklist.push(b);
1831                   }
1832                 }
1833               }
1834               worklist.push(u);
1835             }
1836           }
1837         }
1838       }
1839     }
1840   }
1841 }
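This excerpt starts in the middle of PhaseCCP::analyze(), so for orientation here is a heavily simplified sketch of the enclosing fixpoint loop. It is illustrative only and omits the other cases the real loop handles: types start optimistically at TOP and can only fall, and whenever Value() produces a new type the node's uses, plus the indirect dependents handled above, go back on the worklist until nothing changes.

  // Illustrative sketch only, not the actual source of PhaseCCP::analyze().
  Unique_Node_List worklist;
  worklist.push(C->root());
  while (worklist.size() != 0) {
    Node* n = worklist.pop();
    const Type* t = n->Value(this);     // recompute under the current optimistic types
    if (t != type(n)) {
      set_type(n, t);                   // types only move toward bottom, so this terminates
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node* m = n->fast_out(i);       // direct uses, plus the CmpU / counted-loop Phi /
        worklist.push(m);               // mirror-load special cases shown in the excerpt above
      }
    }
  }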
1842 
1843 //------------------------------do_transform-----------------------------------
1844 // Top level driver for the recursive transformer
1845 void PhaseCCP::do_transform() {
1846   // Correct leaves of new-space Nodes; they point to old-space.
1847   C->set_root( transform(C->root())->as_Root() );
1848   assert( C->top(),  "missing TOP node" );
1849   assert( C->root(), "missing root" );
1850 }
1851 
1852 //------------------------------transform--------------------------------------
1853 // Given a Node in old-space, clone him into new-space.
1854 // Convert any of his old-space children into new-space children.
1855 Node *PhaseCCP::transform( Node *n ) {
1856   Node *new_node = _nodes[n->_idx]; // Check for transformed node
1857   if( new_node != NULL )




1638       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1639         Node* u = use->fast_out(i2);
1640         if (u->is_Mem())
1641           _worklist.push(u);
1642       }
1643     }
1644     // If changed initialization activity, check dependent Stores
1645     if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
1646       InitializeNode* init = use->as_Allocate()->initialization();
1647       if (init != NULL) {
1648         Node* imem = init->proj_out_or_null(TypeFunc::Memory);
1649         if (imem != NULL)  add_users_to_worklist0(imem);
1650       }
1651     }
1652     if (use_op == Op_Initialize) {
1653       Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1654       if (imem != NULL)  add_users_to_worklist0(imem);
1655     }
1656     // Loading the java mirror from a Klass requires two loads and the type
1657     // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1658     if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1659       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1660         Node* u = use->fast_out(i2);
1661         const Type* ut = u->bottom_type();
1662         if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1663           _worklist.push(u);
1664         }
1665       }
1666     }
1667 
1668     BarrierSet::barrier_set()->barrier_set_c2()->igvn_add_users_to_worklist(this, use);
1669   }
1670 }
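In the new version of add_users_to_worklist() the GC-specific part is gone: line 1668 hands the node to the current barrier set instead of searching for load barrier nodes inline, as the old version of this function did earlier on this page. Below is a minimal sketch of what a load-barrier-aware override could look like, assuming the removed inline search simply moves behind the hook. The class name (ZBarrierSetC2) and the exact virtual signature are assumptions for illustration, not quoted from the patch.

  // Hedged sketch: signature inferred from the call site above, class name illustrative.
  void ZBarrierSetC2::igvn_add_users_to_worklist(PhaseIterGVN* igvn, Node* use) const {
    // Mirror loads hanging off a raw Klass::_java_mirror load may have GC barrier nodes
    // behind them; push those barriers so IGVN re-runs Value() on them as well.
    if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
      for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
        Node* u = use->fast_out(i);
        if (u->Opcode() == Op_LoadP && u->bottom_type()->isa_instptr()) {
          // Search for load barriers behind the load
          for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
            Node* b = u->fast_out(j);
            if (is_gc_barrier_node(b)) {
              igvn->_worklist.push(b);
            }
          }
        }
      }
    }
  }

Note that the generic LoadP/LoadP match and the push of 'u' itself stay in phaseX.cpp (lines 1658-1666 above); only the barrier-specific walk moves into the barrier set.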
1671 
1672 /**
1673  * Remove the speculative part of all types that we know of
1674  */
1675 void PhaseIterGVN::remove_speculative_types()  {
1676   assert(UseTypeSpeculation, "speculation is off");
1677   for (uint i = 0; i < _types.Size(); i++)  {
1678     const Type* t = _types.fast_lookup(i);
1679     if (t != NULL) {
1680       _types.map(i, t->remove_speculative());
1681     }
1682   }
1683   _table.check_no_speculative_types();
1684 }
1685 
1686 //=============================================================================
1687 #ifndef PRODUCT
1688 uint PhaseCCP::_total_invokes   = 0;


1786             Node* p = m->fast_out(i2); // Propagate changes to uses
1787             if (p->Opcode() == Op_CmpU) {
1788               // Got a CmpU which might need the new type information from node n.
1789               if(p->bottom_type() != type(p)) { // If not already bottomed out
1790                 worklist.push(p); // Propagate change to user
1791               }
1792             }
1793           }
1794         }
1795         // If n is used in a counted loop exit condition then the type
1796         // of the counted loop's Phi depends on the type of n. See
1797         // PhiNode::Value().
1798         if (m_op == Op_CmpI) {
1799           PhiNode* phi = countedloop_phi_from_cmp((CmpINode*)m, n);
1800           if (phi != NULL) {
1801             worklist.push(phi);
1802           }
1803         }
1804         // Loading the java mirror from a Klass requires two loads and the type
1805         // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1806         if (m_op == Op_LoadP && m->bottom_type()->isa_rawptr()) {
1807           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1808             Node* u = m->fast_out(i2);
1809             const Type* ut = u->bottom_type();
1810             if (u->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(u)) {
1811               worklist.push(u);
1812             }
1813           }
1814         }
1815 
1816         BarrierSet::barrier_set()->barrier_set_c2()->ccp_analyze(this, worklist, m);
1817       }
1818     }
1819   }
1820 }
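PhaseCCP::analyze() gets the matching hook at line 1816. A sketch of the corresponding override follows, again assuming the inline code removed from the old version moves behind it unchanged; the signature is inferred from the call 'ccp_analyze(this, worklist, m)' and the class name is illustrative. One detail worth noting: the removed inline code pushed the barrier nodes onto the inherited IGVN _worklist rather than the local CCP worklist, and the sketch preserves that.

  // Hedged sketch of the CCP-side hook; not quoted from the patch.
  void ZBarrierSetC2::ccp_analyze(PhaseCCP* ccp, Unique_Node_List& worklist, Node* use) const {
    // 'worklist' is the local CCP worklist, available for implementations that need it.
    if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
      for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
        Node* u = use->fast_out(i);
        const Type* ut = u->bottom_type();
        if (u->Opcode() == Op_LoadP && ut->isa_instptr() && ut != ccp->type(u)) {
          // Search for load barriers behind the load, mirroring the removed inline code
          for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
            Node* b = u->fast_out(j);
            if (is_gc_barrier_node(b)) {
              ccp->_worklist.push(b);   // the old inline code used the IGVN worklist here
            }
          }
        }
      }
    }
  }

The push of 'u' onto the CCP worklist itself stays in phaseX.cpp (line 1811 above), so the hook only has to cover the GC-specific barrier nodes.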
1821 
1822 //------------------------------do_transform-----------------------------------
1823 // Top level driver for the recursive transformer
1824 void PhaseCCP::do_transform() {
1825   // Correct leaves of new-space Nodes; they point to old-space.
1826   C->set_root( transform(C->root())->as_Root() );
1827   assert( C->top(),  "missing TOP node" );
1828   assert( C->root(), "missing root" );
1829 }
1830 
1831 //------------------------------transform--------------------------------------
1832 // Given a Node in old-space, clone him into new-space.
1833 // Convert any of his old-space children into new-space children.
1834 Node *PhaseCCP::transform( Node *n ) {
1835   Node *new_node = _nodes[n->_idx]; // Check for transformed node
1836   if( new_node != NULL )