src/share/vm/opto/escape.cpp: webrev sdiff for bug 7059047 (old version first, new version second)

  43   _edges->append_if_missing(v);
  44 }
  45 
  46 void PointsToNode::remove_edge(uint targIdx, PointsToNode::EdgeType et) {
  47   uint v = (targIdx << EdgeShift) + ((uint) et);
  48 
  49   _edges->remove(v);
  50 }
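add_edge()/remove_edge() pack a whole edge into one uint: the target node index goes in the high bits and the low EdgeShift bits carry the EdgeType tag. A minimal standalone sketch of that encoding (not HotSpot code; it assumes EdgeShift == 2, which is enough to distinguish the four edge types):

    #include <cassert>

    enum EdgeType { UnknownEdge = 0, PointsToEdge = 1, DeferredEdge = 2, FieldEdge = 3 };
    const unsigned EdgeShift = 2;  // assumed: low two bits hold the EdgeType tag

    // Pack (target index, edge type) the way add_edge()/remove_edge() build `v`.
    unsigned pack_edge(unsigned targ_idx, EdgeType et) {
      return (targ_idx << EdgeShift) + (unsigned)et;
    }

    unsigned edge_target(unsigned v) { return v >> EdgeShift; }
    EdgeType edge_type(unsigned v)   { return (EdgeType)(v & ((1u << EdgeShift) - 1)); }

    int main() {
      unsigned v = pack_edge(57, FieldEdge);
      assert(edge_target(v) == 57 && edge_type(v) == FieldEdge);
      return 0;
    }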
  51 
  52 #ifndef PRODUCT
  53 static const char *node_type_names[] = {
  54   "UnknownType",
  55   "JavaObject",
  56   "LocalVar",
  57   "Field"
  58 };
  59 
  60 static const char *esc_names[] = {
  61   "UnknownEscape",
  62   "NoEscape",

  63   "ArgEscape",
  64   "GlobalEscape"
  65 };
  66 
  67 static const char *edge_type_suffix[] = {
  68  "?", // UnknownEdge
  69  "P", // PointsToEdge
  70  "D", // DeferredEdge
  71  "F"  // FieldEdge
  72 };
  73 
  74 void PointsToNode::dump(bool print_state) const {
  75   NodeType nt = node_type();
  76   tty->print("%s ", node_type_names[(int) nt]);
  77   if (print_state) {
  78     EscapeState es = escape_state();
  79     tty->print("%s %s ", esc_names[(int) es], _scalar_replaceable ? "":"NSR");
  80   }
  81   tty->print("[[");
  82   for (uint i = 0; i < edge_count(); i++) {
  83     tty->print(" %d%s", edge_target(i), edge_type_suffix[(int) edge_type(i)]);
  84   }
  85   tty->print("]]  ");
  86   if (_node == NULL)
  87     tty->print_cr("<null>");
  88   else
  89     _node->dump();
  90 }
  91 #endif
  92 
  93 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  94   _nodes(C->comp_arena(), C->unique(), C->unique(), PointsToNode()),
  95   _processed(C->comp_arena()),
  96   pt_ptset(C->comp_arena()),
  97   pt_visited(C->comp_arena()),
  98   pt_worklist(C->comp_arena(), 4, 0, 0),
  99   _collecting(true),


 361   bool deferred = (to->node_type() == PointsToNode::LocalVar);
 362 
 363   for (uint fe = 0; fe < an->edge_count(); fe++) {
 364     assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
 365     int fi = an->edge_target(fe);
 366     PointsToNode* pf = ptnode_adr(fi);
 367     int po = pf->offset();
 368     if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) {
 369       if (deferred)
 370         add_deferred_edge(fi, to_i);
 371       else
 372         add_pointsto_edge(fi, to_i);
 373     }
 374   }
 375 }
 376 
  377 // Add a deferred edge from the node given by "from_i" to any field of "adr_i"
  378 // whose offset matches "offs".
 379 void ConnectionGraph::add_deferred_edge_to_fields(uint from_i, uint adr_i, int offs) {
 380   PointsToNode* an = ptnode_adr(adr_i);

 381   for (uint fe = 0; fe < an->edge_count(); fe++) {
 382     assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
 383     int fi = an->edge_target(fe);
 384     PointsToNode* pf = ptnode_adr(fi);
 385     int po = pf->offset();
 386     if (pf->edge_count() == 0) {
  387       // We have not seen any stores to this field; assume it was set outside this method.
 388       add_pointsto_edge(fi, _phantom_object);
 389     }
 390     if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) {
 391       add_deferred_edge(from_i, fi);
 392     }
 393   }
 394 }
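Both add_*_edge_to_fields() helpers select field edges by offset, with Type::OffsetBot acting as a wildcard on either side: an unknown offset may alias any field. A standalone sketch of that predicate (OffsetBot here is just an illustrative sentinel, not the real Type constant):

    #include <cassert>

    const int OffsetBot = -2;  // illustrative sentinel for an unknown ("bottom") offset

    // A field edge is taken when offsets agree or either side is unknown.
    bool offsets_match(int field_offset, int wanted_offset) {
      return field_offset == wanted_offset ||
             field_offset == OffsetBot ||
             wanted_offset == OffsetBot;
    }

    int main() {
      assert(offsets_match(12, 12));        // exact match
      assert(offsets_match(OffsetBot, 12)); // unknown field offset aliases everything
      assert(!offsets_match(12, 16));       // distinct known offsets never match
      return 0;
    }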
 395 
 396 // Helper functions
 397 
 398 static Node* get_addp_base(Node *addp) {
 399   assert(addp->is_AddP(), "must be AddP");
 400   //
 401   // AddP cases for Base and Address inputs:
 402   // case #1. Direct object's field reference:
 403   //     Allocate
 404   //       |
 405   //     Proj #5 ( oop result )
 406   //       |
 407   //     CheckCastPP (cast to instance type)
 408   //      | |
 409   //     AddP  ( base == address )
 410   //


1024 
1025 
1026   //  Phase 1:  Process possible allocations from alloc_worklist.
1027   //  Create instance types for the CheckCastPP for allocations where possible.
1028   //
1029   // (Note: don't forget to change the order of the second AddP node on
1030   //  the alloc_worklist if the order of the worklist processing is changed,
1031   //  see the comment in find_second_addp().)
1032   //
1033   while (alloc_worklist.length() != 0) {
1034     Node *n = alloc_worklist.pop();
1035     uint ni = n->_idx;
1036     const TypeOopPtr* tinst = NULL;
1037     if (n->is_Call()) {
1038       CallNode *alloc = n->as_Call();
1039       // copy escape information to call node
1040       PointsToNode* ptn = ptnode_adr(alloc->_idx);
1041       PointsToNode::EscapeState es = escape_state(alloc);
1042       // We have an allocation or call which returns a Java object,
1043       // see if it is unescaped.
1044       if (es != PointsToNode::NoEscape || !ptn->_scalar_replaceable)
1045         continue;
1046 
1047       // Find CheckCastPP for the allocate or for the return value of a call
1048       n = alloc->result_cast();
1049       if (n == NULL) {            // No uses except Initialize node
1050         if (alloc->is_Allocate()) {
1051           // Set the scalar_replaceable flag for allocation
1052           // so it could be eliminated if it has no uses.
1053           alloc->as_Allocate()->_is_scalar_replaceable = true;
1054         }
1055         continue;
1056       }
1057       if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
1058         assert(!alloc->is_Allocate(), "allocation should have unique type");
1059         continue;
1060       }
1061 
1062       // The inline code for Object.clone() casts the allocation result to
1063       // java.lang.Object and then to the actual type of the allocated
1064       // object. Detect this case and use the second cast.


1085           continue;
1086         }
1087       }
1088       if (alloc->is_Allocate()) {
1089         // Set the scalar_replaceable flag for allocation
1090         // so it could be eliminated.
1091         alloc->as_Allocate()->_is_scalar_replaceable = true;
1092       }
1093       set_escape_state(n->_idx, es);
1094       // in order for an object to be scalar-replaceable, it must be:
1095       //   - a direct allocation (not a call returning an object)
1096       //   - non-escaping
1097       //   - eligible to be a unique type
1098       //   - not determined to be ineligible by escape analysis
1099       assert(ptnode_adr(alloc->_idx)->_node != NULL &&
1100              ptnode_adr(n->_idx)->_node != NULL, "should be registered");
1101       set_map(alloc->_idx, n);
1102       set_map(n->_idx, alloc);
1103       const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
1104       if (t == NULL)
1105         continue;  // not a TypeInstPtr
1106       tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni);
1107       igvn->hash_delete(n);
1108       igvn->set_type(n,  tinst);
1109       n->raise_bottom_type(tinst);
1110       igvn->hash_insert(n);
1111       record_for_optimizer(n);
1112       if (alloc->is_Allocate() && ptn->_scalar_replaceable &&
1113           (t->isa_instptr() || t->isa_aryptr())) {
1114 
 1115         // First, put on the worklist all Field edges from the Connection Graph,
 1116         // which is more accurate than taking immediate users from the Ideal Graph.
1117         for (uint e = 0; e < ptn->edge_count(); e++) {
1118           Node *use = ptnode_adr(ptn->edge_target(e))->_node;
1119           assert(ptn->edge_type(e) == PointsToNode::FieldEdge && use->is_AddP(),
1120                  "only AddP nodes are Field edges in CG");
1121           if (use->outcnt() > 0) { // Don't process dead nodes
1122             Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
1123             if (addp2 != NULL) {
1124               assert(alloc->is_AllocateArray(),"array allocation was expected");
1125               alloc_worklist.append_if_missing(addp2);
1126             }
1127             alloc_worklist.append_if_missing(use);
1128           }
1129         }
1130 
1131         // An allocation may have an Initialize which has raw stores. Scan
1132         // the users of the raw allocation result and push AddP users
1133         // on alloc_worklist.


1521   // Cleanup.
1522   if (oop_null->outcnt() == 0)
1523     igvn->hash_delete(oop_null);
1524   if (noop_null->outcnt() == 0)
1525     igvn->hash_delete(noop_null);
1526 }
1527 
1528 bool ConnectionGraph::compute_escape() {
1529   Compile* C = _compile;
1530 
1531   // 1. Populate Connection Graph (CG) with Ideal nodes.
1532 
1533   Unique_Node_List worklist_init;
1534   worklist_init.map(C->unique(), NULL);  // preallocate space
1535 
1536   // Initialize worklist
1537   if (C->root() != NULL) {
1538     worklist_init.push(C->root());
1539   }
1540 

1541   GrowableArray<int> cg_worklist;
1542   PhaseGVN* igvn = _igvn;
1543   bool has_allocations = false;
1544 
1545   // Push all useful nodes onto CG list and set their type.
1546   for( uint next = 0; next < worklist_init.size(); ++next ) {
1547     Node* n = worklist_init.at(next);
1548     record_for_escape_analysis(n, igvn);
 1549     // Only the results of allocations and Java static calls are checked
 1550     // for an escape status. See process_call_result() below.
 1551     if (n->is_Allocate() || (n->is_CallStaticJava() &&
 1552         ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject)) {
1553       has_allocations = true;


1554     }
1555     if(n->is_AddP()) {
1556       // Collect address nodes. Use them during stage 3 below
1557       // to build initial connection graph field edges.
1558       cg_worklist.append(n->_idx);
1559     } else if (n->is_MergeMem()) {
1560       // Collect all MergeMem nodes to add memory slices for
1561       // scalar replaceable objects in split_unique_types().
1562       _mergemem_worklist.append(n->as_MergeMem());
1563     }
1564     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1565       Node* m = n->fast_out(i);   // Get user
1566       worklist_init.push(m);
1567     }
1568   }
1569 
1570   if (!has_allocations) {
1571     _collecting = false;
1572     return false; // Nothing to do.
1573   }


1636       PointsToNode* ptn = ptnode_adr(ni);
1637       Node* n = ptn->_node;
1638       assert(n != NULL, "should be known node");
1639       build_connection_graph(n, igvn);
1640     }
1641   }
1642   if (iterations >= CG_BUILD_ITER_LIMIT) {
1643     assert(iterations < CG_BUILD_ITER_LIMIT,
1644            err_msg("infinite EA connection graph build with %d nodes and worklist size %d",
1645            nodes_size(), length));
1646     // Possible infinite build_connection_graph loop,
1647     // retry compilation without escape analysis.
1648     C->record_failure(C2Compiler::retry_no_escape_analysis());
1649     _collecting = false;
1650     return false;
1651   }
1652 #undef CG_BUILD_ITER_LIMIT
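The surrounding loop rebuilds the connection graph until nothing changes, and CG_BUILD_ITER_LIMIT caps the number of passes: if the graph never stabilizes, the method is recompiled without escape analysis instead of spinning. A self-contained sketch of that bounded fixed-point pattern (step() is a stand-in for build_connection_graph() over the worklist):

    #include <cstdio>
    #include <vector>

    // Stand-in transfer function: one pass over the state, true if anything changed.
    static bool step(std::vector<int>& state) {
      bool changed = false;
      for (size_t i = 0; i + 1 < state.size(); i++) {
        if (state[i + 1] < state[i]) { state[i + 1] = state[i]; changed = true; }
      }
      return changed;
    }

    int main() {
      const int ITER_LIMIT = 20;  // analogue of CG_BUILD_ITER_LIMIT
      std::vector<int> state;
      state.push_back(3); state.push_back(0); state.push_back(1); state.push_back(0);
      int  iterations = 0;
      bool changed = true;
      while (changed && iterations < ITER_LIMIT) {
        changed = step(state);
        iterations++;
      }
      if (changed) {
        // Analogue of record_failure(retry_no_escape_analysis()):
        // never trust a graph that did not reach a fixed point.
        std::printf("no fixed point within %d passes; giving up\n", ITER_LIMIT);
        return 1;
      }
      std::printf("fixed point after %d passes\n", iterations);
      return 0;
    }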
1653 
1654   Arena* arena = Thread::current()->resource_area();
1655   VectorSet visited(arena);
1656   worklist.clear();


1657 
1658   // 5. Remove deferred edges from the graph and adjust
1659   //    escape state of nonescaping objects.
1660   cg_length = cg_worklist.length();
1661   for( uint next = 0; next < cg_length; ++next ) {
1662     int ni = cg_worklist.at(next);
1663     PointsToNode* ptn = ptnode_adr(ni);
1664     PointsToNode::NodeType nt = ptn->node_type();
1665     if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
1666       remove_deferred(ni, &worklist, &visited);
1667       Node *n = ptn->_node;
1668       if (n->is_AddP()) {
1669         // Search for objects which are not scalar replaceable
1670         // and adjust their escape state.
1671         adjust_escape_state(ni, igvn);
1672       }
1673     }
1674   }
1675 
1676   // 6. Propagate escape states.

1677   worklist.clear();
1678   bool has_non_escaping_obj = false;
1679 
1680   // push all GlobalEscape nodes on the worklist
1681   for( uint next = 0; next < cg_length; ++next ) {
1682     int nk = cg_worklist.at(next);
1683     if (ptnode_adr(nk)->escape_state() == PointsToNode::GlobalEscape)
1684       worklist.push(nk);
1685   }
1686   // mark all nodes reachable from GlobalEscape nodes
1687   while(worklist.length() > 0) {
1688     PointsToNode* ptn = ptnode_adr(worklist.pop());
1689     uint e_cnt = ptn->edge_count();
1690     for (uint ei = 0; ei < e_cnt; ei++) {
1691       uint npi = ptn->edge_target(ei);
1692       PointsToNode *np = ptnode_adr(npi);
1693       if (np->escape_state() < PointsToNode::GlobalEscape) {
1694         set_escape_state(npi, PointsToNode::GlobalEscape);
1695         worklist.push(npi);
1696       }
1697     }
1698   }
1699 
1700   // push all ArgEscape nodes on the worklist
1701   for( uint next = 0; next < cg_length; ++next ) {
1702     int nk = cg_worklist.at(next);
1703     if (ptnode_adr(nk)->escape_state() == PointsToNode::ArgEscape)
1704       worklist.push(nk);
1705   }
1706   // mark all nodes reachable from ArgEscape nodes
1707   while(worklist.length() > 0) {
1708     PointsToNode* ptn = ptnode_adr(worklist.pop());
1709     if (ptn->node_type() == PointsToNode::JavaObject)
1710       has_non_escaping_obj = true; // Non GlobalEscape
1711     uint e_cnt = ptn->edge_count();
1712     for (uint ei = 0; ei < e_cnt; ei++) {
1713       uint npi = ptn->edge_target(ei);
1714       PointsToNode *np = ptnode_adr(npi);
1715       if (np->escape_state() < PointsToNode::ArgEscape) {
1716         set_escape_state(npi, PointsToNode::ArgEscape);
1717         worklist.push(npi);
1718       }
1719     }
1720   }
1721 
1722   GrowableArray<Node*> alloc_worklist;

1723 


1724   // push all NoEscape nodes on the worklist
1725   for( uint next = 0; next < cg_length; ++next ) {
1726     int nk = cg_worklist.at(next);
1727     if (ptnode_adr(nk)->escape_state() == PointsToNode::NoEscape)
1728       worklist.push(nk);
1729   }
1730   // mark all nodes reachable from NoEscape nodes
1731   while(worklist.length() > 0) {
1732     uint nk = worklist.pop();
1733     PointsToNode* ptn = ptnode_adr(nk);
1734     if (ptn->node_type() == PointsToNode::JavaObject &&
1735         !(nk == _noop_null || nk == _oop_null))
1736       has_non_escaping_obj = true; // Non Escape
1737     Node* n = ptn->_node;
1738     if (n->is_Allocate() && ptn->_scalar_replaceable ) {
1739       // Push scalar replaceable allocations on alloc_worklist
1740       // for processing in split_unique_types().
1741       alloc_worklist.append(n);
1742     }
1743     uint e_cnt = ptn->edge_count();
1744     for (uint ei = 0; ei < e_cnt; ei++) {
1745       uint npi = ptn->edge_target(ei);
1746       PointsToNode *np = ptnode_adr(npi);
1747       if (np->escape_state() < PointsToNode::NoEscape) {
1748         set_escape_state(npi, PointsToNode::NoEscape);
1749         worklist.push(npi);
1750       }
1751     }
1752   }
1753 
1754   _collecting = false;
1755   assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build");
1756 
1757   assert(ptnode_adr(_oop_null)->escape_state() == PointsToNode::NoEscape, "sanity");
1758   if (UseCompressedOops) {
1759     assert(ptnode_adr(_noop_null)->escape_state() == PointsToNode::NoEscape, "sanity");
1760   }
1761 
1762   if (EliminateLocks) {
1763     // Mark locks before changing ideal graph.
1764     int cnt = C->macro_count();
1765     for( int i=0; i < cnt; i++ ) {
1766       Node *n = C->macro_node(i);
1767       if (n->is_AbstractLock()) { // Lock and Unlock nodes
1768         AbstractLockNode* alock = n->as_AbstractLock();
1769         if (!alock->is_eliminated()) {
1770           PointsToNode::EscapeState es = escape_state(alock->obj_node());
1771           assert(es != PointsToNode::UnknownEscape, "should know");
1772           if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
1773             // Mark it eliminated
1774             alock->set_eliminated();
1775           }
1776         }
1777       }
1778     }
1779   }
1780 
1781 #ifndef PRODUCT
1782   if (PrintEscapeAnalysis) {


1796 
1797     C->print_method("After Escape Analysis", 2);
1798 
1799 #ifdef ASSERT
1800   } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
1801     tty->print("=== No allocations eliminated for ");
1802     C->method()->print_short_name();
1803     if(!EliminateAllocations) {
1804       tty->print(" since EliminateAllocations is off ===");
1805     } else if(!has_scalar_replaceable_candidates) {
1806       tty->print(" since there are no scalar replaceable candidates ===");
1807     } else if(C->AliasLevel() < 3) {
1808       tty->print(" since AliasLevel < 3 ===");
1809     }
1810     tty->cr();
1811 #endif
1812   }
1813   return has_non_escaping_obj;
1814 }
1815 
1816 // Adjust escape state after Connection Graph is built.
1817 void ConnectionGraph::adjust_escape_state(int nidx, PhaseTransform* phase) {
1818   PointsToNode* ptn = ptnode_adr(nidx);
1819   Node* n = ptn->_node;
1820   assert(n->is_AddP(), "Should be called for AddP nodes only");
1821   // Search for objects which are not scalar replaceable.
1822   // Mark their escape state as ArgEscape to propagate the state
1823   // to referenced objects.
 1824   // Note: currently there is no difference in compiler optimizations
 1825   // between ArgEscape objects and NoEscape objects which are not
 1826   // scalar replaceable.
1827 
1828   Compile* C = _compile;
1829 
1830   int offset = ptn->offset();
1831   Node* base = get_addp_base(n);
1832   VectorSet* ptset = PointsTo(base);
1833   int ptset_size = ptset->Size();
1834 
 1835   // Check if an oop field's initializing value is recorded and add
 1836   // a corresponding NULL field's value if it is not recorded.
1837   // Connection Graph does not record a default initialization by NULL
1838   // captured by Initialize node.
1839   //
1840   // Note: it will disable scalar replacement in some cases:
1841   //
1842   //    Point p[] = new Point[1];
 1843   //    p[0] = new Point(); // Will not be scalar replaced
1844   //
 1845   // but it will save us from incorrect optimizations in the following case:
1846   //
1847   //    Point p[] = new Point[1];
 1848   //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
1849   //
 1850   // Do a simple control flow analysis to distinguish the above cases.
1851   //
1852   if (offset != Type::OffsetBot && ptset_size == 1) {
1853     uint elem = ptset->getelem(); // Allocation node's index
 1854     // It does not matter if it is not an Allocation node since
1855     // only non-escaping allocations are scalar replaced.
1856     if (ptnode_adr(elem)->_node->is_Allocate() &&
1857         ptnode_adr(elem)->escape_state() == PointsToNode::NoEscape) {
1858       AllocateNode* alloc = ptnode_adr(elem)->_node->as_Allocate();
1859       InitializeNode* ini = alloc->initialization();
1860 
1861       // Check only oop fields.
1862       const Type* adr_type = n->as_AddP()->bottom_type();
1863       BasicType basic_field_type = T_INT;
1864       if (adr_type->isa_instptr()) {
1865         ciField* field = C->alias_type(adr_type->isa_instptr())->field();
1866         if (field != NULL) {
1867           basic_field_type = field->layout_type();
1868         } else {
 1869           // Ignore a non-field load (for example, a klass load)
1870         }
1871       } else if (adr_type->isa_aryptr()) {

1872         const Type* elemtype = adr_type->isa_aryptr()->elem();
1873         basic_field_type = elemtype->array_element_basic_type();
1874       } else {
1875         // Raw pointers are used for initializing stores so skip it.
1876         assert(adr_type->isa_rawptr() && base->is_Proj() &&
1877                (base->in(0) == alloc),"unexpected pointer type");

1878       }
1879       if (basic_field_type == T_OBJECT ||
1880           basic_field_type == T_NARROWOOP ||
1881           basic_field_type == T_ARRAY) {
1882         Node* value = NULL;
1883         if (ini != NULL) {
1884           BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
1885           Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
1886           if (store != NULL && store->is_Store()) {
1887             value = store->in(MemNode::ValueIn);
1888           } else if (ptn->edge_count() > 0) { // Are there oop stores?
1889             // Check for a store which follows allocation without branches.
1890             // For example, a volatile field store is not collected
1891             // by Initialize node. TODO: it would be nice to use idom() here.
1892             for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1893               store = n->fast_out(i);
1894               if (store->is_Store() && store->in(0) != NULL) {
1895                 Node* ctrl = store->in(0);
1896                 while(!(ctrl == ini || ctrl == alloc || ctrl == NULL ||
1897                         ctrl == C->root() || ctrl == C->top() || ctrl->is_Region() ||
1898                         ctrl->is_IfTrue() || ctrl->is_IfFalse())) {
1899                    ctrl = ctrl->in(0);
1900                 }
1901                 if (ctrl == ini || ctrl == alloc) {
1902                   value = store->in(MemNode::ValueIn);
1903                   break;
1904                 }
1905               }
1906             }
1907           }
1908         }


1909         if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
1910           // A field's initializing value was not recorded. Add NULL.
1911           uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
1912           add_pointsto_edge(nidx, null_idx);
1913         }
1914       }
1915     }
1916   }

1917 
1918   // An object is not scalar replaceable if the field which may point
 1919   // to it has an unknown offset (unknown element of an array of objects).
1920   //

1921   if (offset == Type::OffsetBot) {
1922     uint e_cnt = ptn->edge_count();
1923     for (uint ei = 0; ei < e_cnt; ei++) {
1924       uint npi = ptn->edge_target(ei);
1925       set_escape_state(npi, PointsToNode::ArgEscape);
1926       ptnode_adr(npi)->_scalar_replaceable = false;
1927     }
1928   }
1929 
 1930   // Currently an object is not scalar replaceable if a LoadStore node
 1931   // accesses its field since the field value is unknown after it.
1932   //
1933   bool has_LoadStore = false;
1934   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1935     Node *use = n->fast_out(i);
1936     if (use->is_LoadStore()) {
1937       has_LoadStore = true;
1938       break;
1939     }
1940   }
 1941   // An object is not scalar replaceable if the address points
 1942   // to an unknown field (unknown element for arrays, offset is OffsetBot).
 1943   //
 1944   // Or the address may point to more than one object. This may produce
 1945   // a false-positive result (set scalar_replaceable to false)
1946   // since the flow-insensitive escape analysis can't separate
1947   // the case when stores overwrite the field's value from the case
1948   // when stores happened on different control branches.
1949   //
 1950   if (ptset_size > 1 || (ptset_size != 0 &&
 1951       (has_LoadStore || offset == Type::OffsetBot))) {
1952     for( VectorSetI j(ptset); j.test(); ++j ) {
1953       set_escape_state(j.elem, PointsToNode::ArgEscape);
1954       ptnode_adr(j.elem)->_scalar_replaceable = false;
1955     }
1956   }
1957 }
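Taken together, adjust_escape_state() demotes the pointed-to objects whenever the field address is imprecise or its value can change behind the analysis's back: unknown offset, more than one possible base object, or a LoadStore user. A standalone sketch of that final decision (the struct fields are illustrative summaries of the graph queries above, not HotSpot API):

    #include <cassert>

    // Illustrative summary of what adjust_escape_state() computes for one AddP.
    struct FieldAccess {
      int  ptset_size;       // how many objects the address may point to
      bool offset_unknown;   // offset == Type::OffsetBot
      bool has_load_store;   // some LoadStore node uses this address
    };

    // Mirrors the closing condition: demote when the address may reach several
    // objects, or reaches one whose field is unknown or mutated by a LoadStore.
    bool demotes_targets(const FieldAccess& a) {
      return a.ptset_size > 1 ||
             (a.ptset_size != 0 && (a.has_load_store || a.offset_unknown));
    }

    int main() {
      FieldAccess two_bases  = { 2, false, false };
      FieldAccess bot_offset = { 1, true,  false };
      FieldAccess clean      = { 1, false, false };
      assert(demotes_targets(two_bases));   // may point to two objects
      assert(demotes_targets(bot_offset));  // unknown element of an object array
      assert(!demotes_targets(clean));      // precise, LoadStore-free access
      return 0;
    }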
1958 
1959 void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {
1960 
1961     switch (call->Opcode()) {
1962 #ifdef ASSERT
1963     case Op_Allocate:
1964     case Op_AllocateArray:
1965     case Op_Lock:
1966     case Op_Unlock:
1967       assert(false, "should be done already");
1968       break;
1969 #endif
1970     case Op_CallLeaf:
1971     case Op_CallLeafNoFP:
1972     {
 1973       // Stub calls: objects do not escape but they are not scalar replaceable.
1974       // Adjust escape state for outgoing arguments.
1975       const TypeTuple * d = call->tf()->domain();
1976       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1977         const Type* at = d->field_at(i);
1978         Node *arg = call->in(i)->uncast();


2121 
2122     case Op_AllocateArray:
2123     {
2124 
2125       Node *k = call->in(AllocateNode::KlassNode);
2126       const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
2127       assert(kt != NULL, "TypeKlassPtr  required.");
2128       ciKlass* cik = kt->klass();
2129 
2130       PointsToNode::EscapeState es;
2131       uint edge_to;
2132       if (!cik->is_array_klass()) { // StressReflectiveCode
2133         es = PointsToNode::GlobalEscape;
2134         edge_to = _phantom_object;
2135       } else {
2136         es = PointsToNode::NoEscape;
2137         edge_to = call_idx;
2138         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
2139         if (length < 0 || length > EliminateAllocationArraySizeLimit) {
2140           // Not scalar replaceable if the length is not constant or too big.
2141           ptnode_adr(call_idx)->_scalar_replaceable = false;
2142         }
2143       }
2144       set_escape_state(call_idx, es);
2145       add_pointsto_edge(resproj_idx, edge_to);
2146       _processed.set(resproj_idx);
2147       break;
2148     }
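The array allocation is a scalar-replacement candidate only when its length is a compile-time constant no larger than EliminateAllocationArraySizeLimit; find_int_con(-1) yields the constant or -1 when the length is unknown. A toy predicate capturing the same rule (the limit value here is illustrative; the real one is a VM flag):

    #include <cassert>

    const int EliminateAllocationArraySizeLimit = 64;  // illustrative; really a VM flag

    // `length` follows the find_int_con(-1) convention: -1 means "not a constant".
    bool array_alloc_is_scalar_replaceable(int length) {
      return length >= 0 && length <= EliminateAllocationArraySizeLimit;
    }

    int main() {
      assert(array_alloc_is_scalar_replaceable(1));     // small constant length
      assert(!array_alloc_is_scalar_replaceable(-1));   // non-constant length
      assert(!array_alloc_is_scalar_replaceable(1000)); // too big to explode
      return 0;
    }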
2149 
2150     case Op_CallStaticJava:
2151     // For a static call, we know exactly what method is being called.
2152     // Use bytecode estimator to record whether the call's return value escapes
2153     {
2154       bool done = true;
2155       const TypeTuple *r = call->tf()->range();
2156       const Type* ret_type = NULL;
2157 
2158       if (r->cnt() > TypeFunc::Parms)
2159         ret_type = r->field_at(TypeFunc::Parms);
2160 
2161       // Note:  we use isa_ptr() instead of isa_oopptr()  here because the
2162       //        _multianewarray functions return a TypeRawPtr.
2163       if (ret_type == NULL || ret_type->isa_ptr() == NULL) {
2164         _processed.set(resproj_idx);
2165         break;  // doesn't return a pointer type
2166       }
2167       ciMethod *meth = call->as_CallJava()->method();
2168       const TypeTuple * d = call->tf()->domain();
2169       if (meth == NULL) {
2170         // not a Java method, assume global escape
2171         set_escape_state(call_idx, PointsToNode::GlobalEscape);
2172         add_pointsto_edge(resproj_idx, _phantom_object);
2173       } else {
2174         BCEscapeAnalyzer *call_analyzer = meth->get_bcea();
2175         bool copy_dependencies = false;
2176 
2177         if (call_analyzer->is_return_allocated()) {
2178           // Returns a newly allocated unescaped object, simply
2179           // update dependency information.
2180           // Mark it as NoEscape so that objects referenced by
 2181           // its fields will be marked as NoEscape at least.
2182           set_escape_state(call_idx, PointsToNode::NoEscape);
2183           add_pointsto_edge(resproj_idx, call_idx);
2184           copy_dependencies = true;
2185         } else if (call_analyzer->is_return_local()) {
2186           // determine whether any arguments are returned
2187           set_escape_state(call_idx, PointsToNode::NoEscape);
2188           bool ret_arg = false;
2189           for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2190             const Type* at = d->field_at(i);
2191 
2192             if (at->isa_oopptr() != NULL) {
2193               Node *arg = call->in(i)->uncast();
2194 
2195               if (call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2196                 ret_arg = true;
2197                 PointsToNode *arg_esp = ptnode_adr(arg->_idx);
2198                 if (arg_esp->node_type() == PointsToNode::UnknownType)
2199                   done = false;
2200                 else if (arg_esp->node_type() == PointsToNode::JavaObject)
2201                   add_pointsto_edge(resproj_idx, arg->_idx);
2202                 else
2203                   add_deferred_edge(resproj_idx, arg->_idx);
2204                 arg_esp->_hidden_alias = true;
2205               }
2206             }
2207           }
2208           if (done && !ret_arg) {
2209             // Returns unknown object.
2210             set_escape_state(call_idx, PointsToNode::GlobalEscape);
2211             add_pointsto_edge(resproj_idx, _phantom_object);
2212           }

2213           copy_dependencies = true;

2214         } else {
2215           set_escape_state(call_idx, PointsToNode::GlobalEscape);
2216           add_pointsto_edge(resproj_idx, _phantom_object);
2217           for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2218             const Type* at = d->field_at(i);
2219             if (at->isa_oopptr() != NULL) {
2220               Node *arg = call->in(i)->uncast();
2221               PointsToNode *arg_esp = ptnode_adr(arg->_idx);
2222               arg_esp->_hidden_alias = true;
2223             }
2224           }
2225         }
2226         if (copy_dependencies)
2227           call_analyzer->copy_dependencies(_compile->dependencies());
2228       }
2229       if (done)
2230         _processed.set(resproj_idx);
2231       break;
2232     }
2233 
2234     default:
2235     // Some other type of call, assume the worst case that the
2236     // returned value, if any, globally escapes.
2237     {
2238       const TypeTuple *r = call->tf()->range();
2239       if (r->cnt() > TypeFunc::Parms) {
2240         const Type* ret_type = r->field_at(TypeFunc::Parms);
2241 
2242         // Note:  we use isa_ptr() instead of isa_oopptr()  here because the
2243         //        _multianewarray functions return a TypeRawPtr.
2244         if (ret_type->isa_ptr() != NULL) {
2245           set_escape_state(call_idx, PointsToNode::GlobalEscape);

src/share/vm/opto/escape.cpp (new version)

  43   _edges->append_if_missing(v);
  44 }
  45 
  46 void PointsToNode::remove_edge(uint targIdx, PointsToNode::EdgeType et) {
  47   uint v = (targIdx << EdgeShift) + ((uint) et);
  48 
  49   _edges->remove(v);
  50 }
  51 
  52 #ifndef PRODUCT
  53 static const char *node_type_names[] = {
  54   "UnknownType",
  55   "JavaObject",
  56   "LocalVar",
  57   "Field"
  58 };
  59 
  60 static const char *esc_names[] = {
  61   "UnknownEscape",
  62   "NoEscape",
  63   "ControlEscape",
  64   "ArgEscape",
  65   "GlobalEscape"
  66 };
  67 
  68 static const char *edge_type_suffix[] = {
  69  "?", // UnknownEdge
  70  "P", // PointsToEdge
  71  "D", // DeferredEdge
  72  "F"  // FieldEdge
  73 };
  74 
  75 void PointsToNode::dump(bool print_state) const {
  76   NodeType nt = node_type();
  77   tty->print("%s ", node_type_names[(int) nt]);
  78   if (print_state) {
  79     EscapeState es = escape_state();
  80     tty->print("%s ", esc_names[(int) es]);
  81   }
  82   tty->print("[[");
  83   for (uint i = 0; i < edge_count(); i++) {
  84     tty->print(" %d%s", edge_target(i), edge_type_suffix[(int) edge_type(i)]);
  85   }
  86   tty->print("]]  ");
  87   if (_node == NULL)
  88     tty->print_cr("<null>");
  89   else
  90     _node->dump();
  91 }
  92 #endif
  93 
  94 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  95   _nodes(C->comp_arena(), C->unique(), C->unique(), PointsToNode()),
  96   _processed(C->comp_arena()),
  97   pt_ptset(C->comp_arena()),
  98   pt_visited(C->comp_arena()),
  99   pt_worklist(C->comp_arena(), 4, 0, 0),
 100   _collecting(true),


 362   bool deferred = (to->node_type() == PointsToNode::LocalVar);
 363 
 364   for (uint fe = 0; fe < an->edge_count(); fe++) {
 365     assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
 366     int fi = an->edge_target(fe);
 367     PointsToNode* pf = ptnode_adr(fi);
 368     int po = pf->offset();
 369     if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) {
 370       if (deferred)
 371         add_deferred_edge(fi, to_i);
 372       else
 373         add_pointsto_edge(fi, to_i);
 374     }
 375   }
 376 }
 377 
  378 // Add a deferred edge from the node given by "from_i" to any field of "adr_i"
  379 // whose offset matches "offs".
 380 void ConnectionGraph::add_deferred_edge_to_fields(uint from_i, uint adr_i, int offs) {
 381   PointsToNode* an = ptnode_adr(adr_i);
 382   bool is_alloc = an->_node->is_Allocate();
 383   for (uint fe = 0; fe < an->edge_count(); fe++) {
 384     assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
 385     int fi = an->edge_target(fe);
 386     PointsToNode* pf = ptnode_adr(fi);
 387     int offset = pf->offset();
 388     if (!is_alloc) {
  389     // Assume the field was set outside this method if it is not an Allocation node
 390       add_pointsto_edge(fi, _phantom_object);
 391     }
 392     if (offset == offs || offset == Type::OffsetBot || offs == Type::OffsetBot) {
 393       add_deferred_edge(from_i, fi);
 394     }
 395   }
 396 }
 397 
 398 // Helper functions
 399 
 400 static Node* get_addp_base(Node *addp) {
 401   assert(addp->is_AddP(), "must be AddP");
 402   //
 403   // AddP cases for Base and Address inputs:
 404   // case #1. Direct object's field reference:
 405   //     Allocate
 406   //       |
 407   //     Proj #5 ( oop result )
 408   //       |
 409   //     CheckCastPP (cast to instance type)
 410   //      | |
 411   //     AddP  ( base == address )
 412   //


1026 
1027 
1028   //  Phase 1:  Process possible allocations from alloc_worklist.
1029   //  Create instance types for the CheckCastPP for allocations where possible.
1030   //
1031   // (Note: don't forget to change the order of the second AddP node on
1032   //  the alloc_worklist if the order of the worklist processing is changed,
1033   //  see the comment in find_second_addp().)
1034   //
1035   while (alloc_worklist.length() != 0) {
1036     Node *n = alloc_worklist.pop();
1037     uint ni = n->_idx;
1038     const TypeOopPtr* tinst = NULL;
1039     if (n->is_Call()) {
1040       CallNode *alloc = n->as_Call();
1041       // copy escape information to call node
1042       PointsToNode* ptn = ptnode_adr(alloc->_idx);
1043       PointsToNode::EscapeState es = escape_state(alloc);
1044       // We have an allocation or call which returns a Java object,
1045       // see if it is unescaped.
1046       if (es != PointsToNode::NoEscape)
1047         continue;
1048 
1049       // Find CheckCastPP for the allocate or for the return value of a call
1050       n = alloc->result_cast();
1051       if (n == NULL) {            // No uses except Initialize node
1052         if (alloc->is_Allocate()) {
1053           // Set the scalar_replaceable flag for allocation
1054           // so it could be eliminated if it has no uses.
1055           alloc->as_Allocate()->_is_scalar_replaceable = true;
1056         }
1057         continue;
1058       }
1059       if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
1060         assert(!alloc->is_Allocate(), "allocation should have unique type");
1061         continue;
1062       }
1063 
1064       // The inline code for Object.clone() casts the allocation result to
1065       // java.lang.Object and then to the actual type of the allocated
1066       // object. Detect this case and use the second cast.


1087           continue;
1088         }
1089       }
1090       if (alloc->is_Allocate()) {
1091         // Set the scalar_replaceable flag for allocation
1092         // so it could be eliminated.
1093         alloc->as_Allocate()->_is_scalar_replaceable = true;
1094       }
1095       set_escape_state(n->_idx, es);
1096       // in order for an object to be scalar-replaceable, it must be:
1097       //   - a direct allocation (not a call returning an object)
1098       //   - non-escaping
1099       //   - eligible to be a unique type
1100       //   - not determined to be ineligible by escape analysis
1101       assert(ptnode_adr(alloc->_idx)->_node != NULL &&
1102              ptnode_adr(n->_idx)->_node != NULL, "should be registered");
1103       set_map(alloc->_idx, n);
1104       set_map(n->_idx, alloc);
1105       const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
1106       if (t == NULL)
1107         continue;  // not a TypeOopPtr
1108       tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni);
1109       igvn->hash_delete(n);
1110       igvn->set_type(n,  tinst);
1111       n->raise_bottom_type(tinst);
1112       igvn->hash_insert(n);
1113       record_for_optimizer(n);
1114       if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {

1115 
 1116         // First, put on the worklist all Field edges from the Connection Graph,
 1117         // which is more accurate than taking immediate users from the Ideal Graph.
1118         for (uint e = 0; e < ptn->edge_count(); e++) {
1119           Node *use = ptnode_adr(ptn->edge_target(e))->_node;
1120           assert(ptn->edge_type(e) == PointsToNode::FieldEdge && use->is_AddP(),
1121                  "only AddP nodes are Field edges in CG");
1122           if (use->outcnt() > 0) { // Don't process dead nodes
1123             Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
1124             if (addp2 != NULL) {
1125               assert(alloc->is_AllocateArray(),"array allocation was expected");
1126               alloc_worklist.append_if_missing(addp2);
1127             }
1128             alloc_worklist.append_if_missing(use);
1129           }
1130         }
1131 
1132         // An allocation may have an Initialize which has raw stores. Scan
1133         // the users of the raw allocation result and push AddP users
1134         // on alloc_worklist.


1522   // Cleanup.
1523   if (oop_null->outcnt() == 0)
1524     igvn->hash_delete(oop_null);
1525   if (noop_null->outcnt() == 0)
1526     igvn->hash_delete(noop_null);
1527 }
1528 
1529 bool ConnectionGraph::compute_escape() {
1530   Compile* C = _compile;
1531 
1532   // 1. Populate Connection Graph (CG) with Ideal nodes.
1533 
1534   Unique_Node_List worklist_init;
1535   worklist_init.map(C->unique(), NULL);  // preallocate space
1536 
1537   // Initialize worklist
1538   if (C->root() != NULL) {
1539     worklist_init.push(C->root());
1540   }
1541 
1542   GrowableArray<Node*> alloc_worklist;
1543   GrowableArray<int> cg_worklist;
1544   PhaseGVN* igvn = _igvn;
1545   bool has_allocations = false;
1546 
1547   // Push all useful nodes onto CG list and set their type.
1548   for( uint next = 0; next < worklist_init.size(); ++next ) {
1549     Node* n = worklist_init.at(next);
1550     record_for_escape_analysis(n, igvn);
 1551     // Only the results of allocations and Java static calls are checked
 1552     // for an escape status. See process_call_result() below.
 1553     if (n->is_Allocate() || (n->is_CallStaticJava() &&
 1554         ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject)) {
1555       has_allocations = true;
1556       if (n->is_Allocate())
1557         alloc_worklist.append(n);
1558     }
1559     if(n->is_AddP()) {
1560       // Collect address nodes. Use them during stage 3 below
1561       // to build initial connection graph field edges.
1562       cg_worklist.append(n->_idx);
1563     } else if (n->is_MergeMem()) {
1564       // Collect all MergeMem nodes to add memory slices for
1565       // scalar replaceable objects in split_unique_types().
1566       _mergemem_worklist.append(n->as_MergeMem());
1567     }
1568     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1569       Node* m = n->fast_out(i);   // Get user
1570       worklist_init.push(m);
1571     }
1572   }
1573 
1574   if (!has_allocations) {
1575     _collecting = false;
1576     return false; // Nothing to do.
1577   }


1640       PointsToNode* ptn = ptnode_adr(ni);
1641       Node* n = ptn->_node;
1642       assert(n != NULL, "should be known node");
1643       build_connection_graph(n, igvn);
1644     }
1645   }
1646   if (iterations >= CG_BUILD_ITER_LIMIT) {
1647     assert(iterations < CG_BUILD_ITER_LIMIT,
1648            err_msg("infinite EA connection graph build with %d nodes and worklist size %d",
1649            nodes_size(), length));
1650     // Possible infinite build_connection_graph loop,
1651     // retry compilation without escape analysis.
1652     C->record_failure(C2Compiler::retry_no_escape_analysis());
1653     _collecting = false;
1654     return false;
1655   }
1656 #undef CG_BUILD_ITER_LIMIT
1657 
1658   Arena* arena = Thread::current()->resource_area();
1659   VectorSet visited(arena);
1660 
 1661   // 5. Find fields' initializing values for non-escaped allocations
1662   length = alloc_worklist.length();
1663   for (uint next = 0; next < length; ++next) {
1664     Node* n = alloc_worklist.at(next);
1665     if (ptnode_adr(n->_idx)->escape_state() == PointsToNode::NoEscape) {
1666       find_init_values(n, &visited, igvn);
1667     }
1668   }
1669 
1670   worklist.clear();
1671   worklist_init.clear();
1672   alloc_worklist.clear();
1673 
1674   // 6. Remove deferred edges from the graph.

1675   cg_length = cg_worklist.length();
1676   for (uint next = 0; next < cg_length; ++next) {
1677     int ni = cg_worklist.at(next);
1678     PointsToNode* ptn = ptnode_adr(ni);
1679     PointsToNode::NodeType nt = ptn->node_type();
1680     if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
1681       remove_deferred(ni, &worklist, &visited);
1682       Node *n = ptn->_node;
1683       if (n->is_AddP()) {
1684         alloc_worklist.append(n);


1685       }
1686     }
1687   }
1688 
1689   // 7. Adjust escape state of nonescaping objects.
1690   length = alloc_worklist.length();
1691   for (uint next = 0; next < length; ++next) {
1692     Node* n = alloc_worklist.at(next);
1693     adjust_escape_state(n);
1694   }
1695 
1696   // 8. Propagate escape states.
1697   worklist.clear();
1698   bool has_non_escaping_obj = false;
1699 
1700   // mark all nodes reachable from GlobalEscape nodes
1701   (void)propagate_escape_state(&cg_worklist, &worklist, PointsToNode::GlobalEscape);
1702 
1703   // mark all nodes reachable from ArgEscape nodes
1704   has_non_escaping_obj = propagate_escape_state(&cg_worklist, &worklist, PointsToNode::ArgEscape);
1705 
1706   // mark all nodes reachable from ControlEscape nodes
1707   has_non_escaping_obj = propagate_escape_state(&cg_worklist, &worklist, PointsToNode::ControlEscape);
1708 
1709   alloc_worklist.clear();
1710 
1711   // push all NoEscape nodes on the worklist
1712   for( uint next = 0; next < cg_length; ++next ) {
1713     int nk = cg_worklist.at(next);
1714     if (ptnode_adr(nk)->escape_state() == PointsToNode::NoEscape)
1715       worklist.push(nk);
1716   }
1717   // mark all nodes reachable from NoEscape nodes
1718   while(worklist.length() > 0) {
1719     uint nk = worklist.pop();
1720     PointsToNode* ptn = ptnode_adr(nk);
1721     if (ptn->node_type() == PointsToNode::JavaObject &&
1722         !(nk == _noop_null || nk == _oop_null))
1723       has_non_escaping_obj = true; // Non Escape
1724     Node* n = ptn->_node;
1725     if (n->is_Allocate()) {
1726       // Push scalar replaceable allocations on alloc_worklist
1727       // for processing in split_unique_types().
1728       alloc_worklist.append(n);
1729     }
1730     uint e_cnt = ptn->edge_count();
1731     for (uint ei = 0; ei < e_cnt; ei++) {
1732       uint npi = ptn->edge_target(ei);
1733       PointsToNode *np = ptnode_adr(npi);
1734       if (np->escape_state() < PointsToNode::NoEscape) {
1735         set_escape_state(npi, PointsToNode::NoEscape);
1736         worklist.push(npi);
1737       }
1738     }
1739   }
1740 
1741   _collecting = false;
1742   assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build");
1743 
1744   assert(ptnode_adr(_oop_null)->escape_state() == PointsToNode::NoEscape, "sanity");
1745   if (UseCompressedOops) {
1746     assert(ptnode_adr(_noop_null)->escape_state() == PointsToNode::NoEscape, "sanity");
1747   }
1748 
1749   if (EliminateLocks && has_non_escaping_obj) {
1750     // Mark locks before changing ideal graph.
1751     int cnt = C->macro_count();
1752     for( int i=0; i < cnt; i++ ) {
1753       Node *n = C->macro_node(i);
1754       if (n->is_AbstractLock()) { // Lock and Unlock nodes
1755         AbstractLockNode* alock = n->as_AbstractLock();
1756         if (!alock->is_eliminated()) {
1757           PointsToNode::EscapeState es = escape_state(alock->obj_node());
1758           assert(es != PointsToNode::UnknownEscape, "should know");
1759           if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
1760             // Mark it eliminated
1761             alock->set_eliminated();
1762           }
1763         }
1764       }
1765     }
1766   }
1767 
1768 #ifndef PRODUCT
1769   if (PrintEscapeAnalysis) {


1783 
1784     C->print_method("After Escape Analysis", 2);
1785 
1786 #ifdef ASSERT
1787   } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
1788     tty->print("=== No allocations eliminated for ");
1789     C->method()->print_short_name();
1790     if(!EliminateAllocations) {
1791       tty->print(" since EliminateAllocations is off ===");
1792     } else if(!has_scalar_replaceable_candidates) {
1793       tty->print(" since there are no scalar replaceable candidates ===");
1794     } else if(C->AliasLevel() < 3) {
1795       tty->print(" since AliasLevel < 3 ===");
1796     }
1797     tty->cr();
1798 #endif
1799   }
1800   return has_non_escaping_obj;
1801 }
1802 
 1803 // Find fields' initializing values for allocations.
1804 void ConnectionGraph::find_init_values(Node* alloc, VectorSet* visited, PhaseTransform* phase) {
1805   assert(alloc->is_Allocate(), "Should be called for Allocate nodes only");
1806   PointsToNode* pta = ptnode_adr(alloc->_idx);
 1806   assert(pta->escape_state() == PointsToNode::NoEscape, "Non-escaped Allocate nodes only");
1808   InitializeNode* ini = alloc->as_Allocate()->initialization();
1809 
1810   Compile* C = _compile;
1811   visited->Reset();
 1812   // Check if an oop field's initializing value is recorded and add
 1813   // a corresponding NULL field's value if it is not recorded.
1814   // Connection Graph does not record a default initialization by NULL
1815   // captured by Initialize node.
1816   //
1817   uint ae_cnt = pta->edge_count();
1818   for (uint ei = 0; ei < ae_cnt; ei++) {
1819     uint nidx = pta->edge_target(ei); // Field (AddP)
1820     PointsToNode* ptn = ptnode_adr(nidx);
1821     assert(ptn->_node->is_AddP(), "Should be AddP nodes only");
1822     int offset = ptn->offset();
1823     if (offset != Type::OffsetBot &&
1824         offset != oopDesc::klass_offset_in_bytes() &&
1825         !visited->test_set(offset)) {
1826 
1827       // Check only oop fields.
1828       const Type* adr_type = ptn->_node->as_AddP()->bottom_type();
1829       BasicType basic_field_type = T_INT;
1830       if (adr_type->isa_instptr()) {
1831         ciField* field = C->alias_type(adr_type->isa_instptr())->field();
1832         if (field != NULL) {
1833           basic_field_type = field->layout_type();
1834         } else {
 1835           // Ignore a non-field load (for example, a klass load)
1836         }
1837       } else if (adr_type->isa_aryptr()) {
1838         if (offset != arrayOopDesc::length_offset_in_bytes()) {
1839           const Type* elemtype = adr_type->isa_aryptr()->elem();
1840           basic_field_type = elemtype->array_element_basic_type();
1841         } else {
1842           // Ignore array length load
1843         }
1844 #ifdef ASSERT
1845       } else {
1846         // Raw pointers are used for initializing stores so skip it
1847         // since it should be recorded already
1848         Node* base = get_addp_base(ptn->_node);
1849         assert(adr_type->isa_rawptr() && base->is_Proj() &&
1850                (base->in(0) == alloc),"unexpected pointer type");
1851 #endif
1852       }
1853       if (basic_field_type == T_OBJECT ||
1854           basic_field_type == T_NARROWOOP ||
1855           basic_field_type == T_ARRAY) {
1856         Node* value = NULL;
1857         if (ini != NULL) {
1858           BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
1859           Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
1860           if (store != NULL && store->is_Store()) {
1861             value = store->in(MemNode::ValueIn);
1862           } else if (ptn->edge_count() > 0) { // Are there oop stores?
1863             // Check for a store which follows allocation without branches.
1864             // For example, a volatile field store is not collected
1865             // by Initialize node. TODO: it would be nice to use idom() here.
1866             //
1867             // Search all references to the same field which use different
1868             // AddP nodes, for example, in the next case:
1869             //
1870             //    Point p[] = new Point[1];
1871             //    if ( x ) { p[0] = new Point(); p[0].x = x; }
1872             //    if ( p[0] != null ) { y = p[0].x; } // has CastPP
1873             //
1874             for (uint next = ei; (next < ae_cnt) && (value == NULL); next++) {
1875               uint fpi = pta->edge_target(next); // Field (AddP)
1876               PointsToNode *ptf = ptnode_adr(fpi);
1877               if (ptf->offset() == offset) {
1878                 Node* nf = ptf->_node;
1879                 for (DUIterator_Fast imax, i = nf->fast_outs(imax); i < imax; i++) {
1880                   store = nf->fast_out(i);
1881                   if (store->is_Store() && store->in(0) != NULL) {
1882                     Node* ctrl = store->in(0);
1883                     while(!(ctrl == ini || ctrl == alloc || ctrl == NULL ||
1884                             ctrl == C->root() || ctrl == C->top() || ctrl->is_Region() ||
1885                             ctrl->is_IfTrue() || ctrl->is_IfFalse())) {
1886                        ctrl = ctrl->in(0);
1887                     }
1888                     if (ctrl == ini || ctrl == alloc) {
1889                       value = store->in(MemNode::ValueIn);
1890                       break;
1891                     }
1892                   }
1893                 }
1894               }
1895             }
1896           }
1897         }
1898         if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
1899           // A field's initializing value was not recorded. Add NULL.
1900           uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
1901           add_edge_from_fields(alloc->_idx, null_idx, offset);
1902         }
1903       }
1904     }
1905   }
1906 }
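The net effect of find_init_values() is a default-NULL rule: for each distinct oop-field offset of a non-escaping allocation, if no initializing store can be proven (captured by the Initialize node or found straight-line after the allocation), the field is assumed to possibly hold NULL. A standalone sketch of that rule over a toy field table (the names are illustrative, not HotSpot API):

    #include <cassert>
    #include <map>
    #include <vector>

    // Toy model: oop-field offset -> was an initializing store proven?
    typedef std::map<int, bool> FieldStores;

    // Offsets that must also point to null, mirroring
    // add_edge_from_fields(alloc->_idx, null_idx, offset) above.
    std::vector<int> offsets_needing_null(const FieldStores& fields) {
      std::vector<int> need_null;
      for (FieldStores::const_iterator it = fields.begin(); it != fields.end(); ++it) {
        if (!it->second) need_null.push_back(it->first);
      }
      return need_null;
    }

    int main() {
      FieldStores f;
      f[12] = true;   // store captured by the Initialize node
      f[16] = false;  // e.g. a store under `if (x)` only: not provably initialized
      std::vector<int> n = offsets_needing_null(f);
      assert(n.size() == 1 && n[0] == 16);
      return 0;
    }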
1907 
1908 // Adjust escape state after Connection Graph is built.
1909 void ConnectionGraph::adjust_escape_state(Node* n) {
1910   PointsToNode* ptn = ptnode_adr(n->_idx);
1911   assert(n->is_AddP(), "Should be called for AddP nodes only");
1912   // Search for objects which are not scalar replaceable.
1913   // Mark their escape state as ControlEscape to propagate the state
1914   // to referenced objects.
1915 
1916   int offset = ptn->offset();
1917   Node* base = get_addp_base(n);
1918   VectorSet* ptset = PointsTo(base);
1919   int ptset_size = ptset->Size();
1920 
1921   // An object is not scalar replaceable if the field which may point
 1922   // to it has an unknown offset (unknown element of an array of objects).
1923   //
1924 
1925   if (offset == Type::OffsetBot) {
1926     uint e_cnt = ptn->edge_count();
1927     for (uint ei = 0; ei < e_cnt; ei++) {
1928       uint npi = ptn->edge_target(ei);
1929       set_escape_state(npi, PointsToNode::ControlEscape);

1930     }
1931   }
1932 
 1933   // Currently an object is not scalar replaceable if a LoadStore node
 1934   // accesses its field since the field value is unknown after it.
1935   //
1936   bool has_LoadStore = false;
1937   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1938     Node *use = n->fast_out(i);
1939     if (use->is_LoadStore()) {
1940       has_LoadStore = true;
1941       break;
1942     }
1943   }
 1944   // An object is not scalar replaceable if the address points
 1945   // to an unknown field (unknown element for arrays, offset is OffsetBot).
 1946   //
 1947   // Or the address may point to more than one object. This may produce
 1948   // a false-positive result (set escape to ControlEscape)
1949   // since the flow-insensitive escape analysis can't separate
1950   // the case when stores overwrite the field's value from the case
1951   // when stores happened on different control branches.
1952   //
1953   // Note: it will disable scalar replacement in some cases:
1954   //
1955   //    Point p[] = new Point[1];
 1956   //    p[0] = new Point(); // Will not be scalar replaced
1957   //
 1958   // but it will save us from incorrect optimizations in the following case:
1959   //
1960   //    Point p[] = new Point[1];
 1961   //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
1962   //
 1963   if (ptset_size > 1 || (ptset_size != 0 &&
 1964       (has_LoadStore || offset == Type::OffsetBot))) {
1965     for( VectorSetI j(ptset); j.test(); ++j ) {
1966       set_escape_state(j.elem, PointsToNode::ControlEscape);

1967     }
1968   }
1969 }
1970 
1971 // Propagate escape states to referenced nodes.
1972 bool ConnectionGraph::propagate_escape_state(GrowableArray<int>* cg_worklist,
1973                                              GrowableArray<uint>* worklist,
1974                                              PointsToNode::EscapeState esc_state) {
1975   bool has_java_obj = false;
1976 
1977   // push all nodes with the same escape state on the worklist
1978   uint cg_length = cg_worklist->length();
1979   for (uint next = 0; next < cg_length; ++next) {
1980     int nk = cg_worklist->at(next);
1981     if (ptnode_adr(nk)->escape_state() == esc_state)
1982       worklist->push(nk);
1983   }
1984   // mark all reachable nodes
1985   while (worklist->length() > 0) {
1986     PointsToNode* ptn = ptnode_adr(worklist->pop());
1987     if (ptn->node_type() == PointsToNode::JavaObject) {
1988       has_java_obj = true;
1989     }
1990     uint e_cnt = ptn->edge_count();
1991     for (uint ei = 0; ei < e_cnt; ei++) {
1992       uint npi = ptn->edge_target(ei);
1993       PointsToNode *np = ptnode_adr(npi);
1994       if (np->escape_state() < esc_state) {
1995         set_escape_state(npi, esc_state);
1996         worklist->push(npi);
1997       }
1998     }
1999   }
 2000   // Report whether a non-GlobalEscape Java object was found.
2001   return has_java_obj && (esc_state < PointsToNode::GlobalEscape);
2002 }
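propagate_escape_state() is a plain worklist walk: seed every node already at esc_state, then raise any reachable node sitting below that state, pushing each raised node once. Since states only ever go up, the walk terminates. A self-contained sketch of the same monotone propagation over a toy adjacency list (states ordered NoEscape < ControlEscape < ArgEscape < GlobalEscape, matching the new esc_names order):

    #include <cassert>
    #include <vector>

    enum Escape { NoEscape = 0, ControlEscape = 1, ArgEscape = 2, GlobalEscape = 3 };

    // Raise every node reachable from a node already at `level` up to `level`.
    void propagate(const std::vector<std::vector<int> >& edges,
                   std::vector<Escape>& state, Escape level) {
      std::vector<int> worklist;
      for (int n = 0; n < (int)state.size(); n++) {
        if (state[n] == level) worklist.push_back(n);
      }
      while (!worklist.empty()) {
        int n = worklist.back(); worklist.pop_back();
        for (size_t i = 0; i < edges[n].size(); i++) {
          int m = edges[n][i];
          if (state[m] < level) {   // monotone: never lowers a state
            state[m] = level;
            worklist.push_back(m);
          }
        }
      }
    }

    int main() {
      // 0 -> 1 -> 2, node 0 escapes globally; node 3 is unreachable.
      std::vector<std::vector<int> > edges(4);
      edges[0].push_back(1);
      edges[1].push_back(2);
      std::vector<Escape> state(4, NoEscape);
      state[0] = GlobalEscape;
      propagate(edges, state, GlobalEscape);
      assert(state[1] == GlobalEscape && state[2] == GlobalEscape);
      assert(state[3] == NoEscape);
      return 0;
    }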
2003 
2004 void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {
2005 
2006     switch (call->Opcode()) {
2007 #ifdef ASSERT
2008     case Op_Allocate:
2009     case Op_AllocateArray:
2010     case Op_Lock:
2011     case Op_Unlock:
2012       assert(false, "should be done already");
2013       break;
2014 #endif
2015     case Op_CallLeaf:
2016     case Op_CallLeafNoFP:
2017     {
 2018       // Stub calls: objects do not escape but they are not scalar replaceable.
2019       // Adjust escape state for outgoing arguments.
2020       const TypeTuple * d = call->tf()->domain();
2021       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2022         const Type* at = d->field_at(i);
2023         Node *arg = call->in(i)->uncast();


2166 
2167     case Op_AllocateArray:
2168     {
2169 
2170       Node *k = call->in(AllocateNode::KlassNode);
2171       const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
2172       assert(kt != NULL, "TypeKlassPtr  required.");
2173       ciKlass* cik = kt->klass();
2174 
2175       PointsToNode::EscapeState es;
2176       uint edge_to;
2177       if (!cik->is_array_klass()) { // StressReflectiveCode
2178         es = PointsToNode::GlobalEscape;
2179         edge_to = _phantom_object;
2180       } else {
2181         es = PointsToNode::NoEscape;
2182         edge_to = call_idx;
2183         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
2184         if (length < 0 || length > EliminateAllocationArraySizeLimit) {
2185           // Not scalar replaceable if the length is not constant or too big.
2186           es = PointsToNode::ControlEscape;
2187         }
2188       }
2189       set_escape_state(call_idx, es);
2190       add_pointsto_edge(resproj_idx, edge_to);
2191       _processed.set(resproj_idx);
2192       break;
2193     }
2194 
2195     case Op_CallStaticJava:
2196     // For a static call, we know exactly what method is being called.
2197     // Use bytecode estimator to record whether the call's return value escapes
2198     {
2199       bool done = true;
2200       const TypeTuple *r = call->tf()->range();
2201       const Type* ret_type = NULL;
2202 
2203       if (r->cnt() > TypeFunc::Parms)
2204         ret_type = r->field_at(TypeFunc::Parms);
2205 
2206       // Note:  we use isa_ptr() instead of isa_oopptr()  here because the
2207       //        _multianewarray functions return a TypeRawPtr.
2208       if (ret_type == NULL || ret_type->isa_ptr() == NULL) {
2209         _processed.set(resproj_idx);
2210         break;  // doesn't return a pointer type
2211       }
2212       ciMethod *meth = call->as_CallJava()->method();
2213       const TypeTuple * d = call->tf()->domain();
2214       if (meth == NULL) {
2215         // not a Java method, assume global escape
2216         set_escape_state(call_idx, PointsToNode::GlobalEscape);
2217         add_pointsto_edge(resproj_idx, _phantom_object);
2218       } else {
2219         BCEscapeAnalyzer *call_analyzer = meth->get_bcea();
2220         bool copy_dependencies = false;
2221 
2222         if (call_analyzer->is_return_allocated()) {
2223           // Returns a newly allocated unescaped object, simply
2224           // update dependency information.
2225           // Mark it as ControlEscape so that objects referenced by
 2226           // its fields will be marked as ControlEscape at least.
2227           set_escape_state(call_idx, PointsToNode::ControlEscape);
2228           add_pointsto_edge(resproj_idx, call_idx);
2229           copy_dependencies = true;
2230         } else if (call_analyzer->is_return_local()) {
2231           // determine whether any arguments are returned
2232           set_escape_state(call_idx, PointsToNode::ArgEscape);
2233           bool ret_arg = false;
2234           for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2235             const Type* at = d->field_at(i);
2236 
2237             if (at->isa_oopptr() != NULL) {
2238               Node *arg = call->in(i)->uncast();
2239 
2240               if (call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2241                 ret_arg = true;
2242                 PointsToNode *arg_esp = ptnode_adr(arg->_idx);
2243                 if (arg_esp->node_type() == PointsToNode::UnknownType)
2244                   done = false;
2245                 else if (arg_esp->node_type() == PointsToNode::JavaObject)
2246                   add_pointsto_edge(resproj_idx, arg->_idx);
2247                 else
2248                   add_deferred_edge(resproj_idx, arg->_idx);

2249               }
2250             }
2251           }
2252           if (done && !ret_arg) {
2253             // Returns unknown object.
2254             set_escape_state(call_idx, PointsToNode::GlobalEscape);
2255             add_pointsto_edge(resproj_idx, _phantom_object);
2256           }
2257           if (done) {
2258             copy_dependencies = true;
2259           }
2260         } else {
2261           set_escape_state(call_idx, PointsToNode::GlobalEscape);
2262           add_pointsto_edge(resproj_idx, _phantom_object);
2263         }


2264         if (copy_dependencies)
2265           call_analyzer->copy_dependencies(_compile->dependencies());
2266       }
2267       if (done)
2268         _processed.set(resproj_idx);
2269       break;
2270     }
2271 
2272     default:
2273     // Some other type of call, assume the worst case that the
2274     // returned value, if any, globally escapes.
2275     {
2276       const TypeTuple *r = call->tf()->range();
2277       if (r->cnt() > TypeFunc::Parms) {
2278         const Type* ret_type = r->field_at(TypeFunc::Parms);
2279 
2280         // Note:  we use isa_ptr() instead of isa_oopptr()  here because the
2281         //        _multianewarray functions return a TypeRawPtr.
2282         if (ret_type->isa_ptr() != NULL) {
2283           set_escape_state(call_idx, PointsToNode::GlobalEscape);

