1 /*
   2  * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "libadt/vectset.hpp"
  28 #include "memory/allocation.hpp"
  29 #include "opto/c2compiler.hpp"
  30 #include "opto/callnode.hpp"
  31 #include "opto/cfgnode.hpp"
  32 #include "opto/compile.hpp"
  33 #include "opto/escape.hpp"
  34 #include "opto/phaseX.hpp"
  35 #include "opto/rootnode.hpp"
  36 
  37 void PointsToNode::add_edge(uint targIdx, PointsToNode::EdgeType et) {
  38   uint v = (targIdx << EdgeShift) + ((uint) et);
  39   if (_edges == NULL) {
  40      Arena *a = Compile::current()->comp_arena();
  41     _edges = new(a) GrowableArray<uint>(a, INITIAL_EDGE_COUNT, 0, 0);
  42   }
  43   _edges->append_if_missing(v);
  44 }
  45 
  46 void PointsToNode::remove_edge(uint targIdx, PointsToNode::EdgeType et) {
  47   uint v = (targIdx << EdgeShift) + ((uint) et);
  48 
  49   _edges->remove(v);
  50 }
  51 
  52 #ifndef PRODUCT
  53 static const char *node_type_names[] = {
  54   "UnknownType",
  55   "JavaObject",
  56   "LocalVar",
  57   "Field"
  58 };
  59 
  60 static const char *esc_names[] = {
  61   "UnknownEscape",
  62   "NoEscape",
  63   "ArgEscape",
  64   "GlobalEscape"
  65 };
  66 
  67 static const char *edge_type_suffix[] = {
  68  "?", // UnknownEdge
  69  "P", // PointsToEdge
  70  "D", // DeferredEdge
  71  "F"  // FieldEdge
  72 };
  73 
  74 void PointsToNode::dump(bool print_state) const {
  75   NodeType nt = node_type();
  76   tty->print("%s ", node_type_names[(int) nt]);
  77   if (print_state) {
  78     EscapeState es = escape_state();
  79     tty->print("%s %s ", esc_names[(int) es], _scalar_replaceable ? "":"NSR");
  80   }
  81   tty->print("[[");
  82   for (uint i = 0; i < edge_count(); i++) {
  83     tty->print(" %d%s", edge_target(i), edge_type_suffix[(int) edge_type(i)]);
  84   }
  85   tty->print("]]  ");
  86   if (_node == NULL)
  87     tty->print_cr("<null>");
  88   else
  89     _node->dump();
  90 }
  91 #endif
  92 
  93 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  94   _nodes(C->comp_arena(), C->unique(), C->unique(), PointsToNode()),
  95   _processed(C->comp_arena()),
  96   pt_ptset(C->comp_arena()),
  97   pt_visited(C->comp_arena()),
  98   pt_worklist(C->comp_arena(), 4, 0, 0),
  99   _collecting(true),
 100   _progress(false),
 101   _compile(C),
 102   _igvn(igvn),
 103   _node_map(C->comp_arena()) {
 104 
 105   _phantom_object = C->top()->_idx,
 106   add_node(C->top(), PointsToNode::JavaObject, PointsToNode::GlobalEscape,true);
 107 
 108   // Add ConP(#NULL) and ConN(#NULL) nodes.
 109   Node* oop_null = igvn->zerocon(T_OBJECT);
 110   _oop_null = oop_null->_idx;
 111   assert(_oop_null < nodes_size(), "should be created already");
 112   add_node(oop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);
 113 
 114   if (UseCompressedOops) {
 115     Node* noop_null = igvn->zerocon(T_NARROWOOP);
 116     _noop_null = noop_null->_idx;
 117     assert(_noop_null < nodes_size(), "should be created already");
 118     add_node(noop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);
 119   } else {
 120     _noop_null = _oop_null; // Should be initialized
 121   }
 122   if (OptimizePtrCompare) {
 123     // Add ConI(#CC_GT) and ConI(#CC_EQ).
 124     _pcmp_neq = igvn->makecon(TypeInt::CC_GT);
 125     assert(_pcmp_neq->_idx < C->unique(), "should be created already");
 126 
 127     _pcmp_eq = igvn->makecon(TypeInt::CC_EQ);
 128     assert(_pcmp_eq->_idx < C->unique(), "should be created already");
 129   } else {
 130     _pcmp_neq = NULL; // Should be initialized
 131     _pcmp_eq  = NULL;
 132   }
 133 }
 134 
 135 void ConnectionGraph::add_pointsto_edge(uint from_i, uint to_i) {
 136   PointsToNode *f = ptnode_adr(from_i);
 137   PointsToNode *t = ptnode_adr(to_i);
 138 
 139   assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
 140   assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of PointsTo edge");
 141   assert(t->node_type() == PointsToNode::JavaObject, "invalid destination of PointsTo edge");
 142   add_edge(f, to_i, PointsToNode::PointsToEdge);
 143 }
 144 
 145 void ConnectionGraph::add_deferred_edge(uint from_i, uint to_i) {
 146   PointsToNode *f = ptnode_adr(from_i);
 147   PointsToNode *t = ptnode_adr(to_i);
 148 
 149   assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
 150   assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of Deferred edge");
 151   assert(t->node_type() == PointsToNode::LocalVar || t->node_type() == PointsToNode::Field, "invalid destination of Deferred edge");
 152   // don't add a self-referential edge, this can occur during removal of
 153   // deferred edges
 154   if (from_i != to_i)
 155     add_edge(f, to_i, PointsToNode::DeferredEdge);
 156 }
 157 
 158 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
 159   const Type *adr_type = phase->type(adr);
 160   if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
 161       adr->in(AddPNode::Address)->is_Proj() &&
 162       adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
 163     // We are computing a raw address for a store captured by an Initialize
 164     // compute an appropriate address type. AddP cases #3 and #5 (see below).
 165     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
 166     assert(offs != Type::OffsetBot ||
 167            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
 168            "offset must be a constant or it is initialization of array");
 169     return offs;
 170   }
 171   const TypePtr *t_ptr = adr_type->isa_ptr();
 172   assert(t_ptr != NULL, "must be a pointer type");
 173   return t_ptr->offset();
 174 }
 175 
 176 void ConnectionGraph::add_field_edge(uint from_i, uint to_i, int offset) {
 177   PointsToNode *f = ptnode_adr(from_i);
 178   PointsToNode *t = ptnode_adr(to_i);
 179 
 180   assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
 181   assert(f->node_type() == PointsToNode::JavaObject, "invalid destination of Field edge");
 182   assert(t->node_type() == PointsToNode::Field, "invalid destination of Field edge");
 183   assert (t->offset() == -1 || t->offset() == offset, "conflicting field offsets");
 184   t->set_offset(offset);
 185 
 186   add_edge(f, to_i, PointsToNode::FieldEdge);
 187 }
 188 
 189 void ConnectionGraph::set_escape_state(uint ni, PointsToNode::EscapeState es) {
 190   // Don't change non-escaping state of NULL pointer.
 191   if (ni == _noop_null || ni == _oop_null)
 192     return;
 193   PointsToNode *npt = ptnode_adr(ni);
 194   PointsToNode::EscapeState old_es = npt->escape_state();
 195   if (es > old_es)
 196     npt->set_escape_state(es);
 197 }
 198 
 199 void ConnectionGraph::add_node(Node *n, PointsToNode::NodeType nt,
 200                                PointsToNode::EscapeState es, bool done) {
 201   PointsToNode* ptadr = ptnode_adr(n->_idx);
 202   ptadr->_node = n;
 203   ptadr->set_node_type(nt);
 204 
 205   // inline set_escape_state(idx, es);
 206   PointsToNode::EscapeState old_es = ptadr->escape_state();
 207   if (es > old_es)
 208     ptadr->set_escape_state(es);
 209 
 210   if (done)
 211     _processed.set(n->_idx);
 212 }
 213 
 214 PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n) {
 215   uint idx = n->_idx;
 216   PointsToNode::EscapeState es;
 217 
 218   // If we are still collecting or there were no non-escaping allocations
 219   // we don't know the answer yet
 220   if (_collecting)
 221     return PointsToNode::UnknownEscape;
 222 
 223   // if the node was created after the escape computation, return
 224   // UnknownEscape
 225   if (idx >= nodes_size())
 226     return PointsToNode::UnknownEscape;
 227 
 228   es = ptnode_adr(idx)->escape_state();
 229 
 230   // if we have already computed a value, return it
 231   if (es != PointsToNode::UnknownEscape &&
 232       ptnode_adr(idx)->node_type() == PointsToNode::JavaObject)
 233     return es;
 234 
 235   // PointsTo() calls n->uncast() which can return a new ideal node.
 236   if (n->uncast()->_idx >= nodes_size())
 237     return PointsToNode::UnknownEscape;
 238 
 239   PointsToNode::EscapeState orig_es = es;
 240 
 241   // compute max escape state of anything this node could point to
 242   for(VectorSetI i(PointsTo(n)); i.test() && es != PointsToNode::GlobalEscape; ++i) {
 243     uint pt = i.elem;
 244     PointsToNode::EscapeState pes = ptnode_adr(pt)->escape_state();
 245     if (pes > es)
 246       es = pes;
 247   }
 248   if (orig_es != es) {
 249     // cache the computed escape state
 250     assert(es > orig_es, "should have computed an escape state");
 251     set_escape_state(idx, es);
 252   } // orig_es could be PointsToNode::UnknownEscape
 253   return es;
 254 }
 255 
 256 VectorSet* ConnectionGraph::PointsTo(Node * n) {
 257   pt_ptset.Reset();
 258   pt_visited.Reset();
 259   pt_worklist.clear();
 260 
 261 #ifdef ASSERT
 262   Node *orig_n = n;
 263 #endif
 264 
 265   n = n->uncast();
 266   PointsToNode* npt = ptnode_adr(n->_idx);
 267 
 268   // If we have a JavaObject, return just that object
 269   if (npt->node_type() == PointsToNode::JavaObject) {
 270     pt_ptset.set(n->_idx);
 271     return &pt_ptset;
 272   }
 273 #ifdef ASSERT
 274   if (npt->_node == NULL) {
 275     if (orig_n != n)
 276       orig_n->dump();
 277     n->dump();
 278     assert(npt->_node != NULL, "unregistered node");
 279   }
 280 #endif
 281   pt_worklist.push(n->_idx);
 282   while(pt_worklist.length() > 0) {
 283     int ni = pt_worklist.pop();
 284     if (pt_visited.test_set(ni))
 285       continue;
 286 
 287     PointsToNode* pn = ptnode_adr(ni);
 288     // ensure that all inputs of a Phi have been processed
 289     assert(!_collecting || !pn->_node->is_Phi() || _processed.test(ni),"");
 290 
 291     int edges_processed = 0;
 292     uint e_cnt = pn->edge_count();
 293     for (uint e = 0; e < e_cnt; e++) {
 294       uint etgt = pn->edge_target(e);
 295       PointsToNode::EdgeType et = pn->edge_type(e);
 296       if (et == PointsToNode::PointsToEdge) {
 297         pt_ptset.set(etgt);
 298         edges_processed++;
 299       } else if (et == PointsToNode::DeferredEdge) {
 300         pt_worklist.push(etgt);
 301         edges_processed++;
 302       } else {
 303         assert(false,"neither PointsToEdge or DeferredEdge");
 304       }
 305     }
 306     if (edges_processed == 0) {
 307       // no deferred or pointsto edges found.  Assume the value was set
 308       // outside this method.  Add the phantom object to the pointsto set.
 309       pt_ptset.set(_phantom_object);
 310     }
 311   }
 312   return &pt_ptset;
 313 }
 314 
 315 void ConnectionGraph::remove_deferred(uint ni, GrowableArray<uint>* deferred_edges, VectorSet* visited) {
 316   // This method is most expensive during ConnectionGraph construction.
 317   // Reuse vectorSet and an additional growable array for deferred edges.
 318   deferred_edges->clear();
 319   visited->Reset();
 320 
 321   visited->set(ni);
 322   PointsToNode *ptn = ptnode_adr(ni);
 323 
 324   // Mark current edges as visited and move deferred edges to separate array.
 325   for (uint i = 0; i < ptn->edge_count(); ) {
 326     uint t = ptn->edge_target(i);
 327 #ifdef ASSERT
 328     assert(!visited->test_set(t), "expecting no duplications");
 329 #else
 330     visited->set(t);
 331 #endif
 332     if (ptn->edge_type(i) == PointsToNode::DeferredEdge) {
 333       ptn->remove_edge(t, PointsToNode::DeferredEdge);
 334       deferred_edges->append(t);
 335     } else {
 336       i++;
 337     }
 338   }
 339   for (int next = 0; next < deferred_edges->length(); ++next) {
 340     uint t = deferred_edges->at(next);
 341     PointsToNode *ptt = ptnode_adr(t);
 342     uint e_cnt = ptt->edge_count();
 343     for (uint e = 0; e < e_cnt; e++) {
 344       uint etgt = ptt->edge_target(e);
 345       if (visited->test_set(etgt))
 346         continue;
 347 
 348       PointsToNode::EdgeType et = ptt->edge_type(e);
 349       if (et == PointsToNode::PointsToEdge) {
 350         add_pointsto_edge(ni, etgt);
 351         if(etgt == _phantom_object) {
 352           // Special case - field set outside (globally escaping).
 353           set_escape_state(ni, PointsToNode::GlobalEscape);
 354         }
 355       } else if (et == PointsToNode::DeferredEdge) {
 356         deferred_edges->append(etgt);
 357       } else {
 358         assert(false,"invalid connection graph");
 359       }
 360     }
 361   }
 362 }
 363 
 364 
 365 //  Add an edge to node given by "to_i" from any field of adr_i whose offset
 366 //  matches "offset"  A deferred edge is added if to_i is a LocalVar, and
 367 //  a pointsto edge is added if it is a JavaObject
 368 
 369 void ConnectionGraph::add_edge_from_fields(uint adr_i, uint to_i, int offs) {
 370   PointsToNode* an = ptnode_adr(adr_i);
 371   PointsToNode* to = ptnode_adr(to_i);
 372   bool deferred = (to->node_type() == PointsToNode::LocalVar);
 373 
 374   for (uint fe = 0; fe < an->edge_count(); fe++) {
 375     assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
 376     int fi = an->edge_target(fe);
 377     PointsToNode* pf = ptnode_adr(fi);
 378     int po = pf->offset();
 379     if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) {
 380       if (deferred)
 381         add_deferred_edge(fi, to_i);
 382       else
 383         add_pointsto_edge(fi, to_i);
 384     }
 385   }
 386 }
 387 
 388 // Add a deferred  edge from node given by "from_i" to any field of adr_i
 389 // whose offset matches "offset".
 390 void ConnectionGraph::add_deferred_edge_to_fields(uint from_i, uint adr_i, int offs) {
 391   PointsToNode* an = ptnode_adr(adr_i);
 392   bool is_alloc = an->_node->is_Allocate();
 393   for (uint fe = 0; fe < an->edge_count(); fe++) {
 394     assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
 395     int fi = an->edge_target(fe);
 396     PointsToNode* pf = ptnode_adr(fi);
 397     int offset = pf->offset();
 398     if (!is_alloc) {
 399       // Assume the field was set outside this method if it is not Allocation
 400       add_pointsto_edge(fi, _phantom_object);
 401     }
 402     if (offset == offs || offset == Type::OffsetBot || offs == Type::OffsetBot) {
 403       add_deferred_edge(from_i, fi);
 404     }
 405   }
 406 }
 407 
 408 // Helper functions
 409 
 410 static Node* get_addp_base(Node *addp) {
 411   assert(addp->is_AddP(), "must be AddP");
 412   //
 413   // AddP cases for Base and Address inputs:
 414   // case #1. Direct object's field reference:
 415   //     Allocate
 416   //       |
 417   //     Proj #5 ( oop result )
 418   //       |
 419   //     CheckCastPP (cast to instance type)
 420   //      | |
 421   //     AddP  ( base == address )
 422   //
 423   // case #2. Indirect object's field reference:
 424   //      Phi
 425   //       |
 426   //     CastPP (cast to instance type)
 427   //      | |
 428   //     AddP  ( base == address )
 429   //
 430   // case #3. Raw object's field reference for Initialize node:
 431   //      Allocate
 432   //        |
 433   //      Proj #5 ( oop result )
 434   //  top   |
 435   //     \  |
 436   //     AddP  ( base == top )
 437   //
 438   // case #4. Array's element reference:
 439   //   {CheckCastPP | CastPP}
 440   //     |  | |
 441   //     |  AddP ( array's element offset )
 442   //     |  |
 443   //     AddP ( array's offset )
 444   //
 445   // case #5. Raw object's field reference for arraycopy stub call:
 446   //          The inline_native_clone() case when the arraycopy stub is called
 447   //          after the allocation before Initialize and CheckCastPP nodes.
 448   //      Allocate
 449   //        |
 450   //      Proj #5 ( oop result )
 451   //       | |
 452   //       AddP  ( base == address )
 453   //
 454   // case #6. Constant Pool, ThreadLocal, CastX2P or
 455   //          Raw object's field reference:
 456   //      {ConP, ThreadLocal, CastX2P, raw Load}
 457   //  top   |
 458   //     \  |
 459   //     AddP  ( base == top )
 460   //
 461   // case #7. Klass's field reference.
 462   //      LoadKlass
 463   //       | |
 464   //       AddP  ( base == address )
 465   //
 466   // case #8. narrow Klass's field reference.
 467   //      LoadNKlass
 468   //       |
 469   //      DecodeN
 470   //       | |
 471   //       AddP  ( base == address )
 472   //
 473   Node *base = addp->in(AddPNode::Base)->uncast();
 474   if (base->is_top()) { // The AddP case #3 and #6.
 475     base = addp->in(AddPNode::Address)->uncast();
 476     while (base->is_AddP()) {
 477       // Case #6 (unsafe access) may have several chained AddP nodes.
 478       assert(base->in(AddPNode::Base)->is_top(), "expected unsafe access address only");
 479       base = base->in(AddPNode::Address)->uncast();
 480     }
 481     assert(base->Opcode() == Op_ConP || base->Opcode() == Op_ThreadLocal ||
 482            base->Opcode() == Op_CastX2P || base->is_DecodeN() ||
 483            (base->is_Mem() && base->bottom_type() == TypeRawPtr::NOTNULL) ||
 484            (base->is_Proj() && base->in(0)->is_Allocate()), "sanity");
 485   }
 486   return base;
 487 }
 488 
 489 static Node* find_second_addp(Node* addp, Node* n) {
 490   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
 491 
 492   Node* addp2 = addp->raw_out(0);
 493   if (addp->outcnt() == 1 && addp2->is_AddP() &&
 494       addp2->in(AddPNode::Base) == n &&
 495       addp2->in(AddPNode::Address) == addp) {
 496 
 497     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
 498     //
 499     // Find array's offset to push it on worklist first and
 500     // as result process an array's element offset first (pushed second)
 501     // to avoid CastPP for the array's offset.
 502     // Otherwise the inserted CastPP (LocalVar) will point to what
 503     // the AddP (Field) points to. Which would be wrong since
 504     // the algorithm expects the CastPP has the same point as
 505     // as AddP's base CheckCastPP (LocalVar).
 506     //
 507     //    ArrayAllocation
 508     //     |
 509     //    CheckCastPP
 510     //     |
 511     //    memProj (from ArrayAllocation CheckCastPP)
 512     //     |  ||
 513     //     |  ||   Int (element index)
 514     //     |  ||    |   ConI (log(element size))
 515     //     |  ||    |   /
 516     //     |  ||   LShift
 517     //     |  ||  /
 518     //     |  AddP (array's element offset)
 519     //     |  |
 520     //     |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
 521     //     | / /
 522     //     AddP (array's offset)
 523     //      |
 524     //     Load/Store (memory operation on array's element)
 525     //
 526     return addp2;
 527   }
 528   return NULL;
 529 }
 530 
 531 //
 532 // Adjust the type and inputs of an AddP which computes the
 533 // address of a field of an instance
 534 //
 535 bool ConnectionGraph::split_AddP(Node *addp, Node *base,  PhaseGVN  *igvn) {
 536   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
 537   assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
 538   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
 539   if (t == NULL) {
 540     // We are computing a raw address for a store captured by an Initialize
 541     // compute an appropriate address type (cases #3 and #5).
 542     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
 543     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
 544     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
 545     assert(offs != Type::OffsetBot, "offset must be a constant");
 546     t = base_t->add_offset(offs)->is_oopptr();
 547   }
 548   int inst_id =  base_t->instance_id();
 549   assert(!t->is_known_instance() || t->instance_id() == inst_id,
 550                              "old type must be non-instance or match new type");
 551 
 552   // The type 't' could be subclass of 'base_t'.
 553   // As result t->offset() could be large then base_t's size and it will
 554   // cause the failure in add_offset() with narrow oops since TypeOopPtr()
 555   // constructor verifies correctness of the offset.
 556   //
 557   // It could happened on subclass's branch (from the type profiling
 558   // inlining) which was not eliminated during parsing since the exactness
 559   // of the allocation type was not propagated to the subclass type check.
 560   //
 561   // Or the type 't' could be not related to 'base_t' at all.
 562   // It could happened when CHA type is different from MDO type on a dead path
 563   // (for example, from instanceof check) which is not collapsed during parsing.
 564   //
 565   // Do nothing for such AddP node and don't process its users since
 566   // this code branch will go away.
 567   //
 568   if (!t->is_known_instance() &&
 569       !base_t->klass()->is_subtype_of(t->klass())) {
 570      return false; // bail out
 571   }
 572 
 573   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
 574   // Do NOT remove the next line: ensure a new alias index is allocated
 575   // for the instance type. Note: C++ will not remove it since the call
 576   // has side effect.
 577   int alias_idx = _compile->get_alias_index(tinst);
 578   igvn->set_type(addp, tinst);
 579   // record the allocation in the node map
 580   assert(ptnode_adr(addp->_idx)->_node != NULL, "should be registered");
 581   set_map(addp->_idx, get_map(base->_idx));
 582 
 583   // Set addp's Base and Address to 'base'.
 584   Node *abase = addp->in(AddPNode::Base);
 585   Node *adr   = addp->in(AddPNode::Address);
 586   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
 587       adr->in(0)->_idx == (uint)inst_id) {
 588     // Skip AddP cases #3 and #5.
 589   } else {
 590     assert(!abase->is_top(), "sanity"); // AddP case #3
 591     if (abase != base) {
 592       igvn->hash_delete(addp);
 593       addp->set_req(AddPNode::Base, base);
 594       if (abase == adr) {
 595         addp->set_req(AddPNode::Address, base);
 596       } else {
 597         // AddP case #4 (adr is array's element offset AddP node)
 598 #ifdef ASSERT
 599         const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
 600         assert(adr->is_AddP() && atype != NULL &&
 601                atype->instance_id() == inst_id, "array's element offset should be processed first");
 602 #endif
 603       }
 604       igvn->hash_insert(addp);
 605     }
 606   }
 607   // Put on IGVN worklist since at least addp's type was changed above.
 608   record_for_optimizer(addp);
 609   return true;
 610 }
 611 
 612 //
 613 // Create a new version of orig_phi if necessary. Returns either the newly
 614 // created phi or an existing phi.  Sets create_new to indicate whether a new
 615 // phi was created.  Cache the last newly created phi in the node map.
 616 //
 617 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn, bool &new_created) {
 618   Compile *C = _compile;
 619   new_created = false;
 620   int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
 621   // nothing to do if orig_phi is bottom memory or matches alias_idx
 622   if (phi_alias_idx == alias_idx) {
 623     return orig_phi;
 624   }
 625   // Have we recently created a Phi for this alias index?
 626   PhiNode *result = get_map_phi(orig_phi->_idx);
 627   if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
 628     return result;
 629   }
 630   // Previous check may fail when the same wide memory Phi was split into Phis
 631   // for different memory slices. Search all Phis for this region.
 632   if (result != NULL) {
 633     Node* region = orig_phi->in(0);
 634     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
 635       Node* phi = region->fast_out(i);
 636       if (phi->is_Phi() &&
 637           C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
 638         assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
 639         return phi->as_Phi();
 640       }
 641     }
 642   }
 643   if ((int)C->unique() + 2*NodeLimitFudgeFactor > MaxNodeLimit) {
 644     if (C->do_escape_analysis() == true && !C->failing()) {
 645       // Retry compilation without escape analysis.
 646       // If this is the first failure, the sentinel string will "stick"
 647       // to the Compile object, and the C2Compiler will see it and retry.
 648       C->record_failure(C2Compiler::retry_no_escape_analysis());
 649     }
 650     return NULL;
 651   }
 652   orig_phi_worklist.append_if_missing(orig_phi);
 653   const TypePtr *atype = C->get_adr_type(alias_idx);
 654   result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
 655   C->copy_node_notes_to(result, orig_phi);
 656   igvn->set_type(result, result->bottom_type());
 657   record_for_optimizer(result);
 658 
 659   debug_only(Node* pn = ptnode_adr(orig_phi->_idx)->_node;)
 660   assert(pn == NULL || pn == orig_phi, "wrong node");
 661   set_map(orig_phi->_idx, result);
 662   ptnode_adr(orig_phi->_idx)->_node = orig_phi;
 663 
 664   new_created = true;
 665   return result;
 666 }
 667 
 668 //
 669 // Return a new version of Memory Phi "orig_phi" with the inputs having the
 670 // specified alias index.
 671 //
 672 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn) {
 673 
 674   assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
 675   Compile *C = _compile;
 676   bool new_phi_created;
 677   PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, igvn, new_phi_created);
 678   if (!new_phi_created) {
 679     return result;
 680   }
 681 
 682   GrowableArray<PhiNode *>  phi_list;
 683   GrowableArray<uint>  cur_input;
 684 
 685   PhiNode *phi = orig_phi;
 686   uint idx = 1;
 687   bool finished = false;
 688   while(!finished) {
 689     while (idx < phi->req()) {
 690       Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, igvn);
 691       if (mem != NULL && mem->is_Phi()) {
 692         PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, igvn, new_phi_created);
 693         if (new_phi_created) {
 694           // found an phi for which we created a new split, push current one on worklist and begin
 695           // processing new one
 696           phi_list.push(phi);
 697           cur_input.push(idx);
 698           phi = mem->as_Phi();
 699           result = newphi;
 700           idx = 1;
 701           continue;
 702         } else {
 703           mem = newphi;
 704         }
 705       }
 706       if (C->failing()) {
 707         return NULL;
 708       }
 709       result->set_req(idx++, mem);
 710     }
 711 #ifdef ASSERT
 712     // verify that the new Phi has an input for each input of the original
 713     assert( phi->req() == result->req(), "must have same number of inputs.");
 714     assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
 715 #endif
 716     // Check if all new phi's inputs have specified alias index.
 717     // Otherwise use old phi.
 718     for (uint i = 1; i < phi->req(); i++) {
 719       Node* in = result->in(i);
 720       assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");
 721     }
 722     // we have finished processing a Phi, see if there are any more to do
 723     finished = (phi_list.length() == 0 );
 724     if (!finished) {
 725       phi = phi_list.pop();
 726       idx = cur_input.pop();
 727       PhiNode *prev_result = get_map_phi(phi->_idx);
 728       prev_result->set_req(idx++, result);
 729       result = prev_result;
 730     }
 731   }
 732   return result;
 733 }
 734 
 735 
 736 //
 737 // The next methods are derived from methods in MemNode.
 738 //
 739 static Node *step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
 740   Node *mem = mmem;
 741   // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
 742   // means an array I have not precisely typed yet.  Do not do any
 743   // alias stuff with it any time soon.
 744   if( toop->base() != Type::AnyPtr &&
 745       !(toop->klass() != NULL &&
 746         toop->klass()->is_java_lang_Object() &&
 747         toop->offset() == Type::OffsetBot) ) {
 748     mem = mmem->memory_at(alias_idx);
 749     // Update input if it is progress over what we have now
 750   }
 751   return mem;
 752 }
 753 
 754 //
 755 // Move memory users to their memory slices.
 756 //
 757 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *igvn) {
 758   Compile* C = _compile;
 759 
 760   const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
 761   assert(tp != NULL, "ptr type");
 762   int alias_idx = C->get_alias_index(tp);
 763   int general_idx = C->get_general_index(alias_idx);
 764 
 765   // Move users first
 766   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 767     Node* use = n->fast_out(i);
 768     if (use->is_MergeMem()) {
 769       MergeMemNode* mmem = use->as_MergeMem();
 770       assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
 771       if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
 772         continue; // Nothing to do
 773       }
 774       // Replace previous general reference to mem node.
 775       uint orig_uniq = C->unique();
 776       Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
 777       assert(orig_uniq == C->unique(), "no new nodes");
 778       mmem->set_memory_at(general_idx, m);
 779       --imax;
 780       --i;
 781     } else if (use->is_MemBar()) {
 782       assert(!use->is_Initialize(), "initializing stores should not be moved");
 783       if (use->req() > MemBarNode::Precedent &&
 784           use->in(MemBarNode::Precedent) == n) {
 785         // Don't move related membars.
 786         record_for_optimizer(use);
 787         continue;
 788       }
 789       tp = use->as_MemBar()->adr_type()->isa_ptr();
 790       if (tp != NULL && C->get_alias_index(tp) == alias_idx ||
 791           alias_idx == general_idx) {
 792         continue; // Nothing to do
 793       }
 794       // Move to general memory slice.
 795       uint orig_uniq = C->unique();
 796       Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
 797       assert(orig_uniq == C->unique(), "no new nodes");
 798       igvn->hash_delete(use);
 799       imax -= use->replace_edge(n, m);
 800       igvn->hash_insert(use);
 801       record_for_optimizer(use);
 802       --i;
 803 #ifdef ASSERT
 804     } else if (use->is_Mem()) {
 805       if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
 806         // Don't move related cardmark.
 807         continue;
 808       }
 809       // Memory nodes should have new memory input.
 810       tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
 811       assert(tp != NULL, "ptr type");
 812       int idx = C->get_alias_index(tp);
 813       assert(get_map(use->_idx) != NULL || idx == alias_idx,
 814              "Following memory nodes should have new memory input or be on the same memory slice");
 815     } else if (use->is_Phi()) {
 816       // Phi nodes should be split and moved already.
 817       tp = use->as_Phi()->adr_type()->isa_ptr();
 818       assert(tp != NULL, "ptr type");
 819       int idx = C->get_alias_index(tp);
 820       assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
 821     } else {
 822       use->dump();
 823       assert(false, "should not be here");
 824 #endif
 825     }
 826   }
 827 }
 828 
 829 //
 830 // Search memory chain of "mem" to find a MemNode whose address
 831 // is the specified alias index.
 832 //
 833 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *phase) {
 834   if (orig_mem == NULL)
 835     return orig_mem;
 836   Compile* C = phase->C;
 837   const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
 838   bool is_instance = (toop != NULL) && toop->is_known_instance();
 839   Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
 840   Node *prev = NULL;
 841   Node *result = orig_mem;
 842   while (prev != result) {
 843     prev = result;
 844     if (result == start_mem)
 845       break;  // hit one of our sentinels
 846     if (result->is_Mem()) {
 847       const Type *at = phase->type(result->in(MemNode::Address));
 848       if (at == Type::TOP)
 849         break; // Dead
 850       assert (at->isa_ptr() != NULL, "pointer type required.");
 851       int idx = C->get_alias_index(at->is_ptr());
 852       if (idx == alias_idx)
 853         break; // Found
 854       if (!is_instance && (at->isa_oopptr() == NULL ||
 855                            !at->is_oopptr()->is_known_instance())) {
 856         break; // Do not skip store to general memory slice.
 857       }
 858       result = result->in(MemNode::Memory);
 859     }
 860     if (!is_instance)
 861       continue;  // don't search further for non-instance types
 862     // skip over a call which does not affect this memory slice
 863     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
 864       Node *proj_in = result->in(0);
 865       if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
 866         break;  // hit one of our sentinels
 867       } else if (proj_in->is_Call()) {
 868         CallNode *call = proj_in->as_Call();
 869         if (!call->may_modify(toop, phase)) {
 870           result = call->in(TypeFunc::Memory);
 871         }
 872       } else if (proj_in->is_Initialize()) {
 873         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
 874         // Stop if this is the initialization for the object instance which
 875         // which contains this memory slice, otherwise skip over it.
 876         if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) {
 877           result = proj_in->in(TypeFunc::Memory);
 878         }
 879       } else if (proj_in->is_MemBar()) {
 880         result = proj_in->in(TypeFunc::Memory);
 881       }
 882     } else if (result->is_MergeMem()) {
 883       MergeMemNode *mmem = result->as_MergeMem();
 884       result = step_through_mergemem(mmem, alias_idx, toop);
 885       if (result == mmem->base_memory()) {
 886         // Didn't find instance memory, search through general slice recursively.
 887         result = mmem->memory_at(C->get_general_index(alias_idx));
 888         result = find_inst_mem(result, alias_idx, orig_phis, phase);
 889         if (C->failing()) {
 890           return NULL;
 891         }
 892         mmem->set_memory_at(alias_idx, result);
 893       }
 894     } else if (result->is_Phi() &&
 895                C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
 896       Node *un = result->as_Phi()->unique_input(phase);
 897       if (un != NULL) {
 898         orig_phis.append_if_missing(result->as_Phi());
 899         result = un;
 900       } else {
 901         break;
 902       }
 903     } else if (result->is_ClearArray()) {
 904       if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), phase)) {
 905         // Can not bypass initialization of the instance
 906         // we are looking for.
 907         break;
 908       }
 909       // Otherwise skip it (the call updated 'result' value).
 910     } else if (result->Opcode() == Op_SCMemProj) {
 911       assert(result->in(0)->is_LoadStore(), "sanity");
 912       const Type *at = phase->type(result->in(0)->in(MemNode::Address));
 913       if (at != Type::TOP) {
 914         assert (at->isa_ptr() != NULL, "pointer type required.");
 915         int idx = C->get_alias_index(at->is_ptr());
 916         assert(idx != alias_idx, "Object is not scalar replaceable if a LoadStore node access its field");
 917         break;
 918       }
 919       result = result->in(0)->in(MemNode::Memory);
 920     }
 921   }
 922   if (result->is_Phi()) {
 923     PhiNode *mphi = result->as_Phi();
 924     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
 925     const TypePtr *t = mphi->adr_type();
 926     if (!is_instance) {
 927       // Push all non-instance Phis on the orig_phis worklist to update inputs
 928       // during Phase 4 if needed.
 929       orig_phis.append_if_missing(mphi);
 930     } else if (C->get_alias_index(t) != alias_idx) {
 931       // Create a new Phi with the specified alias index type.
 932       result = split_memory_phi(mphi, alias_idx, orig_phis, phase);
 933     }
 934   }
 935   // the result is either MemNode, PhiNode, InitializeNode.
 936   return result;
 937 }
 938 
 939 //
 940 //  Convert the types of unescaped object to instance types where possible,
 941 //  propagate the new type information through the graph, and update memory
 942 //  edges and MergeMem inputs to reflect the new type.
 943 //
 944 //  We start with allocations (and calls which may be allocations)  on alloc_worklist.
 945 //  The processing is done in 4 phases:
 946 //
 947 //  Phase 1:  Process possible allocations from alloc_worklist.  Create instance
 948 //            types for the CheckCastPP for allocations where possible.
 949 //            Propagate the the new types through users as follows:
 950 //               casts and Phi:  push users on alloc_worklist
 951 //               AddP:  cast Base and Address inputs to the instance type
 952 //                      push any AddP users on alloc_worklist and push any memnode
 953 //                      users onto memnode_worklist.
 954 //  Phase 2:  Process MemNode's from memnode_worklist. compute new address type and
 955 //            search the Memory chain for a store with the appropriate type
 956 //            address type.  If a Phi is found, create a new version with
 957 //            the appropriate memory slices from each of the Phi inputs.
 958 //            For stores, process the users as follows:
 959 //               MemNode:  push on memnode_worklist
 960 //               MergeMem: push on mergemem_worklist
 961 //  Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory slice
 962 //            moving the first node encountered of each  instance type to the
 963 //            the input corresponding to its alias index.
 964 //            appropriate memory slice.
 965 //  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
 966 //
 967 // In the following example, the CheckCastPP nodes are the cast of allocation
 968 // results and the allocation of node 29 is unescaped and eligible to be an
 969 // instance type.
 970 //
 971 // We start with:
 972 //
 973 //     7 Parm #memory
 974 //    10  ConI  "12"
 975 //    19  CheckCastPP   "Foo"
 976 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
 977 //    29  CheckCastPP   "Foo"
 978 //    30  AddP  _ 29 29 10  Foo+12  alias_index=4
 979 //
 980 //    40  StoreP  25   7  20   ... alias_index=4
 981 //    50  StoreP  35  40  30   ... alias_index=4
 982 //    60  StoreP  45  50  20   ... alias_index=4
 983 //    70  LoadP    _  60  30   ... alias_index=4
 984 //    80  Phi     75  50  60   Memory alias_index=4
 985 //    90  LoadP    _  80  30   ... alias_index=4
 986 //   100  LoadP    _  80  20   ... alias_index=4
 987 //
 988 //
 989 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24
 990 // and creating a new alias index for node 30.  This gives:
 991 //
 992 //     7 Parm #memory
 993 //    10  ConI  "12"
 994 //    19  CheckCastPP   "Foo"
 995 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
 996 //    29  CheckCastPP   "Foo"  iid=24
 997 //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
 998 //
 999 //    40  StoreP  25   7  20   ... alias_index=4
1000 //    50  StoreP  35  40  30   ... alias_index=6
1001 //    60  StoreP  45  50  20   ... alias_index=4
1002 //    70  LoadP    _  60  30   ... alias_index=6
1003 //    80  Phi     75  50  60   Memory alias_index=4
1004 //    90  LoadP    _  80  30   ... alias_index=6
1005 //   100  LoadP    _  80  20   ... alias_index=4
1006 //
1007 // In phase 2, new memory inputs are computed for the loads and stores,
1008 // And a new version of the phi is created.  In phase 4, the inputs to
1009 // node 80 are updated and then the memory nodes are updated with the
1010 // values computed in phase 2.  This results in:
1011 //
1012 //     7 Parm #memory
1013 //    10  ConI  "12"
1014 //    19  CheckCastPP   "Foo"
1015 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
1016 //    29  CheckCastPP   "Foo"  iid=24
1017 //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
1018 //
1019 //    40  StoreP  25  7   20   ... alias_index=4
1020 //    50  StoreP  35  7   30   ... alias_index=6
1021 //    60  StoreP  45  40  20   ... alias_index=4
1022 //    70  LoadP    _  50  30   ... alias_index=6
1023 //    80  Phi     75  40  60   Memory alias_index=4
1024 //   120  Phi     75  50  50   Memory alias_index=6
1025 //    90  LoadP    _ 120  30   ... alias_index=6
1026 //   100  LoadP    _  80  20   ... alias_index=4
1027 //
1028 void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist) {
1029   GrowableArray<Node *>  memnode_worklist;
1030   GrowableArray<PhiNode *>  orig_phis;
1031 
1032   PhaseIterGVN  *igvn = _igvn;
1033   uint new_index_start = (uint) _compile->num_alias_types();
1034   Arena* arena = Thread::current()->resource_area();
1035   VectorSet visited(arena);
1036 
1037 
1038   //  Phase 1:  Process possible allocations from alloc_worklist.
1039   //  Create instance types for the CheckCastPP for allocations where possible.
1040   //
1041   // (Note: don't forget to change the order of the second AddP node on
1042   //  the alloc_worklist if the order of the worklist processing is changed,
1043   //  see the comment in find_second_addp().)
1044   //
1045   while (alloc_worklist.length() != 0) {
1046     Node *n = alloc_worklist.pop();
1047     uint ni = n->_idx;
1048     const TypeOopPtr* tinst = NULL;
1049     if (n->is_Call()) {
1050       CallNode *alloc = n->as_Call();
1051       // copy escape information to call node
1052       PointsToNode* ptn = ptnode_adr(alloc->_idx);
1053       PointsToNode::EscapeState es = escape_state(alloc);
1054       // We have an allocation or call which returns a Java object,
1055       // see if it is unescaped.
1056       if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable())
1057         continue;
1058 
1059       // Find CheckCastPP for the allocate or for the return value of a call
1060       n = alloc->result_cast();
1061       if (n == NULL) {            // No uses except Initialize node
1062         if (alloc->is_Allocate()) {
1063           // Set the scalar_replaceable flag for allocation
1064           // so it could be eliminated if it has no uses.
1065           alloc->as_Allocate()->_is_scalar_replaceable = true;
1066         }
1067         continue;
1068       }
1069       if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
1070         assert(!alloc->is_Allocate(), "allocation should have unique type");
1071         continue;
1072       }
1073 
1074       // The inline code for Object.clone() casts the allocation result to
1075       // java.lang.Object and then to the actual type of the allocated
1076       // object. Detect this case and use the second cast.
1077       // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when
1078       // the allocation result is cast to java.lang.Object and then
1079       // to the actual Array type.
1080       if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
1081           && (alloc->is_AllocateArray() ||
1082               igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {
1083         Node *cast2 = NULL;
1084         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1085           Node *use = n->fast_out(i);
1086           if (use->is_CheckCastPP()) {
1087             cast2 = use;
1088             break;
1089           }
1090         }
1091         if (cast2 != NULL) {
1092           n = cast2;
1093         } else {
1094           // Non-scalar replaceable if the allocation type is unknown statically
1095           // (reflection allocation), the object can't be restored during
1096           // deoptimization without precise type.
1097           continue;
1098         }
1099       }
1100       if (alloc->is_Allocate()) {
1101         // Set the scalar_replaceable flag for allocation
1102         // so it could be eliminated.
1103         alloc->as_Allocate()->_is_scalar_replaceable = true;
1104       }
1105       set_escape_state(n->_idx, es); // CheckCastPP escape state
1106       // in order for an object to be scalar-replaceable, it must be:
1107       //   - a direct allocation (not a call returning an object)
1108       //   - non-escaping
1109       //   - eligible to be a unique type
1110       //   - not determined to be ineligible by escape analysis
1111       assert(ptnode_adr(alloc->_idx)->_node != NULL &&
1112              ptnode_adr(n->_idx)->_node != NULL, "should be registered");
1113       set_map(alloc->_idx, n);
1114       set_map(n->_idx, alloc);
1115       const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
1116       if (t == NULL)
1117         continue;  // not a TypeOopPtr
1118       tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni);
1119       igvn->hash_delete(n);
1120       igvn->set_type(n,  tinst);
1121       n->raise_bottom_type(tinst);
1122       igvn->hash_insert(n);
1123       record_for_optimizer(n);
1124       if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
1125 
1126         // First, put on the worklist all Field edges from Connection Graph
1127         // which is more accurate then putting immediate users from Ideal Graph.
1128         for (uint e = 0; e < ptn->edge_count(); e++) {
1129           Node *use = ptnode_adr(ptn->edge_target(e))->_node;
1130           assert(ptn->edge_type(e) == PointsToNode::FieldEdge && use->is_AddP(),
1131                  "only AddP nodes are Field edges in CG");
1132           if (use->outcnt() > 0) { // Don't process dead nodes
1133             Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
1134             if (addp2 != NULL) {
1135               assert(alloc->is_AllocateArray(),"array allocation was expected");
1136               alloc_worklist.append_if_missing(addp2);
1137             }
1138             alloc_worklist.append_if_missing(use);
1139           }
1140         }
1141 
1142         // An allocation may have an Initialize which has raw stores. Scan
1143         // the users of the raw allocation result and push AddP users
1144         // on alloc_worklist.
1145         Node *raw_result = alloc->proj_out(TypeFunc::Parms);
1146         assert (raw_result != NULL, "must have an allocation result");
1147         for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
1148           Node *use = raw_result->fast_out(i);
1149           if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
1150             Node* addp2 = find_second_addp(use, raw_result);
1151             if (addp2 != NULL) {
1152               assert(alloc->is_AllocateArray(),"array allocation was expected");
1153               alloc_worklist.append_if_missing(addp2);
1154             }
1155             alloc_worklist.append_if_missing(use);
1156           } else if (use->is_MemBar()) {
1157             memnode_worklist.append_if_missing(use);
1158           }
1159         }
1160       }
1161     } else if (n->is_AddP()) {
1162       VectorSet* ptset = PointsTo(get_addp_base(n));
1163       assert(ptset->Size() == 1, "AddP address is unique");
1164       uint elem = ptset->getelem(); // Allocation node's index
1165       if (elem == _phantom_object) {
1166         assert(false, "escaped allocation");
1167         continue; // Assume the value was set outside this method.
1168       }
1169       Node *base = get_map(elem);  // CheckCastPP node
1170       if (!split_AddP(n, base, igvn)) continue; // wrong type from dead path
1171       tinst = igvn->type(base)->isa_oopptr();
1172     } else if (n->is_Phi() ||
1173                n->is_CheckCastPP() ||
1174                n->is_EncodeP() ||
1175                n->is_DecodeN() ||
1176                (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
1177       if (visited.test_set(n->_idx)) {
1178         assert(n->is_Phi(), "loops only through Phi's");
1179         continue;  // already processed
1180       }
1181       VectorSet* ptset = PointsTo(n);
1182       if (ptset->Size() == 1) {
1183         uint elem = ptset->getelem(); // Allocation node's index
1184         if (elem == _phantom_object) {
1185           assert(false, "escaped allocation");
1186           continue; // Assume the value was set outside this method.
1187         }
1188         Node *val = get_map(elem);   // CheckCastPP node
1189         TypeNode *tn = n->as_Type();
1190         tinst = igvn->type(val)->isa_oopptr();
1191         assert(tinst != NULL && tinst->is_known_instance() &&
1192                (uint)tinst->instance_id() == elem , "instance type expected.");
1193 
1194         const Type *tn_type = igvn->type(tn);
1195         const TypeOopPtr *tn_t;
1196         if (tn_type->isa_narrowoop()) {
1197           tn_t = tn_type->make_ptr()->isa_oopptr();
1198         } else {
1199           tn_t = tn_type->isa_oopptr();
1200         }
1201 
1202         if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
1203           if (tn_type->isa_narrowoop()) {
1204             tn_type = tinst->make_narrowoop();
1205           } else {
1206             tn_type = tinst;
1207           }
1208           igvn->hash_delete(tn);
1209           igvn->set_type(tn, tn_type);
1210           tn->set_type(tn_type);
1211           igvn->hash_insert(tn);
1212           record_for_optimizer(n);
1213         } else {
1214           assert(tn_type == TypePtr::NULL_PTR ||
1215                  tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()),
1216                  "unexpected type");
1217           continue; // Skip dead path with different type
1218         }
1219       }
1220     } else {
1221       debug_only(n->dump();)
1222       assert(false, "EA: unexpected node");
1223       continue;
1224     }
1225     // push allocation's users on appropriate worklist
1226     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1227       Node *use = n->fast_out(i);
1228       if(use->is_Mem() && use->in(MemNode::Address) == n) {
1229         // Load/store to instance's field
1230         memnode_worklist.append_if_missing(use);
1231       } else if (use->is_MemBar()) {
1232         memnode_worklist.append_if_missing(use);
1233       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
1234         Node* addp2 = find_second_addp(use, n);
1235         if (addp2 != NULL) {
1236           alloc_worklist.append_if_missing(addp2);
1237         }
1238         alloc_worklist.append_if_missing(use);
1239       } else if (use->is_Phi() ||
1240                  use->is_CheckCastPP() ||
1241                  use->is_EncodeP() ||
1242                  use->is_DecodeN() ||
1243                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
1244         alloc_worklist.append_if_missing(use);
1245 #ifdef ASSERT
1246       } else if (use->is_Mem()) {
1247         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
1248       } else if (use->is_MergeMem()) {
1249         assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
1250       } else if (use->is_SafePoint()) {
1251         // Look for MergeMem nodes for calls which reference unique allocation
1252         // (through CheckCastPP nodes) even for debug info.
1253         Node* m = use->in(TypeFunc::Memory);
1254         if (m->is_MergeMem()) {
1255           assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
1256         }
1257       } else {
1258         uint op = use->Opcode();
1259         if (!(op == Op_CmpP || op == Op_Conv2B ||
1260               op == Op_CastP2X || op == Op_StoreCM ||
1261               op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||
1262               op == Op_StrEquals || op == Op_StrIndexOf)) {
1263           n->dump();
1264           use->dump();
1265           assert(false, "EA: missing allocation reference path");
1266         }
1267 #endif
1268       }
1269     }
1270 
1271   }
1272   // New alias types were created in split_AddP().
1273   uint new_index_end = (uint) _compile->num_alias_types();
1274 
1275   //  Phase 2:  Process MemNode's from memnode_worklist. compute new address type and
1276   //            compute new values for Memory inputs  (the Memory inputs are not
1277   //            actually updated until phase 4.)
1278   if (memnode_worklist.length() == 0)
1279     return;  // nothing to do
1280 
1281   while (memnode_worklist.length() != 0) {
1282     Node *n = memnode_worklist.pop();
1283     if (visited.test_set(n->_idx))
1284       continue;
1285     if (n->is_Phi() || n->is_ClearArray()) {
1286       // we don't need to do anything, but the users must be pushed
1287     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
1288       // we don't need to do anything, but the users must be pushed
1289       n = n->as_MemBar()->proj_out(TypeFunc::Memory);
1290       if (n == NULL)
1291         continue;
1292     } else {
1293       assert(n->is_Mem(), "memory node required.");
1294       Node *addr = n->in(MemNode::Address);
1295       const Type *addr_t = igvn->type(addr);
1296       if (addr_t == Type::TOP)
1297         continue;
1298       assert (addr_t->isa_ptr() != NULL, "pointer type required.");
1299       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
1300       assert ((uint)alias_idx < new_index_end, "wrong alias index");
1301       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis, igvn);
1302       if (_compile->failing()) {
1303         return;
1304       }
1305       if (mem != n->in(MemNode::Memory)) {
1306         // We delay the memory edge update since we need old one in
1307         // MergeMem code below when instances memory slices are separated.
1308         debug_only(Node* pn = ptnode_adr(n->_idx)->_node;)
1309         assert(pn == NULL || pn == n, "wrong node");
1310         set_map(n->_idx, mem);
1311         ptnode_adr(n->_idx)->_node = n;
1312       }
1313       if (n->is_Load()) {
1314         continue;  // don't push users
1315       } else if (n->is_LoadStore()) {
1316         // get the memory projection
1317         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1318           Node *use = n->fast_out(i);
1319           if (use->Opcode() == Op_SCMemProj) {
1320             n = use;
1321             break;
1322           }
1323         }
1324         assert(n->Opcode() == Op_SCMemProj, "memory projection required");
1325       }
1326     }
1327     // push user on appropriate worklist
1328     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1329       Node *use = n->fast_out(i);
1330       if (use->is_Phi() || use->is_ClearArray()) {
1331         memnode_worklist.append_if_missing(use);
1332       } else if(use->is_Mem() && use->in(MemNode::Memory) == n) {
1333         if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
1334           continue;
1335         memnode_worklist.append_if_missing(use);
1336       } else if (use->is_MemBar()) {
1337         memnode_worklist.append_if_missing(use);
1338 #ifdef ASSERT
1339       } else if(use->is_Mem()) {
1340         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
1341       } else if (use->is_MergeMem()) {
1342         assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
1343       } else {
1344         uint op = use->Opcode();
1345         if (!(op == Op_StoreCM ||
1346               (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
1347                strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
1348               op == Op_AryEq || op == Op_StrComp ||
1349               op == Op_StrEquals || op == Op_StrIndexOf)) {
1350           n->dump();
1351           use->dump();
1352           assert(false, "EA: missing memory path");
1353         }
1354 #endif
1355       }
1356     }
1357   }
1358 
1359   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
1360   //            Walk each memory slice moving the first node encountered of each
1361   //            instance type to the the input corresponding to its alias index.
1362   uint length = _mergemem_worklist.length();
1363   for( uint next = 0; next < length; ++next ) {
1364     MergeMemNode* nmm = _mergemem_worklist.at(next);
1365     assert(!visited.test_set(nmm->_idx), "should not be visited before");
1366     // Note: we don't want to use MergeMemStream here because we only want to
1367     // scan inputs which exist at the start, not ones we add during processing.
1368     // Note 2: MergeMem may already contains instance memory slices added
1369     // during find_inst_mem() call when memory nodes were processed above.
1370     igvn->hash_delete(nmm);
1371     uint nslices = nmm->req();
1372     for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
1373       Node* mem = nmm->in(i);
1374       Node* cur = NULL;
1375       if (mem == NULL || mem->is_top())
1376         continue;
1377       // First, update mergemem by moving memory nodes to corresponding slices
1378       // if their type became more precise since this mergemem was created.
1379       while (mem->is_Mem()) {
1380         const Type *at = igvn->type(mem->in(MemNode::Address));
1381         if (at != Type::TOP) {
1382           assert (at->isa_ptr() != NULL, "pointer type required.");
1383           uint idx = (uint)_compile->get_alias_index(at->is_ptr());
1384           if (idx == i) {
1385             if (cur == NULL)
1386               cur = mem;
1387           } else {
1388             if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
1389               nmm->set_memory_at(idx, mem);
1390             }
1391           }
1392         }
1393         mem = mem->in(MemNode::Memory);
1394       }
1395       nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
1396       // Find any instance of the current type if we haven't encountered
1397       // already a memory slice of the instance along the memory chain.
1398       for (uint ni = new_index_start; ni < new_index_end; ni++) {
1399         if((uint)_compile->get_general_index(ni) == i) {
1400           Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
1401           if (nmm->is_empty_memory(m)) {
1402             Node* result = find_inst_mem(mem, ni, orig_phis, igvn);
1403             if (_compile->failing()) {
1404               return;
1405             }
1406             nmm->set_memory_at(ni, result);
1407           }
1408         }
1409       }
1410     }
1411     // Find the rest of instances values
1412     for (uint ni = new_index_start; ni < new_index_end; ni++) {
1413       const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
1414       Node* result = step_through_mergemem(nmm, ni, tinst);
1415       if (result == nmm->base_memory()) {
1416         // Didn't find instance memory, search through general slice recursively.
1417         result = nmm->memory_at(_compile->get_general_index(ni));
1418         result = find_inst_mem(result, ni, orig_phis, igvn);
1419         if (_compile->failing()) {
1420           return;
1421         }
1422         nmm->set_memory_at(ni, result);
1423       }
1424     }
1425     igvn->hash_insert(nmm);
1426     record_for_optimizer(nmm);
1427   }
1428 
1429   //  Phase 4:  Update the inputs of non-instance memory Phis and
1430   //            the Memory input of memnodes
1431   // First update the inputs of any non-instance Phi's from
1432   // which we split out an instance Phi.  Note we don't have
1433   // to recursively process Phi's encounted on the input memory
1434   // chains as is done in split_memory_phi() since they  will
1435   // also be processed here.
1436   for (int j = 0; j < orig_phis.length(); j++) {
1437     PhiNode *phi = orig_phis.at(j);
1438     int alias_idx = _compile->get_alias_index(phi->adr_type());
1439     igvn->hash_delete(phi);
1440     for (uint i = 1; i < phi->req(); i++) {
1441       Node *mem = phi->in(i);
1442       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis, igvn);
1443       if (_compile->failing()) {
1444         return;
1445       }
1446       if (mem != new_mem) {
1447         phi->set_req(i, new_mem);
1448       }
1449     }
1450     igvn->hash_insert(phi);
1451     record_for_optimizer(phi);
1452   }
1453 
1454   // Update the memory inputs of MemNodes with the value we computed
1455   // in Phase 2 and move stores memory users to corresponding memory slices.
1456 
1457   // Disable memory split verification code until the fix for 6984348.
1458   // Currently it produces false negative results since it does not cover all cases.
1459 #if 0 // ifdef ASSERT
1460   visited.Reset();
1461   Node_Stack old_mems(arena, _compile->unique() >> 2);
1462 #endif
1463   for (uint i = 0; i < nodes_size(); i++) {
1464     Node *nmem = get_map(i);
1465     if (nmem != NULL) {
1466       Node *n = ptnode_adr(i)->_node;
1467       assert(n != NULL, "sanity");
1468       if (n->is_Mem()) {
1469 #if 0 // ifdef ASSERT
1470         Node* old_mem = n->in(MemNode::Memory);
1471         if (!visited.test_set(old_mem->_idx)) {
1472           old_mems.push(old_mem, old_mem->outcnt());
1473         }
1474 #endif
1475         assert(n->in(MemNode::Memory) != nmem, "sanity");
1476         if (!n->is_Load()) {
1477           // Move memory users of a store first.
1478           move_inst_mem(n, orig_phis, igvn);
1479         }
1480         // Now update memory input
1481         igvn->hash_delete(n);
1482         n->set_req(MemNode::Memory, nmem);
1483         igvn->hash_insert(n);
1484         record_for_optimizer(n);
1485       } else {
1486         assert(n->is_Allocate() || n->is_CheckCastPP() ||
1487                n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
1488       }
1489     }
1490   }
1491 #if 0 // ifdef ASSERT
1492   // Verify that memory was split correctly
1493   while (old_mems.is_nonempty()) {
1494     Node* old_mem = old_mems.node();
1495     uint  old_cnt = old_mems.index();
1496     old_mems.pop();
1497     assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
1498   }
1499 #endif
1500 }
1501 
1502 bool ConnectionGraph::has_candidates(Compile *C) {
1503   // EA brings benefits only when the code has allocations and/or locks which
1504   // are represented by ideal Macro nodes.
1505   int cnt = C->macro_count();
1506   for( int i=0; i < cnt; i++ ) {
1507     Node *n = C->macro_node(i);
1508     if ( n->is_Allocate() )
1509       return true;
1510     if( n->is_Lock() ) {
1511       Node* obj = n->as_Lock()->obj_node()->uncast();
1512       if( !(obj->is_Parm() || obj->is_Con()) )
1513         return true;
1514     }
1515   }
1516   return false;
1517 }
1518 
1519 void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
1520   // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
1521   // to create space for them in ConnectionGraph::_nodes[].
1522   Node* oop_null = igvn->zerocon(T_OBJECT);
1523   Node* noop_null = igvn->zerocon(T_NARROWOOP);
1524 
1525   // Add ConI(#CC_GT) and ConI(#CC_EQ) if needed.
1526   Node* pcmp_neq = OptimizePtrCompare ? igvn->makecon(TypeInt::CC_GT) : NULL;
1527   Node* pcmp_eq  = OptimizePtrCompare ? igvn->makecon(TypeInt::CC_EQ) : NULL;
1528 
1529   ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
1530   // Perform escape analysis
1531   if (congraph->compute_escape()) {
1532     // There are non escaping objects.
1533     C->set_congraph(congraph);
1534   }
1535 
1536   // Cleanup.
1537   if (oop_null->outcnt() == 0)
1538     igvn->hash_delete(oop_null);
1539   if (noop_null->outcnt() == 0)
1540     igvn->hash_delete(noop_null);
1541   if (pcmp_neq != NULL && pcmp_neq->outcnt() == 0)
1542     igvn->hash_delete(pcmp_neq);
1543   if (pcmp_eq  != NULL && pcmp_eq->outcnt()  == 0)
1544     igvn->hash_delete(pcmp_eq);
1545 }
1546 
1547 bool ConnectionGraph::compute_escape() {
1548   Compile* C = _compile;
1549 
1550   // 1. Populate Connection Graph (CG) with Ideal nodes.
1551 
1552   Unique_Node_List worklist_init;
1553   worklist_init.map(C->unique(), NULL);  // preallocate space
1554 
1555   // Initialize worklist
1556   if (C->root() != NULL) {
1557     worklist_init.push(C->root());
1558   }
1559 
1560   GrowableArray<Node*> alloc_worklist;
1561   GrowableArray<Node*> addp_worklist;
1562   GrowableArray<Node*> ptr_cmp_worklist;
1563   PhaseGVN* igvn = _igvn;
1564   bool has_allocations = false;
1565 
1566   // Push all useful nodes onto CG list and set their type.
1567   for( uint next = 0; next < worklist_init.size(); ++next ) {
1568     Node* n = worklist_init.at(next);
1569     record_for_escape_analysis(n, igvn);
1570     // Only allocations and java static calls results are checked
1571     // for an escape status. See process_call_result() below.
1572     if (n->is_Allocate() || n->is_CallStaticJava() &&
1573         ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject) {
1574       has_allocations = true;
1575       if (n->is_Allocate())
1576         alloc_worklist.append(n);
1577     } else if(n->is_AddP()) {
1578       // Collect address nodes. Use them during stage 3 below
1579       // to build initial connection graph field edges.
1580       addp_worklist.append(n);
1581     } else if (n->is_MergeMem()) {
1582       // Collect all MergeMem nodes to add memory slices for
1583       // scalar replaceable objects in split_unique_types().
1584       _mergemem_worklist.append(n->as_MergeMem());
1585     } else if (OptimizePtrCompare && n->is_Cmp() &&
1586                (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
1587       // Compare pointers nodes
1588       ptr_cmp_worklist.append(n);
1589     }
1590     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1591       Node* m = n->fast_out(i);   // Get user
1592       worklist_init.push(m);
1593     }
1594   }
1595 
1596   if (!has_allocations) {
1597     _collecting = false;
1598     return false; // Nothing to do.
1599   }
1600 
1601   // 2. First pass to create simple CG edges (doesn't require to walk CG).
1602   uint delayed_size = _delayed_worklist.size();
1603   for( uint next = 0; next < delayed_size; ++next ) {
1604     Node* n = _delayed_worklist.at(next);
1605     build_connection_graph(n, igvn);
1606   }
1607 
1608   // 3. Pass to create initial fields edges (JavaObject -F-> AddP)
1609   //    to reduce number of iterations during stage 4 below.
1610   uint addp_length = addp_worklist.length();
1611   for( uint next = 0; next < addp_length; ++next ) {
1612     Node* n = addp_worklist.at(next);
1613     Node* base = get_addp_base(n);
1614     if (base->is_Proj())
1615       base = base->in(0);
1616     PointsToNode::NodeType nt = ptnode_adr(base->_idx)->node_type();
1617     if (nt == PointsToNode::JavaObject) {
1618       build_connection_graph(n, igvn);
1619     }
1620   }
1621 
1622   GrowableArray<int> cg_worklist;
1623   cg_worklist.append(_phantom_object);
1624   GrowableArray<uint>  worklist;
1625 
1626   // 4. Build Connection Graph which need
1627   //    to walk the connection graph.
1628   _progress = false;
1629   for (uint ni = 0; ni < nodes_size(); ni++) {
1630     PointsToNode* ptn = ptnode_adr(ni);
1631     Node *n = ptn->_node;
1632     if (n != NULL) { // Call, AddP, LoadP, StoreP
1633       build_connection_graph(n, igvn);
1634       if (ptn->node_type() != PointsToNode::UnknownType)
1635         cg_worklist.append(n->_idx); // Collect CG nodes
1636       if (!_processed.test(n->_idx))
1637         worklist.append(n->_idx); // Collect C/A/L/S nodes
1638     }
1639   }
1640 
1641   // After IGVN user nodes may have smaller _idx than
1642   // their inputs so they will be processed first in
1643   // previous loop. Because of that not all Graph
1644   // edges will be created. Walk over interesting
1645   // nodes again until no new edges are created.
1646   //
1647   // Normally only 1-3 passes needed to build
1648   // Connection Graph depending on graph complexity.
1649   // Observed 8 passes in jvm2008 compiler.compiler.
1650   // Set limit to 20 to catch situation when something
1651   // did go wrong and recompile the method without EA.
1652 
1653 #define CG_BUILD_ITER_LIMIT 20
1654 
1655   uint length = worklist.length();
1656   int iterations = 0;
1657   while(_progress && (iterations++ < CG_BUILD_ITER_LIMIT)) {
1658     _progress = false;
1659     for( uint next = 0; next < length; ++next ) {
1660       int ni = worklist.at(next);
1661       PointsToNode* ptn = ptnode_adr(ni);
1662       Node* n = ptn->_node;
1663       assert(n != NULL, "should be known node");
1664       build_connection_graph(n, igvn);
1665     }
1666   }
1667   if (iterations >= CG_BUILD_ITER_LIMIT) {
1668     assert(iterations < CG_BUILD_ITER_LIMIT,
1669            err_msg("infinite EA connection graph build with %d nodes and worklist size %d",
1670            nodes_size(), length));
1671     // Possible infinite build_connection_graph loop,
1672     // retry compilation without escape analysis.
1673     C->record_failure(C2Compiler::retry_no_escape_analysis());
1674     _collecting = false;
1675     return false;
1676   }
1677 #undef CG_BUILD_ITER_LIMIT
1678 
1679   Arena* arena = Thread::current()->resource_area();
1680   VectorSet visited(arena);
1681 
1682   // 5. Find fields initializing values for not escaped allocations
1683   uint alloc_length = alloc_worklist.length();
1684   for (uint next = 0; next < alloc_length; ++next) {
1685     Node* n = alloc_worklist.at(next);
1686     if (ptnode_adr(n->_idx)->escape_state() == PointsToNode::NoEscape) {
1687       find_init_values(n, &visited, igvn);
1688     }
1689   }
1690 
1691   worklist.clear();
1692 
1693   // 6. Remove deferred edges from the graph.
1694   uint cg_length = cg_worklist.length();
1695   for (uint next = 0; next < cg_length; ++next) {
1696     int ni = cg_worklist.at(next);
1697     PointsToNode* ptn = ptnode_adr(ni);
1698     PointsToNode::NodeType nt = ptn->node_type();
1699     if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
1700       remove_deferred(ni, &worklist, &visited);
1701       Node *n = ptn->_node;
1702     }
1703   }
1704 
1705   // 7. Adjust escape state of nonescaping objects.
1706   for (uint next = 0; next < addp_length; ++next) {
1707     Node* n = addp_worklist.at(next);
1708     adjust_escape_state(n);
1709   }
1710 
1711   // 8. Propagate escape states.
1712   worklist.clear();
1713 
1714   // mark all nodes reachable from GlobalEscape nodes
1715   (void)propagate_escape_state(&cg_worklist, &worklist, PointsToNode::GlobalEscape);
1716 
1717   // mark all nodes reachable from ArgEscape nodes
1718   bool has_non_escaping_obj = propagate_escape_state(&cg_worklist, &worklist, PointsToNode::ArgEscape);
1719 
1720   // push all NoEscape nodes on the worklist
1721   for( uint next = 0; next < cg_length; ++next ) {
1722     int nk = cg_worklist.at(next);
1723     if (ptnode_adr(nk)->escape_state() == PointsToNode::NoEscape)
1724       worklist.push(nk);
1725   }
1726   alloc_worklist.clear();
1727   // mark all nodes reachable from NoEscape nodes
1728   while(worklist.length() > 0) {
1729     uint nk = worklist.pop();
1730     PointsToNode* ptn = ptnode_adr(nk);
1731     if (ptn->node_type() == PointsToNode::JavaObject &&
1732         !(nk == _noop_null || nk == _oop_null))
1733       has_non_escaping_obj = true; // Non Escape
1734     Node* n = ptn->_node;
1735     bool scalar_replaceable = ptn->scalar_replaceable();
1736     if (n->is_Allocate() && scalar_replaceable) {
1737       // Push scalar replaceable allocations on alloc_worklist
1738       // for processing in split_unique_types(). Note,
1739       // following code may change scalar_replaceable value.
1740       alloc_worklist.append(n);
1741     }
1742     uint e_cnt = ptn->edge_count();
1743     for (uint ei = 0; ei < e_cnt; ei++) {
1744       uint npi = ptn->edge_target(ei);
1745       PointsToNode *np = ptnode_adr(npi);
1746       if (np->escape_state() < PointsToNode::NoEscape) {
1747         set_escape_state(npi, PointsToNode::NoEscape);
1748         if (!scalar_replaceable) {
1749           np->set_scalar_replaceable(false);
1750         }
1751         worklist.push(npi);
1752       } else if (np->scalar_replaceable() && !scalar_replaceable) {
1753         // Propagate scalar_replaceable value.
1754         np->set_scalar_replaceable(false);
1755         worklist.push(npi);
1756       }
1757     }
1758   }
1759 
1760   _collecting = false;
1761   assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build");
1762 
1763   assert(ptnode_adr(_oop_null)->escape_state() == PointsToNode::NoEscape, "sanity");
1764   if (UseCompressedOops) {
1765     assert(ptnode_adr(_noop_null)->escape_state() == PointsToNode::NoEscape, "sanity");
1766   }
1767 
1768   if (EliminateLocks && has_non_escaping_obj) {
1769     // Mark locks before changing ideal graph.
1770     int cnt = C->macro_count();
1771     for( int i=0; i < cnt; i++ ) {
1772       Node *n = C->macro_node(i);
1773       if (n->is_AbstractLock()) { // Lock and Unlock nodes
1774         AbstractLockNode* alock = n->as_AbstractLock();
1775         if (!alock->is_eliminated()) {
1776           PointsToNode::EscapeState es = escape_state(alock->obj_node());
1777           assert(es != PointsToNode::UnknownEscape, "should know");
1778           if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
1779             // Mark it eliminated
1780             alock->set_eliminated();
1781           }
1782         }
1783       }
1784     }
1785   }
1786 
1787   if (OptimizePtrCompare && has_non_escaping_obj) {
1788     // Optimize objects compare.
1789     while (ptr_cmp_worklist.length() != 0) {
1790       Node *n = ptr_cmp_worklist.pop();
1791       Node *res = optimize_ptr_compare(n);
1792       if (res != NULL) {
1793 #ifndef PRODUCT
1794         if (PrintOptimizePtrCompare) {
1795           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
1796           if (Verbose) {
1797             n->dump(1);
1798           }
1799         }
1800 #endif
1801         _igvn->replace_node(n, res);
1802       }
1803     }
1804   }
1805 
1806 #ifndef PRODUCT
1807   if (PrintEscapeAnalysis) {
1808     dump(); // Dump ConnectionGraph
1809   }
1810 #endif
1811 
1812   bool has_scalar_replaceable_candidates = false;
1813   alloc_length = alloc_worklist.length();
1814   for (uint next = 0; next < alloc_length; ++next) {
1815     Node* n = alloc_worklist.at(next);
1816     PointsToNode* ptn = ptnode_adr(n->_idx);
1817     assert(ptn->escape_state() == PointsToNode::NoEscape, "sanity");
1818     if (ptn->scalar_replaceable()) {
1819       has_scalar_replaceable_candidates = true;
1820       break;
1821     }
1822   }
1823 
1824   if ( has_scalar_replaceable_candidates &&
1825        C->AliasLevel() >= 3 && EliminateAllocations ) {
1826 
1827     // Now use the escape information to create unique types for
1828     // scalar replaceable objects.
1829     split_unique_types(alloc_worklist);
1830 
1831     if (C->failing())  return false;
1832 
1833     C->print_method("After Escape Analysis", 2);
1834 
1835 #ifdef ASSERT
1836   } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
1837     tty->print("=== No allocations eliminated for ");
1838     C->method()->print_short_name();
1839     if(!EliminateAllocations) {
1840       tty->print(" since EliminateAllocations is off ===");
1841     } else if(!has_scalar_replaceable_candidates) {
1842       tty->print(" since there are no scalar replaceable candidates ===");
1843     } else if(C->AliasLevel() < 3) {
1844       tty->print(" since AliasLevel < 3 ===");
1845     }
1846     tty->cr();
1847 #endif
1848   }
1849   return has_non_escaping_obj;
1850 }
1851 
1852 // Find fields initializing values for allocations.
1853 void ConnectionGraph::find_init_values(Node* alloc, VectorSet* visited, PhaseTransform* phase) {
1854   assert(alloc->is_Allocate(), "Should be called for Allocate nodes only");
1855   PointsToNode* pta = ptnode_adr(alloc->_idx);
1856   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1857   InitializeNode* ini = alloc->as_Allocate()->initialization();
1858 
1859   Compile* C = _compile;
1860   visited->Reset();
1861   // Check if a oop field's initializing value is recorded and add
1862   // a corresponding NULL field's value if it is not recorded.
1863   // Connection Graph does not record a default initialization by NULL
1864   // captured by Initialize node.
1865   //
1866   uint ae_cnt = pta->edge_count();
1867   for (uint ei = 0; ei < ae_cnt; ei++) {
1868     uint nidx = pta->edge_target(ei); // Field (AddP)
1869     PointsToNode* ptn = ptnode_adr(nidx);
1870     assert(ptn->_node->is_AddP(), "Should be AddP nodes only");
1871     int offset = ptn->offset();
1872     if (offset != Type::OffsetBot &&
1873         offset != oopDesc::klass_offset_in_bytes() &&
1874         !visited->test_set(offset)) {
1875 
1876       // Check only oop fields.
1877       const Type* adr_type = ptn->_node->as_AddP()->bottom_type();
1878       BasicType basic_field_type = T_INT;
1879       if (adr_type->isa_instptr()) {
1880         ciField* field = C->alias_type(adr_type->isa_instptr())->field();
1881         if (field != NULL) {
1882           basic_field_type = field->layout_type();
1883         } else {
1884           // Ignore non field load (for example, klass load)
1885         }
1886       } else if (adr_type->isa_aryptr()) {
1887         if (offset != arrayOopDesc::length_offset_in_bytes()) {
1888           const Type* elemtype = adr_type->isa_aryptr()->elem();
1889           basic_field_type = elemtype->array_element_basic_type();
1890         } else {
1891           // Ignore array length load
1892         }
1893 #ifdef ASSERT
1894       } else {
1895         // Raw pointers are used for initializing stores so skip it
1896         // since it should be recorded already
1897         Node* base = get_addp_base(ptn->_node);
1898         assert(adr_type->isa_rawptr() && base->is_Proj() &&
1899                (base->in(0) == alloc),"unexpected pointer type");
1900 #endif
1901       }
1902       if (basic_field_type == T_OBJECT ||
1903           basic_field_type == T_NARROWOOP ||
1904           basic_field_type == T_ARRAY) {
1905         Node* value = NULL;
1906         if (ini != NULL) {
1907           BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
1908           Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
1909           if (store != NULL && store->is_Store()) {
1910             value = store->in(MemNode::ValueIn);
1911           } else if (ptn->edge_count() > 0) { // Are there oop stores?
1912             // Check for a store which follows allocation without branches.
1913             // For example, a volatile field store is not collected
1914             // by Initialize node. TODO: it would be nice to use idom() here.
1915             //
1916             // Search all references to the same field which use different
1917             // AddP nodes, for example, in the next case:
1918             //
1919             //    Point p[] = new Point[1];
1920             //    if ( x ) { p[0] = new Point(); p[0].x = x; }
1921             //    if ( p[0] != null ) { y = p[0].x; } // has CastPP
1922             //
1923             for (uint next = ei; (next < ae_cnt) && (value == NULL); next++) {
1924               uint fpi = pta->edge_target(next); // Field (AddP)
1925               PointsToNode *ptf = ptnode_adr(fpi);
1926               if (ptf->offset() == offset) {
1927                 Node* nf = ptf->_node;
1928                 for (DUIterator_Fast imax, i = nf->fast_outs(imax); i < imax; i++) {
1929                   store = nf->fast_out(i);
1930                   if (store->is_Store() && store->in(0) != NULL) {
1931                     Node* ctrl = store->in(0);
1932                     while(!(ctrl == ini || ctrl == alloc || ctrl == NULL ||
1933                             ctrl == C->root() || ctrl == C->top() || ctrl->is_Region() ||
1934                             ctrl->is_IfTrue() || ctrl->is_IfFalse())) {
1935                        ctrl = ctrl->in(0);
1936                     }
1937                     if (ctrl == ini || ctrl == alloc) {
1938                       value = store->in(MemNode::ValueIn);
1939                       break;
1940                     }
1941                   }
1942                 }
1943               }
1944             }
1945           }
1946         }
1947         if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
1948           // A field's initializing value was not recorded. Add NULL.
1949           uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
1950           add_edge_from_fields(alloc->_idx, null_idx, offset);
1951         }
1952       }
1953     }
1954   }
1955 }
1956 
1957 // Adjust escape state after Connection Graph is built.
1958 void ConnectionGraph::adjust_escape_state(Node* n) {
1959   PointsToNode* ptn = ptnode_adr(n->_idx);
1960   assert(n->is_AddP(), "Should be called for AddP nodes only");
1961   // Search for objects which are not scalar replaceable
1962   // and mark them to propagate the state to referenced objects.
1963   //
1964 
1965   int offset = ptn->offset();
1966   Node* base = get_addp_base(n);
1967   VectorSet* ptset = PointsTo(base);
1968   int ptset_size = ptset->Size();
1969 
1970   // An object is not scalar replaceable if the field which may point
1971   // to it has unknown offset (unknown element of an array of objects).
1972   //
1973 
1974   if (offset == Type::OffsetBot) {
1975     uint e_cnt = ptn->edge_count();
1976     for (uint ei = 0; ei < e_cnt; ei++) {
1977       uint npi = ptn->edge_target(ei);
1978       ptnode_adr(npi)->set_scalar_replaceable(false);
1979     }
1980   }
1981 
1982   // Currently an object is not scalar replaceable if a LoadStore node
1983   // access its field since the field value is unknown after it.
1984   //
1985   bool has_LoadStore = false;
1986   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1987     Node *use = n->fast_out(i);
1988     if (use->is_LoadStore()) {
1989       has_LoadStore = true;
1990       break;
1991     }
1992   }
1993   // An object is not scalar replaceable if the address points
1994   // to unknown field (unknown element for arrays, offset is OffsetBot).
1995   //
1996   // Or the address may point to more then one object. This may produce
1997   // the false positive result (set not scalar replaceable)
1998   // since the flow-insensitive escape analysis can't separate
1999   // the case when stores overwrite the field's value from the case
2000   // when stores happened on different control branches.
2001   //
2002   // Note: it will disable scalar replacement in some cases:
2003   //
2004   //    Point p[] = new Point[1];
2005   //    p[0] = new Point(); // Will be not scalar replaced
2006   //
2007   // but it will save us from incorrect optimizations in next cases:
2008   //
2009   //    Point p[] = new Point[1];
2010   //    if ( x ) p[0] = new Point(); // Will be not scalar replaced
2011   //
2012   if (ptset_size > 1 || ptset_size != 0 &&
2013       (has_LoadStore || offset == Type::OffsetBot)) {
2014     for( VectorSetI j(ptset); j.test(); ++j ) {
2015       ptnode_adr(j.elem)->set_scalar_replaceable(false);
2016     }
2017   }
2018 }
2019 
2020 // Propagate escape states to referenced nodes.
2021 bool ConnectionGraph::propagate_escape_state(GrowableArray<int>* cg_worklist,
2022                                              GrowableArray<uint>* worklist,
2023                                              PointsToNode::EscapeState esc_state) {
2024   bool has_java_obj = false;
2025 
2026   // push all nodes with the same escape state on the worklist
2027   uint cg_length = cg_worklist->length();
2028   for (uint next = 0; next < cg_length; ++next) {
2029     int nk = cg_worklist->at(next);
2030     if (ptnode_adr(nk)->escape_state() == esc_state)
2031       worklist->push(nk);
2032   }
2033   // mark all reachable nodes
2034   while (worklist->length() > 0) {
2035     PointsToNode* ptn = ptnode_adr(worklist->pop());
2036     if (ptn->node_type() == PointsToNode::JavaObject) {
2037       has_java_obj = true;
2038     }
2039     uint e_cnt = ptn->edge_count();
2040     for (uint ei = 0; ei < e_cnt; ei++) {
2041       uint npi = ptn->edge_target(ei);
2042       PointsToNode *np = ptnode_adr(npi);
2043       if (np->escape_state() < esc_state) {
2044         set_escape_state(npi, esc_state);
2045         worklist->push(npi);
2046       }
2047     }
2048   }
2049   // Has not escaping java objects
2050   return has_java_obj && (esc_state < PointsToNode::GlobalEscape);
2051 }
2052 
2053 // Optimize objects compare.
2054 Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
2055   assert(OptimizePtrCompare, "sanity");
2056   // Clone returned Set since PointsTo() returns pointer
2057   // to the same structure ConnectionGraph.pt_ptset. 
2058   VectorSet ptset1 = *PointsTo(n->in(1));
2059   VectorSet ptset2 = *PointsTo(n->in(2));
2060 
2061   // Check simple cases first.
2062   if (ptset1.Size() == 1) {
2063     uint pt1 = ptset1.getelem();
2064     PointsToNode* ptn1 = ptnode_adr(pt1);
2065     if (ptn1->escape_state() == PointsToNode::NoEscape) {
2066       uint pt2 = ptset2.getelem();
2067       if (ptset2.Size() == 1 && ptset2.getelem() == pt1) {
2068         // Comparing the same not escaping object.
2069         return _pcmp_eq;
2070       }
2071       Node* obj = ptn1->_node;
2072       // Comparing not escaping allocation.
2073       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
2074           !ptset2.test(pt1)) {
2075         return _pcmp_neq; // This includes nullness check.
2076       }
2077     }
2078   } else if (ptset2.Size() == 1) {
2079     uint pt2 = ptset2.getelem();
2080     PointsToNode* ptn2 = ptnode_adr(pt2);
2081     if (ptn2->escape_state() == PointsToNode::NoEscape) {
2082       Node* obj = ptn2->_node;
2083       // Comparing not escaping allocation.
2084       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
2085           !ptset1.test(pt2)) {
2086         return _pcmp_neq; // This includes nullness check.
2087       }
2088     }
2089   }
2090 
2091   if (!ptset1.disjoint(ptset2)) {
2092     // References point to the same object and something else.
2093     return NULL;
2094   }
2095 
2096   bool set1_has_unknown_ptr = ptset1.test(_phantom_object) != 0;
2097   bool set2_has_unknown_ptr = ptset2.test(_phantom_object) != 0;
2098   bool set1_has_null_ptr   = (ptset1.test(_oop_null) | ptset1.test(_noop_null)) != 0;
2099   bool set2_has_null_ptr   = (ptset2.test(_oop_null) | ptset2.test(_noop_null)) != 0;
2100 
2101   if (set1_has_unknown_ptr && set2_has_unknown_ptr ||
2102       set1_has_unknown_ptr && set2_has_null_ptr    ||
2103       set2_has_unknown_ptr && set1_has_null_ptr) {
2104     // Comparing unknown objects or check nullness of unknown object.
2105     return NULL;
2106   }
2107 
2108   // Check if one set has only not escaping allocations.
2109   if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
2110     bool has_only_non_escaping_alloc = true;
2111     for (VectorSetI i(&ptset1); i.test(); ++i) {
2112       uint pt = i.elem;
2113       PointsToNode* ptn = ptnode_adr(pt);
2114       Node* obj = ptn->_node;
2115       if (ptn->escape_state() != PointsToNode::NoEscape ||
2116           !(obj->is_Allocate() || obj->is_CallStaticJava())) {
2117         has_only_non_escaping_alloc = false;
2118         break;
2119       }
2120     }
2121     if (has_only_non_escaping_alloc) {
2122       return _pcmp_neq;
2123     }
2124   }
2125   if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
2126     bool has_only_non_escaping_alloc = true;
2127     for (VectorSetI i(&ptset2); i.test(); ++i) {
2128       uint pt = i.elem;
2129       PointsToNode* ptn = ptnode_adr(pt);
2130       Node* obj = ptn->_node;
2131       if (ptn->escape_state() != PointsToNode::NoEscape ||
2132           !(obj->is_Allocate() || obj->is_CallStaticJava())) {
2133         has_only_non_escaping_alloc = false;
2134         break;
2135       }
2136     }
2137     if (has_only_non_escaping_alloc) {
2138       return _pcmp_neq;
2139     }
2140   }
2141   return NULL;
2142 }
2143 
2144 void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {
2145 
2146     switch (call->Opcode()) {
2147 #ifdef ASSERT
2148     case Op_Allocate:
2149     case Op_AllocateArray:
2150     case Op_Lock:
2151     case Op_Unlock:
2152       assert(false, "should be done already");
2153       break;
2154 #endif
2155     case Op_CallLeaf:
2156     case Op_CallLeafNoFP:
2157     {
2158       // Stub calls, objects do not escape but they are not scale replaceable.
2159       // Adjust escape state for outgoing arguments.
2160       const TypeTuple * d = call->tf()->domain();
2161       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2162         const Type* at = d->field_at(i);
2163         Node *arg = call->in(i)->uncast();
2164         const Type *aat = phase->type(arg);
2165         if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr() &&
2166             ptnode_adr(arg->_idx)->escape_state() < PointsToNode::ArgEscape) {
2167 
2168           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2169                  aat->isa_ptr() != NULL, "expecting an Ptr");
2170 #ifdef ASSERT
2171           if (!(call->Opcode() == Op_CallLeafNoFP &&
2172                 call->as_CallLeaf()->_name != NULL &&
2173                 (strstr(call->as_CallLeaf()->_name, "arraycopy")  != 0) ||
2174                 call->as_CallLeaf()->_name != NULL &&
2175                 (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre")  == 0 ||
2176                  strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ))
2177           ) {
2178             call->dump();
2179             assert(false, "EA: unexpected CallLeaf");
2180           }
2181 #endif
2182           set_escape_state(arg->_idx, PointsToNode::ArgEscape);
2183           if (arg->is_AddP()) {
2184             //
2185             // The inline_native_clone() case when the arraycopy stub is called
2186             // after the allocation before Initialize and CheckCastPP nodes.
2187             //
2188             // Set AddP's base (Allocate) as not scalar replaceable since
2189             // pointer to the base (with offset) is passed as argument.
2190             //
2191             arg = get_addp_base(arg);
2192           }
2193           for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) {
2194             uint pt = j.elem;
2195             set_escape_state(pt, PointsToNode::ArgEscape);
2196           }
2197         }
2198       }
2199       break;
2200     }
2201 
2202     case Op_CallStaticJava:
2203     // For a static call, we know exactly what method is being called.
2204     // Use bytecode estimator to record the call's escape affects
2205     {
2206       ciMethod *meth = call->as_CallJava()->method();
2207       BCEscapeAnalyzer *call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
2208       // fall-through if not a Java method or no analyzer information
2209       if (call_analyzer != NULL) {
2210         const TypeTuple * d = call->tf()->domain();
2211         bool copy_dependencies = false;
2212         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2213           const Type* at = d->field_at(i);
2214           int k = i - TypeFunc::Parms;
2215           Node *arg = call->in(i)->uncast();
2216 
2217           if (at->isa_oopptr() != NULL &&
2218               ptnode_adr(arg->_idx)->escape_state() < PointsToNode::GlobalEscape) {
2219 
2220             bool global_escapes = false;
2221             bool fields_escapes = false;
2222             if (!call_analyzer->is_arg_stack(k)) {
2223               // The argument global escapes, mark everything it could point to
2224               set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
2225               global_escapes = true;
2226             } else {
2227               if (!call_analyzer->is_arg_local(k)) {
2228                 // The argument itself doesn't escape, but any fields might
2229                 fields_escapes = true;
2230               }
2231               set_escape_state(arg->_idx, PointsToNode::ArgEscape);
2232               copy_dependencies = true;
2233             }
2234 
2235             for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) {
2236               uint pt = j.elem;
2237               if (global_escapes) {
2238                 //The argument global escapes, mark everything it could point to
2239                 set_escape_state(pt, PointsToNode::GlobalEscape);
2240               } else {
2241                 if (fields_escapes) {
2242                   // The argument itself doesn't escape, but any fields might
2243                   add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
2244                 }
2245                 set_escape_state(pt, PointsToNode::ArgEscape);
2246               }
2247             }
2248           }
2249         }
2250         if (copy_dependencies)
2251           call_analyzer->copy_dependencies(_compile->dependencies());
2252         break;
2253       }
2254     }
2255 
2256     default:
2257     // Fall-through here if not a Java method or no analyzer information
2258     // or some other type of call, assume the worst case: all arguments
2259     // globally escape.
2260     {
2261       // adjust escape state for  outgoing arguments
2262       const TypeTuple * d = call->tf()->domain();
2263       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2264         const Type* at = d->field_at(i);
2265         if (at->isa_oopptr() != NULL) {
2266           Node *arg = call->in(i)->uncast();
2267           set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
2268           for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) {
2269             uint pt = j.elem;
2270             set_escape_state(pt, PointsToNode::GlobalEscape);
2271           }
2272         }
2273       }
2274     }
2275   }
2276 }
2277 void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *phase) {
2278   CallNode   *call = resproj->in(0)->as_Call();
2279   uint    call_idx = call->_idx;
2280   uint resproj_idx = resproj->_idx;
2281 
2282   switch (call->Opcode()) {
2283     case Op_Allocate:
2284     {
2285       Node *k = call->in(AllocateNode::KlassNode);
2286       const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
2287       assert(kt != NULL, "TypeKlassPtr  required.");
2288       ciKlass* cik = kt->klass();
2289 
2290       PointsToNode::EscapeState es;
2291       uint edge_to;
2292       if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
2293          !cik->is_instance_klass() || // StressReflectiveCode
2294           cik->as_instance_klass()->has_finalizer()) {
2295         es = PointsToNode::GlobalEscape;
2296         edge_to = _phantom_object; // Could not be worse
2297       } else {
2298         es = PointsToNode::NoEscape;
2299         edge_to = call_idx;
2300         assert(ptnode_adr(call_idx)->scalar_replaceable(), "sanity");
2301       }
2302       set_escape_state(call_idx, es);
2303       add_pointsto_edge(resproj_idx, edge_to);
2304       _processed.set(resproj_idx);
2305       break;
2306     }
2307 
2308     case Op_AllocateArray:
2309     {
2310 
2311       Node *k = call->in(AllocateNode::KlassNode);
2312       const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
2313       assert(kt != NULL, "TypeKlassPtr  required.");
2314       ciKlass* cik = kt->klass();
2315 
2316       PointsToNode::EscapeState es;
2317       uint edge_to;
2318       if (!cik->is_array_klass()) { // StressReflectiveCode
2319         es = PointsToNode::GlobalEscape;
2320         edge_to = _phantom_object;
2321       } else {
2322         es = PointsToNode::NoEscape;
2323         edge_to = call_idx;
2324         assert(ptnode_adr(call_idx)->scalar_replaceable(), "sanity");
2325         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
2326         if (length < 0 || length > EliminateAllocationArraySizeLimit) {
2327           // Not scalar replaceable if the length is not constant or too big.
2328           ptnode_adr(call_idx)->set_scalar_replaceable(false);
2329         }
2330       }
2331       set_escape_state(call_idx, es);
2332       add_pointsto_edge(resproj_idx, edge_to);
2333       _processed.set(resproj_idx);
2334       break;
2335     }
2336 
2337     case Op_CallStaticJava:
2338     // For a static call, we know exactly what method is being called.
2339     // Use bytecode estimator to record whether the call's return value escapes
2340     {
2341       bool done = true;
2342       const TypeTuple *r = call->tf()->range();
2343       const Type* ret_type = NULL;
2344 
2345       if (r->cnt() > TypeFunc::Parms)
2346         ret_type = r->field_at(TypeFunc::Parms);
2347 
2348       // Note:  we use isa_ptr() instead of isa_oopptr()  here because the
2349       //        _multianewarray functions return a TypeRawPtr.
2350       if (ret_type == NULL || ret_type->isa_ptr() == NULL) {
2351         _processed.set(resproj_idx);
2352         break;  // doesn't return a pointer type
2353       }
2354       ciMethod *meth = call->as_CallJava()->method();
2355       const TypeTuple * d = call->tf()->domain();
2356       if (meth == NULL) {
2357         // not a Java method, assume global escape
2358         set_escape_state(call_idx, PointsToNode::GlobalEscape);
2359         add_pointsto_edge(resproj_idx, _phantom_object);
2360       } else {
2361         BCEscapeAnalyzer *call_analyzer = meth->get_bcea();
2362         bool copy_dependencies = false;
2363 
2364         if (call_analyzer->is_return_allocated()) {
2365           // Returns a newly allocated unescaped object, simply
2366           // update dependency information.
2367           // Mark it as NoEscape so that objects referenced by
2368           // it's fields will be marked as NoEscape at least.
2369           set_escape_state(call_idx, PointsToNode::NoEscape);
2370           ptnode_adr(call_idx)->set_scalar_replaceable(false);
2371           add_pointsto_edge(resproj_idx, call_idx);
2372           copy_dependencies = true;
2373         } else if (call_analyzer->is_return_local()) {
2374           // determine whether any arguments are returned
2375           set_escape_state(call_idx, PointsToNode::ArgEscape);
2376           bool ret_arg = false;
2377           for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2378             const Type* at = d->field_at(i);
2379 
2380             if (at->isa_oopptr() != NULL) {
2381               Node *arg = call->in(i)->uncast();
2382 
2383               if (call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2384                 ret_arg = true;
2385                 PointsToNode *arg_esp = ptnode_adr(arg->_idx);
2386                 if (arg_esp->node_type() == PointsToNode::UnknownType)
2387                   done = false;
2388                 else if (arg_esp->node_type() == PointsToNode::JavaObject)
2389                   add_pointsto_edge(resproj_idx, arg->_idx);
2390                 else
2391                   add_deferred_edge(resproj_idx, arg->_idx);
2392               }
2393             }
2394           }
2395           if (done && !ret_arg) {
2396             // Returns unknown object.
2397             set_escape_state(call_idx, PointsToNode::GlobalEscape);
2398             add_pointsto_edge(resproj_idx, _phantom_object);
2399           }
2400           if (done) {
2401             copy_dependencies = true;
2402           }
2403         } else {
2404           set_escape_state(call_idx, PointsToNode::GlobalEscape);
2405           add_pointsto_edge(resproj_idx, _phantom_object);
2406         }
2407         if (copy_dependencies)
2408           call_analyzer->copy_dependencies(_compile->dependencies());
2409       }
2410       if (done)
2411         _processed.set(resproj_idx);
2412       break;
2413     }
2414 
2415     default:
2416     // Some other type of call, assume the worst case that the
2417     // returned value, if any, globally escapes.
2418     {
2419       const TypeTuple *r = call->tf()->range();
2420       if (r->cnt() > TypeFunc::Parms) {
2421         const Type* ret_type = r->field_at(TypeFunc::Parms);
2422 
2423         // Note:  we use isa_ptr() instead of isa_oopptr()  here because the
2424         //        _multianewarray functions return a TypeRawPtr.
2425         if (ret_type->isa_ptr() != NULL) {
2426           set_escape_state(call_idx, PointsToNode::GlobalEscape);
2427           add_pointsto_edge(resproj_idx, _phantom_object);
2428         }
2429       }
2430       _processed.set(resproj_idx);
2431     }
2432   }
2433 }
2434 
2435 // Populate Connection Graph with Ideal nodes and create simple
2436 // connection graph edges (do not need to check the node_type of inputs
2437 // or to call PointsTo() to walk the connection graph).
2438 void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase) {
2439   if (_processed.test(n->_idx))
2440     return; // No need to redefine node's state.
2441 
2442   if (n->is_Call()) {
2443     // Arguments to allocation and locking don't escape.
2444     if (n->is_Allocate()) {
2445       add_node(n, PointsToNode::JavaObject, PointsToNode::UnknownEscape, true);
2446       record_for_optimizer(n);
2447     } else if (n->is_Lock() || n->is_Unlock()) {
2448       // Put Lock and Unlock nodes on IGVN worklist to process them during
2449       // the first IGVN optimization when escape information is still available.
2450       record_for_optimizer(n);
2451       _processed.set(n->_idx);
2452     } else {
2453       // Don't mark as processed since call's arguments have to be processed.
2454       PointsToNode::NodeType nt = PointsToNode::UnknownType;
2455       PointsToNode::EscapeState es = PointsToNode::UnknownEscape;
2456 
2457       // Check if a call returns an object.
2458       const TypeTuple *r = n->as_Call()->tf()->range();
2459       if (r->cnt() > TypeFunc::Parms &&
2460           r->field_at(TypeFunc::Parms)->isa_ptr() &&
2461           n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
2462         nt = PointsToNode::JavaObject;
2463         if (!n->is_CallStaticJava()) {
2464           // Since the called mathod is statically unknown assume
2465           // the worst case that the returned value globally escapes.
2466           es = PointsToNode::GlobalEscape;
2467         }
2468       }
2469       add_node(n, nt, es, false);
2470     }
2471     return;
2472   }
2473 
2474   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
2475   // ThreadLocal has RawPrt type.
2476   switch (n->Opcode()) {
2477     case Op_AddP:
2478     {
2479       add_node(n, PointsToNode::Field, PointsToNode::UnknownEscape, false);
2480       break;
2481     }
2482     case Op_CastX2P:
2483     { // "Unsafe" memory access.
2484       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
2485       break;
2486     }
2487     case Op_CastPP:
2488     case Op_CheckCastPP:
2489     case Op_EncodeP:
2490     case Op_DecodeN:
2491     {
2492       add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
2493       int ti = n->in(1)->_idx;
2494       PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
2495       if (nt == PointsToNode::UnknownType) {
2496         _delayed_worklist.push(n); // Process it later.
2497         break;
2498       } else if (nt == PointsToNode::JavaObject) {
2499         add_pointsto_edge(n->_idx, ti);
2500       } else {
2501         add_deferred_edge(n->_idx, ti);
2502       }
2503       _processed.set(n->_idx);
2504       break;
2505     }
2506     case Op_ConP:
2507     {
2508       // assume all pointer constants globally escape except for null
2509       PointsToNode::EscapeState es;
2510       if (phase->type(n) == TypePtr::NULL_PTR)
2511         es = PointsToNode::NoEscape;
2512       else
2513         es = PointsToNode::GlobalEscape;
2514 
2515       add_node(n, PointsToNode::JavaObject, es, true);
2516       break;
2517     }
2518     case Op_ConN:
2519     {
2520       // assume all narrow oop constants globally escape except for null
2521       PointsToNode::EscapeState es;
2522       if (phase->type(n) == TypeNarrowOop::NULL_PTR)
2523         es = PointsToNode::NoEscape;
2524       else
2525         es = PointsToNode::GlobalEscape;
2526 
2527       add_node(n, PointsToNode::JavaObject, es, true);
2528       break;
2529     }
2530     case Op_CreateEx:
2531     {
2532       // assume that all exception objects globally escape
2533       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
2534       break;
2535     }
2536     case Op_LoadKlass:
2537     case Op_LoadNKlass:
2538     {
2539       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
2540       break;
2541     }
2542     case Op_LoadP:
2543     case Op_LoadN:
2544     {
2545       const Type *t = phase->type(n);
2546       if (t->make_ptr() == NULL) {
2547         _processed.set(n->_idx);
2548         return;
2549       }
2550       add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
2551       break;
2552     }
2553     case Op_Parm:
2554     {
2555       _processed.set(n->_idx); // No need to redefine it state.
2556       uint con = n->as_Proj()->_con;
2557       if (con < TypeFunc::Parms)
2558         return;
2559       const Type *t = n->in(0)->as_Start()->_domain->field_at(con);
2560       if (t->isa_ptr() == NULL)
2561         return;
2562       // We have to assume all input parameters globally escape
2563       // (Note: passing 'false' since _processed is already set).
2564       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, false);
2565       break;
2566     }
2567     case Op_PartialSubtypeCheck:
2568     { // Produces Null or notNull and is used in CmpP.
2569       add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
2570       break;
2571     }
2572     case Op_Phi:
2573     {
2574       const Type *t = n->as_Phi()->type();
2575       if (t->make_ptr() == NULL) {
2576         // nothing to do if not an oop or narrow oop
2577         _processed.set(n->_idx);
2578         return;
2579       }
2580       add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
2581       uint i;
2582       for (i = 1; i < n->req() ; i++) {
2583         Node* in = n->in(i);
2584         if (in == NULL)
2585           continue;  // ignore NULL
2586         in = in->uncast();
2587         if (in->is_top() || in == n)
2588           continue;  // ignore top or inputs which go back this node
2589         int ti = in->_idx;
2590         PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
2591         if (nt == PointsToNode::UnknownType) {
2592           break;
2593         } else if (nt == PointsToNode::JavaObject) {
2594           add_pointsto_edge(n->_idx, ti);
2595         } else {
2596           add_deferred_edge(n->_idx, ti);
2597         }
2598       }
2599       if (i >= n->req())
2600         _processed.set(n->_idx);
2601       else
2602         _delayed_worklist.push(n);
2603       break;
2604     }
2605     case Op_Proj:
2606     {
2607       // we are only interested in the oop result projection from a call
2608       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
2609         const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
2610         assert(r->cnt() > TypeFunc::Parms, "sanity");
2611         if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
2612           add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
2613           int ti = n->in(0)->_idx;
2614           // The call may not be registered yet (since not all its inputs are registered)
2615           // if this is the projection from backbranch edge of Phi.
2616           if (ptnode_adr(ti)->node_type() != PointsToNode::UnknownType) {
2617             process_call_result(n->as_Proj(), phase);
2618           }
2619           if (!_processed.test(n->_idx)) {
2620             // The call's result may need to be processed later if the call
2621             // returns it's argument and the argument is not processed yet.
2622             _delayed_worklist.push(n);
2623           }
2624           break;
2625         }
2626       }
2627       _processed.set(n->_idx);
2628       break;
2629     }
2630     case Op_Return:
2631     {
2632       if( n->req() > TypeFunc::Parms &&
2633           phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
2634         // Treat Return value as LocalVar with GlobalEscape escape state.
2635         add_node(n, PointsToNode::LocalVar, PointsToNode::GlobalEscape, false);
2636         int ti = n->in(TypeFunc::Parms)->_idx;
2637         PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
2638         if (nt == PointsToNode::UnknownType) {
2639           _delayed_worklist.push(n); // Process it later.
2640           break;
2641         } else if (nt == PointsToNode::JavaObject) {
2642           add_pointsto_edge(n->_idx, ti);
2643         } else {
2644           add_deferred_edge(n->_idx, ti);
2645         }
2646       }
2647       _processed.set(n->_idx);
2648       break;
2649     }
2650     case Op_StoreP:
2651     case Op_StoreN:
2652     {
2653       const Type *adr_type = phase->type(n->in(MemNode::Address));
2654       adr_type = adr_type->make_ptr();
2655       if (adr_type->isa_oopptr()) {
2656         add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
2657       } else {
2658         Node* adr = n->in(MemNode::Address);
2659         if (adr->is_AddP() && phase->type(adr) == TypeRawPtr::NOTNULL &&
2660             adr->in(AddPNode::Address)->is_Proj() &&
2661             adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
2662           add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
2663           // We are computing a raw address for a store captured
2664           // by an Initialize compute an appropriate address type.
2665           int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2666           assert(offs != Type::OffsetBot, "offset must be a constant");
2667         } else {
2668           _processed.set(n->_idx);
2669           return;
2670         }
2671       }
2672       break;
2673     }
2674     case Op_StorePConditional:
2675     case Op_CompareAndSwapP:
2676     case Op_CompareAndSwapN:
2677     {
2678       const Type *adr_type = phase->type(n->in(MemNode::Address));
2679       adr_type = adr_type->make_ptr();
2680       if (adr_type->isa_oopptr()) {
2681         add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
2682       } else {
2683         _processed.set(n->_idx);
2684         return;
2685       }
2686       break;
2687     }
2688     case Op_AryEq:
2689     case Op_StrComp:
2690     case Op_StrEquals:
2691     case Op_StrIndexOf:
2692     {
2693       // char[] arrays passed to string intrinsics are not scalar replaceable.
2694       add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
2695       break;
2696     }
2697     case Op_ThreadLocal:
2698     {
2699       add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
2700       break;
2701     }
2702     default:
2703       ;
2704       // nothing to do
2705   }
2706   return;
2707 }
2708 
2709 void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
2710   uint n_idx = n->_idx;
2711   assert(ptnode_adr(n_idx)->_node != NULL, "node should be registered");
2712 
2713   // Don't set processed bit for AddP, LoadP, StoreP since
2714   // they may need more then one pass to process.
2715   // Also don't mark as processed Call nodes since their
2716   // arguments may need more then one pass to process.
2717   if (_processed.test(n_idx))
2718     return; // No need to redefine node's state.
2719 
2720   if (n->is_Call()) {
2721     CallNode *call = n->as_Call();
2722     process_call_arguments(call, phase);
2723     return;
2724   }
2725 
2726   switch (n->Opcode()) {
2727     case Op_AddP:
2728     {
2729       Node *base = get_addp_base(n);
2730       // Create a field edge to this node from everything base could point to.
2731       for( VectorSetI i(PointsTo(base)); i.test(); ++i ) {
2732         uint pt = i.elem;
2733         add_field_edge(pt, n_idx, address_offset(n, phase));
2734       }
2735       break;
2736     }
2737     case Op_CastX2P:
2738     {
2739       assert(false, "Op_CastX2P");
2740       break;
2741     }
2742     case Op_CastPP:
2743     case Op_CheckCastPP:
2744     case Op_EncodeP:
2745     case Op_DecodeN:
2746     {
2747       int ti = n->in(1)->_idx;
2748       assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "all nodes should be registered");
2749       if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
2750         add_pointsto_edge(n_idx, ti);
2751       } else {
2752         add_deferred_edge(n_idx, ti);
2753       }
2754       _processed.set(n_idx);
2755       break;
2756     }
2757     case Op_ConP:
2758     {
2759       assert(false, "Op_ConP");
2760       break;
2761     }
2762     case Op_ConN:
2763     {
2764       assert(false, "Op_ConN");
2765       break;
2766     }
2767     case Op_CreateEx:
2768     {
2769       assert(false, "Op_CreateEx");
2770       break;
2771     }
2772     case Op_LoadKlass:
2773     case Op_LoadNKlass:
2774     {
2775       assert(false, "Op_LoadKlass");
2776       break;
2777     }
2778     case Op_LoadP:
2779     case Op_LoadN:
2780     {
2781       const Type *t = phase->type(n);
2782 #ifdef ASSERT
2783       if (t->make_ptr() == NULL)
2784         assert(false, "Op_LoadP");
2785 #endif
2786 
2787       Node* adr = n->in(MemNode::Address)->uncast();
2788       Node* adr_base;
2789       if (adr->is_AddP()) {
2790         adr_base = get_addp_base(adr);
2791       } else {
2792         adr_base = adr;
2793       }
2794 
2795       // For everything "adr_base" could point to, create a deferred edge from
2796       // this node to each field with the same offset.
2797       int offset = address_offset(adr, phase);
2798       for( VectorSetI i(PointsTo(adr_base)); i.test(); ++i ) {
2799         uint pt = i.elem;
2800         add_deferred_edge_to_fields(n_idx, pt, offset);
2801       }
2802       break;
2803     }
2804     case Op_Parm:
2805     {
2806       assert(false, "Op_Parm");
2807       break;
2808     }
2809     case Op_PartialSubtypeCheck:
2810     {
2811       assert(false, "Op_PartialSubtypeCheck");
2812       break;
2813     }
2814     case Op_Phi:
2815     {
2816 #ifdef ASSERT
2817       const Type *t = n->as_Phi()->type();
2818       if (t->make_ptr() == NULL)
2819         assert(false, "Op_Phi");
2820 #endif
2821       for (uint i = 1; i < n->req() ; i++) {
2822         Node* in = n->in(i);
2823         if (in == NULL)
2824           continue;  // ignore NULL
2825         in = in->uncast();
2826         if (in->is_top() || in == n)
2827           continue;  // ignore top or inputs which go back this node
2828         int ti = in->_idx;
2829         PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
2830         assert(nt != PointsToNode::UnknownType, "all nodes should be known");
2831         if (nt == PointsToNode::JavaObject) {
2832           add_pointsto_edge(n_idx, ti);
2833         } else {
2834           add_deferred_edge(n_idx, ti);
2835         }
2836       }
2837       _processed.set(n_idx);
2838       break;
2839     }
2840     case Op_Proj:
2841     {
2842       // we are only interested in the oop result projection from a call
2843       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
2844         assert(ptnode_adr(n->in(0)->_idx)->node_type() != PointsToNode::UnknownType,
2845                "all nodes should be registered");
2846         const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
2847         assert(r->cnt() > TypeFunc::Parms, "sanity");
2848         if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
2849           process_call_result(n->as_Proj(), phase);
2850           assert(_processed.test(n_idx), "all call results should be processed");
2851           break;
2852         }
2853       }
2854       assert(false, "Op_Proj");
2855       break;
2856     }
2857     case Op_Return:
2858     {
2859 #ifdef ASSERT
2860       if( n->req() <= TypeFunc::Parms ||
2861           !phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
2862         assert(false, "Op_Return");
2863       }
2864 #endif
2865       int ti = n->in(TypeFunc::Parms)->_idx;
2866       assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "node should be registered");
2867       if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
2868         add_pointsto_edge(n_idx, ti);
2869       } else {
2870         add_deferred_edge(n_idx, ti);
2871       }
2872       _processed.set(n_idx);
2873       break;
2874     }
2875     case Op_StoreP:
2876     case Op_StoreN:
2877     case Op_StorePConditional:
2878     case Op_CompareAndSwapP:
2879     case Op_CompareAndSwapN:
2880     {
2881       Node *adr = n->in(MemNode::Address);
2882       const Type *adr_type = phase->type(adr)->make_ptr();
2883 #ifdef ASSERT
2884       if (!adr_type->isa_oopptr())
2885         assert(phase->type(adr) == TypeRawPtr::NOTNULL, "Op_StoreP");
2886 #endif
2887 
2888       assert(adr->is_AddP(), "expecting an AddP");
2889       Node *adr_base = get_addp_base(adr);
2890       Node *val = n->in(MemNode::ValueIn)->uncast();
2891       // For everything "adr_base" could point to, create a deferred edge
2892       // to "val" from each field with the same offset.
2893       for( VectorSetI i(PointsTo(adr_base)); i.test(); ++i ) {
2894         uint pt = i.elem;
2895         add_edge_from_fields(pt, val->_idx, address_offset(adr, phase));
2896       }
2897       break;
2898     }
2899     case Op_AryEq:
2900     case Op_StrComp:
2901     case Op_StrEquals:
2902     case Op_StrIndexOf:
2903     {
2904       // char[] arrays passed to string intrinsic do not escape but
2905       // they are not scalar replaceable. Adjust escape state for them.
2906       // Start from in(2) edge since in(1) is memory edge.
2907       for (uint i = 2; i < n->req(); i++) {
2908         Node* adr = n->in(i)->uncast();
2909         const Type *at = phase->type(adr);
2910         if (!adr->is_top() && at->isa_ptr()) {
2911           assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
2912                  at->isa_ptr() != NULL, "expecting an Ptr");
2913           if (adr->is_AddP()) {
2914             adr = get_addp_base(adr);
2915           }
2916           // Mark as ArgEscape everything "adr" could point to.
2917           set_escape_state(adr->_idx, PointsToNode::ArgEscape);
2918         }
2919       }
2920       _processed.set(n_idx);
2921       break;
2922     }
2923     case Op_ThreadLocal:
2924     {
2925       assert(false, "Op_ThreadLocal");
2926       break;
2927     }
2928     default:
2929       // This method should be called only for EA specific nodes.
2930       ShouldNotReachHere();
2931   }
2932 }
2933 
2934 #ifndef PRODUCT
2935 void ConnectionGraph::dump() {
2936   bool first = true;
2937 
2938   uint size = nodes_size();
2939   for (uint ni = 0; ni < size; ni++) {
2940     PointsToNode *ptn = ptnode_adr(ni);
2941     PointsToNode::NodeType ptn_type = ptn->node_type();
2942 
2943     if (ptn_type != PointsToNode::JavaObject || ptn->_node == NULL)
2944       continue;
2945     PointsToNode::EscapeState es = escape_state(ptn->_node);
2946     if (ptn->_node->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
2947       if (first) {
2948         tty->cr();
2949         tty->print("======== Connection graph for ");
2950         _compile->method()->print_short_name();
2951         tty->cr();
2952         first = false;
2953       }
2954       tty->print("%6d ", ni);
2955       ptn->dump();
2956       // Print all locals which reference this allocation
2957       for (uint li = ni; li < size; li++) {
2958         PointsToNode *ptn_loc = ptnode_adr(li);
2959         PointsToNode::NodeType ptn_loc_type = ptn_loc->node_type();
2960         if ( ptn_loc_type == PointsToNode::LocalVar && ptn_loc->_node != NULL &&
2961              ptn_loc->edge_count() == 1 && ptn_loc->edge_target(0) == ni ) {
2962           ptnode_adr(li)->dump(false);
2963         }
2964       }
2965       if (Verbose) {
2966         // Print all fields which reference this allocation
2967         for (uint i = 0; i < ptn->edge_count(); i++) {
2968           uint ei = ptn->edge_target(i);
2969           ptnode_adr(ei)->dump(false);
2970         }
2971       }
2972       tty->cr();
2973     }
2974   }
2975 }
2976 #endif