/*
 * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  // Add ConP(#NULL) and ConN(#NULL) nodes.
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
  _pcmp_neq = NULL; // Should be initialized
  _pcmp_eq  = NULL;
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
  int cnt = C->macro_count();
  for( int i=0; i < cnt; i++ ) {
    Node *n = C->macro_node(i);
    if ( n->is_Allocate() )
      return true;
    if( n->is_Lock() ) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if( !(obj->is_Parm() || obj->is_Con()) )
        return true;
    }
  }
  return false;
}
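
// Illustrative note (added commentary, not from the original sources):
// in Java terms, assuming a simple hypothetical method such as
//
//   int len() { Point p = new Point(1, 2); return p.x + p.y; }
//
// the 'new Point' produces an Allocate macro node, so the compilation is a
// candidate for EA; a method with neither allocations nor locks on
// non-constant, non-parameter objects is skipped entirely.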

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase t2("escapeAnalysis", &Phase::_t_escapeAnalysis, true);
  ResourceMark rm;

  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non-escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0)
    igvn->hash_delete(oop_null);
  if (noop_null->outcnt() == 0)
    igvn->hash_delete(noop_null);
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<Node*> storestore_worklist;
  GrowableArray<PointsToNode*>   ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_worklist;
  GrowableArray<FieldNode*>      oop_fields_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase t3("connectionGraph", &Phase::_t_connectionGraph, true);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->unique(), NULL);  // preallocate space
  // Initialize worklist
  if (C->root() != NULL) {
    ideal_nodes.push(C->root());
  }
  for( uint next = 0; next < ideal_nodes.size(); ++next ) {
    Node* n = ideal_nodes.at(next);
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is a Unique_Node_List.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != NULL) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only allocation and java static call results are interesting.
          non_escaped_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    if (n->is_MergeMem()) {
      // Collect all MergeMem nodes to add memory slices for
      // scalar replaceable objects in split_unique_types().
      _mergemem_worklist.append(n->as_MergeMem());
    } else if (OptimizePtrCompare && n->is_Cmp() &&
               (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
      // Collect pointer compare nodes.
      ptr_cmp_worklist.append(n);
    } else if (n->is_MemBarStoreStore()) {
      // Collect all MemBarStoreStore nodes so that depending on the
      // escape status of the associated Allocate node some of them
      // may be eliminated.
      storestore_worklist.append(n);
#ifdef ASSERT
    } else if(n->is_AddP()) {
      // Collect address nodes for graph verification.
      addp_worklist.append(n);
#endif
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      ideal_nodes.push(m);
    }
  }
  if (non_escaped_worklist.length() == 0) {
    _collecting = false;
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while(delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }
  int ptnodes_length = ptnodes_worklist.length();

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and all
    // local vars have edges.
    _verify = true;
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or we hit the time or iteration limits.
    _collecting = false;
    return false;
  }

  // 3. Adjust scalar_replaceable state of non-escaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    if (ptn->escape_state() == PointsToNode::NoEscape &&
        ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn);
      if (ptn->scalar_replaceable()) {
        alloc_worklist.append(ptn->ideal_node());
      }
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  if (has_scalar_replaceable_candidates &&
      C->AliasLevel() >= 3 && EliminateAllocations) {
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist);
    if (C->failing())  return false;
    C->print_method("After Escape Analysis", 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if(!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if(!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    } else if(C->AliasLevel() < 3) {
      tty->print(" since AliasLevel < 3 ===");
    }
    tty->cr();
#endif
  }
  return has_non_escaping_obj;
}
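
// Hedged example (added commentary, not from the original sources): if
// compute_escape() proves that 'p' in
//
//   Point p = new Point(x, y);
//   return p.x + p.y;
//
// is NoEscape and scalar replaceable, split_unique_types() above gives p's
// fields unique alias classes so that later macro expansion can replace the
// allocation with plain scalar values for p.x and p.y.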

// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != NULL) {
    Node* adr = n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == NULL ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape,
                           adr, delayed_worklist);
  }
}

// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method should not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != NULL)
    return; // No need to redefine PointsTo node during first iteration.

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on IGVN worklist to process them during
      // first IGVN optimization when escape information is still available.
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != NULL && strcmp(name, "uncommon_trap") == 0)
          return; // Skip uncommon traps
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
      if (n->as_Call()->returns_pointer() &&
          n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
        add_call_node(n->as_Call());
      }
    }
    return;
  }
  // Put this check here to process call arguments since some call nodes
  // point to phantom_obj.
  if (n_ptn == phantom_obj || n_ptn == null_obj)
    return; // Skip predefined nodes.

  int opcode = n->Opcode();
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      // Field nodes are created for all field types. They are used in
      // adjust_scalar_replaceable_state() and split_unique_types().
      // Note, non-oop fields will have only base edges in Connection
      // Graph because such fields are not used for oop loads and stores.
      int offset = address_offset(n, igvn);
      add_field(n, PointsToNode::NoEscape, offset);
      if (ptn_base == NULL) {
        delayed_worklist->push(n); // Process it later.
      } else {
        n_ptn = ptnode_adr(n_idx);
        add_base(n_ptn->as_Field(), ptn_base);
      }
      break;
    }
    case Op_CastX2P: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), delayed_worklist);
      break;
    }
    case Op_CMoveP: {
      add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during first iteration because some could be
      // not defined yet.
      delayed_worklist->push(n);
      break;
    }
    case Op_ConP:
    case Op_ConN: {
      // assume all oop constants globally escape except for null
      PointsToNode::EscapeState es;
      if (igvn->type(n) == TypePtr::NULL_PTR ||
          igvn->type(n) == TypeNarrowOop::NULL_PTR) {
        es = PointsToNode::NoEscape;
      } else {
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(n, es);
      break;
    }
    case Op_CreateEx: {
      // assume that all exception objects globally escape
      add_java_object(n, PointsToNode::GlobalEscape);
      break;
    }
    case Op_LoadKlass:
    case Op_LoadNKlass: {
      // Unknown class is loaded
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadPLocked: {
      add_objload_to_connection_graph(n, delayed_worklist);
      break;
    }
    case Op_Parm: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_PartialSubtypeCheck: {
      // Produces Null or notNull and is used only in CmpP so
      // phantom_obj can be used.
      map_ideal_node(n, phantom_obj); // Result is unknown
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during first iteration because some could be
        // not defined yet.
        delayed_worklist->push(n);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape,
                               n->in(0), delayed_worklist);
      }
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), delayed_worklist);
      }
      break;
    }
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      // fallthrough
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StorePConditional:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN: {
      Node* adr = n->in(MemNode::Address);
      const Type *adr_type = igvn->type(adr);
      adr_type = adr_type->make_ptr();
      if (adr_type->isa_oopptr() ||
          ((opcode == Op_StoreP || opcode == Op_StoreN) &&
           (adr_type == TypeRawPtr::NOTNULL &&
            adr->in(AddPNode::Address)->is_Proj() &&
            adr->in(AddPNode::Address)->in(0)->is_Allocate()))) {
        delayed_worklist->push(n); // Process it later.
#ifdef ASSERT
        assert(adr->is_AddP(), "expecting an AddP");
        if (adr_type == TypeRawPtr::NOTNULL) {
          // Verify a raw address for a store captured by Initialize node.
          int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
          assert(offs != Type::OffsetBot, "offset must be a constant");
        }
#endif
      } else {
        // Ignore the copy of the displaced header to the BoxNode (OSR compilation).
        if (adr->is_BoxLock())
          break;
        // Stored value escapes in unsafe access.
        if ((opcode == Op_StoreP) && (adr_type == TypeRawPtr::BOTTOM)) {
          // Pointer stores in G1 barriers look like unsafe access.
          // Ignore such stores to be able to scalar replace non-escaping
          // allocations.
          if (UseG1GC && adr->is_AddP()) {
            Node* base = get_addp_base(adr);
            if (base->Opcode() == Op_LoadP &&
                base->in(MemNode::Address)->is_AddP()) {
              adr = base->in(MemNode::Address);
              Node* tls = get_addp_base(adr);
              if (tls->Opcode() == Op_ThreadLocal) {
                int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
                if (offs == in_bytes(JavaThread::satb_mark_queue_offset() +
                                     PtrQueue::byte_offset_of_buf())) {
                  break; // G1 pre-barrier previous oop value store.
                }
                if (offs == in_bytes(JavaThread::dirty_card_queue_offset() +
                                     PtrQueue::byte_offset_of_buf())) {
                  break; // G1 post-barrier card address store.
                }
              }
            }
          }
          delayed_worklist->push(n); // Process unsafe access later.
          break;
        }
#ifdef ASSERT
        n->dump(1);
        assert(false, "not unsafe or G1 barrier raw StoreP");
#endif
      }
      break;
    }
    case Op_AryEq:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf: {
      add_local_var(n, PointsToNode::ArgEscape);
      delayed_worklist->push(n); // Process it later.
      break;
    }
    case Op_ThreadLocal: {
      add_java_object(n, PointsToNode::ArgEscape);
      break;
    }
    default:
      ; // Do nothing for nodes not related to EA.
  }
  return;
}
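
// Note (added commentary): graph construction is deliberately two-phase.
// add_node_to_connection_graph() runs while ideal nodes are still being
// discovered, so edges whose target PointsTo nodes may not exist yet are
// postponed via delayed_worklist and materialized by add_final_edges()
// once every node has been registered.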

#ifdef ASSERT
#define ELSE_FAIL(name)                               \
      /* Should not be called for non-pointer types. */ \
      n->dump(1);                                       \
      assert(false, name);                              \
      break;
#else
#define ELSE_FAIL(name) \
      break;
#endif

// Add final simple edges to graph.
void ConnectionGraph::add_final_edges(Node *n) {
  PointsToNode* n_ptn = ptnode_adr(n->_idx);
#ifdef ASSERT
  if (_verify && n_ptn->is_JavaObject())
    return; // This method does not change graph for JavaObject.
#endif

  if (n->is_Call()) {
    process_call_arguments(n->as_Call());
    return;
  }
  assert(n->is_Store() || n->is_LoadStore() ||
         ((n_ptn != NULL) && (n_ptn->ideal_node() != NULL)),
         "node should be registered already");
  int opcode = n->Opcode();
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      assert(ptn_base != NULL, "field's base should be registered");
      add_base(n_ptn->as_Field(), ptn_base);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), NULL);
      break;
    }
    case Op_CMoveP: {
      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == NULL)
          continue;  // ignore NULL
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n)
          continue;  // ignore top or inputs which go back to this node
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadPLocked: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = _igvn->type(n);
      if (t->make_ptr() != NULL) {
        Node* adr = n->in(MemNode::Address);
        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
        break;
      }
      ELSE_FAIL("Op_LoadP");
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        for (uint i = 1; i < n->req(); i++) {
          Node* in = n->in(i);
          if (in == NULL)
            continue;  // ignore NULL
          Node* uncast_in = in->uncast();
          if (uncast_in->is_top() || uncast_in == n)
            continue;  // ignore top or inputs which go back to this node
          PointsToNode* ptn = ptnode_adr(in->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
        break;
      }
      ELSE_FAIL("Op_Phi");
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
        break;
      }
      ELSE_FAIL("Op_Proj");
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), NULL);
        break;
      }
      ELSE_FAIL("Op_Return");
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StorePConditional:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      Node* adr = n->in(MemNode::Address);
      if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN) {
        const Type* t = _igvn->type(n);
        if (t->make_ptr() != NULL) {
          add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
        }
      }
      const Type *adr_type = _igvn->type(adr);
      adr_type = adr_type->make_ptr();
      if (adr_type->isa_oopptr() ||
          ((opcode == Op_StoreP || opcode == Op_StoreN) &&
           (adr_type == TypeRawPtr::NOTNULL &&
            adr->in(AddPNode::Address)->is_Proj() &&
            adr->in(AddPNode::Address)->in(0)->is_Allocate()))) {
        // Point Address to Value
        PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
        assert(adr_ptn != NULL &&
               adr_ptn->as_Field()->is_oop(), "node should be registered");
        Node *val = n->in(MemNode::ValueIn);
        PointsToNode* ptn = ptnode_adr(val->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(adr_ptn, ptn);
        break;
      } else if ((opcode == Op_StoreP) && (adr_type == TypeRawPtr::BOTTOM)) {
        // Stored value escapes in unsafe access.
        Node *val = n->in(MemNode::ValueIn);
        PointsToNode* ptn = ptnode_adr(val->_idx);
        assert(ptn != NULL, "node should be registered");
        ptn->set_escape_state(PointsToNode::GlobalEscape);
        // Add edge to object for unsafe access with offset.
        PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
        assert(adr_ptn != NULL, "node should be registered");
        if (adr_ptn->is_Field()) {
          assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
          add_edge(adr_ptn, ptn);
        }
        break;
      }
      ELSE_FAIL("Op_StoreP");
    }
    case Op_AryEq:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf: {
      // char[] arrays passed to string intrinsics do not escape but
      // they are not scalar replaceable. Adjust escape state for them.
      // Start from in(2) edge since in(1) is memory edge.
      for (uint i = 2; i < n->req(); i++) {
        Node* adr = n->in(i);
        const Type* at = _igvn->type(adr);
        if (!adr->is_top() && at->isa_ptr()) {
          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
                 at->isa_ptr() != NULL, "expecting a pointer");
          if (adr->is_AddP()) {
            adr = get_addp_base(adr);
          }
          PointsToNode* ptn = ptnode_adr(adr->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    default: {
      // This method should be called only for EA specific nodes which may
      // miss some edges when they were created.
#ifdef ASSERT
      n->dump(1);
#endif
      guarantee(false, "unknown node");
    }
  }
  return;
}
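
// Hedged example (added commentary): for a field store such as
//
//   p.f = q;   // StoreP whose address is AddP(base p, offset of f)
//
// the code above adds an edge from the Field node representing p.f to the
// PointsTo node of q, so whatever q refers to becomes reachable through
// p.f during reference propagation.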

void ConnectionGraph::add_call_node(CallNode* call) {
  assert(call->returns_pointer(), "only for call which returns pointer");
  uint call_idx = call->_idx;
  if (call->is_Allocate()) {
    Node* k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != NULL, "TypeKlassPtr required.");
    ciKlass* cik = kt->klass();
    PointsToNode::EscapeState es = PointsToNode::NoEscape;
    bool scalar_replaceable = true;
    if (call->is_AllocateArray()) {
      if (!cik->is_array_klass()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is not constant or too big.
          scalar_replaceable = false;
        }
      }
    } else {  // Allocate instance
      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
         !cik->is_instance_klass() || // StressReflectiveCode
          cik->as_instance_klass()->has_finalizer()) {
        es = PointsToNode::GlobalEscape;
      }
    }
    add_java_object(call, es);
    PointsToNode* ptn = ptnode_adr(call_idx);
    if (!scalar_replaceable && ptn->scalar_replaceable()) {
      ptn->set_scalar_replaceable(false);
    }
  } else if (call->is_CallStaticJava()) {
    // Call nodes could be different types:
    //
    // 1. CallDynamicJavaNode (what happened during call is unknown):
    //
    //    - mapped to GlobalEscape JavaObject node if oop is returned;
    //
    //    - all oop arguments are escaping globally;
    //
    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
    //
    //    - the same as CallDynamicJavaNode if bytecode analysis is not possible;
    //
    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
    //    - mapped to NoEscape JavaObject node if non-escaping object allocated
    //      during call is returned;
    //    - mapped to ArgEscape LocalVar node pointing to object arguments
    //      which are returned and do not escape during the call;
    //
    //    - oop arguments escaping status is defined by bytecode analysis;
    //
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record whether the call's return value escapes.
    ciMethod* meth = call->as_CallJava()->method();
    if (meth == NULL) {
      const char* name = call->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
      // Returns a newly allocated unescaped object.
      add_java_object(call, PointsToNode::NoEscape);
      ptnode_adr(call_idx)->set_scalar_replaceable(false);
    } else {
      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
      call_analyzer->copy_dependencies(_compile->dependencies());
      if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated unescaped object, simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
        add_java_object(call, PointsToNode::NoEscape);
        ptnode_adr(call_idx)->set_scalar_replaceable(false);
      } else {
        // Determine whether any arguments are returned.
        const TypeTuple* d = call->tf()->domain();
        bool ret_arg = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          if (d->field_at(i)->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
            ret_arg = true;
            break;
          }
        }
        if (ret_arg) {
          add_local_var(call, PointsToNode::ArgEscape);
        } else {
          // Returns unknown object.
          map_ideal_node(call, phantom_obj);
        }
      }
    }
  } else {
    // Another type of call, assume the worst case:
    // returned value is unknown and globally escapes.
    assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
    map_ideal_node(call, phantom_obj);
  }
}
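
// Hedged example (added commentary): assuming a static factory such as
//
//   static Point make() { return new Point(); }
//   Point p = make();
//
// BCEscapeAnalyzer reports is_return_allocated(), so the call is mapped to
// a NoEscape JavaObject node, but it is not scalar replaceable because the
// allocation itself happens inside the callee.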

void ConnectionGraph::process_call_arguments(CallNode *call) {
    bool is_arraycopy = false;
    switch (call->Opcode()) {
#ifdef ASSERT
    case Op_Allocate:
    case Op_AllocateArray:
    case Op_Lock:
    case Op_Unlock:
      assert(false, "should be done already");
      break;
#endif
    case Op_CallLeafNoFP:
      is_arraycopy = (call->as_CallLeaf()->_name != NULL &&
                      strstr(call->as_CallLeaf()->_name, "arraycopy") != 0);
      // fall through
    case Op_CallLeaf: {
      // Stub calls, objects do not escape but they are not scalar replaceable.
      // Adjust escape state for outgoing arguments.
      const TypeTuple * d = call->tf()->domain();
      bool src_has_oops = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        Node *arg = call->in(i);
        const Type *aat = _igvn->type(arg);
        if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
          continue;
        if (arg->is_AddP()) {
          //
          // The inline_native_clone() case when the arraycopy stub is called
          // after the allocation before Initialize and CheckCastPP nodes.
          // Or normal arraycopy for object arrays case.
          //
          // Set AddP's base (Allocate) as not scalar replaceable since
          // pointer to the base (with offset) is passed as argument.
          //
          arg = get_addp_base(arg);
        }
        PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
        assert(arg_ptn != NULL, "should be registered");
        PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
        if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != NULL, "expecting a pointer");
          bool arg_has_oops = aat->isa_oopptr() &&
                              (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
                               (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
          if (i == TypeFunc::Parms) {
            src_has_oops = arg_has_oops;
          }
          //
          // src or dst could be j.l.Object when other is basic type array:
          //
          //   arraycopy(char[],0,Object*,0,size);
          //   arraycopy(Object*,0,char[],0,size);
          //
          // Don't add edges in such cases.
          //
          bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
                                       arg_has_oops && (i > TypeFunc::Parms);
#ifdef ASSERT
          if (!(is_arraycopy ||
                (call->as_CallLeaf()->_name != NULL &&
                 (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre")  == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 )))
          ) {
            call->dump();
            assert(false, "EA: unexpected CallLeaf");
          }
#endif
          // Always process arraycopy's destination object since
          // we need to add all possible edges to references in
          // source object.
          if (arg_esc >= PointsToNode::ArgEscape &&
              !arg_is_arraycopy_dest) {
            continue;
          }
          set_escape_state(arg_ptn, PointsToNode::ArgEscape);
          if (arg_is_arraycopy_dest) {
            Node* src = call->in(TypeFunc::Parms);
            if (src->is_AddP()) {
              src = get_addp_base(src);
            }
            PointsToNode* src_ptn = ptnode_adr(src->_idx);
            assert(src_ptn != NULL, "should be registered");
            if (arg_ptn != src_ptn) {
              // Special arraycopy edge:
              // A destination object's field can't have the source object
              // as base since the objects' escape states are not related.
              // Only escape state of destination object's fields affects
              // escape state of fields in source object.
              add_arraycopy(call, PointsToNode::ArgEscape, src_ptn, arg_ptn);
            }
          }
        }
      }
      break;
    }
    case Op_CallStaticJava: {
      // For a static call, we know exactly what method is being called.
      // Use bytecode estimator to record the call's escape effects
#ifdef ASSERT
      const char* name = call->as_CallStaticJava()->_name;
      assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
#endif
      ciMethod* meth = call->as_CallJava()->method();
      BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
      // fall-through if not a Java method or no analyzer information
      if (call_analyzer != NULL) {
        PointsToNode* call_ptn = ptnode_adr(call->_idx);
        const TypeTuple* d = call->tf()->domain();
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          int k = i - TypeFunc::Parms;
          Node* arg = call->in(i);
          PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
          if (at->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(k)) {
            // The call returns arguments.
            if (call_ptn != NULL) { // Is call's result used?
              assert(call_ptn->is_LocalVar(), "node should be registered");
              assert(arg_ptn != NULL, "node should be registered");
              add_edge(call_ptn, arg_ptn);
            }
          }
          if (at->isa_oopptr() != NULL &&
              arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
            if (!call_analyzer->is_arg_stack(k)) {
              // The argument globally escapes
              set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
            } else {
              set_escape_state(arg_ptn, PointsToNode::ArgEscape);
              if (!call_analyzer->is_arg_local(k)) {
                // The argument itself doesn't escape, but any fields might
                set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
              }
            }
          }
        }
        if (call_ptn != NULL && call_ptn->is_LocalVar()) {
          // The call returns arguments.
          assert(call_ptn->edge_count() > 0, "sanity");
          if (!call_analyzer->is_return_local()) {
            // Returns also an unknown object.
            add_edge(call_ptn, phantom_obj);
          }
        }
        break;
      }
    }
    default: {
      // Fall-through here if not a Java method or no analyzer information
      // or some other type of call, assume the worst case: all arguments
      // globally escape.
      const TypeTuple* d = call->tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != NULL) {
          Node* arg = call->in(i);
          if (arg->is_AddP()) {
            arg = get_addp_base(arg);
          }
          assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
          set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
        }
      }
    }
  }
}
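
// Hedged example (added commentary): for an object array copy
//
//   System.arraycopy(src, 0, dst, 0, n);   // CallLeafNoFP named "arraycopy"
//
// both src and dst become ArgEscape, and a special Arraycopy edge from the
// source object to the destination object records that references stored in
// src's elements may later be loaded through dst's elements.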


// Finish Graph construction.
bool ConnectionGraph::complete_connection_graph(
                         GrowableArray<PointsToNode*>&   ptnodes_worklist,
                         GrowableArray<JavaObjectNode*>& non_escaped_worklist,
                         GrowableArray<JavaObjectNode*>& java_objects_worklist,
                         GrowableArray<FieldNode*>&      oop_fields_worklist) {
  // Normally only 1-3 passes are needed to build Connection Graph depending
  // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
  // Set limit to 20 to catch situation when something did go wrong and
  // bailout Escape Analysis.
  // Also limit build time to 30 sec (60 in debug VM).
#define CG_BUILD_ITER_LIMIT 20
#ifdef ASSERT
#define CG_BUILD_TIME_LIMIT 60.0
#else
#define CG_BUILD_TIME_LIMIT 30.0
#endif

  // Propagate GlobalEscape and ArgEscape escape states and check that
  // we still have non-escaping objects. The method pushes on _worklist
  // Field nodes which reference phantom_object.
  if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
    return false; // Nothing to do.
  }
  // Now propagate references to all JavaObject nodes.
  int java_objects_length = java_objects_worklist.length();
  elapsedTimer time;
  int new_edges = 1;
  int iterations = 0;
  do {
    while ((new_edges > 0) &&
          (iterations++   < CG_BUILD_ITER_LIMIT) &&
          (time.seconds() < CG_BUILD_TIME_LIMIT)) {
      time.start();
      new_edges = 0;
      // Propagate references to phantom_object for nodes pushed on _worklist
      // by find_non_escaped_objects() and find_field_value().
      new_edges += add_java_object_edges(phantom_obj, false);
      for (int next = 0; next < java_objects_length; ++next) {
        JavaObjectNode* ptn = java_objects_worklist.at(next);
        new_edges += add_java_object_edges(ptn, true);
      }
      if (new_edges > 0) {
        // Update escape states on each iteration if graph was updated.
        if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
          return false; // Nothing to do.
        }
      }
      time.stop();
    }
    if ((iterations     < CG_BUILD_ITER_LIMIT) &&
        (time.seconds() < CG_BUILD_TIME_LIMIT)) {
      time.start();
      // Find fields which have unknown value.
      int fields_length = oop_fields_worklist.length();
      for (int next = 0; next < fields_length; next++) {
        FieldNode* field = oop_fields_worklist.at(next);
        if (field->edge_count() == 0) {
          new_edges += find_field_value(field);
          // This code may have added new edges to phantom_object.
          // Another cycle is needed to propagate references to phantom_object.
        }
      }
      time.stop();
    } else {
      new_edges = 0; // Bailout
    }
  } while (new_edges > 0);

  // Bailout if passed limits.
  if ((iterations     >= CG_BUILD_ITER_LIMIT) ||
      (time.seconds() >= CG_BUILD_TIME_LIMIT)) {
    Compile* C = _compile;
    if (C->log() != NULL) {
      C->log()->begin_elem("connectionGraph_bailout reason='reached ");
      C->log()->text("%s", (iterations >= CG_BUILD_ITER_LIMIT) ? "iterations" : "time");
      C->log()->end_elem(" limit'");
    }
    assert(false, err_msg_res("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
           time.seconds(), iterations, nodes_size(), ptnodes_worklist.length()));
    // Possible infinite build_connection_graph loop,
    // bailout (no changes to ideal graph were made).
    return false;
  }
#ifdef ASSERT
  if (Verbose && PrintEscapeAnalysis) {
    tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d",
                  iterations, nodes_size(), ptnodes_worklist.length());
  }
#endif

#undef CG_BUILD_ITER_LIMIT
#undef CG_BUILD_TIME_LIMIT

  // Find fields initialized by NULL for non-escaping Allocations.
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    PointsToNode::EscapeState es = ptn->escape_state();
    assert(es <= PointsToNode::ArgEscape, "sanity");
    if (es == PointsToNode::NoEscape) {
      if (find_init_values(ptn, null_obj, _igvn) > 0) {
        // Adding references to NULL object does not change escape states
        // since it does not escape. Also no fields are added to NULL object.
        add_java_object_edges(null_obj, false);
      }
    }
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      // The object allocated by this Allocate node will never be
      // seen by another thread. Mark it so that when it is
      // expanded no MemBarStoreStore is added.
      InitializeNode* ini = n->as_Allocate()->initialization();
      if (ini != NULL)
        ini->set_does_not_escape();
    }
  }
  return true; // Finished graph construction.
}
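
// Note (added commentary): the loop above is a fixed-point iteration: it
// keeps calling add_java_object_edges() and find_non_escaped_objects()
// until no new edges appear. The iteration and time limits only guard
// against pathological graphs where reaching the fixed point would be too
// expensive; bailing out simply disables EA for this compilation.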

// Propagate GlobalEscape and ArgEscape escape states to all nodes
// and check that we still have non-escaping java objects.
bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
                                               GrowableArray<JavaObjectNode*>& non_escaped_worklist) {
  GrowableArray<PointsToNode*> escape_worklist;
  // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
  int ptnodes_length = ptnodes_worklist.length();
  for (int next = 0; next < ptnodes_length; ++next) {
    PointsToNode* ptn = ptnodes_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::ArgEscape ||
        ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
      escape_worklist.push(ptn);
    }
  }
  // Set escape states to referenced nodes (edges list).
  while (escape_worklist.length() > 0) {
    PointsToNode* ptn = escape_worklist.pop();
    PointsToNode::EscapeState es  = ptn->escape_state();
    PointsToNode::EscapeState field_es = ptn->fields_escape_state();
    if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
        es >= PointsToNode::ArgEscape) {
      // GlobalEscape or ArgEscape state of field means it has unknown value.
      if (add_edge(ptn, phantom_obj)) {
        // New edge was added
        add_field_uses_to_worklist(ptn->as_Field());
      }
    }
    for (EdgeIterator i(ptn); i.has_next(); i.next()) {
      PointsToNode* e = i.get();
      if (e->is_Arraycopy()) {
        assert(ptn->arraycopy_dst(), "sanity");
        // Propagate only fields escape state through arraycopy edge.
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es);
          escape_worklist.push(e);
        }
      } else if (es >= field_es) {
        // fields_escape_state is also set to 'es' if it is less than 'es'.
        if (e->escape_state() < es) {
          set_escape_state(e, es);
          escape_worklist.push(e);
        }
      } else {
        // Propagate field escape state.
        bool es_changed = false;
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es);
          es_changed = true;
        }
        if ((e->escape_state() < field_es) &&
            e->is_Field() && ptn->is_JavaObject() &&
            e->as_Field()->is_oop()) {
          // Change escape state of referenced fields.
          set_escape_state(e, field_es);
          es_changed = true;
        } else if (e->escape_state() < es) {
          set_escape_state(e, es);
          es_changed = true;
        }
        if (es_changed) {
          escape_worklist.push(e);
        }
      }
    }
  }
  // Remove escaped objects from non_escaped list.
  for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
      non_escaped_worklist.delete_at(next);
    }
    if (ptn->escape_state() == PointsToNode::NoEscape) {
      // Find fields in non-escaped allocations which have unknown value.
      find_init_values(ptn, phantom_obj, NULL);
    }
  }
  return (non_escaped_worklist.length() > 0);
}
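
// Note (added commentary): escape states form a small lattice,
// NoEscape < ArgEscape < GlobalEscape, and the propagation above is
// monotonic: a node's state only ever increases, so re-pushing a node
// whenever its state grows is guaranteed to terminate.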

// Add all references to JavaObject node by walking over all uses.
int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
  int new_edges = 0;
  if (populate_worklist) {
    // Populate _worklist by uses of jobj's uses.
    for (UseIterator i(jobj); i.has_next(); i.next()) {
      PointsToNode* use = i.get();
      if (use->is_Arraycopy())
        continue;
      add_uses_to_worklist(use);
      if (use->is_Field() && use->as_Field()->is_oop()) {
        // Put on worklist all field's uses (loads) and
        // related field nodes (same base and offset).
        add_field_uses_to_worklist(use->as_Field());
      }
    }
  }
  while(_worklist.length() > 0) {
    PointsToNode* use = _worklist.pop();
    if (PointsToNode::is_base_use(use)) {
      // Add reference from jobj to field and from field to jobj (field's base).
      use = PointsToNode::get_use_node(use)->as_Field();
      if (add_base(use->as_Field(), jobj)) {
        new_edges++;
      }
      continue;
    }
    assert(!use->is_JavaObject(), "sanity");
    if (use->is_Arraycopy()) {
      if (jobj == null_obj) // NULL object does not have field edges
        continue;
      // Add edge from Arraycopy node to arraycopy's source java object
      if (add_edge(use, jobj)) {
        jobj->set_arraycopy_src();
        new_edges++;
      }
      // and stop here.
      continue;
    }
    if (!add_edge(use, jobj))
      continue; // No new edge added, there was such edge already.
    new_edges++;
    if (use->is_LocalVar()) {
      add_uses_to_worklist(use);
      if (use->arraycopy_dst()) {
        for (EdgeIterator i(use); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_Arraycopy()) {
            if (jobj == null_obj) // NULL object does not have field edges
              continue;
            // Add edge from arraycopy's destination java object to Arraycopy node.
            if (add_edge(jobj, e)) {
              new_edges++;
              jobj->set_arraycopy_dst();
            }
          }
        }
      }
    } else {
      // A new edge was added to the values stored in the field.
      // Put on worklist all field's uses (loads) and
      // related field nodes (same base and offset).
      add_field_uses_to_worklist(use->as_Field());
    }
  }
  return new_edges;
}
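
// Hedged example (added commentary): given the edges built for
//
//   p.f = q;  r = p.f;
//
// add_java_object_edges() for q's object reaches the Field node of p.f
// through q's uses and then, via add_field_uses_to_worklist(), the load
// producing 'r', so r's LocalVar ends up pointing to q's object as well.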

// Put on worklist all related field nodes.
void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
  assert(field->is_oop(), "sanity");
  int offset = field->offset();
  add_uses_to_worklist(field);
  // Loop over all bases of this field and push on worklist Field nodes
  // with the same offset and base (since they may reference the same field).
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    add_fields_to_worklist(field, base);
    // Check if the base was source object of arraycopy and go over arraycopy's
    // destination objects since values stored to a field of source object are
    // accessible by uses (loads) of fields of destination objects.
    if (base->arraycopy_src()) {
      for (UseIterator j(base); j.has_next(); j.next()) {
        PointsToNode* arycp = j.get();
        if (arycp->is_Arraycopy()) {
          for (UseIterator k(arycp); k.has_next(); k.next()) {
            PointsToNode* abase = k.get();
            if (abase->arraycopy_dst() && abase != base) {
              // Look for the same arraycopy reference.
              add_fields_to_worklist(field, abase);
            }
          }
        }
      }
    }
  }
}

// Put on worklist all related field nodes.
void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
  int offset = field->offset();
  if (base->is_LocalVar()) {
    for (UseIterator j(base); j.has_next(); j.next()) {
      PointsToNode* f = j.get();
      if (PointsToNode::is_base_use(f)) { // Field
        f = PointsToNode::get_use_node(f);
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  } else {
    assert(base->is_JavaObject(), "sanity");
    if (// Skip phantom_object since it is only used to indicate that
        // this field's content globally escapes.
        (base != phantom_obj) &&
        // NULL object node does not have fields.
        (base != null_obj)) {
      for (EdgeIterator i(base); i.has_next(); i.next()) {
        PointsToNode* f = i.get();
        // Skip arraycopy edge since store to destination object field
        // does not update value in source object field.
        if (f->is_Arraycopy()) {
          assert(base->arraycopy_dst(), "sanity");
          continue;
        }
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  }
}
1336 
// Find fields which have an unknown value.
1338 int ConnectionGraph::find_field_value(FieldNode* field) {
  // Escaped fields should already have an initializing value.
1340   assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
1341   int new_edges = 0;
1342   for (BaseIterator i(field); i.has_next(); i.next()) {
1343     PointsToNode* base = i.get();
1344     if (base->is_JavaObject()) {
1345       // Skip Allocate's fields which will be processed later.
1346       if (base->ideal_node()->is_Allocate())
1347         return 0;
1348       assert(base == null_obj, "only NULL ptr base expected here");
1349     }
1350   }
1351   if (add_edge(field, phantom_obj)) {
1352     // New edge was added
1353     new_edges++;
1354     add_field_uses_to_worklist(field);
1355   }
1356   return new_edges;
1357 }
1358 
// Find the initializing values for fields of allocations.
1360 int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) {
1361   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1362   int new_edges = 0;
1363   Node* alloc = pta->ideal_node();
1364   if (init_val == phantom_obj) {
    // Do nothing for Allocate nodes since their field values are "known".
    if (alloc->is_Allocate())
      return 0;
    assert(alloc->is_CallStaticJava(), "sanity");
1369 #ifdef ASSERT
1370     if (alloc->as_CallStaticJava()->method() == NULL) {
1371       const char* name = alloc->as_CallStaticJava()->_name;
1372       assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
1373     }
1374 #endif
    // A non-escaped object returned from a Java or runtime call has
    // unknown values in its fields.
1377     for (EdgeIterator i(pta); i.has_next(); i.next()) {
1378       PointsToNode* ptn = i.get();
1379       if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
1380         if (add_edge(ptn, phantom_obj)) {
1381           // New edge was added
1382           new_edges++;
1383           add_field_uses_to_worklist(ptn->as_Field());
1384         }
1385       }
1386     }
1387     return new_edges;
1388   }
1389   assert(init_val == null_obj, "sanity");
  // Do nothing for Call nodes since their field values are unknown.
1391   if (!alloc->is_Allocate())
1392     return 0;
1393 
1394   InitializeNode* ini = alloc->as_Allocate()->initialization();
1395   Compile* C = _compile;
1396   bool visited_bottom_offset = false;
1397   GrowableArray<int> offsets_worklist;
1398 
  // Check if an oop field's initializing value is recorded and add
  // a corresponding NULL edge if the value is not recorded.
  // The Connection Graph does not record a default initialization by NULL
  // captured by the Initialize node.
1403   //
1404   for (EdgeIterator i(pta); i.has_next(); i.next()) {
1405     PointsToNode* ptn = i.get(); // Field (AddP)
1406     if (!ptn->is_Field() || !ptn->as_Field()->is_oop())
1407       continue; // Not oop field
1408     int offset = ptn->as_Field()->offset();
1409     if (offset == Type::OffsetBot) {
1410       if (!visited_bottom_offset) {
        // OffsetBot is used to reference an array's element.
        // Always add a reference to NULL to all Field nodes since we don't
        // know which element is referenced.
1414         if (add_edge(ptn, null_obj)) {
1415           // New edge was added
1416           new_edges++;
1417           add_field_uses_to_worklist(ptn->as_Field());
1418           visited_bottom_offset = true;
1419         }
1420       }
1421     } else {
1422       // Check only oop fields.
1423       const Type* adr_type = ptn->ideal_node()->as_AddP()->bottom_type();
1424       if (adr_type->isa_rawptr()) {
1425 #ifdef ASSERT
        // Raw pointers are used for initializing stores; skip them
        // since such stores should already be recorded.
1428         Node* base = get_addp_base(ptn->ideal_node());
1429         assert(adr_type->isa_rawptr() && base->is_Proj() &&
1430                (base->in(0) == alloc),"unexpected pointer type");
1431 #endif
1432         continue;
1433       }
1434       if (!offsets_worklist.contains(offset)) {
1435         offsets_worklist.append(offset);
1436         Node* value = NULL;
1437         if (ini != NULL) {
1438           BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
1439           Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
1440           if (store != NULL && store->is_Store()) {
1441             value = store->in(MemNode::ValueIn);
1442           } else {
            // There could be initializing stores which follow the allocation.
            // For example, a volatile field store is not captured
            // by the Initialize node.
            //
            // We would need to check for dependent loads to separate such
            // stores from stores which follow loads. For now, add the
            // initial value NULL so that the pointer-compare optimization
            // works correctly.
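            //
            // For illustration (hypothetical Java source):
            //
            //    class A { volatile Object f; A(Object o) { f = o; } }
            //
            // The volatile store to 'f' stays outside the Initialize node,
            // so no captured store is found for its offset here.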
1450           }
1451         }
1452         if (value == NULL) {
1453           // A field's initializing value was not recorded. Add NULL.
1454           if (add_edge(ptn, null_obj)) {
1455             // New edge was added
1456             new_edges++;
1457             add_field_uses_to_worklist(ptn->as_Field());
1458           }
1459         }
1460       }
1461     }
1462   }
1463   return new_edges;
1464 }
1465 
1466 // Adjust scalar_replaceable state after Connection Graph is built.
1467 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {
1468   // Search for non-escaping objects which are not scalar replaceable
1469   // and mark them to propagate the state to referenced objects.
1470 
1471   // 1. An object is not scalar replaceable if the field into which it is
1472   // stored has unknown offset (stored into unknown element of an array).
1473   //
1474   for (UseIterator i(jobj); i.has_next(); i.next()) {
1475     PointsToNode* use = i.get();
1476     assert(!use->is_Arraycopy(), "sanity");
1477     if (use->is_Field()) {
1478       FieldNode* field = use->as_Field();
1479       assert(field->is_oop() && field->scalar_replaceable() &&
1480              field->fields_escape_state() == PointsToNode::NoEscape, "sanity");
1481       if (field->offset() == Type::OffsetBot) {
1482         jobj->set_scalar_replaceable(false);
1483         return;
1484       }
1485     }
1486     assert(use->is_Field() || use->is_LocalVar(), "sanity");
1487     // 2. An object is not scalar replaceable if it is merged with other objects.
1488     for (EdgeIterator j(use); j.has_next(); j.next()) {
1489       PointsToNode* ptn = j.get();
1490       if (ptn->is_JavaObject() && ptn != jobj) {
1491         // Mark all objects.
1492         jobj->set_scalar_replaceable(false);
        ptn->set_scalar_replaceable(false);
1494       }
1495     }
1496     if (!jobj->scalar_replaceable()) {
1497       return;
1498     }
1499   }
1500 
1501   for (EdgeIterator j(jobj); j.has_next(); j.next()) {
1502     // Non-escaping object node should point only to field nodes.
1503     FieldNode* field = j.get()->as_Field();
    int offset = field->offset();
1505 
    // 3. An object is not scalar replaceable if it has a field with unknown
    // offset (an array's element is accessed in a loop).
1508     if (offset == Type::OffsetBot) {
1509       jobj->set_scalar_replaceable(false);
1510       return;
1511     }
    // 4. Currently an object is not scalar replaceable if a LoadStore node
    // accesses its field since the field's value is unknown after it.
1514     //
1515     Node* n = field->ideal_node();
1516     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1517       if (n->fast_out(i)->is_LoadStore()) {
1518         jobj->set_scalar_replaceable(false);
1519         return;
1520       }
1521     }
1522 
    // 5. Or the address may point to more than one object. This may produce
    // a false positive result (set not scalar replaceable)
    // since the flow-insensitive escape analysis can't separate
    // the case when stores overwrite the field's value from the case
    // when stores happen on different control branches.
1528     //
    // Note: it will disable scalar replacement in some cases:
    //
    //    Point p[] = new Point[1];
    //    p[0] = new Point(); // Will not be scalar replaced
    //
    // but it will save us from incorrect optimizations in cases like:
    //
    //    Point p[] = new Point[1];
    //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
1538     //
1539     if (field->base_count() > 1) {
1540       for (BaseIterator i(field); i.has_next(); i.next()) {
1541         PointsToNode* base = i.get();
        // Don't take into account LocalVar nodes which
        // may point to only one object, which should also
        // be this field's base by now.
1545         if (base->is_JavaObject() && base != jobj) {
1546           // Mark all bases.
1547           jobj->set_scalar_replaceable(false);
1548           base->set_scalar_replaceable(false);
1549         }
1550       }
1551     }
1552   }
1553 }
1554 
1555 #ifdef ASSERT
1556 void ConnectionGraph::verify_connection_graph(
1557                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
1558                          GrowableArray<JavaObjectNode*>& non_escaped_worklist,
1559                          GrowableArray<JavaObjectNode*>& java_objects_worklist,
1560                          GrowableArray<Node*>& addp_worklist) {
1561   // Verify that graph is complete - no new edges could be added.
1562   int java_objects_length = java_objects_worklist.length();
1563   int non_escaped_length  = non_escaped_worklist.length();
1564   int new_edges = 0;
1565   for (int next = 0; next < java_objects_length; ++next) {
1566     JavaObjectNode* ptn = java_objects_worklist.at(next);
1567     new_edges += add_java_object_edges(ptn, true);
1568   }
1569   assert(new_edges == 0, "graph was not complete");
1570   // Verify that escape state is final.
1571   int length = non_escaped_worklist.length();
1572   find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist);
1573   assert((non_escaped_length == non_escaped_worklist.length()) &&
1574          (non_escaped_length == length) &&
1575          (_worklist.length() == 0), "escape state was not final");
1576 
1577   // Verify fields information.
1578   int addp_length = addp_worklist.length();
1579   for (int next = 0; next < addp_length; ++next ) {
1580     Node* n = addp_worklist.at(next);
1581     FieldNode* field = ptnode_adr(n->_idx)->as_Field();
1582     if (field->is_oop()) {
1583       // Verify that field has all bases
1584       Node* base = get_addp_base(n);
1585       PointsToNode* ptn = ptnode_adr(base->_idx);
1586       if (ptn->is_JavaObject()) {
1587         assert(field->has_base(ptn->as_JavaObject()), "sanity");
1588       } else {
1589         assert(ptn->is_LocalVar(), "sanity");
1590         for (EdgeIterator i(ptn); i.has_next(); i.next()) {
1591           PointsToNode* e = i.get();
1592           if (e->is_JavaObject()) {
1593             assert(field->has_base(e->as_JavaObject()), "sanity");
1594           }
1595         }
1596       }
1597       // Verify that all fields have initializing values.
1598       if (field->edge_count() == 0) {
1599         field->dump();
1600         assert(field->edge_count() > 0, "sanity");
1601       }
1602     }
1603   }
1604 }
1605 #endif
1606 
1607 // Optimize ideal graph.
1608 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
1609                                            GrowableArray<Node*>& storestore_worklist) {
1610   Compile* C = _compile;
1611   PhaseIterGVN* igvn = _igvn;
1612   if (EliminateLocks) {
1613     // Mark locks before changing ideal graph.
1614     int cnt = C->macro_count();
1615     for( int i=0; i < cnt; i++ ) {
1616       Node *n = C->macro_node(i);
1617       if (n->is_AbstractLock()) { // Lock and Unlock nodes
1618         AbstractLockNode* alock = n->as_AbstractLock();
1619         if (!alock->is_non_esc_obj()) {
1620           if (not_global_escape(alock->obj_node())) {
1621             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
            // The lock could have been marked eliminated by the lock
            // coarsening code during the first IGVN pass before EA.
            // Replace the coarsened flag to eliminate all associated
            // locks/unlocks.
1625             alock->set_non_esc_obj();
1626           }
1627         }
1628       }
1629     }
1630   }
1631 
1632   if (OptimizePtrCompare) {
1633     // Add ConI(#CC_GT) and ConI(#CC_EQ).
1634     _pcmp_neq = igvn->makecon(TypeInt::CC_GT);
1635     _pcmp_eq = igvn->makecon(TypeInt::CC_EQ);
    // Optimize object compares.
1637     while (ptr_cmp_worklist.length() != 0) {
1638       Node *n = ptr_cmp_worklist.pop();
1639       Node *res = optimize_ptr_compare(n);
1640       if (res != NULL) {
1641 #ifndef PRODUCT
1642         if (PrintOptimizePtrCompare) {
1643           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
1644           if (Verbose) {
1645             n->dump(1);
1646           }
1647         }
1648 #endif
1649         igvn->replace_node(n, res);
1650       }
1651     }
1652     // cleanup
1653     if (_pcmp_neq->outcnt() == 0)
1654       igvn->hash_delete(_pcmp_neq);
1655     if (_pcmp_eq->outcnt()  == 0)
1656       igvn->hash_delete(_pcmp_eq);
1657   }
1658 
  // For MemBarStoreStore nodes added in library_call.cpp, check the
  // escape status of the associated AllocateNode and optimize out the
  // MemBarStoreStore node if the allocated object never escapes.
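  //
  // For illustration (hypothetical): the MemBarStoreStore emitted for
  //
  //    byte[] copy = src.clone();
  //
  // publishes the copied contents before the reference can be shared.
  // If 'copy' never escapes the compiled method, no other thread can
  // observe it, so the cheaper MemBarCPUOrder created below suffices.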
1662   while (storestore_worklist.length() != 0) {
1663     Node *n = storestore_worklist.pop();
    MemBarStoreStoreNode *storestore = n->as_MemBarStoreStore();
    Node *alloc = storestore->in(MemBarNode::Precedent)->in(0);
    assert(alloc->is_Allocate(), "storestore should point to AllocateNode");
1667     if (not_global_escape(alloc)) {
1668       MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
1669       mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
1670       mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
1671       igvn->register_new_node_with_optimizer(mb);
1672       igvn->replace_node(storestore, mb);
1673     }
1674   }
1675 }
1676 
// Optimize object compares.
1678 Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
1679   assert(OptimizePtrCompare, "sanity");
1680   PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
1681   PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
1682   JavaObjectNode* jobj1 = unique_java_object(n->in(1));
1683   JavaObjectNode* jobj2 = unique_java_object(n->in(2));
1684   assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
1685   assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
1686 
1687   // Check simple cases first.
1688   if (jobj1 != NULL) {
1689     if (jobj1->escape_state() == PointsToNode::NoEscape) {
1690       if (jobj1 == jobj2) {
        // Comparing the same non-escaping object.
1692         return _pcmp_eq;
1693       }
1694       Node* obj = jobj1->ideal_node();
      // Comparing a non-escaping allocation.
1696       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
1697           !ptn2->points_to(jobj1)) {
1698         return _pcmp_neq; // This includes nullness check.
1699       }
1700     }
1701   }
1702   if (jobj2 != NULL) {
1703     if (jobj2->escape_state() == PointsToNode::NoEscape) {
1704       Node* obj = jobj2->ideal_node();
      // Comparing a non-escaping allocation.
1706       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
1707           !ptn1->points_to(jobj2)) {
1708         return _pcmp_neq; // This includes nullness check.
1709       }
1710     }
1711   }
1712   if (jobj1 != NULL && jobj1 != phantom_obj &&
1713       jobj2 != NULL && jobj2 != phantom_obj &&
1714       jobj1->ideal_node()->is_Con() &&
1715       jobj2->ideal_node()->is_Con()) {
    // Comparing Klass or String constants. We need to be careful with
    // compressed pointers: compare the types of ConN and ConP instead
    // of the nodes.
1718     const Type* t1 = jobj1->ideal_node()->bottom_type()->make_ptr();
1719     const Type* t2 = jobj2->ideal_node()->bottom_type()->make_ptr();
1720     assert(t1 != NULL && t2 != NULL, "sanity");
1721     if (t1->make_ptr() == t2->make_ptr()) {
1722       return _pcmp_eq;
1723     } else {
1724       return _pcmp_neq;
1725     }
1726   }
1727   if (ptn1->meet(ptn2)) {
1728     return NULL; // Sets are not disjoint
1729   }
1730 
1731   // Sets are disjoint.
1732   bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
1733   bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
1734   bool set1_has_null_ptr    = ptn1->points_to(null_obj);
1735   bool set2_has_null_ptr    = ptn2->points_to(null_obj);
  if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
      (set2_has_unknown_ptr && set1_has_null_ptr)) {
1738     // Check nullness of unknown object.
1739     return NULL;
1740   }
1741 
  // Disjointness by itself is not sufficient since
  // alias analysis is not complete for escaped objects.
  // Disjoint sets are definitely unrelated only when
  // at least one set contains only non-escaping allocations.
1746   if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
1747     if (ptn1->non_escaping_allocation()) {
1748       return _pcmp_neq;
1749     }
1750   }
1751   if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
1752     if (ptn2->non_escaping_allocation()) {
1753       return _pcmp_neq;
1754     }
1755   }
1756   return NULL;
1757 }
1758 
// Connection Graph construction functions.
1760 
1761 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
1762   PointsToNode* ptadr = _nodes.at(n->_idx);
1763   if (ptadr != NULL) {
1764     assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
1765     return;
1766   }
1767   Compile* C = _compile;
1768   ptadr = new (C->comp_arena()) LocalVarNode(C, n, es);
1769   _nodes.at_put(n->_idx, ptadr);
1770 }
1771 
1772 void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
1773   PointsToNode* ptadr = _nodes.at(n->_idx);
1774   if (ptadr != NULL) {
1775     assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
1776     return;
1777   }
1778   Compile* C = _compile;
1779   ptadr = new (C->comp_arena()) JavaObjectNode(C, n, es);
1780   _nodes.at_put(n->_idx, ptadr);
1781 }
1782 
1783 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
1784   PointsToNode* ptadr = _nodes.at(n->_idx);
1785   if (ptadr != NULL) {
1786     assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
1787     return;
1788   }
1789   bool unsafe = false;
1790   bool is_oop = is_oop_field(n, offset, &unsafe);
1791   if (unsafe) {
1792     es = PointsToNode::GlobalEscape;
1793   }
1794   Compile* C = _compile;
1795   FieldNode* field = new (C->comp_arena()) FieldNode(C, n, es, offset, is_oop);
1796   _nodes.at_put(n->_idx, field);
1797 }
1798 
1799 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
1800                                     PointsToNode* src, PointsToNode* dst) {
1801   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
1802   assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
1803   PointsToNode* ptadr = _nodes.at(n->_idx);
1804   if (ptadr != NULL) {
1805     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
1806     return;
1807   }
1808   Compile* C = _compile;
1809   ptadr = new (C->comp_arena()) ArraycopyNode(C, n, es);
1810   _nodes.at_put(n->_idx, ptadr);
1811   // Add edge from arraycopy node to source object.
1812   (void)add_edge(ptadr, src);
1813   src->set_arraycopy_src();
1814   // Add edge from destination object to arraycopy node.
1815   (void)add_edge(dst, ptadr);
1816   dst->set_arraycopy_dst();
1817 }
1818 
1819 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
1820   const Type* adr_type = n->as_AddP()->bottom_type();
1821   BasicType bt = T_INT;
1822   if (offset == Type::OffsetBot) {
1823     // Check only oop fields.
1824     if (!adr_type->isa_aryptr() ||
1825         (adr_type->isa_aryptr()->klass() == NULL) ||
1826          adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
1827       // OffsetBot is used to reference array's element. Ignore first AddP.
1828       if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
1829         bt = T_OBJECT;
1830       }
1831     }
1832   } else if (offset != oopDesc::klass_offset_in_bytes()) {
1833     if (adr_type->isa_instptr()) {
1834       ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
1835       if (field != NULL) {
1836         bt = field->layout_type();
1837       } else {
1838         // Check for unsafe oop field access
1839         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1840           int opcode = n->fast_out(i)->Opcode();
1841           if (opcode == Op_StoreP || opcode == Op_LoadP ||
1842               opcode == Op_StoreN || opcode == Op_LoadN) {
1843             bt = T_OBJECT;
1844             (*unsafe) = true;
1845             break;
1846           }
1847         }
1848       }
1849     } else if (adr_type->isa_aryptr()) {
1850       if (offset == arrayOopDesc::length_offset_in_bytes()) {
1851         // Ignore array length load.
1852       } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
1853         // Ignore first AddP.
1854       } else {
1855         const Type* elemtype = adr_type->isa_aryptr()->elem();
1856         bt = elemtype->array_element_basic_type();
1857       }
1858     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
1859       // Allocation initialization, ThreadLocal field access, unsafe access
1860       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1861         int opcode = n->fast_out(i)->Opcode();
1862         if (opcode == Op_StoreP || opcode == Op_LoadP ||
1863             opcode == Op_StoreN || opcode == Op_LoadN) {
1864           bt = T_OBJECT;
1865           break;
1866         }
1867       }
1868     }
1869   }
1870   return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
1871 }
1872 
// Returns the unique java object pointed to, or NULL.
1874 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
  assert(!_collecting, "should not call during graph construction");
1876   // If the node was created after the escape computation we can't answer.
1877   uint idx = n->_idx;
1878   if (idx >= nodes_size()) {
1879     return NULL;
1880   }
1881   PointsToNode* ptn = ptnode_adr(idx);
1882   if (ptn->is_JavaObject()) {
1883     return ptn->as_JavaObject();
1884   }
1885   assert(ptn->is_LocalVar(), "sanity");
1886   // Check all java objects it points to.
1887   JavaObjectNode* jobj = NULL;
1888   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
1889     PointsToNode* e = i.get();
1890     if (e->is_JavaObject()) {
1891       if (jobj == NULL) {
1892         jobj = e->as_JavaObject();
1893       } else if (jobj != e) {
1894         return NULL;
1895       }
1896     }
1897   }
1898   return jobj;
1899 }
1900 
1901 // Return true if this node points only to non-escaping allocations.
1902 bool PointsToNode::non_escaping_allocation() {
1903   if (is_JavaObject()) {
1904     Node* n = ideal_node();
1905     if (n->is_Allocate() || n->is_CallStaticJava()) {
1906       return (escape_state() == PointsToNode::NoEscape);
1907     } else {
1908       return false;
1909     }
1910   }
1911   assert(is_LocalVar(), "sanity");
1912   // Check all java objects it points to.
1913   for (EdgeIterator i(this); i.has_next(); i.next()) {
1914     PointsToNode* e = i.get();
1915     if (e->is_JavaObject()) {
1916       Node* n = e->ideal_node();
1917       if ((e->escape_state() != PointsToNode::NoEscape) ||
1918           !(n->is_Allocate() || n->is_CallStaticJava())) {
1919         return false;
1920       }
1921     }
1922   }
1923   return true;
1924 }
1925 
1926 // Return true if we know the node does not escape globally.
1927 bool ConnectionGraph::not_global_escape(Node *n) {
1928   assert(!_collecting, "should not call during graph construction");
1929   // If the node was created after the escape computation we can't answer.
1930   uint idx = n->_idx;
1931   if (idx >= nodes_size()) {
1932     return false;
1933   }
1934   PointsToNode* ptn = ptnode_adr(idx);
1935   PointsToNode::EscapeState es = ptn->escape_state();
1936   // If we have already computed a value, return it.
1937   if (es >= PointsToNode::GlobalEscape)
1938     return false;
1939   if (ptn->is_JavaObject()) {
1940     return true; // (es < PointsToNode::GlobalEscape);
1941   }
1942   assert(ptn->is_LocalVar(), "sanity");
1943   // Check all java objects it points to.
1944   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
1945     if (i.get()->escape_state() >= PointsToNode::GlobalEscape)
1946       return false;
1947   }
1948   return true;
1949 }
1950 
1951 
1952 // Helper functions
1953 
// Return true if this node is the specified java object or points to it.
1955 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
1956   if (is_JavaObject()) {
1957     return (this == ptn);
1958   }
1959   assert(is_LocalVar(), "sanity");
1960   for (EdgeIterator i(this); i.has_next(); i.next()) {
1961     if (i.get() == ptn)
1962       return true;
1963   }
1964   return false;
1965 }
1966 
// Return true if this node's points-to set intersects the other's.
1968 bool PointsToNode::meet(PointsToNode* ptn) {
1969   if (this == ptn) {
1970     return true;
1971   } else if (ptn->is_JavaObject()) {
1972     return this->points_to(ptn->as_JavaObject());
1973   } else if (this->is_JavaObject()) {
1974     return ptn->points_to(this->as_JavaObject());
1975   }
1976   assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
1977   int ptn_count =  ptn->edge_count();
1978   for (EdgeIterator i(this); i.has_next(); i.next()) {
1979     PointsToNode* this_e = i.get();
1980     for (int j = 0; j < ptn_count; j++) {
1981       if (this_e == ptn->edge(j))
1982         return true;
1983     }
1984   }
1985   return false;
1986 }
1987 
1988 #ifdef ASSERT
// Return true if the given java object is one of this field's bases.
1990 bool FieldNode::has_base(JavaObjectNode* jobj) const {
1991   for (BaseIterator i(this); i.has_next(); i.next()) {
1992     if (i.get() == jobj)
1993       return true;
1994   }
1995   return false;
1996 }
1997 #endif
1998 
1999 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
2000   const Type *adr_type = phase->type(adr);
2001   if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
2002       adr->in(AddPNode::Address)->is_Proj() &&
2003       adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
    // We are computing a raw address for a store captured by an Initialize;
    // compute an appropriate address type. AddP cases #3 and #5 (see below).
2006     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2007     assert(offs != Type::OffsetBot ||
2008            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
2009            "offset must be a constant or it is initialization of array");
2010     return offs;
2011   }
2012   const TypePtr *t_ptr = adr_type->isa_ptr();
2013   assert(t_ptr != NULL, "must be a pointer type");
2014   return t_ptr->offset();
2015 }
2016 
2017 Node* ConnectionGraph::get_addp_base(Node *addp) {
2018   assert(addp->is_AddP(), "must be AddP");
2019   //
2020   // AddP cases for Base and Address inputs:
2021   // case #1. Direct object's field reference:
2022   //     Allocate
2023   //       |
2024   //     Proj #5 ( oop result )
2025   //       |
2026   //     CheckCastPP (cast to instance type)
2027   //      | |
2028   //     AddP  ( base == address )
2029   //
2030   // case #2. Indirect object's field reference:
2031   //      Phi
2032   //       |
2033   //     CastPP (cast to instance type)
2034   //      | |
2035   //     AddP  ( base == address )
2036   //
2037   // case #3. Raw object's field reference for Initialize node:
2038   //      Allocate
2039   //        |
2040   //      Proj #5 ( oop result )
2041   //  top   |
2042   //     \  |
2043   //     AddP  ( base == top )
2044   //
2045   // case #4. Array's element reference:
2046   //   {CheckCastPP | CastPP}
2047   //     |  | |
2048   //     |  AddP ( array's element offset )
2049   //     |  |
2050   //     AddP ( array's offset )
2051   //
2052   // case #5. Raw object's field reference for arraycopy stub call:
2053   //          The inline_native_clone() case when the arraycopy stub is called
2054   //          after the allocation before Initialize and CheckCastPP nodes.
2055   //      Allocate
2056   //        |
2057   //      Proj #5 ( oop result )
2058   //       | |
2059   //       AddP  ( base == address )
2060   //
2061   // case #6. Constant Pool, ThreadLocal, CastX2P or
2062   //          Raw object's field reference:
2063   //      {ConP, ThreadLocal, CastX2P, raw Load}
2064   //  top   |
2065   //     \  |
2066   //     AddP  ( base == top )
2067   //
2068   // case #7. Klass's field reference.
2069   //      LoadKlass
2070   //       | |
2071   //       AddP  ( base == address )
2072   //
2073   // case #8. narrow Klass's field reference.
2074   //      LoadNKlass
2075   //       |
2076   //      DecodeN
2077   //       | |
2078   //       AddP  ( base == address )
2079   //
2080   Node *base = addp->in(AddPNode::Base);
2081   if (base->uncast()->is_top()) { // The AddP case #3 and #6.
2082     base = addp->in(AddPNode::Address);
2083     while (base->is_AddP()) {
2084       // Case #6 (unsafe access) may have several chained AddP nodes.
2085       assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
2086       base = base->in(AddPNode::Address);
2087     }
2088     Node* uncast_base = base->uncast();
2089     int opcode = uncast_base->Opcode();
2090     assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
2091            opcode == Op_CastX2P || uncast_base->is_DecodeN() ||
2092            (uncast_base->is_Mem() && uncast_base->bottom_type() == TypeRawPtr::NOTNULL) ||
2093            (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
2094   }
2095   return base;
2096 }
2097 
2098 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
2099   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
2100   Node* addp2 = addp->raw_out(0);
2101   if (addp->outcnt() == 1 && addp2->is_AddP() &&
2102       addp2->in(AddPNode::Base) == n &&
2103       addp2->in(AddPNode::Address) == addp) {
2104     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
2105     //
    // Find the array's offset AddP to push it on the worklist first so
    // that, as a result, the array's element offset is processed first
    // (pushed second) and no CastPP is needed for the array's offset.
    // Otherwise the inserted CastPP (LocalVar) would point to what the
    // AddP (Field) points to, which would be wrong since the algorithm
    // expects the CastPP to have the same points-to set as the AddP's
    // base CheckCastPP (LocalVar).
2113     //
2114     //    ArrayAllocation
2115     //     |
2116     //    CheckCastPP
2117     //     |
2118     //    memProj (from ArrayAllocation CheckCastPP)
2119     //     |  ||
2120     //     |  ||   Int (element index)
2121     //     |  ||    |   ConI (log(element size))
2122     //     |  ||    |   /
2123     //     |  ||   LShift
2124     //     |  ||  /
2125     //     |  AddP (array's element offset)
2126     //     |  |
2127     //     |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
2128     //     | / /
2129     //     AddP (array's offset)
2130     //      |
2131     //     Load/Store (memory operation on array's element)
2132     //
2133     return addp2;
2134   }
2135   return NULL;
2136 }
2137 
2138 //
2139 // Adjust the type and inputs of an AddP which computes the
2140 // address of a field of an instance
2141 //
2142 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2143   PhaseGVN* igvn = _igvn;
2144   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
2145   assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
2146   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
2147   if (t == NULL) {
    // We are computing a raw address for a store captured by an Initialize;
    // compute an appropriate address type (cases #3 and #5).
2150     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
2151     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
2152     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
2153     assert(offs != Type::OffsetBot, "offset must be a constant");
2154     t = base_t->add_offset(offs)->is_oopptr();
2155   }
2156   int inst_id =  base_t->instance_id();
2157   assert(!t->is_known_instance() || t->instance_id() == inst_id,
2158                              "old type must be non-instance or match new type");
2159 
  // The type 't' could be a subclass of 'base_t'.
  // As a result t->offset() could be larger than base_t's size, which would
  // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
  // constructor verifies the correctness of the offset.
  //
  // It can happen on a subclass's branch (from type-profile-driven
  // inlining) which was not eliminated during parsing since the exactness
  // of the allocation type was not propagated to the subclass type check.
  //
  // Or the type 't' could be unrelated to 'base_t' altogether.
  // It can happen on a dead path when the CHA type differs from the MDO
  // type (for example, from an instanceof check) and the path is not
  // collapsed during parsing.
  //
  // Do nothing for such an AddP node and don't process its users since
  // this code branch will go away.
  //
2176   if (!t->is_known_instance() &&
2177       !base_t->klass()->is_subtype_of(t->klass())) {
2178      return false; // bail out
2179   }
2180   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
  // Do NOT remove the next line: it ensures a new alias index is allocated
  // for the instance type. Note: C++ will not remove the call since it
  // has side effects.
2184   int alias_idx = _compile->get_alias_index(tinst);
2185   igvn->set_type(addp, tinst);
2186   // record the allocation in the node map
2187   set_map(addp, get_map(base->_idx));
2188   // Set addp's Base and Address to 'base'.
2189   Node *abase = addp->in(AddPNode::Base);
2190   Node *adr   = addp->in(AddPNode::Address);
2191   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2192       adr->in(0)->_idx == (uint)inst_id) {
2193     // Skip AddP cases #3 and #5.
2194   } else {
2195     assert(!abase->is_top(), "sanity"); // AddP case #3
2196     if (abase != base) {
2197       igvn->hash_delete(addp);
2198       addp->set_req(AddPNode::Base, base);
2199       if (abase == adr) {
2200         addp->set_req(AddPNode::Address, base);
2201       } else {
2202         // AddP case #4 (adr is array's element offset AddP node)
2203 #ifdef ASSERT
2204         const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
2205         assert(adr->is_AddP() && atype != NULL &&
2206                atype->instance_id() == inst_id, "array's element offset should be processed first");
2207 #endif
2208       }
2209       igvn->hash_insert(addp);
2210     }
2211   }
2212   // Put on IGVN worklist since at least addp's type was changed above.
2213   record_for_optimizer(addp);
2214   return true;
2215 }
2216 
2217 //
// Create a new version of orig_phi if necessary. Returns either the newly
// created phi or an existing phi.  Sets new_created to indicate whether a new
// phi was created.  Cache the last newly created phi in the node map.
2221 //
2222 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, bool &new_created) {
2223   Compile *C = _compile;
2224   PhaseGVN* igvn = _igvn;
2225   new_created = false;
2226   int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
2227   // nothing to do if orig_phi is bottom memory or matches alias_idx
2228   if (phi_alias_idx == alias_idx) {
2229     return orig_phi;
2230   }
2231   // Have we recently created a Phi for this alias index?
2232   PhiNode *result = get_map_phi(orig_phi->_idx);
2233   if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
2234     return result;
2235   }
  // The previous check may fail when the same wide memory Phi was split
  // into Phis for different memory slices. Search all Phis for this region.
2238   if (result != NULL) {
2239     Node* region = orig_phi->in(0);
2240     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2241       Node* phi = region->fast_out(i);
2242       if (phi->is_Phi() &&
2243           C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
2244         assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
2245         return phi->as_Phi();
2246       }
2247     }
2248   }
2249   if ((int)C->unique() + 2*NodeLimitFudgeFactor > MaxNodeLimit) {
2250     if (C->do_escape_analysis() == true && !C->failing()) {
2251       // Retry compilation without escape analysis.
2252       // If this is the first failure, the sentinel string will "stick"
2253       // to the Compile object, and the C2Compiler will see it and retry.
2254       C->record_failure(C2Compiler::retry_no_escape_analysis());
2255     }
2256     return NULL;
2257   }
2258   orig_phi_worklist.append_if_missing(orig_phi);
2259   const TypePtr *atype = C->get_adr_type(alias_idx);
2260   result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
2261   C->copy_node_notes_to(result, orig_phi);
2262   igvn->set_type(result, result->bottom_type());
2263   record_for_optimizer(result);
2264   set_map(orig_phi, result);
2265   new_created = true;
2266   return result;
2267 }
2268 
2269 //
2270 // Return a new version of Memory Phi "orig_phi" with the inputs having the
2271 // specified alias index.
2272 //
2273 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist) {
2274   assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
2275   Compile *C = _compile;
2276   PhaseGVN* igvn = _igvn;
2277   bool new_phi_created;
2278   PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
2279   if (!new_phi_created) {
2280     return result;
2281   }
2282   GrowableArray<PhiNode *>  phi_list;
2283   GrowableArray<uint>  cur_input;
2284   PhiNode *phi = orig_phi;
2285   uint idx = 1;
2286   bool finished = false;
  while (!finished) {
2288     while (idx < phi->req()) {
2289       Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
2290       if (mem != NULL && mem->is_Phi()) {
2291         PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
2292         if (new_phi_created) {
          // Found a Phi for which we created a new split; push the current
          // one on the worklist and begin processing the new one.
2295           phi_list.push(phi);
2296           cur_input.push(idx);
2297           phi = mem->as_Phi();
2298           result = newphi;
2299           idx = 1;
2300           continue;
2301         } else {
2302           mem = newphi;
2303         }
2304       }
2305       if (C->failing()) {
2306         return NULL;
2307       }
2308       result->set_req(idx++, mem);
2309     }
2310 #ifdef ASSERT
2311     // verify that the new Phi has an input for each input of the original
2312     assert( phi->req() == result->req(), "must have same number of inputs.");
2313     assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
2314 #endif
    // Check that the new Phi's inputs correspond to the original's.
2317     for (uint i = 1; i < phi->req(); i++) {
2318       Node* in = result->in(i);
2319       assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");
2320     }
2321     // we have finished processing a Phi, see if there are any more to do
2322     finished = (phi_list.length() == 0 );
2323     if (!finished) {
2324       phi = phi_list.pop();
2325       idx = cur_input.pop();
2326       PhiNode *prev_result = get_map_phi(phi->_idx);
2327       prev_result->set_req(idx++, result);
2328       result = prev_result;
2329     }
2330   }
2331   return result;
2332 }
2333 
2334 //
2335 // The next methods are derived from methods in MemNode.
2336 //
2337 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
2338   Node *mem = mmem;
2339   // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
2340   // means an array I have not precisely typed yet.  Do not do any
2341   // alias stuff with it any time soon.
2342   if (toop->base() != Type::AnyPtr &&
2343       !(toop->klass() != NULL &&
2344         toop->klass()->is_java_lang_Object() &&
2345         toop->offset() == Type::OffsetBot)) {
2346     mem = mmem->memory_at(alias_idx);
2347     // Update input if it is progress over what we have now
2348   }
2349   return mem;
2350 }
2351 
2352 //
2353 // Move memory users to their memory slices.
2354 //
2355 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis) {
2356   Compile* C = _compile;
2357   PhaseGVN* igvn = _igvn;
2358   const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
2359   assert(tp != NULL, "ptr type");
2360   int alias_idx = C->get_alias_index(tp);
2361   int general_idx = C->get_general_index(alias_idx);
2362 
2363   // Move users first
2364   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2365     Node* use = n->fast_out(i);
2366     if (use->is_MergeMem()) {
2367       MergeMemNode* mmem = use->as_MergeMem();
2368       assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
2369       if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
2370         continue; // Nothing to do
2371       }
2372       // Replace previous general reference to mem node.
2373       uint orig_uniq = C->unique();
2374       Node* m = find_inst_mem(n, general_idx, orig_phis);
2375       assert(orig_uniq == C->unique(), "no new nodes");
2376       mmem->set_memory_at(general_idx, m);
2377       --imax;
2378       --i;
2379     } else if (use->is_MemBar()) {
2380       assert(!use->is_Initialize(), "initializing stores should not be moved");
2381       if (use->req() > MemBarNode::Precedent &&
2382           use->in(MemBarNode::Precedent) == n) {
2383         // Don't move related membars.
2384         record_for_optimizer(use);
2385         continue;
2386       }
2387       tp = use->as_MemBar()->adr_type()->isa_ptr();
      if ((tp != NULL && C->get_alias_index(tp) == alias_idx) ||
          alias_idx == general_idx) {
2390         continue; // Nothing to do
2391       }
2392       // Move to general memory slice.
2393       uint orig_uniq = C->unique();
2394       Node* m = find_inst_mem(n, general_idx, orig_phis);
2395       assert(orig_uniq == C->unique(), "no new nodes");
2396       igvn->hash_delete(use);
2397       imax -= use->replace_edge(n, m);
2398       igvn->hash_insert(use);
2399       record_for_optimizer(use);
2400       --i;
2401 #ifdef ASSERT
2402     } else if (use->is_Mem()) {
2403       if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
2404         // Don't move related cardmark.
2405         continue;
2406       }
2407       // Memory nodes should have new memory input.
2408       tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
2409       assert(tp != NULL, "ptr type");
2410       int idx = C->get_alias_index(tp);
2411       assert(get_map(use->_idx) != NULL || idx == alias_idx,
2412              "Following memory nodes should have new memory input or be on the same memory slice");
2413     } else if (use->is_Phi()) {
2414       // Phi nodes should be split and moved already.
2415       tp = use->as_Phi()->adr_type()->isa_ptr();
2416       assert(tp != NULL, "ptr type");
2417       int idx = C->get_alias_index(tp);
2418       assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
2419     } else {
2420       use->dump();
2421       assert(false, "should not be here");
2422 #endif
2423     }
2424   }
2425 }
2426 
2427 //
// Search the memory chain of "orig_mem" to find a MemNode whose address
// type corresponds to the specified alias index.
2430 //
2431 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *>  &orig_phis) {
2432   if (orig_mem == NULL)
2433     return orig_mem;
2434   Compile* C = _compile;
2435   PhaseGVN* igvn = _igvn;
2436   const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
2437   bool is_instance = (toop != NULL) && toop->is_known_instance();
2438   Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
2439   Node *prev = NULL;
2440   Node *result = orig_mem;
2441   while (prev != result) {
2442     prev = result;
2443     if (result == start_mem)
2444       break;  // hit one of our sentinels
2445     if (result->is_Mem()) {
2446       const Type *at = igvn->type(result->in(MemNode::Address));
2447       if (at == Type::TOP)
2448         break; // Dead
2449       assert (at->isa_ptr() != NULL, "pointer type required.");
2450       int idx = C->get_alias_index(at->is_ptr());
2451       if (idx == alias_idx)
2452         break; // Found
2453       if (!is_instance && (at->isa_oopptr() == NULL ||
2454                            !at->is_oopptr()->is_known_instance())) {
2455         break; // Do not skip store to general memory slice.
2456       }
2457       result = result->in(MemNode::Memory);
2458     }
2459     if (!is_instance)
2460       continue;  // don't search further for non-instance types
2461     // skip over a call which does not affect this memory slice
2462     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
2463       Node *proj_in = result->in(0);
2464       if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
2465         break;  // hit one of our sentinels
2466       } else if (proj_in->is_Call()) {
2467         CallNode *call = proj_in->as_Call();
2468         if (!call->may_modify(toop, igvn)) {
2469           result = call->in(TypeFunc::Memory);
2470         }
2471       } else if (proj_in->is_Initialize()) {
2472         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
        // Stop if this is the initialization for the object instance
        // which contains this memory slice, otherwise skip over it.
2475         if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) {
2476           result = proj_in->in(TypeFunc::Memory);
2477         }
2478       } else if (proj_in->is_MemBar()) {
2479         result = proj_in->in(TypeFunc::Memory);
2480       }
2481     } else if (result->is_MergeMem()) {
2482       MergeMemNode *mmem = result->as_MergeMem();
2483       result = step_through_mergemem(mmem, alias_idx, toop);
2484       if (result == mmem->base_memory()) {
2485         // Didn't find instance memory, search through general slice recursively.
2486         result = mmem->memory_at(C->get_general_index(alias_idx));
2487         result = find_inst_mem(result, alias_idx, orig_phis);
2488         if (C->failing()) {
2489           return NULL;
2490         }
2491         mmem->set_memory_at(alias_idx, result);
2492       }
2493     } else if (result->is_Phi() &&
2494                C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
2495       Node *un = result->as_Phi()->unique_input(igvn);
2496       if (un != NULL) {
2497         orig_phis.append_if_missing(result->as_Phi());
2498         result = un;
2499       } else {
2500         break;
2501       }
2502     } else if (result->is_ClearArray()) {
2503       if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
        // Cannot bypass the initialization of the instance
        // we are looking for.
2506         break;
2507       }
2508       // Otherwise skip it (the call updated 'result' value).
2509     } else if (result->Opcode() == Op_SCMemProj) {
2510       assert(result->in(0)->is_LoadStore(), "sanity");
2511       const Type *at = igvn->type(result->in(0)->in(MemNode::Address));
2512       if (at != Type::TOP) {
2513         assert (at->isa_ptr() != NULL, "pointer type required.");
2514         int idx = C->get_alias_index(at->is_ptr());
        assert(idx != alias_idx, "Object is not scalar replaceable if a LoadStore node accesses its field");
2516         break;
2517       }
2518       result = result->in(0)->in(MemNode::Memory);
2519     }
2520   }
2521   if (result->is_Phi()) {
2522     PhiNode *mphi = result->as_Phi();
2523     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
2524     const TypePtr *t = mphi->adr_type();
2525     if (!is_instance) {
2526       // Push all non-instance Phis on the orig_phis worklist to update inputs
2527       // during Phase 4 if needed.
2528       orig_phis.append_if_missing(mphi);
2529     } else if (C->get_alias_index(t) != alias_idx) {
2530       // Create a new Phi with the specified alias index type.
2531       result = split_memory_phi(mphi, alias_idx, orig_phis);
2532     }
2533   }
  // The result is either a MemNode, a PhiNode or an InitializeNode.
2535   return result;
2536 }
2537 
2538 //
//  Convert the types of unescaped objects to instance types where possible,
//  propagate the new type information through the graph, and update memory
//  edges and MergeMem inputs to reflect the new type.
//
//  We start with allocations (and calls which may be allocations) on alloc_worklist.
//  The processing is done in 4 phases:
//
//  Phase 1:  Process possible allocations from alloc_worklist.  Create instance
//            types for the CheckCastPP for allocations where possible.
//            Propagate the new types through users as follows:
//               casts and Phi:  push users on alloc_worklist
//               AddP:  cast Base and Address inputs to the instance type
//                      push any AddP users on alloc_worklist and push any memnode
//                      users onto memnode_worklist.
//  Phase 2:  Process MemNodes from memnode_worklist.  Compute a new address
//            type and search the Memory chain for a store with the appropriate
//            address type.  If a Phi is found, create a new version with
//            the appropriate memory slices from each of the Phi inputs.
//            For stores, process the users as follows:
//               MemNode:  push on memnode_worklist
//               MergeMem: push on mergemem_worklist
//  Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory
//            slice, moving the first node encountered of each instance type to
//            the input corresponding to its alias index.
//  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input
//            of memnodes.
2565 //
// In the following example, the CheckCastPP nodes are the casts of allocation
// results and the allocation of node 29 is unescaped and eligible to be an
// instance type.
2569 //
2570 // We start with:
2571 //
2572 //     7 Parm #memory
2573 //    10  ConI  "12"
2574 //    19  CheckCastPP   "Foo"
2575 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
2576 //    29  CheckCastPP   "Foo"
2577 //    30  AddP  _ 29 29 10  Foo+12  alias_index=4
2578 //
2579 //    40  StoreP  25   7  20   ... alias_index=4
2580 //    50  StoreP  35  40  30   ... alias_index=4
2581 //    60  StoreP  45  50  20   ... alias_index=4
2582 //    70  LoadP    _  60  30   ... alias_index=4
2583 //    80  Phi     75  50  60   Memory alias_index=4
2584 //    90  LoadP    _  80  30   ... alias_index=4
2585 //   100  LoadP    _  80  20   ... alias_index=4
2586 //
2587 //
2588 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24
2589 // and creating a new alias index for node 30.  This gives:
2590 //
2591 //     7 Parm #memory
2592 //    10  ConI  "12"
2593 //    19  CheckCastPP   "Foo"
2594 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
2595 //    29  CheckCastPP   "Foo"  iid=24
2596 //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
2597 //
2598 //    40  StoreP  25   7  20   ... alias_index=4
2599 //    50  StoreP  35  40  30   ... alias_index=6
2600 //    60  StoreP  45  50  20   ... alias_index=4
2601 //    70  LoadP    _  60  30   ... alias_index=6
2602 //    80  Phi     75  50  60   Memory alias_index=4
2603 //    90  LoadP    _  80  30   ... alias_index=6
2604 //   100  LoadP    _  80  20   ... alias_index=4
2605 //
// In phase 2, new memory inputs are computed for the loads and stores,
// and a new version of the phi is created.  In phase 4, the inputs to
2608 // node 80 are updated and then the memory nodes are updated with the
2609 // values computed in phase 2.  This results in:
2610 //
2611 //     7 Parm #memory
2612 //    10  ConI  "12"
2613 //    19  CheckCastPP   "Foo"
2614 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
2615 //    29  CheckCastPP   "Foo"  iid=24
2616 //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
2617 //
2618 //    40  StoreP  25  7   20   ... alias_index=4
2619 //    50  StoreP  35  7   30   ... alias_index=6
2620 //    60  StoreP  45  40  20   ... alias_index=4
2621 //    70  LoadP    _  50  30   ... alias_index=6
2622 //    80  Phi     75  40  60   Memory alias_index=4
2623 //   120  Phi     75  50  50   Memory alias_index=6
2624 //    90  LoadP    _ 120  30   ... alias_index=6
2625 //   100  LoadP    _  80  20   ... alias_index=4
2626 //
2627 void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist) {
2628   GrowableArray<Node *>  memnode_worklist;
2629   GrowableArray<PhiNode *>  orig_phis;
2630   PhaseIterGVN  *igvn = _igvn;
2631   uint new_index_start = (uint) _compile->num_alias_types();
2632   Arena* arena = Thread::current()->resource_area();
2633   VectorSet visited(arena);
2634   ideal_nodes.clear(); // Reset for use with set_map/get_map.
2635   uint unique_old = _compile->unique();
2636 
2637   //  Phase 1:  Process possible allocations from alloc_worklist.
2638   //  Create instance types for the CheckCastPP for allocations where possible.
2639   //
2640   // (Note: don't forget to change the order of the second AddP node on
2641   //  the alloc_worklist if the order of the worklist processing is changed,
2642   //  see the comment in find_second_addp().)
2643   //
2644   while (alloc_worklist.length() != 0) {
2645     Node *n = alloc_worklist.pop();
2646     uint ni = n->_idx;
2647     if (n->is_Call()) {
2648       CallNode *alloc = n->as_Call();
2649       // copy escape information to call node
2650       PointsToNode* ptn = ptnode_adr(alloc->_idx);
2651       PointsToNode::EscapeState es = ptn->escape_state();
      // We have an allocation or call which returns a Java object;
      // see if it is unescaped.
2654       if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable())
2655         continue;
2656       // Find CheckCastPP for the allocate or for the return value of a call
2657       n = alloc->result_cast();
2658       if (n == NULL) {            // No uses except Initialize node
2659         if (alloc->is_Allocate()) {
2660           // Set the scalar_replaceable flag for the allocation
2661           // so that it can be eliminated if it has no uses.
2662           alloc->as_Allocate()->_is_scalar_replaceable = true;
2663         }
2664         continue;
2665       }
2666       if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
2667         assert(!alloc->is_Allocate(), "allocation should have unique type");
2668         continue;
2669       }
2670 
2671       // The inline code for Object.clone() casts the allocation result to
2672       // java.lang.Object and then to the actual type of the allocated
2673       // object. Detect this case and use the second cast.
2674       // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when
2675       // the allocation result is cast to java.lang.Object and then
2676       // to the actual Array type.
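           // An illustrative shape of such IR (hypothetical node numbers, in
           // the style of the header example):
           //
           //   25  Allocate     ...                   // the allocation
           //   27  CheckCastPP  "java/lang/Object"    // n, result_cast()
           //   28  CheckCastPP  "Foo"                 // cast2, the actual type
           //
           // When the second cast is found, it is used below in place of n.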
2677       if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
2678           && (alloc->is_AllocateArray() ||
2679               igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {
2680         Node *cast2 = NULL;
2681         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2682           Node *use = n->fast_out(i);
2683           if (use->is_CheckCastPP()) {
2684             cast2 = use;
2685             break;
2686           }
2687         }
2688         if (cast2 != NULL) {
2689           n = cast2;
2690         } else {
2691           // Not scalar replaceable if the allocation type is unknown statically
2692           // (reflection allocation): the object can't be restored during
2693           // deoptimization without the precise type.
2694           continue;
2695         }
2696       }
2697       if (alloc->is_Allocate()) {
2698         // Set the scalar_replaceable flag for the allocation
2699         // so that it can be eliminated.
2700         alloc->as_Allocate()->_is_scalar_replaceable = true;
2701       }
2702       set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state
2703       // In order for an object to be scalar-replaceable, it must be:
2704       //   - a direct allocation (not a call returning an object)
2705       //   - non-escaping
2706       //   - eligible to be a unique type
2707       //   - not determined to be ineligible by escape analysis
2708       set_map(alloc, n);
2709       set_map(n, alloc);
2710       const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
2711       if (t == NULL)
2712         continue;  // not a TypeOopPtr
2713       const TypeOopPtr* tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni);
2714       igvn->hash_delete(n);
2715       igvn->set_type(n,  tinst);
2716       n->raise_bottom_type(tinst);
2717       igvn->hash_insert(n);
2718       record_for_optimizer(n);
2719       if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
2720 
2721         // First, put on the worklist all Field edges from the Connection Graph,
2722         // which is more accurate than putting the immediate users from the Ideal Graph.
2723         for (EdgeIterator e(ptn); e.has_next(); e.next()) {
2724           PointsToNode* tgt = e.get();
2725           Node* use = tgt->ideal_node();
2726           assert(tgt->is_Field() && use->is_AddP(),
2727                  "only AddP nodes are Field edges in CG");
2728           if (use->outcnt() > 0) { // Don't process dead nodes
2729             Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
2730             if (addp2 != NULL) {
2731               assert(alloc->is_AllocateArray(),"array allocation was expected");
2732               alloc_worklist.append_if_missing(addp2);
2733             }
2734             alloc_worklist.append_if_missing(use);
2735           }
2736         }
2737 
2738         // An allocation may have an Initialize which has raw stores. Scan
2739         // the users of the raw allocation result and push AddP users
2740         // on alloc_worklist.
2741         Node *raw_result = alloc->proj_out(TypeFunc::Parms);
2742         assert (raw_result != NULL, "must have an allocation result");
2743         for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
2744           Node *use = raw_result->fast_out(i);
2745           if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
2746             Node* addp2 = find_second_addp(use, raw_result);
2747             if (addp2 != NULL) {
2748               assert(alloc->is_AllocateArray(),"array allocation was expected");
2749               alloc_worklist.append_if_missing(addp2);
2750             }
2751             alloc_worklist.append_if_missing(use);
2752           } else if (use->is_MemBar()) {
2753             memnode_worklist.append_if_missing(use);
2754           }
2755         }
2756       }
2757     } else if (n->is_AddP()) {
2758       JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
2759       if (jobj == NULL || jobj == phantom_obj) {
2760 #ifdef ASSERT
2761         ptnode_adr(get_addp_base(n)->_idx)->dump();
2762         ptnode_adr(n->_idx)->dump();
2763         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
2764 #endif
2765         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
2766         return;
2767       }
2768       Node *base = get_map(jobj->idx());  // CheckCastPP node
2769       if (!split_AddP(n, base)) continue; // wrong type from dead path
2770     } else if (n->is_Phi() ||
2771                n->is_CheckCastPP() ||
2772                n->is_EncodeP() ||
2773                n->is_DecodeN() ||
2774                (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
2775       if (visited.test_set(n->_idx)) {
2776         assert(n->is_Phi(), "loops only through Phi's");
2777         continue;  // already processed
2778       }
2779       JavaObjectNode* jobj = unique_java_object(n);
2780       if (jobj == NULL || jobj == phantom_obj) {
2781 #ifdef ASSERT
2782         ptnode_adr(n->_idx)->dump();
2783         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
2784 #endif
2785         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
2786         return;
2787       } else {
2788         Node *val = get_map(jobj->idx());   // CheckCastPP node
2789         TypeNode *tn = n->as_Type();
2790         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
2791         assert(tinst != NULL && tinst->is_known_instance() &&
2792                tinst->instance_id() == jobj->idx() , "instance type expected.");
2793 
2794         const Type *tn_type = igvn->type(tn);
2795         const TypeOopPtr *tn_t;
2796         if (tn_type->isa_narrowoop()) {
2797           tn_t = tn_type->make_ptr()->isa_oopptr();
2798         } else {
2799           tn_t = tn_type->isa_oopptr();
2800         }
2801         if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
2802           if (tn_type->isa_narrowoop()) {
2803             tn_type = tinst->make_narrowoop();
2804           } else {
2805             tn_type = tinst;
2806           }
2807           igvn->hash_delete(tn);
2808           igvn->set_type(tn, tn_type);
2809           tn->set_type(tn_type);
2810           igvn->hash_insert(tn);
2811           record_for_optimizer(n);
2812         } else {
2813           assert(tn_type == TypePtr::NULL_PTR ||
2814                  (tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass())),
2815                  "unexpected type");
2816           continue; // Skip dead path with different type
2817         }
2818       }
2819     } else {
2820       debug_only(n->dump();)
2821       assert(false, "EA: unexpected node");
2822       continue;
2823     }
2824     // push allocation's users on appropriate worklist
2825     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2826       Node *use = n->fast_out(i);
2827       if (use->is_Mem() && use->in(MemNode::Address) == n) {
2828         // Load/store to instance's field
2829         memnode_worklist.append_if_missing(use);
2830       } else if (use->is_MemBar()) {
2831         memnode_worklist.append_if_missing(use);
2832       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
2833         Node* addp2 = find_second_addp(use, n);
2834         if (addp2 != NULL) {
2835           alloc_worklist.append_if_missing(addp2);
2836         }
2837         alloc_worklist.append_if_missing(use);
2838       } else if (use->is_Phi() ||
2839                  use->is_CheckCastPP() ||
2840                  use->is_EncodeP() ||
2841                  use->is_DecodeN() ||
2842                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
2843         alloc_worklist.append_if_missing(use);
2844 #ifdef ASSERT
2845       } else if (use->is_Mem()) {
2846         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
2847       } else if (use->is_MergeMem()) {
2848         assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
2849       } else if (use->is_SafePoint()) {
2850         // Look for MergeMem nodes for calls which reference a unique allocation
2851         // (through CheckCastPP nodes) even for debug info.
2852         Node* m = use->in(TypeFunc::Memory);
2853         if (m->is_MergeMem()) {
2854           assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
2855         }
2856       } else {
2857         uint op = use->Opcode();
2858         if (!(op == Op_CmpP || op == Op_Conv2B ||
2859               op == Op_CastP2X || op == Op_StoreCM ||
2860               op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||
2861               op == Op_StrEquals || op == Op_StrIndexOf)) {
2862           n->dump();
2863           use->dump();
2864           assert(false, "EA: missing allocation reference path");
2865         }
2866 #endif
2867       }
2868     }
2869 
2870   }
2871   // New alias types were created in split_AddP().
2872   uint new_index_end = (uint) _compile->num_alias_types();
2873   assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");
2874 
2875   //  Phase 2:  Process MemNodes from memnode_worklist: compute the new address
2876   //            type and the new values for their Memory inputs.  (The Memory
2877   //            inputs are not actually updated until phase 4.)
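       //
       //  A worked illustration using the header example: for LoadP node 70
       //  (alias_index=6), find_inst_mem() starts at its memory input, node 60,
       //  skips it (a StoreP on the general slice, alias_index=4) and returns
       //  node 50 (a StoreP with alias_index=6); that value is recorded with
       //  set_map() here and becomes node 70's memory input in phase 4.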
2878   if (memnode_worklist.length() == 0)
2879     return;  // nothing to do
2880   while (memnode_worklist.length() != 0) {
2881     Node *n = memnode_worklist.pop();
2882     if (visited.test_set(n->_idx))
2883       continue;
2884     if (n->is_Phi() || n->is_ClearArray()) {
2885       // we don't need to do anything, but the users must be pushed
2886     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
2887       // we don't need to do anything, but the users must be pushed
2888       n = n->as_MemBar()->proj_out(TypeFunc::Memory);
2889       if (n == NULL)
2890         continue;
2891     } else {
2892       assert(n->is_Mem(), "memory node required.");
2893       Node *addr = n->in(MemNode::Address);
2894       const Type *addr_t = igvn->type(addr);
2895       if (addr_t == Type::TOP)
2896         continue;
2897       assert (addr_t->isa_ptr() != NULL, "pointer type required.");
2898       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
2899       assert ((uint)alias_idx < new_index_end, "wrong alias index");
2900       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
2901       if (_compile->failing()) {
2902         return;
2903       }
2904       if (mem != n->in(MemNode::Memory)) {
2905         // We delay the memory edge update since we need the old one in
2906         // the MergeMem code below when instance memory slices are separated.
2907         set_map(n, mem);
2908       }
2909       if (n->is_Load()) {
2910         continue;  // don't push users
2911       } else if (n->is_LoadStore()) {
2912         // get the memory projection
2913         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2914           Node *use = n->fast_out(i);
2915           if (use->Opcode() == Op_SCMemProj) {
2916             n = use;
2917             break;
2918           }
2919         }
2920         assert(n->Opcode() == Op_SCMemProj, "memory projection required");
2921       }
2922     }
2923     // push user on appropriate worklist
2924     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2925       Node *use = n->fast_out(i);
2926       if (use->is_Phi() || use->is_ClearArray()) {
2927         memnode_worklist.append_if_missing(use);
2928       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
2929         if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
2930           continue;
2931         memnode_worklist.append_if_missing(use);
2932       } else if (use->is_MemBar()) {
2933         memnode_worklist.append_if_missing(use);
2934 #ifdef ASSERT
2935       } else if (use->is_Mem()) {
2936         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
2937       } else if (use->is_MergeMem()) {
2938         assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
2939       } else {
2940         uint op = use->Opcode();
2941         if (!(op == Op_StoreCM ||
2942               (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
2943                strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
2944               op == Op_AryEq || op == Op_StrComp ||
2945               op == Op_StrEquals || op == Op_StrIndexOf)) {
2946           n->dump();
2947           use->dump();
2948           assert(false, "EA: missing memory path");
2949         }
2950 #endif
2951       }
2952     }
2953   }
2954 
2955   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
2956   //            Walk each memory slice, moving the first node encountered of each
2957   //            instance type to the input corresponding to its alias index.
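       //
       //            For illustration (node numbers from the header example): a
       //            MergeMem created before the split may carry only the general
       //            slice, e.g. in(4) = 60; after this phase the new instance
       //            slice gets its own input, e.g. in(6) = 50.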
2958   uint length = _mergemem_worklist.length();
2959   for (uint next = 0; next < length; ++next) {
2960     MergeMemNode* nmm = _mergemem_worklist.at(next);
2961     assert(!visited.test_set(nmm->_idx), "should not be visited before");
2962     // Note: we don't want to use MergeMemStream here because we only want to
2963     // scan inputs which exist at the start, not ones we add during processing.
2964     // Note 2: MergeMem may already contain instance memory slices added
2965     // during find_inst_mem() calls when the memory nodes were processed above.
2966     igvn->hash_delete(nmm);
2967     uint nslices = nmm->req();
2968     for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
2969       Node* mem = nmm->in(i);
2970       Node* cur = NULL;
2971       if (mem == NULL || mem->is_top())
2972         continue;
2973       // First, update mergemem by moving memory nodes to corresponding slices
2974       // if their type became more precise since this mergemem was created.
2975       while (mem->is_Mem()) {
2976         const Type *at = igvn->type(mem->in(MemNode::Address));
2977         if (at != Type::TOP) {
2978           assert (at->isa_ptr() != NULL, "pointer type required.");
2979           uint idx = (uint)_compile->get_alias_index(at->is_ptr());
2980           if (idx == i) {
2981             if (cur == NULL)
2982               cur = mem;
2983           } else {
2984             if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
2985               nmm->set_memory_at(idx, mem);
2986             }
2987           }
2988         }
2989         mem = mem->in(MemNode::Memory);
2990       }
2991       nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
2992       // Find any instance of the current type if we haven't already encountered
2993       // a memory slice of the instance along the memory chain.
2994       for (uint ni = new_index_start; ni < new_index_end; ni++) {
2995         if ((uint)_compile->get_general_index(ni) == i) {
2996           Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
2997           if (nmm->is_empty_memory(m)) {
2998             Node* result = find_inst_mem(mem, ni, orig_phis);
2999             if (_compile->failing()) {
3000               return;
3001             }
3002             nmm->set_memory_at(ni, result);
3003           }
3004         }
3005       }
3006     }
3007     // Find memory values for the rest of the instance types.
3008     for (uint ni = new_index_start; ni < new_index_end; ni++) {
3009       const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
3010       Node* result = step_through_mergemem(nmm, ni, tinst);
3011       if (result == nmm->base_memory()) {
3012         // Didn't find instance memory, search through general slice recursively.
3013         result = nmm->memory_at(_compile->get_general_index(ni));
3014         result = find_inst_mem(result, ni, orig_phis);
3015         if (_compile->failing()) {
3016           return;
3017         }
3018         nmm->set_memory_at(ni, result);
3019       }
3020     }
3021     igvn->hash_insert(nmm);
3022     record_for_optimizer(nmm);
3023   }
3024 
3025   //  Phase 4:  Update the inputs of non-instance memory Phis and
3026   //            the Memory input of MemNodes.
3027   // First update the inputs of any non-instance Phi from
3028   // which we split out an instance Phi.  Note we don't have
3029   // to recursively process Phis encountered on the input memory
3030   // chains as is done in split_memory_phi() since they will
3031   // also be processed here.
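       //
       // Referring to the header example: the original Phi (node 80,
       // alias_index=4) gets its input 50 replaced by find_inst_mem(50, 4) = 40,
       // yielding "80 Phi 75 40 60", while the split instance Phi (node 120,
       // alias_index=6) created by find_inst_mem() keeps the instance values.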
3032   for (int j = 0; j < orig_phis.length(); j++) {
3033     PhiNode *phi = orig_phis.at(j);
3034     int alias_idx = _compile->get_alias_index(phi->adr_type());
3035     igvn->hash_delete(phi);
3036     for (uint i = 1; i < phi->req(); i++) {
3037       Node *mem = phi->in(i);
3038       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
3039       if (_compile->failing()) {
3040         return;
3041       }
3042       if (mem != new_mem) {
3043         phi->set_req(i, new_mem);
3044       }
3045     }
3046     igvn->hash_insert(phi);
3047     record_for_optimizer(phi);
3048   }
3049 
3050   // Update the memory inputs of MemNodes with the values we computed in
3051   // phase 2 and move the memory users of stores to corresponding memory slices.
3052   // Disable the memory split verification code until the fix for 6984348:
3053   // currently it produces false negatives since it does not cover all cases.
3054 #if 0 // ifdef ASSERT
3055   visited.Reset();
3056   Node_Stack old_mems(arena, _compile->unique() >> 2);
3057 #endif
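       // In the header example this step rewrites, e.g., StoreP node 50's memory
       // input from node 40 to node 7 and LoadP node 70's from node 60 to node
       // 50, using the values recorded with set_map() in phase 2.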
3058   for (uint i = 0; i < ideal_nodes.size(); i++) {
3059     Node*    n = ideal_nodes.at(i);
3060     Node* nmem = get_map(n->_idx);
3061     assert(nmem != NULL, "sanity");
3062     if (n->is_Mem()) {
3063 #if 0 // ifdef ASSERT
3064       Node* old_mem = n->in(MemNode::Memory);
3065       if (!visited.test_set(old_mem->_idx)) {
3066         old_mems.push(old_mem, old_mem->outcnt());
3067       }
3068 #endif
3069       assert(n->in(MemNode::Memory) != nmem, "sanity");
3070       if (!n->is_Load()) {
3071         // Move memory users of a store first.
3072         move_inst_mem(n, orig_phis);
3073       }
3074       // Now update memory input
3075       igvn->hash_delete(n);
3076       n->set_req(MemNode::Memory, nmem);
3077       igvn->hash_insert(n);
3078       record_for_optimizer(n);
3079     } else {
3080       assert(n->is_Allocate() || n->is_CheckCastPP() ||
3081              n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
3082     }
3083   }
3084 #if 0 // ifdef ASSERT
3085   // Verify that memory was split correctly
3086   while (old_mems.is_nonempty()) {
3087     Node* old_mem = old_mems.node();
3088     uint  old_cnt = old_mems.index();
3089     old_mems.pop();
3090     assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
3091   }
3092 #endif
3093 }
3094 
3095 #ifndef PRODUCT
3096 static const char *node_type_names[] = {
3097   "UnknownType",
3098   "JavaObject",
3099   "LocalVar",
3100   "Field",
3101   "Arraycopy"
3102 };
3103 
3104 static const char *esc_names[] = {
3105   "UnknownEscape",
3106   "NoEscape",
3107   "ArgEscape",
3108   "GlobalEscape"
3109 };
3110 
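     // Print this node in the form
     //   <type> <escape>(<fields escape>) [NSR] [( <bases> )] [ <edges> [ <uses> ]]
     // where "P", "F" and "cp" mark JavaObject, Field and Arraycopy nodes and
     // "b" marks a base use; then dump the corresponding ideal node.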
3111 void PointsToNode::dump(bool print_state) const {
3112   NodeType nt = node_type();
3113   tty->print("%s ", node_type_names[(int) nt]);
3114   if (print_state) {
3115     EscapeState es = escape_state();
3116     EscapeState fields_es = fields_escape_state();
3117     tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
3118     if (nt == PointsToNode::JavaObject && !this->scalar_replaceable())
3119       tty->print("NSR");
3120   }
3121   if (is_Field()) {
3122     FieldNode* f = (FieldNode*)this;
3123     tty->print("(");
3124     for (BaseIterator i(f); i.has_next(); i.next()) {
3125       PointsToNode* b = i.get();
3126       tty->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : ""));
3127     }
3128     tty->print(" )");
3129   }
3130   tty->print("[");
3131   for (EdgeIterator i(this); i.has_next(); i.next()) {
3132     PointsToNode* e = i.get();
3133     tty->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
3134   }
3135   tty->print(" [");
3136   for (UseIterator i(this); i.has_next(); i.next()) {
3137     PointsToNode* u = i.get();
3138     bool is_base = false;
3139     if (PointsToNode::is_base_use(u)) {
3140       is_base = true;
3141       u = PointsToNode::get_use_node(u)->as_Field();
3142     }
3143     tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");
3144   }
3145   tty->print(" ]]  ");
3146   if (_node == NULL)
3147     tty->print_cr("<null>");
3148   else
3149     _node->dump();
3150 }
3151 
3152 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
3153   bool first = true;
3154   int ptnodes_length = ptnodes_worklist.length();
3155   for (int i = 0; i < ptnodes_length; i++) {
3156     PointsToNode *ptn = ptnodes_worklist.at(i);
3157     if (ptn == NULL || !ptn->is_JavaObject())
3158       continue;
3159     PointsToNode::EscapeState es = ptn->escape_state();
3160     if (ptn->ideal_node()->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
3161       if (first) {
3162         tty->cr();
3163         tty->print("======== Connection graph for ");
3164         _compile->method()->print_short_name();
3165         tty->cr();
3166         first = false;
3167       }
3168       ptn->dump();
3169       // Print all locals and fields which reference this allocation
3170       for (UseIterator j(ptn); j.has_next(); j.next()) {
3171         PointsToNode* use = j.get();
3172         if (use->is_LocalVar()) {
3173           use->dump(Verbose);
3174         } else if (Verbose) {
3175           use->dump();
3176         }
3177       }
3178       tty->cr();
3179     }
3180   }
3181 }
3182 #endif