1 /*
   2  * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/c2/barrierSetC2.hpp"
  29 #include "libadt/vectset.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "opto/c2compiler.hpp"
  33 #include "opto/arraycopynode.hpp"
  34 #include "opto/callnode.hpp"
  35 #include "opto/cfgnode.hpp"
  36 #include "opto/compile.hpp"
  37 #include "opto/escape.hpp"
  38 #include "opto/phaseX.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "utilities/macros.hpp"
  42 #if INCLUDE_G1GC
  43 #include "gc/g1/g1ThreadLocalData.hpp"
  44 #endif // INCLUDE_G1GC
  45 #if INCLUDE_ZGC
  46 #include "gc/z/c2/zBarrierSetC2.hpp"
  47 #endif
  48 #if INCLUDE_SHENANDOAHGC
  49 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  50 #endif
  51 
  52 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  53   _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  54   _in_worklist(C->comp_arena()),
  55   _next_pidx(0),
  56   _collecting(true),
  57   _verify(false),
  58   _compile(C),
  59   _igvn(igvn),
  60   _node_map(C->comp_arena()) {
  61   // Add unknown java object.
  62   add_java_object(C->top(), PointsToNode::GlobalEscape);
  63   phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
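       // phantom_obj represents an object of unknown origin; references
       // that cannot be resolved during analysis will point to it.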
  64   // Add ConP(#NULL) and ConN(#NULL) nodes.
  65   Node* oop_null = igvn->zerocon(T_OBJECT);
  66   assert(oop_null->_idx < nodes_size(), "should be created already");
  67   add_java_object(oop_null, PointsToNode::NoEscape);
  68   null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  69   if (UseCompressedOops) {
  70     Node* noop_null = igvn->zerocon(T_NARROWOOP);
  71     assert(noop_null->_idx < nodes_size(), "should be created already");
  72     map_ideal_node(noop_null, null_obj);
  73   }
  74   _pcmp_neq = NULL; // Should be initialized
  75   _pcmp_eq  = NULL;
  76 }
  77 
  78 bool ConnectionGraph::has_candidates(Compile *C) {
  79   // EA brings benefits only when the code has allocations and/or locks which
  80   // are represented by ideal Macro nodes.
  81   int cnt = C->macro_count();
  82   for (int i = 0; i < cnt; i++) {
  83     Node *n = C->macro_node(i);
  84     if (n->is_Allocate())
  85       return true;
  86     if (n->is_Lock()) {
  87       Node* obj = n->as_Lock()->obj_node()->uncast();
  88       if (!(obj->is_Parm() || obj->is_Con()))
  89         return true;
  90     }
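         // Calls to boxing methods may produce non-escaping objects which
         // can be eliminated, so they are also candidates for EA.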
  91     if (n->is_CallStaticJava() &&
  92         n->as_CallStaticJava()->is_boxing_method()) {
  93       return true;
  94     }
  95   }
  96   return false;
  97 }
  98 
  99 void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
 100   Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
 101   ResourceMark rm;
 102 
 103   // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
 104   // to create space for them in ConnectionGraph::_nodes[].
 105   Node* oop_null = igvn->zerocon(T_OBJECT);
 106   Node* noop_null = igvn->zerocon(T_NARROWOOP);
 107   ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
 108   // Perform escape analysis
 109   if (congraph->compute_escape()) {
 110     // There are non escaping objects.
 111     C->set_congraph(congraph);
 112   }
 113   // Cleanup.
 114   if (oop_null->outcnt() == 0)
 115     igvn->hash_delete(oop_null);
 116   if (noop_null->outcnt() == 0)
 117     igvn->hash_delete(noop_null);
 118 }
 119 
 120 bool ConnectionGraph::compute_escape() {
 121   Compile* C = _compile;
 122   PhaseGVN* igvn = _igvn;
 123 
 124   // Worklists used by EA.
 125   Unique_Node_List delayed_worklist;
 126   GrowableArray<Node*> alloc_worklist;
 127   GrowableArray<Node*> ptr_cmp_worklist;
 128   GrowableArray<Node*> storestore_worklist;
 129   GrowableArray<ArrayCopyNode*> arraycopy_worklist;
 130   GrowableArray<PointsToNode*>   ptnodes_worklist;
 131   GrowableArray<JavaObjectNode*> java_objects_worklist;
 132   GrowableArray<JavaObjectNode*> non_escaped_worklist;
 133   GrowableArray<FieldNode*>      oop_fields_worklist;
 134   DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
 135 
 136   { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);
 137 
 138   // 1. Populate Connection Graph (CG) with PointsTo nodes.
 139   ideal_nodes.map(C->live_nodes(), NULL);  // preallocate space
 140   // Initialize worklist
 141   if (C->root() != NULL) {
 142     ideal_nodes.push(C->root());
 143   }
 144   // Processed ideal nodes are unique on the ideal_nodes list
 145   // but several ideal nodes may be mapped to the same phantom_obj.
 146   // To avoid duplicated entries on the following worklists,
 147   // add the phantom_obj to them only once.
 148   ptnodes_worklist.append(phantom_obj);
 149   java_objects_worklist.append(phantom_obj);
 150   for( uint next = 0; next < ideal_nodes.size(); ++next ) {
 151     Node* n = ideal_nodes.at(next);
 152     // Create PointsTo nodes and add them to Connection Graph. Called
 153     // only once per ideal node since ideal_nodes is a Unique_Node_List.
 154     add_node_to_connection_graph(n, &delayed_worklist);
 155     PointsToNode* ptn = ptnode_adr(n->_idx);
 156     if (ptn != NULL && ptn != phantom_obj) {
 157       ptnodes_worklist.append(ptn);
 158       if (ptn->is_JavaObject()) {
 159         java_objects_worklist.append(ptn->as_JavaObject());
 160         if ((n->is_Allocate() || n->is_CallStaticJava()) &&
 161             (ptn->escape_state() < PointsToNode::GlobalEscape)) {
 162           // Only the results of allocations and java static calls are interesting.
 163           non_escaped_worklist.append(ptn->as_JavaObject());
 164         }
 165       } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
 166         oop_fields_worklist.append(ptn->as_Field());
 167       }
 168     }
 169     if (n->is_MergeMem()) {
 170       // Collect all MergeMem nodes to add memory slices for
 171       // scalar replaceable objects in split_unique_types().
 172       _mergemem_worklist.append(n->as_MergeMem());
 173     } else if (OptimizePtrCompare && n->is_Cmp() &&
 174                (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
 175       // Collect compare pointers nodes.
 176       ptr_cmp_worklist.append(n);
 177     } else if (n->is_MemBarStoreStore()) {
 178       // Collect all MemBarStoreStore nodes so that depending on the
 179       // escape status of the associated Allocate node some of them
 180       // may be eliminated.
 181       storestore_worklist.append(n);
 182     } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
 183                (n->req() > MemBarNode::Precedent)) {
 184       record_for_optimizer(n);
 185 #ifdef ASSERT
 186     } else if (n->is_AddP()) {
 187       // Collect address nodes for graph verification.
 188       addp_worklist.append(n);
 189 #endif
 190     } else if (n->is_ArrayCopy()) {
 191       // Keep a list of ArrayCopy nodes so that if one of their inputs
 192       // is non-escaping, we can record a unique type.
 193       arraycopy_worklist.append(n->as_ArrayCopy());
 194     }
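         // Push all users of n so that every node reachable from Root is
         // visited; ideal_nodes is a Unique_Node_List, so each node is
         // processed only once.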
 195     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 196       Node* m = n->fast_out(i);   // Get user
 197       ideal_nodes.push(m);
 198     }
 199   }
 200   if (non_escaped_worklist.length() == 0) {
 201     _collecting = false;
 202     return false; // Nothing to do.
 203   }
 204   // Add final simple edges to graph.
 205   while(delayed_worklist.size() > 0) {
 206     Node* n = delayed_worklist.pop();
 207     add_final_edges(n);
 208   }
 209   int ptnodes_length = ptnodes_worklist.length();
 210 
 211 #ifdef ASSERT
 212   if (VerifyConnectionGraph) {
 213     // Verify that no new simple edges could be created and that all
 214     // local vars have edges.
 215     _verify = true;
 216     for (int next = 0; next < ptnodes_length; ++next) {
 217       PointsToNode* ptn = ptnodes_worklist.at(next);
 218       add_final_edges(ptn->ideal_node());
 219       if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
 220         ptn->dump();
 221         assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
 222       }
 223     }
 224     _verify = false;
 225   }
 226 #endif
 227   // The bytecode analyzer BCEscapeAnalyzer, used for Call node
 228   // processing, calls into CI to resolve symbols (types, fields, methods)
 229   // referenced in bytecode. During symbol resolution the VM may throw
 230   // an exception which CI cleans up and converts to a compilation failure.
 231   if (C->failing())  return false;
 232 
 233   // 2. Finish Graph construction by propagating references to all
 234   //    java objects through graph.
 235   if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist,
 236                                  java_objects_worklist, oop_fields_worklist)) {
 237     // All objects escaped or hit time or iterations limits.
 238     _collecting = false;
 239     return false;
 240   }
 241 
 242   // 3. Adjust scalar_replaceable state of nonescaping objects and push
 243   //    scalar replaceable allocations on alloc_worklist for processing
 244   //    in split_unique_types().
 245   int non_escaped_length = non_escaped_worklist.length();
 246   for (int next = 0; next < non_escaped_length; next++) {
 247     JavaObjectNode* ptn = non_escaped_worklist.at(next);
 248     bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
 249     Node* n = ptn->ideal_node();
 250     if (n->is_Allocate()) {
 251       n->as_Allocate()->_is_non_escaping = noescape;
 252     }
 253     if (n->is_CallStaticJava()) {
 254       n->as_CallStaticJava()->_is_non_escaping = noescape;
 255     }
 256     if (noescape && ptn->scalar_replaceable()) {
 257       adjust_scalar_replaceable_state(ptn);
 258       if (ptn->scalar_replaceable()) {
 259         alloc_worklist.append(ptn->ideal_node());
 260       }
 261     }
 262   }
 263 
 264 #ifdef ASSERT
 265   if (VerifyConnectionGraph) {
 266     // Verify that graph is complete - no new edges could be added or needed.
 267     verify_connection_graph(ptnodes_worklist, non_escaped_worklist,
 268                             java_objects_worklist, addp_worklist);
 269   }
 270   assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
 271   assert(null_obj->escape_state() == PointsToNode::NoEscape &&
 272          null_obj->edge_count() == 0 &&
 273          !null_obj->arraycopy_src() &&
 274          !null_obj->arraycopy_dst(), "sanity");
 275 #endif
 276 
 277   _collecting = false;
 278 
 279   } // TracePhase t3("connectionGraph")
 280 
 281   // 4. Optimize ideal graph based on EA information.
 282   bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);
 283   if (has_non_escaping_obj) {
 284     optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
 285   }
 286 
 287 #ifndef PRODUCT
 288   if (PrintEscapeAnalysis) {
 289     dump(ptnodes_worklist); // Dump ConnectionGraph
 290   }
 291 #endif
 292 
 293   bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
 294 #ifdef ASSERT
 295   if (VerifyConnectionGraph) {
 296     int alloc_length = alloc_worklist.length();
 297     for (int next = 0; next < alloc_length; ++next) {
 298       Node* n = alloc_worklist.at(next);
 299       PointsToNode* ptn = ptnode_adr(n->_idx);
 300       assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
 301     }
 302   }
 303 #endif
 304 
 305   // 5. Separate memory graph for scalar replaceable allocations.
 306   if (has_scalar_replaceable_candidates &&
 307       C->AliasLevel() >= 3 && EliminateAllocations) {
 308     // Now use the escape information to create unique types for
 309     // scalar replaceable objects.
 310     split_unique_types(alloc_worklist, arraycopy_worklist);
 311     if (C->failing())  return false;
 312     C->print_method(PHASE_AFTER_EA, 2);
 313 
 314 #ifdef ASSERT
 315   } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
 316     tty->print("=== No allocations eliminated for ");
 317     C->method()->print_short_name();
 318     if(!EliminateAllocations) {
 319       tty->print(" since EliminateAllocations is off ===");
 320     } else if(!has_scalar_replaceable_candidates) {
 321       tty->print(" since there are no scalar replaceable candidates ===");
 322     } else if(C->AliasLevel() < 3) {
 323       tty->print(" since AliasLevel < 3 ===");
 324     }
 325     tty->cr();
 326 #endif
 327   }
 328   return has_non_escaping_obj;
 329 }
 330 
 331 // Utility function for nodes that load an object
 332 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
 333   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 334   // ThreadLocal has RawPtr type.
 335   const Type* t = _igvn->type(n);
 336   if (t->make_ptr() != NULL) {
 337     Node* adr = n->in(MemNode::Address);
 338 #ifdef ASSERT
 339     if (!adr->is_AddP()) {
 340       assert(_igvn->type(adr)->isa_rawptr(), "sanity");
 341     } else {
 342       assert((ptnode_adr(adr->_idx) == NULL ||
 343               ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
 344     }
 345 #endif
 346     add_local_var_and_edge(n, PointsToNode::NoEscape,
 347                            adr, delayed_worklist);
 348   }
 349 }
 350 
 351 // Populate Connection Graph with PointsTo nodes and create simple
 352 // connection graph edges.
 353 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
 354   assert(!_verify, "this method should not be called for verification");
 355   PhaseGVN* igvn = _igvn;
 356   uint n_idx = n->_idx;
 357   PointsToNode* n_ptn = ptnode_adr(n_idx);
 358   if (n_ptn != NULL)
 359     return; // No need to redefine PointsTo node during first iteration.
 360 
 361   if (n->is_Call()) {
 362     // Arguments to allocation and locking don't escape.
 363     if (n->is_AbstractLock()) {
 364       // Put Lock and Unlock nodes on IGVN worklist to process them during
 365       // first IGVN optimization when escape information is still available.
 366       record_for_optimizer(n);
 367     } else if (n->is_Allocate()) {
 368       add_call_node(n->as_Call());
 369       record_for_optimizer(n);
 370     } else {
 371       if (n->is_CallStaticJava()) {
 372         const char* name = n->as_CallStaticJava()->_name;
 373         if (name != NULL && strcmp(name, "uncommon_trap") == 0)
 374           return; // Skip uncommon traps
 375       }
 376       // Don't mark as processed since call's arguments have to be processed.
 377       delayed_worklist->push(n);
 378       // Check if a call returns an object.
 379       if ((n->as_Call()->returns_pointer() &&
 380            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
 381           (n->is_CallStaticJava() &&
 382            n->as_CallStaticJava()->is_boxing_method())) {
 383         add_call_node(n->as_Call());
 384       }
 385     }
 386     return;
 387   }
 388   // Put this check here to process call arguments since some call nodes
 389   // point to phantom_obj.
 390   if (n_ptn == phantom_obj || n_ptn == null_obj)
 391     return; // Skip predefined nodes.
 392 
 393   int opcode = n->Opcode();
 394   switch (opcode) {
 395     case Op_AddP: {
 396       Node* base = get_addp_base(n);
 397       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 398       // Field nodes are created for all field types. They are used in
 399       // adjust_scalar_replaceable_state() and split_unique_types().
 400       // Note, non-oop fields will have only base edges in Connection
 401       // Graph because such fields are not used for oop loads and stores.
 402       int offset = address_offset(n, igvn);
 403       add_field(n, PointsToNode::NoEscape, offset);
 404       if (ptn_base == NULL) {
 405         delayed_worklist->push(n); // Process it later.
 406       } else {
 407         n_ptn = ptnode_adr(n_idx);
 408         add_base(n_ptn->as_Field(), ptn_base);
 409       }
 410       break;
 411     }
 412     case Op_CastX2P: {
 413       map_ideal_node(n, phantom_obj);
 414       break;
 415     }
 416     case Op_CastPP:
 417     case Op_CheckCastPP:
 418     case Op_EncodeP:
 419     case Op_DecodeN:
 420     case Op_EncodePKlass:
 421     case Op_DecodeNKlass: {
 422       add_local_var_and_edge(n, PointsToNode::NoEscape,
 423                              n->in(1), delayed_worklist);
 424       break;
 425     }
 426     case Op_CMoveP: {
 427       add_local_var(n, PointsToNode::NoEscape);
 428       // Do not add edges during the first iteration because some of
 429       // them may not be defined yet.
 430       delayed_worklist->push(n);
 431       break;
 432     }
 433     case Op_ConP:
 434     case Op_ConN:
 435     case Op_ConNKlass: {
 436       // assume all oop constants globally escape except for null
 437       PointsToNode::EscapeState es;
 438       const Type* t = igvn->type(n);
 439       if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
 440         es = PointsToNode::NoEscape;
 441       } else {
 442         es = PointsToNode::GlobalEscape;
 443       }
 444       add_java_object(n, es);
 445       break;
 446     }
 447     case Op_CreateEx: {
 448       // assume that all exception objects globally escape
 449       map_ideal_node(n, phantom_obj);
 450       break;
 451     }
 452     case Op_LoadKlass:
 453     case Op_LoadNKlass: {
 454       // Unknown class is loaded
 455       map_ideal_node(n, phantom_obj);
 456       break;
 457     }
 458     case Op_LoadP:
 459 #if INCLUDE_ZGC
 460     case Op_LoadBarrierSlowReg:
 461     case Op_LoadBarrierWeakSlowReg:
 462 #endif
 463     case Op_LoadN:
 464     case Op_LoadPLocked: {
 465       add_objload_to_connection_graph(n, delayed_worklist);
 466       break;
 467     }
 468     case Op_Parm: {
 469       map_ideal_node(n, phantom_obj);
 470       break;
 471     }
 472     case Op_PartialSubtypeCheck: {
 473       // Produces Null or notNull and is used only in CmpP, so
 474       // phantom_obj can be used.
 475       map_ideal_node(n, phantom_obj); // Result is unknown
 476       break;
 477     }
 478     case Op_Phi: {
 479       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 480       // ThreadLocal has RawPtr type.
 481       const Type* t = n->as_Phi()->type();
 482       if (t->make_ptr() != NULL) {
 483         add_local_var(n, PointsToNode::NoEscape);
 484         // Do not add edges during the first iteration because some of
 485         // them may not be defined yet.
 486         delayed_worklist->push(n);
 487       }
 488       break;
 489     }
 490     case Op_Proj: {
 491       // we are only interested in the oop result projection from a call
 492       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
 493           n->in(0)->as_Call()->returns_pointer()) {
 494         add_local_var_and_edge(n, PointsToNode::NoEscape,
 495                                n->in(0), delayed_worklist);
 496       }
 497 #if INCLUDE_ZGC
 498       else if (UseZGC) {
 499         if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
 500           add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), delayed_worklist);
 501         }
 502       }
 503 #endif
 504       break;
 505     }
 506     case Op_Rethrow: // Exception object escapes
 507     case Op_Return: {
 508       if (n->req() > TypeFunc::Parms &&
 509           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 510         // Treat Return value as LocalVar with GlobalEscape escape state.
 511         add_local_var_and_edge(n, PointsToNode::GlobalEscape,
 512                                n->in(TypeFunc::Parms), delayed_worklist);
 513       }
 514       break;
 515     }
 516     case Op_CompareAndExchangeP:
 517     case Op_CompareAndExchangeN:
 518 #if INCLUDE_SHENANDOAHGC
 519     case Op_ShenandoahCompareAndExchangeP:
 520     case Op_ShenandoahCompareAndExchangeN:
 521 #endif
 522     case Op_GetAndSetP:
 523     case Op_GetAndSetN: {
 524       add_objload_to_connection_graph(n, delayed_worklist);
 525       // fallthrough
 526     }
 527     case Op_StoreP:
 528     case Op_StoreN:
 529     case Op_StoreNKlass:
 530     case Op_StorePConditional:
 531 #if INCLUDE_SHENANDOAHGC
 532     case Op_ShenandoahWeakCompareAndSwapP:
 533     case Op_ShenandoahWeakCompareAndSwapN:
 534     case Op_ShenandoahCompareAndSwapP:
 535     case Op_ShenandoahCompareAndSwapN:
 536 #endif
 537     case Op_WeakCompareAndSwapP:
 538     case Op_WeakCompareAndSwapN:
 539     case Op_CompareAndSwapP:
 540     case Op_CompareAndSwapN: {
 541       Node* adr = n->in(MemNode::Address);
 542       const Type *adr_type = igvn->type(adr);
 543       adr_type = adr_type->make_ptr();
 544       if (adr_type == NULL) {
 545         break; // skip dead nodes
 546       }
 547       if (   adr_type->isa_oopptr()
 548           || (   (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
 549               && adr_type == TypeRawPtr::NOTNULL
 550               && adr->in(AddPNode::Address)->is_Proj()
 551               && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
 552         delayed_worklist->push(n); // Process it later.
 553 #ifdef ASSERT
 554         assert(adr->is_AddP(), "expecting an AddP");
 555         if (adr_type == TypeRawPtr::NOTNULL) {
 556           // Verify a raw address for a store captured by Initialize node.
 557           int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
 558           assert(offs != Type::OffsetBot, "offset must be a constant");
 559         }
 560 #endif
 561       } else {
 562         // Ignore copies of the displaced header to the BoxNode (OSR compilation).
 563         if (adr->is_BoxLock())
 564           break;
 565         // Stored value escapes in unsafe access.
 566         if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
 567           // Pointer stores in G1 barriers look like unsafe accesses.
 568           // Ignore such stores so that non-escaping allocations can
 569           // still be scalar replaced.
 570 #if INCLUDE_G1GC || INCLUDE_SHENANDOAHGC
 571           if ((UseG1GC || UseShenandoahGC) && adr->is_AddP()) {
 572             Node* base = get_addp_base(adr);
 573             if (base->Opcode() == Op_LoadP &&
 574                 base->in(MemNode::Address)->is_AddP()) {
 575               adr = base->in(MemNode::Address);
 576               Node* tls = get_addp_base(adr);
 577               if (tls->Opcode() == Op_ThreadLocal) {
 578                 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
 579 #if INCLUDE_G1GC && INCLUDE_SHENANDOAHGC
 580                 const int buf_offset = in_bytes(UseG1GC ? G1ThreadLocalData::satb_mark_queue_buffer_offset()
 581                                                         : ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
 582 #elif INCLUDE_G1GC
 583                 const int buf_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
 584 #else
 585                 const int buf_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
 586 #endif
 587                 if (offs == buf_offset) {
 588                   break; // G1 pre barrier previous oop value store.
 589                 }
     #if INCLUDE_G1GC
 590                 if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
 591                   break; // G1 post barrier card address store.
 592                 }
     #endif
 593               }
 594             }
 595           }
 596 #endif
 597           delayed_worklist->push(n); // Process unsafe access later.
 598           break;
 599         }
 600 #ifdef ASSERT
 601         n->dump(1);
 602         assert(false, "not unsafe or G1 barrier raw StoreP");
 603 #endif
 604       }
 605       break;
 606     }
 607     case Op_AryEq:
 608     case Op_HasNegatives:
 609     case Op_StrComp:
 610     case Op_StrEquals:
 611     case Op_StrIndexOf:
 612     case Op_StrIndexOfChar:
 613     case Op_StrInflatedCopy:
 614     case Op_StrCompressedCopy:
 615     case Op_EncodeISOArray: {
 616       add_local_var(n, PointsToNode::ArgEscape);
 617       delayed_worklist->push(n); // Process it later.
 618       break;
 619     }
 620     case Op_ThreadLocal: {
 621       add_java_object(n, PointsToNode::ArgEscape);
 622       break;
 623     }
 624 #if INCLUDE_SHENANDOAHGC
 625     case Op_ShenandoahEnqueueBarrier:
 626       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
 627       break;
 628     case Op_ShenandoahLoadReferenceBarrier:
 629       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), delayed_worklist);
 630 #endif
 631     default:
 632       ; // Do nothing for nodes not related to EA.
 633   }
 634   return;
 635 }
 636 
 637 #ifdef ASSERT
 638 #define ELSE_FAIL(name)                               \
 639       /* Should not be called for non-pointer types. */ \
 640       n->dump(1);                                       \
 641       assert(false, name);                              \
 642       break;
 643 #else
 644 #define ELSE_FAIL(name) \
 645       break;
 646 #endif
 647 
 648 // Add final simple edges to graph.
 649 void ConnectionGraph::add_final_edges(Node *n) {
 650   PointsToNode* n_ptn = ptnode_adr(n->_idx);
 651 #ifdef ASSERT
 652   if (_verify && n_ptn->is_JavaObject())
 653     return; // This method does not change graph for JavaObject.
 654 #endif
 655 
 656   if (n->is_Call()) {
 657     process_call_arguments(n->as_Call());
 658     return;
 659   }
 660   assert(n->is_Store() || n->is_LoadStore() ||
 661          (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
 662          "node should be registered already");
 663   int opcode = n->Opcode();
 664   switch (opcode) {
 665     case Op_AddP: {
 666       Node* base = get_addp_base(n);
 667       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 668       assert(ptn_base != NULL, "field's base should be registered");
 669       add_base(n_ptn->as_Field(), ptn_base);
 670       break;
 671     }
 672     case Op_CastPP:
 673     case Op_CheckCastPP:
 674     case Op_EncodeP:
 675     case Op_DecodeN:
 676     case Op_EncodePKlass:
 677     case Op_DecodeNKlass: {
 678       add_local_var_and_edge(n, PointsToNode::NoEscape,
 679                              n->in(1), NULL);
 680       break;
 681     }
 682     case Op_CMoveP: {
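           // Add an edge from the CMove's local var to each of its non-top,
           // non-null value inputs, since either may be the CMove's value.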
 683       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
 684         Node* in = n->in(i);
 685         if (in == NULL)
 686           continue;  // ignore NULL
 687         Node* uncast_in = in->uncast();
 688         if (uncast_in->is_top() || uncast_in == n)
 689           continue;  // ignore top or inputs which go back this node
 690         PointsToNode* ptn = ptnode_adr(in->_idx);
 691         assert(ptn != NULL, "node should be registered");
 692         add_edge(n_ptn, ptn);
 693       }
 694       break;
 695     }
 696     case Op_LoadP:
 697 #if INCLUDE_ZGC
 698     case Op_LoadBarrierSlowReg:
 699     case Op_LoadBarrierWeakSlowReg:
 700 #endif
 701     case Op_LoadN:
 702     case Op_LoadPLocked: {
 703       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 704       // ThreadLocal has RawPtr type.
 705       const Type* t = _igvn->type(n);
 706       if (t->make_ptr() != NULL) {
 707         Node* adr = n->in(MemNode::Address);
 708         add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
 709         break;
 710       }
 711       ELSE_FAIL("Op_LoadP");
 712     }
 713     case Op_Phi: {
 714       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 715       // ThreadLocal has RawPtr type.
 716       const Type* t = n->as_Phi()->type();
 717       if (t->make_ptr() != NULL) {
 718         for (uint i = 1; i < n->req(); i++) {
 719           Node* in = n->in(i);
 720           if (in == NULL)
 721             continue;  // ignore NULL
 722           Node* uncast_in = in->uncast();
 723           if (uncast_in->is_top() || uncast_in == n)
 724             continue;  // ignore top or inputs which go back this node
 725           PointsToNode* ptn = ptnode_adr(in->_idx);
 726           assert(ptn != NULL, "node should be registered");
 727           add_edge(n_ptn, ptn);
 728         }
 729         break;
 730       }
 731       ELSE_FAIL("Op_Phi");
 732     }
 733     case Op_Proj: {
 734       // we are only interested in the oop result projection from a call
 735       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
 736           n->in(0)->as_Call()->returns_pointer()) {
 737         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
 738         break;
 739       }
 740 #if INCLUDE_ZGC
 741       else if (UseZGC) {
 742         if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
 743           add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), NULL);
 744           break;
 745         }
 746       }
 747 #endif
 748       ELSE_FAIL("Op_Proj");
 749     }
 750     case Op_Rethrow: // Exception object escapes
 751     case Op_Return: {
 752       if (n->req() > TypeFunc::Parms &&
 753           _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 754         // Treat Return value as LocalVar with GlobalEscape escape state.
 755         add_local_var_and_edge(n, PointsToNode::GlobalEscape,
 756                                n->in(TypeFunc::Parms), NULL);
 757         break;
 758       }
 759       ELSE_FAIL("Op_Return");
 760     }
 761     case Op_StoreP:
 762     case Op_StoreN:
 763     case Op_StoreNKlass:
 764     case Op_StorePConditional:
 765     case Op_CompareAndExchangeP:
 766     case Op_CompareAndExchangeN:
 767     case Op_CompareAndSwapP:
 768     case Op_CompareAndSwapN:
 769     case Op_WeakCompareAndSwapP:
 770     case Op_WeakCompareAndSwapN:
 771 #if INCLUDE_SHENANDOAHGC
 772     case Op_ShenandoahCompareAndExchangeP:
 773     case Op_ShenandoahCompareAndExchangeN:
 774     case Op_ShenandoahCompareAndSwapP:
 775     case Op_ShenandoahCompareAndSwapN:
 776     case Op_ShenandoahWeakCompareAndSwapP:
 777     case Op_ShenandoahWeakCompareAndSwapN:
 778 #endif
 779     case Op_GetAndSetP:
 780     case Op_GetAndSetN: {
 781       Node* adr = n->in(MemNode::Address);
 782       const Type *adr_type = _igvn->type(adr);
 783       adr_type = adr_type->make_ptr();
 784 #ifdef ASSERT
 785       if (adr_type == NULL) {
 786         n->dump(1);
 787         assert(adr_type != NULL, "dead node should not be on list");
 788         break;
 789       }
 790 #endif
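           // LoadStore nodes which return the previous value also act as a
           // load: connect the result to the field addressed by 'adr'.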
 791       if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
 792 #if INCLUDE_SHENANDOAHGC
 793           opcode == Op_ShenandoahCompareAndExchangeN || opcode == Op_ShenandoahCompareAndExchangeP ||
 794 #endif
 795           opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
 796         add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
 797       }
 798       if (   adr_type->isa_oopptr()
 799           || (   (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
 800               && adr_type == TypeRawPtr::NOTNULL
 801               && adr->in(AddPNode::Address)->is_Proj()
 802               && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
 803         // Point Address to Value
 804         PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
 805         assert(adr_ptn != NULL &&
 806                adr_ptn->as_Field()->is_oop(), "node should be registered");
 807         Node *val = n->in(MemNode::ValueIn);
 808         PointsToNode* ptn = ptnode_adr(val->_idx);
 809         assert(ptn != NULL, "node should be registered");
 810         add_edge(adr_ptn, ptn);
 811         break;
 812       } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
 813         // Stored value escapes in unsafe access.
 814         Node *val = n->in(MemNode::ValueIn);
 815         PointsToNode* ptn = ptnode_adr(val->_idx);
 816         assert(ptn != NULL, "node should be registered");
 817         set_escape_state(ptn, PointsToNode::GlobalEscape);
 818         // Add edge to object for unsafe access with offset.
 819         PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
 820         assert(adr_ptn != NULL, "node should be registered");
 821         if (adr_ptn->is_Field()) {
 822           assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
 823           add_edge(adr_ptn, ptn);
 824         }
 825         break;
 826       }
 827       ELSE_FAIL("Op_StoreP");
 828     }
 829     case Op_AryEq:
 830     case Op_HasNegatives:
 831     case Op_StrComp:
 832     case Op_StrEquals:
 833     case Op_StrIndexOf:
 834     case Op_StrIndexOfChar:
 835     case Op_StrInflatedCopy:
 836     case Op_StrCompressedCopy:
 837     case Op_EncodeISOArray: {
 838       // char[]/byte[] arrays passed to string intrinsics do not escape but
 839       // they are not scalar replaceable. Adjust escape state for them.
 840       // Start from the in(2) edge since in(1) is the memory edge.
 841       for (uint i = 2; i < n->req(); i++) {
 842         Node* adr = n->in(i);
 843         const Type* at = _igvn->type(adr);
 844         if (!adr->is_top() && at->isa_ptr()) {
 845           assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
 846                  at->isa_ptr() != NULL, "expecting a pointer");
 847           if (adr->is_AddP()) {
 848             adr = get_addp_base(adr);
 849           }
 850           PointsToNode* ptn = ptnode_adr(adr->_idx);
 851           assert(ptn != NULL, "node should be registered");
 852           add_edge(n_ptn, ptn);
 853         }
 854       }
 855       break;
 856     }
 857 #if INCLUDE_SHENANDOAHGC
 858     case Op_ShenandoahEnqueueBarrier:
 859       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
 860       break;
 861     case Op_ShenandoahLoadReferenceBarrier:
 862       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), NULL);
 863       break;
 864 #endif
 865     default: {
 866       // This method should be called only for EA-specific nodes which may
 867       // have missed some edges when they were created.
 868 #ifdef ASSERT
 869       n->dump(1);
 870 #endif
 871       guarantee(false, "unknown node");
 872     }
 873   }
 874   return;
 875 }
 876 
 877 void ConnectionGraph::add_call_node(CallNode* call) {
 878   assert(call->returns_pointer(), "only for call which returns pointer");
 879   uint call_idx = call->_idx;
 880   if (call->is_Allocate()) {
 881     Node* k = call->in(AllocateNode::KlassNode);
 882     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
 883     assert(kt != NULL, "TypeKlassPtr required.");
 884     ciKlass* cik = kt->klass();
 885     PointsToNode::EscapeState es = PointsToNode::NoEscape;
 886     bool scalar_replaceable = true;
 887     if (call->is_AllocateArray()) {
 888       if (!cik->is_array_klass()) { // StressReflectiveCode
 889         es = PointsToNode::GlobalEscape;
 890       } else {
 891         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
 892         if (length < 0 || length > EliminateAllocationArraySizeLimit) {
 893           // Not scalar replaceable if the length is not constant or too big.
 894           scalar_replaceable = false;
 895         }
 896       }
 897     } else {  // Allocate instance
 898       if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
 899           cik->is_subclass_of(_compile->env()->Reference_klass()) ||
 900          !cik->is_instance_klass() || // StressReflectiveCode
 901          !cik->as_instance_klass()->can_be_instantiated() ||
 902           cik->as_instance_klass()->has_finalizer()) {
 903         es = PointsToNode::GlobalEscape;
 904       }
 905     }
 906     add_java_object(call, es);
 907     PointsToNode* ptn = ptnode_adr(call_idx);
 908     if (!scalar_replaceable && ptn->scalar_replaceable()) {
 909       ptn->set_scalar_replaceable(false);
 910     }
 911   } else if (call->is_CallStaticJava()) {
 912     // Call nodes could be different types:
 913     //
 914     // 1. CallDynamicJavaNode (what happened during call is unknown):
 915     //
 916     //    - mapped to GlobalEscape JavaObject node if oop is returned;
 917     //
 918     //    - all oop arguments are escaping globally;
 919     //
 920     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
 921     //
 922     //    - the same as CallDynamicJavaNode if bytecode analysis can't be done;
 923     //
 924     //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
 925     //    - mapped to NoEscape JavaObject node if non-escaping object allocated
 926     //      during call is returned;
 927     //    - mapped to ArgEscape LocalVar node pointing to object arguments
 928     //      which are returned and do not escape during the call;
 929     //
 930     //    - oop arguments escaping status is defined by bytecode analysis;
 931     //
 932     // For a static call, we know exactly what method is being called.
 933     // Use bytecode estimator to record whether the call's return value escapes.
 934     ciMethod* meth = call->as_CallJava()->method();
 935     if (meth == NULL) {
 936       const char* name = call->as_CallStaticJava()->_name;
 937       assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
 938       // Returns a newly allocated unescaped object.
 939       add_java_object(call, PointsToNode::NoEscape);
 940       ptnode_adr(call_idx)->set_scalar_replaceable(false);
 941     } else if (meth->is_boxing_method()) {
 942       // Returns boxing object
 943       PointsToNode::EscapeState es;
 944       vmIntrinsics::ID intr = meth->intrinsic_id();
 945       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
 946         // It does not escape if object is always allocated.
 947         es = PointsToNode::NoEscape;
 948       } else {
 949         // It escapes globally if object could be loaded from cache.
 950         es = PointsToNode::GlobalEscape;
 951       }
 952       add_java_object(call, es);
 953     } else {
 954       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
 955       call_analyzer->copy_dependencies(_compile->dependencies());
 956       if (call_analyzer->is_return_allocated()) {
 957         // Returns a newly allocated unescaped object, simply
 958         // update dependency information.
 959         // Mark it as NoEscape so that objects referenced by
 960       // its fields will be marked as NoEscape at least.
 961         add_java_object(call, PointsToNode::NoEscape);
 962         ptnode_adr(call_idx)->set_scalar_replaceable(false);
 963       } else {
 964         // Determine whether any arguments are returned.
 965         const TypeTuple* d = call->tf()->domain();
 966         bool ret_arg = false;
 967         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 968           if (d->field_at(i)->isa_ptr() != NULL &&
 969               call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
 970             ret_arg = true;
 971             break;
 972           }
 973         }
 974         if (ret_arg) {
 975           add_local_var(call, PointsToNode::ArgEscape);
 976         } else {
 977           // Returns unknown object.
 978           map_ideal_node(call, phantom_obj);
 979         }
 980       }
 981     }
 982   } else {
 983     // Another type of call; assume the worst case:
 984     // returned value is unknown and globally escapes.
 985     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
 986     map_ideal_node(call, phantom_obj);
 987   }
 988 }
 989 
 990 void ConnectionGraph::process_call_arguments(CallNode *call) {
 991     bool is_arraycopy = false;
 992     switch (call->Opcode()) {
 993 #ifdef ASSERT
 994     case Op_Allocate:
 995     case Op_AllocateArray:
 996     case Op_Lock:
 997     case Op_Unlock:
 998       assert(false, "should be done already");
 999       break;
1000 #endif
1001     case Op_ArrayCopy:
1002     case Op_CallLeafNoFP:
1003       // Most array copies are ArrayCopy nodes at this point but there
1004       // are still a few direct calls to the copy subroutines (See
1005       // PhaseStringOpts::copy_string())
1006       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
1007         call->as_CallLeaf()->is_call_to_arraycopystub();
1008       // fall through
1009     case Op_CallLeaf: {
1010       // Stub calls: objects do not escape but they are not scalar replaceable.
1011       // Adjust escape state for outgoing arguments.
1012       const TypeTuple * d = call->tf()->domain();
1013       bool src_has_oops = false;
1014       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1015         const Type* at = d->field_at(i);
1016         Node *arg = call->in(i);
1017         if (arg == NULL) {
1018           continue;
1019         }
1020         const Type *aat = _igvn->type(arg);
1021         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
1022           continue;
1023         if (arg->is_AddP()) {
1024           //
1025           // The inline_native_clone() case when the arraycopy stub is called
1026           // after the allocation before Initialize and CheckCastPP nodes.
1027           // Or normal arraycopy for object arrays case.
1028           //
1029           // Set AddP's base (Allocate) as not scalar replaceable since
1030           // pointer to the base (with offset) is passed as argument.
1031           //
1032           arg = get_addp_base(arg);
1033         }
1034         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1035         assert(arg_ptn != NULL, "should be registered");
1036         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
1037         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
1038           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
1039                  aat->isa_ptr() != NULL, "expecting a pointer");
1040           bool arg_has_oops = aat->isa_oopptr() &&
1041                               (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
1042                                (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
1043           if (i == TypeFunc::Parms) {
1044             src_has_oops = arg_has_oops;
1045           }
1046           //
1047           // src or dst could be j.l.Object when other is basic type array:
1048           //
1049           //   arraycopy(char[],0,Object*,0,size);
1050           //   arraycopy(Object*,0,char[],0,size);
1051           //
1052           // Don't add edges in such cases.
1053           //
1054           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
1055                                        arg_has_oops && (i > TypeFunc::Parms);
1056 #ifdef ASSERT
1057           if (!(is_arraycopy ||
1058                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
1059                 (call->as_CallLeaf()->_name != NULL &&
1060                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
1061                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
1062                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
1063                   strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
1064                   strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
1065                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
1066                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
1067                   strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||
1068                   strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||
1069                   strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
1070                   strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
1071                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
1072                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
1073                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
1074                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
1075                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
1076                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
1077                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
1078                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
1079                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
1080                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
1081                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
1082                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
1083                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0)
1084                  ))) {
1085             call->dump();
1086             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
1087           }
1088 #endif
1089           // Always process arraycopy's destination object since
1090           // we need to add all possible edges to references in
1091           // source object.
1092           if (arg_esc >= PointsToNode::ArgEscape &&
1093               !arg_is_arraycopy_dest) {
1094             continue;
1095           }
1096           PointsToNode::EscapeState es = PointsToNode::ArgEscape;
1097           if (call->is_ArrayCopy()) {
1098             ArrayCopyNode* ac = call->as_ArrayCopy();
1099             if (ac->is_clonebasic() ||
1100                 ac->is_arraycopy_validated() ||
1101                 ac->is_copyof_validated() ||
1102                 ac->is_copyofrange_validated()) {
1103               es = PointsToNode::NoEscape;
1104             }
1105           }
1106           set_escape_state(arg_ptn, es);
1107           if (arg_is_arraycopy_dest) {
1108             Node* src = call->in(TypeFunc::Parms);
1109             if (src->is_AddP()) {
1110               src = get_addp_base(src);
1111             }
1112             PointsToNode* src_ptn = ptnode_adr(src->_idx);
1113             assert(src_ptn != NULL, "should be registered");
1114             if (arg_ptn != src_ptn) {
1115               // Special arraycopy edge:
1116               // A destination object's field can't have the source object
1117               // as base since the objects' escape states are not related.
1118               // Only escape state of destination object's fields affects
1119               // escape state of fields in source object.
1120               add_arraycopy(call, es, src_ptn, arg_ptn);
1121             }
1122           }
1123         }
1124       }
1125       break;
1126     }
1127     case Op_CallStaticJava: {
1128       // For a static call, we know exactly what method is being called.
1129       // Use bytecode estimator to record the call's escape effects.
1130 #ifdef ASSERT
1131       const char* name = call->as_CallStaticJava()->_name;
1132       assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1133 #endif
1134       ciMethod* meth = call->as_CallJava()->method();
1135       if ((meth != NULL) && meth->is_boxing_method()) {
1136         break; // Boxing methods do not modify any oops.
1137       }
1138       BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
1139       // fall-through if not a Java method or no analyzer information
1140       if (call_analyzer != NULL) {
1141         PointsToNode* call_ptn = ptnode_adr(call->_idx);
1142         const TypeTuple* d = call->tf()->domain();
1143         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1144           const Type* at = d->field_at(i);
1145           int k = i - TypeFunc::Parms;
1146           Node* arg = call->in(i);
1147           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1148           if (at->isa_ptr() != NULL &&
1149               call_analyzer->is_arg_returned(k)) {
1150             // The call returns arguments.
1151             if (call_ptn != NULL) { // Is call's result used?
1152               assert(call_ptn->is_LocalVar(), "node should be registered");
1153               assert(arg_ptn != NULL, "node should be registered");
1154               add_edge(call_ptn, arg_ptn);
1155             }
1156           }
1157           if (at->isa_oopptr() != NULL &&
1158               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
1159             if (!call_analyzer->is_arg_stack(k)) {
1160               // The argument globally escapes.
1161               set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
1162             } else {
1163               set_escape_state(arg_ptn, PointsToNode::ArgEscape);
1164               if (!call_analyzer->is_arg_local(k)) {
1165                 // The argument itself doesn't escape, but any fields might
1166                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
1167               }
1168             }
1169           }
1170         }
1171         if (call_ptn != NULL && call_ptn->is_LocalVar()) {
1172           // The call returns arguments.
1173           assert(call_ptn->edge_count() > 0, "sanity");
1174           if (!call_analyzer->is_return_local()) {
1175             // The call also returns an unknown object.
1176             add_edge(call_ptn, phantom_obj);
1177           }
1178         }
1179         break;
1180       }
1181     }
1182     default: {
1183       // Fall-through here if not a Java method or no analyzer information
1184       // or some other type of call, assume the worst case: all arguments
1185       // globally escape.
1186       const TypeTuple* d = call->tf()->domain();
1187       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1188         const Type* at = d->field_at(i);
1189         if (at->isa_oopptr() != NULL) {
1190           Node* arg = call->in(i);
1191           if (arg->is_AddP()) {
1192             arg = get_addp_base(arg);
1193           }
1194           assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
1195           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
1196         }
1197       }
1198     }
1199   }
1200 }
1201 
1202 
1203 // Finish Graph construction.
1204 bool ConnectionGraph::complete_connection_graph(
1205                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
1206                          GrowableArray<JavaObjectNode*>& non_escaped_worklist,
1207                          GrowableArray<JavaObjectNode*>& java_objects_worklist,
1208                          GrowableArray<FieldNode*>&      oop_fields_worklist) {
1209   // Normally only 1-3 passes are needed to build the Connection Graph depending
1210   // on graph complexity. 8 passes were observed in jvm2008 compiler.compiler.
1211   // Set the limit to 20 to catch the situation when something goes wrong and
1212   // bail out of Escape Analysis.
1213   // Also limit build time to 20 sec (60 in debug VM), see the EscapeAnalysisTimeout flag.
1214 #define CG_BUILD_ITER_LIMIT 20
1215 
1216   // Propagate GlobalEscape and ArgEscape escape states and check that
1217   // we still have non-escaping objects. The method pushes onto _worklist
1218   // the Field nodes which reference phantom_object.
1219   if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
1220     return false; // Nothing to do.
1221   }
1222   // Now propagate references to all JavaObject nodes.
1223   int java_objects_length = java_objects_worklist.length();
1224   elapsedTimer time;
1225   bool timeout = false;
1226   int new_edges = 1;
1227   int iterations = 0;
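       // Iterate to a fixed point: keep adding reference edges until no new
       // edges appear or the iteration/time limits are reached.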
1228   do {
1229     while ((new_edges > 0) &&
1230            (iterations++ < CG_BUILD_ITER_LIMIT)) {
1231       double start_time = time.seconds();
1232       time.start();
1233       new_edges = 0;
1234       // Propagate references to phantom_object for nodes pushed on _worklist
1235       // by find_non_escaped_objects() and find_field_value().
1236       new_edges += add_java_object_edges(phantom_obj, false);
1237       for (int next = 0; next < java_objects_length; ++next) {
1238         JavaObjectNode* ptn = java_objects_worklist.at(next);
1239         new_edges += add_java_object_edges(ptn, true);
1240 
1241 #define SAMPLE_SIZE 4
1242         if ((next % SAMPLE_SIZE) == 0) {
1243           // Every 4 iterations, estimate how much time it will take
1244           // to complete graph construction.
1245           time.stop();
1246           // Poll for requests from the shutdown mechanism to quiesce the compiler
1247           // because Connection Graph construction may take a long time.
1248           CompileBroker::maybe_block();
1249           double stop_time = time.seconds();
1250           double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
1251           double time_until_end = time_per_iter * (double)(java_objects_length - next);
1252           if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
1253             timeout = true;
1254             break; // Timeout
1255           }
1256           start_time = stop_time;
1257           time.start();
1258         }
1259 #undef SAMPLE_SIZE
1260 
1261       }
1262       if (timeout) break;
1263       if (new_edges > 0) {
1264         // Update escape states on each iteration if graph was updated.
1265         if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
1266           return false; // Nothing to do.
1267         }
1268       }
1269       time.stop();
1270       if (time.seconds() >= EscapeAnalysisTimeout) {
1271         timeout = true;
1272         break;
1273       }
1274     }
1275     if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) {
1276       time.start();
1277       // Find fields which have unknown value.
1278       int fields_length = oop_fields_worklist.length();
1279       for (int next = 0; next < fields_length; next++) {
1280         FieldNode* field = oop_fields_worklist.at(next);
1281         if (field->edge_count() == 0) {
1282           new_edges += find_field_value(field);
1283           // This code may have added new edges to phantom_object.
1284           // Another pass is needed to propagate references to phantom_object.
1285         }
1286       }
1287       time.stop();
1288       if (time.seconds() >= EscapeAnalysisTimeout) {
1289         timeout = true;
1290         break;
1291       }
1292     } else {
1293       new_edges = 0; // Bailout
1294     }
1295   } while (new_edges > 0);
1296 
1297   // Bailout if passed limits.
1298   if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) {
1299     Compile* C = _compile;
1300     if (C->log() != NULL) {
1301       C->log()->begin_elem("connectionGraph_bailout reason='reached ");
1302       C->log()->text("%s", timeout ? "time" : "iterations");
1303       C->log()->end_elem(" limit'");
1304     }
1305     assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
1306            time.seconds(), iterations, nodes_size(), ptnodes_worklist.length());
1307     // Possible infinite build_connection_graph loop,
1308     // bailout (no changes to ideal graph were made).
1309     return false;
1310   }
1311 #ifdef ASSERT
1312   if (Verbose && PrintEscapeAnalysis) {
1313     tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d",
1314                   iterations, nodes_size(), ptnodes_worklist.length());
1315   }
1316 #endif
1317 
1318 #undef CG_BUILD_ITER_LIMIT
1319 
1320   // Find fields initialized by NULL for non-escaping Allocations.
1321   int non_escaped_length = non_escaped_worklist.length();
1322   for (int next = 0; next < non_escaped_length; next++) {
1323     JavaObjectNode* ptn = non_escaped_worklist.at(next);
1324     PointsToNode::EscapeState es = ptn->escape_state();
1325     assert(es <= PointsToNode::ArgEscape, "sanity");
1326     if (es == PointsToNode::NoEscape) {
1327       if (find_init_values(ptn, null_obj, _igvn) > 0) {
1328         // Adding references to the NULL object does not change escape states
1329         // since it does not escape. Also, no fields are added to the NULL object.
1330         add_java_object_edges(null_obj, false);
1331       }
1332     }
1333     Node* n = ptn->ideal_node();
1334     if (n->is_Allocate()) {
1335       // The object allocated by this Allocate node will never be
1336       // seen by another thread. Mark it so that when it is
1337       // expanded no MemBarStoreStore is added.
1338       InitializeNode* ini = n->as_Allocate()->initialization();
1339       if (ini != NULL)
1340         ini->set_does_not_escape();
1341     }
1342   }
1343   return true; // Finished graph construction.
1344 }
1345 
1346 // Propagate GlobalEscape and ArgEscape escape states to all nodes
1347 // and check that we still have non-escaping java objects.
1348 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
1349                                                GrowableArray<JavaObjectNode*>& non_escaped_worklist) {
1350   GrowableArray<PointsToNode*> escape_worklist;
1351   // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
1352   int ptnodes_length = ptnodes_worklist.length();
1353   for (int next = 0; next < ptnodes_length; ++next) {
1354     PointsToNode* ptn = ptnodes_worklist.at(next);
1355     if (ptn->escape_state() >= PointsToNode::ArgEscape ||
1356         ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
1357       escape_worklist.push(ptn);
1358     }
1359   }
1360   // Set escape states to referenced nodes (edges list).
1361   while (escape_worklist.length() > 0) {
1362     PointsToNode* ptn = escape_worklist.pop();
1363     PointsToNode::EscapeState es  = ptn->escape_state();
1364     PointsToNode::EscapeState field_es = ptn->fields_escape_state();
1365     if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
1366         es >= PointsToNode::ArgEscape) {
1367       // A GlobalEscape or ArgEscape state of a field means its value is unknown.
1368       if (add_edge(ptn, phantom_obj)) {
1369         // New edge was added
1370         add_field_uses_to_worklist(ptn->as_Field());
1371       }
1372     }
1373     for (EdgeIterator i(ptn); i.has_next(); i.next()) {
1374       PointsToNode* e = i.get();
1375       if (e->is_Arraycopy()) {
1376         assert(ptn->arraycopy_dst(), "sanity");
1377         // Propagate only fields escape state through arraycopy edge.
1378         if (e->fields_escape_state() < field_es) {
1379           set_fields_escape_state(e, field_es);
1380           escape_worklist.push(e);
1381         }
1382       } else if (es >= field_es) {
1383         // fields_escape_state is also set to 'es' if it is less than 'es'.
1384         if (e->escape_state() < es) {
1385           set_escape_state(e, es);
1386           escape_worklist.push(e);
1387         }
1388       } else {
1389         // Propagate field escape state.
1390         bool es_changed = false;
1391         if (e->fields_escape_state() < field_es) {
1392           set_fields_escape_state(e, field_es);
1393           es_changed = true;
1394         }
1395         if ((e->escape_state() < field_es) &&
1396             e->is_Field() && ptn->is_JavaObject() &&
1397             e->as_Field()->is_oop()) {
1398           // Change escape state of referenced fields.
1399           set_escape_state(e, field_es);
1400           es_changed = true;
1401         } else if (e->escape_state() < es) {
1402           set_escape_state(e, es);
1403           es_changed = true;
1404         }
1405         if (es_changed) {
1406           escape_worklist.push(e);
1407         }
1408       }
1409     }
1410   }
1411   // Remove escaped objects from non_escaped list.
1412   for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) {
1413     JavaObjectNode* ptn = non_escaped_worklist.at(next);
1414     if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
1415       non_escaped_worklist.delete_at(next);
1416     }
1417     if (ptn->escape_state() == PointsToNode::NoEscape) {
1418       // Find fields in non-escaped allocations which have unknown value.
1419       find_init_values(ptn, phantom_obj, NULL);
1420     }
1421   }
1422   return (non_escaped_worklist.length() > 0);
1423 }
1424 
1425 // Add all references to JavaObject node by walking over all uses.
1426 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
1427   int new_edges = 0;
1428   if (populate_worklist) {
1429     // Populate _worklist with the uses of jobj's uses.
1430     for (UseIterator i(jobj); i.has_next(); i.next()) {
1431       PointsToNode* use = i.get();
1432       if (use->is_Arraycopy())
1433         continue;
1434       add_uses_to_worklist(use);
1435       if (use->is_Field() && use->as_Field()->is_oop()) {
1436         // Put on worklist all field's uses (loads) and
1437         // related field nodes (same base and offset).
1438         add_field_uses_to_worklist(use->as_Field());
1439       }
1440     }
1441   }
1442   for (int l = 0; l < _worklist.length(); l++) {
1443     PointsToNode* use = _worklist.at(l);
1444     if (PointsToNode::is_base_use(use)) {
1445       // Add reference from jobj to field and from field to jobj (field's base).
1446       use = PointsToNode::get_use_node(use)->as_Field();
1447       if (add_base(use->as_Field(), jobj)) {
1448         new_edges++;
1449       }
1450       continue;
1451     }
1452     assert(!use->is_JavaObject(), "sanity");
1453     if (use->is_Arraycopy()) {
1454       if (jobj == null_obj) // NULL object does not have field edges
1455         continue;
1456       // Add an edge from the Arraycopy node to the arraycopy's source java object
1457       if (add_edge(use, jobj)) {
1458         jobj->set_arraycopy_src();
1459         new_edges++;
1460       }
1461       // and stop here.
1462       continue;
1463     }
1464     if (!add_edge(use, jobj))
1465       continue; // No new edge added, there was such edge already.
1466     new_edges++;
1467     if (use->is_LocalVar()) {
1468       add_uses_to_worklist(use);
1469       if (use->arraycopy_dst()) {
1470         for (EdgeIterator i(use); i.has_next(); i.next()) {
1471           PointsToNode* e = i.get();
1472           if (e->is_Arraycopy()) {
1473             if (jobj == null_obj) // NULL object does not have field edges
1474               continue;
1475             // Add edge from arraycopy's destination java object to Arraycopy node.
1476             if (add_edge(jobj, e)) {
1477               new_edges++;
1478               jobj->set_arraycopy_dst();
1479             }
1480           }
1481         }
1482       }
1483     } else {
1484       // A new edge was added from the field to the value stored in it.
1485       // Put on the worklist all of the field's uses (loads) and
1486       // related field nodes (same base and offset).
1487       add_field_uses_to_worklist(use->as_Field());
1488     }
1489   }
1490   _worklist.clear();
1491   _in_worklist.Reset();
1492   return new_edges;
1493 }
1494 
1495 // Put on worklist all related field nodes.
1496 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
1497   assert(field->is_oop(), "sanity");
1498   int offset = field->offset();
1499   add_uses_to_worklist(field);
1500   // Loop over all bases of this field and push on worklist Field nodes
1501   // with the same offset and base (since they may reference the same field).
1502   for (BaseIterator i(field); i.has_next(); i.next()) {
1503     PointsToNode* base = i.get();
1504     add_fields_to_worklist(field, base);
1505     // Check if the base was the source object of an arraycopy and go over the arraycopy's
1506     // destination objects since values stored to a field of the source object are
1507     // accessible by uses (loads) of fields of the destination objects.
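         // Illustrative example (hypothetical names):
         //
         //    System.arraycopy(src, 0, dst, 0, n);
         //    Object o = dst[0]; // may observe a value previously stored into src[0]
         //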
1508     if (base->arraycopy_src()) {
1509       for (UseIterator j(base); j.has_next(); j.next()) {
1510         PointsToNode* arycp = j.get();
1511         if (arycp->is_Arraycopy()) {
1512           for (UseIterator k(arycp); k.has_next(); k.next()) {
1513             PointsToNode* abase = k.get();
1514             if (abase->arraycopy_dst() && abase != base) {
1515               // Look for the same arraycopy reference.
1516               add_fields_to_worklist(field, abase);
1517             }
1518           }
1519         }
1520       }
1521     }
1522   }
1523 }
1524 
1525 // Put on worklist all related field nodes.
1526 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
1527   int offset = field->offset();
1528   if (base->is_LocalVar()) {
1529     for (UseIterator j(base); j.has_next(); j.next()) {
1530       PointsToNode* f = j.get();
1531       if (PointsToNode::is_base_use(f)) { // Field
1532         f = PointsToNode::get_use_node(f);
1533         if (f == field || !f->as_Field()->is_oop())
1534           continue;
1535         int offs = f->as_Field()->offset();
1536         if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
1537           add_to_worklist(f);
1538         }
1539       }
1540     }
1541   } else {
1542     assert(base->is_JavaObject(), "sanity");
1543     if (// Skip phantom_object since it is only used to indicate that
1544         // this field's content globally escapes.
1545         (base != phantom_obj) &&
1546         // NULL object node does not have fields.
1547         (base != null_obj)) {
1548       for (EdgeIterator i(base); i.has_next(); i.next()) {
1549         PointsToNode* f = i.get();
1550         // Skip arraycopy edge since store to destination object field
1551         // does not update value in source object field.
1552         if (f->is_Arraycopy()) {
1553           assert(base->arraycopy_dst(), "sanity");
1554           continue;
1555         }
1556         if (f == field || !f->as_Field()->is_oop())
1557           continue;
1558         int offs = f->as_Field()->offset();
1559         if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
1560           add_to_worklist(f);
1561         }
1562       }
1563     }
1564   }
1565 }
1566 
1567 // Find fields which have unknown value.
1568 int ConnectionGraph::find_field_value(FieldNode* field) {
1569   // Escaped fields should have init value already.
1570   assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
1571   int new_edges = 0;
1572   for (BaseIterator i(field); i.has_next(); i.next()) {
1573     PointsToNode* base = i.get();
1574     if (base->is_JavaObject()) {
1575       // Skip Allocate's fields which will be processed later.
1576       if (base->ideal_node()->is_Allocate())
1577         return 0;
1578       assert(base == null_obj, "only NULL ptr base expected here");
1579     }
1580   }
1581   if (add_edge(field, phantom_obj)) {
1582     // New edge was added
1583     new_edges++;
1584     add_field_uses_to_worklist(field);
1585   }
1586   return new_edges;
1587 }
1588 
1589 // Find fields' initializing values for allocations.
1590 int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) {
1591   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1592   int new_edges = 0;
1593   Node* alloc = pta->ideal_node();
1594   if (init_val == phantom_obj) {
1595     // Do nothing for Allocate nodes since their field values are
1596     // "known" unless they are initialized by arraycopy/clone.
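         // (Illustrative: the destination of Object.clone() or System.arraycopy()
         // receives field values copied from another object, which are unknown
         // here; hence the arraycopy_dst() check below.)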
1597     if (alloc->is_Allocate() && !pta->arraycopy_dst())
1598       return 0;
1599     assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
1600 #ifdef ASSERT
1601     if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) {
1602       const char* name = alloc->as_CallStaticJava()->_name;
1603       assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
1604     }
1605 #endif
1606     // A non-escaped allocation returned from a Java or runtime call has
1607     // unknown values in its fields.
1608     for (EdgeIterator i(pta); i.has_next(); i.next()) {
1609       PointsToNode* field = i.get();
1610       if (field->is_Field() && field->as_Field()->is_oop()) {
1611         if (add_edge(field, phantom_obj)) {
1612           // New edge was added
1613           new_edges++;
1614           add_field_uses_to_worklist(field->as_Field());
1615         }
1616       }
1617     }
1618     return new_edges;
1619   }
1620   assert(init_val == null_obj, "sanity");
1621   // Do nothing for Call nodes since their field values are unknown.
1622   if (!alloc->is_Allocate())
1623     return 0;
1624 
1625   InitializeNode* ini = alloc->as_Allocate()->initialization();
1626   bool visited_bottom_offset = false;
1627   GrowableArray<int> offsets_worklist;
1628 
1629   // Check if an oop field's initializing value is recorded and add
1630   // a corresponding NULL reference to the field if it is not recorded.
1631   // The Connection Graph does not record a default initialization by NULL
1632   // captured by the Initialize node.
1633   //
1634   for (EdgeIterator i(pta); i.has_next(); i.next()) {
1635     PointsToNode* field = i.get(); // Field (AddP)
1636     if (!field->is_Field() || !field->as_Field()->is_oop())
1637       continue; // Not oop field
1638     int offset = field->as_Field()->offset();
1639     if (offset == Type::OffsetBot) {
1640       if (!visited_bottom_offset) {
1641         // OffsetBot is used to reference an array's element,
1642         // so always add a reference to NULL to all Field nodes since we don't
1643         // know which element is referenced.
1644         if (add_edge(field, null_obj)) {
1645           // New edge was added
1646           new_edges++;
1647           add_field_uses_to_worklist(field->as_Field());
1648           visited_bottom_offset = true;
1649         }
1650       }
1651     } else {
1652       // Check only oop fields.
1653       const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
1654       if (adr_type->isa_rawptr()) {
1655 #ifdef ASSERT
1656         // Raw pointers are used for initializing stores, so skip this field
1657         // since its value should be recorded already.
1658         Node* base = get_addp_base(field->ideal_node());
1659         assert(adr_type->isa_rawptr() && base->is_Proj() &&
1660                (base->in(0) == alloc),"unexpected pointer type");
1661 #endif
1662         continue;
1663       }
1664       if (!offsets_worklist.contains(offset)) {
1665         offsets_worklist.append(offset);
1666         Node* value = NULL;
1667         if (ini != NULL) {
1668           // StoreP::memory_type() == T_ADDRESS
1669           BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
1670           Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
1671           // Make sure the initializing store has the same type as this AddP.
1672           // This AddP may reference a non-existing field because it is on a
1673           // dead branch of a bimorphic call which has not been eliminated yet.
1674           if (store != NULL && store->is_Store() &&
1675               store->as_Store()->memory_type() == ft) {
1676             value = store->in(MemNode::ValueIn);
1677 #ifdef ASSERT
1678             if (VerifyConnectionGraph) {
1679               // Verify that AddP already points to all objects the value points to.
1680               PointsToNode* val = ptnode_adr(value->_idx);
1681               assert((val != NULL), "should be processed already");
1682               PointsToNode* missed_obj = NULL;
1683               if (val->is_JavaObject()) {
1684                 if (!field->points_to(val->as_JavaObject())) {
1685                   missed_obj = val;
1686                 }
1687               } else {
1688                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
1689                   tty->print_cr("----------init store has invalid value -----");
1690                   store->dump();
1691                   val->dump();
1692                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
1693                 }
1694                 for (EdgeIterator j(val); j.has_next(); j.next()) {
1695                   PointsToNode* obj = j.get();
1696                   if (obj->is_JavaObject()) {
1697                     if (!field->points_to(obj->as_JavaObject())) {
1698                       missed_obj = obj;
1699                       break;
1700                     }
1701                   }
1702                 }
1703               }
1704               if (missed_obj != NULL) {
1705                 tty->print_cr("----------field---------------------------------");
1706                 field->dump();
1707                 tty->print_cr("----------missed reference to object-----------");
1708                 missed_obj->dump();
1709                 tty->print_cr("----------object referenced by init store -----");
1710                 store->dump();
1711                 val->dump();
1712                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
1713               }
1714             }
1715 #endif
1716           } else {
1717             // There could be initializing stores which follow the allocation.
1718             // For example, a volatile field store is not collected
1719             // by the Initialize node.
1720             //
1721             // We need to check for dependent loads to separate such stores from
1722             // stores which follow loads. For now, add the initial value NULL so
1723             // that the pointer compare optimization works correctly.
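                 // An illustrative (hypothetical) pattern where this happens:
                 //
                 //    class A { volatile Object f; A(Object o) { f = o; } }
                 //
                 // The store to the volatile field 'f' follows the allocation
                 // but is not captured by the Initialize node.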
1724           }
1725         }
1726         if (value == NULL) {
1727           // A field's initializing value was not recorded. Add NULL.
1728           if (add_edge(field, null_obj)) {
1729             // New edge was added
1730             new_edges++;
1731             add_field_uses_to_worklist(field->as_Field());
1732           }
1733         }
1734       }
1735     }
1736   }
1737   return new_edges;
1738 }
1739 
1740 // Adjust scalar_replaceable state after Connection Graph is built.
1741 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {
1742   // Search for non-escaping objects which are not scalar replaceable
1743   // and mark them to propagate the state to referenced objects.
1744 
1745   // 1. An object is not scalar replaceable if the field into which it is
1746   // stored has unknown offset (stored into unknown element of an array).
1747   //
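       // For example (illustrative):
       //
       //    Point p[] = new Point[2];
       //    p[i] = new Point(); // 'i' is not constant, so the store offset is unknown
       //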
1748   for (UseIterator i(jobj); i.has_next(); i.next()) {
1749     PointsToNode* use = i.get();
1750     if (use->is_Arraycopy()) {
1751       continue;
1752     }
1753     if (use->is_Field()) {
1754       FieldNode* field = use->as_Field();
1755       assert(field->is_oop() && field->scalar_replaceable(), "sanity");
1756       if (field->offset() == Type::OffsetBot) {
1757         jobj->set_scalar_replaceable(false);
1758         return;
1759       }
1760       // 2. An object is not scalar replaceable if the field into which it is
1761       // stored has multiple bases one of which is null.
1762       if (field->base_count() > 1) {
1763         for (BaseIterator i(field); i.has_next(); i.next()) {
1764           PointsToNode* base = i.get();
1765           if (base == null_obj) {
1766             jobj->set_scalar_replaceable(false);
1767             return;
1768           }
1769         }
1770       }
1771     }
1772     assert(use->is_Field() || use->is_LocalVar(), "sanity");
1773     // 3. An object is not scalar replaceable if it is merged with other objects.
1774     for (EdgeIterator j(use); j.has_next(); j.next()) {
1775       PointsToNode* ptn = j.get();
1776       if (ptn->is_JavaObject() && ptn != jobj) {
1777         // Mark all objects.
1778         jobj->set_scalar_replaceable(false);
1779         ptn->set_scalar_replaceable(false);
1780       }
1781     }
1782     if (!jobj->scalar_replaceable()) {
1783       return;
1784     }
1785   }
1786 
1787   for (EdgeIterator j(jobj); j.has_next(); j.next()) {
1788     if (j.get()->is_Arraycopy()) {
1789       continue;
1790     }
1791 
1792     // Non-escaping object node should point only to field nodes.
1793     FieldNode* field = j.get()->as_Field();
1794     int offset = field->as_Field()->offset();
1795 
1796     // 4. An object is not scalar replaceable if it has a field with unknown
1797     // offset (array's element is accessed in loop).
1798     if (offset == Type::OffsetBot) {
1799       jobj->set_scalar_replaceable(false);
1800       return;
1801     }
1802     // 5. Currently an object is not scalar replaceable if a LoadStore node
1803     // accesses its field since the field value is unknown after it.
1804     //
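         // For example (illustrative): an atomic compare-and-swap on the field,
         // e.g. through Unsafe or a VarHandle, is parsed into a LoadStore node
         // (CompareAndSwap*/GetAndSet*) attached to this address.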
1805     Node* n = field->ideal_node();
1806 
1807     // Test for an unsafe access that was parsed as maybe off heap
1808     // (with a CheckCastPP to raw memory).
1809     assert(n->is_AddP(), "expect an address computation");
1810     if (n->in(AddPNode::Base)->is_top() &&
1811         n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
1812       assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
1813       assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
1814       jobj->set_scalar_replaceable(false);
1815       return;
1816     }
1817 
1818     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1819       Node* u = n->fast_out(i);
1820       if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
1821         jobj->set_scalar_replaceable(false);
1822         return;
1823       }
1824     }
1825 
1826     // 6. Or the address may point to more than one object. This may produce
1827     // a false positive result (marking the object not scalar replaceable)
1828     // since the flow-insensitive escape analysis can't separate
1829     // the case when stores overwrite the field's value from the case
1830     // when stores happen on different control branches.
1831     //
1832     // Note: it will disable scalar replacement in some cases:
1833     //
1834     //    Point p[] = new Point[1];
1835     //    p[0] = new Point(); // Will not be scalar replaced
1836     //
1837     // but it will save us from incorrect optimizations in cases such as:
1838     //
1839     //    Point p[] = new Point[1];
1840     //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
1841     //
1842     if (field->base_count() > 1) {
1843       for (BaseIterator i(field); i.has_next(); i.next()) {
1844         PointsToNode* base = i.get();
1845         // Don't take into account LocalVar nodes which
1846         // may point to only one object which should also be
1847         // this field's base by now.
1848         if (base->is_JavaObject() && base != jobj) {
1849           // Mark all bases.
1850           jobj->set_scalar_replaceable(false);
1851           base->set_scalar_replaceable(false);
1852         }
1853       }
1854     }
1855   }
1856 }
1857 
1858 #ifdef ASSERT
1859 void ConnectionGraph::verify_connection_graph(
1860                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
1861                          GrowableArray<JavaObjectNode*>& non_escaped_worklist,
1862                          GrowableArray<JavaObjectNode*>& java_objects_worklist,
1863                          GrowableArray<Node*>& addp_worklist) {
1864   // Verify that graph is complete - no new edges could be added.
1865   int java_objects_length = java_objects_worklist.length();
1866   int non_escaped_length  = non_escaped_worklist.length();
1867   int new_edges = 0;
1868   for (int next = 0; next < java_objects_length; ++next) {
1869     JavaObjectNode* ptn = java_objects_worklist.at(next);
1870     new_edges += add_java_object_edges(ptn, true);
1871   }
1872   assert(new_edges == 0, "graph was not complete");
1873   // Verify that escape state is final.
1874   int length = non_escaped_worklist.length();
1875   find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist);
1876   assert((non_escaped_length == non_escaped_worklist.length()) &&
1877          (non_escaped_length == length) &&
1878          (_worklist.length() == 0), "escape state was not final");
1879 
1880   // Verify fields information.
1881   int addp_length = addp_worklist.length();
1882   for (int next = 0; next < addp_length; ++next ) {
1883     Node* n = addp_worklist.at(next);
1884     FieldNode* field = ptnode_adr(n->_idx)->as_Field();
1885     if (field->is_oop()) {
1886       // Verify that field has all bases
1887       Node* base = get_addp_base(n);
1888       PointsToNode* ptn = ptnode_adr(base->_idx);
1889       if (ptn->is_JavaObject()) {
1890         assert(field->has_base(ptn->as_JavaObject()), "sanity");
1891       } else {
1892         assert(ptn->is_LocalVar(), "sanity");
1893         for (EdgeIterator i(ptn); i.has_next(); i.next()) {
1894           PointsToNode* e = i.get();
1895           if (e->is_JavaObject()) {
1896             assert(field->has_base(e->as_JavaObject()), "sanity");
1897           }
1898         }
1899       }
1900       // Verify that all fields have initializing values.
1901       if (field->edge_count() == 0) {
1902         tty->print_cr("----------field does not have references----------");
1903         field->dump();
1904         for (BaseIterator i(field); i.has_next(); i.next()) {
1905           PointsToNode* base = i.get();
1906           tty->print_cr("----------field has next base---------------------");
1907           base->dump();
1908           if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) {
1909             tty->print_cr("----------base has fields-------------------------");
1910             for (EdgeIterator j(base); j.has_next(); j.next()) {
1911               j.get()->dump();
1912             }
1913             tty->print_cr("----------base has references---------------------");
1914             for (UseIterator j(base); j.has_next(); j.next()) {
1915               j.get()->dump();
1916             }
1917           }
1918         }
1919         for (UseIterator i(field); i.has_next(); i.next()) {
1920           i.get()->dump();
1921         }
1922         assert(field->edge_count() > 0, "sanity");
1923       }
1924     }
1925   }
1926 }
1927 #endif
1928 
1929 // Optimize ideal graph.
1930 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
1931                                            GrowableArray<Node*>& storestore_worklist) {
1932   Compile* C = _compile;
1933   PhaseIterGVN* igvn = _igvn;
1934   if (EliminateLocks) {
1935     // Mark locks before changing ideal graph.
1936     int cnt = C->macro_count();
1937     for( int i=0; i < cnt; i++ ) {
1938       Node *n = C->macro_node(i);
1939       if (n->is_AbstractLock()) { // Lock and Unlock nodes
1940         AbstractLockNode* alock = n->as_AbstractLock();
1941         if (!alock->is_non_esc_obj()) {
1942           if (not_global_escape(alock->obj_node())) {
1943             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
1944             // The lock could be marked eliminated by lock coarsening
1945             // code during first IGVN before EA. Replace coarsened flag
1946             // to eliminate all associated locks/unlocks.
1947 #ifdef ASSERT
1948             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
1949 #endif
1950             alock->set_non_esc_obj();
1951           }
1952         }
1953       }
1954     }
1955   }
1956 
1957   if (OptimizePtrCompare) {
1958     // Add ConI(#CC_GT) and ConI(#CC_EQ).
1959     _pcmp_neq = igvn->makecon(TypeInt::CC_GT);
1960     _pcmp_eq = igvn->makecon(TypeInt::CC_EQ);
1961     // Optimize objects compare.
1962     while (ptr_cmp_worklist.length() != 0) {
1963       Node *n = ptr_cmp_worklist.pop();
1964       Node *res = optimize_ptr_compare(n);
1965       if (res != NULL) {
1966 #ifndef PRODUCT
1967         if (PrintOptimizePtrCompare) {
1968           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
1969           if (Verbose) {
1970             n->dump(1);
1971           }
1972         }
1973 #endif
1974         igvn->replace_node(n, res);
1975       }
1976     }
1977     // cleanup
1978     if (_pcmp_neq->outcnt() == 0)
1979       igvn->hash_delete(_pcmp_neq);
1980     if (_pcmp_eq->outcnt()  == 0)
1981       igvn->hash_delete(_pcmp_eq);
1982   }
1983 
1984   // For MemBarStoreStore nodes added in library_call.cpp, check
1985   // escape status of associated AllocateNode and optimize out
1986   // MemBarStoreStore node if the allocated object never escapes.
1987   while (storestore_worklist.length() != 0) {
1988     Node *n = storestore_worklist.pop();
1989     MemBarStoreStoreNode *storestore = n->as_MemBarStoreStore();
1990     Node *alloc = storestore->in(MemBarNode::Precedent)->in(0);
1991     assert (alloc->is_Allocate(), "storestore should point to AllocateNode");
1992     if (not_global_escape(alloc)) {
1993       MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
1994       mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
1995       mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
1996       igvn->register_new_node_with_optimizer(mb);
1997       igvn->replace_node(storestore, mb);
1998     }
1999   }
2000 }
2001 
2002 // Optimize objects compare.
2003 Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
2004   assert(OptimizePtrCompare, "sanity");
2005   PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
2006   PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
2007   JavaObjectNode* jobj1 = unique_java_object(n->in(1));
2008   JavaObjectNode* jobj2 = unique_java_object(n->in(2));
2009   assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
2010   assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
2011 
2012   // Check simple cases first.
2013   if (jobj1 != NULL) {
2014     if (jobj1->escape_state() == PointsToNode::NoEscape) {
2015       if (jobj1 == jobj2) {
2016         // Comparing the same not escaping object.
2017         return _pcmp_eq;
2018       }
2019       Node* obj = jobj1->ideal_node();
2020       // Comparing not escaping allocation.
2021       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
2022           !ptn2->points_to(jobj1)) {
2023         return _pcmp_neq; // This includes nullness check.
2024       }
2025     }
2026   }
2027   if (jobj2 != NULL) {
2028     if (jobj2->escape_state() == PointsToNode::NoEscape) {
2029       Node* obj = jobj2->ideal_node();
2030       // Comparing not escaping allocation.
2031       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
2032           !ptn1->points_to(jobj2)) {
2033         return _pcmp_neq; // This includes nullness check.
2034       }
2035     }
2036   }
2037   if (jobj1 != NULL && jobj1 != phantom_obj &&
2038       jobj2 != NULL && jobj2 != phantom_obj &&
2039       jobj1->ideal_node()->is_Con() &&
2040       jobj2->ideal_node()->is_Con()) {
2041     // Comparing Klass or String constants. We need to be careful with
2042     // compressed pointers: compare the types of ConN and ConP instead of the nodes.
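         // For example (illustrative): comparing two constant String oops from the
         // constant pool, such as s == "abc" where 's' itself is known to be a
         // string literal; the result folds to EQ or NE based on the pointer types.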
2043     const Type* t1 = jobj1->ideal_node()->get_ptr_type();
2044     const Type* t2 = jobj2->ideal_node()->get_ptr_type();
2045     if (t1->make_ptr() == t2->make_ptr()) {
2046       return _pcmp_eq;
2047     } else {
2048       return _pcmp_neq;
2049     }
2050   }
2051   if (ptn1->meet(ptn2)) {
2052     return NULL; // Sets are not disjoint
2053   }
2054 
2055   // Sets are disjoint.
2056   bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
2057   bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
2058   bool set1_has_null_ptr    = ptn1->points_to(null_obj);
2059   bool set2_has_null_ptr    = ptn2->points_to(null_obj);
2060   if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
2061       (set2_has_unknown_ptr && set1_has_null_ptr)) {
2062     // Check nullness of unknown object.
2063     return NULL;
2064   }
2065 
2066   // Disjointness by itself is not sufficient since
2067   // alias analysis is not complete for escaped objects.
2068   // Disjoint sets are definitely unrelated only when
2069   // at least one set contains only non-escaping allocations.
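       // Illustrative reasoning: if ptn1 can point only to non-escaping
       // allocations and ptn2's points-to set is disjoint from ptn1's, then
       // ptn2 can never refer to any object ptn1 refers to, so the compare
       // must be "not equal".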
2070   if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
2071     if (ptn1->non_escaping_allocation()) {
2072       return _pcmp_neq;
2073     }
2074   }
2075   if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
2076     if (ptn2->non_escaping_allocation()) {
2077       return _pcmp_neq;
2078     }
2079   }
2080   return NULL;
2081 }
2082 
2083 // Connection Graph construction functions.
2084 
2085 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
2086   PointsToNode* ptadr = _nodes.at(n->_idx);
2087   if (ptadr != NULL) {
2088     assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
2089     return;
2090   }
2091   Compile* C = _compile;
2092   ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
2093   _nodes.at_put(n->_idx, ptadr);
2094 }
2095 
2096 void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
2097   PointsToNode* ptadr = _nodes.at(n->_idx);
2098   if (ptadr != NULL) {
2099     assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
2100     return;
2101   }
2102   Compile* C = _compile;
2103   ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
2104   _nodes.at_put(n->_idx, ptadr);
2105 }
2106 
2107 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
2108   PointsToNode* ptadr = _nodes.at(n->_idx);
2109   if (ptadr != NULL) {
2110     assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
2111     return;
2112   }
2113   bool unsafe = false;
2114   bool is_oop = is_oop_field(n, offset, &unsafe);
2115   if (unsafe) {
2116     es = PointsToNode::GlobalEscape;
2117   }
2118   Compile* C = _compile;
2119   FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
2120   _nodes.at_put(n->_idx, field);
2121 }
2122 
2123 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
2124                                     PointsToNode* src, PointsToNode* dst) {
2125   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
2126   assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
2127   PointsToNode* ptadr = _nodes.at(n->_idx);
2128   if (ptadr != NULL) {
2129     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
2130     return;
2131   }
2132   Compile* C = _compile;
2133   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
2134   _nodes.at_put(n->_idx, ptadr);
2135   // Add edge from arraycopy node to source object.
2136   (void)add_edge(ptadr, src);
2137   src->set_arraycopy_src();
2138   // Add edge from destination object to arraycopy node.
2139   (void)add_edge(dst, ptadr);
2140   dst->set_arraycopy_dst();
2141 }
2142 
2143 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2144   const Type* adr_type = n->as_AddP()->bottom_type();
2145   BasicType bt = T_INT;
2146   if (offset == Type::OffsetBot) {
2147     // Check only oop fields.
2148     if (!adr_type->isa_aryptr() ||
2149         (adr_type->isa_aryptr()->klass() == NULL) ||
2150          adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
2151       // OffsetBot is used to reference array's element. Ignore first AddP.
2152       if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2153         bt = T_OBJECT;
2154       }
2155     }
2156   } else if (offset != oopDesc::klass_offset_in_bytes()) {
2157     if (adr_type->isa_instptr()) {
2158       ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
2159       if (field != NULL) {
2160         bt = field->layout_type();
2161       } else {
2162         // Check for unsafe oop field access
2163         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2164             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2165 #if INCLUDE_SHENANDOAHGC
2166             n->has_out_with(Op_ShenandoahCompareAndExchangeP) || n->has_out_with(Op_ShenandoahCompareAndExchangeN) ||
2167             n->has_out_with(Op_ShenandoahCompareAndSwapP, Op_ShenandoahCompareAndSwapN, Op_ShenandoahWeakCompareAndSwapP, Op_ShenandoahWeakCompareAndSwapN) ||
2168 #endif
2169             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
2170           bt = T_OBJECT;
2171           (*unsafe) = true;
2172         }
2173       }
2174     } else if (adr_type->isa_aryptr()) {
2175       if (offset == arrayOopDesc::length_offset_in_bytes()) {
2176         // Ignore array length load.
2177       } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2178         // Ignore first AddP.
2179       } else {
2180         const Type* elemtype = adr_type->isa_aryptr()->elem();
2181         bt = elemtype->array_element_basic_type();
2182       }
2183     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2184       // Allocation initialization, ThreadLocal field access, unsafe access
2185       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2186           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2187           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
2188         bt = T_OBJECT;
2189       }
2190     }
2191   }
2192   return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
2193 }
2194 
2195 // Returns the unique java object the node points to, or NULL.
2196 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2197   assert(!_collecting, "should not call during graph construction");
2198   // If the node was created after the escape computation we can't answer.
2199   uint idx = n->_idx;
2200   if (idx >= nodes_size()) {
2201     return NULL;
2202   }
2203   PointsToNode* ptn = ptnode_adr(idx);
2204   if (ptn == NULL) {
2205     return NULL;
2206   }
2207   if (ptn->is_JavaObject()) {
2208     return ptn->as_JavaObject();
2209   }
2210   assert(ptn->is_LocalVar(), "sanity");
2211   // Check all java objects it points to.
2212   JavaObjectNode* jobj = NULL;
2213   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2214     PointsToNode* e = i.get();
2215     if (e->is_JavaObject()) {
2216       if (jobj == NULL) {
2217         jobj = e->as_JavaObject();
2218       } else if (jobj != e) {
2219         return NULL;
2220       }
2221     }
2222   }
2223   return jobj;
2224 }
2225 
2226 // Return true if this node points only to non-escaping allocations.
2227 bool PointsToNode::non_escaping_allocation() {
2228   if (is_JavaObject()) {
2229     Node* n = ideal_node();
2230     if (n->is_Allocate() || n->is_CallStaticJava()) {
2231       return (escape_state() == PointsToNode::NoEscape);
2232     } else {
2233       return false;
2234     }
2235   }
2236   assert(is_LocalVar(), "sanity");
2237   // Check all java objects it points to.
2238   for (EdgeIterator i(this); i.has_next(); i.next()) {
2239     PointsToNode* e = i.get();
2240     if (e->is_JavaObject()) {
2241       Node* n = e->ideal_node();
2242       if ((e->escape_state() != PointsToNode::NoEscape) ||
2243           !(n->is_Allocate() || n->is_CallStaticJava())) {
2244         return false;
2245       }
2246     }
2247   }
2248   return true;
2249 }
2250 
2251 // Return true if we know the node does not escape globally.
2252 bool ConnectionGraph::not_global_escape(Node *n) {
2253   assert(!_collecting, "should not call during graph construction");
2254   // If the node was created after the escape computation we can't answer.
2255   uint idx = n->_idx;
2256   if (idx >= nodes_size()) {
2257     return false;
2258   }
2259   PointsToNode* ptn = ptnode_adr(idx);
2260   if (ptn == NULL) {
2261     return false; // not in congraph (e.g. ConI)
2262   }
2263   PointsToNode::EscapeState es = ptn->escape_state();
2264   // If we have already computed a value, return it.
2265   if (es >= PointsToNode::GlobalEscape)
2266     return false;
2267   if (ptn->is_JavaObject()) {
2268     return true; // (es < PointsToNode::GlobalEscape);
2269   }
2270   assert(ptn->is_LocalVar(), "sanity");
2271   // Check all java objects it points to.
2272   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2273     if (i.get()->escape_state() >= PointsToNode::GlobalEscape)
2274       return false;
2275   }
2276   return true;
2277 }
2278 
2279 
2280 // Helper functions
2281 
2282 // Return true if this node is the specified JavaObject or points to it.
2283 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
2284   if (is_JavaObject()) {
2285     return (this == ptn);
2286   }
2287   assert(is_LocalVar() || is_Field(), "sanity");
2288   for (EdgeIterator i(this); i.has_next(); i.next()) {
2289     if (i.get() == ptn)
2290       return true;
2291   }
2292   return false;
2293 }
2294 
2295 // Return true if one node points to the other or their points-to sets intersect.
2296 bool PointsToNode::meet(PointsToNode* ptn) {
2297   if (this == ptn) {
2298     return true;
2299   } else if (ptn->is_JavaObject()) {
2300     return this->points_to(ptn->as_JavaObject());
2301   } else if (this->is_JavaObject()) {
2302     return ptn->points_to(this->as_JavaObject());
2303   }
2304   assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
2305   int ptn_count =  ptn->edge_count();
2306   for (EdgeIterator i(this); i.has_next(); i.next()) {
2307     PointsToNode* this_e = i.get();
2308     for (int j = 0; j < ptn_count; j++) {
2309       if (this_e == ptn->edge(j))
2310         return true;
2311     }
2312   }
2313   return false;
2314 }
2315 
2316 #ifdef ASSERT
2317 // Return true if bases point to this java object.
2318 bool FieldNode::has_base(JavaObjectNode* jobj) const {
2319   for (BaseIterator i(this); i.has_next(); i.next()) {
2320     if (i.get() == jobj)
2321       return true;
2322   }
2323   return false;
2324 }
2325 #endif
2326 
2327 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
2328   const Type *adr_type = phase->type(adr);
2329   if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
2330       adr->in(AddPNode::Address)->is_Proj() &&
2331       adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
2332     // We are computing a raw address for a store captured by an Initialize;
2333     // compute an appropriate address type. AddP cases #3 and #5 (see below).
2334     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2335     assert(offs != Type::OffsetBot ||
2336            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
2337            "offset must be a constant or it is initialization of array");
2338     return offs;
2339   }
2340   const TypePtr *t_ptr = adr_type->isa_ptr();
2341   assert(t_ptr != NULL, "must be a pointer type");
2342   return t_ptr->offset();
2343 }
2344 
2345 Node* ConnectionGraph::get_addp_base(Node *addp) {
2346   assert(addp->is_AddP(), "must be AddP");
2347   //
2348   // AddP cases for Base and Address inputs:
2349   // case #1. Direct object's field reference:
2350   //     Allocate
2351   //       |
2352   //     Proj #5 ( oop result )
2353   //       |
2354   //     CheckCastPP (cast to instance type)
2355   //      | |
2356   //     AddP  ( base == address )
2357   //
2358   // case #2. Indirect object's field reference:
2359   //      Phi
2360   //       |
2361   //     CastPP (cast to instance type)
2362   //      | |
2363   //     AddP  ( base == address )
2364   //
2365   // case #3. Raw object's field reference for Initialize node:
2366   //      Allocate
2367   //        |
2368   //      Proj #5 ( oop result )
2369   //  top   |
2370   //     \  |
2371   //     AddP  ( base == top )
2372   //
2373   // case #4. Array's element reference:
2374   //   {CheckCastPP | CastPP}
2375   //     |  | |
2376   //     |  AddP ( array's element offset )
2377   //     |  |
2378   //     AddP ( array's offset )
2379   //
2380   // case #5. Raw object's field reference for arraycopy stub call:
2381   //          The inline_native_clone() case when the arraycopy stub is called
2382   //          after the allocation before Initialize and CheckCastPP nodes.
2383   //      Allocate
2384   //        |
2385   //      Proj #5 ( oop result )
2386   //       | |
2387   //       AddP  ( base == address )
2388   //
2389   // case #6. Constant Pool, ThreadLocal, CastX2P or
2390   //          Raw object's field reference:
2391   //      {ConP, ThreadLocal, CastX2P, raw Load}
2392   //  top   |
2393   //     \  |
2394   //     AddP  ( base == top )
2395   //
2396   // case #7. Klass's field reference.
2397   //      LoadKlass
2398   //       | |
2399   //       AddP  ( base == address )
2400   //
2401   // case #8. narrow Klass's field reference.
2402   //      LoadNKlass
2403   //       |
2404   //      DecodeN
2405   //       | |
2406   //       AddP  ( base == address )
2407   //
2408   // case #9. Mixed unsafe access
2409   //    {instance}
2410   //        |
2411   //      CheckCastPP (raw)
2412   //  top   |
2413   //     \  |
2414   //     AddP  ( base == top )
2415   //
2416   Node *base = addp->in(AddPNode::Base);
2417   if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.
2418     base = addp->in(AddPNode::Address);
2419     while (base->is_AddP()) {
2420       // Case #6 (unsafe access) may have several chained AddP nodes.
2421       assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
2422       base = base->in(AddPNode::Address);
2423     }
2424     if (base->Opcode() == Op_CheckCastPP &&
2425         base->bottom_type()->isa_rawptr() &&
2426         _igvn->type(base->in(1))->isa_oopptr()) {
2427       base = base->in(1); // Case #9
2428     } else {
2429       Node* uncast_base = base->uncast();
2430       int opcode = uncast_base->Opcode();
2431       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
2432              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
2433              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
2434              (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()) ||
2435              uncast_base->Opcode() == Op_ShenandoahLoadReferenceBarrier, "sanity");
2436     }
2437   }
2438   return base;
2439 }
2440 
2441 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
2442   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
2443   Node* addp2 = addp->raw_out(0);
2444   if (addp->outcnt() == 1 && addp2->is_AddP() &&
2445       addp2->in(AddPNode::Base) == n &&
2446       addp2->in(AddPNode::Address) == addp) {
2447     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
2448     //
2449     // Find the array's offset to push it on the worklist first so that,
2450     // as a result, the array's element offset (pushed second) is processed
2451     // first, which avoids a CastPP for the array's offset.
2452     // Otherwise the inserted CastPP (LocalVar) will point to what
2453     // the AddP (Field) points to, which would be wrong since
2454     // the algorithm expects the CastPP to point to the same objects
2455     // as the AddP's base CheckCastPP (LocalVar).
2456     //
2457     //    ArrayAllocation
2458     //     |
2459     //    CheckCastPP
2460     //     |
2461     //    memProj (from ArrayAllocation CheckCastPP)
2462     //     |  ||
2463     //     |  ||   Int (element index)
2464     //     |  ||    |   ConI (log(element size))
2465     //     |  ||    |   /
2466     //     |  ||   LShift
2467     //     |  ||  /
2468     //     |  AddP (array's element offset)
2469     //     |  |
2470     //     |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
2471     //     | / /
2472     //     AddP (array's offset)
2473     //      |
2474     //     Load/Store (memory operation on array's element)
2475     //
2476     return addp2;
2477   }
2478   return NULL;
2479 }
2480 
2481 //
2482 // Adjust the type and inputs of an AddP which computes the
2483 // address of a field of an instance
2484 //
2485 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2486   PhaseGVN* igvn = _igvn;
2487   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
2488   assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
2489   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
2490   if (t == NULL) {
2491     // We are computing a raw address for a store captured by an Initialize;
2492     // compute an appropriate address type (cases #3 and #5).
2493     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
2494     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
2495     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
2496     assert(offs != Type::OffsetBot, "offset must be a constant");
2497     t = base_t->add_offset(offs)->is_oopptr();
2498   }
2499   int inst_id =  base_t->instance_id();
2500   assert(!t->is_known_instance() || t->instance_id() == inst_id,
2501                              "old type must be non-instance or match new type");
2502 
2503   // The type 't' could be a subclass of 'base_t'.
2504   // As a result t->offset() could be larger than base_t's size and it will
2505   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
2506   // constructor verifies the correctness of the offset.
2507   //
2508   // It could happen on a subclass's branch (from the type profiling
2509   // inlining) which was not eliminated during parsing since the exactness
2510   // of the allocation type was not propagated to the subclass type check.
2511   //
2512   // Or the type 't' could be unrelated to 'base_t' at all.
2513   // It could happen when the CHA type differs from the MDO type on a dead path
2514   // (for example, from an instanceof check) which is not collapsed during parsing.
2515   //
2516   // Do nothing for such an AddP node and don't process its users since
2517   // this code branch will go away.
2518   //
2519   if (!t->is_known_instance() &&
2520       !base_t->klass()->is_subtype_of(t->klass())) {
2521      return false; // bail out
2522   }
2523   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
2524   // Do NOT remove the next line: ensure a new alias index is allocated
2525   // for the instance type. Note: C++ will not remove it since the call
2526   // has side effect.
2527   int alias_idx = _compile->get_alias_index(tinst);
2528   igvn->set_type(addp, tinst);
2529   // record the allocation in the node map
2530   set_map(addp, get_map(base->_idx));
2531   // Set addp's Base and Address to 'base'.
2532   Node *abase = addp->in(AddPNode::Base);
2533   Node *adr   = addp->in(AddPNode::Address);
2534   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2535       adr->in(0)->_idx == (uint)inst_id) {
2536     // Skip AddP cases #3 and #5.
2537   } else {
2538     assert(!abase->is_top(), "sanity"); // AddP case #3
2539     if (abase != base) {
2540       igvn->hash_delete(addp);
2541       addp->set_req(AddPNode::Base, base);
2542       if (abase == adr) {
2543         addp->set_req(AddPNode::Address, base);
2544       } else {
2545         // AddP case #4 (adr is array's element offset AddP node)
2546 #ifdef ASSERT
2547         const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
2548         assert(adr->is_AddP() && atype != NULL &&
2549                atype->instance_id() == inst_id, "array's element offset should be processed first");
2550 #endif
2551       }
2552       igvn->hash_insert(addp);
2553     }
2554   }
2555   // Put on IGVN worklist since at least addp's type was changed above.
2556   record_for_optimizer(addp);
2557   return true;
2558 }
2559 
2560 //
2561 // Create a new version of orig_phi if necessary. Returns either the newly
2562 // created phi or an existing phi.  Sets new_created to indicate whether a new
2563 // phi was created.  Cache the last newly created phi in the node map.
2564 //
2565 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, bool &new_created) {
2566   Compile *C = _compile;
2567   PhaseGVN* igvn = _igvn;
2568   new_created = false;
2569   int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
2570   // nothing to do if orig_phi is bottom memory or matches alias_idx
2571   if (phi_alias_idx == alias_idx) {
2572     return orig_phi;
2573   }
2574   // Have we recently created a Phi for this alias index?
2575   PhiNode *result = get_map_phi(orig_phi->_idx);
2576   if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
2577     return result;
2578   }
2579   // Previous check may fail when the same wide memory Phi was split into Phis
2580   // for different memory slices. Search all Phis for this region.
2581   if (result != NULL) {
2582     Node* region = orig_phi->in(0);
2583     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2584       Node* phi = region->fast_out(i);
2585       if (phi->is_Phi() &&
2586           C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
2587         assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
2588         return phi->as_Phi();
2589       }
2590     }
2591   }
2592   if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
2593     if (C->do_escape_analysis() == true && !C->failing()) {
2594       // Retry compilation without escape analysis.
2595       // If this is the first failure, the sentinel string will "stick"
2596       // to the Compile object, and the C2Compiler will see it and retry.
2597       C->record_failure(C2Compiler::retry_no_escape_analysis());
2598     }
2599     return NULL;
2600   }
2601   orig_phi_worklist.append_if_missing(orig_phi);
2602   const TypePtr *atype = C->get_adr_type(alias_idx);
2603   result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
2604   C->copy_node_notes_to(result, orig_phi);
2605   igvn->set_type(result, result->bottom_type());
2606   record_for_optimizer(result);
2607   set_map(orig_phi, result);
2608   new_created = true;
2609   return result;
2610 }
2611 
2612 //
2613 // Return a new version of Memory Phi "orig_phi" with the inputs having the
2614 // specified alias index.
2615 //
2616 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist) {
2617   assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
2618   Compile *C = _compile;
2619   PhaseGVN* igvn = _igvn;
2620   bool new_phi_created;
2621   PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
2622   if (!new_phi_created) {
2623     return result;
2624   }
2625   GrowableArray<PhiNode *>  phi_list;
2626   GrowableArray<uint>  cur_input;
2627   PhiNode *phi = orig_phi;
2628   uint idx = 1;
2629   bool finished = false;
2630   while(!finished) {
2631     while (idx < phi->req()) {
2632       Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
2633       if (mem != NULL && mem->is_Phi()) {
2634         PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
2635         if (new_phi_created) {
2636           // Found a phi for which we created a new split; push the current one
2637           // on the worklist and begin processing the new one.
2638           phi_list.push(phi);
2639           cur_input.push(idx);
2640           phi = mem->as_Phi();
2641           result = newphi;
2642           idx = 1;
2643           continue;
2644         } else {
2645           mem = newphi;
2646         }
2647       }
2648       if (C->failing()) {
2649         return NULL;
2650       }
2651       result->set_req(idx++, mem);
2652     }
2653 #ifdef ASSERT
2654     // verify that the new Phi has an input for each input of the original
2655     assert( phi->req() == result->req(), "must have same number of inputs.");
2656     assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
2657 #endif
2658     // Verify that each input of the new Phi corresponds to an input of the
2659     // original Phi (both present or both absent).
2660     for (uint i = 1; i < phi->req(); i++) {
2661       Node* in = result->in(i);
2662       assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");
2663     }
2664     // we have finished processing a Phi, see if there are any more to do
2665     finished = (phi_list.length() == 0 );
2666     if (!finished) {
2667       phi = phi_list.pop();
2668       idx = cur_input.pop();
2669       PhiNode *prev_result = get_map_phi(phi->_idx);
2670       prev_result->set_req(idx++, result);
2671       result = prev_result;
2672     }
2673   }
2674   return result;
2675 }
2676 
2677 //
2678 // The next methods are derived from methods in MemNode.
2679 //
2680 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
2681   Node *mem = mmem;
2682   // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
2683   // an array that has not been precisely typed yet.  Do not apply any
2684   // aliasing to it yet.
2685   if (toop->base() != Type::AnyPtr &&
2686       !(toop->klass() != NULL &&
2687         toop->klass()->is_java_lang_Object() &&
2688         toop->offset() == Type::OffsetBot)) {
2689     mem = mmem->memory_at(alias_idx);
2690     // Update the input if this is an improvement over what we have now
2691   }
2692   return mem;
2693 }
2694 
2695 //
2696 // Move memory users to their memory slices.
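// Only MergeMem and MemBar users are rewired here: the MergeMem's general
// slice entry and the MemBar's memory edge are redirected to the general
// memory found by find_inst_mem().  Mem and Phi users are expected to be on
// the right slice already (verified under ASSERT).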
2697 //
2698 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis) {
2699   Compile* C = _compile;
2700   PhaseGVN* igvn = _igvn;
2701   const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
2702   assert(tp != NULL, "ptr type");
2703   int alias_idx = C->get_alias_index(tp);
2704   int general_idx = C->get_general_index(alias_idx);
2705 
2706   // Move users first
2707   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2708     Node* use = n->fast_out(i);
2709     if (use->is_MergeMem()) {
2710       MergeMemNode* mmem = use->as_MergeMem();
2711       assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
2712       if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
2713         continue; // Nothing to do
2714       }
2715       // Replace previous general reference to mem node.
2716       uint orig_uniq = C->unique();
2717       Node* m = find_inst_mem(n, general_idx, orig_phis);
2718       assert(orig_uniq == C->unique(), "no new nodes");
2719       mmem->set_memory_at(general_idx, m);
2720       --imax;
2721       --i;
2722     } else if (use->is_MemBar()) {
2723       assert(!use->is_Initialize(), "initializing stores should not be moved");
2724       if (use->req() > MemBarNode::Precedent &&
2725           use->in(MemBarNode::Precedent) == n) {
2726         // Don't move related membars.
2727         record_for_optimizer(use);
2728         continue;
2729       }
2730       tp = use->as_MemBar()->adr_type()->isa_ptr();
2731       if ((tp != NULL && C->get_alias_index(tp) == alias_idx) ||
2732           alias_idx == general_idx) {
2733         continue; // Nothing to do
2734       }
2735       // Move to general memory slice.
2736       uint orig_uniq = C->unique();
2737       Node* m = find_inst_mem(n, general_idx, orig_phis);
2738       assert(orig_uniq == C->unique(), "no new nodes");
2739       igvn->hash_delete(use);
2740       imax -= use->replace_edge(n, m);
2741       igvn->hash_insert(use);
2742       record_for_optimizer(use);
2743       --i;
2744 #ifdef ASSERT
2745     } else if (use->is_Mem()) {
2746       if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
2747         // Don't move related cardmark.
2748         continue;
2749       }
2750       // Memory nodes should have new memory input.
2751       tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
2752       assert(tp != NULL, "ptr type");
2753       int idx = C->get_alias_index(tp);
2754       assert(get_map(use->_idx) != NULL || idx == alias_idx,
2755              "Following memory nodes should have new memory input or be on the same memory slice");
2756     } else if (use->is_Phi()) {
2757       // Phi nodes should be split and moved already.
2758       tp = use->as_Phi()->adr_type()->isa_ptr();
2759       assert(tp != NULL, "ptr type");
2760       int idx = C->get_alias_index(tp);
2761       assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
2762     } else {
2763       use->dump();
2764       assert(false, "should not be here");
2765 #endif
2766     }
2767   }
2768 }
2769 
2770 //
2771 // Search memory chain of "mem" to find a MemNode whose address
2772 // is the specified alias index.
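// The walk stops at a store to this slice (or, for non-instance slices, at any
// store to general memory) and steps over calls, Initialize and MemBar
// projections that cannot modify the slice, as well as MergeMem, ClearArray
// and single-input Phi nodes.  A memory Phi with a different instance alias
// index left at the end of the walk is split via split_memory_phi().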
2773 //
2774 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *>  &orig_phis) {
2775   if (orig_mem == NULL)
2776     return orig_mem;
2777   Compile* C = _compile;
2778   PhaseGVN* igvn = _igvn;
2779   const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
2780   bool is_instance = (toop != NULL) && toop->is_known_instance();
2781   Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
2782   Node *prev = NULL;
2783   Node *result = orig_mem;
2784   while (prev != result) {
2785     prev = result;
2786     if (result == start_mem)
2787       break;  // hit one of our sentinels
2788     if (result->is_Mem()) {
2789       const Type *at = igvn->type(result->in(MemNode::Address));
2790       if (at == Type::TOP)
2791         break; // Dead
2792       assert (at->isa_ptr() != NULL, "pointer type required.");
2793       int idx = C->get_alias_index(at->is_ptr());
2794       if (idx == alias_idx)
2795         break; // Found
2796       if (!is_instance && (at->isa_oopptr() == NULL ||
2797                            !at->is_oopptr()->is_known_instance())) {
2798         break; // Do not skip store to general memory slice.
2799       }
2800       result = result->in(MemNode::Memory);
2801     }
2802     if (!is_instance)
2803       continue;  // don't search further for non-instance types
2804     // skip over a call which does not affect this memory slice
2805     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
2806       Node *proj_in = result->in(0);
2807       if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
2808         break;  // hit one of our sentinels
2809       } else if (proj_in->is_Call()) {
2810         // ArrayCopy node processed here as well
2811         CallNode *call = proj_in->as_Call();
2812         if (!call->may_modify(toop, igvn)) {
2813           result = call->in(TypeFunc::Memory);
2814         }
2815       } else if (proj_in->is_Initialize()) {
2816         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
2817         // Stop if this is the initialization for the object instance which
2818         // contains this memory slice; otherwise skip over it.
2819         if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) {
2820           result = proj_in->in(TypeFunc::Memory);
2821         }
2822       } else if (proj_in->is_MemBar()) {
2823         // Check if there is an array copy for a clone
2824         // Step over GC barrier when ReduceInitialCardMarks is disabled
2825         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2826         Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0));
2827 
2828         if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
2829           // Stop if it is a clone
2830           ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy();
2831           if (ac->may_modify(toop, igvn)) {
2832             break;
2833           }
2834         }
2835         result = proj_in->in(TypeFunc::Memory);
2836       }
2837     } else if (result->is_MergeMem()) {
2838       MergeMemNode *mmem = result->as_MergeMem();
2839       result = step_through_mergemem(mmem, alias_idx, toop);
2840       if (result == mmem->base_memory()) {
2841         // Didn't find instance memory, search through general slice recursively.
2842         result = mmem->memory_at(C->get_general_index(alias_idx));
2843         result = find_inst_mem(result, alias_idx, orig_phis);
2844         if (C->failing()) {
2845           return NULL;
2846         }
2847         mmem->set_memory_at(alias_idx, result);
2848       }
2849     } else if (result->is_Phi() &&
2850                C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
2851       Node *un = result->as_Phi()->unique_input(igvn);
2852       if (un != NULL) {
2853         orig_phis.append_if_missing(result->as_Phi());
2854         result = un;
2855       } else {
2856         break;
2857       }
2858     } else if (result->is_ClearArray()) {
2859       if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
2860         // Cannot bypass the initialization of the instance
2861         // we are looking for.
2862         break;
2863       }
2864       // Otherwise skip it (the call updated 'result' value).
2865     } else if (result->Opcode() == Op_SCMemProj) {
2866       Node* mem = result->in(0);
2867       Node* adr = NULL;
2868       if (mem->is_LoadStore()) {
2869         adr = mem->in(MemNode::Address);
2870       } else {
2871         assert(mem->Opcode() == Op_EncodeISOArray ||
2872                mem->Opcode() == Op_StrCompressedCopy, "sanity");
2873         adr = mem->in(3); // Memory edge corresponds to destination array
2874       }
2875       const Type *at = igvn->type(adr);
2876       if (at != Type::TOP) {
2877         assert(at->isa_ptr() != NULL, "pointer type required.");
2878         int idx = C->get_alias_index(at->is_ptr());
2879         if (idx == alias_idx) {
2880           // Assert in debug mode
2881           assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
2882           break; // In product mode return SCMemProj node
2883         }
2884       }
2885       result = mem->in(MemNode::Memory);
2886     } else if (result->Opcode() == Op_StrInflatedCopy) {
2887       Node* adr = result->in(3); // Memory edge corresponds to destination array
2888       const Type *at = igvn->type(adr);
2889       if (at != Type::TOP) {
2890         assert(at->isa_ptr() != NULL, "pointer type required.");
2891         int idx = C->get_alias_index(at->is_ptr());
2892         if (idx == alias_idx) {
2893           // Assert in debug mode
2894           assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
2895           break; // In product mode return SCMemProj node
2896         }
2897       }
2898       result = result->in(MemNode::Memory);
2899     }
2900   }
2901   if (result->is_Phi()) {
2902     PhiNode *mphi = result->as_Phi();
2903     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
2904     const TypePtr *t = mphi->adr_type();
2905     if (!is_instance) {
2906       // Push all non-instance Phis on the orig_phis worklist to update inputs
2907       // during Phase 4 if needed.
2908       orig_phis.append_if_missing(mphi);
2909     } else if (C->get_alias_index(t) != alias_idx) {
2910       // Create a new Phi with the specified alias index type.
2911       result = split_memory_phi(mphi, alias_idx, orig_phis);
2912     }
2913   }
2914   // The result is either a MemNode, a PhiNode or an InitializeNode.
2915   return result;
2916 }
2917 
2918 //
2919 //  Convert the types of unescaped object to instance types where possible,
2920 //  propagate the new type information through the graph, and update memory
2921 //  edges and MergeMem inputs to reflect the new type.
2922 //
2923 //  We start with allocations (and calls which may be allocations)  on alloc_worklist.
2924 //  The processing is done in 4 phases:
2925 //
2926 //  Phase 1:  Process possible allocations from alloc_worklist.  Create instance
2927 //            types for the CheckCastPP for allocations where possible.
2928 //            Propagate the new types through users as follows:
2929 //               casts and Phi:  push users on alloc_worklist
2930 //               AddP:  cast Base and Address inputs to the instance type
2931 //                      push any AddP users on alloc_worklist and push any memnode
2932 //                      users onto memnode_worklist.
2933 //  Phase 2:  Process MemNodes from memnode_worklist.  Compute the new address type and
2934 //            search the memory chain for a store with the appropriate address
2935 //            type.  If a Phi is found, create a new version with
2936 //            the appropriate memory slices from each of the Phi inputs.
2937 //            For stores, process the users as follows:
2938 //               MemNode:  push on memnode_worklist
2939 //               MergeMem: push on mergemem_worklist
2940 //  Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory slice,
2941 //            moving the first node encountered of each instance type to the
2942 //            input corresponding to its alias index (its appropriate memory slice).
2944 //  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
2945 //
2946 // In the following example, the CheckCastPP nodes are casts of allocation
2947 // results, and the allocation feeding node 29 is unescaped and eligible to
2948 // become an instance type.
2949 //
2950 // We start with:
2951 //
2952 //     7 Parm #memory
2953 //    10  ConI  "12"
2954 //    19  CheckCastPP   "Foo"
2955 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
2956 //    29  CheckCastPP   "Foo"
2957 //    30  AddP  _ 29 29 10  Foo+12  alias_index=4
2958 //
2959 //    40  StoreP  25   7  20   ... alias_index=4
2960 //    50  StoreP  35  40  30   ... alias_index=4
2961 //    60  StoreP  45  50  20   ... alias_index=4
2962 //    70  LoadP    _  60  30   ... alias_index=4
2963 //    80  Phi     75  50  60   Memory alias_index=4
2964 //    90  LoadP    _  80  30   ... alias_index=4
2965 //   100  LoadP    _  80  20   ... alias_index=4
2966 //
2967 //
2968 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24
2969 // and creating a new alias index for node 30.  This gives:
2970 //
2971 //     7 Parm #memory
2972 //    10  ConI  "12"
2973 //    19  CheckCastPP   "Foo"
2974 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
2975 //    29  CheckCastPP   "Foo"  iid=24
2976 //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
2977 //
2978 //    40  StoreP  25   7  20   ... alias_index=4
2979 //    50  StoreP  35  40  30   ... alias_index=6
2980 //    60  StoreP  45  50  20   ... alias_index=4
2981 //    70  LoadP    _  60  30   ... alias_index=6
2982 //    80  Phi     75  50  60   Memory alias_index=4
2983 //    90  LoadP    _  80  30   ... alias_index=6
2984 //   100  LoadP    _  80  20   ... alias_index=4
2985 //
2986 // In phase 2, new memory inputs are computed for the loads and stores,
2987 // and a new version of the phi is created.  In phase 4, the inputs to
2988 // node 80 are updated and then the memory nodes are updated with the
2989 // values computed in phase 2.  This results in:
2990 //
2991 //     7 Parm #memory
2992 //    10  ConI  "12"
2993 //    19  CheckCastPP   "Foo"
2994 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
2995 //    29  CheckCastPP   "Foo"  iid=24
2996 //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
2997 //
2998 //    40  StoreP  25  7   20   ... alias_index=4
2999 //    50  StoreP  35  7   30   ... alias_index=6
3000 //    60  StoreP  45  40  20   ... alias_index=4
3001 //    70  LoadP    _  50  30   ... alias_index=6
3002 //    80  Phi     75  40  60   Memory alias_index=4
3003 //   120  Phi     75  50  50   Memory alias_index=6
3004 //    90  LoadP    _ 120  30   ... alias_index=6
3005 //   100  LoadP    _  80  20   ... alias_index=4
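//
// For intuition only, a hypothetical source fragment with the shape of this
// example (field and variable names are purely illustrative) would be:
//
//   Foo a = ...;         // 19: not split, stays on the general slice (alias_index=4)
//   Foo b = new Foo();   // 29: does not escape, gets its own instance slice
//   a.f = p; b.f = q; a.f = r;
//   ... = b.f;           // after splitting, reads only b's slice (alias_index=6)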
3006 //
3007 void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist, GrowableArray<ArrayCopyNode*> &arraycopy_worklist) {
3008   GrowableArray<Node *>  memnode_worklist;
3009   GrowableArray<PhiNode *>  orig_phis;
3010   PhaseIterGVN  *igvn = _igvn;
3011   uint new_index_start = (uint) _compile->num_alias_types();
3012   Arena* arena = Thread::current()->resource_area();
3013   VectorSet visited(arena);
3014   ideal_nodes.clear(); // Reset for use with set_map/get_map.
3015   uint unique_old = _compile->unique();
3016 
3017   //  Phase 1:  Process possible allocations from alloc_worklist.
3018   //  Create instance types for the CheckCastPP for allocations where possible.
3019   //
3020   // (Note: don't forget to change the order of the second AddP node on
3021   //  the alloc_worklist if the order of the worklist processing is changed,
3022   //  see the comment in find_second_addp().)
3023   //
3024   while (alloc_worklist.length() != 0) {
3025     Node *n = alloc_worklist.pop();
3026     uint ni = n->_idx;
3027     if (n->is_Call()) {
3028       CallNode *alloc = n->as_Call();
3029       // copy escape information to call node
3030       PointsToNode* ptn = ptnode_adr(alloc->_idx);
3031       PointsToNode::EscapeState es = ptn->escape_state();
3032       // We have an allocation or call which returns a Java object;
3033       // see if it is unescaped.
3034       if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable())
3035         continue;
3036       // Find CheckCastPP for the allocate or for the return value of a call
3037       n = alloc->result_cast();
3038       if (n == NULL) {            // No uses except Initialize node
3039         if (alloc->is_Allocate()) {
3040           // Set the scalar_replaceable flag for allocation
3041           // so it could be eliminated if it has no uses.
3042           alloc->as_Allocate()->_is_scalar_replaceable = true;
3043         }
3044         if (alloc->is_CallStaticJava()) {
3045           // Set the scalar_replaceable flag for boxing method
3046           // so it could be eliminated if it has no uses.
3047           alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
3048         }
3049         continue;
3050       }
3051       if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
3052         assert(!alloc->is_Allocate(), "allocation should have unique type");
3053         continue;
3054       }
3055 
3056       // The inline code for Object.clone() casts the allocation result to
3057       // java.lang.Object and then to the actual type of the allocated
3058       // object. Detect this case and use the second cast.
3059       // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when
3060       // the allocation result is cast to java.lang.Object and then
3061       // to the actual Array type.
3062       if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
3063           && (alloc->is_AllocateArray() ||
3064               igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {
3065         Node *cast2 = NULL;
3066         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3067           Node *use = n->fast_out(i);
3068           if (use->is_CheckCastPP()) {
3069             cast2 = use;
3070             break;
3071           }
3072         }
3073         if (cast2 != NULL) {
3074           n = cast2;
3075         } else {
3076           // Not scalar replaceable if the allocation type is not known statically
3077           // (reflection allocation); the object can't be restored during
3078           // deoptimization without a precise type.
3079           continue;
3080         }
3081       }
3082 
3083       const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
3084       if (t == NULL)
3085         continue;  // not a TypeOopPtr
3086       if (!t->klass_is_exact())
3087         continue; // not a unique type
3088 
3089       if (alloc->is_Allocate()) {
3090         // Set the scalar_replaceable flag for allocation
3091         // so it could be eliminated.
3092         alloc->as_Allocate()->_is_scalar_replaceable = true;
3093       }
3094       if (alloc->is_CallStaticJava()) {
3095         // Set the scalar_replaceable flag for boxing method
3096         // so it could be eliminated.
3097         alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
3098       }
3099       set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state
3100       // in order for an object to be scalar-replaceable, it must be:
3101       //   - a direct allocation (not a call returning an object)
3102       //   - non-escaping
3103       //   - eligible to be a unique type
3104       //   - not determined to be ineligible by escape analysis
3105       set_map(alloc, n);
3106       set_map(n, alloc);
3107       const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
3108       igvn->hash_delete(n);
3109       igvn->set_type(n,  tinst);
3110       n->raise_bottom_type(tinst);
3111       igvn->hash_insert(n);
3112       record_for_optimizer(n);
3113       // Allocate an alias index for the header fields. Accesses to
3114       // the header emitted during macro expansion wouldn't have
3115       // correct memory state otherwise.
3116       _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
3117       _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
3118       if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
3119 
3120         // First, put all Field edges from the Connection Graph on the worklist;
3121         // this is more accurate than pushing the immediate users from the Ideal Graph.
3122         for (EdgeIterator e(ptn); e.has_next(); e.next()) {
3123           PointsToNode* tgt = e.get();
3124           if (tgt->is_Arraycopy()) {
3125             continue;
3126           }
3127           Node* use = tgt->ideal_node();
3128           assert(tgt->is_Field() && use->is_AddP(),
3129                  "only AddP nodes are Field edges in CG");
3130           if (use->outcnt() > 0) { // Don't process dead nodes
3131             Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
3132             if (addp2 != NULL) {
3133               assert(alloc->is_AllocateArray(),"array allocation was expected");
3134               alloc_worklist.append_if_missing(addp2);
3135             }
3136             alloc_worklist.append_if_missing(use);
3137           }
3138         }
3139 
3140         // An allocation may have an Initialize which has raw stores. Scan
3141         // the users of the raw allocation result and push AddP users
3142         // on alloc_worklist.
3143         Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
3144         assert (raw_result != NULL, "must have an allocation result");
3145         for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
3146           Node *use = raw_result->fast_out(i);
3147           if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
3148             Node* addp2 = find_second_addp(use, raw_result);
3149             if (addp2 != NULL) {
3150               assert(alloc->is_AllocateArray(),"array allocation was expected");
3151               alloc_worklist.append_if_missing(addp2);
3152             }
3153             alloc_worklist.append_if_missing(use);
3154           } else if (use->is_MemBar()) {
3155             memnode_worklist.append_if_missing(use);
3156           }
3157         }
3158       }
3159     } else if (n->is_AddP()) {
3160       JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
3161       if (jobj == NULL || jobj == phantom_obj) {
3162 #ifdef ASSERT
3163         ptnode_adr(get_addp_base(n)->_idx)->dump();
3164         ptnode_adr(n->_idx)->dump();
3165         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3166 #endif
3167         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
3168         return;
3169       }
3170       Node *base = get_map(jobj->idx());  // CheckCastPP node
3171       if (!split_AddP(n, base)) continue; // wrong type from dead path
3172     } else if (n->is_Phi() ||
3173                n->is_CheckCastPP() ||
3174                n->is_EncodeP() ||
3175                n->is_DecodeN() ||
3176                (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
3177       if (visited.test_set(n->_idx)) {
3178         assert(n->is_Phi(), "loops only through Phi's");
3179         continue;  // already processed
3180       }
3181       JavaObjectNode* jobj = unique_java_object(n);
3182       if (jobj == NULL || jobj == phantom_obj) {
3183 #ifdef ASSERT
3184         ptnode_adr(n->_idx)->dump();
3185         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3186 #endif
3187         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
3188         return;
3189       } else {
3190         Node *val = get_map(jobj->idx());   // CheckCastPP node
3191         TypeNode *tn = n->as_Type();
3192         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3193         assert(tinst != NULL && tinst->is_known_instance() &&
3194                tinst->instance_id() == jobj->idx() , "instance type expected.");
3195 
3196         const Type *tn_type = igvn->type(tn);
3197         const TypeOopPtr *tn_t;
3198         if (tn_type->isa_narrowoop()) {
3199           tn_t = tn_type->make_ptr()->isa_oopptr();
3200         } else {
3201           tn_t = tn_type->isa_oopptr();
3202         }
3203         if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
3204           if (tn_type->isa_narrowoop()) {
3205             tn_type = tinst->make_narrowoop();
3206           } else {
3207             tn_type = tinst;
3208           }
3209           igvn->hash_delete(tn);
3210           igvn->set_type(tn, tn_type);
3211           tn->set_type(tn_type);
3212           igvn->hash_insert(tn);
3213           record_for_optimizer(n);
3214         } else {
3215           assert(tn_type == TypePtr::NULL_PTR ||
3216                  tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()),
3217                  "unexpected type");
3218           continue; // Skip dead path with different type
3219         }
3220       }
3221     } else {
3222       debug_only(n->dump();)
3223       assert(false, "EA: unexpected node");
3224       continue;
3225     }
3226     // push allocation's users on appropriate worklist
3227     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3228       Node *use = n->fast_out(i);
3229       if(use->is_Mem() && use->in(MemNode::Address) == n) {
3230         // Load/store to instance's field
3231         memnode_worklist.append_if_missing(use);
3232       } else if (use->is_MemBar()) {
3233         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3234           memnode_worklist.append_if_missing(use);
3235         }
3236       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3237         Node* addp2 = find_second_addp(use, n);
3238         if (addp2 != NULL) {
3239           alloc_worklist.append_if_missing(addp2);
3240         }
3241         alloc_worklist.append_if_missing(use);
3242       } else if (use->is_Phi() ||
3243                  use->is_CheckCastPP() ||
3244                  use->is_EncodeNarrowPtr() ||
3245                  use->is_DecodeNarrowPtr() ||
3246                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3247         alloc_worklist.append_if_missing(use);
3248 #ifdef ASSERT
3249       } else if (use->is_Mem()) {
3250         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3251       } else if (use->is_MergeMem()) {
3252         assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3253       } else if (use->is_SafePoint()) {
3254         // Look for MergeMem nodes for calls which reference unique allocation
3255         // (through CheckCastPP nodes) even for debug info.
3256         Node* m = use->in(TypeFunc::Memory);
3257         if (m->is_MergeMem()) {
3258           assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3259         }
3260       } else if (use->Opcode() == Op_EncodeISOArray) {
3261         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3262           // EncodeISOArray overwrites destination array
3263           memnode_worklist.append_if_missing(use);
3264         }
3265       } else {
3266         uint op = use->Opcode();
3267         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
3268             (use->in(MemNode::Memory) == n)) {
3269           // They overwrite the memory edge corresponding to the destination array.
3270           memnode_worklist.append_if_missing(use);
3271         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3272               op == Op_CastP2X || op == Op_StoreCM ||
3273               op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
3274               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3275               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
3276               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
3277           n->dump();
3278           use->dump();
3279           assert(false, "EA: missing allocation reference path");
3280         }
3281 #endif
3282       }
3283     }
3284 
3285   }
3286 
3287   // Go over all ArrayCopy nodes and if one of the inputs has a unique
3288   // type, record it in the ArrayCopy node so we know what memory this
3289 // node uses/modifies.
3290   for (int next = 0; next < arraycopy_worklist.length(); next++) {
3291     ArrayCopyNode* ac = arraycopy_worklist.at(next);
3292     Node* dest = ac->in(ArrayCopyNode::Dest);
3293     if (dest->is_AddP()) {
3294       dest = get_addp_base(dest);
3295     }
3296     JavaObjectNode* jobj = unique_java_object(dest);
3297     if (jobj != NULL) {
3298       Node *base = get_map(jobj->idx());
3299       if (base != NULL) {
3300         const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
3301         ac->_dest_type = base_t;
3302       }
3303     }
3304     Node* src = ac->in(ArrayCopyNode::Src);
3305     if (src->is_AddP()) {
3306       src = get_addp_base(src);
3307     }
3308     jobj = unique_java_object(src);
3309     if (jobj != NULL) {
3310       Node* base = get_map(jobj->idx());
3311       if (base != NULL) {
3312         const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
3313         ac->_src_type = base_t;
3314       }
3315     }
3316   }
3317 
3318   // New alias types were created in split_AddP().
3319   uint new_index_end = (uint) _compile->num_alias_types();
3320   assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");
3321 
3322   //  Phase 2:  Process MemNodes from memnode_worklist.  Compute the new address type and
3323   //            compute new values for the Memory inputs (the Memory inputs are not
3324   //            actually updated until Phase 4).
3325   if (memnode_worklist.length() == 0)
3326     return;  // nothing to do
3327   while (memnode_worklist.length() != 0) {
3328     Node *n = memnode_worklist.pop();
3329     if (visited.test_set(n->_idx))
3330       continue;
3331     if (n->is_Phi() || n->is_ClearArray()) {
3332       // we don't need to do anything, but the users must be pushed
3333     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
3334       // we don't need to do anything, but the users must be pushed
3335       n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
3336       if (n == NULL)
3337         continue;
3338     } else if (n->Opcode() == Op_StrCompressedCopy ||
3339                n->Opcode() == Op_EncodeISOArray) {
3340       // get the memory projection
3341       n = n->find_out_with(Op_SCMemProj);
3342       assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
3343     } else {
3344       assert(n->is_Mem(), "memory node required.");
3345       Node *addr = n->in(MemNode::Address);
3346       const Type *addr_t = igvn->type(addr);
3347       if (addr_t == Type::TOP)
3348         continue;
3349       assert (addr_t->isa_ptr() != NULL, "pointer type required.");
3350       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
3351       assert ((uint)alias_idx < new_index_end, "wrong alias index");
3352       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
3353       if (_compile->failing()) {
3354         return;
3355       }
3356       if (mem != n->in(MemNode::Memory)) {
3357         // We delay the memory edge update since we need the old one in
3358         // the MergeMem code below when instance memory slices are separated.
3359         set_map(n, mem);
3360       }
3361       if (n->is_Load()) {
3362         continue;  // don't push users
3363       } else if (n->is_LoadStore()) {
3364         // get the memory projection
3365         n = n->find_out_with(Op_SCMemProj);
3366         assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
3367       }
3368     }
3369     // push user on appropriate worklist
3370     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3371       Node *use = n->fast_out(i);
3372       if (use->is_Phi() || use->is_ClearArray()) {
3373         memnode_worklist.append_if_missing(use);
3374       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
3375         if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
3376           continue;
3377         memnode_worklist.append_if_missing(use);
3378       } else if (use->is_MemBar()) {
3379         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3380           memnode_worklist.append_if_missing(use);
3381         }
3382 #ifdef ASSERT
3383       } else if(use->is_Mem()) {
3384         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
3385       } else if (use->is_MergeMem()) {
3386         assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3387       } else if (use->Opcode() == Op_EncodeISOArray) {
3388         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3389           // EncodeISOArray overwrites destination array
3390           memnode_worklist.append_if_missing(use);
3391         }
3392       } else {
3393         uint op = use->Opcode();
3394         if ((use->in(MemNode::Memory) == n) &&
3395             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
3396           // They overwrite the memory edge corresponding to the destination array.
3397           memnode_worklist.append_if_missing(use);
3398         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
3399               op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
3400               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3401               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
3402           n->dump();
3403           use->dump();
3404           assert(false, "EA: missing memory path");
3405         }
3406 #endif
3407       }
3408     }
3409   }
3410 
3411   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
3412   //            Walk each memory slice, moving the first node encountered of each
3413   //            instance type to the input corresponding to its alias index.
3414   uint length = _mergemem_worklist.length();
3415   for( uint next = 0; next < length; ++next ) {
3416     MergeMemNode* nmm = _mergemem_worklist.at(next);
3417     assert(!visited.test_set(nmm->_idx), "should not be visited before");
3418     // Note: we don't want to use MergeMemStream here because we only want to
3419     // scan inputs which exist at the start, not ones we add during processing.
3420     // Note 2: MergeMem may already contain instance memory slices added
3421     // during the find_inst_mem() calls when memory nodes were processed above.
3422     igvn->hash_delete(nmm);
3423     uint nslices = MIN2(nmm->req(), new_index_start);
3424     for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
3425       Node* mem = nmm->in(i);
3426       Node* cur = NULL;
3427       if (mem == NULL || mem->is_top())
3428         continue;
3429       // First, update mergemem by moving memory nodes to corresponding slices
3430       // if their type became more precise since this mergemem was created.
3431       while (mem->is_Mem()) {
3432         const Type *at = igvn->type(mem->in(MemNode::Address));
3433         if (at != Type::TOP) {
3434           assert (at->isa_ptr() != NULL, "pointer type required.");
3435           uint idx = (uint)_compile->get_alias_index(at->is_ptr());
3436           if (idx == i) {
3437             if (cur == NULL)
3438               cur = mem;
3439           } else {
3440             if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
3441               nmm->set_memory_at(idx, mem);
3442             }
3443           }
3444         }
3445         mem = mem->in(MemNode::Memory);
3446       }
3447       nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
3448       // Find any instance of the current type if we haven't already encountered
3449       // a memory slice of the instance along the memory chain.
3450       for (uint ni = new_index_start; ni < new_index_end; ni++) {
3451         if((uint)_compile->get_general_index(ni) == i) {
3452           Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
3453           if (nmm->is_empty_memory(m)) {
3454             Node* result = find_inst_mem(mem, ni, orig_phis);
3455             if (_compile->failing()) {
3456               return;
3457             }
3458             nmm->set_memory_at(ni, result);
3459           }
3460         }
3461       }
3462     }
3463     // Find the rest of the instance values.
3464     for (uint ni = new_index_start; ni < new_index_end; ni++) {
3465       const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
3466       Node* result = step_through_mergemem(nmm, ni, tinst);
3467       if (result == nmm->base_memory()) {
3468         // Didn't find instance memory, search through general slice recursively.
3469         result = nmm->memory_at(_compile->get_general_index(ni));
3470         result = find_inst_mem(result, ni, orig_phis);
3471         if (_compile->failing()) {
3472           return;
3473         }
3474         nmm->set_memory_at(ni, result);
3475       }
3476     }
3477     igvn->hash_insert(nmm);
3478     record_for_optimizer(nmm);
3479   }
3480 
3481   //  Phase 4:  Update the inputs of non-instance memory Phis and
3482   //            the Memory input of memnodes
3483   // First update the inputs of any non-instance Phis from
3484   // which we split out an instance Phi.  Note we don't have
3485   // to recursively process Phis encountered on the input memory
3486   // chains, as is done in split_memory_phi(), since they will
3487   // also be processed here.
3488   for (int j = 0; j < orig_phis.length(); j++) {
3489     PhiNode *phi = orig_phis.at(j);
3490     int alias_idx = _compile->get_alias_index(phi->adr_type());
3491     igvn->hash_delete(phi);
3492     for (uint i = 1; i < phi->req(); i++) {
3493       Node *mem = phi->in(i);
3494       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
3495       if (_compile->failing()) {
3496         return;
3497       }
3498       if (mem != new_mem) {
3499         phi->set_req(i, new_mem);
3500       }
3501     }
3502     igvn->hash_insert(phi);
3503     record_for_optimizer(phi);
3504   }
3505 
3506   // Update the memory inputs of MemNodes with the value we computed
3507   // in Phase 2 and move the memory users of stores to the corresponding memory slices.
3508   // Disable memory split verification code until the fix for 6984348.
3509   // Currently it produces false negative results since it does not cover all cases.
3510 #if 0 // ifdef ASSERT
3511   visited.Reset();
3512   Node_Stack old_mems(arena, _compile->unique() >> 2);
3513 #endif
3514   for (uint i = 0; i < ideal_nodes.size(); i++) {
3515     Node*    n = ideal_nodes.at(i);
3516     Node* nmem = get_map(n->_idx);
3517     assert(nmem != NULL, "sanity");
3518     if (n->is_Mem()) {
3519 #if 0 // ifdef ASSERT
3520       Node* old_mem = n->in(MemNode::Memory);
3521       if (!visited.test_set(old_mem->_idx)) {
3522         old_mems.push(old_mem, old_mem->outcnt());
3523       }
3524 #endif
3525       assert(n->in(MemNode::Memory) != nmem, "sanity");
3526       if (!n->is_Load()) {
3527         // Move memory users of a store first.
3528         move_inst_mem(n, orig_phis);
3529       }
3530       // Now update memory input
3531       igvn->hash_delete(n);
3532       n->set_req(MemNode::Memory, nmem);
3533       igvn->hash_insert(n);
3534       record_for_optimizer(n);
3535     } else {
3536       assert(n->is_Allocate() || n->is_CheckCastPP() ||
3537              n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
3538     }
3539   }
3540 #if 0 // ifdef ASSERT
3541   // Verify that memory was split correctly
3542   while (old_mems.is_nonempty()) {
3543     Node* old_mem = old_mems.node();
3544     uint  old_cnt = old_mems.index();
3545     old_mems.pop();
3546     assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
3547   }
3548 #endif
3549 }
3550 
3551 #ifndef PRODUCT
3552 static const char *node_type_names[] = {
3553   "UnknownType",
3554   "JavaObject",
3555   "LocalVar",
3556   "Field",
3557   "Arraycopy"
3558 };
3559 
3560 static const char *esc_names[] = {
3561   "UnknownEscape",
3562   "NoEscape",
3563   "ArgEscape",
3564   "GlobalEscape"
3565 };
3566 
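// Dump format: node type, escape state (with the fields' escape state in
// parentheses), an optional "NSR" for a non-scalar-replaceable JavaObject,
// field offset and base nodes, then "[ edges [ uses ]]" where a trailing "P"
// marks a JavaObject, "F" a Field, "cp" an Arraycopy and "b" a base use,
// followed by the ideal node.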
3567 void PointsToNode::dump(bool print_state) const {
3568   NodeType nt = node_type();
3569   tty->print("%s ", node_type_names[(int) nt]);
3570   if (print_state) {
3571     EscapeState es = escape_state();
3572     EscapeState fields_es = fields_escape_state();
3573     tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
3574     if (nt == PointsToNode::JavaObject && !this->scalar_replaceable())
3575       tty->print("NSR ");
3576   }
3577   if (is_Field()) {
3578     FieldNode* f = (FieldNode*)this;
3579     if (f->is_oop())
3580       tty->print("oop ");
3581     if (f->offset() > 0)
3582       tty->print("+%d ", f->offset());
3583     tty->print("(");
3584     for (BaseIterator i(f); i.has_next(); i.next()) {
3585       PointsToNode* b = i.get();
3586       tty->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : ""));
3587     }
3588     tty->print(" )");
3589   }
3590   tty->print("[");
3591   for (EdgeIterator i(this); i.has_next(); i.next()) {
3592     PointsToNode* e = i.get();
3593     tty->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
3594   }
3595   tty->print(" [");
3596   for (UseIterator i(this); i.has_next(); i.next()) {
3597     PointsToNode* u = i.get();
3598     bool is_base = false;
3599     if (PointsToNode::is_base_use(u)) {
3600       is_base = true;
3601       u = PointsToNode::get_use_node(u)->as_Field();
3602     }
3603     tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");
3604   }
3605   tty->print(" ]]  ");
3606   if (_node == NULL)
3607     tty->print_cr("<null>");
3608   else
3609     _node->dump();
3610 }
3611 
3612 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
3613   bool first = true;
3614   int ptnodes_length = ptnodes_worklist.length();
3615   for (int i = 0; i < ptnodes_length; i++) {
3616     PointsToNode *ptn = ptnodes_worklist.at(i);
3617     if (ptn == NULL || !ptn->is_JavaObject())
3618       continue;
3619     PointsToNode::EscapeState es = ptn->escape_state();
3620     if ((es != PointsToNode::NoEscape) && !Verbose) {
3621       continue;
3622     }
3623     Node* n = ptn->ideal_node();
3624     if (n->is_Allocate() || (n->is_CallStaticJava() &&
3625                              n->as_CallStaticJava()->is_boxing_method())) {
3626       if (first) {
3627         tty->cr();
3628         tty->print("======== Connection graph for ");
3629         _compile->method()->print_short_name();
3630         tty->cr();
3631         first = false;
3632       }
3633       ptn->dump();
3634       // Print all locals and fields which reference this allocation
3635       for (UseIterator j(ptn); j.has_next(); j.next()) {
3636         PointsToNode* use = j.get();
3637         if (use->is_LocalVar()) {
3638           use->dump(Verbose);
3639         } else if (Verbose) {
3640           use->dump();
3641         }
3642       }
3643       tty->cr();
3644     }
3645   }
3646 }
3647 #endif