/*
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1ThreadLocalData.hpp"
#endif // INCLUDE_G1GC
#if INCLUDE_ZGC
#include "gc/z/c2/zBarrierSetC2.hpp"
#endif
#if INCLUDE_SHENANDOAHGC
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#endif

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  // Add ConP(#NULL) and ConN(#NULL) nodes.
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
  _pcmp_neq = NULL; // Should be initialized
  _pcmp_eq  = NULL;
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
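  // For example (illustrative Java, not from any particular workload):
  //
  //   Point p = new Point(x, y);        // Allocate macro node -> EA candidate
  //   synchronized (p) { use(p); }      // Lock macro node     -> EA candidate
  //
  // A method with neither allocations nor locks on non-constant objects
  // gains nothing from EA, so the analysis is skipped for it.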
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate())
      return true;
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con()))
        return true;
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  ResourceMark rm;

  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non-escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0)
    igvn->hash_delete(oop_null);
  if (noop_null->outcnt() == 0)
    igvn->hash_delete(noop_null);
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<Node*> storestore_worklist;
  GrowableArray<ArrayCopyNode*>  arraycopy_worklist;
  GrowableArray<PointsToNode*>   ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_worklist;
  GrowableArray<FieldNode*>      oop_fields_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), NULL);  // preallocate space
  // Initialize worklist
  if (C->root() != NULL) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on the ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is a Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != NULL && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only the results of allocations and java static calls are interesting.
          non_escaped_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    if (n->is_MergeMem()) {
      // Collect all MergeMem nodes to add memory slices for
      // scalar replaceable objects in split_unique_types().
      _mergemem_worklist.append(n->as_MergeMem());
    } else if (OptimizePtrCompare && n->is_Cmp() &&
               (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
      // Collect compare pointers nodes.
      ptr_cmp_worklist.append(n);
    } else if (n->is_MemBarStoreStore()) {
      // Collect all MemBarStoreStore nodes so that depending on the
      // escape status of the associated Allocate node some of them
      // may be eliminated.
      storestore_worklist.append(n);
    } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
               (n->req() > MemBarNode::Precedent)) {
      record_for_optimizer(n);
#ifdef ASSERT
    } else if (n->is_AddP()) {
      // Collect address nodes for graph verification.
      addp_worklist.append(n);
#endif
    } else if (n->is_ArrayCopy()) {
      // Keep a list of ArrayCopy nodes so that if one of its inputs is
      // non-escaping, we can record a unique type.
      arraycopy_worklist.append(n->as_ArrayCopy());
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      ideal_nodes.push(m);
    }
  }
  if (non_escaped_worklist.length() == 0) {
    _collecting = false;
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while (delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }
  int ptnodes_length = ptnodes_worklist.length();

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and all
    // local vars have edges.
    _verify = true;
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // The bytecode analyzer BCEscapeAnalyzer, used for Call nodes
  // processing, calls CI to resolve symbols (types, fields, methods)
  // referenced in bytecode. During symbol resolution the VM may throw
  // an exception which CI cleans and converts to a compilation failure.
  if (C->failing())  return false;

  // 2. Finish Graph construction by propagating references to all
  //    java objects through the graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or hit time or iterations limits.
    _collecting = false;
    return false;
  }

  // 3. Adjust scalar_replaceable state of non-escaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (n->is_CallStaticJava()) {
      n->as_CallStaticJava()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn);
      if (ptn->scalar_replaceable()) {
        alloc_worklist.append(ptn->ideal_node());
      }
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  if (has_scalar_replaceable_candidates &&
      C->AliasLevel() >= 3 && EliminateAllocations) {
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist);
    if (C->failing())  return false;
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    } else if (C->AliasLevel() < 3) {
      tty->print(" since AliasLevel < 3 ===");
    }
    tty->cr();
#endif
  }
  return has_non_escaping_obj;
}

// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != NULL) {
    Node* adr = n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == NULL ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape,
                           adr, delayed_worklist);
  }
}

// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method should not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != NULL)
    return; // No need to redefine PointsTo node during first iteration.

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on the IGVN worklist to process them during
      // the first IGVN optimization when escape information is still available.
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != NULL && strcmp(name, "uncommon_trap") == 0)
          return; // Skip uncommon traps
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
      if ((n->as_Call()->returns_pointer() &&
           n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
          (n->is_CallStaticJava() &&
           n->as_CallStaticJava()->is_boxing_method())) {
        add_call_node(n->as_Call());
      }
    }
    return;
  }
  // Put this check here to process call arguments since some call nodes
  // point to phantom_obj.
  if (n_ptn == phantom_obj || n_ptn == null_obj)
    return; // Skip predefined nodes.

  int opcode = n->Opcode();
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      // Field nodes are created for all field types. They are used in
      // adjust_scalar_replaceable_state() and split_unique_types().
      // Note, non-oop fields will have only base edges in Connection
      // Graph because such fields are not used for oop loads and stores.
      int offset = address_offset(n, igvn);
      add_field(n, PointsToNode::NoEscape, offset);
      if (ptn_base == NULL) {
        delayed_worklist->push(n); // Process it later.
      } else {
        n_ptn = ptnode_adr(n_idx);
        add_base(n_ptn->as_Field(), ptn_base);
      }
      break;
    }
    case Op_CastX2P: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), delayed_worklist);
      break;
    }
    case Op_CMoveP: {
      add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during first iteration because some could be
      // not defined yet.
      delayed_worklist->push(n);
      break;
    }
    case Op_ConP:
    case Op_ConN:
    case Op_ConNKlass: {
      // assume all oop constants globally escape except for null
      PointsToNode::EscapeState es;
      const Type* t = igvn->type(n);
      if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
        es = PointsToNode::NoEscape;
      } else {
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(n, es);
      break;
    }
    case Op_CreateEx: {
      // assume that all exception objects globally escape
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadKlass:
    case Op_LoadNKlass: {
      // Unknown class is loaded
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadP:
#if INCLUDE_ZGC
    case Op_LoadBarrierSlowReg:
    case Op_LoadBarrierWeakSlowReg:
#endif
    case Op_LoadN:
    case Op_LoadPLocked: {
      add_objload_to_connection_graph(n, delayed_worklist);
      break;
    }
    case Op_Parm: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_PartialSubtypeCheck: {
      // Produces Null or notNull and is used only in CmpP so
      // phantom_obj could be used.
      map_ideal_node(n, phantom_obj); // Result is unknown
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during first iteration because some could be
        // not defined yet.
        delayed_worklist->push(n);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape,
                               n->in(0), delayed_worklist);
      }
#if INCLUDE_ZGC
      else if (UseZGC) {
        if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
          add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), delayed_worklist);
        }
      }
#endif
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), delayed_worklist);
      }
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
#if INCLUDE_SHENANDOAHGC
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
#endif
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      // fallthrough
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_StorePConditional:
#if INCLUDE_SHENANDOAHGC
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
#endif
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN: {
      Node* adr = n->in(MemNode::Address);
      const Type *adr_type = igvn->type(adr);
      adr_type = adr_type->make_ptr();
      if (adr_type == NULL) {
        break; // skip dead nodes
      }
      if (   adr_type->isa_oopptr()
          || (   (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
              && adr_type == TypeRawPtr::NOTNULL
              && adr->in(AddPNode::Address)->is_Proj()
              && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
        delayed_worklist->push(n); // Process it later.
#ifdef ASSERT
        assert(adr->is_AddP(), "expecting an AddP");
        if (adr_type == TypeRawPtr::NOTNULL) {
          // Verify a raw address for a store captured by Initialize node.
          int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
          assert(offs != Type::OffsetBot, "offset must be a constant");
        }
#endif
      } else {
        // Ignore the copy of the displaced header to the BoxNode (OSR compilation).
        if (adr->is_BoxLock())
          break;
        // Stored value escapes in unsafe access.
        if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
          // Pointer stores in G1 barriers look like unsafe access.
          // Ignore such stores so that non-escaping allocations
          // can still be scalar replaced.
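          // The shape matched below is, roughly, a raw StoreP into a buffer
          // that was loaded from a thread-local queue (a sketch of the ideal
          // subgraph, not an exact pattern):
          //
          //   StoreP(AddP(LoadP(AddP(ThreadLocal, +buffer_offset)), index), oop)
          //
          // Such stores publish the oop only to the GC's SATB or card queue,
          // not to the Java heap, so they do not make the stored value escape.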
#if INCLUDE_G1GC || INCLUDE_SHENANDOAHGC
          if ((UseG1GC || UseShenandoahGC) && adr->is_AddP()) {
            Node* base = get_addp_base(adr);
            if (base->Opcode() == Op_LoadP &&
                base->in(MemNode::Address)->is_AddP()) {
              adr = base->in(MemNode::Address);
              Node* tls = get_addp_base(adr);
              if (tls->Opcode() == Op_ThreadLocal) {
                int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
#if INCLUDE_G1GC && INCLUDE_SHENANDOAHGC
                const int buf_offset = in_bytes(UseG1GC ? G1ThreadLocalData::satb_mark_queue_buffer_offset()
                                                        : ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
#elif INCLUDE_G1GC
                const int buf_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
#else
                const int buf_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
#endif
                if (offs == buf_offset) {
                  break; // G1 pre barrier previous oop value store.
                }
                if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
                  break; // G1 post barrier card address store.
                }
              }
            }
          }
#endif
          delayed_worklist->push(n); // Process unsafe access later.
          break;
        }
#ifdef ASSERT
        n->dump(1);
        assert(false, "not unsafe or G1 barrier raw StoreP");
#endif
      }
      break;
    }
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      add_local_var(n, PointsToNode::ArgEscape);
      delayed_worklist->push(n); // Process it later.
      break;
    }
    case Op_ThreadLocal: {
      add_java_object(n, PointsToNode::ArgEscape);
      break;
    }
#if INCLUDE_SHENANDOAHGC
    case Op_ShenandoahEnqueueBarrier:
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
      break;
    case Op_ShenandoahLoadReferenceBarrier:
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), delayed_worklist);
#endif
    default:
      ; // Do nothing for nodes not related to EA.
  }
  return;
}

#ifdef ASSERT
#define ELSE_FAIL(name)                                 \
      /* Should not be called for non-pointer type. */  \
      n->dump(1);                                       \
      assert(false, name);                              \
      break;
#else
#define ELSE_FAIL(name) \
      break;
#endif

// Add final simple edges to graph.
void ConnectionGraph::add_final_edges(Node *n) {
  PointsToNode* n_ptn = ptnode_adr(n->_idx);
#ifdef ASSERT
  if (_verify && n_ptn->is_JavaObject())
    return; // This method does not change graph for JavaObject.
#endif

  if (n->is_Call()) {
    process_call_arguments(n->as_Call());
    return;
  }
  assert(n->is_Store() || n->is_LoadStore() ||
         (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
         "node should be registered already");
  int opcode = n->Opcode();
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      assert(ptn_base != NULL, "field's base should be registered");
      add_base(n_ptn->as_Field(), ptn_base);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), NULL);
      break;
    }
    case Op_CMoveP: {
      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == NULL)
          continue;  // ignore NULL
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n)
          continue;  // ignore top or inputs which go back to this node
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_LoadP:
#if INCLUDE_ZGC
    case Op_LoadBarrierSlowReg:
    case Op_LoadBarrierWeakSlowReg:
#endif
    case Op_LoadN:
    case Op_LoadPLocked: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = _igvn->type(n);
      if (t->make_ptr() != NULL) {
        Node* adr = n->in(MemNode::Address);
        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
        break;
      }
      ELSE_FAIL("Op_LoadP");
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        for (uint i = 1; i < n->req(); i++) {
          Node* in = n->in(i);
          if (in == NULL)
            continue;  // ignore NULL
          Node* uncast_in = in->uncast();
          if (uncast_in->is_top() || uncast_in == n)
            continue;  // ignore top or inputs which go back to this node
          PointsToNode* ptn = ptnode_adr(in->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
        break;
      }
      ELSE_FAIL("Op_Phi");
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
        break;
      }
#if INCLUDE_ZGC
      else if (UseZGC) {
        if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
          add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), NULL);
          break;
        }
      }
#endif
      ELSE_FAIL("Op_Proj");
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), NULL);
        break;
      }
      ELSE_FAIL("Op_Return");
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_StorePConditional:
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
#if INCLUDE_SHENANDOAHGC
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
#endif
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      Node* adr = n->in(MemNode::Address);
      const Type *adr_type = _igvn->type(adr);
      adr_type = adr_type->make_ptr();
#ifdef ASSERT
      if (adr_type == NULL) {
        n->dump(1);
        assert(adr_type != NULL, "dead node should not be on list");
        break;
      }
#endif
      if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
#if INCLUDE_SHENANDOAHGC
          opcode == Op_ShenandoahCompareAndExchangeN || opcode == Op_ShenandoahCompareAndExchangeP ||
#endif
          opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
      }
      if (   adr_type->isa_oopptr()
          || (   (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
              && adr_type == TypeRawPtr::NOTNULL
              && adr->in(AddPNode::Address)->is_Proj()
              && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
        // Point Address to Value
        PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
        assert(adr_ptn != NULL &&
               adr_ptn->as_Field()->is_oop(), "node should be registered");
        Node *val = n->in(MemNode::ValueIn);
        PointsToNode* ptn = ptnode_adr(val->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(adr_ptn, ptn);
        break;
      } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
        // Stored value escapes in unsafe access.
        Node *val = n->in(MemNode::ValueIn);
        PointsToNode* ptn = ptnode_adr(val->_idx);
        assert(ptn != NULL, "node should be registered");
        set_escape_state(ptn, PointsToNode::GlobalEscape);
        // Add edge to object for unsafe access with offset.
        PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
        assert(adr_ptn != NULL, "node should be registered");
        if (adr_ptn->is_Field()) {
          assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
          add_edge(adr_ptn, ptn);
        }
        break;
      }
      ELSE_FAIL("Op_StoreP");
    }
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      // char[]/byte[] arrays passed to string intrinsics do not escape but
      // they are not scalar replaceable. Adjust escape state for them.
      // Start from in(2) edge since in(1) is memory edge.
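      // For example (illustrative): StrEquals takes the two array addresses
      // as in(2) and in(3); each is usually an AddP into a char[]/byte[], so
      // the loop below walks back to the array's base object and links it to
      // this node's ArgEscape LocalVar. Non-pointer inputs such as the count
      // are skipped by the isa_ptr() check.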
      for (uint i = 2; i < n->req(); i++) {
        Node* adr = n->in(i);
        const Type* at = _igvn->type(adr);
        if (!adr->is_top() && at->isa_ptr()) {
          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
                 at->isa_ptr() != NULL, "expecting a pointer");
          if (adr->is_AddP()) {
            adr = get_addp_base(adr);
          }
          PointsToNode* ptn = ptnode_adr(adr->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
#if INCLUDE_SHENANDOAHGC
    case Op_ShenandoahEnqueueBarrier:
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
      break;
    case Op_ShenandoahLoadReferenceBarrier:
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), NULL);
      break;
#endif
    default: {
      // This method should be called only for EA specific nodes which may
      // miss some edges when they were created.
#ifdef ASSERT
      n->dump(1);
#endif
      guarantee(false, "unknown node");
    }
  }
  return;
}

void ConnectionGraph::add_call_node(CallNode* call) {
  assert(call->returns_pointer(), "only for call which returns pointer");
  uint call_idx = call->_idx;
  if (call->is_Allocate()) {
    Node* k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != NULL, "TypeKlassPtr required.");
    ciKlass* cik = kt->klass();
    PointsToNode::EscapeState es = PointsToNode::NoEscape;
    bool scalar_replaceable = true;
    if (call->is_AllocateArray()) {
      if (!cik->is_array_klass()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is not constant or too big.
          scalar_replaceable = false;
        }
      }
    } else {  // Allocate instance
      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
          cik->is_subclass_of(_compile->env()->Reference_klass()) ||
         !cik->is_instance_klass() || // StressReflectiveCode
         !cik->as_instance_klass()->can_be_instantiated() ||
          cik->as_instance_klass()->has_finalizer()) {
        es = PointsToNode::GlobalEscape;
      }
    }
    add_java_object(call, es);
    PointsToNode* ptn = ptnode_adr(call_idx);
    if (!scalar_replaceable && ptn->scalar_replaceable()) {
      ptn->set_scalar_replaceable(false);
    }
  } else if (call->is_CallStaticJava()) {
    // Call nodes could be different types:
    //
    // 1. CallDynamicJavaNode (what happened during call is unknown):
    //
    //    - mapped to GlobalEscape JavaObject node if oop is returned;
    //
    //    - all oop arguments are escaping globally;
    //
    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
    //
    //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
    //
    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
    //    - mapped to NoEscape JavaObject node if non-escaping object allocated
    //      during call is returned;
    //    - mapped to ArgEscape LocalVar node pointed to object arguments
    //      which are returned and do not escape during call;
    //
    //    - oop arguments escaping status is defined by bytecode analysis;
    //
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record whether the call's return value escapes.
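    // For example (illustrative): a not-inlined static factory such as
    //
    //   static Point make(int x, int y) { return new Point(x, y); }
    //
    // is recognized below by BCEscapeAnalyzer::is_return_allocated(), so the
    // call is mapped to a NoEscape JavaObject node (but not marked scalar
    // replaceable, since the allocation itself happens in the callee).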
    ciMethod* meth = call->as_CallJava()->method();
    if (meth == NULL) {
      const char* name = call->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
      // Returns a newly allocated unescaped object.
      add_java_object(call, PointsToNode::NoEscape);
      ptnode_adr(call_idx)->set_scalar_replaceable(false);
    } else if (meth->is_boxing_method()) {
      // Returns boxing object
      PointsToNode::EscapeState es;
      vmIntrinsics::ID intr = meth->intrinsic_id();
      if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
        // It does not escape if object is always allocated.
        es = PointsToNode::NoEscape;
      } else {
        // It escapes globally if object could be loaded from cache.
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(call, es);
    } else {
      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
      call_analyzer->copy_dependencies(_compile->dependencies());
      if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated unescaped object, simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
        add_java_object(call, PointsToNode::NoEscape);
        ptnode_adr(call_idx)->set_scalar_replaceable(false);
      } else {
        // Determine whether any arguments are returned.
        const TypeTuple* d = call->tf()->domain();
        bool ret_arg = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          if (d->field_at(i)->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
            ret_arg = true;
            break;
          }
        }
        if (ret_arg) {
          add_local_var(call, PointsToNode::ArgEscape);
        } else {
          // Returns unknown object.
          map_ideal_node(call, phantom_obj);
        }
      }
    }
  } else {
    // Some other type of call, assume the worst case:
    // returned value is unknown and globally escapes.
    assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
    map_ideal_node(call, phantom_obj);
  }
}

void ConnectionGraph::process_call_arguments(CallNode *call) {
  bool is_arraycopy = false;
  switch (call->Opcode()) {
#ifdef ASSERT
    case Op_Allocate:
    case Op_AllocateArray:
    case Op_Lock:
    case Op_Unlock:
      assert(false, "should be done already");
      break;
#endif
    case Op_ArrayCopy:
    case Op_CallLeafNoFP:
      // Most array copies are ArrayCopy nodes at this point but there
      // are still a few direct calls to the copy subroutines (See
      // PhaseStringOpts::copy_string())
      is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
        call->as_CallLeaf()->is_call_to_arraycopystub();
      // fall through
    case Op_CallLeaf: {
      // Stub calls, objects do not escape but they are not scalar replaceable.
      // Adjust escape state for outgoing arguments.
      const TypeTuple * d = call->tf()->domain();
      bool src_has_oops = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        Node *arg = call->in(i);
        if (arg == NULL) {
          continue;
        }
        const Type *aat = _igvn->type(arg);
        if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
          continue;
        if (arg->is_AddP()) {
          //
          // The inline_native_clone() case when the arraycopy stub is called
          // after the allocation before Initialize and CheckCastPP nodes.
          // Or the normal arraycopy for object arrays case.
1028 // 1029 // Set AddP's base (Allocate) as not scalar replaceable since 1030 // pointer to the base (with offset) is passed as argument. 1031 // 1032 arg = get_addp_base(arg); 1033 } 1034 PointsToNode* arg_ptn = ptnode_adr(arg->_idx); 1035 assert(arg_ptn != NULL, "should be registered"); 1036 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state(); 1037 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) { 1038 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR || 1039 aat->isa_ptr() != NULL, "expecting an Ptr"); 1040 bool arg_has_oops = aat->isa_oopptr() && 1041 (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() || 1042 (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass())); 1043 if (i == TypeFunc::Parms) { 1044 src_has_oops = arg_has_oops; 1045 } 1046 // 1047 // src or dst could be j.l.Object when other is basic type array: 1048 // 1049 // arraycopy(char[],0,Object*,0,size); 1050 // arraycopy(Object*,0,char[],0,size); 1051 // 1052 // Don't add edges in such cases. 1053 // 1054 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy && 1055 arg_has_oops && (i > TypeFunc::Parms); 1056 #ifdef ASSERT 1057 if (!(is_arraycopy || 1058 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) || 1059 (call->as_CallLeaf()->_name != NULL && 1060 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 || 1061 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 || 1062 strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 || 1063 strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 || 1064 strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 || 1065 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 || 1066 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 || 1067 strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 || 1068 strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 || 1069 strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 || 1070 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 || 1071 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 || 1072 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 || 1073 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 || 1074 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 || 1075 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 || 1076 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 || 1077 strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 || 1078 strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 || 1079 strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 || 1080 strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 || 1081 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0) 1082 ))) { 1083 call->dump(); 1084 fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name); 1085 } 1086 #endif 1087 // Always process arraycopy's destination object since 1088 // we need to add all possible edges to references in 1089 // source object. 
          if (arg_esc >= PointsToNode::ArgEscape &&
              !arg_is_arraycopy_dest) {
            continue;
          }
          PointsToNode::EscapeState es = PointsToNode::ArgEscape;
          if (call->is_ArrayCopy()) {
            ArrayCopyNode* ac = call->as_ArrayCopy();
            if (ac->is_clonebasic() ||
                ac->is_arraycopy_validated() ||
                ac->is_copyof_validated() ||
                ac->is_copyofrange_validated()) {
              es = PointsToNode::NoEscape;
            }
          }
          set_escape_state(arg_ptn, es);
          if (arg_is_arraycopy_dest) {
            Node* src = call->in(TypeFunc::Parms);
            if (src->is_AddP()) {
              src = get_addp_base(src);
            }
            PointsToNode* src_ptn = ptnode_adr(src->_idx);
            assert(src_ptn != NULL, "should be registered");
            if (arg_ptn != src_ptn) {
              // Special arraycopy edge:
              // A destination object's field can't have the source object
              // as base since objects escape states are not related.
              // Only escape state of destination object's fields affects
              // escape state of fields in source object.
              add_arraycopy(call, es, src_ptn, arg_ptn);
            }
          }
        }
      }
      break;
    }
    case Op_CallStaticJava: {
      // For a static call, we know exactly what method is being called.
      // Use bytecode estimator to record the call's escape effects.
#ifdef ASSERT
      const char* name = call->as_CallStaticJava()->_name;
      assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
#endif
      ciMethod* meth = call->as_CallJava()->method();
      if ((meth != NULL) && meth->is_boxing_method()) {
        break; // Boxing methods do not modify any oops.
      }
      BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
      // fall-through if not a Java method or no analyzer information
      if (call_analyzer != NULL) {
        PointsToNode* call_ptn = ptnode_adr(call->_idx);
        const TypeTuple* d = call->tf()->domain();
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          int k = i - TypeFunc::Parms;
          Node* arg = call->in(i);
          PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
          if (at->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(k)) {
            // The call returns arguments.
            if (call_ptn != NULL) { // Is call's result used?
              assert(call_ptn->is_LocalVar(), "node should be registered");
              assert(arg_ptn != NULL, "node should be registered");
              add_edge(call_ptn, arg_ptn);
            }
          }
          if (at->isa_oopptr() != NULL &&
              arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
            if (!call_analyzer->is_arg_stack(k)) {
              // The argument globally escapes.
              set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
            } else {
              set_escape_state(arg_ptn, PointsToNode::ArgEscape);
              if (!call_analyzer->is_arg_local(k)) {
                // The argument itself doesn't escape, but any fields might.
                set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
              }
            }
          }
        }
        if (call_ptn != NULL && call_ptn->is_LocalVar()) {
          // The call returns arguments.
          assert(call_ptn->edge_count() > 0, "sanity");
          if (!call_analyzer->is_return_local()) {
            // Also returns an unknown object.
            add_edge(call_ptn, phantom_obj);
          }
        }
        break;
      }
    }
    default: {
      // Fall-through here if not a Java method or no analyzer information
      // or some other type of call; assume the worst case: all arguments
      // globally escape.
      const TypeTuple* d = call->tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != NULL) {
          Node* arg = call->in(i);
          if (arg->is_AddP()) {
            arg = get_addp_base(arg);
          }
          assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
          set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
        }
      }
    }
  }
}


// Finish Graph construction.
bool ConnectionGraph::complete_connection_graph(
                         GrowableArray<PointsToNode*>&   ptnodes_worklist,
                         GrowableArray<JavaObjectNode*>& non_escaped_worklist,
                         GrowableArray<JavaObjectNode*>& java_objects_worklist,
                         GrowableArray<FieldNode*>&      oop_fields_worklist) {
  // Normally only 1-3 passes are needed to build the Connection Graph,
  // depending on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
  // Set the limit to 20 to catch situations where something went wrong and
  // bailout Escape Analysis.
  // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.
#define CG_BUILD_ITER_LIMIT 20

  // Propagate GlobalEscape and ArgEscape escape states and check that
  // we still have non-escaping objects. The method pushes on _worklist
  // Field nodes which reference phantom_object.
  if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
    return false; // Nothing to do.
  }
  // Now propagate references to all JavaObject nodes.
  int java_objects_length = java_objects_worklist.length();
  elapsedTimer time;
  bool timeout = false;
  int new_edges = 1;
  int iterations = 0;
  do {
    while ((new_edges > 0) &&
           (iterations++ < CG_BUILD_ITER_LIMIT)) {
      double start_time = time.seconds();
      time.start();
      new_edges = 0;
      // Propagate references to phantom_object for nodes pushed on _worklist
      // by find_non_escaped_objects() and find_field_value().
      new_edges += add_java_object_edges(phantom_obj, false);
      for (int next = 0; next < java_objects_length; ++next) {
        JavaObjectNode* ptn = java_objects_worklist.at(next);
        new_edges += add_java_object_edges(ptn, true);

#define SAMPLE_SIZE 4
        if ((next % SAMPLE_SIZE) == 0) {
          // Every 4 iterations estimate how much time it will take
          // to complete graph construction.
          time.stop();
          // Poll for requests from the shutdown mechanism to quiesce the compiler
          // because Connection graph construction may take a long time.
          CompileBroker::maybe_block();
          double stop_time = time.seconds();
          double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
          double time_until_end = time_per_iter * (double)(java_objects_length - next);
          if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
            timeout = true;
            break; // Timeout
          }
          start_time = stop_time;
          time.start();
        }
#undef SAMPLE_SIZE

      }
      if (timeout) break;
      if (new_edges > 0) {
        // Update escape states on each iteration if graph was updated.
        if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
          return false; // Nothing to do.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    }
    if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) {
      time.start();
      // Find fields which have unknown value.
      int fields_length = oop_fields_worklist.length();
      for (int next = 0; next < fields_length; next++) {
        FieldNode* field = oop_fields_worklist.at(next);
        if (field->edge_count() == 0) {
          new_edges += find_field_value(field);
          // This code may add new edges to phantom_object.
          // Another cycle is needed to propagate references to phantom_object.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    } else {
      new_edges = 0; // Bailout
    }
  } while (new_edges > 0);

  // Bailout if passed limits.
  if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) {
    Compile* C = _compile;
    if (C->log() != NULL) {
      C->log()->begin_elem("connectionGraph_bailout reason='reached ");
      C->log()->text("%s", timeout ? "time" : "iterations");
      C->log()->end_elem(" limit'");
    }
    assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
           time.seconds(), iterations, nodes_size(), ptnodes_worklist.length());
    // Possible infinite build_connection_graph loop,
    // bailout (no changes to ideal graph were made).
    return false;
  }
#ifdef ASSERT
  if (Verbose && PrintEscapeAnalysis) {
    tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d",
                  iterations, nodes_size(), ptnodes_worklist.length());
  }
#endif

#undef CG_BUILD_ITER_LIMIT

  // Find fields initialized by NULL for non-escaping Allocations.
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    PointsToNode::EscapeState es = ptn->escape_state();
    assert(es <= PointsToNode::ArgEscape, "sanity");
    if (es == PointsToNode::NoEscape) {
      if (find_init_values(ptn, null_obj, _igvn) > 0) {
        // Adding references to NULL object does not change escape states
        // since it does not escape. Also no fields are added to NULL object.
        add_java_object_edges(null_obj, false);
      }
    }
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      // The object allocated by this Allocate node will never be
      // seen by another thread. Mark it so that when it is
      // expanded no MemBarStoreStore is added.
      InitializeNode* ini = n->as_Allocate()->initialization();
      if (ini != NULL)
        ini->set_does_not_escape();
    }
  }
  return true; // Finished graph construction.
}

// Propagate GlobalEscape and ArgEscape escape states to all nodes
// and check that we still have non-escaping java objects.
bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
                                               GrowableArray<JavaObjectNode*>& non_escaped_worklist) {
  GrowableArray<PointsToNode*> escape_worklist;
  // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
  int ptnodes_length = ptnodes_worklist.length();
  for (int next = 0; next < ptnodes_length; ++next) {
    PointsToNode* ptn = ptnodes_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::ArgEscape ||
        ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
      escape_worklist.push(ptn);
    }
  }
  // Set escape states to referenced nodes (edges list).
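  // Escape states form a lattice, NoEscape < ArgEscape < GlobalEscape, and
  // the loop below only ever raises a node's state, so it terminates once
  // the states stabilize. For example (roughly), an object stored into a
  // field of a GlobalEscape object is itself raised to GlobalEscape.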
  while (escape_worklist.length() > 0) {
    PointsToNode* ptn = escape_worklist.pop();
    PointsToNode::EscapeState es = ptn->escape_state();
    PointsToNode::EscapeState field_es = ptn->fields_escape_state();
    if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
        es >= PointsToNode::ArgEscape) {
      // GlobalEscape or ArgEscape state of field means it has unknown value.
      if (add_edge(ptn, phantom_obj)) {
        // New edge was added
        add_field_uses_to_worklist(ptn->as_Field());
      }
    }
    for (EdgeIterator i(ptn); i.has_next(); i.next()) {
      PointsToNode* e = i.get();
      if (e->is_Arraycopy()) {
        assert(ptn->arraycopy_dst(), "sanity");
        // Propagate only fields escape state through arraycopy edge.
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es);
          escape_worklist.push(e);
        }
      } else if (es >= field_es) {
        // fields_escape_state is also set to 'es' if it is less than 'es'.
        if (e->escape_state() < es) {
          set_escape_state(e, es);
          escape_worklist.push(e);
        }
      } else {
        // Propagate field escape state.
        bool es_changed = false;
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es);
          es_changed = true;
        }
        if ((e->escape_state() < field_es) &&
            e->is_Field() && ptn->is_JavaObject() &&
            e->as_Field()->is_oop()) {
          // Change escape state of referenced fields.
          set_escape_state(e, field_es);
          es_changed = true;
        } else if (e->escape_state() < es) {
          set_escape_state(e, es);
          es_changed = true;
        }
        if (es_changed) {
          escape_worklist.push(e);
        }
      }
    }
  }
  // Remove escaped objects from non_escaped list.
  for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
      non_escaped_worklist.delete_at(next);
    }
    if (ptn->escape_state() == PointsToNode::NoEscape) {
      // Find fields in non-escaped allocations which have unknown value.
      find_init_values(ptn, phantom_obj, NULL);
    }
  }
  return (non_escaped_worklist.length() > 0);
}

// Add all references to JavaObject node by walking over all uses.
int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
  int new_edges = 0;
  if (populate_worklist) {
    // Populate _worklist by uses of jobj's uses.
    for (UseIterator i(jobj); i.has_next(); i.next()) {
      PointsToNode* use = i.get();
      if (use->is_Arraycopy())
        continue;
      add_uses_to_worklist(use);
      if (use->is_Field() && use->as_Field()->is_oop()) {
        // Put on worklist all field's uses (loads) and
        // related field nodes (same base and offset).
        add_field_uses_to_worklist(use->as_Field());
      }
    }
  }
  for (int l = 0; l < _worklist.length(); l++) {
    PointsToNode* use = _worklist.at(l);
    if (PointsToNode::is_base_use(use)) {
      // Add reference from jobj to field and from field to jobj (field's base).
      use = PointsToNode::get_use_node(use)->as_Field();
      if (add_base(use->as_Field(), jobj)) {
        new_edges++;
      }
      continue;
    }
    assert(!use->is_JavaObject(), "sanity");
    if (use->is_Arraycopy()) {
      if (jobj == null_obj) // NULL object does not have field edges
        continue;
      // Added edge from Arraycopy node to arraycopy's source java object
      if (add_edge(use, jobj)) {
        jobj->set_arraycopy_src();
        new_edges++;
      }
      // and stop here.
      continue;
    }
    if (!add_edge(use, jobj))
      continue; // No new edge added, there was such edge already.
    new_edges++;
    if (use->is_LocalVar()) {
      add_uses_to_worklist(use);
      if (use->arraycopy_dst()) {
        for (EdgeIterator i(use); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_Arraycopy()) {
            if (jobj == null_obj) // NULL object does not have field edges
              continue;
            // Add edge from arraycopy's destination java object to Arraycopy node.
            if (add_edge(jobj, e)) {
              new_edges++;
              jobj->set_arraycopy_dst();
            }
          }
        }
      }
    } else {
      // Added a new edge to the field's stored values.
      // Put on worklist all field's uses (loads) and
      // related field nodes (same base and offset).
      add_field_uses_to_worklist(use->as_Field());
    }
  }
  _worklist.clear();
  _in_worklist.Reset();
  return new_edges;
}

// Put on worklist all related field nodes.
void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
  assert(field->is_oop(), "sanity");
  int offset = field->offset();
  add_uses_to_worklist(field);
  // Loop over all bases of this field and push on worklist Field nodes
  // with the same offset and base (since they may reference the same field).
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    add_fields_to_worklist(field, base);
    // Check if the base was source object of arraycopy and go over arraycopy's
    // destination objects since values stored to a field of source object are
    // accessible by uses (loads) of fields of destination objects.
    if (base->arraycopy_src()) {
      for (UseIterator j(base); j.has_next(); j.next()) {
        PointsToNode* arycp = j.get();
        if (arycp->is_Arraycopy()) {
          for (UseIterator k(arycp); k.has_next(); k.next()) {
            PointsToNode* abase = k.get();
            if (abase->arraycopy_dst() && abase != base) {
              // Look for the same arraycopy reference.
              add_fields_to_worklist(field, abase);
            }
          }
        }
      }
    }
  }
}

// Put on worklist all related field nodes.
void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
  int offset = field->offset();
  if (base->is_LocalVar()) {
    for (UseIterator j(base); j.has_next(); j.next()) {
      PointsToNode* f = j.get();
      if (PointsToNode::is_base_use(f)) { // Field
        f = PointsToNode::get_use_node(f);
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  } else {
    assert(base->is_JavaObject(), "sanity");
    if (// Skip phantom_object since it is only used to indicate that
        // this field's content globally escapes.
        (base != phantom_obj) &&
        // NULL object node does not have fields.
        (base != null_obj)) {
      for (EdgeIterator i(base); i.has_next(); i.next()) {
        PointsToNode* f = i.get();
        // Skip arraycopy edge since store to destination object field
        // does not update value in source object field.
        if (f->is_Arraycopy()) {
          assert(base->arraycopy_dst(), "sanity");
          continue;
        }
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  }
}

// Find fields which have unknown value.
int ConnectionGraph::find_field_value(FieldNode* field) {
  // Escaped fields should have init value already.
  assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
  int new_edges = 0;
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    if (base->is_JavaObject()) {
      // Skip Allocate's fields which will be processed later.
      if (base->ideal_node()->is_Allocate())
        return 0;
      assert(base == null_obj, "only NULL ptr base expected here");
    }
  }
  if (add_edge(field, phantom_obj)) {
    // New edge was added
    new_edges++;
    add_field_uses_to_worklist(field);
  }
  return new_edges;
}

// Find fields initializing values for allocations.
int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) {
  assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
  int new_edges = 0;
  Node* alloc = pta->ideal_node();
  if (init_val == phantom_obj) {
    // Do nothing for Allocate nodes since their field values are
    // "known" unless they are initialized by arraycopy/clone.
    if (alloc->is_Allocate() && !pta->arraycopy_dst())
      return 0;
    assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
#ifdef ASSERT
    if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) {
      const char* name = alloc->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
    }
#endif
    // Non-escaped allocations returned from Java or runtime calls have
    // unknown values in fields.
    for (EdgeIterator i(pta); i.has_next(); i.next()) {
      PointsToNode* field = i.get();
      if (field->is_Field() && field->as_Field()->is_oop()) {
        if (add_edge(field, phantom_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(field->as_Field());
        }
      }
    }
    return new_edges;
  }
  assert(init_val == null_obj, "sanity");
  // Do nothing for Call nodes since their field values are unknown.
  if (!alloc->is_Allocate())
    return 0;

  InitializeNode* ini = alloc->as_Allocate()->initialization();
  bool visited_bottom_offset = false;
  GrowableArray<int> offsets_worklist;

  // Check if an oop field's initializing value is recorded and add
  // a corresponding NULL as the field's value if it is not recorded.
  // Connection Graph does not record a default initialization by NULL
  // captured by Initialize node.
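  // For example (illustrative):
  //
  //   Point p = new Point();   // the constructor never writes p.next
  //
  // leaves no captured store for the 'next' field in the Initialize node,
  // so the loop below adds an edge from that Field node to null_obj to
  // record that the field still holds its default NULL value.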
1631 //
1632 for (EdgeIterator i(pta); i.has_next(); i.next()) {
1633 PointsToNode* field = i.get(); // Field (AddP)
1634 if (!field->is_Field() || !field->as_Field()->is_oop())
1635 continue; // Not oop field
1636 int offset = field->as_Field()->offset();
1637 if (offset == Type::OffsetBot) {
1638 if (!visited_bottom_offset) {
1639 // OffsetBot is used to reference an array's element;
1640 // always add a reference to NULL to all Field nodes since we don't
1641 // know which element is referenced.
1642 if (add_edge(field, null_obj)) {
1643 // New edge was added
1644 new_edges++;
1645 add_field_uses_to_worklist(field->as_Field());
1646 visited_bottom_offset = true;
1647 }
1648 }
1649 } else {
1650 // Check only oop fields.
1651 const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
1652 if (adr_type->isa_rawptr()) {
1653 #ifdef ASSERT
1654 // Raw pointers are used for initializing stores, so skip this field
1655 // since its value should be recorded already.
1656 Node* base = get_addp_base(field->ideal_node());
1657 assert(adr_type->isa_rawptr() && base->is_Proj() &&
1658 (base->in(0) == alloc), "unexpected pointer type");
1659 #endif
1660 continue;
1661 }
1662 if (!offsets_worklist.contains(offset)) {
1663 offsets_worklist.append(offset);
1664 Node* value = NULL;
1665 if (ini != NULL) {
1666 // StoreP::memory_type() == T_ADDRESS
1667 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
1668 Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
1669 // Make sure the initializing store has the same type as this AddP.
1670 // This AddP may reference a non-existent field because it is on a
1671 // dead branch of a bimorphic call which is not eliminated yet.
1672 if (store != NULL && store->is_Store() &&
1673 store->as_Store()->memory_type() == ft) {
1674 value = store->in(MemNode::ValueIn);
1675 #ifdef ASSERT
1676 if (VerifyConnectionGraph) {
1677 // Verify that the AddP already points to all objects the value points to.
1678 PointsToNode* val = ptnode_adr(value->_idx);
1679 assert((val != NULL), "should be processed already");
1680 PointsToNode* missed_obj = NULL;
1681 if (val->is_JavaObject()) {
1682 if (!field->points_to(val->as_JavaObject())) {
1683 missed_obj = val;
1684 }
1685 } else {
1686 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
1687 tty->print_cr("----------init store has invalid value -----");
1688 store->dump();
1689 val->dump();
1690 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
1691 }
1692 for (EdgeIterator j(val); j.has_next(); j.next()) {
1693 PointsToNode* obj = j.get();
1694 if (obj->is_JavaObject()) {
1695 if (!field->points_to(obj->as_JavaObject())) {
1696 missed_obj = obj;
1697 break;
1698 }
1699 }
1700 }
1701 }
1702 if (missed_obj != NULL) {
1703 tty->print_cr("----------field---------------------------------");
1704 field->dump();
1705 tty->print_cr("----------missed reference to object-----------");
1706 missed_obj->dump();
1707 tty->print_cr("----------object referenced by init store -----");
1708 store->dump();
1709 val->dump();
1710 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
1711 }
1712 }
1713 #endif
1714 } else {
1715 // There could be initializing stores which follow the allocation.
1716 // For example, a volatile field store is not collected
1717 // by the Initialize node.
1718 //
1719 // We would need to check for dependent loads to separate such stores from
1720 // stores which follow loads. For now, add the initial value NULL so
1721 // that the pointer-compare optimization works correctly.
1722 }
1723 }
1724 if (value == NULL) {
1725 // A field's initializing value was not recorded. Add NULL.
1726 if (add_edge(field, null_obj)) {
1727 // New edge was added
1728 new_edges++;
1729 add_field_uses_to_worklist(field->as_Field());
1730 }
1731 }
1732 }
1733 }
1734 }
1735 return new_edges;
1736 }
1737
1738 // Adjust the scalar_replaceable state after the Connection Graph is built.
1739 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {
1740 // Search for non-escaping objects which are not scalar replaceable
1741 // and mark them to propagate the state to referenced objects.
1742
1743 // 1. An object is not scalar replaceable if the field into which it is
1744 // stored has unknown offset (stored into an unknown element of an array).
1745 //
1746 for (UseIterator i(jobj); i.has_next(); i.next()) {
1747 PointsToNode* use = i.get();
1748 if (use->is_Arraycopy()) {
1749 continue;
1750 }
1751 if (use->is_Field()) {
1752 FieldNode* field = use->as_Field();
1753 assert(field->is_oop() && field->scalar_replaceable(), "sanity");
1754 if (field->offset() == Type::OffsetBot) {
1755 jobj->set_scalar_replaceable(false);
1756 return;
1757 }
1758 // 2. An object is not scalar replaceable if the field into which it is
1759 // stored has multiple bases, one of which is null.
1760 if (field->base_count() > 1) {
1761 for (BaseIterator i(field); i.has_next(); i.next()) {
1762 PointsToNode* base = i.get();
1763 if (base == null_obj) {
1764 jobj->set_scalar_replaceable(false);
1765 return;
1766 }
1767 }
1768 }
1769 }
1770 assert(use->is_Field() || use->is_LocalVar(), "sanity");
1771 // 3. An object is not scalar replaceable if it is merged with other objects.
1772 for (EdgeIterator j(use); j.has_next(); j.next()) {
1773 PointsToNode* ptn = j.get();
1774 if (ptn->is_JavaObject() && ptn != jobj) {
1775 // Mark all objects.
1776 jobj->set_scalar_replaceable(false);
1777 ptn->set_scalar_replaceable(false);
1778 }
1779 }
1780 if (!jobj->scalar_replaceable()) {
1781 return;
1782 }
1783 }
1784
1785 for (EdgeIterator j(jobj); j.has_next(); j.next()) {
1786 if (j.get()->is_Arraycopy()) {
1787 continue;
1788 }
1789
1790 // A non-escaping object node should point only to field nodes.
1791 FieldNode* field = j.get()->as_Field();
1792 int offset = field->as_Field()->offset();
1793
1794 // 4. An object is not scalar replaceable if it has a field with unknown
1795 // offset (array's element is accessed in a loop).
1796 if (offset == Type::OffsetBot) {
1797 jobj->set_scalar_replaceable(false);
1798 return;
1799 }
1800 // 5. Currently an object is not scalar replaceable if a LoadStore node
1801 // accesses its field since the field value is unknown after it.
1802 //
1803 Node* n = field->ideal_node();
1804
1805 // Test for an unsafe access that was parsed as maybe off heap
1806 // (with a CheckCastPP to raw memory).
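// For example (illustrative): an Unsafe access whose base may be either
// on-heap or off-heap (a "mixed" access) is parsed with a CheckCastPP
// that casts the oop to a raw pointer; the address shape then matches
// AddP case #9 described in get_addp_base() below.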
1807 assert(n->is_AddP(), "expect an address computation");
1808 if (n->in(AddPNode::Base)->is_top() &&
1809 n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
1810 assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
1811 assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
1812 jobj->set_scalar_replaceable(false);
1813 return;
1814 }
1815
1816 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1817 Node* u = n->fast_out(i);
1818 if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
1819 jobj->set_scalar_replaceable(false);
1820 return;
1821 }
1822 }
1823
1824 // 6. Or the address may point to more than one object. This may produce
1825 // a false positive result (set not scalar replaceable)
1826 // since the flow-insensitive escape analysis can't separate
1827 // the case when stores overwrite the field's value from the case
1828 // when stores happened on different control branches.
1829 //
1830 // Note: it will disable scalar replacement in some cases:
1831 //
1832 // Point p[] = new Point[1];
1833 // p[0] = new Point(); // Will not be scalar replaced
1834 //
1835 // but it will save us from incorrect optimizations in cases like:
1836 //
1837 // Point p[] = new Point[1];
1838 // if ( x ) p[0] = new Point(); // Will not be scalar replaced
1839 //
1840 if (field->base_count() > 1) {
1841 for (BaseIterator i(field); i.has_next(); i.next()) {
1842 PointsToNode* base = i.get();
1843 // Don't take into account LocalVar nodes which
1844 // may point to only one object which should also be
1845 // this field's base by now.
1846 if (base->is_JavaObject() && base != jobj) {
1847 // Mark all bases.
1848 jobj->set_scalar_replaceable(false);
1849 base->set_scalar_replaceable(false);
1850 }
1851 }
1852 }
1853 }
1854 }
1855
1856 #ifdef ASSERT
1857 void ConnectionGraph::verify_connection_graph(
1858 GrowableArray<PointsToNode*>& ptnodes_worklist,
1859 GrowableArray<JavaObjectNode*>& non_escaped_worklist,
1860 GrowableArray<JavaObjectNode*>& java_objects_worklist,
1861 GrowableArray<Node*>& addp_worklist) {
1862 // Verify that the graph is complete - no new edges could be added.
1863 int java_objects_length = java_objects_worklist.length();
1864 int non_escaped_length = non_escaped_worklist.length();
1865 int new_edges = 0;
1866 for (int next = 0; next < java_objects_length; ++next) {
1867 JavaObjectNode* ptn = java_objects_worklist.at(next);
1868 new_edges += add_java_object_edges(ptn, true);
1869 }
1870 assert(new_edges == 0, "graph was not complete");
1871 // Verify that the escape state is final.
1872 int length = non_escaped_worklist.length();
1873 find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist);
1874 assert((non_escaped_length == non_escaped_worklist.length()) &&
1875 (non_escaped_length == length) &&
1876 (_worklist.length() == 0), "escape state was not final");
1877
1878 // Verify fields information.
1879 int addp_length = addp_worklist.length();
1880 for (int next = 0; next < addp_length; ++next) {
1881 Node* n = addp_worklist.at(next);
1882 FieldNode* field = ptnode_adr(n->_idx)->as_Field();
1883 if (field->is_oop()) {
1884 // Verify that the field has all bases
1885 Node* base = get_addp_base(n);
1886 PointsToNode* ptn = ptnode_adr(base->_idx);
1887 if (ptn->is_JavaObject()) {
1888 assert(field->has_base(ptn->as_JavaObject()), "sanity");
1889 } else {
1890 assert(ptn->is_LocalVar(), "sanity");
1891 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
1892 PointsToNode* e = i.get();
1893 if (e->is_JavaObject()) {
1894 assert(field->has_base(e->as_JavaObject()), "sanity");
1895 }
1896 }
1897 }
1898 // Verify that all fields have initializing values.
1899 if (field->edge_count() == 0) {
1900 tty->print_cr("----------field does not have references----------");
1901 field->dump();
1902 for (BaseIterator i(field); i.has_next(); i.next()) {
1903 PointsToNode* base = i.get();
1904 tty->print_cr("----------field has next base---------------------");
1905 base->dump();
1906 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) {
1907 tty->print_cr("----------base has fields-------------------------");
1908 for (EdgeIterator j(base); j.has_next(); j.next()) {
1909 j.get()->dump();
1910 }
1911 tty->print_cr("----------base has references---------------------");
1912 for (UseIterator j(base); j.has_next(); j.next()) {
1913 j.get()->dump();
1914 }
1915 }
1916 }
1917 for (UseIterator i(field); i.has_next(); i.next()) {
1918 i.get()->dump();
1919 }
1920 assert(field->edge_count() > 0, "sanity");
1921 }
1922 }
1923 }
1924 }
1925 #endif
1926
1927 // Optimize ideal graph.
1928 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
1929 GrowableArray<Node*>& storestore_worklist) {
1930 Compile* C = _compile;
1931 PhaseIterGVN* igvn = _igvn;
1932 if (EliminateLocks) {
1933 // Mark locks before changing the ideal graph.
1934 int cnt = C->macro_count();
1935 for (int i = 0; i < cnt; i++) {
1936 Node *n = C->macro_node(i);
1937 if (n->is_AbstractLock()) { // Lock and Unlock nodes
1938 AbstractLockNode* alock = n->as_AbstractLock();
1939 if (!alock->is_non_esc_obj()) {
1940 if (not_global_escape(alock->obj_node())) {
1941 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
1942 // The lock could have been marked eliminated by lock coarsening
1943 // code during the first IGVN pass before EA. Replace the coarsened
1944 // flag to eliminate all associated locks/unlocks.
1945 #ifdef ASSERT
1946 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
1947 #endif
1948 alock->set_non_esc_obj();
1949 }
1950 }
1951 }
1952 }
1953 }
1954
1955 if (OptimizePtrCompare) {
1956 // Add ConI(#CC_GT) and ConI(#CC_EQ).
1957 _pcmp_neq = igvn->makecon(TypeInt::CC_GT);
1958 _pcmp_eq = igvn->makecon(TypeInt::CC_EQ);
1959 // Optimize objects compare.
1960 while (ptr_cmp_worklist.length() != 0) {
1961 Node *n = ptr_cmp_worklist.pop();
1962 Node *res = optimize_ptr_compare(n);
1963 if (res != NULL) {
1964 #ifndef PRODUCT
1965 if (PrintOptimizePtrCompare) {
1966 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
"EQ" : "NotEQ")); 1967 if (Verbose) { 1968 n->dump(1); 1969 } 1970 } 1971 #endif 1972 igvn->replace_node(n, res); 1973 } 1974 } 1975 // cleanup 1976 if (_pcmp_neq->outcnt() == 0) 1977 igvn->hash_delete(_pcmp_neq); 1978 if (_pcmp_eq->outcnt() == 0) 1979 igvn->hash_delete(_pcmp_eq); 1980 } 1981 1982 // For MemBarStoreStore nodes added in library_call.cpp, check 1983 // escape status of associated AllocateNode and optimize out 1984 // MemBarStoreStore node if the allocated object never escapes. 1985 while (storestore_worklist.length() != 0) { 1986 Node *n = storestore_worklist.pop(); 1987 MemBarStoreStoreNode *storestore = n ->as_MemBarStoreStore(); 1988 Node *alloc = storestore->in(MemBarNode::Precedent)->in(0); 1989 assert (alloc->is_Allocate(), "storestore should point to AllocateNode"); 1990 if (not_global_escape(alloc)) { 1991 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot); 1992 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory)); 1993 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control)); 1994 igvn->register_new_node_with_optimizer(mb); 1995 igvn->replace_node(storestore, mb); 1996 } 1997 } 1998 } 1999 2000 // Optimize objects compare. 2001 Node* ConnectionGraph::optimize_ptr_compare(Node* n) { 2002 assert(OptimizePtrCompare, "sanity"); 2003 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx); 2004 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx); 2005 JavaObjectNode* jobj1 = unique_java_object(n->in(1)); 2006 JavaObjectNode* jobj2 = unique_java_object(n->in(2)); 2007 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity"); 2008 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity"); 2009 2010 // Check simple cases first. 2011 if (jobj1 != NULL) { 2012 if (jobj1->escape_state() == PointsToNode::NoEscape) { 2013 if (jobj1 == jobj2) { 2014 // Comparing the same not escaping object. 2015 return _pcmp_eq; 2016 } 2017 Node* obj = jobj1->ideal_node(); 2018 // Comparing not escaping allocation. 2019 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 2020 !ptn2->points_to(jobj1)) { 2021 return _pcmp_neq; // This includes nullness check. 2022 } 2023 } 2024 } 2025 if (jobj2 != NULL) { 2026 if (jobj2->escape_state() == PointsToNode::NoEscape) { 2027 Node* obj = jobj2->ideal_node(); 2028 // Comparing not escaping allocation. 2029 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 2030 !ptn1->points_to(jobj2)) { 2031 return _pcmp_neq; // This includes nullness check. 2032 } 2033 } 2034 } 2035 if (jobj1 != NULL && jobj1 != phantom_obj && 2036 jobj2 != NULL && jobj2 != phantom_obj && 2037 jobj1->ideal_node()->is_Con() && 2038 jobj2->ideal_node()->is_Con()) { 2039 // Klass or String constants compare. Need to be careful with 2040 // compressed pointers - compare types of ConN and ConP instead of nodes. 2041 const Type* t1 = jobj1->ideal_node()->get_ptr_type(); 2042 const Type* t2 = jobj2->ideal_node()->get_ptr_type(); 2043 if (t1->make_ptr() == t2->make_ptr()) { 2044 return _pcmp_eq; 2045 } else { 2046 return _pcmp_neq; 2047 } 2048 } 2049 if (ptn1->meet(ptn2)) { 2050 return NULL; // Sets are not disjoint 2051 } 2052 2053 // Sets are disjoint. 2054 bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj); 2055 bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj); 2056 bool set1_has_null_ptr = ptn1->points_to(null_obj); 2057 bool set2_has_null_ptr = ptn2->points_to(null_obj); 2058 if ((set1_has_unknown_ptr && set2_has_null_ptr) || 2059 (set2_has_unknown_ptr && set1_has_null_ptr)) { 2060 // Check nullness of unknown object. 
2061 return NULL;
2062 }
2063
2064 // Disjointness by itself is not sufficient since
2065 // alias analysis is not complete for escaped objects.
2066 // Disjoint sets are definitely unrelated only when
2067 // at least one set has only non-escaping allocations.
2068 if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
2069 if (ptn1->non_escaping_allocation()) {
2070 return _pcmp_neq;
2071 }
2072 }
2073 if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
2074 if (ptn2->non_escaping_allocation()) {
2075 return _pcmp_neq;
2076 }
2077 }
2078 return NULL;
2079 }
2080
2081 // Connection Graph construction functions.
2082
2083 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
2084 PointsToNode* ptadr = _nodes.at(n->_idx);
2085 if (ptadr != NULL) {
2086 assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
2087 return;
2088 }
2089 Compile* C = _compile;
2090 ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
2091 _nodes.at_put(n->_idx, ptadr);
2092 }
2093
2094 void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
2095 PointsToNode* ptadr = _nodes.at(n->_idx);
2096 if (ptadr != NULL) {
2097 assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
2098 return;
2099 }
2100 Compile* C = _compile;
2101 ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
2102 _nodes.at_put(n->_idx, ptadr);
2103 }
2104
2105 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
2106 PointsToNode* ptadr = _nodes.at(n->_idx);
2107 if (ptadr != NULL) {
2108 assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
2109 return;
2110 }
2111 bool unsafe = false;
2112 bool is_oop = is_oop_field(n, offset, &unsafe);
2113 if (unsafe) {
2114 es = PointsToNode::GlobalEscape;
2115 }
2116 Compile* C = _compile;
2117 FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
2118 _nodes.at_put(n->_idx, field);
2119 }
2120
2121 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
2122 PointsToNode* src, PointsToNode* dst) {
2123 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
2124 assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
2125 PointsToNode* ptadr = _nodes.at(n->_idx);
2126 if (ptadr != NULL) {
2127 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
2128 return;
2129 }
2130 Compile* C = _compile;
2131 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
2132 _nodes.at_put(n->_idx, ptadr);
2133 // Add edge from arraycopy node to source object.
2134 (void)add_edge(ptadr, src);
2135 src->set_arraycopy_src();
2136 // Add edge from destination object to arraycopy node.
2137 (void)add_edge(dst, ptadr);
2138 dst->set_arraycopy_dst();
2139 }
2140
2141 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2142 const Type* adr_type = n->as_AddP()->bottom_type();
2143 BasicType bt = T_INT;
2144 if (offset == Type::OffsetBot) {
2145 // Check only oop fields.
2146 if (!adr_type->isa_aryptr() ||
2147 (adr_type->isa_aryptr()->klass() == NULL) ||
2148 adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
2149 // OffsetBot is used to reference an array's element. Ignore first AddP.
2150 if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2151 bt = T_OBJECT;
2152 }
2153 }
2154 } else if (offset != oopDesc::klass_offset_in_bytes()) {
2155 if (adr_type->isa_instptr()) {
2156 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
2157 if (field != NULL) {
2158 bt = field->layout_type();
2159 } else {
2160 // Check for unsafe oop field access
2161 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2162 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2163 #if INCLUDE_SHENANDOAHGC
2164 n->has_out_with(Op_ShenandoahCompareAndExchangeP) || n->has_out_with(Op_ShenandoahCompareAndExchangeN) ||
2165 n->has_out_with(Op_ShenandoahCompareAndSwapP, Op_ShenandoahCompareAndSwapN, Op_ShenandoahWeakCompareAndSwapP, Op_ShenandoahWeakCompareAndSwapN) ||
2166 #endif
2167 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
2168 bt = T_OBJECT;
2169 (*unsafe) = true;
2170 }
2171 }
2172 } else if (adr_type->isa_aryptr()) {
2173 if (offset == arrayOopDesc::length_offset_in_bytes()) {
2174 // Ignore array length load.
2175 } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2176 // Ignore first AddP.
2177 } else {
2178 const Type* elemtype = adr_type->isa_aryptr()->elem();
2179 bt = elemtype->array_element_basic_type();
2180 }
2181 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2182 // Allocation initialization, ThreadLocal field access, unsafe access
2183 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2184 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2185 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
2186 bt = T_OBJECT;
2187 }
2188 }
2189 }
2190 return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
2191 }
2192
2193 // Returns the unique java object pointed to, or NULL.
2194 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2195 assert(!_collecting, "should not call during graph construction");
2196 // If the node was created after the escape computation we can't answer.
2197 uint idx = n->_idx;
2198 if (idx >= nodes_size()) {
2199 return NULL;
2200 }
2201 PointsToNode* ptn = ptnode_adr(idx);
2202 if (ptn == NULL) {
2203 return NULL;
2204 }
2205 if (ptn->is_JavaObject()) {
2206 return ptn->as_JavaObject();
2207 }
2208 assert(ptn->is_LocalVar(), "sanity");
2209 // Check all java objects it points to.
2210 JavaObjectNode* jobj = NULL;
2211 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2212 PointsToNode* e = i.get();
2213 if (e->is_JavaObject()) {
2214 if (jobj == NULL) {
2215 jobj = e->as_JavaObject();
2216 } else if (jobj != e) {
2217 return NULL;
2218 }
2219 }
2220 }
2221 return jobj;
2222 }
2223
2224 // Return true if this node points only to non-escaping allocations.
2225 bool PointsToNode::non_escaping_allocation() {
2226 if (is_JavaObject()) {
2227 Node* n = ideal_node();
2228 if (n->is_Allocate() || n->is_CallStaticJava()) {
2229 return (escape_state() == PointsToNode::NoEscape);
2230 } else {
2231 return false;
2232 }
2233 }
2234 assert(is_LocalVar(), "sanity");
2235 // Check all java objects it points to.
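// For example (illustrative): a LocalVar such as a Phi merging two
// allocations qualifies only if every JavaObject it points to is a
// non-escaping Allocate or java static call result; a single escaping
// target found in the loop below makes the answer false.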
2236 for (EdgeIterator i(this); i.has_next(); i.next()) {
2237 PointsToNode* e = i.get();
2238 if (e->is_JavaObject()) {
2239 Node* n = e->ideal_node();
2240 if ((e->escape_state() != PointsToNode::NoEscape) ||
2241 !(n->is_Allocate() || n->is_CallStaticJava())) {
2242 return false;
2243 }
2244 }
2245 }
2246 return true;
2247 }
2248
2249 // Return true if we know the node does not escape globally.
2250 bool ConnectionGraph::not_global_escape(Node *n) {
2251 assert(!_collecting, "should not call during graph construction");
2252 // If the node was created after the escape computation we can't answer.
2253 uint idx = n->_idx;
2254 if (idx >= nodes_size()) {
2255 return false;
2256 }
2257 PointsToNode* ptn = ptnode_adr(idx);
2258 if (ptn == NULL) {
2259 return false; // not in congraph (e.g. ConI)
2260 }
2261 PointsToNode::EscapeState es = ptn->escape_state();
2262 // If we have already computed a value, return it.
2263 if (es >= PointsToNode::GlobalEscape)
2264 return false;
2265 if (ptn->is_JavaObject()) {
2266 return true; // (es < PointsToNode::GlobalEscape);
2267 }
2268 assert(ptn->is_LocalVar(), "sanity");
2269 // Check all java objects it points to.
2270 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2271 if (i.get()->escape_state() >= PointsToNode::GlobalEscape)
2272 return false;
2273 }
2274 return true;
2275 }
2276
2277
2278 // Helper functions
2279
2280 // Return true if this node is the specified java object or points to it.
2281 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
2282 if (is_JavaObject()) {
2283 return (this == ptn);
2284 }
2285 assert(is_LocalVar() || is_Field(), "sanity");
2286 for (EdgeIterator i(this); i.has_next(); i.next()) {
2287 if (i.get() == ptn)
2288 return true;
2289 }
2290 return false;
2291 }
2292
2293 // Return true if the points-to sets of the two nodes intersect.
2294 bool PointsToNode::meet(PointsToNode* ptn) {
2295 if (this == ptn) {
2296 return true;
2297 } else if (ptn->is_JavaObject()) {
2298 return this->points_to(ptn->as_JavaObject());
2299 } else if (this->is_JavaObject()) {
2300 return ptn->points_to(this->as_JavaObject());
2301 }
2302 assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
2303 int ptn_count = ptn->edge_count();
2304 for (EdgeIterator i(this); i.has_next(); i.next()) {
2305 PointsToNode* this_e = i.get();
2306 for (int j = 0; j < ptn_count; j++) {
2307 if (this_e == ptn->edge(j))
2308 return true;
2309 }
2310 }
2311 return false;
2312 }
2313
2314 #ifdef ASSERT
2315 // Return true if bases point to this java object.
2316 bool FieldNode::has_base(JavaObjectNode* jobj) const {
2317 for (BaseIterator i(this); i.has_next(); i.next()) {
2318 if (i.get() == jobj)
2319 return true;
2320 }
2321 return false;
2322 }
2323 #endif
2324
2325 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
2326 const Type *adr_type = phase->type(adr);
2327 if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
2328 adr->in(AddPNode::Address)->is_Proj() &&
2329 adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
2330 // We are computing a raw address for a store captured by an Initialize
2331 // node; compute an appropriate address type. AddP cases #3 and #5 (see below).
2332 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 2333 assert(offs != Type::OffsetBot || 2334 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), 2335 "offset must be a constant or it is initialization of array"); 2336 return offs; 2337 } 2338 const TypePtr *t_ptr = adr_type->isa_ptr(); 2339 assert(t_ptr != NULL, "must be a pointer type"); 2340 return t_ptr->offset(); 2341 } 2342 2343 Node* ConnectionGraph::get_addp_base(Node *addp) { 2344 assert(addp->is_AddP(), "must be AddP"); 2345 // 2346 // AddP cases for Base and Address inputs: 2347 // case #1. Direct object's field reference: 2348 // Allocate 2349 // | 2350 // Proj #5 ( oop result ) 2351 // | 2352 // CheckCastPP (cast to instance type) 2353 // | | 2354 // AddP ( base == address ) 2355 // 2356 // case #2. Indirect object's field reference: 2357 // Phi 2358 // | 2359 // CastPP (cast to instance type) 2360 // | | 2361 // AddP ( base == address ) 2362 // 2363 // case #3. Raw object's field reference for Initialize node: 2364 // Allocate 2365 // | 2366 // Proj #5 ( oop result ) 2367 // top | 2368 // \ | 2369 // AddP ( base == top ) 2370 // 2371 // case #4. Array's element reference: 2372 // {CheckCastPP | CastPP} 2373 // | | | 2374 // | AddP ( array's element offset ) 2375 // | | 2376 // AddP ( array's offset ) 2377 // 2378 // case #5. Raw object's field reference for arraycopy stub call: 2379 // The inline_native_clone() case when the arraycopy stub is called 2380 // after the allocation before Initialize and CheckCastPP nodes. 2381 // Allocate 2382 // | 2383 // Proj #5 ( oop result ) 2384 // | | 2385 // AddP ( base == address ) 2386 // 2387 // case #6. Constant Pool, ThreadLocal, CastX2P or 2388 // Raw object's field reference: 2389 // {ConP, ThreadLocal, CastX2P, raw Load} 2390 // top | 2391 // \ | 2392 // AddP ( base == top ) 2393 // 2394 // case #7. Klass's field reference. 2395 // LoadKlass 2396 // | | 2397 // AddP ( base == address ) 2398 // 2399 // case #8. narrow Klass's field reference. 2400 // LoadNKlass 2401 // | 2402 // DecodeN 2403 // | | 2404 // AddP ( base == address ) 2405 // 2406 // case #9. Mixed unsafe access 2407 // {instance} 2408 // | 2409 // CheckCastPP (raw) 2410 // top | 2411 // \ | 2412 // AddP ( base == top ) 2413 // 2414 Node *base = addp->in(AddPNode::Base); 2415 if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9. 2416 base = addp->in(AddPNode::Address); 2417 while (base->is_AddP()) { 2418 // Case #6 (unsafe access) may have several chained AddP nodes. 
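// For example (illustrative): an unsafe access with a composed offset,
// such as UNSAFE.getLong(o, off1 + off2), can be shaped as
// AddP(top, AddP(top, base, off1), off2); this loop walks the Address
// chain down to the non-AddP base.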
2419 assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
2420 base = base->in(AddPNode::Address);
2421 }
2422 if (base->Opcode() == Op_CheckCastPP &&
2423 base->bottom_type()->isa_rawptr() &&
2424 _igvn->type(base->in(1))->isa_oopptr()) {
2425 base = base->in(1); // Case #9
2426 } else {
2427 Node* uncast_base = base->uncast();
2428 int opcode = uncast_base->Opcode();
2429 assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
2430 opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
2431 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
2432 (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()) ||
2433 uncast_base->Opcode() == Op_ShenandoahLoadReferenceBarrier, "sanity");
2434 }
2435 }
2436 return base;
2437 }
2438
2439 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
2440 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
2441 Node* addp2 = addp->raw_out(0);
2442 if (addp->outcnt() == 1 && addp2->is_AddP() &&
2443 addp2->in(AddPNode::Base) == n &&
2444 addp2->in(AddPNode::Address) == addp) {
2445 assert(addp->in(AddPNode::Base) == n, "expecting the same base");
2446 //
2447 // Find the array's offset AddP to push it on the worklist first so
2448 // that the array's element offset AddP (pushed second) is processed
2449 // first, avoiding a CastPP for the array's offset.
2450 // Otherwise the inserted CastPP (LocalVar) will point to what
2451 // the AddP (Field) points to, which would be wrong since
2452 // the algorithm expects the CastPP to have the same points-to set
2453 // as the AddP's base CheckCastPP (LocalVar).
2454 //
2455 // ArrayAllocation
2456 // |
2457 // CheckCastPP
2458 // |
2459 // memProj (from ArrayAllocation CheckCastPP)
2460 // | ||
2461 // | || Int (element index)
2462 // | || | ConI (log(element size))
2463 // | || | /
2464 // | || LShift
2465 // | || /
2466 // | AddP (array's element offset)
2467 // | |
2468 // | | ConI (array's offset: #12(32-bits) or #24(64-bits))
2469 // | / /
2470 // AddP (array's offset)
2471 // |
2472 // Load/Store (memory operation on array's element)
2473 //
2474 return addp2;
2475 }
2476 return NULL;
2477 }
2478
2479 //
2480 // Adjust the type and inputs of an AddP which computes the
2481 // address of a field of an instance
2482 //
2483 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2484 PhaseGVN* igvn = _igvn;
2485 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
2486 assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
2487 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
2488 if (t == NULL) {
2489 // We are computing a raw address for a store captured by an Initialize
2490 // node; compute an appropriate address type (cases #3 and #5).
2491 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
2492 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
2493 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
2494 assert(offs != Type::OffsetBot, "offset must be a constant");
2495 t = base_t->add_offset(offs)->is_oopptr();
2496 }
2497 int inst_id = base_t->instance_id();
2498 assert(!t->is_known_instance() || t->instance_id() == inst_id,
2499 "old type must be non-instance or match new type");
2500
2501 // The type 't' could be a subclass of 'base_t'.
2502 // As a result t->offset() could be larger than base_t's size, and it will
2503 // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
2504 // constructor verifies the correctness of the offset.
2505 //
2506 // It can happen on a subclass's branch (from the type profiling
2507 // inlining) which was not eliminated during parsing since the exactness
2508 // of the allocation type was not propagated to the subclass type check.
2509 //
2510 // Or the type 't' could be unrelated to 'base_t' at all.
2511 // It can happen when the CHA type differs from the MDO type on a dead path
2512 // (for example, from an instanceof check) which is not collapsed during parsing.
2513 //
2514 // Do nothing for such an AddP node and don't process its users since
2515 // this code branch will go away.
2516 //
2517 if (!t->is_known_instance() &&
2518 !base_t->klass()->is_subtype_of(t->klass())) {
2519 return false; // bail out
2520 }
2521 const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
2522 // Do NOT remove the next line: ensure a new alias index is allocated
2523 // for the instance type. Note: C++ will not remove it since the call
2524 // has a side effect.
2525 int alias_idx = _compile->get_alias_index(tinst);
2526 igvn->set_type(addp, tinst);
2527 // record the allocation in the node map
2528 set_map(addp, get_map(base->_idx));
2529 // Set addp's Base and Address to 'base'.
2530 Node *abase = addp->in(AddPNode::Base);
2531 Node *adr = addp->in(AddPNode::Address);
2532 if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2533 adr->in(0)->_idx == (uint)inst_id) {
2534 // Skip AddP cases #3 and #5.
2535 } else {
2536 assert(!abase->is_top(), "sanity"); // AddP case #3
2537 if (abase != base) {
2538 igvn->hash_delete(addp);
2539 addp->set_req(AddPNode::Base, base);
2540 if (abase == adr) {
2541 addp->set_req(AddPNode::Address, base);
2542 } else {
2543 // AddP case #4 (adr is array's element offset AddP node)
2544 #ifdef ASSERT
2545 const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
2546 assert(adr->is_AddP() && atype != NULL &&
2547 atype->instance_id() == inst_id, "array's element offset should be processed first");
2548 #endif
2549 }
2550 igvn->hash_insert(addp);
2551 }
2552 }
2553 // Put on IGVN worklist since at least addp's type was changed above.
2554 record_for_optimizer(addp);
2555 return true;
2556 }
2557
2558 //
2559 // Create a new version of orig_phi if necessary. Returns either the newly
2560 // created phi or an existing phi. Sets create_new to indicate whether a new
2561 // phi was created. Cache the last newly created phi in the node map.
2562 //
2563 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {
2564 Compile *C = _compile;
2565 PhaseGVN* igvn = _igvn;
2566 new_created = false;
2567 int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
2568 // nothing to do if orig_phi is bottom memory or matches alias_idx
2569 if (phi_alias_idx == alias_idx) {
2570 return orig_phi;
2571 }
2572 // Have we recently created a Phi for this alias index?
2573 PhiNode *result = get_map_phi(orig_phi->_idx);
2574 if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
2575 return result;
2576 }
2577 // The previous check may fail when the same wide memory Phi was split into Phis
2578 // for different memory slices. Search all Phis for this region.
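// For example (illustrative): a wide (bottom) memory Phi split earlier
// for instance slice A leaves A's new Phi in the node map; when the
// same original Phi must now be split for instance slice B, the mapped
// Phi's alias index does not match, so the region's outputs are scanned
// below for an already-created Phi with B's alias index.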
2579 if (result != NULL) {
2580 Node* region = orig_phi->in(0);
2581 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2582 Node* phi = region->fast_out(i);
2583 if (phi->is_Phi() &&
2584 C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
2585 assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
2586 return phi->as_Phi();
2587 }
2588 }
2589 }
2590 if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
2591 if (C->do_escape_analysis() == true && !C->failing()) {
2592 // Retry compilation without escape analysis.
2593 // If this is the first failure, the sentinel string will "stick"
2594 // to the Compile object, and the C2Compiler will see it and retry.
2595 C->record_failure(C2Compiler::retry_no_escape_analysis());
2596 }
2597 return NULL;
2598 }
2599 orig_phi_worklist.append_if_missing(orig_phi);
2600 const TypePtr *atype = C->get_adr_type(alias_idx);
2601 result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
2602 C->copy_node_notes_to(result, orig_phi);
2603 igvn->set_type(result, result->bottom_type());
2604 record_for_optimizer(result);
2605 set_map(orig_phi, result);
2606 new_created = true;
2607 return result;
2608 }
2609
2610 //
2611 // Return a new version of Memory Phi "orig_phi" with the inputs having the
2612 // specified alias index.
2613 //
2614 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {
2615 assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
2616 Compile *C = _compile;
2617 PhaseGVN* igvn = _igvn;
2618 bool new_phi_created;
2619 PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
2620 if (!new_phi_created) {
2621 return result;
2622 }
2623 GrowableArray<PhiNode *> phi_list;
2624 GrowableArray<uint> cur_input;
2625 PhiNode *phi = orig_phi;
2626 uint idx = 1;
2627 bool finished = false;
2628 while (!finished) {
2629 while (idx < phi->req()) {
2630 Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
2631 if (mem != NULL && mem->is_Phi()) {
2632 PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
2633 if (new_phi_created) {
2634 // Found a phi for which we created a new split; push the current one
2635 // on the worklist and begin processing the new one.
2636 phi_list.push(phi);
2637 cur_input.push(idx);
2638 phi = mem->as_Phi();
2639 result = newphi;
2640 idx = 1;
2641 continue;
2642 } else {
2643 mem = newphi;
2644 }
2645 }
2646 if (C->failing()) {
2647 return NULL;
2648 }
2649 result->set_req(idx++, mem);
2650 }
2651 #ifdef ASSERT
2652 // verify that the new Phi has an input for each input of the original
2653 assert(phi->req() == result->req(), "must have same number of inputs.");
2654 assert(result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
2655 #endif
2656 // Check if all new phi's inputs have the specified alias index.
2657 // Otherwise use the old phi.
2658 for (uint i = 1; i < phi->req(); i++) { 2659 Node* in = result->in(i); 2660 assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond."); 2661 } 2662 // we have finished processing a Phi, see if there are any more to do 2663 finished = (phi_list.length() == 0 ); 2664 if (!finished) { 2665 phi = phi_list.pop(); 2666 idx = cur_input.pop(); 2667 PhiNode *prev_result = get_map_phi(phi->_idx); 2668 prev_result->set_req(idx++, result); 2669 result = prev_result; 2670 } 2671 } 2672 return result; 2673 } 2674 2675 // 2676 // The next methods are derived from methods in MemNode. 2677 // 2678 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) { 2679 Node *mem = mmem; 2680 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally 2681 // means an array I have not precisely typed yet. Do not do any 2682 // alias stuff with it any time soon. 2683 if (toop->base() != Type::AnyPtr && 2684 !(toop->klass() != NULL && 2685 toop->klass()->is_java_lang_Object() && 2686 toop->offset() == Type::OffsetBot)) { 2687 mem = mmem->memory_at(alias_idx); 2688 // Update input if it is progress over what we have now 2689 } 2690 return mem; 2691 } 2692 2693 // 2694 // Move memory users to their memory slices. 2695 // 2696 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) { 2697 Compile* C = _compile; 2698 PhaseGVN* igvn = _igvn; 2699 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 2700 assert(tp != NULL, "ptr type"); 2701 int alias_idx = C->get_alias_index(tp); 2702 int general_idx = C->get_general_index(alias_idx); 2703 2704 // Move users first 2705 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2706 Node* use = n->fast_out(i); 2707 if (use->is_MergeMem()) { 2708 MergeMemNode* mmem = use->as_MergeMem(); 2709 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); 2710 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 2711 continue; // Nothing to do 2712 } 2713 // Replace previous general reference to mem node. 2714 uint orig_uniq = C->unique(); 2715 Node* m = find_inst_mem(n, general_idx, orig_phis); 2716 assert(orig_uniq == C->unique(), "no new nodes"); 2717 mmem->set_memory_at(general_idx, m); 2718 --imax; 2719 --i; 2720 } else if (use->is_MemBar()) { 2721 assert(!use->is_Initialize(), "initializing stores should not be moved"); 2722 if (use->req() > MemBarNode::Precedent && 2723 use->in(MemBarNode::Precedent) == n) { 2724 // Don't move related membars. 2725 record_for_optimizer(use); 2726 continue; 2727 } 2728 tp = use->as_MemBar()->adr_type()->isa_ptr(); 2729 if ((tp != NULL && C->get_alias_index(tp) == alias_idx) || 2730 alias_idx == general_idx) { 2731 continue; // Nothing to do 2732 } 2733 // Move to general memory slice. 2734 uint orig_uniq = C->unique(); 2735 Node* m = find_inst_mem(n, general_idx, orig_phis); 2736 assert(orig_uniq == C->unique(), "no new nodes"); 2737 igvn->hash_delete(use); 2738 imax -= use->replace_edge(n, m); 2739 igvn->hash_insert(use); 2740 record_for_optimizer(use); 2741 --i; 2742 #ifdef ASSERT 2743 } else if (use->is_Mem()) { 2744 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) { 2745 // Don't move related cardmark. 2746 continue; 2747 } 2748 // Memory nodes should have new memory input. 
2749 tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
2750 assert(tp != NULL, "ptr type");
2751 int idx = C->get_alias_index(tp);
2752 assert(get_map(use->_idx) != NULL || idx == alias_idx,
2753 "Following memory nodes should have new memory input or be on the same memory slice");
2754 } else if (use->is_Phi()) {
2755 // Phi nodes should be split and moved already.
2756 tp = use->as_Phi()->adr_type()->isa_ptr();
2757 assert(tp != NULL, "ptr type");
2758 int idx = C->get_alias_index(tp);
2759 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
2760 } else {
2761 use->dump();
2762 assert(false, "should not be here");
2763 #endif
2764 }
2765 }
2766 }
2767
2768 //
2769 // Search the memory chain of "mem" to find a MemNode whose address
2770 // is at the specified alias index.
2771 //
2772 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {
2773 if (orig_mem == NULL)
2774 return orig_mem;
2775 Compile* C = _compile;
2776 PhaseGVN* igvn = _igvn;
2777 const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
2778 bool is_instance = (toop != NULL) && toop->is_known_instance();
2779 Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
2780 Node *prev = NULL;
2781 Node *result = orig_mem;
2782 while (prev != result) {
2783 prev = result;
2784 if (result == start_mem)
2785 break; // hit one of our sentinels
2786 if (result->is_Mem()) {
2787 const Type *at = igvn->type(result->in(MemNode::Address));
2788 if (at == Type::TOP)
2789 break; // Dead
2790 assert(at->isa_ptr() != NULL, "pointer type required.");
2791 int idx = C->get_alias_index(at->is_ptr());
2792 if (idx == alias_idx)
2793 break; // Found
2794 if (!is_instance && (at->isa_oopptr() == NULL ||
2795 !at->is_oopptr()->is_known_instance())) {
2796 break; // Do not skip store to general memory slice.
2797 }
2798 result = result->in(MemNode::Memory);
2799 }
2800 if (!is_instance)
2801 continue; // don't search further for non-instance types
2802 // skip over a call which does not affect this memory slice
2803 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
2804 Node *proj_in = result->in(0);
2805 if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
2806 break; // hit one of our sentinels
2807 } else if (proj_in->is_Call()) {
2808 // ArrayCopy node processed here as well
2809 CallNode *call = proj_in->as_Call();
2810 if (!call->may_modify(toop, igvn)) {
2811 result = call->in(TypeFunc::Memory);
2812 }
2813 } else if (proj_in->is_Initialize()) {
2814 AllocateNode* alloc = proj_in->as_Initialize()->allocation();
2815 // Stop if this is the initialization for the object instance which
2816 // contains this memory slice, otherwise skip over it.
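// For example (illustrative): while walking the memory slice of the
// instance with id 24, the Initialize of some other allocation cannot
// store into this instance's fields, so the search continues through
// that Initialize's memory input.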
2817 if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) { 2818 result = proj_in->in(TypeFunc::Memory); 2819 } 2820 } else if (proj_in->is_MemBar()) { 2821 if (proj_in->in(TypeFunc::Memory)->is_MergeMem() && 2822 proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->is_Proj() && 2823 proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->in(0)->is_ArrayCopy()) { 2824 // clone 2825 ArrayCopyNode* ac = proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->in(0)->as_ArrayCopy(); 2826 if (ac->may_modify(toop, igvn)) { 2827 break; 2828 } 2829 } 2830 result = proj_in->in(TypeFunc::Memory); 2831 } 2832 } else if (result->is_MergeMem()) { 2833 MergeMemNode *mmem = result->as_MergeMem(); 2834 result = step_through_mergemem(mmem, alias_idx, toop); 2835 if (result == mmem->base_memory()) { 2836 // Didn't find instance memory, search through general slice recursively. 2837 result = mmem->memory_at(C->get_general_index(alias_idx)); 2838 result = find_inst_mem(result, alias_idx, orig_phis); 2839 if (C->failing()) { 2840 return NULL; 2841 } 2842 mmem->set_memory_at(alias_idx, result); 2843 } 2844 } else if (result->is_Phi() && 2845 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { 2846 Node *un = result->as_Phi()->unique_input(igvn); 2847 if (un != NULL) { 2848 orig_phis.append_if_missing(result->as_Phi()); 2849 result = un; 2850 } else { 2851 break; 2852 } 2853 } else if (result->is_ClearArray()) { 2854 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) { 2855 // Can not bypass initialization of the instance 2856 // we are looking for. 2857 break; 2858 } 2859 // Otherwise skip it (the call updated 'result' value). 2860 } else if (result->Opcode() == Op_SCMemProj) { 2861 Node* mem = result->in(0); 2862 Node* adr = NULL; 2863 if (mem->is_LoadStore()) { 2864 adr = mem->in(MemNode::Address); 2865 } else { 2866 assert(mem->Opcode() == Op_EncodeISOArray || 2867 mem->Opcode() == Op_StrCompressedCopy, "sanity"); 2868 adr = mem->in(3); // Memory edge corresponds to destination array 2869 } 2870 const Type *at = igvn->type(adr); 2871 if (at != Type::TOP) { 2872 assert(at->isa_ptr() != NULL, "pointer type required."); 2873 int idx = C->get_alias_index(at->is_ptr()); 2874 if (idx == alias_idx) { 2875 // Assert in debug mode 2876 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field"); 2877 break; // In product mode return SCMemProj node 2878 } 2879 } 2880 result = mem->in(MemNode::Memory); 2881 } else if (result->Opcode() == Op_StrInflatedCopy) { 2882 Node* adr = result->in(3); // Memory edge corresponds to destination array 2883 const Type *at = igvn->type(adr); 2884 if (at != Type::TOP) { 2885 assert(at->isa_ptr() != NULL, "pointer type required."); 2886 int idx = C->get_alias_index(at->is_ptr()); 2887 if (idx == alias_idx) { 2888 // Assert in debug mode 2889 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field"); 2890 break; // In product mode return SCMemProj node 2891 } 2892 } 2893 result = result->in(MemNode::Memory); 2894 } 2895 } 2896 if (result->is_Phi()) { 2897 PhiNode *mphi = result->as_Phi(); 2898 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); 2899 const TypePtr *t = mphi->adr_type(); 2900 if (!is_instance) { 2901 // Push all non-instance Phis on the orig_phis worklist to update inputs 2902 // during Phase 4 if needed. 
2903 orig_phis.append_if_missing(mphi);
2904 } else if (C->get_alias_index(t) != alias_idx) {
2905 // Create a new Phi with the specified alias index type.
2906 result = split_memory_phi(mphi, alias_idx, orig_phis);
2907 }
2908 }
2909 // The result is either a MemNode, PhiNode or InitializeNode.
2910 return result;
2911 }
2912
2913 //
2914 // Convert the types of unescaped objects to instance types where possible,
2915 // propagate the new type information through the graph, and update memory
2916 // edges and MergeMem inputs to reflect the new type.
2917 //
2918 // We start with allocations (and calls which may be allocations) on alloc_worklist.
2919 // The processing is done in 4 phases:
2920 //
2921 // Phase 1: Process possible allocations from alloc_worklist. Create instance
2922 // types for the CheckCastPP for allocations where possible.
2923 // Propagate the new types through users as follows:
2924 // casts and Phi: push users on alloc_worklist
2925 // AddP: cast Base and Address inputs to the instance type
2926 // push any AddP users on alloc_worklist and push any memnode
2927 // users onto memnode_worklist.
2928 // Phase 2: Process MemNode's from memnode_worklist. Compute a new address type and
2929 // search the Memory chain for a store with the appropriate
2930 // address type. If a Phi is found, create a new version with
2931 // the appropriate memory slices from each of the Phi inputs.
2932 // For stores, process the users as follows:
2933 // MemNode: push on memnode_worklist
2934 // MergeMem: push on mergemem_worklist
2935 // Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice
2936 // moving the first node encountered of each instance type to the
2937 // input corresponding to its alias index (i.e. the
2938 // appropriate memory slice).
2939 // Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes.
2940 //
2941 // In the following example, the CheckCastPP nodes are the cast of allocation
2942 // results and the allocation of node 29 is unescaped and eligible to be an
2943 // instance type.
2944 //
2945 // We start with:
2946 //
2947 // 7 Parm #memory
2948 // 10 ConI "12"
2949 // 19 CheckCastPP "Foo"
2950 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
2951 // 29 CheckCastPP "Foo"
2952 // 30 AddP _ 29 29 10 Foo+12 alias_index=4
2953 //
2954 // 40 StoreP 25 7 20 ... alias_index=4
2955 // 50 StoreP 35 40 30 ... alias_index=4
2956 // 60 StoreP 45 50 20 ... alias_index=4
2957 // 70 LoadP _ 60 30 ... alias_index=4
2958 // 80 Phi 75 50 60 Memory alias_index=4
2959 // 90 LoadP _ 80 30 ... alias_index=4
2960 // 100 LoadP _ 80 20 ... alias_index=4
2961 //
2962 //
2963 // Phase 1 creates an instance type for node 29, assigning it an instance id of 24
2964 // and creating a new alias index for node 30. This gives:
2965 //
2966 // 7 Parm #memory
2967 // 10 ConI "12"
2968 // 19 CheckCastPP "Foo"
2969 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
2970 // 29 CheckCastPP "Foo" iid=24
2971 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
2972 //
2973 // 40 StoreP 25 7 20 ... alias_index=4
2974 // 50 StoreP 35 40 30 ... alias_index=6
2975 // 60 StoreP 45 50 20 ... alias_index=4
2976 // 70 LoadP _ 60 30 ... alias_index=6
2977 // 80 Phi 75 50 60 Memory alias_index=4
2978 // 90 LoadP _ 80 30 ... alias_index=6
2979 // 100 LoadP _ 80 20 ... alias_index=4
2980 //
2981 // In phase 2, new memory inputs are computed for the loads and stores,
2982 // and a new version of the phi is created. In phase 4, the inputs to
2983 // node 80 are updated and then the memory nodes are updated with the
2984 // values computed in phase 2. This results in:
2985 //
2986 // 7 Parm #memory
2987 // 10 ConI "12"
2988 // 19 CheckCastPP "Foo"
2989 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
2990 // 29 CheckCastPP "Foo" iid=24
2991 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
2992 //
2993 // 40 StoreP 25 7 20 ... alias_index=4
2994 // 50 StoreP 35 7 30 ... alias_index=6
2995 // 60 StoreP 45 40 20 ... alias_index=4
2996 // 70 LoadP _ 50 30 ... alias_index=6
2997 // 80 Phi 75 40 60 Memory alias_index=4
2998 // 120 Phi 75 50 50 Memory alias_index=6
2999 // 90 LoadP _ 120 30 ... alias_index=6
3000 // 100 LoadP _ 80 20 ... alias_index=4
3001 //
3002 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, GrowableArray<ArrayCopyNode*> &arraycopy_worklist) {
3003 GrowableArray<Node *> memnode_worklist;
3004 GrowableArray<PhiNode *> orig_phis;
3005 PhaseIterGVN *igvn = _igvn;
3006 uint new_index_start = (uint) _compile->num_alias_types();
3007 Arena* arena = Thread::current()->resource_area();
3008 VectorSet visited(arena);
3009 ideal_nodes.clear(); // Reset for use with set_map/get_map.
3010 uint unique_old = _compile->unique();
3011
3012 // Phase 1: Process possible allocations from alloc_worklist.
3013 // Create instance types for the CheckCastPP for allocations where possible.
3014 //
3015 // (Note: don't forget to change the order of the second AddP node on
3016 // the alloc_worklist if the order of the worklist processing is changed,
3017 // see the comment in find_second_addp().)
3018 //
3019 while (alloc_worklist.length() != 0) {
3020 Node *n = alloc_worklist.pop();
3021 uint ni = n->_idx;
3022 if (n->is_Call()) {
3023 CallNode *alloc = n->as_Call();
3024 // copy escape information to call node
3025 PointsToNode* ptn = ptnode_adr(alloc->_idx);
3026 PointsToNode::EscapeState es = ptn->escape_state();
3027 // We have an allocation or call which returns a Java object,
3028 // see if it is unescaped.
3029 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable())
3030 continue;
3031 // Find the CheckCastPP for the allocate or for the return value of a call
3032 n = alloc->result_cast();
3033 if (n == NULL) { // No uses except Initialize node
3034 if (alloc->is_Allocate()) {
3035 // Set the scalar_replaceable flag for allocation
3036 // so it could be eliminated if it has no uses.
3037 alloc->as_Allocate()->_is_scalar_replaceable = true;
3038 }
3039 if (alloc->is_CallStaticJava()) {
3040 // Set the scalar_replaceable flag for boxing method
341 // so it could be eliminated if it has no uses.
3042 alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
3043 }
3044 continue;
3045 }
3046 if (!n->is_CheckCastPP()) { // not a unique CheckCastPP.
3047 assert(!alloc->is_Allocate(), "allocation should have unique type");
3048 continue;
3049 }
3050
3051 // The inline code for Object.clone() casts the allocation result to
3052 // java.lang.Object and then to the actual type of the allocated
3053 // object. Detect this case and use the second cast.
3054 // Also detect the j.l.reflect.Array.newInstance(jobject, jint) case when
3055 // the allocation result is cast to java.lang.Object and then
3056 // to the actual Array type.
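// For example (illustrative):
//
//   Point p = (Point) q.clone();
//
// casts the allocation result first to java.lang.Object and then to
// Point; the loop below finds that second CheckCastPP and uses it as
// the unique cast of the allocation.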
3057 if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
3058 && (alloc->is_AllocateArray() ||
3059 igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {
3060 Node *cast2 = NULL;
3061 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3062 Node *use = n->fast_out(i);
3063 if (use->is_CheckCastPP()) {
3064 cast2 = use;
3065 break;
3066 }
3067 }
3068 if (cast2 != NULL) {
3069 n = cast2;
3070 } else {
3071 // Not scalar replaceable if the allocation type is unknown statically
3072 // (reflection allocation); the object can't be restored during
3073 // deoptimization without a precise type.
3074 continue;
3075 }
3076 }
3077
3078 const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
3079 if (t == NULL)
3080 continue; // not a TypeOopPtr
3081 if (!t->klass_is_exact())
3082 continue; // not a unique type
3083
3084 if (alloc->is_Allocate()) {
3085 // Set the scalar_replaceable flag for allocation
3086 // so it could be eliminated.
3087 alloc->as_Allocate()->_is_scalar_replaceable = true;
3088 }
3089 if (alloc->is_CallStaticJava()) {
3090 // Set the scalar_replaceable flag for boxing method
3091 // so it could be eliminated.
3092 alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
3093 }
3094 set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state
3095 // in order for an object to be scalar-replaceable, it must be:
3096 // - a direct allocation (not a call returning an object)
3097 // - non-escaping
3098 // - eligible to be a unique type
3099 // - not determined to be ineligible by escape analysis
3100 set_map(alloc, n);
3101 set_map(n, alloc);
3102 const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
3103 igvn->hash_delete(n);
3104 igvn->set_type(n, tinst);
3105 n->raise_bottom_type(tinst);
3106 igvn->hash_insert(n);
3107 record_for_optimizer(n);
3108 // Allocate an alias index for the header fields. Accesses to
3109 // the header emitted during macro expansion wouldn't have a
3110 // correct memory state otherwise.
3111 _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
3112 _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
3113 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
3114
3115 // First, put on the worklist all Field edges from the Connection Graph,
3116 // which is more accurate than putting immediate users from the Ideal Graph.
3117 for (EdgeIterator e(ptn); e.has_next(); e.next()) {
3118 PointsToNode* tgt = e.get();
3119 if (tgt->is_Arraycopy()) {
3120 continue;
3121 }
3122 Node* use = tgt->ideal_node();
3123 assert(tgt->is_Field() && use->is_AddP(),
3124 "only AddP nodes are Field edges in CG");
3125 if (use->outcnt() > 0) { // Don't process dead nodes
3126 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
3127 if (addp2 != NULL) {
3128 assert(alloc->is_AllocateArray(), "array allocation was expected");
3129 alloc_worklist.append_if_missing(addp2);
3130 }
3131 alloc_worklist.append_if_missing(use);
3132 }
3133 }
3134
3135 // An allocation may have an Initialize which has raw stores. Scan
3136 // the users of the raw allocation result and push AddP users
3137 // on alloc_worklist.
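// For example (illustrative): field-initializing stores captured by the
// Initialize node address the raw allocation result (AddP case #3 in
// get_addp_base()), so they are reachable only through the raw
// projection scanned below and not through the CheckCastPP handled
// above.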
        Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
        assert(raw_result != NULL, "must have an allocation result");
        for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
          Node *use = raw_result->fast_out(i);
          if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, raw_result);
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(), "array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          } else if (use->is_MemBar()) {
            memnode_worklist.append_if_missing(use);
          }
        }
      }
    } else if (n->is_AddP()) {
      JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
      if (jobj == NULL || jobj == phantom_obj) {
#ifdef ASSERT
        ptnode_adr(get_addp_base(n)->_idx)->dump();
        ptnode_adr(n->_idx)->dump();
        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
#endif
        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
        return;
      }
      Node *base = get_map(jobj->idx());  // CheckCastPP node
      if (!split_AddP(n, base)) continue; // wrong type from dead path
    } else if (n->is_Phi() ||
               n->is_CheckCastPP() ||
               n->is_EncodeP() ||
               n->is_DecodeN() ||
               (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
      if (visited.test_set(n->_idx)) {
        assert(n->is_Phi(), "loops only through Phi's");
        continue;  // already processed
      }
      JavaObjectNode* jobj = unique_java_object(n);
      if (jobj == NULL || jobj == phantom_obj) {
#ifdef ASSERT
        ptnode_adr(n->_idx)->dump();
        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
#endif
        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
        return;
      } else {
        Node *val = get_map(jobj->idx());   // CheckCastPP node
        TypeNode *tn = n->as_Type();
        const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
        assert(tinst != NULL && tinst->is_known_instance() &&
               tinst->instance_id() == jobj->idx(), "instance type expected.");

        const Type *tn_type = igvn->type(tn);
        const TypeOopPtr *tn_t;
        if (tn_type->isa_narrowoop()) {
          tn_t = tn_type->make_ptr()->isa_oopptr();
        } else {
          tn_t = tn_type->isa_oopptr();
        }
        if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
          if (tn_type->isa_narrowoop()) {
            tn_type = tinst->make_narrowoop();
          } else {
            tn_type = tinst;
          }
          igvn->hash_delete(tn);
          igvn->set_type(tn, tn_type);
          tn->set_type(tn_type);
          igvn->hash_insert(tn);
          record_for_optimizer(n);
        } else {
          assert(tn_type == TypePtr::NULL_PTR ||
                 (tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass())),
                 "unexpected type");
          continue; // Skip dead path with different type
        }
      }
    } else {
      debug_only(n->dump();)
      assert(false, "EA: unexpected node");
      continue;
    }
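    // A sketch of why the users are reprocessed: in the IR example at the
    // top of this method, once CheckCastPP #29 (iid=24) has been given its
    // instance type, its AddP user #30 is revisited via alloc_worklist so
    // split_AddP() can move it to the new alias index (6).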
    // push allocation's users on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Mem() && use->in(MemNode::Address) == n) {
        // Load/store to instance's field
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
      } else if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
        Node* addp2 = find_second_addp(use, n);
        if (addp2 != NULL) {
          alloc_worklist.append_if_missing(addp2);
        }
        alloc_worklist.append_if_missing(use);
      } else if (use->is_Phi() ||
                 use->is_CheckCastPP() ||
                 use->is_EncodeNarrowPtr() ||
                 use->is_DecodeNarrowPtr() ||
                 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
        alloc_worklist.append_if_missing(use);
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->is_SafePoint()) {
        // Look for MergeMem nodes for calls which reference a unique allocation
        // (through CheckCastPP nodes) even for debug info.
        Node* m = use->in(TypeFunc::Memory);
        if (m->is_MergeMem()) {
          assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
        }
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites destination array
          memnode_worklist.append_if_missing(use);
        }
      } else {
        uint op = use->Opcode();
        if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
            (use->in(MemNode::Memory) == n)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(op == Op_CmpP || op == Op_Conv2B ||
                     op == Op_CastP2X || op == Op_StoreCM ||
                     op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
                     BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
          n->dump();
          use->dump();
          assert(false, "EA: missing allocation reference path");
        }
#endif
      }
    }

  }

  // Go over all ArrayCopy nodes and if one of the inputs has a unique
  // type, record it in the ArrayCopy node so we know what memory this
  // node uses/modifies.
  for (int next = 0; next < arraycopy_worklist.length(); next++) {
    ArrayCopyNode* ac = arraycopy_worklist.at(next);
    Node* dest = ac->in(ArrayCopyNode::Dest);
    if (dest->is_AddP()) {
      dest = get_addp_base(dest);
    }
    JavaObjectNode* jobj = unique_java_object(dest);
    if (jobj != NULL) {
      Node *base = get_map(jobj->idx());
      if (base != NULL) {
        const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
        ac->_dest_type = base_t;
      }
    }
    Node* src = ac->in(ArrayCopyNode::Src);
    if (src->is_AddP()) {
      src = get_addp_base(src);
    }
    jobj = unique_java_object(src);
    if (jobj != NULL) {
      Node* base = get_map(jobj->idx());
      if (base != NULL) {
        const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
        ac->_src_type = base_t;
      }
    }
  }
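  // For illustration (a sketch): given
  //
  //   Object[] dst = new Object[n];            // non-escaping, unique type
  //   System.arraycopy(src, 0, dst, 0, n);
  //
  // the loop above records the instance type of 'dst' in ac->_dest_type, so
  // later passes know the copy modifies only that instance's memory slice.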
  // New alias types were created in split_AddP().
  uint new_index_end = (uint) _compile->num_alias_types();
  assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");

  //  Phase 2:  Process MemNode's from memnode_worklist. Compute the new address
  //            type and the new values for their Memory inputs (the Memory
  //            inputs are not actually updated until Phase 4).
  if (memnode_worklist.length() == 0)
    return;  // nothing to do
  while (memnode_worklist.length() != 0) {
    Node *n = memnode_worklist.pop();
    if (visited.test_set(n->_idx))
      continue;
    if (n->is_Phi() || n->is_ClearArray()) {
      // we don't need to do anything, but the users must be pushed
    } else if (n->is_MemBar()) { // Initialize, MemBar nodes
      // we don't need to do anything, but the users must be pushed
      n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
      if (n == NULL)
        continue;
    } else if (n->Opcode() == Op_StrCompressedCopy ||
               n->Opcode() == Op_EncodeISOArray) {
      // get the memory projection
      n = n->find_out_with(Op_SCMemProj);
      assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
    } else {
      assert(n->is_Mem(), "memory node required.");
      Node *addr = n->in(MemNode::Address);
      const Type *addr_t = igvn->type(addr);
      if (addr_t == Type::TOP)
        continue;
      assert(addr_t->isa_ptr() != NULL, "pointer type required.");
      int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
      assert((uint)alias_idx < new_index_end, "wrong alias index");
      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != n->in(MemNode::Memory)) {
        // We delay the memory edge update since we need the old one in
        // the MergeMem code below when instance memory slices are separated.
        set_map(n, mem);
      }
      if (n->is_Load()) {
        continue;  // don't push users
      } else if (n->is_LoadStore()) {
        // get the memory projection
        n = n->find_out_with(Op_SCMemProj);
        assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
      }
    }
    // push user on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Phi() || use->is_ClearArray()) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
        if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
          continue;
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites destination array
          memnode_worklist.append_if_missing(use);
        }
      } else {
        uint op = use->Opcode();
        if ((use->in(MemNode::Memory) == n) &&
            (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
                     op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing memory path");
        }
#endif
      }
    }
  }
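  // For example, in the IR sketch at the top of this method: Phase 2
  // determines that LoadP #90 (alias_index=6) should be fed by the new
  // memory Phi #120 rather than the original Phi #80, but only records the
  // mapping via set_map(); the Memory edge itself is rewired in Phase 4.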
  //  Phase 3:  Process MergeMem nodes from _mergemem_worklist.
  //            Walk each memory slice moving the first node encountered of each
  //            instance type to the input corresponding to its alias index.
  uint length = _mergemem_worklist.length();
  for (uint next = 0; next < length; ++next) {
    MergeMemNode* nmm = _mergemem_worklist.at(next);
    assert(!visited.test_set(nmm->_idx), "should not be visited before");
    // Note: we don't want to use MergeMemStream here because we only want to
    // scan inputs which exist at the start, not ones we add during processing.
    // Note 2: MergeMem may already contain instance memory slices added
    // during the find_inst_mem() call when memory nodes were processed above.
    igvn->hash_delete(nmm);
    uint nslices = MIN2(nmm->req(), new_index_start);
    for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
      Node* mem = nmm->in(i);
      Node* cur = NULL;
      if (mem == NULL || mem->is_top())
        continue;
      // First, update mergemem by moving memory nodes to corresponding slices
      // if their type became more precise since this mergemem was created.
      while (mem->is_Mem()) {
        const Type *at = igvn->type(mem->in(MemNode::Address));
        if (at != Type::TOP) {
          assert(at->isa_ptr() != NULL, "pointer type required.");
          uint idx = (uint)_compile->get_alias_index(at->is_ptr());
          if (idx == i) {
            if (cur == NULL)
              cur = mem;
          } else {
            if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
              nmm->set_memory_at(idx, mem);
            }
          }
        }
        mem = mem->in(MemNode::Memory);
      }
      nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
      // Find any instance of the current type if we haven't already encountered
      // a memory slice of the instance along the memory chain.
      for (uint ni = new_index_start; ni < new_index_end; ni++) {
        if ((uint)_compile->get_general_index(ni) == i) {
          Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
          if (nmm->is_empty_memory(m)) {
            Node* result = find_inst_mem(mem, ni, orig_phis);
            if (_compile->failing()) {
              return;
            }
            nmm->set_memory_at(ni, result);
          }
        }
      }
    }
    // Find the rest of the instance values.
    for (uint ni = new_index_start; ni < new_index_end; ni++) {
      const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
      Node* result = step_through_mergemem(nmm, ni, tinst);
      if (result == nmm->base_memory()) {
        // Didn't find instance memory, search through general slice recursively.
        result = nmm->memory_at(_compile->get_general_index(ni));
        result = find_inst_mem(result, ni, orig_phis);
        if (_compile->failing()) {
          return;
        }
        nmm->set_memory_at(ni, result);
      }
    }
    igvn->hash_insert(nmm);
    record_for_optimizer(nmm);
  }

  //  Phase 4:  Update the inputs of non-instance memory Phis and
  //            the Memory input of memnodes.
  // First update the inputs of any non-instance Phi's from
  // which we split out an instance Phi.  Note we don't have
  // to recursively process Phi's encountered on the input memory
  // chains as is done in split_memory_phi() since they will
  // also be processed here.
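  // In the IR sketch at the top of this method, this is the step that leaves
  // the original Phi #80 with only alias_index=4 values while the split
  // Phi #120 carries the alias_index=6 slice of instance 24.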
  for (int j = 0; j < orig_phis.length(); j++) {
    PhiNode *phi = orig_phis.at(j);
    int alias_idx = _compile->get_alias_index(phi->adr_type());
    igvn->hash_delete(phi);
    for (uint i = 1; i < phi->req(); i++) {
      Node *mem = phi->in(i);
      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != new_mem) {
        phi->set_req(i, new_mem);
      }
    }
    igvn->hash_insert(phi);
    record_for_optimizer(phi);
  }

  // Update the memory inputs of MemNodes with the value we computed
  // in Phase 2 and move stores' memory users to the corresponding memory slices.
  // Disable memory split verification code until the fix for 6984348.
  // Currently it produces false negative results since it does not cover all cases.
#if 0 // ifdef ASSERT
  visited.Reset();
  Node_Stack old_mems(arena, _compile->unique() >> 2);
#endif
  for (uint i = 0; i < ideal_nodes.size(); i++) {
    Node* n = ideal_nodes.at(i);
    Node* nmem = get_map(n->_idx);
    assert(nmem != NULL, "sanity");
    if (n->is_Mem()) {
#if 0 // ifdef ASSERT
      Node* old_mem = n->in(MemNode::Memory);
      if (!visited.test_set(old_mem->_idx)) {
        old_mems.push(old_mem, old_mem->outcnt());
      }
#endif
      assert(n->in(MemNode::Memory) != nmem, "sanity");
      if (!n->is_Load()) {
        // Move memory users of a store first.
        move_inst_mem(n, orig_phis);
      }
      // Now update memory input
      igvn->hash_delete(n);
      n->set_req(MemNode::Memory, nmem);
      igvn->hash_insert(n);
      record_for_optimizer(n);
    } else {
      assert(n->is_Allocate() || n->is_CheckCastPP() ||
             n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
    }
  }
#if 0 // ifdef ASSERT
  // Verify that memory was split correctly
  while (old_mems.is_nonempty()) {
    Node* old_mem = old_mems.node();
    uint old_cnt = old_mems.index();
    old_mems.pop();
    assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
  }
#endif
}
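// A minimal end-to-end sketch (illustrative only, assuming a class Foo with
// a single pointer field at offset 12 as in the comment above): after
// split_unique_types(), a non-escaping allocation's loads and stores live on
// their own alias index,
//
//   Foo f = new Foo();   // NoEscape, exact type => private memory slice
//   f.bar = p;           // StoreP on the instance's slice only
//   return f.bar;        // LoadP reads just that slice
//
// which is what allows macro expansion to scalar-replace the allocation.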
"cp" : ""); 3589 } 3590 tty->print(" ["); 3591 for (UseIterator i(this); i.has_next(); i.next()) { 3592 PointsToNode* u = i.get(); 3593 bool is_base = false; 3594 if (PointsToNode::is_base_use(u)) { 3595 is_base = true; 3596 u = PointsToNode::get_use_node(u)->as_Field(); 3597 } 3598 tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : ""); 3599 } 3600 tty->print(" ]] "); 3601 if (_node == NULL) 3602 tty->print_cr("<null>"); 3603 else 3604 _node->dump(); 3605 } 3606 3607 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { 3608 bool first = true; 3609 int ptnodes_length = ptnodes_worklist.length(); 3610 for (int i = 0; i < ptnodes_length; i++) { 3611 PointsToNode *ptn = ptnodes_worklist.at(i); 3612 if (ptn == NULL || !ptn->is_JavaObject()) 3613 continue; 3614 PointsToNode::EscapeState es = ptn->escape_state(); 3615 if ((es != PointsToNode::NoEscape) && !Verbose) { 3616 continue; 3617 } 3618 Node* n = ptn->ideal_node(); 3619 if (n->is_Allocate() || (n->is_CallStaticJava() && 3620 n->as_CallStaticJava()->is_boxing_method())) { 3621 if (first) { 3622 tty->cr(); 3623 tty->print("======== Connection graph for "); 3624 _compile->method()->print_short_name(); 3625 tty->cr(); 3626 first = false; 3627 } 3628 ptn->dump(); 3629 // Print all locals and fields which reference this allocation 3630 for (UseIterator j(ptn); j.has_next(); j.next()) { 3631 PointsToNode* use = j.get(); 3632 if (use->is_LocalVar()) { 3633 use->dump(Verbose); 3634 } else if (Verbose) { 3635 use->dump(); 3636 } 3637 } 3638 tty->cr(); 3639 } 3640 } 3641 } 3642 #endif