/*
 * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1ThreadLocalData.hpp"
#endif // INCLUDE_G1GC
#if INCLUDE_ZGC
#include "gc/z/c2/zBarrierSetC2.hpp"
#endif

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  // Add ConP(#NULL) and ConN(#NULL) nodes.
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
  _pcmp_neq = NULL; // Should be initialized
  _pcmp_eq  = NULL;
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
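  // For illustration (hypothetical Java source, not from this file):
  //   Point p = new Point(x, y); return p.x + p.y;
  // creates an Allocate macro node and makes the method an EA candidate,
  // while locking on a parameter or constant (checked below) does not.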
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate())
      return true;
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con()))
        return true;
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  ResourceMark rm;

  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0)
    igvn->hash_delete(oop_null);
  if (noop_null->outcnt() == 0)
    igvn->hash_delete(noop_null);
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<Node*> storestore_worklist;
  GrowableArray<ArrayCopyNode*>  arraycopy_worklist;
  GrowableArray<PointsToNode*>   ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_worklist;
  GrowableArray<FieldNode*>      oop_fields_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), NULL);  // preallocate space
  // Initialize worklist
  if (C->root() != NULL) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != NULL && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only the results of allocations and Java static calls are interesting.
          non_escaped_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    if (n->is_MergeMem()) {
      // Collect all MergeMem nodes to add memory slices for
      // scalar replaceable objects in split_unique_types().
      _mergemem_worklist.append(n->as_MergeMem());
    } else if (OptimizePtrCompare && n->is_Cmp() &&
               ((n->Opcode() == Op_CmpP && ((CmpPNode*)n)->has_perturbed_operand() == NULL) ||
                n->Opcode() == Op_CmpN)) {
      // Collect compare pointers nodes.
      ptr_cmp_worklist.append(n);
    } else if (n->is_MemBarStoreStore()) {
      // Collect all MemBarStoreStore nodes so that depending on the
      // escape status of the associated Allocate node some of them
      // may be eliminated.
      storestore_worklist.append(n);
    } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
               (n->req() > MemBarNode::Precedent)) {
      record_for_optimizer(n);
#ifdef ASSERT
    } else if (n->is_AddP()) {
      // Collect address nodes for graph verification.
      addp_worklist.append(n);
#endif
    } else if (n->is_ArrayCopy()) {
      // Keep a list of ArrayCopy nodes so that if one of their inputs is
      // non-escaping, we can record a unique type.
      arraycopy_worklist.append(n->as_ArrayCopy());
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      ideal_nodes.push(m);
    }
  }
  if (non_escaped_worklist.length() == 0) {
    _collecting = false;
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while (delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }
  int ptnodes_length = ptnodes_worklist.length();

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and all
    // local vars have edges.
    _verify = true;
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // The bytecode analyzer BCEscapeAnalyzer, used to process Call nodes,
  // calls into CI to resolve symbols (types, fields, methods) referenced
  // in bytecode. During symbol resolution the VM may throw an exception
  // which CI cleans up and converts to a compilation failure.
  if (C->failing())  return false;

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or hit time or iterations limits.
    _collecting = false;
    return false;
  }

  // 3. Adjust scalar_replaceable state of nonescaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
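  // For illustration (hypothetical example, not from this file): if
  //   Point p = new Point(x, y); return p.x;
  // yields a NoEscape, scalar replaceable allocation, later passes can
  // rewire loads of p.x to the stored value and remove the Allocate
  // during macro expansion.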
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (n->is_CallStaticJava()) {
      n->as_CallStaticJava()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn);
      if (ptn->scalar_replaceable()) {
        alloc_worklist.append(ptn->ideal_node());
      }
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  if (has_scalar_replaceable_candidates &&
      C->AliasLevel() >= 3 && EliminateAllocations) {
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist);
    if (C->failing())  return false;
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    } else if (C->AliasLevel() < 3) {
      tty->print(" since AliasLevel < 3 ===");
    }
    tty->cr();
#endif
  }
  return has_non_escaping_obj;
}

// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != NULL) {
    Node* adr = n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == NULL ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape,
                           adr, delayed_worklist);
  }
}

// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method should not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != NULL)
    return; // No need to redefine PointsTo node during first iteration.

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on IGVN worklist to process them during
      // first IGVN optimization when escape information is still available.
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != NULL && strcmp(name, "uncommon_trap") == 0)
          return; // Skip uncommon traps
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
      if ((n->as_Call()->returns_pointer() &&
           n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
          (n->is_CallStaticJava() &&
           n->as_CallStaticJava()->is_boxing_method())) {
        add_call_node(n->as_Call());
      } else if (n->as_Call()->tf()->returns_value_type_as_fields()) {
        bool returns_oop = false;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
          ProjNode* pn = n->fast_out(i)->as_Proj();
          if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
            returns_oop = true;
          }
        }
        if (returns_oop) {
          add_call_node(n->as_Call());
        }
      }
    }
    return;
  }
  // Put this check here to process call arguments since some call nodes
  // point to phantom_obj.
  if (n_ptn == phantom_obj || n_ptn == null_obj)
    return; // Skip predefined nodes.

  int opcode = n->Opcode();
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      // Field nodes are created for all field types. They are used in
      // adjust_scalar_replaceable_state() and split_unique_types().
      // Note, non-oop fields will have only base edges in Connection
      // Graph because such fields are not used for oop loads and stores.
      int offset = address_offset(n, igvn);
      add_field(n, PointsToNode::NoEscape, offset);
      if (ptn_base == NULL) {
        delayed_worklist->push(n); // Process it later.
      } else {
        n_ptn = ptnode_adr(n_idx);
        add_base(n_ptn->as_Field(), ptn_base);
      }
      break;
    }
    case Op_CastX2P: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), delayed_worklist);
      break;
    }
    case Op_CMoveP: {
      add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during the first iteration because some inputs
      // may not be defined yet.
      delayed_worklist->push(n);
      break;
    }
    case Op_ConP:
    case Op_ConN:
    case Op_ConNKlass: {
      // assume all oop constants globally escape except for null
      PointsToNode::EscapeState es;
      const Type* t = igvn->type(n);
      if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
        es = PointsToNode::NoEscape;
      } else {
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(n, es);
      break;
    }
    case Op_CreateEx: {
      // assume that all exception objects globally escape
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadKlass:
    case Op_LoadNKlass: {
      // Unknown class is loaded
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadP:
#if INCLUDE_ZGC
    case Op_LoadBarrierSlowReg:
    case Op_LoadBarrierWeakSlowReg:
#endif
    case Op_LoadN:
    case Op_LoadPLocked: {
      add_objload_to_connection_graph(n, delayed_worklist);
      break;
    }
    case Op_Parm: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_PartialSubtypeCheck: {
      // Produces Null or notNull and is used only in CmpP so
      // phantom_obj could be used.
      map_ideal_node(n, phantom_obj); // Result is unknown
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during the first iteration because some inputs
        // may not be defined yet.
        delayed_worklist->push(n);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
          (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
        assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
               n->in(0)->as_Call()->tf()->returns_value_type_as_fields(), "what kind of oop return is it?");
        add_local_var_and_edge(n, PointsToNode::NoEscape,
                               n->in(0), delayed_worklist);
      }
#if INCLUDE_ZGC
      else if (UseZGC) {
        if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
          add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), delayed_worklist);
        }
      }
#endif
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
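        // For illustration (hypothetical example): in
        //   Object id(Object o) { return o; }
        // the returned oop becomes reachable by the caller, so everything
        // the return value may point to is marked GlobalEscape.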
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), delayed_worklist);
      }
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      // fallthrough
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_StorePConditional:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN: {
      Node* adr = n->in(MemNode::Address);
      const Type *adr_type = igvn->type(adr);
      adr_type = adr_type->make_ptr();
      if (adr_type == NULL) {
        break; // skip dead nodes
      }
      if (   adr_type->isa_oopptr()
          || (   (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
              && adr_type == TypeRawPtr::NOTNULL
              && adr->in(AddPNode::Address)->is_Proj()
              && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
        delayed_worklist->push(n); // Process it later.
#ifdef ASSERT
        assert(adr->is_AddP(), "expecting an AddP");
        if (adr_type == TypeRawPtr::NOTNULL) {
          // Verify a raw address for a store captured by Initialize node.
          int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
          assert(offs != Type::OffsetBot, "offset must be a constant");
        }
#endif
      } else {
        // Ignore copying the displaced header to the BoxNode (OSR compilation).
        if (adr->is_BoxLock())
          break;
        // Stored value escapes in unsafe access.
        if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
          // Pointer stores in G1 barriers look like unsafe accesses.
          // Ignore such stores so that non-escaping allocations can still
          // be scalar replaced.
#if INCLUDE_G1GC
          if (UseG1GC && adr->is_AddP()) {
            Node* base = get_addp_base(adr);
            if (base->Opcode() == Op_LoadP &&
                base->in(MemNode::Address)->is_AddP()) {
              adr = base->in(MemNode::Address);
              Node* tls = get_addp_base(adr);
              if (tls->Opcode() == Op_ThreadLocal) {
                int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
                if (offs == in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset())) {
                  break; // G1 pre barrier previous oop value store.
                }
                if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
                  break; // G1 post barrier card address store.
                }
              }
            }
          }
#endif
          delayed_worklist->push(n); // Process unsafe access later.
          break;
        }
#ifdef ASSERT
        n->dump(1);
        assert(false, "not unsafe or G1 barrier raw StoreP");
#endif
      }
      break;
    }
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      add_local_var(n, PointsToNode::ArgEscape);
      delayed_worklist->push(n); // Process it later.
      break;
    }
    case Op_ThreadLocal: {
      add_java_object(n, PointsToNode::ArgEscape);
      break;
    }
    default:
      ; // Do nothing for nodes not related to EA.
  }
  return;
}

#ifdef ASSERT
#define ELSE_FAIL(name)                                  \
      /* Should not be called for non-pointer types. */  \
      n->dump(1);                                        \
      assert(false, name);                               \
      break;
#else
#define ELSE_FAIL(name) \
      break;
#endif
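
// Explanatory note (added, not from the original): connection graph
// construction is a two-pass scheme. add_node_to_connection_graph()
// registers PointsTo nodes and defers nodes whose inputs may not be
// registered yet (Phi, CMoveP, stores, most calls) on delayed_worklist;
// add_final_edges() below runs afterwards, once every edge target can be
// looked up with ptnode_adr().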
// Add final simple edges to graph.
void ConnectionGraph::add_final_edges(Node *n) {
  PointsToNode* n_ptn = ptnode_adr(n->_idx);
#ifdef ASSERT
  if (_verify && n_ptn->is_JavaObject())
    return; // This method does not change graph for JavaObject.
#endif

  if (n->is_Call()) {
    process_call_arguments(n->as_Call());
    return;
  }
  assert(n->is_Store() || n->is_LoadStore() ||
         (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
         "node should be registered already");
  int opcode = n->Opcode();
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      assert(ptn_base != NULL, "field's base should be registered");
      add_base(n_ptn->as_Field(), ptn_base);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), NULL);
      break;
    }
    case Op_CMoveP: {
      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == NULL)
          continue;  // ignore NULL
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n)
          continue;  // ignore top or inputs which go back to this node
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_LoadP:
#if INCLUDE_ZGC
    case Op_LoadBarrierSlowReg:
    case Op_LoadBarrierWeakSlowReg:
#endif
    case Op_LoadN:
    case Op_LoadPLocked: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = _igvn->type(n);
      if (t->make_ptr() != NULL) {
        Node* adr = n->in(MemNode::Address);
        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
        break;
      }
      ELSE_FAIL("Op_LoadP");
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        for (uint i = 1; i < n->req(); i++) {
          Node* in = n->in(i);
          if (in == NULL)
            continue;  // ignore NULL
          Node* uncast_in = in->uncast();
          if (uncast_in->is_top() || uncast_in == n)
            continue;  // ignore top or inputs which go back to this node
          PointsToNode* ptn = ptnode_adr(in->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
        break;
      }
      ELSE_FAIL("Op_Phi");
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
          (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
        assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
               n->in(0)->as_Call()->tf()->returns_value_type_as_fields(), "what kind of oop return is it?");
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
        break;
      }
#if INCLUDE_ZGC
      else if (UseZGC) {
        if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
          add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), NULL);
          break;
        }
      }
#endif
      ELSE_FAIL("Op_Proj");
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), NULL);
        break;
      }
      ELSE_FAIL("Op_Return");
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_StorePConditional:
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      Node* adr = n->in(MemNode::Address);
      const Type *adr_type = _igvn->type(adr);
      adr_type = adr_type->make_ptr();
#ifdef ASSERT
      if (adr_type == NULL) {
        n->dump(1);
        assert(adr_type != NULL, "dead node should not be on list");
        break;
      }
#endif
      if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
          opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
      }
      if (   adr_type->isa_oopptr()
          || (   (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
              && adr_type == TypeRawPtr::NOTNULL
              && adr->in(AddPNode::Address)->is_Proj()
              && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
        // Point Address to Value
        PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
        assert(adr_ptn != NULL &&
               adr_ptn->as_Field()->is_oop(), "node should be registered");
        Node *val = n->in(MemNode::ValueIn);
        PointsToNode* ptn = ptnode_adr(val->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(adr_ptn, ptn);
        break;
      } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
        // Stored value escapes in unsafe access.
        Node *val = n->in(MemNode::ValueIn);
        PointsToNode* ptn = ptnode_adr(val->_idx);
        assert(ptn != NULL, "node should be registered");
        set_escape_state(ptn, PointsToNode::GlobalEscape);
        // Add edge to object for unsafe access with offset.
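        // For illustration (hypothetical example): a store through
        //   Unsafe.putObject(base, offset, val)
        // uses a raw address, so 'val' is conservatively marked
        // GlobalEscape; if the accessed field is known, an edge to it is
        // also added below.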
        PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
        assert(adr_ptn != NULL, "node should be registered");
        if (adr_ptn->is_Field()) {
          assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
          add_edge(adr_ptn, ptn);
        }
        break;
      }
      ELSE_FAIL("Op_StoreP");
    }
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      // char[]/byte[] arrays passed to string intrinsics do not escape but
      // they are not scalar replaceable. Adjust escape state for them.
      // Start from in(2) edge since in(1) is memory edge.
      for (uint i = 2; i < n->req(); i++) {
        Node* adr = n->in(i);
        const Type* at = _igvn->type(adr);
        if (!adr->is_top() && at->isa_ptr()) {
          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
                 at->isa_ptr() != NULL, "expecting a pointer");
          if (adr->is_AddP()) {
            adr = get_addp_base(adr);
          }
          PointsToNode* ptn = ptnode_adr(adr->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    default: {
      // This method should be called only for EA specific nodes which may
      // miss some edges when they were created.
#ifdef ASSERT
      n->dump(1);
#endif
      guarantee(false, "unknown node");
    }
  }
  return;
}

void ConnectionGraph::add_call_node(CallNode* call) {
  assert(call->returns_pointer() || call->tf()->returns_value_type_as_fields(), "only for call which returns pointer");
  uint call_idx = call->_idx;
  if (call->is_Allocate()) {
    Node* k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != NULL, "TypeKlassPtr required.");
    ciKlass* cik = kt->klass();
    PointsToNode::EscapeState es = PointsToNode::NoEscape;
    bool scalar_replaceable = true;
    if (call->is_AllocateArray()) {
      if (!cik->is_array_klass()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is not constant or too big.
          scalar_replaceable = false;
        }
      }
    } else {  // Allocate instance
      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
          cik->is_subclass_of(_compile->env()->Reference_klass()) ||
          !cik->is_instance_klass() || // StressReflectiveCode
          !cik->as_instance_klass()->can_be_instantiated() ||
          cik->as_instance_klass()->has_finalizer()) {
        es = PointsToNode::GlobalEscape;
      }
    }
    add_java_object(call, es);
    PointsToNode* ptn = ptnode_adr(call_idx);
    if (!scalar_replaceable && ptn->scalar_replaceable()) {
      ptn->set_scalar_replaceable(false);
    }
  } else if (call->is_CallStaticJava()) {
    // Call nodes could be different types:
    //
    // 1. CallDynamicJavaNode (what happened during call is unknown):
    //
    //    - mapped to GlobalEscape JavaObject node if oop is returned;
    //
    //    - all oop arguments are escaping globally;
    //
    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
    //
    //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
    //
    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
    //    - mapped to NoEscape JavaObject node if a non-escaping object
    //      allocated during the call is returned;
    //    - mapped to an ArgEscape LocalVar node pointing to object arguments
    //      which are returned and do not escape during the call;
    //
    //    - oop arguments' escaping status is defined by bytecode analysis;
    //
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record whether the call's return value escapes.
    ciMethod* meth = call->as_CallJava()->method();
    if (meth == NULL) {
      const char* name = call->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
      // Returns a newly allocated unescaped object.
      add_java_object(call, PointsToNode::NoEscape);
      ptnode_adr(call_idx)->set_scalar_replaceable(false);
    } else if (meth->is_boxing_method()) {
      // Returns boxing object
      PointsToNode::EscapeState es;
      vmIntrinsics::ID intr = meth->intrinsic_id();
      if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
        // It does not escape if object is always allocated.
        es = PointsToNode::NoEscape;
      } else {
        // It escapes globally if object could be loaded from cache.
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(call, es);
    } else {
      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
      call_analyzer->copy_dependencies(_compile->dependencies());
      if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated unescaped object, simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
        add_java_object(call, PointsToNode::NoEscape);
        ptnode_adr(call_idx)->set_scalar_replaceable(false);
      } else {
        // Determine whether any arguments are returned.
        const TypeTuple* d = call->tf()->domain_cc();
        bool ret_arg = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          if (d->field_at(i)->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
            ret_arg = true;
            break;
          }
        }
        if (ret_arg) {
          add_local_var(call, PointsToNode::ArgEscape);
        } else {
          // Returns unknown object.
          map_ideal_node(call, phantom_obj);
        }
      }
    }
  } else {
    // Another type of call, assume the worst case:
    // returned value is unknown and globally escapes.
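    // Explanatory note (added): only CallDynamicJava is expected here (see
    // the assert below). Since the callee is unknown at compile time, the
    // call's result is mapped to phantom_obj, the GlobalEscape placeholder
    // for an unknown object.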
    assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
    map_ideal_node(call, phantom_obj);
  }
}

void ConnectionGraph::process_call_arguments(CallNode *call) {
  bool is_arraycopy = false;
  switch (call->Opcode()) {
#ifdef ASSERT
    case Op_Allocate:
    case Op_AllocateArray:
    case Op_Lock:
    case Op_Unlock:
      assert(false, "should be done already");
      break;
#endif
    case Op_ArrayCopy:
    case Op_CallLeafNoFP:
      // Most array copies are ArrayCopy nodes at this point but there
      // are still a few direct calls to the copy subroutines (See
      // PhaseStringOpts::copy_string())
      is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
                     call->as_CallLeaf()->is_call_to_arraycopystub();
      // fall through
    case Op_CallLeaf: {
      // Stub calls, objects do not escape but they are not scalar replaceable.
      // Adjust escape state for outgoing arguments.
      const TypeTuple* d = call->tf()->domain_sig();
      bool src_has_oops = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        Node *arg = call->in(i);
        if (arg == NULL) {
          continue;
        }
        const Type *aat = _igvn->type(arg);
        if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
          continue;
        if (arg->is_AddP()) {
          //
          // The inline_native_clone() case when the arraycopy stub is called
          // after the allocation before Initialize and CheckCastPP nodes.
          // Or normal arraycopy for object arrays case.
          //
          // Set AddP's base (Allocate) as not scalar replaceable since
          // pointer to the base (with offset) is passed as argument.
          //
          arg = get_addp_base(arg);
        }
        PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
        assert(arg_ptn != NULL, "should be registered");
        PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
        if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != NULL, "expecting a pointer");
          bool arg_has_oops = aat->isa_oopptr() &&
                              (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
                               (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
          if (i == TypeFunc::Parms) {
            src_has_oops = arg_has_oops;
          }
          //
          // src or dst could be j.l.Object when other is basic type array:
          //
          //   arraycopy(char[],0,Object*,0,size);
          //   arraycopy(Object*,0,char[],0,size);
          //
          // Don't add edges in such cases.
          //
          bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
                                       arg_has_oops && (i > TypeFunc::Parms);
#ifdef ASSERT
          if (!(is_arraycopy ||
                BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
                (call->as_CallLeaf()->_name != NULL &&
                 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "load_unknown_value") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "store_unknown_value") == 0)
                 ))) {
            call->dump();
            fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
          }
#endif
          // Always process arraycopy's destination object since
          // we need to add all possible edges to references in
          // source object.
          if (arg_esc >= PointsToNode::ArgEscape &&
              !arg_is_arraycopy_dest) {
            continue;
          }
          PointsToNode::EscapeState es = PointsToNode::ArgEscape;
          if (call->is_ArrayCopy()) {
            ArrayCopyNode* ac = call->as_ArrayCopy();
            if (ac->is_clonebasic() ||
                ac->is_arraycopy_validated() ||
                ac->is_copyof_validated() ||
                ac->is_copyofrange_validated()) {
              es = PointsToNode::NoEscape;
            }
          }
          set_escape_state(arg_ptn, es);
          if (arg_is_arraycopy_dest) {
            Node* src = call->in(TypeFunc::Parms);
            if (src->is_AddP()) {
              src = get_addp_base(src);
            }
            PointsToNode* src_ptn = ptnode_adr(src->_idx);
            assert(src_ptn != NULL, "should be registered");
            if (arg_ptn != src_ptn) {
              // Special arraycopy edge:
              // A destination object's field can't have the source object
              // as its base since the objects' escape states are not related.
              // Only the escape state of the destination object's fields
              // affects the escape state of fields in the source object.
              add_arraycopy(call, es, src_ptn, arg_ptn);
            }
          }
        }
      }
      break;
    }
    case Op_CallStaticJava: {
      // For a static call, we know exactly what method is being called.
      // Use bytecode estimator to record the call's escape effects.
#ifdef ASSERT
      const char* name = call->as_CallStaticJava()->_name;
      assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
#endif
      ciMethod* meth = call->as_CallJava()->method();
      if ((meth != NULL) && meth->is_boxing_method()) {
        break; // Boxing methods do not modify any oops.
      }
      BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
      // fall-through if not a Java method or no analyzer information
      if (call_analyzer != NULL) {
        PointsToNode* call_ptn = ptnode_adr(call->_idx);
        const TypeTuple* d = call->tf()->domain_cc();
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          int k = i - TypeFunc::Parms;
          Node* arg = call->in(i);
          PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
          if (at->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(k)) {
            // The call returns arguments.
            if (call_ptn != NULL) { // Is call's result used?
              assert(call_ptn->is_LocalVar(), "node should be registered");
              assert(arg_ptn != NULL, "node should be registered");
              add_edge(call_ptn, arg_ptn);
            }
          }
          if (at->isa_oopptr() != NULL &&
              arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
            if (!call_analyzer->is_arg_stack(k)) {
              // The argument global escapes
              set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
            } else {
              set_escape_state(arg_ptn, PointsToNode::ArgEscape);
              if (!call_analyzer->is_arg_local(k)) {
                // The argument itself doesn't escape, but any fields might
                set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
              }
            }
          }
        }
        if (call_ptn != NULL && call_ptn->is_LocalVar()) {
          // The call returns arguments.
          assert(call_ptn->edge_count() > 0, "sanity");
          if (!call_analyzer->is_return_local()) {
            // Also returns unknown object.
            add_edge(call_ptn, phantom_obj);
          }
        }
        break;
      }
    }
    default: {
      // Fall-through here if not a Java method or no analyzer information
      // or some other type of call, assume the worst case: all arguments
      // globally escape.
      const TypeTuple* d = call->tf()->domain_cc();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != NULL) {
          Node* arg = call->in(i);
          if (arg->is_AddP()) {
            arg = get_addp_base(arg);
          }
          assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
          set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
        }
      }
    }
  }
}


// Finish Graph construction.
bool ConnectionGraph::complete_connection_graph(
                         GrowableArray<PointsToNode*>&   ptnodes_worklist,
                         GrowableArray<JavaObjectNode*>& non_escaped_worklist,
                         GrowableArray<JavaObjectNode*>& java_objects_worklist,
                         GrowableArray<FieldNode*>&      oop_fields_worklist) {
  // Normally only 1-3 passes are needed to build the Connection Graph,
  // depending on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
  // Set the limit to 20 to catch situations where something went wrong and
  // bail out of Escape Analysis.
  // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.
#define CG_BUILD_ITER_LIMIT 20

  // Propagate GlobalEscape and ArgEscape escape states and check that
  // we still have non-escaping objects. The method pushes on _worklist
  // Field nodes which reference phantom_object.
  if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
    return false; // Nothing to do.
  }
  // Now propagate references to all JavaObject nodes.
  int java_objects_length = java_objects_worklist.length();
  elapsedTimer time;
  bool timeout = false;
  int new_edges = 1;
  int iterations = 0;
  do {
    while ((new_edges > 0) &&
           (iterations++ < CG_BUILD_ITER_LIMIT)) {
      double start_time = time.seconds();
      time.start();
      new_edges = 0;
      // Propagate references to phantom_object for nodes pushed on _worklist
      // by find_non_escaped_objects() and find_field_value().
      new_edges += add_java_object_edges(phantom_obj, false);
      for (int next = 0; next < java_objects_length; ++next) {
        JavaObjectNode* ptn = java_objects_worklist.at(next);
        new_edges += add_java_object_edges(ptn, true);

#define SAMPLE_SIZE 4
        if ((next % SAMPLE_SIZE) == 0) {
          // Every SAMPLE_SIZE iterations, estimate how long it will take
          // to complete graph construction.
          time.stop();
          // Poll for requests from shutdown mechanism to quiesce compiler
          // because Connection graph construction may take a long time.
          CompileBroker::maybe_block();
          double stop_time = time.seconds();
          double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
          double time_until_end = time_per_iter * (double)(java_objects_length - next);
          if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
            timeout = true;
            break; // Timeout
          }
          start_time = stop_time;
          time.start();
        }
#undef SAMPLE_SIZE

      }
      if (timeout) break;
      if (new_edges > 0) {
        // Update escape states on each iteration if graph was updated.
        if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
          return false; // Nothing to do.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    }
    if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) {
      time.start();
      // Find fields which have unknown value.
      int fields_length = oop_fields_worklist.length();
      for (int next = 0; next < fields_length; next++) {
        FieldNode* field = oop_fields_worklist.at(next);
        if (field->edge_count() == 0) {
          new_edges += find_field_value(field);
          // This code may have added new edges to phantom_object.
          // Another cycle is needed to propagate references to phantom_object.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    } else {
      new_edges = 0; // Bailout
    }
  } while (new_edges > 0);

  // Bailout if passed limits.
  if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) {
    Compile* C = _compile;
    if (C->log() != NULL) {
      C->log()->begin_elem("connectionGraph_bailout reason='reached ");
      C->log()->text("%s", timeout ? "time" : "iterations");
      C->log()->end_elem(" limit'");
    }
    assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
           time.seconds(), iterations, nodes_size(), ptnodes_worklist.length());
    // Possible infinite build_connection_graph loop,
    // bailout (no changes to ideal graph were made).
    return false;
  }
#ifdef ASSERT
  if (Verbose && PrintEscapeAnalysis) {
    tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d",
                  iterations, nodes_size(), ptnodes_worklist.length());
  }
#endif

#undef CG_BUILD_ITER_LIMIT

  // Find fields initialized by NULL for non-escaping Allocations.
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    PointsToNode::EscapeState es = ptn->escape_state();
    assert(es <= PointsToNode::ArgEscape, "sanity");
    if (es == PointsToNode::NoEscape) {
      if (find_init_values(ptn, null_obj, _igvn) > 0) {
        // Adding references to NULL object does not change escape states
        // since it does not escape. Also no fields are added to NULL object.
        add_java_object_edges(null_obj, false);
      }
    }
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      // The object allocated by this Allocate node will never be
      // seen by another thread. Mark it so that when it is
      // expanded no MemBarStoreStore is added.
      InitializeNode* ini = n->as_Allocate()->initialization();
      if (ini != NULL)
        ini->set_does_not_escape();
    }
  }
  return true; // Finished graph construction.
}

// Propagate GlobalEscape and ArgEscape escape states to all nodes
// and check that we still have non-escaping java objects.
bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
                                               GrowableArray<JavaObjectNode*>& non_escaped_worklist) {
  GrowableArray<PointsToNode*> escape_worklist;
  // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
  int ptnodes_length = ptnodes_worklist.length();
  for (int next = 0; next < ptnodes_length; ++next) {
    PointsToNode* ptn = ptnodes_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::ArgEscape ||
        ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
      escape_worklist.push(ptn);
    }
  }
  // Set escape states to referenced nodes (edges list).
  while (escape_worklist.length() > 0) {
    PointsToNode* ptn = escape_worklist.pop();
    PointsToNode::EscapeState es = ptn->escape_state();
    PointsToNode::EscapeState field_es = ptn->fields_escape_state();
    if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
        es >= PointsToNode::ArgEscape) {
      // GlobalEscape or ArgEscape state of field means it has unknown value.
      if (add_edge(ptn, phantom_obj)) {
        // New edge was added
        add_field_uses_to_worklist(ptn->as_Field());
      }
    }
    for (EdgeIterator i(ptn); i.has_next(); i.next()) {
      PointsToNode* e = i.get();
      if (e->is_Arraycopy()) {
        assert(ptn->arraycopy_dst(), "sanity");
        // Propagate only fields escape state through arraycopy edge.
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es);
          escape_worklist.push(e);
        }
      } else if (es >= field_es) {
        // fields_escape_state is also set to 'es' if it is less than 'es'.
        if (e->escape_state() < es) {
          set_escape_state(e, es);
          escape_worklist.push(e);
        }
      } else {
        // Propagate field escape state.
        bool es_changed = false;
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es);
          es_changed = true;
        }
        if ((e->escape_state() < field_es) &&
            e->is_Field() && ptn->is_JavaObject() &&
            e->as_Field()->is_oop()) {
          // Change escape state of referenced fields.
          set_escape_state(e, field_es);
          es_changed = true;
        } else if (e->escape_state() < es) {
          set_escape_state(e, es);
          es_changed = true;
        }
        if (es_changed) {
          escape_worklist.push(e);
        }
      }
    }
  }
  // Remove escaped objects from non_escaped list.
  for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
      non_escaped_worklist.delete_at(next);
    }
    if (ptn->escape_state() == PointsToNode::NoEscape) {
      // Find fields in non-escaped allocations which have unknown value.
      find_init_values(ptn, phantom_obj, NULL);
    }
  }
  return (non_escaped_worklist.length() > 0);
}

// Add all references to JavaObject node by walking over all uses.
int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
  int new_edges = 0;
  if (populate_worklist) {
    // Populate _worklist by uses of jobj's uses.
    for (UseIterator i(jobj); i.has_next(); i.next()) {
      PointsToNode* use = i.get();
      if (use->is_Arraycopy())
        continue;
      add_uses_to_worklist(use);
      if (use->is_Field() && use->as_Field()->is_oop()) {
        // Put on worklist all field's uses (loads) and
        // related field nodes (same base and offset).
        add_field_uses_to_worklist(use->as_Field());
      }
    }
  }
  for (int l = 0; l < _worklist.length(); l++) {
    PointsToNode* use = _worklist.at(l);
    if (PointsToNode::is_base_use(use)) {
      // Add reference from jobj to field and from field to jobj (field's base).
      use = PointsToNode::get_use_node(use)->as_Field();
      if (add_base(use->as_Field(), jobj)) {
        new_edges++;
      }
      continue;
    }
    assert(!use->is_JavaObject(), "sanity");
    if (use->is_Arraycopy()) {
      if (jobj == null_obj) // NULL object does not have field edges
        continue;
      // Added edge from Arraycopy node to arraycopy's source java object
      if (add_edge(use, jobj)) {
        jobj->set_arraycopy_src();
        new_edges++;
      }
      // and stop here.
      continue;
    }
    if (!add_edge(use, jobj))
      continue; // No new edge added, there was such edge already.
    new_edges++;
    if (use->is_LocalVar()) {
      add_uses_to_worklist(use);
      if (use->arraycopy_dst()) {
        for (EdgeIterator i(use); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_Arraycopy()) {
            if (jobj == null_obj) // NULL object does not have field edges
              continue;
            // Add edge from arraycopy's destination java object to Arraycopy node.
            if (add_edge(jobj, e)) {
              new_edges++;
              jobj->set_arraycopy_dst();
            }
          }
        }
      }
    } else {
      // A new edge was added to the values stored in the field.
      // Put on worklist all field's uses (loads) and
      // related field nodes (same base and offset).
      add_field_uses_to_worklist(use->as_Field());
    }
  }
  _worklist.clear();
  _in_worklist.Reset();
  return new_edges;
}
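
// Explanatory note (added, not from the original): two Field nodes with
// the same base and the same offset may name the same memory location, so
// when one gains a new value, loads through the others must be revisited.
// The helpers below enumerate such related fields conservatively.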
// Put on worklist all related field nodes.
void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
  assert(field->is_oop(), "sanity");
  int offset = field->offset();
  add_uses_to_worklist(field);
  // Loop over all bases of this field and push on worklist Field nodes
  // with the same offset and base (since they may reference the same field).
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    add_fields_to_worklist(field, base);
    // Check if the base was the source object of an arraycopy and go over
    // the arraycopy's destination objects since values stored to a field
    // of the source object are accessible by uses (loads) of fields of
    // the destination objects.
    if (base->arraycopy_src()) {
      for (UseIterator j(base); j.has_next(); j.next()) {
        PointsToNode* arycp = j.get();
        if (arycp->is_Arraycopy()) {
          for (UseIterator k(arycp); k.has_next(); k.next()) {
            PointsToNode* abase = k.get();
            if (abase->arraycopy_dst() && abase != base) {
              // Look for the same arraycopy reference.
              add_fields_to_worklist(field, abase);
            }
          }
        }
      }
    }
  }
}

// Put on worklist all related field nodes.
void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
  int offset = field->offset();
  if (base->is_LocalVar()) {
    for (UseIterator j(base); j.has_next(); j.next()) {
      PointsToNode* f = j.get();
      if (PointsToNode::is_base_use(f)) { // Field
        f = PointsToNode::get_use_node(f);
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  } else {
    assert(base->is_JavaObject(), "sanity");
    if (// Skip phantom_object since it is only used to indicate that
        // this field's content globally escapes.
        (base != phantom_obj) &&
        // NULL object node does not have fields.
        (base != null_obj)) {
      for (EdgeIterator i(base); i.has_next(); i.next()) {
        PointsToNode* f = i.get();
        // Skip arraycopy edge since store to destination object field
        // does not update value in source object field.
        if (f->is_Arraycopy()) {
          assert(base->arraycopy_dst(), "sanity");
          continue;
        }
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  }
}

// Find fields which have unknown value.
int ConnectionGraph::find_field_value(FieldNode* field) {
  // Escaped fields should have init value already.
  assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
  int new_edges = 0;
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    if (base->is_JavaObject()) {
      // Skip Allocate's fields which will be processed later.
      if (base->ideal_node()->is_Allocate())
        return 0;
      assert(base == null_obj, "only NULL ptr base expected here");
    }
  }
  if (add_edge(field, phantom_obj)) {
    // New edge was added
    new_edges++;
    add_field_uses_to_worklist(field);
  }
  return new_edges;
}
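
// Explanatory note (added, not from the original): Java fields are
// default-initialized to null, and those default stores are not captured
// by the Initialize node. find_init_values() therefore adds an explicit
// edge to null_obj for every oop field whose initializing store was not
// recorded, which keeps the compare-pointers optimization correct.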
1559 int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) {
1560 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1561 int new_edges = 0;
1562 Node* alloc = pta->ideal_node();
1563 if (init_val == phantom_obj) {
1564 // Do nothing for Allocate nodes since their field values are
1565 // "known" unless they are initialized by arraycopy/clone.
1566 if (alloc->is_Allocate() && !pta->arraycopy_dst())
1567 return 0;
1568 assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
1569 #ifdef ASSERT
1570 if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) {
1571 const char* name = alloc->as_CallStaticJava()->_name;
1572 assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
1573 }
1574 #endif
1575 // Non-escaped allocations returned from Java or runtime calls have
1576 // unknown values in fields.
1577 for (EdgeIterator i(pta); i.has_next(); i.next()) {
1578 PointsToNode* field = i.get();
1579 if (field->is_Field() && field->as_Field()->is_oop()) {
1580 if (add_edge(field, phantom_obj)) {
1581 // New edge was added
1582 new_edges++;
1583 add_field_uses_to_worklist(field->as_Field());
1584 }
1585 }
1586 }
1587 return new_edges;
1588 }
1589 assert(init_val == null_obj, "sanity");
1590 // Do nothing for Call nodes since their field values are unknown.
1591 if (!alloc->is_Allocate())
1592 return 0;
1593
1594 InitializeNode* ini = alloc->as_Allocate()->initialization();
1595 bool visited_bottom_offset = false;
1596 GrowableArray<int> offsets_worklist;
1597
1598 // Check if an oop field's initializing value is recorded and add
1599 // a corresponding NULL edge if the field's value is not recorded.
1600 // The Connection Graph does not record a default initialization by NULL
1601 // captured by an Initialize node.
1602 //
1603 for (EdgeIterator i(pta); i.has_next(); i.next()) {
1604 PointsToNode* field = i.get(); // Field (AddP)
1605 if (!field->is_Field() || !field->as_Field()->is_oop())
1606 continue; // Not oop field
1607 int offset = field->as_Field()->offset();
1608 if (offset == Type::OffsetBot) {
1609 if (!visited_bottom_offset) {
1610 // OffsetBot is used to reference an array's element, so
1611 // always add a reference to NULL to all Field nodes since we don't
1612 // know which element is referenced.
1613 if (add_edge(field, null_obj)) {
1614 // New edge was added
1615 new_edges++;
1616 add_field_uses_to_worklist(field->as_Field());
1617 visited_bottom_offset = true;
1618 }
1619 }
1620 } else {
1621 // Check only oop fields.
1622 const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
1623 if (adr_type->isa_rawptr()) {
1624 #ifdef ASSERT
1625 // Raw pointers are used for initializing stores, so skip this one
1626 // since it should have been recorded already.
1627 Node* base = get_addp_base(field->ideal_node());
1628 assert(adr_type->isa_rawptr() && base->is_Proj() &&
1629 (base->in(0) == alloc),"unexpected pointer type");
1630 #endif
1631 continue;
1632 }
1633 if (!offsets_worklist.contains(offset)) {
1634 offsets_worklist.append(offset);
1635 Node* value = NULL;
1636 if (ini != NULL) {
1637 // StoreP::memory_type() == T_ADDRESS
1638 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
1639 Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
1640 // Make sure the initializing store has the same type as this AddP.
1641 // This AddP may reference a non-existing field because it is on a
1642 // dead branch of a bimorphic call which has not been eliminated yet.
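// An illustrative (assumed) shape of such a dead branch:
//
//   if (x instanceof A) { ((A)x).fa = ...; }   // AddP for field A.fa
//   else                { ((B)x).fb = ...; }   // AddP for field B.fb
//
// If profiling proves the receiver is always an A, the else branch is
// dead but not yet removed, and its AddP may still be seen here while
// referencing a field B.fb which does not exist in the A instance.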
1643 if (store != NULL && store->is_Store() &&
1644 store->as_Store()->memory_type() == ft) {
1645 value = store->in(MemNode::ValueIn);
1646 #ifdef ASSERT
1647 if (VerifyConnectionGraph) {
1648 // Verify that AddP already points to all objects the value points to.
1649 PointsToNode* val = ptnode_adr(value->_idx);
1650 assert((val != NULL), "should be processed already");
1651 PointsToNode* missed_obj = NULL;
1652 if (val->is_JavaObject()) {
1653 if (!field->points_to(val->as_JavaObject())) {
1654 missed_obj = val;
1655 }
1656 } else {
1657 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
1658 tty->print_cr("----------init store has invalid value -----");
1659 store->dump();
1660 val->dump();
1661 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
1662 }
1663 for (EdgeIterator j(val); j.has_next(); j.next()) {
1664 PointsToNode* obj = j.get();
1665 if (obj->is_JavaObject()) {
1666 if (!field->points_to(obj->as_JavaObject())) {
1667 missed_obj = obj;
1668 break;
1669 }
1670 }
1671 }
1672 }
1673 if (missed_obj != NULL) {
1674 tty->print_cr("----------field---------------------------------");
1675 field->dump();
1676 tty->print_cr("----------missed reference to object------------");
1677 missed_obj->dump();
1678 tty->print_cr("----------object referenced by init store-------");
1679 store->dump();
1680 val->dump();
1681 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
1682 }
1683 }
1684 #endif
1685 } else {
1686 // There could be initializing stores which follow allocation.
1687 // For example, a volatile field store is not collected
1688 // by the Initialize node.
1689 //
1690 // Need to check for dependent loads to separate such stores from
1691 // stores which follow loads. For now, add the initial value NULL so
1692 // that the pointer compare optimization works correctly.
1693 }
1694 }
1695 if (value == NULL) {
1696 // A field's initializing value was not recorded. Add NULL.
1697 if (add_edge(field, null_obj)) {
1698 // New edge was added
1699 new_edges++;
1700 add_field_uses_to_worklist(field->as_Field());
1701 }
1702 }
1703 }
1704 }
1705 }
1706 return new_edges;
1707 }
1708
1709 // Adjust the scalar_replaceable state after the Connection Graph is built.
1710 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {
1711 // Search for non-escaping objects which are not scalar replaceable
1712 // and mark them to propagate the state to referenced objects.
1713
1714 // 1. An object is not scalar replaceable if the field into which it is
1715 // stored has unknown offset (stored into unknown element of an array).
1716 //
1717 for (UseIterator i(jobj); i.has_next(); i.next()) {
1718 PointsToNode* use = i.get();
1719 if (use->is_Arraycopy()) {
1720 continue;
1721 }
1722 if (use->is_Field()) {
1723 FieldNode* field = use->as_Field();
1724 assert(field->is_oop() && field->scalar_replaceable(), "sanity");
1725 if (field->offset() == Type::OffsetBot) {
1726 jobj->set_scalar_replaceable(false);
1727 return;
1728 }
1729 // 2. An object is not scalar replaceable if the field into which it is
1730 // stored has multiple bases, one of which is null.
1731 if (field->base_count() > 1) {
1732 for (BaseIterator i(field); i.has_next(); i.next()) {
1733 PointsToNode* base = i.get();
1734 if (base == null_obj) {
1735 jobj->set_scalar_replaceable(false);
1736 return;
1737 }
1738 }
1739 }
1740 }
1741 assert(use->is_Field() || use->is_LocalVar(), "sanity");
1742 // 3. An object is not scalar replaceable if it is merged with other objects.
1743 for (EdgeIterator j(use); j.has_next(); j.next()) {
1744 PointsToNode* ptn = j.get();
1745 if (ptn->is_JavaObject() && ptn != jobj) {
1746 // Mark all objects.
1747 jobj->set_scalar_replaceable(false);
1748 ptn->set_scalar_replaceable(false);
1749 }
1750 }
1751 if (!jobj->scalar_replaceable()) {
1752 return;
1753 }
1754 }
1755
1756 for (EdgeIterator j(jobj); j.has_next(); j.next()) {
1757 if (j.get()->is_Arraycopy()) {
1758 continue;
1759 }
1760
1761 // A non-escaping object node should point only to field nodes.
1762 FieldNode* field = j.get()->as_Field();
1763 int offset = field->as_Field()->offset();
1764
1765 // 4. An object is not scalar replaceable if it has a field with unknown
1766 // offset (an array's element is accessed in a loop).
1767 if (offset == Type::OffsetBot) {
1768 jobj->set_scalar_replaceable(false);
1769 return;
1770 }
1771 // 5. Currently an object is not scalar replaceable if a LoadStore node
1772 // accesses its field since the field's value is unknown after it.
1773 //
1774 Node* n = field->ideal_node();
1775 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1776 if (n->fast_out(i)->is_LoadStore()) {
1777 jobj->set_scalar_replaceable(false);
1778 return;
1779 }
1780 }
1781
1782 // 6. Or the address may point to more than one object. This may produce
1783 // a false positive result (set not scalar replaceable)
1784 // since the flow-insensitive escape analysis can't separate
1785 // the case when stores overwrite the field's value from the case
1786 // when stores happened on different control branches.
1787 //
1788 // Note: it will disable scalar replacement in some cases:
1789 //
1790 // Point p[] = new Point[1];
1791 // p[0] = new Point(); // Will not be scalar replaced
1792 //
1793 // but it will save us from incorrect optimizations in cases like the following:
1794 //
1795 // Point p[] = new Point[1];
1796 // if ( x ) p[0] = new Point(); // Will not be scalar replaced
1797 //
1798 if (field->base_count() > 1) {
1799 for (BaseIterator i(field); i.has_next(); i.next()) {
1800 PointsToNode* base = i.get();
1801 // Don't take into account LocalVar nodes which
1802 // may point to only one object, which by now should also be
1803 // this field's base.
1804 if (base->is_JavaObject() && base != jobj) {
1805 // Mark all bases.
1806 jobj->set_scalar_replaceable(false);
1807 base->set_scalar_replaceable(false);
1808 }
1809 }
1810 }
1811 }
1812 }
1813
1814 #ifdef ASSERT
1815 void ConnectionGraph::verify_connection_graph(
1816 GrowableArray<PointsToNode*>& ptnodes_worklist,
1817 GrowableArray<JavaObjectNode*>& non_escaped_worklist,
1818 GrowableArray<JavaObjectNode*>& java_objects_worklist,
1819 GrowableArray<Node*>& addp_worklist) {
1820 // Verify that the graph is complete - no new edges could be added.
1821 int java_objects_length = java_objects_worklist.length();
1822 int non_escaped_length = non_escaped_worklist.length();
1823 int new_edges = 0;
1824 for (int next = 0; next < java_objects_length; ++next) {
1825 JavaObjectNode* ptn = java_objects_worklist.at(next);
1826 new_edges += add_java_object_edges(ptn, true);
1827 }
1828 assert(new_edges == 0, "graph was not complete");
1829 // Verify that escape state is final.
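// Re-running the propagation below must be a no-op: the set of
// non-escaped objects may neither grow nor shrink and nothing may be
// left on _worklist, otherwise the escape states had not reached a
// fixed point.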
1830 int length = non_escaped_worklist.length();
1831 find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist);
1832 assert((non_escaped_length == non_escaped_worklist.length()) &&
1833 (non_escaped_length == length) &&
1834 (_worklist.length() == 0), "escape state was not final");
1835
1836 // Verify fields information.
1837 int addp_length = addp_worklist.length();
1838 for (int next = 0; next < addp_length; ++next) {
1839 Node* n = addp_worklist.at(next);
1840 FieldNode* field = ptnode_adr(n->_idx)->as_Field();
1841 if (field->is_oop()) {
1842 // Verify that the field has all its bases.
1843 Node* base = get_addp_base(n);
1844 PointsToNode* ptn = ptnode_adr(base->_idx);
1845 if (ptn->is_JavaObject()) {
1846 assert(field->has_base(ptn->as_JavaObject()), "sanity");
1847 } else {
1848 assert(ptn->is_LocalVar(), "sanity");
1849 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
1850 PointsToNode* e = i.get();
1851 if (e->is_JavaObject()) {
1852 assert(field->has_base(e->as_JavaObject()), "sanity");
1853 }
1854 }
1855 }
1856 // Verify that all fields have initializing values.
1857 if (field->edge_count() == 0) {
1858 tty->print_cr("----------field does not have references----------");
1859 field->dump();
1860 for (BaseIterator i(field); i.has_next(); i.next()) {
1861 PointsToNode* base = i.get();
1862 tty->print_cr("----------field has next base---------------------");
1863 base->dump();
1864 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) {
1865 tty->print_cr("----------base has fields-------------------------");
1866 for (EdgeIterator j(base); j.has_next(); j.next()) {
1867 j.get()->dump();
1868 }
1869 tty->print_cr("----------base has references---------------------");
1870 for (UseIterator j(base); j.has_next(); j.next()) {
1871 j.get()->dump();
1872 }
1873 }
1874 }
1875 for (UseIterator i(field); i.has_next(); i.next()) {
1876 i.get()->dump();
1877 }
1878 assert(field->edge_count() > 0, "sanity");
1879 }
1880 }
1881 }
1882 }
1883 #endif
1884
1885 // Optimize ideal graph.
1886 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
1887 GrowableArray<Node*>& storestore_worklist) {
1888 Compile* C = _compile;
1889 PhaseIterGVN* igvn = _igvn;
1890 if (EliminateLocks) {
1891 // Mark locks before changing the ideal graph.
1892 int cnt = C->macro_count();
1893 for (int i = 0; i < cnt; i++) {
1894 Node *n = C->macro_node(i);
1895 if (n->is_AbstractLock()) { // Lock and Unlock nodes
1896 AbstractLockNode* alock = n->as_AbstractLock();
1897 if (!alock->is_non_esc_obj()) {
1898 if (not_global_escape(alock->obj_node())) {
1899 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
1900 // The lock could have been marked as eliminated by lock coarsening
1901 // code during the first IGVN before EA. Replace the coarsened flag
1902 // so that all associated locks/unlocks are eliminated.
1903 #ifdef ASSERT
1904 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
1905 #endif
1906 alock->set_non_esc_obj();
1907 }
1908 }
1909 }
1910 }
1911 }
1912
1913 if (OptimizePtrCompare) {
1914 // Add ConI(#CC_GT) and ConI(#CC_EQ).
1915 _pcmp_neq = igvn->makecon(TypeInt::CC_GT);
1916 _pcmp_eq = igvn->makecon(TypeInt::CC_EQ);
1917 // Optimize object compares.
1918 while (ptr_cmp_worklist.length() != 0) {
1919 Node *n = ptr_cmp_worklist.pop();
1920 Node *res = optimize_ptr_compare(n);
1921 if (res != NULL) {
1922 #ifndef PRODUCT
1923 if (PrintOptimizePtrCompare) {
1924 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ?
"CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ")); 1925 if (Verbose) { 1926 n->dump(1); 1927 } 1928 } 1929 #endif 1930 igvn->replace_node(n, res); 1931 } 1932 } 1933 // cleanup 1934 if (_pcmp_neq->outcnt() == 0) 1935 igvn->hash_delete(_pcmp_neq); 1936 if (_pcmp_eq->outcnt() == 0) 1937 igvn->hash_delete(_pcmp_eq); 1938 } 1939 1940 // For MemBarStoreStore nodes added in library_call.cpp, check 1941 // escape status of associated AllocateNode and optimize out 1942 // MemBarStoreStore node if the allocated object never escapes. 1943 while (storestore_worklist.length() != 0) { 1944 Node *n = storestore_worklist.pop(); 1945 MemBarStoreStoreNode *storestore = n ->as_MemBarStoreStore(); 1946 Node *alloc = storestore->in(MemBarNode::Precedent)->in(0); 1947 assert (alloc->is_Allocate(), "storestore should point to AllocateNode"); 1948 if (not_global_escape(alloc)) { 1949 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot); 1950 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory)); 1951 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control)); 1952 igvn->register_new_node_with_optimizer(mb); 1953 igvn->replace_node(storestore, mb); 1954 } 1955 } 1956 } 1957 1958 // Optimize objects compare. 1959 Node* ConnectionGraph::optimize_ptr_compare(Node* n) { 1960 assert(OptimizePtrCompare, "sanity"); 1961 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx); 1962 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx); 1963 JavaObjectNode* jobj1 = unique_java_object(n->in(1)); 1964 JavaObjectNode* jobj2 = unique_java_object(n->in(2)); 1965 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity"); 1966 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity"); 1967 1968 // Check simple cases first. 1969 if (jobj1 != NULL) { 1970 if (jobj1->escape_state() == PointsToNode::NoEscape) { 1971 if (jobj1 == jobj2) { 1972 // Comparing the same not escaping object. 1973 return _pcmp_eq; 1974 } 1975 Node* obj = jobj1->ideal_node(); 1976 // Comparing not escaping allocation. 1977 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 1978 !ptn2->points_to(jobj1)) { 1979 return _pcmp_neq; // This includes nullness check. 1980 } 1981 } 1982 } 1983 if (jobj2 != NULL) { 1984 if (jobj2->escape_state() == PointsToNode::NoEscape) { 1985 Node* obj = jobj2->ideal_node(); 1986 // Comparing not escaping allocation. 1987 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 1988 !ptn1->points_to(jobj2)) { 1989 return _pcmp_neq; // This includes nullness check. 1990 } 1991 } 1992 } 1993 if (jobj1 != NULL && jobj1 != phantom_obj && 1994 jobj2 != NULL && jobj2 != phantom_obj && 1995 jobj1->ideal_node()->is_Con() && 1996 jobj2->ideal_node()->is_Con()) { 1997 // Klass or String constants compare. Need to be careful with 1998 // compressed pointers - compare types of ConN and ConP instead of nodes. 1999 const Type* t1 = jobj1->ideal_node()->get_ptr_type(); 2000 const Type* t2 = jobj2->ideal_node()->get_ptr_type(); 2001 if (t1->make_ptr() == t2->make_ptr()) { 2002 return _pcmp_eq; 2003 } else { 2004 return _pcmp_neq; 2005 } 2006 } 2007 if (ptn1->meet(ptn2)) { 2008 return NULL; // Sets are not disjoint 2009 } 2010 2011 // Sets are disjoint. 
2012 bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
2013 bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
2014 bool set1_has_null_ptr = ptn1->points_to(null_obj);
2015 bool set2_has_null_ptr = ptn2->points_to(null_obj);
2016 if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
2017 (set2_has_unknown_ptr && set1_has_null_ptr)) {
2018 // The unknown object might be null - cannot decide the compare.
2019 return NULL;
2020 }
2021
2022 // Disjointness by itself is not sufficient since
2023 // alias analysis is not complete for escaped objects.
2024 // Disjoint sets are definitely unrelated only when
2025 // at least one set has only non-escaping allocations.
2026 if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
2027 if (ptn1->non_escaping_allocation()) {
2028 return _pcmp_neq;
2029 }
2030 }
2031 if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
2032 if (ptn2->non_escaping_allocation()) {
2033 return _pcmp_neq;
2034 }
2035 }
2036 return NULL;
2037 }
2038
2039 // Connection Graph construction functions.
2040
2041 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
2042 PointsToNode* ptadr = _nodes.at(n->_idx);
2043 if (ptadr != NULL) {
2044 assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
2045 return;
2046 }
2047 Compile* C = _compile;
2048 ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
2049 _nodes.at_put(n->_idx, ptadr);
2050 }
2051
2052 void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
2053 PointsToNode* ptadr = _nodes.at(n->_idx);
2054 if (ptadr != NULL) {
2055 assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
2056 return;
2057 }
2058 Compile* C = _compile;
2059 ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
2060 _nodes.at_put(n->_idx, ptadr);
2061 }
2062
2063 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
2064 PointsToNode* ptadr = _nodes.at(n->_idx);
2065 if (ptadr != NULL) {
2066 assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
2067 return;
2068 }
2069 bool unsafe = false;
2070 bool is_oop = is_oop_field(n, offset, &unsafe);
2071 if (unsafe) {
2072 es = PointsToNode::GlobalEscape;
2073 }
2074 Compile* C = _compile;
2075 FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
2076 _nodes.at_put(n->_idx, field);
2077 }
2078
2079 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
2080 PointsToNode* src, PointsToNode* dst) {
2081 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
2082 assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
2083 PointsToNode* ptadr = _nodes.at(n->_idx);
2084 if (ptadr != NULL) {
2085 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
2086 return;
2087 }
2088 Compile* C = _compile;
2089 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
2090 _nodes.at_put(n->_idx, ptadr);
2091 // Add edge from arraycopy node to source object.
2092 (void)add_edge(ptadr, src);
2093 src->set_arraycopy_src();
2094 // Add edge from destination object to arraycopy node.
2095 (void)add_edge(dst, ptadr);
2096 dst->set_arraycopy_dst();
2097 }
2098
2099 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2100 const Type* adr_type = n->as_AddP()->bottom_type();
2101 int field_offset = adr_type->isa_aryptr() ?
adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
2102 BasicType bt = T_INT;
2103 if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
2104 // Check only oop fields.
2105 if (!adr_type->isa_aryptr() ||
2106 (adr_type->isa_aryptr()->klass() == NULL) ||
2107 adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
2108 // OffsetBot is used to reference an array's element. Ignore the first AddP.
2109 if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2110 bt = T_OBJECT;
2111 }
2112 }
2113 } else if (offset != oopDesc::klass_offset_in_bytes()) {
2114 if (adr_type->isa_instptr()) {
2115 ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
2116 if (field != NULL) {
2117 bt = field->layout_type();
2118 } else {
2119 // Check for unsafe oop field access
2120 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2121 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2122 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
2123 bt = T_OBJECT;
2124 (*unsafe) = true;
2125 }
2126 }
2127 } else if (adr_type->isa_aryptr()) {
2128 if (offset == arrayOopDesc::length_offset_in_bytes()) {
2129 // Ignore array length load.
2130 } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2131 // Ignore first AddP.
2132 } else {
2133 const Type* elemtype = adr_type->isa_aryptr()->elem();
2134 if (elemtype->isa_valuetype() && field_offset != Type::OffsetBot) {
2135 ciValueKlass* vk = elemtype->is_valuetype()->value_klass();
2136 field_offset += vk->first_field_offset();
2137 bt = vk->get_field_by_offset(field_offset, false)->layout_type();
2138 } else {
2139 bt = elemtype->array_element_basic_type();
2140 }
2141 }
2142 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2143 // Allocation initialization, ThreadLocal field access, unsafe access
2144 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2145 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2146 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
2147 bt = T_OBJECT;
2148 }
2149 }
2150 }
2151 // TODO enable when using T_VALUETYPEPTR
2152 //assert(bt != T_VALUETYPE, "should not have valuetype here");
2153 return (bt == T_OBJECT || bt == T_VALUETYPE || bt == T_VALUETYPEPTR || bt == T_NARROWOOP || bt == T_ARRAY);
2154 }
2155
2156 // Returns the unique java object pointed to, or NULL.
2157 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2158 assert(!_collecting, "should not call when constructed graph");
2159 // If the node was created after the escape computation we can't answer.
2160 uint idx = n->_idx;
2161 if (idx >= nodes_size()) {
2162 return NULL;
2163 }
2164 PointsToNode* ptn = ptnode_adr(idx);
2165 if (ptn->is_JavaObject()) {
2166 return ptn->as_JavaObject();
2167 }
2168 assert(ptn->is_LocalVar(), "sanity");
2169 // Check all java objects it points to.
2170 JavaObjectNode* jobj = NULL;
2171 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2172 PointsToNode* e = i.get();
2173 if (e->is_JavaObject()) {
2174 if (jobj == NULL) {
2175 jobj = e->as_JavaObject();
2176 } else if (jobj != e) {
2177 return NULL;
2178 }
2179 }
2180 }
2181 return jobj;
2182 }
2183
2184 // Return true if this node points only to non-escaping allocations.
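// For example (sketch): a LocalVar for
//
//   Object o = cond ? new A() : new B();   // both allocations NoEscape
//
// qualifies, while any edge to phantom_obj, to an escaped allocation,
// or to a non-allocation JavaObject (e.g. a constant) disqualifies it.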
2185 bool PointsToNode::non_escaping_allocation() {
2186 if (is_JavaObject()) {
2187 Node* n = ideal_node();
2188 if (n->is_Allocate() || n->is_CallStaticJava()) {
2189 return (escape_state() == PointsToNode::NoEscape);
2190 } else {
2191 return false;
2192 }
2193 }
2194 assert(is_LocalVar(), "sanity");
2195 // Check all java objects it points to.
2196 for (EdgeIterator i(this); i.has_next(); i.next()) {
2197 PointsToNode* e = i.get();
2198 if (e->is_JavaObject()) {
2199 Node* n = e->ideal_node();
2200 if ((e->escape_state() != PointsToNode::NoEscape) ||
2201 !(n->is_Allocate() || n->is_CallStaticJava())) {
2202 return false;
2203 }
2204 }
2205 }
2206 return true;
2207 }
2208
2209 // Return true if we know the node does not escape globally.
2210 bool ConnectionGraph::not_global_escape(Node *n) {
2211 assert(!_collecting, "should not call during graph construction");
2212 // If the node was created after the escape computation we can't answer.
2213 uint idx = n->_idx;
2214 if (idx >= nodes_size()) {
2215 return false;
2216 }
2217 PointsToNode* ptn = ptnode_adr(idx);
2218 PointsToNode::EscapeState es = ptn->escape_state();
2219 // If we have already computed a value, return it.
2220 if (es >= PointsToNode::GlobalEscape)
2221 return false;
2222 if (ptn->is_JavaObject()) {
2223 return true; // (es < PointsToNode::GlobalEscape);
2224 }
2225 assert(ptn->is_LocalVar(), "sanity");
2226 // Check all java objects it points to.
2227 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2228 if (i.get()->escape_state() >= PointsToNode::GlobalEscape)
2229 return false;
2230 }
2231 return true;
2232 }
2233
2234
2235 // Helper functions
2236
2237 // Return true if this node is the specified JavaObject or points to it.
2238 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
2239 if (is_JavaObject()) {
2240 return (this == ptn);
2241 }
2242 assert(is_LocalVar() || is_Field(), "sanity");
2243 for (EdgeIterator i(this); i.has_next(); i.next()) {
2244 if (i.get() == ptn)
2245 return true;
2246 }
2247 return false;
2248 }
2249
2250 // Return true if one node points to the other or their points-to sets intersect.
2251 bool PointsToNode::meet(PointsToNode* ptn) {
2252 if (this == ptn) {
2253 return true;
2254 } else if (ptn->is_JavaObject()) {
2255 return this->points_to(ptn->as_JavaObject());
2256 } else if (this->is_JavaObject()) {
2257 return ptn->points_to(this->as_JavaObject());
2258 }
2259 assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
2260 int ptn_count = ptn->edge_count();
2261 for (EdgeIterator i(this); i.has_next(); i.next()) {
2262 PointsToNode* this_e = i.get();
2263 for (int j = 0; j < ptn_count; j++) {
2264 if (this_e == ptn->edge(j))
2265 return true;
2266 }
2267 }
2268 return false;
2269 }
2270
2271 #ifdef ASSERT
2272 // Return true if the given java object is among this field's bases.
2273 bool FieldNode::has_base(JavaObjectNode* jobj) const {
2274 for (BaseIterator i(this); i.has_next(); i.next()) {
2275 if (i.get() == jobj)
2276 return true;
2277 }
2278 return false;
2279 }
2280 #endif
2281
2282 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
2283 const Type *adr_type = phase->type(adr);
2284 if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
2285 adr->in(AddPNode::Address)->is_Proj() &&
2286 adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
2287 // We are computing a raw address for a store captured by an Initialize;
2288 // compute an appropriate address type. AddP cases #3 and #5 (see below).
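// E.g. (a sketch of AddP case #3 below): a raw initializing store at
// offset 12 computes its address as
//
//   AddP( top, Proj #5 (Allocate), #12 )
//
// and the constant 12 is extracted directly from the Offset input.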
2289 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 2290 assert(offs != Type::OffsetBot || 2291 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), 2292 "offset must be a constant or it is initialization of array"); 2293 return offs; 2294 } 2295 return adr_type->is_ptr()->flattened_offset(); 2296 } 2297 2298 Node* ConnectionGraph::get_addp_base(Node *addp) { 2299 assert(addp->is_AddP(), "must be AddP"); 2300 // 2301 // AddP cases for Base and Address inputs: 2302 // case #1. Direct object's field reference: 2303 // Allocate 2304 // | 2305 // Proj #5 ( oop result ) 2306 // | 2307 // CheckCastPP (cast to instance type) 2308 // | | 2309 // AddP ( base == address ) 2310 // 2311 // case #2. Indirect object's field reference: 2312 // Phi 2313 // | 2314 // CastPP (cast to instance type) 2315 // | | 2316 // AddP ( base == address ) 2317 // 2318 // case #3. Raw object's field reference for Initialize node: 2319 // Allocate 2320 // | 2321 // Proj #5 ( oop result ) 2322 // top | 2323 // \ | 2324 // AddP ( base == top ) 2325 // 2326 // case #4. Array's element reference: 2327 // {CheckCastPP | CastPP} 2328 // | | | 2329 // | AddP ( array's element offset ) 2330 // | | 2331 // AddP ( array's offset ) 2332 // 2333 // case #5. Raw object's field reference for arraycopy stub call: 2334 // The inline_native_clone() case when the arraycopy stub is called 2335 // after the allocation before Initialize and CheckCastPP nodes. 2336 // Allocate 2337 // | 2338 // Proj #5 ( oop result ) 2339 // | | 2340 // AddP ( base == address ) 2341 // 2342 // case #6. Constant Pool, ThreadLocal, CastX2P or 2343 // Raw object's field reference: 2344 // {ConP, ThreadLocal, CastX2P, raw Load} 2345 // top | 2346 // \ | 2347 // AddP ( base == top ) 2348 // 2349 // case #7. Klass's field reference. 2350 // LoadKlass 2351 // | | 2352 // AddP ( base == address ) 2353 // 2354 // case #8. narrow Klass's field reference. 2355 // LoadNKlass 2356 // | 2357 // DecodeN 2358 // | | 2359 // AddP ( base == address ) 2360 // 2361 // case #9. Mixed unsafe access 2362 // {instance} 2363 // | 2364 // CheckCastPP (raw) 2365 // top | 2366 // \ | 2367 // AddP ( base == top ) 2368 // 2369 Node *base = addp->in(AddPNode::Base); 2370 if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9. 2371 base = addp->in(AddPNode::Address); 2372 while (base->is_AddP()) { 2373 // Case #6 (unsafe access) may have several chained AddP nodes. 
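// E.g. (sketch): an unsafe access may compute its address through a
// chain such as
//
//   AddP( top, AddP( top, ThreadLocal, #off1 ), #off2 )
//
// so keep walking the Address inputs until a non-AddP base is found.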
2374 assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
2375 base = base->in(AddPNode::Address);
2376 }
2377 if (base->Opcode() == Op_CheckCastPP &&
2378 base->bottom_type()->isa_rawptr() &&
2379 _igvn->type(base->in(1))->isa_oopptr()) {
2380 base = base->in(1); // Case #9
2381 } else {
2382 Node* uncast_base = base->uncast();
2383 int opcode = uncast_base->Opcode();
2384 assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
2385 opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
2386 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
2387 (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
2388 }
2389 }
2390 return base;
2391 }
2392
2393 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
2394 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
2395 Node* addp2 = addp->raw_out(0);
2396 if (addp->outcnt() == 1 && addp2->is_AddP() &&
2397 addp2->in(AddPNode::Base) == n &&
2398 addp2->in(AddPNode::Address) == addp) {
2399 assert(addp->in(AddPNode::Base) == n, "expecting the same base");
2400 //
2401 // Find the array's offset to push it on the worklist first so that,
2402 // as a result, the array's element offset is processed first (pushed second),
2403 // to avoid a CastPP for the array's offset.
2404 // Otherwise the inserted CastPP (LocalVar) will point to what
2405 // the AddP (Field) points to, which would be wrong since
2406 // the algorithm expects the CastPP to have the same points-to set
2407 // as the AddP's base CheckCastPP (LocalVar).
2408 //
2409 // ArrayAllocation
2410 // |
2411 // CheckCastPP
2412 // |
2413 // memProj (from ArrayAllocation CheckCastPP)
2414 // | ||
2415 // | || Int (element index)
2416 // | || | ConI (log(element size))
2417 // | || | /
2418 // | || LShift
2419 // | || /
2420 // | AddP (array's element offset)
2421 // | |
2422 // | | ConI (array's offset: #12(32-bits) or #24(64-bits))
2423 // | / /
2424 // AddP (array's offset)
2425 // |
2426 // Load/Store (memory operation on array's element)
2427 //
2428 return addp2;
2429 }
2430 return NULL;
2431 }
2432
2433 //
2434 // Adjust the type and inputs of an AddP which computes the
2435 // address of a field of an instance
2436 //
2437 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2438 PhaseGVN* igvn = _igvn;
2439 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
2440 assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
2441 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
2442 if (t == NULL) {
2443 // We are computing a raw address for a store captured by an Initialize;
2444 // compute an appropriate address type (cases #3 and #5).
2445 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
2446 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
2447 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
2448 assert(offs != Type::OffsetBot, "offset must be a constant");
2449 if (base_t->isa_aryptr() != NULL) {
2450 // In the case of a flattened value type array, each field has its
2451 // own slice so we need to extract the field being accessed from
2452 // the address computation
2453 t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
2454 } else {
2455 t = base_t->add_offset(offs)->is_oopptr();
2456 }
2457 }
2458 int inst_id = base_t->instance_id();
2459 assert(!t->is_known_instance() || t->instance_id() == inst_id,
2460 "old type must be non-instance or match new type");
2461
2462 // The type 't' could be a subclass of 'base_t'.
2463 // As a result t->offset() could be larger than base_t's size and it will
2464 // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
2465 // constructor verifies the correctness of the offset.
2466 //
2467 // It can happen on a subclass's branch (from type profiling
2468 // inlining) which was not eliminated during parsing since the exactness
2469 // of the allocation type was not propagated to the subclass type check.
2470 //
2471 // Or the type 't' might not be related to 'base_t' at all.
2472 // This can happen when the CHA type differs from the MDO type on a dead path
2473 // (for example, from an instanceof check) which is not collapsed during parsing.
2474 //
2475 // Do nothing for such an AddP node and don't process its users since
2476 // this code branch will go away.
2477 //
2478 if (!t->is_known_instance() &&
2479 !base_t->klass()->is_subtype_of(t->klass())) {
2480 return false; // bail out
2481 }
2482 const TypePtr* tinst = base_t->add_offset(t->offset());
2483 if (tinst->isa_aryptr() && t->isa_aryptr()) {
2484 // In the case of a flattened value type array, each field has its
2485 // own slice so we need to keep track of the field being accessed.
2486 tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
2487 }
2488
2489 // Do NOT remove the next line: ensure a new alias index is allocated
2490 // for the instance type. Note: C++ will not remove it since the call
2491 // has a side effect.
2492 int alias_idx = _compile->get_alias_index(tinst);
2493 igvn->set_type(addp, tinst);
2494 // Record the allocation in the node map.
2495 set_map(addp, get_map(base->_idx));
2496 // Set addp's Base and Address to 'base'.
2497 Node *abase = addp->in(AddPNode::Base);
2498 Node *adr = addp->in(AddPNode::Address);
2499 if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2500 adr->in(0)->_idx == (uint)inst_id) {
2501 // Skip AddP cases #3 and #5.
2502 } else {
2503 assert(!abase->is_top(), "sanity"); // AddP case #3
2504 if (abase != base) {
2505 igvn->hash_delete(addp);
2506 addp->set_req(AddPNode::Base, base);
2507 if (abase == adr) {
2508 addp->set_req(AddPNode::Address, base);
2509 } else {
2510 // AddP case #4 (adr is array's element offset AddP node)
2511 #ifdef ASSERT
2512 const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
2513 assert(adr->is_AddP() && atype != NULL &&
2514 atype->instance_id() == inst_id, "array's element offset should be processed first");
2515 #endif
2516 }
2517 igvn->hash_insert(addp);
2518 }
2519 }
2520 // Put on the IGVN worklist since at least addp's type was changed above.
2521 record_for_optimizer(addp);
2522 return true;
2523 }
2524
2525 //
2526 // Create a new version of orig_phi if necessary. Returns either the newly
2527 // created phi or an existing phi. Sets new_created to indicate whether a new
2528 // phi was created. Cache the last newly created phi in the node map.
2529 //
2530 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {
2531 Compile *C = _compile;
2532 PhaseGVN* igvn = _igvn;
2533 new_created = false;
2534 int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
2535 // Nothing to do if orig_phi is bottom memory or matches alias_idx.
2536 if (phi_alias_idx == alias_idx) {
2537 return orig_phi;
2538 }
2539 // Have we recently created a Phi for this alias index?
2540 PhiNode *result = get_map_phi(orig_phi->_idx);
2541 if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
2542 return result;
2543 }
2544 // The previous check may fail when the same wide memory Phi was split into Phis
2545 // for different memory slices. Search all Phis for this region.
2546 if (result != NULL) {
2547 Node* region = orig_phi->in(0);
2548 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2549 Node* phi = region->fast_out(i);
2550 if (phi->is_Phi() &&
2551 C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
2552 assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
2553 return phi->as_Phi();
2554 }
2555 }
2556 }
2557 if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
2558 if (C->do_escape_analysis() == true && !C->failing()) {
2559 // Retry compilation without escape analysis.
2560 // If this is the first failure, the sentinel string will "stick"
2561 // to the Compile object, and the C2Compiler will see it and retry.
2562 C->record_failure(C2Compiler::retry_no_escape_analysis());
2563 }
2564 return NULL;
2565 }
2566 orig_phi_worklist.append_if_missing(orig_phi);
2567 const TypePtr *atype = C->get_adr_type(alias_idx);
2568 result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
2569 C->copy_node_notes_to(result, orig_phi);
2570 igvn->set_type(result, result->bottom_type());
2571 record_for_optimizer(result);
2572 set_map(orig_phi, result);
2573 new_created = true;
2574 return result;
2575 }
2576
2577 //
2578 // Return a new version of Memory Phi "orig_phi" with the inputs having the
2579 // specified alias index.
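// The loop below is, in effect, an explicit-stack version of a
// recursive split: when an input of the current Phi is itself a Phi
// needing a new split, the current (phi, input index) pair is saved
// on phi_list/cur_input and processing restarts on the inner Phi;
// popping the pair later resumes the outer Phi.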
2580 //
2581 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {
2582 assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
2583 Compile *C = _compile;
2584 PhaseGVN* igvn = _igvn;
2585 bool new_phi_created;
2586 PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
2587 if (!new_phi_created) {
2588 return result;
2589 }
2590 GrowableArray<PhiNode *> phi_list;
2591 GrowableArray<uint> cur_input;
2592 PhiNode *phi = orig_phi;
2593 uint idx = 1;
2594 bool finished = false;
2595 while (!finished) {
2596 while (idx < phi->req()) {
2597 Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
2598 if (mem != NULL && mem->is_Phi()) {
2599 PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
2600 if (new_phi_created) {
2601 // Found a phi for which we created a new split; push the current one
2602 // on the worklist and begin processing the new one.
2603 phi_list.push(phi);
2604 cur_input.push(idx);
2605 phi = mem->as_Phi();
2606 result = newphi;
2607 idx = 1;
2608 continue;
2609 } else {
2610 mem = newphi;
2611 }
2612 }
2613 if (C->failing()) {
2614 return NULL;
2615 }
2616 result->set_req(idx++, mem);
2617 }
2618 #ifdef ASSERT
2619 // Verify that the new Phi has an input for each input of the original.
2620 assert( phi->req() == result->req(), "must have same number of inputs.");
2621 assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
2622 #endif
2623 // Check if all of the new phi's inputs have the specified alias index.
2624 // Otherwise use the old phi.
2625 for (uint i = 1; i < phi->req(); i++) {
2626 Node* in = result->in(i);
2627 assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");
2628 }
2629 // We have finished processing a Phi; see if there are any more to do.
2630 finished = (phi_list.length() == 0);
2631 if (!finished) {
2632 phi = phi_list.pop();
2633 idx = cur_input.pop();
2634 PhiNode *prev_result = get_map_phi(phi->_idx);
2635 prev_result->set_req(idx++, result);
2636 result = prev_result;
2637 }
2638 }
2639 return result;
2640 }
2641
2642 //
2643 // The next methods are derived from methods in MemNode.
2644 //
2645 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
2646 Node *mem = mmem;
2647 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
2648 // means an array I have not precisely typed yet. Do not do any
2649 // alias stuff with it any time soon.
2650 if (toop->base() != Type::AnyPtr &&
2651 !(toop->klass() != NULL &&
2652 toop->klass()->is_java_lang_Object() &&
2653 toop->offset() == Type::OffsetBot)) {
2654 mem = mmem->memory_at(alias_idx);
2655 // Update input if it is progress over what we have now
2656 }
2657 return mem;
2658 }
2659
2660 //
2661 // Move memory users to their memory slices.
2662 // 2663 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) { 2664 Compile* C = _compile; 2665 PhaseGVN* igvn = _igvn; 2666 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 2667 assert(tp != NULL, "ptr type"); 2668 int alias_idx = C->get_alias_index(tp); 2669 int general_idx = C->get_general_index(alias_idx); 2670 2671 // Move users first 2672 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2673 Node* use = n->fast_out(i); 2674 if (use->is_MergeMem()) { 2675 MergeMemNode* mmem = use->as_MergeMem(); 2676 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); 2677 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 2678 continue; // Nothing to do 2679 } 2680 // Replace previous general reference to mem node. 2681 uint orig_uniq = C->unique(); 2682 Node* m = find_inst_mem(n, general_idx, orig_phis); 2683 assert(orig_uniq == C->unique(), "no new nodes"); 2684 mmem->set_memory_at(general_idx, m); 2685 --imax; 2686 --i; 2687 } else if (use->is_MemBar()) { 2688 assert(!use->is_Initialize(), "initializing stores should not be moved"); 2689 if (use->req() > MemBarNode::Precedent && 2690 use->in(MemBarNode::Precedent) == n) { 2691 // Don't move related membars. 2692 record_for_optimizer(use); 2693 continue; 2694 } 2695 tp = use->as_MemBar()->adr_type()->isa_ptr(); 2696 if ((tp != NULL && C->get_alias_index(tp) == alias_idx) || 2697 alias_idx == general_idx) { 2698 continue; // Nothing to do 2699 } 2700 // Move to general memory slice. 2701 uint orig_uniq = C->unique(); 2702 Node* m = find_inst_mem(n, general_idx, orig_phis); 2703 assert(orig_uniq == C->unique(), "no new nodes"); 2704 igvn->hash_delete(use); 2705 imax -= use->replace_edge(n, m); 2706 igvn->hash_insert(use); 2707 record_for_optimizer(use); 2708 --i; 2709 #ifdef ASSERT 2710 } else if (use->is_Mem()) { 2711 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) { 2712 // Don't move related cardmark. 2713 continue; 2714 } 2715 // Memory nodes should have new memory input. 2716 tp = igvn->type(use->in(MemNode::Address))->isa_ptr(); 2717 assert(tp != NULL, "ptr type"); 2718 int idx = C->get_alias_index(tp); 2719 assert(get_map(use->_idx) != NULL || idx == alias_idx, 2720 "Following memory nodes should have new memory input or be on the same memory slice"); 2721 } else if (use->is_Phi()) { 2722 // Phi nodes should be split and moved already. 2723 tp = use->as_Phi()->adr_type()->isa_ptr(); 2724 assert(tp != NULL, "ptr type"); 2725 int idx = C->get_alias_index(tp); 2726 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice"); 2727 } else { 2728 use->dump(); 2729 assert(false, "should not be here"); 2730 #endif 2731 } 2732 } 2733 } 2734 2735 // 2736 // Search memory chain of "mem" to find a MemNode whose address 2737 // is the specified alias index. 
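// The walk below (a sketch) follows Memory inputs backwards, skipping
// stores on other slices, calls which cannot modify this slice and
// initializations of other instances, and stops at a store on the
// requested slice, at the instance's own allocation/initialization or
// at one of the sentinels (e.g. the start memory).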
2738 //
2739 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {
2740 if (orig_mem == NULL)
2741 return orig_mem;
2742 Compile* C = _compile;
2743 PhaseGVN* igvn = _igvn;
2744 const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
2745 bool is_instance = (toop != NULL) && toop->is_known_instance();
2746 Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
2747 Node *prev = NULL;
2748 Node *result = orig_mem;
2749 while (prev != result) {
2750 prev = result;
2751 if (result == start_mem)
2752 break; // hit one of our sentinels
2753 if (result->is_Mem()) {
2754 const Type *at = igvn->type(result->in(MemNode::Address));
2755 if (at == Type::TOP)
2756 break; // Dead
2757 assert (at->isa_ptr() != NULL, "pointer type required.");
2758 int idx = C->get_alias_index(at->is_ptr());
2759 if (idx == alias_idx)
2760 break; // Found
2761 if (!is_instance && (at->isa_oopptr() == NULL ||
2762 !at->is_oopptr()->is_known_instance())) {
2763 break; // Do not skip store to general memory slice.
2764 }
2765 result = result->in(MemNode::Memory);
2766 }
2767 if (!is_instance)
2768 continue; // don't search further for non-instance types
2769 // Skip over a call which does not affect this memory slice.
2770 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
2771 Node *proj_in = result->in(0);
2772 if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
2773 break; // hit one of our sentinels
2774 } else if (proj_in->is_Call()) {
2775 // An ArrayCopy node is processed here as well.
2776 CallNode *call = proj_in->as_Call();
2777 if (!call->may_modify(toop, igvn)) {
2778 result = call->in(TypeFunc::Memory);
2779 }
2780 } else if (proj_in->is_Initialize()) {
2781 AllocateNode* alloc = proj_in->as_Initialize()->allocation();
2782 // Stop if this is the initialization for the object instance which
2783 // contains this memory slice, otherwise skip over it.
2784 if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) {
2785 result = proj_in->in(TypeFunc::Memory);
2786 }
2787 } else if (proj_in->is_MemBar()) {
2788 if (proj_in->in(TypeFunc::Memory)->is_MergeMem() &&
2789 proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->is_Proj() &&
2790 proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->in(0)->is_ArrayCopy()) {
2791 // clone
2792 ArrayCopyNode* ac = proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->in(0)->as_ArrayCopy();
2793 if (ac->may_modify(toop, igvn)) {
2794 break;
2795 }
2796 }
2797 result = proj_in->in(TypeFunc::Memory);
2798 }
2799 } else if (result->is_MergeMem()) {
2800 MergeMemNode *mmem = result->as_MergeMem();
2801 result = step_through_mergemem(mmem, alias_idx, toop);
2802 if (result == mmem->base_memory()) {
2803 // Didn't find instance memory; search through the general slice recursively.
2804 result = mmem->memory_at(C->get_general_index(alias_idx));
2805 result = find_inst_mem(result, alias_idx, orig_phis);
2806 if (C->failing()) {
2807 return NULL;
2808 }
2809 mmem->set_memory_at(alias_idx, result);
2810 }
2811 } else if (result->is_Phi() &&
2812 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
2813 Node *un = result->as_Phi()->unique_input(igvn);
2814 if (un != NULL) {
2815 orig_phis.append_if_missing(result->as_Phi());
2816 result = un;
2817 } else {
2818 break;
2819 }
2820 } else if (result->is_ClearArray()) {
2821 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
2822 // Cannot bypass the initialization of the instance
2823 // we are looking for.
2824 break;
2825 }
2826 // Otherwise skip it (the call updated the 'result' value).
2827 } else if (result->Opcode() == Op_SCMemProj) {
2828 Node* mem = result->in(0);
2829 Node* adr = NULL;
2830 if (mem->is_LoadStore()) {
2831 adr = mem->in(MemNode::Address);
2832 } else {
2833 assert(mem->Opcode() == Op_EncodeISOArray ||
2834 mem->Opcode() == Op_StrCompressedCopy, "sanity");
2835 adr = mem->in(3); // Memory edge corresponds to destination array
2836 }
2837 const Type *at = igvn->type(adr);
2838 if (at != Type::TOP) {
2839 assert(at->isa_ptr() != NULL, "pointer type required.");
2840 int idx = C->get_alias_index(at->is_ptr());
2841 if (idx == alias_idx) {
2842 // Assert in debug mode
2843 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
2844 break; // In product mode return SCMemProj node
2845 }
2846 }
2847 result = mem->in(MemNode::Memory);
2848 } else if (result->Opcode() == Op_StrInflatedCopy) {
2849 Node* adr = result->in(3); // Memory edge corresponds to destination array
2850 const Type *at = igvn->type(adr);
2851 if (at != Type::TOP) {
2852 assert(at->isa_ptr() != NULL, "pointer type required.");
2853 int idx = C->get_alias_index(at->is_ptr());
2854 if (idx == alias_idx) {
2855 // Assert in debug mode
2856 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
2857 break; // In product mode return SCMemProj node
2858 }
2859 }
2860 result = result->in(MemNode::Memory);
2861 }
2862 }
2863 if (result->is_Phi()) {
2864 PhiNode *mphi = result->as_Phi();
2865 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
2866 const TypePtr *t = mphi->adr_type();
2867 if (!is_instance) {
2868 // Push all non-instance Phis on the orig_phis worklist to update inputs
2869 // during Phase 4 if needed.
2870 orig_phis.append_if_missing(mphi);
2871 } else if (C->get_alias_index(t) != alias_idx) {
2872 // Create a new Phi with the specified alias index type.
2873 result = split_memory_phi(mphi, alias_idx, orig_phis);
2874 }
2875 }
2876 // The result is either a MemNode, a PhiNode or an InitializeNode.
2877 return result;
2878 }
2879
2880 //
2881 // Convert the types of unescaped objects to instance types where possible,
2882 // propagate the new type information through the graph, and update memory
2883 // edges and MergeMem inputs to reflect the new type.
2884 //
2885 // We start with allocations (and calls which may be allocations) on alloc_worklist.
2886 // The processing is done in 4 phases:
2887 //
2888 // Phase 1: Process possible allocations from alloc_worklist. Create instance
2889 // types for the CheckCastPP for allocations where possible.
2890 // Propagate the new types through users as follows:
2891 // casts and Phi: push users on alloc_worklist
2892 // AddP: cast Base and Address inputs to the instance type
2893 // push any AddP users on alloc_worklist and push any memnode
2894 // users onto memnode_worklist.
2895 // Phase 2: Process MemNodes from memnode_worklist. Compute the new address type and
2896 // search the Memory chain for a store with the appropriate
2897 // address type. If a Phi is found, create a new version with
2898 // the appropriate memory slices from each of the Phi inputs.
2899 // For stores, process the users as follows:
2900 // MemNode: push on memnode_worklist
2901 // MergeMem: push on mergemem_worklist
2902 // Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice,
2903 // moving the first node encountered of each instance type to the
2904 // input corresponding to its alias index (the appropriate memory
2905 // slice).
2906 // Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes.
2907 //
2908 // In the following example, the CheckCastPP nodes are the casts of allocation
2909 // results, and the allocation of node 29 is unescaped and eligible to be an
2910 // instance type.
2911 //
2912 // We start with:
2913 //
2914 // 7 Parm #memory
2915 // 10 ConI "12"
2916 // 19 CheckCastPP "Foo"
2917 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
2918 // 29 CheckCastPP "Foo"
2919 // 30 AddP _ 29 29 10 Foo+12 alias_index=4
2920 //
2921 // 40 StoreP 25 7 20 ... alias_index=4
2922 // 50 StoreP 35 40 30 ... alias_index=4
2923 // 60 StoreP 45 50 20 ... alias_index=4
2924 // 70 LoadP _ 60 30 ... alias_index=4
2925 // 80 Phi 75 50 60 Memory alias_index=4
2926 // 90 LoadP _ 80 30 ... alias_index=4
2927 // 100 LoadP _ 80 20 ... alias_index=4
2928 //
2929 //
2930 // Phase 1 creates an instance type for node 29, assigning it an instance id of 24
2931 // and creating a new alias index for node 30. This gives:
2932 //
2933 // 7 Parm #memory
2934 // 10 ConI "12"
2935 // 19 CheckCastPP "Foo"
2936 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
2937 // 29 CheckCastPP "Foo" iid=24
2938 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
2939 //
2940 // 40 StoreP 25 7 20 ... alias_index=4
2941 // 50 StoreP 35 40 30 ... alias_index=6
2942 // 60 StoreP 45 50 20 ... alias_index=4
2943 // 70 LoadP _ 60 30 ... alias_index=6
2944 // 80 Phi 75 50 60 Memory alias_index=4
2945 // 90 LoadP _ 80 30 ... alias_index=6
2946 // 100 LoadP _ 80 20 ... alias_index=4
2947 //
2948 // In phase 2, new memory inputs are computed for the loads and stores,
2949 // and a new version of the phi is created. In phase 4, the inputs to
2950 // node 80 are updated and then the memory nodes are updated with the
2951 // values computed in phase 2. This results in:
2952 //
2953 // 7 Parm #memory
2954 // 10 ConI "12"
2955 // 19 CheckCastPP "Foo"
2956 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
2957 // 29 CheckCastPP "Foo" iid=24
2958 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
2959 //
2960 // 40 StoreP 25 7 20 ... alias_index=4
2961 // 50 StoreP 35 7 30 ... alias_index=6
2962 // 60 StoreP 45 40 20 ... alias_index=4
2963 // 70 LoadP _ 50 30 ... alias_index=6
2964 // 80 Phi 75 40 60 Memory alias_index=4
2965 // 120 Phi 75 50 50 Memory alias_index=6
2966 // 90 LoadP _ 120 30 ... alias_index=6
2967 // 100 LoadP _ 80 20 ... alias_index=4
2968 //
2969 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, GrowableArray<ArrayCopyNode*> &arraycopy_worklist) {
2970 GrowableArray<Node *> memnode_worklist;
2971 GrowableArray<PhiNode *> orig_phis;
2972 PhaseIterGVN *igvn = _igvn;
2973 uint new_index_start = (uint) _compile->num_alias_types();
2974 Arena* arena = Thread::current()->resource_area();
2975 VectorSet visited(arena);
2976 ideal_nodes.clear(); // Reset for use with set_map/get_map.
2977 uint unique_old = _compile->unique();
2978
2979 // Phase 1: Process possible allocations from alloc_worklist.
2980 // Create instance types for the CheckCastPP for allocations where possible.
2981 //
2982 // (Note: don't forget to change the order of the second AddP node on
2983 // the alloc_worklist if the order of the worklist processing is changed,
2984 // see the comment in find_second_addp().)
2985 //
2986 while (alloc_worklist.length() != 0) {
2987 Node *n = alloc_worklist.pop();
2988 uint ni = n->_idx;
2989 if (n->is_Call()) {
2990 CallNode *alloc = n->as_Call();
2991 // Copy escape information to the call node.
2992 PointsToNode* ptn = ptnode_adr(alloc->_idx);
2993 PointsToNode::EscapeState es = ptn->escape_state();
2994 // We have an allocation or call which returns a Java object;
2995 // see if it is unescaped.
2996 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable())
2997 continue;
2998 // Find the CheckCastPP for the allocate or for the return value of a call.
2999 n = alloc->result_cast();
3000 if (n == NULL) { // No uses except Initialize node
3001 if (alloc->is_Allocate()) {
3002 // Set the scalar_replaceable flag for the allocation
3003 // so it could be eliminated if it has no uses.
3004 alloc->as_Allocate()->_is_scalar_replaceable = true;
3005 }
3006 if (alloc->is_CallStaticJava()) {
3007 // Set the scalar_replaceable flag for the boxing method
3008 // so it could be eliminated if it has no uses.
3009 alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
3010 }
3011 continue;
3012 }
3013 if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
3014 assert(!alloc->is_Allocate(), "allocation should have unique type");
3015 continue;
3016 }
3017
3018 // The inline code for Object.clone() casts the allocation result to
3019 // java.lang.Object and then to the actual type of the allocated
3020 // object. Detect this case and use the second cast.
3021 // Also detect the j.l.reflect.Array.newInstance(jobject, jint) case when
3022 // the allocation result is cast to java.lang.Object and then
3023 // to the actual Array type.
3024 if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
3025 && (alloc->is_AllocateArray() ||
3026 igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {
3027 Node *cast2 = NULL;
3028 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3029 Node *use = n->fast_out(i);
3030 if (use->is_CheckCastPP()) {
3031 cast2 = use;
3032 break;
3033 }
3034 }
3035 if (cast2 != NULL) {
3036 n = cast2;
3037 } else {
3038 // Not scalar replaceable if the allocation type is unknown statically
3039 // (reflection allocation): the object can't be restored during
3040 // deoptimization without a precise type.
3041 continue;
3042 }
3043 }
3044
3045 const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
3046 if (t == NULL)
3047 continue; // not a TypeOopPtr
3048 if (!t->klass_is_exact())
3049 continue; // not a unique type
3050
3051 if (alloc->is_Allocate()) {
3052 // Set the scalar_replaceable flag for the allocation
3053 // so it could be eliminated.
      if (alloc->is_Allocate()) {
        // Set the scalar_replaceable flag for the allocation
        // so it can be eliminated.
        alloc->as_Allocate()->_is_scalar_replaceable = true;
      }
      if (alloc->is_CallStaticJava()) {
        // Set the scalar_replaceable flag for the boxing method
        // so it can be eliminated.
        alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
      }
      set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state
      // In order for an object to be scalar-replaceable, it must be:
      //   - a direct allocation (not a call returning an object)
      //   - non-escaping
      //   - eligible to be a unique type
      //   - not determined to be ineligible by escape analysis
      set_map(alloc, n);
      set_map(n, alloc);
      const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
      igvn->hash_delete(n);
      igvn->set_type(n, tinst);
      n->raise_bottom_type(tinst);
      igvn->hash_insert(n);
      record_for_optimizer(n);
      if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {

        // First, put on the worklist all Field edges from the Connection
        // Graph, which is more accurate than pushing the immediate users
        // from the Ideal Graph.
        for (EdgeIterator e(ptn); e.has_next(); e.next()) {
          PointsToNode* tgt = e.get();
          if (tgt->is_Arraycopy()) {
            continue;
          }
          Node* use = tgt->ideal_node();
          assert(tgt->is_Field() && use->is_AddP(),
                 "only AddP nodes are Field edges in CG");
          if (use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(), "array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          }
        }
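
        // (For example, a field store "p.f = q" on the allocation p appears
        // in the Ideal graph roughly as
        //   StoreP(ctl, mem, AddP(ccpp, ccpp, #f_offset), q)
        // where ccpp is p's CheckCastPP; it is that AddP which the Field
        // edges above point to.)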

        // An allocation may have an Initialize which has raw stores. Scan
        // the users of the raw allocation result and push AddP users
        // on the alloc_worklist.
        Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
        assert(raw_result != NULL, "must have an allocation result");
        for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
          Node *use = raw_result->fast_out(i);
          if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, raw_result);
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(), "array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          } else if (use->is_MemBar()) {
            memnode_worklist.append_if_missing(use);
          }
        }
      }
    } else if (n->is_AddP()) {
      JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
      if (jobj == NULL || jobj == phantom_obj) {
#ifdef ASSERT
        ptnode_adr(get_addp_base(n)->_idx)->dump();
        ptnode_adr(n->_idx)->dump();
        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
#endif
        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
        return;
      }
      Node *base = get_map(jobj->idx());  // CheckCastPP node
      if (!split_AddP(n, base)) continue; // wrong type from dead path
    } else if (n->is_Phi() ||
               n->is_CheckCastPP() ||
               n->is_EncodeP() ||
               n->is_DecodeN() ||
               (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
      if (visited.test_set(n->_idx)) {
        assert(n->is_Phi(), "loops only through Phi's");
        continue;  // already processed
      }
      JavaObjectNode* jobj = unique_java_object(n);
      if (jobj == NULL || jobj == phantom_obj) {
#ifdef ASSERT
        ptnode_adr(n->_idx)->dump();
        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
#endif
        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
        return;
      } else {
        Node *val = get_map(jobj->idx());   // CheckCastPP node
        TypeNode *tn = n->as_Type();
        const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
        assert(tinst != NULL && tinst->is_known_instance() &&
               tinst->instance_id() == jobj->idx(), "instance type expected.");

        const Type *tn_type = igvn->type(tn);
        const TypeOopPtr *tn_t;
        if (tn_type->isa_narrowoop()) {
          tn_t = tn_type->make_ptr()->isa_oopptr();
        } else {
          tn_t = tn_type->isa_oopptr();
        }
        if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
          if (tn_type->isa_narrowoop()) {
            tn_type = tinst->make_narrowoop();
          } else {
            tn_type = tinst;
          }
          igvn->hash_delete(tn);
          igvn->set_type(tn, tn_type);
          tn->set_type(tn_type);
          igvn->hash_insert(tn);
          record_for_optimizer(n);
        } else {
          assert(tn_type == TypePtr::NULL_PTR ||
                 (tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass())),
                 "unexpected type");
          continue; // Skip dead path with different type
        }
      }
    } else {
      debug_only(n->dump();)
      assert(false, "EA: unexpected node");
      continue;
    }
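    // Here n is the CheckCastPP (or a Phi/cast reaching from it) that now
    // carries the instance-specific type; push its users so the narrowed
    // type information propagates through the graph.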
    // Push the allocation's users on the appropriate worklist.
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Mem() && use->in(MemNode::Address) == n) {
        // Load/store to the instance's field
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
      } else if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
        Node* addp2 = find_second_addp(use, n);
        if (addp2 != NULL) {
          alloc_worklist.append_if_missing(addp2);
        }
        alloc_worklist.append_if_missing(use);
      } else if (use->is_Phi() ||
                 use->is_CheckCastPP() ||
                 use->is_EncodeNarrowPtr() ||
                 use->is_DecodeNarrowPtr() ||
                 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
        alloc_worklist.append_if_missing(use);
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->is_SafePoint()) {
        // Look for MergeMem nodes for calls which reference unique allocation
        // (through CheckCastPP nodes) even for debug info.
        Node* m = use->in(TypeFunc::Memory);
        if (m->is_MergeMem()) {
          assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
        }
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites the destination array
          memnode_worklist.append_if_missing(use);
        }
      } else if (use->Opcode() == Op_Return) {
        assert(_compile->tf()->returns_value_type_as_fields(), "must return a value type");
        // Get the ValueKlass by removing the tag bit from the metadata pointer.
        Node* klass = use->in(TypeFunc::Parms);
        intptr_t ptr = igvn->type(klass)->isa_rawptr()->get_con();
        clear_nth_bit(ptr, 0);
        assert(Metaspace::contains((void*)ptr), "should be klass");
        assert(((ValueKlass*)ptr)->contains_oops(), "returned value type must contain a reference field");
      } else {
        uint op = use->Opcode();
        if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
            (use->in(MemNode::Memory) == n)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(op == Op_CmpP || op == Op_Conv2B ||
                     op == Op_CastP2X || op == Op_StoreCM ||
                     op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
                     BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
                     op == Op_ValueType)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing allocation reference path");
        }
#endif
      }
    }

  }

  // Go over all ArrayCopy nodes and if one of the inputs has a unique type,
  // record it in the ArrayCopy node so we know what memory this node
  // uses/modifies.
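  // For example, for System.arraycopy(src, 0, dst, 0, len) where dst is a
  // non-escaping allocation with a unique type, _dest_type below is narrowed
  // to dst's instance type, restricting the memory the node is considered
  // to modify.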
  for (int next = 0; next < arraycopy_worklist.length(); next++) {
    ArrayCopyNode* ac = arraycopy_worklist.at(next);
    Node* dest = ac->in(ArrayCopyNode::Dest);
    if (dest->is_AddP()) {
      dest = get_addp_base(dest);
    }
    JavaObjectNode* jobj = unique_java_object(dest);
    if (jobj != NULL) {
      Node *base = get_map(jobj->idx());
      if (base != NULL) {
        const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
        ac->_dest_type = base_t;
      }
    }
    Node* src = ac->in(ArrayCopyNode::Src);
    if (src->is_AddP()) {
      src = get_addp_base(src);
    }
    jobj = unique_java_object(src);
    if (jobj != NULL) {
      Node* base = get_map(jobj->idx());
      if (base != NULL) {
        const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
        ac->_src_type = base_t;
      }
    }
  }

  // New alias types were created in split_AddP().
  uint new_index_end = (uint) _compile->num_alias_types();
  assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");

  //  Phase 2:  Process MemNodes from memnode_worklist: compute the new address
  //  type and new values for the Memory inputs (the Memory inputs are not
  //  actually updated until Phase 4).
  if (memnode_worklist.length() == 0)
    return;  // nothing to do
  while (memnode_worklist.length() != 0) {
    Node *n = memnode_worklist.pop();
    if (visited.test_set(n->_idx))
      continue;
    if (n->is_Phi() || n->is_ClearArray()) {
      // we don't need to do anything, but the users must be pushed
    } else if (n->is_MemBar()) { // Initialize, MemBar nodes
      // we don't need to do anything, but the users must be pushed
      n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
      if (n == NULL)
        continue;
    } else if (n->Opcode() == Op_StrCompressedCopy ||
               n->Opcode() == Op_EncodeISOArray) {
      // get the memory projection
      n = n->find_out_with(Op_SCMemProj);
      assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
    } else {
      assert(n->is_Mem(), "memory node required.");
      Node *addr = n->in(MemNode::Address);
      const Type *addr_t = igvn->type(addr);
      if (addr_t == Type::TOP)
        continue;
      assert(addr_t->isa_ptr() != NULL, "pointer type required.");
      int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
      assert((uint)alias_idx < new_index_end, "wrong alias index");
      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
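      // find_inst_mem() walks up the memory chain to find (or create) the
      // memory state feeding this instance's alias index; memory Phis that
      // get split into instance-specific clones are collected in orig_phis
      // and have their remaining inputs fixed up in Phase 4.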
      if (mem != n->in(MemNode::Memory)) {
        // We delay the memory edge update since we need the old one in
        // the MergeMem code below when instance memory slices are separated.
        set_map(n, mem);
      }
      if (n->is_Load()) {
        continue;  // don't push users
      } else if (n->is_LoadStore()) {
        // get the memory projection
        n = n->find_out_with(Op_SCMemProj);
        assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
      }
    }
    // Push users on the appropriate worklist.
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Phi() || use->is_ClearArray()) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
        if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
          continue;
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites the destination array
          memnode_worklist.append_if_missing(use);
        }
      } else {
        uint op = use->Opcode();
        if ((use->in(MemNode::Memory) == n) &&
            (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
                     op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing memory path");
        }
#endif
      }
    }
  }

  //  Phase 3:  Process MergeMem nodes from the mergemem_worklist.
  //    Walk each memory slice moving the first node encountered of each
  //    instance type to the input corresponding to its alias index.
  uint length = _mergemem_worklist.length();
  for (uint next = 0; next < length; ++next) {
    MergeMemNode* nmm = _mergemem_worklist.at(next);
    assert(!visited.test_set(nmm->_idx), "should not be visited before");
    // Note: we don't want to use MergeMemStream here because we only want to
    // scan inputs which exist at the start, not ones we add during processing.
    // Note 2: MergeMem may already contain instance memory slices added
    // during the find_inst_mem() call when memory nodes were processed above.
    igvn->hash_delete(nmm);
    uint nslices = MIN2(nmm->req(), new_index_start);
    for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
      Node* mem = nmm->in(i);
      Node* cur = NULL;
      if (mem == NULL || mem->is_top())
        continue;
      // First, update the mergemem by moving the memory nodes to the
      // corresponding slices if their type became more precise since this
      // mergemem was created.
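      // (A store that was re-typed to an instance slice in Phase 1/2 may
      // still sit on the old, generic slice of this MergeMem; the walk below
      // re-files it under its new alias index.)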
      while (mem->is_Mem()) {
        const Type *at = igvn->type(mem->in(MemNode::Address));
        if (at != Type::TOP) {
          assert(at->isa_ptr() != NULL, "pointer type required.");
          uint idx = (uint)_compile->get_alias_index(at->is_ptr());
          if (idx == i) {
            if (cur == NULL)
              cur = mem;
          } else {
            if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
              nmm->set_memory_at(idx, mem);
            }
          }
        }
        mem = mem->in(MemNode::Memory);
      }
      nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
      // Find any instance of the current type if we haven't already
      // encountered a memory slice of the instance along the memory chain.
      for (uint ni = new_index_start; ni < new_index_end; ni++) {
        if ((uint)_compile->get_general_index(ni) == i) {
          Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
          if (nmm->is_empty_memory(m)) {
            Node* result = find_inst_mem(mem, ni, orig_phis);
            if (_compile->failing()) {
              return;
            }
            nmm->set_memory_at(ni, result);
          }
        }
      }
    }
    // Find the rest of the instance values.
    for (uint ni = new_index_start; ni < new_index_end; ni++) {
      const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
      Node* result = step_through_mergemem(nmm, ni, tinst);
      if (result == nmm->base_memory()) {
        // Didn't find instance memory, search through the general slice recursively.
        result = nmm->memory_at(_compile->get_general_index(ni));
        result = find_inst_mem(result, ni, orig_phis);
        if (_compile->failing()) {
          return;
        }
        nmm->set_memory_at(ni, result);
      }
    }
    igvn->hash_insert(nmm);
    record_for_optimizer(nmm);
  }

  //  Phase 4:  Update the inputs of non-instance memory Phis and
  //            the Memory input of MemNodes.
  //
  // First update the inputs of any non-instance Phi's from
  // which we split out an instance Phi.  Note we don't have
  // to recursively process Phi's encountered on the input memory
  // chains as is done in split_memory_phi() since they will
  // also be processed here.
  for (int j = 0; j < orig_phis.length(); j++) {
    PhiNode *phi = orig_phis.at(j);
    int alias_idx = _compile->get_alias_index(phi->adr_type());
    igvn->hash_delete(phi);
    for (uint i = 1; i < phi->req(); i++) {
      Node *mem = phi->in(i);
      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != new_mem) {
        phi->set_req(i, new_mem);
      }
    }
    igvn->hash_insert(phi);
    record_for_optimizer(phi);
  }

  // Update the memory inputs of MemNodes with the value we computed
  // in Phase 2 and move stores' memory users to the corresponding
  // memory slices.
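  // (The delayed mapping recorded with set_map() in Phase 2 is applied here:
  // loads simply get the new Memory input, while stores first have their
  // memory users moved so each instance slice keeps a consistent chain.)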
  // Disable the memory split verification code until the fix for 6984348.
  // Currently it produces false negative results since it does not cover all cases.
#if 0 // ifdef ASSERT
  visited.Reset();
  Node_Stack old_mems(arena, _compile->unique() >> 2);
#endif
  for (uint i = 0; i < ideal_nodes.size(); i++) {
    Node*    n = ideal_nodes.at(i);
    Node* nmem = get_map(n->_idx);
    assert(nmem != NULL, "sanity");
    if (n->is_Mem()) {
#if 0 // ifdef ASSERT
      Node* old_mem = n->in(MemNode::Memory);
      if (!visited.test_set(old_mem->_idx)) {
        old_mems.push(old_mem, old_mem->outcnt());
      }
#endif
      assert(n->in(MemNode::Memory) != nmem, "sanity");
      if (!n->is_Load()) {
        // Move memory users of a store first.
        move_inst_mem(n, orig_phis);
      }
      // Now update the memory input
      igvn->hash_delete(n);
      n->set_req(MemNode::Memory, nmem);
      igvn->hash_insert(n);
      record_for_optimizer(n);
    } else {
      assert(n->is_Allocate() || n->is_CheckCastPP() ||
             n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
    }
  }
#if 0 // ifdef ASSERT
  // Verify that memory was split correctly
  while (old_mems.is_nonempty()) {
    Node* old_mem = old_mems.node();
    uint  old_cnt = old_mems.index();
    old_mems.pop();
    assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
  }
#endif
}

#ifndef PRODUCT
static const char *node_type_names[] = {
  "UnknownType",
  "JavaObject",
  "LocalVar",
  "Field",
  "Arraycopy"
};

static const char *esc_names[] = {
  "UnknownEscape",
  "NoEscape",
  "ArgEscape",
  "GlobalEscape"
};

void PointsToNode::dump(bool print_state) const {
  NodeType nt = node_type();
  tty->print("%s ", node_type_names[(int) nt]);
  if (print_state) {
    EscapeState es = escape_state();
    EscapeState fields_es = fields_escape_state();
    tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
    if (nt == PointsToNode::JavaObject && !this->scalar_replaceable())
      tty->print("NSR ");
  }
  if (is_Field()) {
    FieldNode* f = (FieldNode*)this;
    if (f->is_oop())
      tty->print("oop ");
    if (f->offset() > 0)
      tty->print("+%d ", f->offset());
    tty->print("(");
    for (BaseIterator i(f); i.has_next(); i.next()) {
      PointsToNode* b = i.get();
      tty->print(" %d%s", b->idx(), (b->is_JavaObject() ? "P" : ""));
    }
    tty->print(" )");
  }
  tty->print("[");
  for (EdgeIterator i(this); i.has_next(); i.next()) {
    PointsToNode* e = i.get();
    tty->print(" %d%s%s", e->idx(), (e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
  }
  tty->print(" [");
  for (UseIterator i(this); i.has_next(); i.next()) {
    PointsToNode* u = i.get();
    bool is_base = false;
    if (PointsToNode::is_base_use(u)) {
      is_base = true;
      u = PointsToNode::get_use_node(u)->as_Field();
    }
    tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");
  }
  tty->print(" ]] ");
  if (_node == NULL)
    tty->print_cr("<null>");
  else
    _node->dump();
}

void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
  bool first = true;
  int ptnodes_length = ptnodes_worklist.length();
  for (int i = 0; i < ptnodes_length; i++) {
    PointsToNode *ptn = ptnodes_worklist.at(i);
    if (ptn == NULL || !ptn->is_JavaObject())
      continue;
    PointsToNode::EscapeState es = ptn->escape_state();
    if ((es != PointsToNode::NoEscape) && !Verbose) {
      continue;
    }
    Node* n = ptn->ideal_node();
    if (n->is_Allocate() || (n->is_CallStaticJava() &&
                             n->as_CallStaticJava()->is_boxing_method())) {
      if (first) {
        tty->cr();
        tty->print("======== Connection graph for ");
        _compile->method()->print_short_name();
        tty->cr();
        first = false;
      }
      ptn->dump();
      // Print all locals and fields which reference this allocation
      for (UseIterator j(ptn); j.has_next(); j.next()) {
        PointsToNode* use = j.get();
        if (use->is_LocalVar()) {
          use->dump(Verbose);
        } else if (Verbose) {
          use->dump();
        }
      }
      tty->cr();
    }
  }
}
#endif