/*
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1ThreadLocalData.hpp"
#endif // INCLUDE_G1GC
#if INCLUDE_ZGC
#include "gc/z/c2/zBarrierSetC2.hpp"
#endif

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  // Add ConP(#NULL) and ConN(#NULL) nodes.
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
  _pcmp_neq = NULL; // Should be initialized
  _pcmp_eq  = NULL;
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
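  //
  // An illustrative (hypothetical) example of such a candidate:
  //
  //   Point p = new Point(x, y);  // Allocate macro node
  //   return p.x + p.y;           // p never escapes and may be scalar replaced
  //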
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate())
      return true;
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con()))
        return true;
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  ResourceMark rm;

  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non-escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0)
    igvn->hash_delete(oop_null);
  if (noop_null->outcnt() == 0)
    igvn->hash_delete(noop_null);
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<Node*> storestore_worklist;
  GrowableArray<ArrayCopyNode*>  arraycopy_worklist;
  GrowableArray<PointsToNode*>   ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_worklist;
  GrowableArray<FieldNode*>      oop_fields_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), NULL);  // preallocate space
  // Initialize worklist
  if (C->root() != NULL) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for( uint next = 0; next < ideal_nodes.size(); ++next ) {
    Node* n = ideal_nodes.at(next);
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != NULL && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only the results of allocations and java static calls are interesting.
          non_escaped_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    if (n->is_MergeMem()) {
      // Collect all MergeMem nodes to add memory slices for
      // scalar replaceable objects in split_unique_types().
      _mergemem_worklist.append(n->as_MergeMem());
    } else if (OptimizePtrCompare && n->is_Cmp() &&
               (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
      // Collect compare pointers nodes.
      ptr_cmp_worklist.append(n);
    } else if (n->is_MemBarStoreStore()) {
      // Collect all MemBarStoreStore nodes so that depending on the
      // escape status of the associated Allocate node some of them
      // may be eliminated.
      storestore_worklist.append(n);
    } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
               (n->req() > MemBarNode::Precedent)) {
      record_for_optimizer(n);
#ifdef ASSERT
    } else if (n->is_AddP()) {
      // Collect address nodes for graph verification.
      addp_worklist.append(n);
#endif
    } else if (n->is_ArrayCopy()) {
      // Keep a list of ArrayCopy nodes so that if one of its inputs is
      // non-escaping, we can record a unique type.
      arraycopy_worklist.append(n->as_ArrayCopy());
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      ideal_nodes.push(m);
    }
  }
  if (non_escaped_worklist.length() == 0) {
    _collecting = false;
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while(delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }
  int ptnodes_length = ptnodes_worklist.length();

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and all
    // local vars have edges.
    _verify = true;
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // The bytecode analyzer BCEscapeAnalyzer, used for Call nodes
  // processing, makes calls to CI to resolve symbols (types, fields,
  // methods) referenced in bytecode. During symbol resolution the VM
  // may throw an exception, which CI cleans up and converts to a
  // compilation failure.
  if (C->failing())  return false;

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or hit time or iterations limits.
    _collecting = false;
    return false;
  }

  // 3. Adjust scalar_replaceable state of nonescaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (n->is_CallStaticJava()) {
      n->as_CallStaticJava()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn);
      if (ptn->scalar_replaceable()) {
        alloc_worklist.append(ptn->ideal_node());
      }
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  if (has_scalar_replaceable_candidates &&
      C->AliasLevel() >= 3 && EliminateAllocations) {
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist);
    if (C->failing())  return false;
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    } else if (C->AliasLevel() < 3) {
      tty->print(" since AliasLevel < 3 ===");
    }
    tty->cr();
#endif
  }
  return has_non_escaping_obj;
}

// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != NULL) {
    Node* adr = n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == NULL ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape,
                           adr, delayed_worklist);
  }
}

// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method should not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != NULL)
    return; // No need to redefine PointsTo node during first iteration.

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
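    // Illustrative example: a monitor on an object which EA proves
    // non-escaping can later be elided by the optimizer:
    //
    //   Object lock = new Object();         // never escapes this thread
    //   synchronized (lock) { counter++; }  // Lock/Unlock may be eliminated
    //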
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on IGVN worklist to process them during
      // first IGVN optimization when escape information is still available.
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != NULL && strcmp(name, "uncommon_trap") == 0)
          return; // Skip uncommon traps
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
      if ((n->as_Call()->returns_pointer() &&
           n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
          (n->is_CallStaticJava() &&
           n->as_CallStaticJava()->is_boxing_method())) {
        add_call_node(n->as_Call());
      }
    }
    return;
  }
  // Put this check here to process call arguments since some call nodes
  // point to phantom_obj.
  if (n_ptn == phantom_obj || n_ptn == null_obj)
    return; // Skip predefined nodes.

  int opcode = n->Opcode();
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      // Field nodes are created for all field types. They are used in
      // adjust_scalar_replaceable_state() and split_unique_types().
      // Note, non-oop fields will have only base edges in Connection
      // Graph because such fields are not used for oop loads and stores.
      int offset = address_offset(n, igvn);
      add_field(n, PointsToNode::NoEscape, offset);
      if (ptn_base == NULL) {
        delayed_worklist->push(n); // Process it later.
      } else {
        n_ptn = ptnode_adr(n_idx);
        add_base(n_ptn->as_Field(), ptn_base);
      }
      break;
    }
    case Op_CastX2P: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), delayed_worklist);
      break;
    }
    case Op_CMoveP: {
      add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during first iteration because some could be
      // not defined yet.
      delayed_worklist->push(n);
      break;
    }
    case Op_ConP:
    case Op_ConN:
    case Op_ConNKlass: {
      // assume all oop constants globally escape except for null
      PointsToNode::EscapeState es;
      const Type* t = igvn->type(n);
      if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
        es = PointsToNode::NoEscape;
      } else {
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(n, es);
      break;
    }
    case Op_CreateEx: {
      // assume that all exception objects globally escape
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadKlass:
    case Op_LoadNKlass: {
      // Unknown class is loaded
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadP:
#if INCLUDE_ZGC
    case Op_LoadBarrierSlowReg:
    case Op_LoadBarrierWeakSlowReg:
#endif
    case Op_LoadN:
    case Op_LoadPLocked: {
      add_objload_to_connection_graph(n, delayed_worklist);
      break;
    }
    case Op_Parm: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_PartialSubtypeCheck: {
      // Produces Null or notNull and is used only in CmpP so
      // phantom_obj could be used.
      map_ideal_node(n, phantom_obj); // Result is unknown
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during first iteration because some could be
        // not defined yet.
        delayed_worklist->push(n);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape,
                               n->in(0), delayed_worklist);
      }
#if INCLUDE_ZGC
      else if (UseZGC) {
        if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
          add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), delayed_worklist);
        }
      }
#endif
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), delayed_worklist);
      }
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      // fallthrough
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_StorePConditional:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN: {
      Node* adr = n->in(MemNode::Address);
      const Type *adr_type = igvn->type(adr);
      adr_type = adr_type->make_ptr();
      if (adr_type == NULL) {
        break; // skip dead nodes
      }
      if (   adr_type->isa_oopptr()
          || (   (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
              && adr_type == TypeRawPtr::NOTNULL
              && adr->in(AddPNode::Address)->is_Proj()
              && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
        delayed_worklist->push(n); // Process it later.
#ifdef ASSERT
        assert(adr->is_AddP(), "expecting an AddP");
        if (adr_type == TypeRawPtr::NOTNULL) {
          // Verify a raw address for a store captured by Initialize node.
          int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
          assert(offs != Type::OffsetBot, "offset must be a constant");
        }
#endif
      } else {
        // Ignore copying the displaced header to the BoxNode (OSR compilation).
        if (adr->is_BoxLock())
          break;
        // Stored value escapes in unsafe access.
        if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
          // Pointer stores in G1 barriers look like unsafe access.
          // Ignore such stores to be able to scalar replace non-escaping
          // allocations.
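          //
          // The G1 stores filtered out below have (roughly) the following
          // shape, with the queue buffer address loaded from thread-local data:
          //
          //   StoreP(AddP(LoadP(AddP(ThreadLocal, buffer_offset)), index), oop)
          //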
#if INCLUDE_G1GC
          if (UseG1GC && adr->is_AddP()) {
            Node* base = get_addp_base(adr);
            if (base->Opcode() == Op_LoadP &&
                base->in(MemNode::Address)->is_AddP()) {
              adr = base->in(MemNode::Address);
              Node* tls = get_addp_base(adr);
              if (tls->Opcode() == Op_ThreadLocal) {
                int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
                if (offs == in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset())) {
                  break; // G1 pre barrier previous oop value store.
                }
                if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
                  break; // G1 post barrier card address store.
                }
              }
            }
          }
#endif
          delayed_worklist->push(n); // Process unsafe access later.
          break;
        }
#ifdef ASSERT
        n->dump(1);
        assert(false, "not unsafe or G1 barrier raw StoreP");
#endif
      }
      break;
    }
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      add_local_var(n, PointsToNode::ArgEscape);
      delayed_worklist->push(n); // Process it later.
      break;
    }
    case Op_ThreadLocal: {
      add_java_object(n, PointsToNode::ArgEscape);
      break;
    }
    default:
      ; // Do nothing for nodes not related to EA.
  }
  return;
}

#ifdef ASSERT
#define ELSE_FAIL(name)                                \
      /* Should not be called for non-pointer type. */ \
      n->dump(1);                                      \
      assert(false, name);                             \
      break;
#else
#define ELSE_FAIL(name) \
      break;
#endif

// Add final simple edges to graph.
void ConnectionGraph::add_final_edges(Node *n) {
  PointsToNode* n_ptn = ptnode_adr(n->_idx);
#ifdef ASSERT
  if (_verify && n_ptn->is_JavaObject())
    return; // This method does not change graph for JavaObject.
#endif

  if (n->is_Call()) {
    process_call_arguments(n->as_Call());
    return;
  }
  assert(n->is_Store() || n->is_LoadStore() ||
         (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
         "node should be registered already");
  int opcode = n->Opcode();
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      assert(ptn_base != NULL, "field's base should be registered");
      add_base(n_ptn->as_Field(), ptn_base);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), NULL);
      break;
    }
    case Op_CMoveP: {
      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == NULL)
          continue;  // ignore NULL
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n)
          continue;  // ignore top or inputs which go back this node
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_LoadP:
#if INCLUDE_ZGC
    case Op_LoadBarrierSlowReg:
    case Op_LoadBarrierWeakSlowReg:
#endif
    case Op_LoadN:
    case Op_LoadPLocked: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = _igvn->type(n);
      if (t->make_ptr() != NULL) {
        Node* adr = n->in(MemNode::Address);
        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
        break;
      }
      ELSE_FAIL("Op_LoadP");
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        for (uint i = 1; i < n->req(); i++) {
          Node* in = n->in(i);
          if (in == NULL)
            continue;  // ignore NULL
          Node* uncast_in = in->uncast();
          if (uncast_in->is_top() || uncast_in == n)
            continue;  // ignore top or inputs which go back this node
          PointsToNode* ptn = ptnode_adr(in->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
        break;
      }
      ELSE_FAIL("Op_Phi");
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
        break;
      }
#if INCLUDE_ZGC
      else if (UseZGC) {
        if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
          add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), NULL);
          break;
        }
      }
#endif
      ELSE_FAIL("Op_Proj");
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), NULL);
        break;
      }
      ELSE_FAIL("Op_Return");
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_StorePConditional:
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      Node* adr = n->in(MemNode::Address);
      const Type *adr_type = _igvn->type(adr);
      adr_type = adr_type->make_ptr();
#ifdef ASSERT
      if (adr_type == NULL) {
        n->dump(1);
        assert(adr_type != NULL, "dead node should not be on list");
        break;
      }
#endif
      if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
          opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
      }
      if (   adr_type->isa_oopptr()
          || (   (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
              && adr_type == TypeRawPtr::NOTNULL
              && adr->in(AddPNode::Address)->is_Proj()
              && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
        // Point Address to Value
        PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
        assert(adr_ptn != NULL &&
               adr_ptn->as_Field()->is_oop(), "node should be registered");
        Node *val = n->in(MemNode::ValueIn);
        PointsToNode* ptn = ptnode_adr(val->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(adr_ptn, ptn);
        break;
      } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
        // Stored value escapes in unsafe access.
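        // (Illustrative note: this covers raw-pointer stores, e.g. from
        // Unsafe-based intrinsics, where the destination object cannot be
        // identified, so the stored value must be assumed to escape globally.)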
        Node *val = n->in(MemNode::ValueIn);
        PointsToNode* ptn = ptnode_adr(val->_idx);
        assert(ptn != NULL, "node should be registered");
        set_escape_state(ptn, PointsToNode::GlobalEscape);
        // Add edge to object for unsafe access with offset.
        PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
        assert(adr_ptn != NULL, "node should be registered");
        if (adr_ptn->is_Field()) {
          assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
          add_edge(adr_ptn, ptn);
        }
        break;
      }
      ELSE_FAIL("Op_StoreP");
    }
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      // char[]/byte[] arrays passed to string intrinsics do not escape but
      // they are not scalar replaceable. Adjust escape state for them.
      // Start from in(2) edge since in(1) is memory edge.
      for (uint i = 2; i < n->req(); i++) {
        Node* adr = n->in(i);
        const Type* at = _igvn->type(adr);
        if (!adr->is_top() && at->isa_ptr()) {
          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
                 at->isa_ptr() != NULL, "expecting a pointer");
          if (adr->is_AddP()) {
            adr = get_addp_base(adr);
          }
          PointsToNode* ptn = ptnode_adr(adr->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    default: {
      // This method should be called only for EA specific nodes which may
      // miss some edges when they were created.
#ifdef ASSERT
      n->dump(1);
#endif
      guarantee(false, "unknown node");
    }
  }
  return;
}

void ConnectionGraph::add_call_node(CallNode* call) {
  assert(call->returns_pointer(), "only for call which returns pointer");
  uint call_idx = call->_idx;
  if (call->is_Allocate()) {
    Node* k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != NULL, "TypeKlassPtr required.");
    ciKlass* cik = kt->klass();
    PointsToNode::EscapeState es = PointsToNode::NoEscape;
    bool scalar_replaceable = true;
    if (call->is_AllocateArray()) {
      if (!cik->is_array_klass()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is not constant or too big.
          scalar_replaceable = false;
        }
      }
    } else {  // Allocate instance
      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
          cik->is_subclass_of(_compile->env()->Reference_klass()) ||
          !cik->is_instance_klass() || // StressReflectiveCode
          !cik->as_instance_klass()->can_be_instantiated() ||
          cik->as_instance_klass()->has_finalizer()) {
        es = PointsToNode::GlobalEscape;
      }
    }
    add_java_object(call, es);
    PointsToNode* ptn = ptnode_adr(call_idx);
    if (!scalar_replaceable && ptn->scalar_replaceable()) {
      ptn->set_scalar_replaceable(false);
    }
  } else if (call->is_CallStaticJava()) {
    // Call nodes may be of different types:
    //
    // 1. CallDynamicJavaNode (what happens during the call is unknown):
    //
    //    - mapped to GlobalEscape JavaObject node if oop is returned;
    //
    //    - all oop arguments are escaping globally;
    //
    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
    //
    //    - the same as CallDynamicJavaNode if bytecode analysis can't be done;
    //
    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
    //    - mapped to NoEscape JavaObject node if non-escaping object allocated
    //      during call is returned;
    //    - mapped to ArgEscape LocalVar node pointing to object arguments
    //      which are returned and do not escape during call;
    //
    //    - oop arguments escaping status is defined by bytecode analysis;
    //
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record whether the call's return value escapes.
    ciMethod* meth = call->as_CallJava()->method();
    if (meth == NULL) {
      const char* name = call->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
      // Returns a newly allocated unescaped object.
      add_java_object(call, PointsToNode::NoEscape);
      ptnode_adr(call_idx)->set_scalar_replaceable(false);
    } else if (meth->is_boxing_method()) {
      // Returns a boxing object
      PointsToNode::EscapeState es;
      vmIntrinsics::ID intr = meth->intrinsic_id();
      if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
        // It does not escape if object is always allocated.
        es = PointsToNode::NoEscape;
      } else {
        // It escapes globally if the object could be loaded from a cache
        // (e.g., Integer.valueOf() may return a shared cached instance).
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(call, es);
    } else {
      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
      call_analyzer->copy_dependencies(_compile->dependencies());
      if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated unescaped object, simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
        add_java_object(call, PointsToNode::NoEscape);
        ptnode_adr(call_idx)->set_scalar_replaceable(false);
      } else {
        // Determine whether any arguments are returned.
        const TypeTuple* d = call->tf()->domain();
        bool ret_arg = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          if (d->field_at(i)->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
            ret_arg = true;
            break;
          }
        }
        if (ret_arg) {
          add_local_var(call, PointsToNode::ArgEscape);
        } else {
          // Returns unknown object.
          map_ideal_node(call, phantom_obj);
        }
      }
    }
  } else {
    // Another type of call, assume the worst case:
    // returned value is unknown and globally escapes.
    assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
    map_ideal_node(call, phantom_obj);
  }
}

void ConnectionGraph::process_call_arguments(CallNode *call) {
  bool is_arraycopy = false;
  switch (call->Opcode()) {
#ifdef ASSERT
    case Op_Allocate:
    case Op_AllocateArray:
    case Op_Lock:
    case Op_Unlock:
      assert(false, "should be done already");
      break;
#endif
    case Op_ArrayCopy:
    case Op_CallLeafNoFP:
      // Most array copies are ArrayCopy nodes at this point but there
      // are still a few direct calls to the copy subroutines (See
      // PhaseStringOpts::copy_string())
      is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
        call->as_CallLeaf()->is_call_to_arraycopystub();
      // fall through
    case Op_CallLeaf: {
      // Stub calls, objects do not escape but they are not scalar replaceable.
      // Adjust escape state for outgoing arguments.
      const TypeTuple * d = call->tf()->domain();
      bool src_has_oops = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        Node *arg = call->in(i);
        if (arg == NULL) {
          continue;
        }
        const Type *aat = _igvn->type(arg);
        if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
          continue;
        if (arg->is_AddP()) {
          //
          // The inline_native_clone() case when the arraycopy stub is called
          // after the allocation before Initialize and CheckCastPP nodes.
          // Or normal arraycopy for object arrays case.
          //
          // Set AddP's base (Allocate) as not scalar replaceable since
          // pointer to the base (with offset) is passed as argument.
          //
          arg = get_addp_base(arg);
        }
        PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
        assert(arg_ptn != NULL, "should be registered");
        PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
        if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != NULL, "expecting a Ptr");
          bool arg_has_oops = aat->isa_oopptr() &&
                              (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
                               (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
          if (i == TypeFunc::Parms) {
            src_has_oops = arg_has_oops;
          }
          //
          // src or dst could be j.l.Object when other is basic type array:
          //
          //   arraycopy(char[],0,Object*,0,size);
          //   arraycopy(Object*,0,char[],0,size);
          //
          // Don't add edges in such cases.
          //
          bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
                                       arg_has_oops && (i > TypeFunc::Parms);
#ifdef ASSERT
          if (!(is_arraycopy ||
                BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
                (call->as_CallLeaf()->_name != NULL &&
                 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0)
                 ))) {
            call->dump();
            fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
          }
#endif
          // Always process arraycopy's destination object since
          // we need to add all possible edges to references in
          // source object.
          if (arg_esc >= PointsToNode::ArgEscape &&
              !arg_is_arraycopy_dest) {
            continue;
          }
          PointsToNode::EscapeState es = PointsToNode::ArgEscape;
          if (call->is_ArrayCopy()) {
            ArrayCopyNode* ac = call->as_ArrayCopy();
            if (ac->is_clonebasic() ||
                ac->is_arraycopy_validated() ||
                ac->is_copyof_validated() ||
                ac->is_copyofrange_validated()) {
              es = PointsToNode::NoEscape;
            }
          }
          set_escape_state(arg_ptn, es);
          if (arg_is_arraycopy_dest) {
            Node* src = call->in(TypeFunc::Parms);
            if (src->is_AddP()) {
              src = get_addp_base(src);
            }
            PointsToNode* src_ptn = ptnode_adr(src->_idx);
            assert(src_ptn != NULL, "should be registered");
            if (arg_ptn != src_ptn) {
              // Special arraycopy edge:
              // A destination object's field can't have the source object
              // as base since the objects' escape states are not related.
              // Only escape state of destination object's fields affects
              // escape state of fields in source object.
              add_arraycopy(call, es, src_ptn, arg_ptn);
            }
          }
        }
      }
      break;
    }
    case Op_CallStaticJava: {
      // For a static call, we know exactly what method is being called.
      // Use bytecode estimator to record the call's escape effects.
#ifdef ASSERT
      const char* name = call->as_CallStaticJava()->_name;
      assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
#endif
      ciMethod* meth = call->as_CallJava()->method();
      if ((meth != NULL) && meth->is_boxing_method()) {
        break; // Boxing methods do not modify any oops.
      }
      BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
      // fall-through if not a Java method or no analyzer information
      if (call_analyzer != NULL) {
        PointsToNode* call_ptn = ptnode_adr(call->_idx);
        const TypeTuple* d = call->tf()->domain();
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          int k = i - TypeFunc::Parms;
          Node* arg = call->in(i);
          PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
          if (at->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(k)) {
            // The call returns arguments.
            if (call_ptn != NULL) { // Is call's result used?
              assert(call_ptn->is_LocalVar(), "node should be registered");
              assert(arg_ptn != NULL, "node should be registered");
              add_edge(call_ptn, arg_ptn);
            }
          }
          if (at->isa_oopptr() != NULL &&
              arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
            if (!call_analyzer->is_arg_stack(k)) {
              // The argument globally escapes.
              set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
            } else {
              set_escape_state(arg_ptn, PointsToNode::ArgEscape);
              if (!call_analyzer->is_arg_local(k)) {
                // The argument itself doesn't escape, but any fields might.
                set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
              }
            }
          }
        }
        if (call_ptn != NULL && call_ptn->is_LocalVar()) {
          // The call returns arguments.
          assert(call_ptn->edge_count() > 0, "sanity");
          if (!call_analyzer->is_return_local()) {
            // Also returns an unknown object.
            add_edge(call_ptn, phantom_obj);
          }
        }
        break;
      }
    }
    default: {
      // Fall-through here if not a Java method or no analyzer information
      // or some other type of call, assume the worst case: all arguments
      // globally escape.
      const TypeTuple* d = call->tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != NULL) {
          Node* arg = call->in(i);
          if (arg->is_AddP()) {
            arg = get_addp_base(arg);
          }
          assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
          set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
        }
      }
    }
  }
}


// Finish Graph construction.
bool ConnectionGraph::complete_connection_graph(
                         GrowableArray<PointsToNode*>&   ptnodes_worklist,
                         GrowableArray<JavaObjectNode*>& non_escaped_worklist,
                         GrowableArray<JavaObjectNode*>& java_objects_worklist,
                         GrowableArray<FieldNode*>&      oop_fields_worklist) {
  // Normally only 1-3 passes are needed to build the Connection Graph,
  // depending on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
  // Set the limit to 20 to catch the situation when something went wrong
  // and bail out of Escape Analysis.
  // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.
#define CG_BUILD_ITER_LIMIT 20

  // Propagate GlobalEscape and ArgEscape escape states and check that
  // we still have non-escaping objects.
  // The method pushes on _worklist Field nodes which reference phantom_object.
  if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
    return false; // Nothing to do.
  }
  // Now propagate references to all JavaObject nodes.
  int java_objects_length = java_objects_worklist.length();
  elapsedTimer time;
  bool timeout = false;
  int new_edges = 1;
  int iterations = 0;
  do {
    while ((new_edges > 0) &&
           (iterations++ < CG_BUILD_ITER_LIMIT)) {
      double start_time = time.seconds();
      time.start();
      new_edges = 0;
      // Propagate references to phantom_object for nodes pushed on _worklist
      // by find_non_escaped_objects() and find_field_value().
      new_edges += add_java_object_edges(phantom_obj, false);
      for (int next = 0; next < java_objects_length; ++next) {
        JavaObjectNode* ptn = java_objects_worklist.at(next);
        new_edges += add_java_object_edges(ptn, true);

#define SAMPLE_SIZE 4
        if ((next % SAMPLE_SIZE) == 0) {
          // Every SAMPLE_SIZE iterations, estimate how much time it will
          // take to complete graph construction.
          time.stop();
          // Poll for requests from shutdown mechanism to quiesce compiler
          // because Connection graph construction may take a long time.
          CompileBroker::maybe_block();
          double stop_time = time.seconds();
          double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
          double time_until_end = time_per_iter * (double)(java_objects_length - next);
          if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
            timeout = true;
            break; // Timeout
          }
          start_time = stop_time;
          time.start();
        }
#undef SAMPLE_SIZE

      }
      if (timeout) break;
      if (new_edges > 0) {
        // Update escape states on each iteration if graph was updated.
        if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
          return false; // Nothing to do.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    }
    if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) {
      time.start();
      // Find fields which have unknown value.
      int fields_length = oop_fields_worklist.length();
      for (int next = 0; next < fields_length; next++) {
        FieldNode* field = oop_fields_worklist.at(next);
        if (field->edge_count() == 0) {
          new_edges += find_field_value(field);
          // This code may have added new edges to phantom_object.
          // Need another cycle to propagate references to phantom_object.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    } else {
      new_edges = 0; // Bailout
    }
  } while (new_edges > 0);

  // Bailout if passed limits.
  if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) {
    Compile* C = _compile;
    if (C->log() != NULL) {
      C->log()->begin_elem("connectionGraph_bailout reason='reached ");
      C->log()->text("%s", timeout ? "time" : "iterations");
      C->log()->end_elem(" limit'");
    }
    assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
           time.seconds(), iterations, nodes_size(), ptnodes_worklist.length());
    // Possible infinite build_connection_graph loop,
    // bailout (no changes to ideal graph were made).
    return false;
  }
#ifdef ASSERT
  if (Verbose && PrintEscapeAnalysis) {
    tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d",
                  iterations, nodes_size(), ptnodes_worklist.length());
  }
#endif

#undef CG_BUILD_ITER_LIMIT

  // Find fields initialized by NULL for non-escaping Allocations.
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    PointsToNode::EscapeState es = ptn->escape_state();
    assert(es <= PointsToNode::ArgEscape, "sanity");
    if (es == PointsToNode::NoEscape) {
      if (find_init_values(ptn, null_obj, _igvn) > 0) {
        // Adding references to NULL object does not change escape states
        // since it does not escape. Also no fields are added to NULL object.
        add_java_object_edges(null_obj, false);
      }
    }
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      // The object allocated by this Allocate node will never be
      // seen by another thread. Mark it so that when it is
      // expanded no MemBarStoreStore is added.
      InitializeNode* ini = n->as_Allocate()->initialization();
      if (ini != NULL)
        ini->set_does_not_escape();
    }
  }
  return true; // Finished graph construction.
}

// Propagate GlobalEscape and ArgEscape escape states to all nodes
// and check that we still have non-escaping java objects.
bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
                                               GrowableArray<JavaObjectNode*>& non_escaped_worklist) {
  GrowableArray<PointsToNode*> escape_worklist;
  // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
  int ptnodes_length = ptnodes_worklist.length();
  for (int next = 0; next < ptnodes_length; ++next) {
    PointsToNode* ptn = ptnodes_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::ArgEscape ||
        ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
      escape_worklist.push(ptn);
    }
  }
  // Set escape states to referenced nodes (edges list).
  while (escape_worklist.length() > 0) {
    PointsToNode* ptn = escape_worklist.pop();
    PointsToNode::EscapeState es = ptn->escape_state();
    PointsToNode::EscapeState field_es = ptn->fields_escape_state();
    if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
        es >= PointsToNode::ArgEscape) {
      // GlobalEscape or ArgEscape state of field means it has unknown value.
      if (add_edge(ptn, phantom_obj)) {
        // New edge was added
        add_field_uses_to_worklist(ptn->as_Field());
      }
    }
    for (EdgeIterator i(ptn); i.has_next(); i.next()) {
      PointsToNode* e = i.get();
      if (e->is_Arraycopy()) {
        assert(ptn->arraycopy_dst(), "sanity");
        // Propagate only fields escape state through arraycopy edge.
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es);
          escape_worklist.push(e);
        }
      } else if (es >= field_es) {
        // fields_escape_state is also set to 'es' if it is less than 'es'.
        if (e->escape_state() < es) {
          set_escape_state(e, es);
          escape_worklist.push(e);
        }
      } else {
        // Propagate field escape state.
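        // (Note: escape states are ordered, NoEscape < ArgEscape < GlobalEscape,
        // and propagation only ever raises a state, so this worklist loop
        // terminates.)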
        bool es_changed = false;
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es);
          es_changed = true;
        }
        if ((e->escape_state() < field_es) &&
            e->is_Field() && ptn->is_JavaObject() &&
            e->as_Field()->is_oop()) {
          // Change escape state of referenced fields.
          set_escape_state(e, field_es);
          es_changed = true;
        } else if (e->escape_state() < es) {
          set_escape_state(e, es);
          es_changed = true;
        }
        if (es_changed) {
          escape_worklist.push(e);
        }
      }
    }
  }
  // Remove escaped objects from non_escaped list.
  for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
      non_escaped_worklist.delete_at(next);
    }
    if (ptn->escape_state() == PointsToNode::NoEscape) {
      // Find fields in non-escaped allocations which have unknown value.
      find_init_values(ptn, phantom_obj, NULL);
    }
  }
  return (non_escaped_worklist.length() > 0);
}

// Add all references to JavaObject node by walking over all uses.
int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
  int new_edges = 0;
  if (populate_worklist) {
    // Populate _worklist by uses of jobj's uses.
    for (UseIterator i(jobj); i.has_next(); i.next()) {
      PointsToNode* use = i.get();
      if (use->is_Arraycopy())
        continue;
      add_uses_to_worklist(use);
      if (use->is_Field() && use->as_Field()->is_oop()) {
        // Put on worklist all field's uses (loads) and
        // related field nodes (same base and offset).
        add_field_uses_to_worklist(use->as_Field());
      }
    }
  }
  for (int l = 0; l < _worklist.length(); l++) {
    PointsToNode* use = _worklist.at(l);
    if (PointsToNode::is_base_use(use)) {
      // Add reference from jobj to field and from field to jobj (field's base).
      use = PointsToNode::get_use_node(use)->as_Field();
      if (add_base(use->as_Field(), jobj)) {
        new_edges++;
      }
      continue;
    }
    assert(!use->is_JavaObject(), "sanity");
    if (use->is_Arraycopy()) {
      if (jobj == null_obj) // NULL object does not have field edges
        continue;
      // Added edge from Arraycopy node to arraycopy's source java object
      if (add_edge(use, jobj)) {
        jobj->set_arraycopy_src();
        new_edges++;
      }
      // and stop here.
      continue;
    }
    if (!add_edge(use, jobj))
      continue; // No new edge added, there was such edge already.
    new_edges++;
    if (use->is_LocalVar()) {
      add_uses_to_worklist(use);
      if (use->arraycopy_dst()) {
        for (EdgeIterator i(use); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_Arraycopy()) {
            if (jobj == null_obj) // NULL object does not have field edges
              continue;
            // Add edge from arraycopy's destination java object to Arraycopy node.
            if (add_edge(jobj, e)) {
              new_edges++;
              jobj->set_arraycopy_dst();
            }
          }
        }
      }
    } else {
      // A new edge to the values stored in the field was added.
      // Put on worklist all field's uses (loads) and
      // related field nodes (same base and offset).
      add_field_uses_to_worklist(use->as_Field());
    }
  }
  _worklist.clear();
  _in_worklist.Reset();
  return new_edges;
}

// Put on worklist all related field nodes.
void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
  assert(field->is_oop(), "sanity");
  int offset = field->offset();
  add_uses_to_worklist(field);
  // Loop over all bases of this field and push on worklist Field nodes
  // with the same offset and base (since they may reference the same field).
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    add_fields_to_worklist(field, base);
    // Check if the base was the source object of an arraycopy and go over
    // the arraycopy's destination objects since values stored to a field
    // of the source object are accessible by uses (loads) of fields of
    // the destination objects.
    if (base->arraycopy_src()) {
      for (UseIterator j(base); j.has_next(); j.next()) {
        PointsToNode* arycp = j.get();
        if (arycp->is_Arraycopy()) {
          for (UseIterator k(arycp); k.has_next(); k.next()) {
            PointsToNode* abase = k.get();
            if (abase->arraycopy_dst() && abase != base) {
              // Look for the same arraycopy reference.
              add_fields_to_worklist(field, abase);
            }
          }
        }
      }
    }
  }
}

// Put on worklist all related field nodes.
void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
  int offset = field->offset();
  if (base->is_LocalVar()) {
    for (UseIterator j(base); j.has_next(); j.next()) {
      PointsToNode* f = j.get();
      if (PointsToNode::is_base_use(f)) { // Field
        f = PointsToNode::get_use_node(f);
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  } else {
    assert(base->is_JavaObject(), "sanity");
    if (// Skip phantom_object since it is only used to indicate that
        // this field's content globally escapes.
        (base != phantom_obj) &&
        // NULL object node does not have fields.
        (base != null_obj)) {
      for (EdgeIterator i(base); i.has_next(); i.next()) {
        PointsToNode* f = i.get();
        // Skip arraycopy edge since store to destination object field
        // does not update value in source object field.
        if (f->is_Arraycopy()) {
          assert(base->arraycopy_dst(), "sanity");
          continue;
        }
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  }
}

// Find fields which have unknown value.
int ConnectionGraph::find_field_value(FieldNode* field) {
  // Escaped fields should have init value already.
  assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
  int new_edges = 0;
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    if (base->is_JavaObject()) {
      // Skip Allocate's fields which will be processed later.
      if (base->ideal_node()->is_Allocate())
        return 0;
      assert(base == null_obj, "only NULL ptr base expected here");
    }
  }
  if (add_edge(field, phantom_obj)) {
    // New edge was added
    new_edges++;
    add_field_uses_to_worklist(field);
  }
  return new_edges;
}

// Find fields initializing values for allocations.
int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) {
  assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
  int new_edges = 0;
  Node* alloc = pta->ideal_node();
  if (init_val == phantom_obj) {
    // Do nothing for Allocate nodes since their field values are
    // "known" unless they are initialized by arraycopy/clone.
    if (alloc->is_Allocate() && !pta->arraycopy_dst())
      return 0;
    assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
#ifdef ASSERT
    if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) {
      const char* name = alloc->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
    }
#endif
    // Non-escaped allocations returned from Java or runtime calls have
    // unknown values in fields.
    for (EdgeIterator i(pta); i.has_next(); i.next()) {
      PointsToNode* field = i.get();
      if (field->is_Field() && field->as_Field()->is_oop()) {
        if (add_edge(field, phantom_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(field->as_Field());
        }
      }
    }
    return new_edges;
  }
  assert(init_val == null_obj, "sanity");
  // Do nothing for Call nodes since their field values are unknown.
  if (!alloc->is_Allocate())
    return 0;

  InitializeNode* ini = alloc->as_Allocate()->initialization();
  bool visited_bottom_offset = false;
  GrowableArray<int> offsets_worklist;

  // Check if an oop field's initializing value is recorded and add
  // a corresponding NULL as the field's value if it is not recorded.
  // Connection Graph does not record a default initialization by NULL
  // captured by Initialize node.
  //
  for (EdgeIterator i(pta); i.has_next(); i.next()) {
    PointsToNode* field = i.get(); // Field (AddP)
    if (!field->is_Field() || !field->as_Field()->is_oop())
      continue; // Not oop field
    int offset = field->as_Field()->offset();
    if (offset == Type::OffsetBot) {
      if (!visited_bottom_offset) {
        // OffsetBot is used to reference array's element,
        // always add reference to NULL to all Field nodes since we don't
        // know which element is referenced.
        if (add_edge(field, null_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(field->as_Field());
          visited_bottom_offset = true;
        }
      }
    } else {
      // Check only oop fields.
      const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
      if (adr_type->isa_rawptr()) {
#ifdef ASSERT
        // Raw pointers are used for initializing stores so skip it
        // since it should be recorded already
        Node* base = get_addp_base(field->ideal_node());
        assert(adr_type->isa_rawptr() && base->is_Proj() &&
               (base->in(0) == alloc), "unexpected pointer type");
#endif
        continue;
      }
      if (!offsets_worklist.contains(offset)) {
        offsets_worklist.append(offset);
        Node* value = NULL;
        if (ini != NULL) {
          // StoreP::memory_type() == T_ADDRESS
          BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
          Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
          // Make sure initializing store has the same type as this AddP.
          // This AddP may reference a non-existing field because it is on a
          // dead branch of a bimorphic call which is not eliminated yet.
1625         if (store != NULL && store->is_Store() &&
1626             store->as_Store()->memory_type() == ft) {
1627           value = store->in(MemNode::ValueIn);
1628 #ifdef ASSERT
1629           if (VerifyConnectionGraph) {
1630             // Verify that AddP already points to all objects the value points to.
1631             PointsToNode* val = ptnode_adr(value->_idx);
1632             assert((val != NULL), "should be processed already");
1633             PointsToNode* missed_obj = NULL;
1634             if (val->is_JavaObject()) {
1635               if (!field->points_to(val->as_JavaObject())) {
1636                 missed_obj = val;
1637               }
1638             } else {
1639               if (!val->is_LocalVar() || (val->edge_count() == 0)) {
1640                 tty->print_cr("----------init store has invalid value -----");
1641                 store->dump();
1642                 val->dump();
1643                 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
1644               }
1645               for (EdgeIterator j(val); j.has_next(); j.next()) {
1646                 PointsToNode* obj = j.get();
1647                 if (obj->is_JavaObject()) {
1648                   if (!field->points_to(obj->as_JavaObject())) {
1649                     missed_obj = obj;
1650                     break;
1651                   }
1652                 }
1653               }
1654             }
1655             if (missed_obj != NULL) {
1656               tty->print_cr("----------field---------------------------------");
1657               field->dump();
1658               tty->print_cr("----------missed reference to object-----------");
1659               missed_obj->dump();
1660               tty->print_cr("----------object referenced by init store -----");
1661               store->dump();
1662               val->dump();
1663               assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
1664             }
1665           }
1666 #endif
1667         } else {
1668           // There could be initializing stores which follow allocation.
1669           // For example, a volatile field store is not collected
1670           // by Initialize node.
1671           //
1672           // Need to check for dependent loads to separate such stores from
1673           // stores which follow loads. For now, add initial value NULL so
1674           // that compare pointers optimization works correctly.
1675         }
1676       }
1677       if (value == NULL) {
1678         // A field's initializing value was not recorded. Add NULL.
1679         if (add_edge(field, null_obj)) {
1680           // New edge was added
1681           new_edges++;
1682           add_field_uses_to_worklist(field->as_Field());
1683         }
1684       }
1685     }
1686   }
1687   return new_edges;
1688 }
1689
1690
1691 // Adjust scalar_replaceable state after Connection Graph is built.
1692 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {
1693   // Search for non-escaping objects which are not scalar replaceable
1694   // and mark them to propagate the state to referenced objects.
1695
1696   // 1. An object is not scalar replaceable if the field into which it is
1697   //    stored has unknown offset (stored into an unknown element of an array).
1698   //
1699   for (UseIterator i(jobj); i.has_next(); i.next()) {
1700     PointsToNode* use = i.get();
1701     if (use->is_Arraycopy()) {
1702       continue;
1703     }
1704     if (use->is_Field()) {
1705       FieldNode* field = use->as_Field();
1706       assert(field->is_oop() && field->scalar_replaceable(), "sanity");
1707       if (field->offset() == Type::OffsetBot) {
1708         jobj->set_scalar_replaceable(false);
1709         return;
1710       }
1711       // 2. An object is not scalar replaceable if the field into which it is
1712       //    stored has multiple bases, one of which is null.
1713       if (field->base_count() > 1) {
1714         for (BaseIterator i(field); i.has_next(); i.next()) {
1715           PointsToNode* base = i.get();
1716           if (base == null_obj) {
1717             jobj->set_scalar_replaceable(false);
1718             return;
1719           }
1720         }
1721       }
1722     }
1723     assert(use->is_Field() || use->is_LocalVar(), "sanity");
1724     // 3. An object is not scalar replaceable if it is merged with other objects.
1725     for (EdgeIterator j(use); j.has_next(); j.next()) {
1726       PointsToNode* ptn = j.get();
1727       if (ptn->is_JavaObject() && ptn != jobj) {
1728         // Mark all objects.
1729         jobj->set_scalar_replaceable(false);
1730         ptn->set_scalar_replaceable(false);
1731       }
1732     }
1733     if (!jobj->scalar_replaceable()) {
1734       return;
1735     }
1736   }
1737
1738   for (EdgeIterator j(jobj); j.has_next(); j.next()) {
1739     if (j.get()->is_Arraycopy()) {
1740       continue;
1741     }
1742
1743     // Non-escaping object node should point only to field nodes.
1744     FieldNode* field = j.get()->as_Field();
1745     int offset = field->as_Field()->offset();
1746
1747     // 4. An object is not scalar replaceable if it has a field with unknown
1748     //    offset (array's element is accessed in a loop).
1749     if (offset == Type::OffsetBot) {
1750       jobj->set_scalar_replaceable(false);
1751       return;
1752     }
1753     // 5. Currently an object is not scalar replaceable if a LoadStore node
1754     //    accesses its field since the field value is unknown after it.
1755     //
1756     Node* n = field->ideal_node();
1757
1758     // Test for an unsafe access that was parsed as maybe off heap
1759     // (with a CheckCastPP to raw memory).
1760     assert(n->is_AddP(), "expect an address computation");
1761     if (n->in(AddPNode::Base)->is_top() &&
1762         n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
1763       assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
1764       assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
1765       jobj->set_scalar_replaceable(false);
1766       return;
1767     }
1768
1769     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1770       Node* u = n->fast_out(i);
1771       if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
1772         jobj->set_scalar_replaceable(false);
1773         return;
1774       }
1775     }
1776
1777     // 6. Or the address may point to more than one object. This may produce
1778     //    a false positive result (set not scalar replaceable)
1779     //    since the flow-insensitive escape analysis can't separate
1780     //    the case when stores overwrite the field's value from the case
1781     //    when stores happened on different control branches.
1782     //
1783     // Note: it will disable scalar replacement in some cases:
1784     //
1785     //    Point p[] = new Point[1];
1786     //    p[0] = new Point(); // Will not be scalar replaced
1787     //
1788     // but it will save us from incorrect optimizations in the following cases:
1789     //
1790     //    Point p[] = new Point[1];
1791     //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
1792     //
1793     if (field->base_count() > 1) {
1794       for (BaseIterator i(field); i.has_next(); i.next()) {
1795         PointsToNode* base = i.get();
1796         // Don't take into account LocalVar nodes which
1797         // may point to only one object which should also be
1798         // this field's base by now.
1799         if (base->is_JavaObject() && base != jobj) {
1800           // Mark all bases.
1801           jobj->set_scalar_replaceable(false);
1802           base->set_scalar_replaceable(false);
1803         }
1804       }
1805     }
1806   }
1807 }
1808
1809 #ifdef ASSERT
1810 void ConnectionGraph::verify_connection_graph(
1811                  GrowableArray<PointsToNode*>&   ptnodes_worklist,
1812                  GrowableArray<JavaObjectNode*>& non_escaped_worklist,
1813                  GrowableArray<JavaObjectNode*>& java_objects_worklist,
1814                  GrowableArray<Node*>&           addp_worklist) {
1815   // Verify that graph is complete - no new edges could be added.
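// (That is, re-running add_java_object_edges() below over every JavaObject
//  node must report zero newly added edges once a fixed point was reached.)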
1816 int java_objects_length = java_objects_worklist.length(); 1817 int non_escaped_length = non_escaped_worklist.length(); 1818 int new_edges = 0; 1819 for (int next = 0; next < java_objects_length; ++next) { 1820 JavaObjectNode* ptn = java_objects_worklist.at(next); 1821 new_edges += add_java_object_edges(ptn, true); 1822 } 1823 assert(new_edges == 0, "graph was not complete"); 1824 // Verify that escape state is final. 1825 int length = non_escaped_worklist.length(); 1826 find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist); 1827 assert((non_escaped_length == non_escaped_worklist.length()) && 1828 (non_escaped_length == length) && 1829 (_worklist.length() == 0), "escape state was not final"); 1830 1831 // Verify fields information. 1832 int addp_length = addp_worklist.length(); 1833 for (int next = 0; next < addp_length; ++next ) { 1834 Node* n = addp_worklist.at(next); 1835 FieldNode* field = ptnode_adr(n->_idx)->as_Field(); 1836 if (field->is_oop()) { 1837 // Verify that field has all bases 1838 Node* base = get_addp_base(n); 1839 PointsToNode* ptn = ptnode_adr(base->_idx); 1840 if (ptn->is_JavaObject()) { 1841 assert(field->has_base(ptn->as_JavaObject()), "sanity"); 1842 } else { 1843 assert(ptn->is_LocalVar(), "sanity"); 1844 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 1845 PointsToNode* e = i.get(); 1846 if (e->is_JavaObject()) { 1847 assert(field->has_base(e->as_JavaObject()), "sanity"); 1848 } 1849 } 1850 } 1851 // Verify that all fields have initializing values. 1852 if (field->edge_count() == 0) { 1853 tty->print_cr("----------field does not have references----------"); 1854 field->dump(); 1855 for (BaseIterator i(field); i.has_next(); i.next()) { 1856 PointsToNode* base = i.get(); 1857 tty->print_cr("----------field has next base---------------------"); 1858 base->dump(); 1859 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) { 1860 tty->print_cr("----------base has fields-------------------------"); 1861 for (EdgeIterator j(base); j.has_next(); j.next()) { 1862 j.get()->dump(); 1863 } 1864 tty->print_cr("----------base has references---------------------"); 1865 for (UseIterator j(base); j.has_next(); j.next()) { 1866 j.get()->dump(); 1867 } 1868 } 1869 } 1870 for (UseIterator i(field); i.has_next(); i.next()) { 1871 i.get()->dump(); 1872 } 1873 assert(field->edge_count() > 0, "sanity"); 1874 } 1875 } 1876 } 1877 } 1878 #endif 1879 1880 // Optimize ideal graph. 1881 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist, 1882 GrowableArray<Node*>& storestore_worklist) { 1883 Compile* C = _compile; 1884 PhaseIterGVN* igvn = _igvn; 1885 if (EliminateLocks) { 1886 // Mark locks before changing ideal graph. 1887 int cnt = C->macro_count(); 1888 for( int i=0; i < cnt; i++ ) { 1889 Node *n = C->macro_node(i); 1890 if (n->is_AbstractLock()) { // Lock and Unlock nodes 1891 AbstractLockNode* alock = n->as_AbstractLock(); 1892 if (!alock->is_non_esc_obj()) { 1893 if (not_global_escape(alock->obj_node())) { 1894 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity"); 1895 // The lock could be marked eliminated by lock coarsening 1896 // code during first IGVN before EA. Replace coarsened flag 1897 // to eliminate all associated locks/unlocks. 1898 #ifdef ASSERT 1899 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3"); 1900 #endif 1901 alock->set_non_esc_obj(); 1902 } 1903 } 1904 } 1905 } 1906 } 1907 1908 if (OptimizePtrCompare) { 1909 // Add ConI(#CC_GT) and ConI(#CC_EQ). 
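// These constants are the canonical results substituted for CmpP/CmpN nodes
// that escape analysis can decide statically. A hedged Java-level sketch
// (illustrative only, not from this file):
//
//   Object a = new Object();  // non-escaping allocation
//   Object b = new Object();  // different non-escaping allocation
//   if (a == b) { ... }       // optimize_ptr_compare() folds the CmpP
//                             // to _pcmp_neq: the two can never be equal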
1910 _pcmp_neq = igvn->makecon(TypeInt::CC_GT); 1911 _pcmp_eq = igvn->makecon(TypeInt::CC_EQ); 1912 // Optimize objects compare. 1913 while (ptr_cmp_worklist.length() != 0) { 1914 Node *n = ptr_cmp_worklist.pop(); 1915 Node *res = optimize_ptr_compare(n); 1916 if (res != NULL) { 1917 #ifndef PRODUCT 1918 if (PrintOptimizePtrCompare) { 1919 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ")); 1920 if (Verbose) { 1921 n->dump(1); 1922 } 1923 } 1924 #endif 1925 igvn->replace_node(n, res); 1926 } 1927 } 1928 // cleanup 1929 if (_pcmp_neq->outcnt() == 0) 1930 igvn->hash_delete(_pcmp_neq); 1931 if (_pcmp_eq->outcnt() == 0) 1932 igvn->hash_delete(_pcmp_eq); 1933 } 1934 1935 // For MemBarStoreStore nodes added in library_call.cpp, check 1936 // escape status of associated AllocateNode and optimize out 1937 // MemBarStoreStore node if the allocated object never escapes. 1938 while (storestore_worklist.length() != 0) { 1939 Node *n = storestore_worklist.pop(); 1940 MemBarStoreStoreNode *storestore = n ->as_MemBarStoreStore(); 1941 Node *alloc = storestore->in(MemBarNode::Precedent)->in(0); 1942 assert (alloc->is_Allocate(), "storestore should point to AllocateNode"); 1943 if (not_global_escape(alloc)) { 1944 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot); 1945 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory)); 1946 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control)); 1947 igvn->register_new_node_with_optimizer(mb); 1948 igvn->replace_node(storestore, mb); 1949 } 1950 } 1951 } 1952 1953 // Optimize objects compare. 1954 Node* ConnectionGraph::optimize_ptr_compare(Node* n) { 1955 assert(OptimizePtrCompare, "sanity"); 1956 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx); 1957 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx); 1958 JavaObjectNode* jobj1 = unique_java_object(n->in(1)); 1959 JavaObjectNode* jobj2 = unique_java_object(n->in(2)); 1960 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity"); 1961 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity"); 1962 1963 // Check simple cases first. 1964 if (jobj1 != NULL) { 1965 if (jobj1->escape_state() == PointsToNode::NoEscape) { 1966 if (jobj1 == jobj2) { 1967 // Comparing the same not escaping object. 1968 return _pcmp_eq; 1969 } 1970 Node* obj = jobj1->ideal_node(); 1971 // Comparing not escaping allocation. 1972 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 1973 !ptn2->points_to(jobj1)) { 1974 return _pcmp_neq; // This includes nullness check. 1975 } 1976 } 1977 } 1978 if (jobj2 != NULL) { 1979 if (jobj2->escape_state() == PointsToNode::NoEscape) { 1980 Node* obj = jobj2->ideal_node(); 1981 // Comparing not escaping allocation. 1982 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 1983 !ptn1->points_to(jobj2)) { 1984 return _pcmp_neq; // This includes nullness check. 1985 } 1986 } 1987 } 1988 if (jobj1 != NULL && jobj1 != phantom_obj && 1989 jobj2 != NULL && jobj2 != phantom_obj && 1990 jobj1->ideal_node()->is_Con() && 1991 jobj2->ideal_node()->is_Con()) { 1992 // Klass or String constants compare. Need to be careful with 1993 // compressed pointers - compare types of ConN and ConP instead of nodes. 
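// (Illustrative sketch: a comparison such as Foo.class == Bar.class reaches
//  here; with compressed oops one input may be a ConN while the other is a
//  ConP, so the underlying pointer types are compared rather than the nodes.)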
1994     const Type* t1 = jobj1->ideal_node()->get_ptr_type();
1995     const Type* t2 = jobj2->ideal_node()->get_ptr_type();
1996     if (t1->make_ptr() == t2->make_ptr()) {
1997       return _pcmp_eq;
1998     } else {
1999       return _pcmp_neq;
2000     }
2001   }
2002   if (ptn1->meet(ptn2)) {
2003     return NULL; // Sets are not disjoint
2004   }
2005
2006   // Sets are disjoint.
2007   bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
2008   bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
2009   bool set1_has_null_ptr    = ptn1->points_to(null_obj);
2010   bool set2_has_null_ptr    = ptn2->points_to(null_obj);
2011   if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
2012       (set2_has_unknown_ptr && set1_has_null_ptr)) {
2013     // Check nullness of unknown object.
2014     return NULL;
2015   }
2016
2017   // Disjointness by itself is not sufficient since
2018   // alias analysis is not complete for escaped objects.
2019   // Disjoint sets are definitely unrelated only when
2020   // at least one set contains only non-escaping allocations.
2021   if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
2022     if (ptn1->non_escaping_allocation()) {
2023       return _pcmp_neq;
2024     }
2025   }
2026   if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
2027     if (ptn2->non_escaping_allocation()) {
2028       return _pcmp_neq;
2029     }
2030   }
2031   return NULL;
2032 }
2033
2034 // Connection Graph construction functions.
2035
2036 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
2037   PointsToNode* ptadr = _nodes.at(n->_idx);
2038   if (ptadr != NULL) {
2039     assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
2040     return;
2041   }
2042   Compile* C = _compile;
2043   ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
2044   _nodes.at_put(n->_idx, ptadr);
2045 }
2046
2047 void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
2048   PointsToNode* ptadr = _nodes.at(n->_idx);
2049   if (ptadr != NULL) {
2050     assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
2051     return;
2052   }
2053   Compile* C = _compile;
2054   ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
2055   _nodes.at_put(n->_idx, ptadr);
2056 }
2057
2058 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
2059   PointsToNode* ptadr = _nodes.at(n->_idx);
2060   if (ptadr != NULL) {
2061     assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
2062     return;
2063   }
2064   bool unsafe = false;
2065   bool is_oop = is_oop_field(n, offset, &unsafe);
2066   if (unsafe) {
2067     es = PointsToNode::GlobalEscape;
2068   }
2069   Compile* C = _compile;
2070   FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
2071   _nodes.at_put(n->_idx, field);
2072 }
2073
2074 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
2075                                     PointsToNode* src, PointsToNode* dst) {
2076   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
2077   assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
2078   PointsToNode* ptadr = _nodes.at(n->_idx);
2079   if (ptadr != NULL) {
2080     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
2081     return;
2082   }
2083   Compile* C = _compile;
2084   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
2085   _nodes.at_put(n->_idx, ptadr);
2086   // Add edge from arraycopy node to source object.
2087   (void)add_edge(ptadr, src);
2088   src->set_arraycopy_src();
2089   // Add edge from destination object to arraycopy node.
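// The resulting connection graph shape is (sketch):
//
//   dst --> Arraycopy --> src
//
// i.e. the destination object points to the Arraycopy node, which in turn
// points to the source object, modeling that values copied out of the
// source's fields become reachable through the destination's fields.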
2090   (void)add_edge(dst, ptadr);
2091   dst->set_arraycopy_dst();
2092 }
2093
2094 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2095   const Type* adr_type = n->as_AddP()->bottom_type();
2096   BasicType bt = T_INT;
2097   if (offset == Type::OffsetBot) {
2098     // Check only oop fields.
2099     if (!adr_type->isa_aryptr() ||
2100         (adr_type->isa_aryptr()->klass() == NULL) ||
2101         adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
2102       // OffsetBot is used to reference array's element. Ignore first AddP.
2103       if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2104         bt = T_OBJECT;
2105       }
2106     }
2107   } else if (offset != oopDesc::klass_offset_in_bytes()) {
2108     if (adr_type->isa_instptr()) {
2109       ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
2110       if (field != NULL) {
2111         bt = field->layout_type();
2112       } else {
2113         // Check for unsafe oop field access
2114         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2115             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2116             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
2117           bt = T_OBJECT;
2118           (*unsafe) = true;
2119         }
2120       }
2121     } else if (adr_type->isa_aryptr()) {
2122       if (offset == arrayOopDesc::length_offset_in_bytes()) {
2123         // Ignore array length load.
2124       } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2125         // Ignore first AddP.
2126       } else {
2127         const Type* elemtype = adr_type->isa_aryptr()->elem();
2128         bt = elemtype->array_element_basic_type();
2129       }
2130     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2131       // Allocation initialization, ThreadLocal field access, unsafe access
2132       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2133           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2134           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
2135         bt = T_OBJECT;
2136       }
2137     }
2138   }
2139   return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
2140 }
2141
2142 // Returns the unique pointed-to java object, or NULL.
2143 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2144   assert(!_collecting, "should not call on constructed graph");
2145   // If the node was created after the escape computation we can't answer.
2146   uint idx = n->_idx;
2147   if (idx >= nodes_size()) {
2148     return NULL;
2149   }
2150   PointsToNode* ptn = ptnode_adr(idx);
2151   if (ptn == NULL) {
2152     return NULL;
2153   }
2154   if (ptn->is_JavaObject()) {
2155     return ptn->as_JavaObject();
2156   }
2157   assert(ptn->is_LocalVar(), "sanity");
2158   // Check all java objects it points to.
2159   JavaObjectNode* jobj = NULL;
2160   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2161     PointsToNode* e = i.get();
2162     if (e->is_JavaObject()) {
2163       if (jobj == NULL) {
2164         jobj = e->as_JavaObject();
2165       } else if (jobj != e) {
2166         return NULL;
2167       }
2168     }
2169   }
2170   return jobj;
2171 }
2172
2173 // Return true if this node points only to non-escaping allocations.
2174 bool PointsToNode::non_escaping_allocation() {
2175   if (is_JavaObject()) {
2176     Node* n = ideal_node();
2177     if (n->is_Allocate() || n->is_CallStaticJava()) {
2178       return (escape_state() == PointsToNode::NoEscape);
2179     } else {
2180       return false;
2181     }
2182   }
2183   assert(is_LocalVar(), "sanity");
2184   // Check all java objects it points to.
2185   for (EdgeIterator i(this); i.has_next(); i.next()) {
2186     PointsToNode* e = i.get();
2187     if (e->is_JavaObject()) {
2188       Node* n = e->ideal_node();
2189       if ((e->escape_state() != PointsToNode::NoEscape) ||
2190           !(n->is_Allocate() || n->is_CallStaticJava())) {
2191         return false;
2192       }
2193     }
2194   }
2195   return true;
2196 }
2197
2198 // Return true if we know the node does not escape globally.
2199 bool ConnectionGraph::not_global_escape(Node *n) {
2200   assert(!_collecting, "should not call during graph construction");
2201   // If the node was created after the escape computation we can't answer.
2202   uint idx = n->_idx;
2203   if (idx >= nodes_size()) {
2204     return false;
2205   }
2206   PointsToNode* ptn = ptnode_adr(idx);
2207   if (ptn == NULL) {
2208     return false; // not in congraph (e.g. ConI)
2209   }
2210   PointsToNode::EscapeState es = ptn->escape_state();
2211   // If we have already computed a value, return it.
2212   if (es >= PointsToNode::GlobalEscape)
2213     return false;
2214   if (ptn->is_JavaObject()) {
2215     return true; // (es < PointsToNode::GlobalEscape);
2216   }
2217   assert(ptn->is_LocalVar(), "sanity");
2218   // Check all java objects it points to.
2219   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2220     if (i.get()->escape_state() >= PointsToNode::GlobalEscape)
2221       return false;
2222   }
2223   return true;
2224 }
2225
2226
2227 // Helper functions
2228
2229 // Return true if this node points to the specified node or to nodes it points to.
2230 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
2231   if (is_JavaObject()) {
2232     return (this == ptn);
2233   }
2234   assert(is_LocalVar() || is_Field(), "sanity");
2235   for (EdgeIterator i(this); i.has_next(); i.next()) {
2236     if (i.get() == ptn)
2237       return true;
2238   }
2239   return false;
2240 }
2241
2242 // Return true if one node points to another.
2243 bool PointsToNode::meet(PointsToNode* ptn) {
2244   if (this == ptn) {
2245     return true;
2246   } else if (ptn->is_JavaObject()) {
2247     return this->points_to(ptn->as_JavaObject());
2248   } else if (this->is_JavaObject()) {
2249     return ptn->points_to(this->as_JavaObject());
2250   }
2251   assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
2252   int ptn_count = ptn->edge_count();
2253   for (EdgeIterator i(this); i.has_next(); i.next()) {
2254     PointsToNode* this_e = i.get();
2255     for (int j = 0; j < ptn_count; j++) {
2256       if (this_e == ptn->edge(j))
2257         return true;
2258     }
2259   }
2260   return false;
2261 }
2262
2263 #ifdef ASSERT
2264 // Return true if bases point to this java object.
2265 bool FieldNode::has_base(JavaObjectNode* jobj) const {
2266   for (BaseIterator i(this); i.has_next(); i.next()) {
2267     if (i.get() == jobj)
2268       return true;
2269   }
2270   return false;
2271 }
2272 #endif
2273
2274 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
2275   const Type *adr_type = phase->type(adr);
2276   if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
2277       adr->in(AddPNode::Address)->is_Proj() &&
2278       adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
2279     // We are computing a raw address for a store captured by an Initialize
2280     // node; compute an appropriate address type. AddP cases #3 and #5 (see below).
2281 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 2282 assert(offs != Type::OffsetBot || 2283 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), 2284 "offset must be a constant or it is initialization of array"); 2285 return offs; 2286 } 2287 const TypePtr *t_ptr = adr_type->isa_ptr(); 2288 assert(t_ptr != NULL, "must be a pointer type"); 2289 return t_ptr->offset(); 2290 } 2291 2292 Node* ConnectionGraph::get_addp_base(Node *addp) { 2293 assert(addp->is_AddP(), "must be AddP"); 2294 // 2295 // AddP cases for Base and Address inputs: 2296 // case #1. Direct object's field reference: 2297 // Allocate 2298 // | 2299 // Proj #5 ( oop result ) 2300 // | 2301 // CheckCastPP (cast to instance type) 2302 // | | 2303 // AddP ( base == address ) 2304 // 2305 // case #2. Indirect object's field reference: 2306 // Phi 2307 // | 2308 // CastPP (cast to instance type) 2309 // | | 2310 // AddP ( base == address ) 2311 // 2312 // case #3. Raw object's field reference for Initialize node: 2313 // Allocate 2314 // | 2315 // Proj #5 ( oop result ) 2316 // top | 2317 // \ | 2318 // AddP ( base == top ) 2319 // 2320 // case #4. Array's element reference: 2321 // {CheckCastPP | CastPP} 2322 // | | | 2323 // | AddP ( array's element offset ) 2324 // | | 2325 // AddP ( array's offset ) 2326 // 2327 // case #5. Raw object's field reference for arraycopy stub call: 2328 // The inline_native_clone() case when the arraycopy stub is called 2329 // after the allocation before Initialize and CheckCastPP nodes. 2330 // Allocate 2331 // | 2332 // Proj #5 ( oop result ) 2333 // | | 2334 // AddP ( base == address ) 2335 // 2336 // case #6. Constant Pool, ThreadLocal, CastX2P or 2337 // Raw object's field reference: 2338 // {ConP, ThreadLocal, CastX2P, raw Load} 2339 // top | 2340 // \ | 2341 // AddP ( base == top ) 2342 // 2343 // case #7. Klass's field reference. 2344 // LoadKlass 2345 // | | 2346 // AddP ( base == address ) 2347 // 2348 // case #8. narrow Klass's field reference. 2349 // LoadNKlass 2350 // | 2351 // DecodeN 2352 // | | 2353 // AddP ( base == address ) 2354 // 2355 // case #9. Mixed unsafe access 2356 // {instance} 2357 // | 2358 // CheckCastPP (raw) 2359 // top | 2360 // \ | 2361 // AddP ( base == top ) 2362 // 2363 Node *base = addp->in(AddPNode::Base); 2364 if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9. 2365 base = addp->in(AddPNode::Address); 2366 while (base->is_AddP()) { 2367 // Case #6 (unsafe access) may have several chained AddP nodes. 
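// (Illustrative sketch: an unsafe access whose offset is assembled in
//  several steps can produce AddP(top, AddP(top, X, off1), off2); this
//  loop follows the Address inputs down to the underlying non-AddP node X.)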
2368       assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
2369       base = base->in(AddPNode::Address);
2370     }
2371     if (base->Opcode() == Op_CheckCastPP &&
2372         base->bottom_type()->isa_rawptr() &&
2373         _igvn->type(base->in(1))->isa_oopptr()) {
2374       base = base->in(1); // Case #9
2375     } else {
2376       Node* uncast_base = base->uncast();
2377       int opcode = uncast_base->Opcode();
2378       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
2379              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
2380              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
2381              (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
2382     }
2383   }
2384   return base;
2385 }
2386
2387 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
2388   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
2389   Node* addp2 = addp->raw_out(0);
2390   if (addp->outcnt() == 1 && addp2->is_AddP() &&
2391       addp2->in(AddPNode::Base) == n &&
2392       addp2->in(AddPNode::Address) == addp) {
2393     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
2394     //
2395     // Find array's offset to push it on worklist first and
2396     // as a result process an array's element offset first (pushed second)
2397     // to avoid CastPP for the array's offset.
2398     // Otherwise the inserted CastPP (LocalVar) will point to what
2399     // the AddP (Field) points to, which would be wrong since
2400     // the algorithm expects the CastPP to point to the same nodes
2401     // as AddP's base CheckCastPP (LocalVar).
2402     //
2403     //    ArrayAllocation
2404     //     |
2405     //    CheckCastPP
2406     //     |
2407     //    memProj (from ArrayAllocation CheckCastPP)
2408     //     |  ||
2409     //     |  ||   Int (element index)
2410     //     |  ||    |   ConI (log(element size))
2411     //     |  ||    |   /
2412     //     |  ||   LShift
2413     //     |  ||  /
2414     //     |  AddP (array's element offset)
2415     //     |  |
2416     //     |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
2417     //     | / /
2418     //     AddP (array's offset)
2419     //      |
2420     //     Load/Store (memory operation on array's element)
2421     //
2422     return addp2;
2423   }
2424   return NULL;
2425 }
2426
2427 //
2428 // Adjust the type and inputs of an AddP which computes the
2429 // address of a field of an instance
2430 //
2431 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2432   PhaseGVN* igvn = _igvn;
2433   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
2434   assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
2435   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
2436   if (t == NULL) {
2437     // We are computing a raw address for a store captured by an Initialize
2438     // compute an appropriate address type (cases #3 and #5).
2439     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
2440     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
2441     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
2442     assert(offs != Type::OffsetBot, "offset must be a constant");
2443     t = base_t->add_offset(offs)->is_oopptr();
2444   }
2445   int inst_id = base_t->instance_id();
2446   assert(!t->is_known_instance() || t->instance_id() == inst_id,
2447          "old type must be non-instance or match new type");
2448
2449   // The type 't' could be a subclass of 'base_t'.
2450   // As a result t->offset() could be larger than base_t's size and it will
2451   // cause a failure in add_offset() with narrow oops since TypeOopPtr()
2452   // constructor verifies correctness of the offset.
2453   //
2454   // It could happen on a subclass's branch (from the type profiling
2455   // inlining) which was not eliminated during parsing since the exactness
2456   // of the allocation type was not propagated to the subclass type check.
2457   //
2458   // Or the type 't' might not be related to 'base_t' at all.
2459   // It could happen when the CHA type differs from the MDO type on a dead path
2460   // (for example, from an instanceof check) which is not collapsed during parsing.
2461   //
2462   // Do nothing for such AddP node and don't process its users since
2463   // this code branch will go away.
2464   //
2465   if (!t->is_known_instance() &&
2466       !base_t->klass()->is_subtype_of(t->klass())) {
2467     return false; // bail out
2468   }
2469   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
2470   // Do NOT remove the next line: ensure a new alias index is allocated
2471   // for the instance type. Note: C++ will not remove it since the call
2472   // has a side effect.
2473   int alias_idx = _compile->get_alias_index(tinst);
2474   igvn->set_type(addp, tinst);
2475   // record the allocation in the node map
2476   set_map(addp, get_map(base->_idx));
2477   // Set addp's Base and Address to 'base'.
2478   Node *abase = addp->in(AddPNode::Base);
2479   Node *adr   = addp->in(AddPNode::Address);
2480   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2481       adr->in(0)->_idx == (uint)inst_id) {
2482     // Skip AddP cases #3 and #5.
2483   } else {
2484     assert(!abase->is_top(), "sanity"); // AddP case #3
2485     if (abase != base) {
2486       igvn->hash_delete(addp);
2487       addp->set_req(AddPNode::Base, base);
2488       if (abase == adr) {
2489         addp->set_req(AddPNode::Address, base);
2490       } else {
2491         // AddP case #4 (adr is array's element offset AddP node)
2492 #ifdef ASSERT
2493         const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
2494         assert(adr->is_AddP() && atype != NULL &&
2495                atype->instance_id() == inst_id, "array's element offset should be processed first");
2496 #endif
2497       }
2498       igvn->hash_insert(addp);
2499     }
2500   }
2501   // Put on IGVN worklist since at least addp's type was changed above.
2502   record_for_optimizer(addp);
2503   return true;
2504 }
2505
2506 //
2507 // Create a new version of orig_phi if necessary. Returns either the newly
2508 // created phi or an existing phi. Sets new_created to indicate whether a new
2509 // phi was created. Cache the last newly created phi in the node map.
2510 //
2511 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {
2512   Compile *C = _compile;
2513   PhaseGVN* igvn = _igvn;
2514   new_created = false;
2515   int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
2516   // nothing to do if orig_phi is bottom memory or matches alias_idx
2517   if (phi_alias_idx == alias_idx) {
2518     return orig_phi;
2519   }
2520   // Have we recently created a Phi for this alias index?
2521   PhiNode *result = get_map_phi(orig_phi->_idx);
2522   if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
2523     return result;
2524   }
2525   // Previous check may fail when the same wide memory Phi was split into Phis
2526   // for different memory slices. Search all Phis for this region.
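// (Sketch: if the wide Phi was first split for alias index 5, the cache
//  above still holds that split when a split for alias index 6 is requested
//  later, so the region's Phis must be scanned directly instead.)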
2527   if (result != NULL) {
2528     Node* region = orig_phi->in(0);
2529     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2530       Node* phi = region->fast_out(i);
2531       if (phi->is_Phi() &&
2532           C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
2533         assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
2534         return phi->as_Phi();
2535       }
2536     }
2537   }
2538   if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
2539     if (C->do_escape_analysis() == true && !C->failing()) {
2540       // Retry compilation without escape analysis.
2541       // If this is the first failure, the sentinel string will "stick"
2542       // to the Compile object, and the C2Compiler will see it and retry.
2543       C->record_failure(C2Compiler::retry_no_escape_analysis());
2544     }
2545     return NULL;
2546   }
2547   orig_phi_worklist.append_if_missing(orig_phi);
2548   const TypePtr *atype = C->get_adr_type(alias_idx);
2549   result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
2550   C->copy_node_notes_to(result, orig_phi);
2551   igvn->set_type(result, result->bottom_type());
2552   record_for_optimizer(result);
2553   set_map(orig_phi, result);
2554   new_created = true;
2555   return result;
2556 }
2557
2558 //
2559 // Return a new version of Memory Phi "orig_phi" with the inputs having the
2560 // specified alias index.
2561 //
2562 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {
2563   assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
2564   Compile *C = _compile;
2565   PhaseGVN* igvn = _igvn;
2566   bool new_phi_created;
2567   PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
2568   if (!new_phi_created) {
2569     return result;
2570   }
2571   GrowableArray<PhiNode *> phi_list;
2572   GrowableArray<uint> cur_input;
2573   PhiNode *phi = orig_phi;
2574   uint idx = 1;
2575   bool finished = false;
2576   while(!finished) {
2577     while (idx < phi->req()) {
2578       Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
2579       if (mem != NULL && mem->is_Phi()) {
2580         PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
2581         if (new_phi_created) {
2582           // found a phi for which we created a new split, push the current one
2583           // on the worklist and begin processing the new one
2584           phi_list.push(phi);
2585           cur_input.push(idx);
2586           phi = mem->as_Phi();
2587           result = newphi;
2588           idx = 1;
2589           continue;
2590         } else {
2591           mem = newphi;
2592         }
2593       }
2594       if (C->failing()) {
2595         return NULL;
2596       }
2597       result->set_req(idx++, mem);
2598     }
2599 #ifdef ASSERT
2600     // verify that the new Phi has an input for each input of the original
2601     assert( phi->req() == result->req(), "must have same number of inputs.");
2602     assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
2603 #endif
2604     // Check if all new phi's inputs have specified alias index.
2605     // Otherwise use old phi.
2606 for (uint i = 1; i < phi->req(); i++) { 2607 Node* in = result->in(i); 2608 assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond."); 2609 } 2610 // we have finished processing a Phi, see if there are any more to do 2611 finished = (phi_list.length() == 0 ); 2612 if (!finished) { 2613 phi = phi_list.pop(); 2614 idx = cur_input.pop(); 2615 PhiNode *prev_result = get_map_phi(phi->_idx); 2616 prev_result->set_req(idx++, result); 2617 result = prev_result; 2618 } 2619 } 2620 return result; 2621 } 2622 2623 // 2624 // The next methods are derived from methods in MemNode. 2625 // 2626 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) { 2627 Node *mem = mmem; 2628 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally 2629 // means an array I have not precisely typed yet. Do not do any 2630 // alias stuff with it any time soon. 2631 if (toop->base() != Type::AnyPtr && 2632 !(toop->klass() != NULL && 2633 toop->klass()->is_java_lang_Object() && 2634 toop->offset() == Type::OffsetBot)) { 2635 mem = mmem->memory_at(alias_idx); 2636 // Update input if it is progress over what we have now 2637 } 2638 return mem; 2639 } 2640 2641 // 2642 // Move memory users to their memory slices. 2643 // 2644 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) { 2645 Compile* C = _compile; 2646 PhaseGVN* igvn = _igvn; 2647 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 2648 assert(tp != NULL, "ptr type"); 2649 int alias_idx = C->get_alias_index(tp); 2650 int general_idx = C->get_general_index(alias_idx); 2651 2652 // Move users first 2653 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2654 Node* use = n->fast_out(i); 2655 if (use->is_MergeMem()) { 2656 MergeMemNode* mmem = use->as_MergeMem(); 2657 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); 2658 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 2659 continue; // Nothing to do 2660 } 2661 // Replace previous general reference to mem node. 2662 uint orig_uniq = C->unique(); 2663 Node* m = find_inst_mem(n, general_idx, orig_phis); 2664 assert(orig_uniq == C->unique(), "no new nodes"); 2665 mmem->set_memory_at(general_idx, m); 2666 --imax; 2667 --i; 2668 } else if (use->is_MemBar()) { 2669 assert(!use->is_Initialize(), "initializing stores should not be moved"); 2670 if (use->req() > MemBarNode::Precedent && 2671 use->in(MemBarNode::Precedent) == n) { 2672 // Don't move related membars. 2673 record_for_optimizer(use); 2674 continue; 2675 } 2676 tp = use->as_MemBar()->adr_type()->isa_ptr(); 2677 if ((tp != NULL && C->get_alias_index(tp) == alias_idx) || 2678 alias_idx == general_idx) { 2679 continue; // Nothing to do 2680 } 2681 // Move to general memory slice. 2682 uint orig_uniq = C->unique(); 2683 Node* m = find_inst_mem(n, general_idx, orig_phis); 2684 assert(orig_uniq == C->unique(), "no new nodes"); 2685 igvn->hash_delete(use); 2686 imax -= use->replace_edge(n, m); 2687 igvn->hash_insert(use); 2688 record_for_optimizer(use); 2689 --i; 2690 #ifdef ASSERT 2691 } else if (use->is_Mem()) { 2692 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) { 2693 // Don't move related cardmark. 2694 continue; 2695 } 2696 // Memory nodes should have new memory input. 
2697       tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
2698       assert(tp != NULL, "ptr type");
2699       int idx = C->get_alias_index(tp);
2700       assert(get_map(use->_idx) != NULL || idx == alias_idx,
2701              "Following memory nodes should have new memory input or be on the same memory slice");
2702     } else if (use->is_Phi()) {
2703       // Phi nodes should be split and moved already.
2704       tp = use->as_Phi()->adr_type()->isa_ptr();
2705       assert(tp != NULL, "ptr type");
2706       int idx = C->get_alias_index(tp);
2707       assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
2708     } else {
2709       use->dump();
2710       assert(false, "should not be here");
2711 #endif
2712     }
2713   }
2714 }
2715
2716 //
2717 // Search memory chain of "mem" to find a MemNode whose address
2718 // is the specified alias index.
2719 //
2720 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {
2721   if (orig_mem == NULL)
2722     return orig_mem;
2723   Compile* C = _compile;
2724   PhaseGVN* igvn = _igvn;
2725   const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
2726   bool is_instance = (toop != NULL) && toop->is_known_instance();
2727   Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
2728   Node *prev = NULL;
2729   Node *result = orig_mem;
2730   while (prev != result) {
2731     prev = result;
2732     if (result == start_mem)
2733       break; // hit one of our sentinels
2734     if (result->is_Mem()) {
2735       const Type *at = igvn->type(result->in(MemNode::Address));
2736       if (at == Type::TOP)
2737         break; // Dead
2738       assert (at->isa_ptr() != NULL, "pointer type required.");
2739       int idx = C->get_alias_index(at->is_ptr());
2740       if (idx == alias_idx)
2741         break; // Found
2742       if (!is_instance && (at->isa_oopptr() == NULL ||
2743                            !at->is_oopptr()->is_known_instance())) {
2744         break; // Do not skip store to general memory slice.
2745       }
2746       result = result->in(MemNode::Memory);
2747     }
2748     if (!is_instance)
2749       continue; // don't search further for non-instance types
2750     // skip over a call which does not affect this memory slice
2751     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
2752       Node *proj_in = result->in(0);
2753       if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
2754         break; // hit one of our sentinels
2755       } else if (proj_in->is_Call()) {
2756         // ArrayCopy node processed here as well
2757         CallNode *call = proj_in->as_Call();
2758         if (!call->may_modify(toop, igvn)) {
2759           result = call->in(TypeFunc::Memory);
2760         }
2761       } else if (proj_in->is_Initialize()) {
2762         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
2763         // Stop if this is the initialization for the object instance which
2764         // contains this memory slice, otherwise skip over it.
2765 if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) { 2766 result = proj_in->in(TypeFunc::Memory); 2767 } 2768 } else if (proj_in->is_MemBar()) { 2769 if (proj_in->in(TypeFunc::Memory)->is_MergeMem() && 2770 proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->is_Proj() && 2771 proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->in(0)->is_ArrayCopy()) { 2772 // clone 2773 ArrayCopyNode* ac = proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->in(0)->as_ArrayCopy(); 2774 if (ac->may_modify(toop, igvn)) { 2775 break; 2776 } 2777 } 2778 result = proj_in->in(TypeFunc::Memory); 2779 } 2780 } else if (result->is_MergeMem()) { 2781 MergeMemNode *mmem = result->as_MergeMem(); 2782 result = step_through_mergemem(mmem, alias_idx, toop); 2783 if (result == mmem->base_memory()) { 2784 // Didn't find instance memory, search through general slice recursively. 2785 result = mmem->memory_at(C->get_general_index(alias_idx)); 2786 result = find_inst_mem(result, alias_idx, orig_phis); 2787 if (C->failing()) { 2788 return NULL; 2789 } 2790 mmem->set_memory_at(alias_idx, result); 2791 } 2792 } else if (result->is_Phi() && 2793 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { 2794 Node *un = result->as_Phi()->unique_input(igvn); 2795 if (un != NULL) { 2796 orig_phis.append_if_missing(result->as_Phi()); 2797 result = un; 2798 } else { 2799 break; 2800 } 2801 } else if (result->is_ClearArray()) { 2802 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) { 2803 // Can not bypass initialization of the instance 2804 // we are looking for. 2805 break; 2806 } 2807 // Otherwise skip it (the call updated 'result' value). 2808 } else if (result->Opcode() == Op_SCMemProj) { 2809 Node* mem = result->in(0); 2810 Node* adr = NULL; 2811 if (mem->is_LoadStore()) { 2812 adr = mem->in(MemNode::Address); 2813 } else { 2814 assert(mem->Opcode() == Op_EncodeISOArray || 2815 mem->Opcode() == Op_StrCompressedCopy, "sanity"); 2816 adr = mem->in(3); // Memory edge corresponds to destination array 2817 } 2818 const Type *at = igvn->type(adr); 2819 if (at != Type::TOP) { 2820 assert(at->isa_ptr() != NULL, "pointer type required."); 2821 int idx = C->get_alias_index(at->is_ptr()); 2822 if (idx == alias_idx) { 2823 // Assert in debug mode 2824 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field"); 2825 break; // In product mode return SCMemProj node 2826 } 2827 } 2828 result = mem->in(MemNode::Memory); 2829 } else if (result->Opcode() == Op_StrInflatedCopy) { 2830 Node* adr = result->in(3); // Memory edge corresponds to destination array 2831 const Type *at = igvn->type(adr); 2832 if (at != Type::TOP) { 2833 assert(at->isa_ptr() != NULL, "pointer type required."); 2834 int idx = C->get_alias_index(at->is_ptr()); 2835 if (idx == alias_idx) { 2836 // Assert in debug mode 2837 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field"); 2838 break; // In product mode return SCMemProj node 2839 } 2840 } 2841 result = result->in(MemNode::Memory); 2842 } 2843 } 2844 if (result->is_Phi()) { 2845 PhiNode *mphi = result->as_Phi(); 2846 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); 2847 const TypePtr *t = mphi->adr_type(); 2848 if (!is_instance) { 2849 // Push all non-instance Phis on the orig_phis worklist to update inputs 2850 // during Phase 4 if needed. 
2851       orig_phis.append_if_missing(mphi);
2852     } else if (C->get_alias_index(t) != alias_idx) {
2853       // Create a new Phi with the specified alias index type.
2854       result = split_memory_phi(mphi, alias_idx, orig_phis);
2855     }
2856   }
2857   // the result is either MemNode, PhiNode, InitializeNode.
2858   return result;
2859 }
2860
2861 //
2862 //  Convert the types of unescaped objects to instance types where possible,
2863 //  propagate the new type information through the graph, and update memory
2864 //  edges and MergeMem inputs to reflect the new type.
2865 //
2866 //  We start with allocations (and calls which may be allocations) on alloc_worklist.
2867 //  The processing is done in 4 phases:
2868 //
2869 //  Phase 1:  Process possible allocations from alloc_worklist. Create instance
2870 //            types for the CheckCastPP for allocations where possible.
2871 //            Propagate the new types through users as follows:
2872 //               casts and Phi:  push users on alloc_worklist
2873 //               AddP:  cast Base and Address inputs to the instance type
2874 //                      push any AddP users on alloc_worklist and push any memnode
2875 //                      users onto memnode_worklist.
2876 //  Phase 2:  Process MemNode's from memnode_worklist. compute new address type and
2877 //            search the Memory chain for a store with the appropriate address
2878 //            type. If a Phi is found, create a new version with
2879 //            the appropriate memory slices from each of the Phi inputs.
2880 //            For stores, process the users as follows:
2881 //               MemNode:  push on memnode_worklist
2882 //               MergeMem: push on mergemem_worklist
2883 //  Phase 3:  Process MergeMem nodes from mergemem_worklist. Walk each memory slice
2884 //            moving the first node encountered of each instance type to the
2885 //            input corresponding to its alias index (the appropriate memory
2886 //            slice).
2887 //  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
2888 //
2889 //  In the following example, the CheckCastPP nodes are the cast of allocation
2890 //  results and the allocation of node 29 is unescaped and eligible to be an
2891 //  instance type.
2892 //
2893 //  We start with:
2894 //
2895 //     7 Parm #memory
2896 //    10 ConI "12"
2897 //    19 CheckCastPP "Foo"
2898 //    20 AddP _ 19 19 10   Foo+12  alias_index=4
2899 //    29 CheckCastPP "Foo"
2900 //    30 AddP _ 29 29 10   Foo+12  alias_index=4
2901 //
2902 //    40 StoreP  25   7  20   ... alias_index=4
2903 //    50 StoreP  35  40  30   ... alias_index=4
2904 //    60 StoreP  45  50  20   ... alias_index=4
2905 //    70 LoadP    _  60  30   ... alias_index=4
2906 //    80 Phi     75  50  60   Memory alias_index=4
2907 //    90 LoadP    _  80  30   ... alias_index=4
2908 //   100 LoadP    _  80  20   ... alias_index=4
2909 //
2910 //
2911 //  Phase 1 creates an instance type for node 29 assigning it an instance id of 24
2912 //  and creating a new alias index for node 30. This gives:
2913 //
2914 //     7 Parm #memory
2915 //    10 ConI "12"
2916 //    19 CheckCastPP "Foo"
2917 //    20 AddP _ 19 19 10   Foo+12  alias_index=4
2918 //    29 CheckCastPP "Foo"  iid=24
2919 //    30 AddP _ 29 29 10   Foo+12  alias_index=6  iid=24
2920 //
2921 //    40 StoreP  25   7  20   ... alias_index=4
2922 //    50 StoreP  35  40  30   ... alias_index=6
2923 //    60 StoreP  45  50  20   ... alias_index=4
2924 //    70 LoadP    _  60  30   ... alias_index=6
2925 //    80 Phi     75  50  60   Memory alias_index=4
2926 //    90 LoadP    _  80  30   ... alias_index=6
2927 //   100 LoadP    _  80  20   ... alias_index=4
2928 //
2929 //  In phase 2, new memory inputs are computed for the loads and stores,
2930 //  and a new version of the phi is created.
In phase 4, the inputs to 2931 // node 80 are updated and then the memory nodes are updated with the 2932 // values computed in phase 2. This results in: 2933 // 2934 // 7 Parm #memory 2935 // 10 ConI "12" 2936 // 19 CheckCastPP "Foo" 2937 // 20 AddP _ 19 19 10 Foo+12 alias_index=4 2938 // 29 CheckCastPP "Foo" iid=24 2939 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24 2940 // 2941 // 40 StoreP 25 7 20 ... alias_index=4 2942 // 50 StoreP 35 7 30 ... alias_index=6 2943 // 60 StoreP 45 40 20 ... alias_index=4 2944 // 70 LoadP _ 50 30 ... alias_index=6 2945 // 80 Phi 75 40 60 Memory alias_index=4 2946 // 120 Phi 75 50 50 Memory alias_index=6 2947 // 90 LoadP _ 120 30 ... alias_index=6 2948 // 100 LoadP _ 80 20 ... alias_index=4 2949 // 2950 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, GrowableArray<ArrayCopyNode*> &arraycopy_worklist) { 2951 GrowableArray<Node *> memnode_worklist; 2952 GrowableArray<PhiNode *> orig_phis; 2953 PhaseIterGVN *igvn = _igvn; 2954 uint new_index_start = (uint) _compile->num_alias_types(); 2955 Arena* arena = Thread::current()->resource_area(); 2956 VectorSet visited(arena); 2957 ideal_nodes.clear(); // Reset for use with set_map/get_map. 2958 uint unique_old = _compile->unique(); 2959 2960 // Phase 1: Process possible allocations from alloc_worklist. 2961 // Create instance types for the CheckCastPP for allocations where possible. 2962 // 2963 // (Note: don't forget to change the order of the second AddP node on 2964 // the alloc_worklist if the order of the worklist processing is changed, 2965 // see the comment in find_second_addp().) 2966 // 2967 while (alloc_worklist.length() != 0) { 2968 Node *n = alloc_worklist.pop(); 2969 uint ni = n->_idx; 2970 if (n->is_Call()) { 2971 CallNode *alloc = n->as_Call(); 2972 // copy escape information to call node 2973 PointsToNode* ptn = ptnode_adr(alloc->_idx); 2974 PointsToNode::EscapeState es = ptn->escape_state(); 2975 // We have an allocation or call which returns a Java object, 2976 // see if it is unescaped. 2977 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) 2978 continue; 2979 // Find CheckCastPP for the allocate or for the return value of a call 2980 n = alloc->result_cast(); 2981 if (n == NULL) { // No uses except Initialize node 2982 if (alloc->is_Allocate()) { 2983 // Set the scalar_replaceable flag for allocation 2984 // so it could be eliminated if it has no uses. 2985 alloc->as_Allocate()->_is_scalar_replaceable = true; 2986 } 2987 if (alloc->is_CallStaticJava()) { 2988 // Set the scalar_replaceable flag for boxing method 2989 // so it could be eliminated if it has no uses. 2990 alloc->as_CallStaticJava()->_is_scalar_replaceable = true; 2991 } 2992 continue; 2993 } 2994 if (!n->is_CheckCastPP()) { // not unique CheckCastPP. 2995 assert(!alloc->is_Allocate(), "allocation should have unique type"); 2996 continue; 2997 } 2998 2999 // The inline code for Object.clone() casts the allocation result to 3000 // java.lang.Object and then to the actual type of the allocated 3001 // object. Detect this case and use the second cast. 3002 // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when 3003 // the allocation result is cast to java.lang.Object and then 3004 // to the actual Array type. 
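// The cast chain referred to above looks like (sketch):
//
//   Allocate
//     |
//   Proj #5 (oop result)
//     |
//   CheckCastPP (to java.lang.Object)   <-- 'n' on entry
//     |
//   CheckCastPP (to the actual type)    <-- 'cast2', used instead below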
3005     if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
3006         && (alloc->is_AllocateArray() ||
3007             igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {
3008       Node *cast2 = NULL;
3009       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3010         Node *use = n->fast_out(i);
3011         if (use->is_CheckCastPP()) {
3012           cast2 = use;
3013           break;
3014         }
3015       }
3016       if (cast2 != NULL) {
3017         n = cast2;
3018       } else {
3019         // Non-scalar replaceable if the allocation type is unknown statically
3020         // (reflection allocation); the object can't be restored during
3021         // deoptimization without a precise type.
3022         continue;
3023       }
3024     }
3025
3026     const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
3027     if (t == NULL)
3028       continue; // not a TypeOopPtr
3029     if (!t->klass_is_exact())
3030       continue; // not a unique type
3031
3032     if (alloc->is_Allocate()) {
3033       // Set the scalar_replaceable flag for allocation
3034       // so it could be eliminated.
3035       alloc->as_Allocate()->_is_scalar_replaceable = true;
3036     }
3037     if (alloc->is_CallStaticJava()) {
3038       // Set the scalar_replaceable flag for boxing method
3039       // so it could be eliminated.
3040       alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
3041     }
3042     set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state
3043     // in order for an object to be scalar-replaceable, it must be:
3044     //   - a direct allocation (not a call returning an object)
3045     //   - non-escaping
3046     //   - eligible to be a unique type
3047     //   - not determined to be ineligible by escape analysis
3048     set_map(alloc, n);
3049     set_map(n, alloc);
3050     const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
3051     igvn->hash_delete(n);
3052     igvn->set_type(n, tinst);
3053     n->raise_bottom_type(tinst);
3054     igvn->hash_insert(n);
3055     record_for_optimizer(n);
3056     // Allocate an alias index for the header fields. Accesses to
3057     // the header emitted during macro expansion wouldn't have
3058     // correct memory state otherwise.
3059     _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
3060     _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
3061     if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
3062
3063       // First, put on the worklist all Field edges from Connection Graph
3064       // which is more accurate than putting immediate users from Ideal Graph.
3065       for (EdgeIterator e(ptn); e.has_next(); e.next()) {
3066         PointsToNode* tgt = e.get();
3067         if (tgt->is_Arraycopy()) {
3068           continue;
3069         }
3070         Node* use = tgt->ideal_node();
3071         assert(tgt->is_Field() && use->is_AddP(),
3072                "only AddP nodes are Field edges in CG");
3073         if (use->outcnt() > 0) { // Don't process dead nodes
3074           Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
3075           if (addp2 != NULL) {
3076             assert(alloc->is_AllocateArray(), "array allocation was expected");
3077             alloc_worklist.append_if_missing(addp2);
3078           }
3079           alloc_worklist.append_if_missing(use);
3080         }
3081       }
3082
3083       // An allocation may have an Initialize which has raw stores. Scan
3084       // the users of the raw allocation result and push AddP users
3085       // on alloc_worklist.
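// (Sketch, following AddP case #3 in get_addp_base() above: those raw
//  stores hang off the allocation's raw oop projection, roughly
//  Allocate -> Proj -> AddP(top, rawoop, offset) -> StoreX, and exist
//  before any CheckCastPP does.)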
3086 Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms); 3087 assert (raw_result != NULL, "must have an allocation result"); 3088 for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) { 3089 Node *use = raw_result->fast_out(i); 3090 if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes 3091 Node* addp2 = find_second_addp(use, raw_result); 3092 if (addp2 != NULL) { 3093 assert(alloc->is_AllocateArray(),"array allocation was expected"); 3094 alloc_worklist.append_if_missing(addp2); 3095 } 3096 alloc_worklist.append_if_missing(use); 3097 } else if (use->is_MemBar()) { 3098 memnode_worklist.append_if_missing(use); 3099 } 3100 } 3101 } 3102 } else if (n->is_AddP()) { 3103 JavaObjectNode* jobj = unique_java_object(get_addp_base(n)); 3104 if (jobj == NULL || jobj == phantom_obj) { 3105 #ifdef ASSERT 3106 ptnode_adr(get_addp_base(n)->_idx)->dump(); 3107 ptnode_adr(n->_idx)->dump(); 3108 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation"); 3109 #endif 3110 _compile->record_failure(C2Compiler::retry_no_escape_analysis()); 3111 return; 3112 } 3113 Node *base = get_map(jobj->idx()); // CheckCastPP node 3114 if (!split_AddP(n, base)) continue; // wrong type from dead path 3115 } else if (n->is_Phi() || 3116 n->is_CheckCastPP() || 3117 n->is_EncodeP() || 3118 n->is_DecodeN() || 3119 (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) { 3120 if (visited.test_set(n->_idx)) { 3121 assert(n->is_Phi(), "loops only through Phi's"); 3122 continue; // already processed 3123 } 3124 JavaObjectNode* jobj = unique_java_object(n); 3125 if (jobj == NULL || jobj == phantom_obj) { 3126 #ifdef ASSERT 3127 ptnode_adr(n->_idx)->dump(); 3128 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation"); 3129 #endif 3130 _compile->record_failure(C2Compiler::retry_no_escape_analysis()); 3131 return; 3132 } else { 3133 Node *val = get_map(jobj->idx()); // CheckCastPP node 3134 TypeNode *tn = n->as_Type(); 3135 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr(); 3136 assert(tinst != NULL && tinst->is_known_instance() && 3137 tinst->instance_id() == jobj->idx() , "instance type expected."); 3138 3139 const Type *tn_type = igvn->type(tn); 3140 const TypeOopPtr *tn_t; 3141 if (tn_type->isa_narrowoop()) { 3142 tn_t = tn_type->make_ptr()->isa_oopptr(); 3143 } else { 3144 tn_t = tn_type->isa_oopptr(); 3145 } 3146 if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) { 3147 if (tn_type->isa_narrowoop()) { 3148 tn_type = tinst->make_narrowoop(); 3149 } else { 3150 tn_type = tinst; 3151 } 3152 igvn->hash_delete(tn); 3153 igvn->set_type(tn, tn_type); 3154 tn->set_type(tn_type); 3155 igvn->hash_insert(tn); 3156 record_for_optimizer(n); 3157 } else { 3158 assert(tn_type == TypePtr::NULL_PTR || 3159 tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()), 3160 "unexpected type"); 3161 continue; // Skip dead path with different type 3162 } 3163 } 3164 } else { 3165 debug_only(n->dump();) 3166 assert(false, "EA: unexpected node"); 3167 continue; 3168 } 3169 // push allocation's users on appropriate worklist 3170 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3171 Node *use = n->fast_out(i); 3172 if(use->is_Mem() && use->in(MemNode::Address) == n) { 3173 // Load/store to instance's field 3174 memnode_worklist.append_if_missing(use); 3175 } else if (use->is_MemBar()) { 3176 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge 3177 memnode_worklist.append_if_missing(use); 3178 } 3179 } else if (use->is_AddP() && use->outcnt() 

  // New alias types were created in split_AddP().
  uint new_index_end = (uint) _compile->num_alias_types();
  assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");

  //  Phase 2:  Process MemNode's from memnode_worklist. Compute new address
  //            types and new values for their Memory inputs (the Memory
  //            inputs are not actually updated until Phase 4.)
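  //
  //  Sketch of what this phase computes (illustrative instance id):
  //
  //    Point p = new Point();  // unique non-escaping instance, say iid 24
  //    p.x = 1;                // StoreI whose address type is now iid=24
  //
  //  The store's address type was narrowed in Phase 1, so it gets a fresh,
  //  instance-specific alias index, and find_inst_mem() walks the old memory
  //  chain to find the input belonging to that slice. The result is only
  //  remembered via set_map() here; the Memory edges are rewired in Phase 4.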
  if (memnode_worklist.length() == 0)
    return;  // nothing to do
  while (memnode_worklist.length() != 0) {
    Node *n = memnode_worklist.pop();
    if (visited.test_set(n->_idx))
      continue;
    if (n->is_Phi() || n->is_ClearArray()) {
      // we don't need to do anything, but the users must be pushed
    } else if (n->is_MemBar()) { // Initialize, MemBar nodes
      // we don't need to do anything, but the users must be pushed
      n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
      if (n == NULL)
        continue;
    } else if (n->Opcode() == Op_StrCompressedCopy ||
               n->Opcode() == Op_EncodeISOArray) {
      // get the memory projection
      n = n->find_out_with(Op_SCMemProj);
      assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
    } else {
      assert(n->is_Mem(), "memory node required.");
      Node *addr = n->in(MemNode::Address);
      const Type *addr_t = igvn->type(addr);
      if (addr_t == Type::TOP)
        continue;
      assert (addr_t->isa_ptr() != NULL, "pointer type required.");
      int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
      assert ((uint)alias_idx < new_index_end, "wrong alias index");
      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != n->in(MemNode::Memory)) {
        // We delay the memory edge update since we need the old one in
        // the MergeMem code below when instance memory slices are separated.
        set_map(n, mem);
      }
      if (n->is_Load()) {
        continue;  // don't push users
      } else if (n->is_LoadStore()) {
        // get the memory projection
        n = n->find_out_with(Op_SCMemProj);
        assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
      }
    }
    // push user on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Phi() || use->is_ClearArray()) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
        if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
          continue;
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites destination array
          memnode_worklist.append_if_missing(use);
        }
      } else {
        uint op = use->Opcode();
        if ((use->in(MemNode::Memory) == n) &&
            (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
                     op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing memory path");
        }
#endif
      }
    }
  }

  //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
  //  Walk each memory slice moving the first node encountered of each
  //  instance type to the input corresponding to its alias index.
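  //
  //  Roughly (illustrative): a MergeMem built before the split may still
  //  feed one general slice with a chain such as
  //
  //    StoreI (iid=24 field)  ->  StoreI (general field)  ->  ...
  //
  //  The walk below re-files the first store under its new, more precise
  //  alias index and then fills every still-empty instance slice (alias
  //  indices in [new_index_start, new_index_end)) using find_inst_mem().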
  uint length = _mergemem_worklist.length();
  for (uint next = 0; next < length; ++next) {
    MergeMemNode* nmm = _mergemem_worklist.at(next);
    assert(!visited.test_set(nmm->_idx), "should not be visited before");
    // Note: we don't want to use MergeMemStream here because we only want to
    // scan inputs which exist at the start, not ones we add during processing.
    // Note 2: MergeMem may already contain instance memory slices added
    // during the find_inst_mem() call when memory nodes were processed above.
    igvn->hash_delete(nmm);
    uint nslices = MIN2(nmm->req(), new_index_start);
    for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
      Node* mem = nmm->in(i);
      Node* cur = NULL;
      if (mem == NULL || mem->is_top())
        continue;
      // First, update mergemem by moving memory nodes to corresponding slices
      // if their type became more precise since this mergemem was created.
      while (mem->is_Mem()) {
        const Type *at = igvn->type(mem->in(MemNode::Address));
        if (at != Type::TOP) {
          assert (at->isa_ptr() != NULL, "pointer type required.");
          uint idx = (uint)_compile->get_alias_index(at->is_ptr());
          if (idx == i) {
            if (cur == NULL)
              cur = mem;
          } else {
            if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
              nmm->set_memory_at(idx, mem);
            }
          }
        }
        mem = mem->in(MemNode::Memory);
      }
      nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
      // Find any instance of the current type if we haven't already
      // encountered a memory slice of the instance along the memory chain.
      for (uint ni = new_index_start; ni < new_index_end; ni++) {
        if ((uint)_compile->get_general_index(ni) == i) {
          Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
          if (nmm->is_empty_memory(m)) {
            Node* result = find_inst_mem(mem, ni, orig_phis);
            if (_compile->failing()) {
              return;
            }
            nmm->set_memory_at(ni, result);
          }
        }
      }
    }
    // Find the rest of the instance values.
    for (uint ni = new_index_start; ni < new_index_end; ni++) {
      const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
      Node* result = step_through_mergemem(nmm, ni, tinst);
      if (result == nmm->base_memory()) {
        // Didn't find instance memory, search through general slice recursively.
        result = nmm->memory_at(_compile->get_general_index(ni));
        result = find_inst_mem(result, ni, orig_phis);
        if (_compile->failing()) {
          return;
        }
        nmm->set_memory_at(ni, result);
      }
    }
    igvn->hash_insert(nmm);
    record_for_optimizer(nmm);
  }

  //  Phase 4:  Update the inputs of non-instance memory Phis and
  //            the Memory input of memnodes.
  // First update the inputs of any non-instance Phi's from
  // which we split out an instance Phi.  Note we don't have
  // to recursively process Phi's encountered on the input memory
  // chains as is done in split_memory_phi() since they will
  // also be processed here.
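  //
  // Illustrative case: when find_inst_mem() split an instance Phi out of a
  // general memory Phi, the original Phi was appended to orig_phis. Its
  // inputs may still reach stores that now live on an instance slice, so
  // each input is redirected below to the memory matching the Phi's own
  // alias index.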
  for (int j = 0; j < orig_phis.length(); j++) {
    PhiNode *phi = orig_phis.at(j);
    int alias_idx = _compile->get_alias_index(phi->adr_type());
    igvn->hash_delete(phi);
    for (uint i = 1; i < phi->req(); i++) {
      Node *mem = phi->in(i);
      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != new_mem) {
        phi->set_req(i, new_mem);
      }
    }
    igvn->hash_insert(phi);
    record_for_optimizer(phi);
  }

  // Update the memory inputs of MemNodes with the value we computed
  // in Phase 2 and move stores' memory users to corresponding memory slices.
  // Disable memory split verification code until the fix for 6984348.
  // Currently it produces false negative results since it does not cover all cases.
#if 0 // ifdef ASSERT
  visited.Reset();
  Node_Stack old_mems(arena, _compile->unique() >> 2);
#endif
  for (uint i = 0; i < ideal_nodes.size(); i++) {
    Node*    n = ideal_nodes.at(i);
    Node* nmem = get_map(n->_idx);
    assert(nmem != NULL, "sanity");
    if (n->is_Mem()) {
#if 0 // ifdef ASSERT
      Node* old_mem = n->in(MemNode::Memory);
      if (!visited.test_set(old_mem->_idx)) {
        old_mems.push(old_mem, old_mem->outcnt());
      }
#endif
      assert(n->in(MemNode::Memory) != nmem, "sanity");
      if (!n->is_Load()) {
        // Move memory users of a store first.
        move_inst_mem(n, orig_phis);
      }
      // Now update memory input
      igvn->hash_delete(n);
      n->set_req(MemNode::Memory, nmem);
      igvn->hash_insert(n);
      record_for_optimizer(n);
    } else {
      assert(n->is_Allocate() || n->is_CheckCastPP() ||
             n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
    }
  }
#if 0 // ifdef ASSERT
  // Verify that memory was split correctly
  while (old_mems.is_nonempty()) {
    Node* old_mem = old_mems.node();
    uint  old_cnt = old_mems.index();
    old_mems.pop();
    assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
  }
#endif
}

#ifndef PRODUCT
static const char *node_type_names[] = {
  "UnknownType",
  "JavaObject",
  "LocalVar",
  "Field",
  "Arraycopy"
};

static const char *esc_names[] = {
  "UnknownEscape",
  "NoEscape",
  "ArgEscape",
  "GlobalEscape"
};
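
// Rough shape of the dump output below (illustrative values), e.g. for a
// field node:
//
//   Field NoEscape(NoEscape) oop +12 ( 24P )[ 31P ] [ 57b ]]  60  AddP ...
//
// i.e. node type, escape state as own(fields), oop flag and offset, base
// objects in "( )", points-to edges in "[ ]", uses in "[ ... ]]", and
// finally the ideal node itself.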
"cp" : ""); 3537 } 3538 tty->print(" ["); 3539 for (UseIterator i(this); i.has_next(); i.next()) { 3540 PointsToNode* u = i.get(); 3541 bool is_base = false; 3542 if (PointsToNode::is_base_use(u)) { 3543 is_base = true; 3544 u = PointsToNode::get_use_node(u)->as_Field(); 3545 } 3546 tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : ""); 3547 } 3548 tty->print(" ]] "); 3549 if (_node == NULL) 3550 tty->print_cr("<null>"); 3551 else 3552 _node->dump(); 3553 } 3554 3555 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { 3556 bool first = true; 3557 int ptnodes_length = ptnodes_worklist.length(); 3558 for (int i = 0; i < ptnodes_length; i++) { 3559 PointsToNode *ptn = ptnodes_worklist.at(i); 3560 if (ptn == NULL || !ptn->is_JavaObject()) 3561 continue; 3562 PointsToNode::EscapeState es = ptn->escape_state(); 3563 if ((es != PointsToNode::NoEscape) && !Verbose) { 3564 continue; 3565 } 3566 Node* n = ptn->ideal_node(); 3567 if (n->is_Allocate() || (n->is_CallStaticJava() && 3568 n->as_CallStaticJava()->is_boxing_method())) { 3569 if (first) { 3570 tty->cr(); 3571 tty->print("======== Connection graph for "); 3572 _compile->method()->print_short_name(); 3573 tty->cr(); 3574 first = false; 3575 } 3576 ptn->dump(); 3577 // Print all locals and fields which reference this allocation 3578 for (UseIterator j(ptn); j.has_next(); j.next()) { 3579 PointsToNode* use = j.get(); 3580 if (use->is_LocalVar()) { 3581 use->dump(Verbose); 3582 } else if (Verbose) { 3583 use->dump(); 3584 } 3585 } 3586 tty->cr(); 3587 } 3588 } 3589 } 3590 #endif