/*
 * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1ThreadLocalData.hpp"
#endif // INCLUDE_ALL_GCS

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  // Add ConP(#NULL) and ConN(#NULL) nodes.
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
  _pcmp_neq = NULL; // Should be initialized
  _pcmp_eq  = NULL;
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
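  // Editorial example (not from the original source): a method body such as
  //
  //   Point p = new Point(x, y);      // Allocate macro node
  //   synchronized (p) { p.x++; }     // Lock/Unlock macro nodes
  //
  // produces the macro nodes checked below, making the compilation an
  // EA candidate.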
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate())
      return true;
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con()))
        return true;
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  ResourceMark rm;

  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non-escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0)
    igvn->hash_delete(oop_null);
  if (noop_null->outcnt() == 0)
    igvn->hash_delete(noop_null);
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<Node*> storestore_worklist;
  GrowableArray<ArrayCopyNode*> arraycopy_worklist;
  GrowableArray<PointsToNode*>   ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_worklist;
  GrowableArray<FieldNode*>      oop_fields_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), NULL);  // preallocate space
  // Initialize worklist
  if (C->root() != NULL) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for( uint next = 0; next < ideal_nodes.size(); ++next ) {
    Node* n = ideal_nodes.at(next);
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != NULL && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only the results of allocations and java static calls are interesting.
          non_escaped_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
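    // Editorial note (sketch of the assumed flow): the checks below file
    // each interesting ideal node into a specialized worklist; those lists
    // drive the post-analysis optimizations in steps 4 and 5 rather than
    // the graph construction itself.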
    if (n->is_MergeMem()) {
      // Collect all MergeMem nodes to add memory slices for
      // scalar replaceable objects in split_unique_types().
      _mergemem_worklist.append(n->as_MergeMem());
    } else if (OptimizePtrCompare && n->is_Cmp() &&
               (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
      // Collect compare pointers nodes.
      ptr_cmp_worklist.append(n);
    } else if (n->is_MemBarStoreStore()) {
      // Collect all MemBarStoreStore nodes so that depending on the
      // escape status of the associated Allocate node some of them
      // may be eliminated.
      storestore_worklist.append(n);
    } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
               (n->req() > MemBarNode::Precedent)) {
      record_for_optimizer(n);
#ifdef ASSERT
    } else if (n->is_AddP()) {
      // Collect address nodes for graph verification.
      addp_worklist.append(n);
#endif
    } else if (n->is_ArrayCopy()) {
      // Keep a list of ArrayCopy nodes so that if one of its inputs is
      // non-escaping, we can record a unique type.
      arraycopy_worklist.append(n->as_ArrayCopy());
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      ideal_nodes.push(m);
    }
  }
  if (non_escaped_worklist.length() == 0) {
    _collecting = false;
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while(delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }
  int ptnodes_length = ptnodes_worklist.length();

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and all
    // local vars have edges.
    _verify = true;
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // The bytecode analyzer BCEscapeAnalyzer, used for Call node
  // processing, calls into CI to resolve symbols (types, fields, methods)
  // referenced in bytecode. During symbol resolution the VM may throw
  // an exception which CI cleans and converts to a compilation failure.
  if (C->failing())  return false;

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or hit time or iterations limits.
    _collecting = false;
    return false;
  }

  // 3. Adjust scalar_replaceable state of non-escaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (n->is_CallStaticJava()) {
      n->as_CallStaticJava()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn);
      if (ptn->scalar_replaceable()) {
        alloc_worklist.append(ptn->ideal_node());
      }
    }
  }
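  // Editorial example (not from the original source): after the loop above,
  // an allocation like
  //
  //   Point p = new Point();
  //   int r = p.x + p.y;     // p never escapes
  //
  // stays NoEscape and scalar replaceable, so it remains on alloc_worklist,
  // while an allocation stored into a static field was already dropped as
  // GlobalEscape.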

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  if (has_scalar_replaceable_candidates &&
      C->AliasLevel() >= 3 && EliminateAllocations) {
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist);
    if (C->failing())  return false;
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    } else if (C->AliasLevel() < 3) {
      tty->print(" since AliasLevel < 3 ===");
    }
    tty->cr();
#endif
  }
  return has_non_escaping_obj;
}

// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != NULL) {
    Node* adr = n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == NULL ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape,
                           adr, delayed_worklist);
  }
}

// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method should not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != NULL)
    return; // No need to redefine PointsTo node during first iteration.
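  // Editorial note (sketch): nodes whose inputs may not be registered yet,
  // such as Phi and CMoveP, are only recorded here and pushed on
  // delayed_worklist; their edges are completed later by add_final_edges().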
  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on IGVN worklist to process them during
      // first IGVN optimization when escape information is still available.
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != NULL && strcmp(name, "uncommon_trap") == 0)
          return; // Skip uncommon traps
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
      if ((n->as_Call()->returns_pointer() &&
           n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
          (n->is_CallStaticJava() &&
           n->as_CallStaticJava()->is_boxing_method())) {
        add_call_node(n->as_Call());
      }
    }
    return;
  }
  // Put this check here to process call arguments since some call nodes
  // point to phantom_obj.
  if (n_ptn == phantom_obj || n_ptn == null_obj)
    return; // Skip predefined nodes.

  int opcode = n->Opcode();
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      // Field nodes are created for all field types. They are used in
      // adjust_scalar_replaceable_state() and split_unique_types().
      // Note, non-oop fields will have only base edges in Connection
      // Graph because such fields are not used for oop loads and stores.
      int offset = address_offset(n, igvn);
      add_field(n, PointsToNode::NoEscape, offset);
      if (ptn_base == NULL) {
        delayed_worklist->push(n); // Process it later.
      } else {
        n_ptn = ptnode_adr(n_idx);
        add_base(n_ptn->as_Field(), ptn_base);
      }
      break;
    }
    case Op_CastX2P: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), delayed_worklist);
      break;
    }
    case Op_CMoveP: {
      add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during first iteration because some could be
      // not defined yet.
      delayed_worklist->push(n);
      break;
    }
    case Op_ConP:
    case Op_ConN:
    case Op_ConNKlass: {
      // assume all oop constants globally escape except for null
      PointsToNode::EscapeState es;
      const Type* t = igvn->type(n);
      if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
        es = PointsToNode::NoEscape;
      } else {
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(n, es);
      break;
    }
    case Op_CreateEx: {
      // assume that all exception objects globally escape
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadKlass:
    case Op_LoadNKlass: {
      // Unknown class is loaded
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadPLocked: {
      add_objload_to_connection_graph(n, delayed_worklist);
      break;
    }
    case Op_Parm: {
      map_ideal_node(n, phantom_obj);
      break;
    }
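    // Editorial note: incoming parameters are conservatively mapped to
    // phantom_obj above because the caller's objects are unknown; e.g. in
    //
    //   static void m(Foo f) { ... }   // f may reference any caller object
    //
    // nothing can be assumed about what 'f' points to.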
    case Op_PartialSubtypeCheck: {
      // Produces Null or notNull and is used only in CmpP so
      // phantom_obj could be used.
      map_ideal_node(n, phantom_obj); // Result is unknown
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during first iteration because some could be
        // not defined yet.
        delayed_worklist->push(n);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape,
                               n->in(0), delayed_worklist);
      }
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), delayed_worklist);
      }
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      // fallthrough
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_StorePConditional:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN: {
      Node* adr = n->in(MemNode::Address);
      const Type *adr_type = igvn->type(adr);
      adr_type = adr_type->make_ptr();
      if (adr_type == NULL) {
        break; // skip dead nodes
      }
      if (   adr_type->isa_oopptr()
          || (   (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
              && adr_type == TypeRawPtr::NOTNULL
              && adr->in(AddPNode::Address)->is_Proj()
              && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
        delayed_worklist->push(n); // Process it later.
#ifdef ASSERT
        assert(adr->is_AddP(), "expecting an AddP");
        if (adr_type == TypeRawPtr::NOTNULL) {
          // Verify a raw address for a store captured by Initialize node.
          int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
          assert(offs != Type::OffsetBot, "offset must be a constant");
        }
#endif
      } else {
        // Ignore copying the displaced header to the BoxNode (OSR compilation).
        if (adr->is_BoxLock())
          break;
        // Stored value escapes in unsafe access.
        if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
          // Pointer stores in G1 barriers look like unsafe access.
          // Ignore such stores so that non-escaping allocations
          // can still be scalar replaced.
          if (UseG1GC && adr->is_AddP()) {
            Node* base = get_addp_base(adr);
            if (base->Opcode() == Op_LoadP &&
                base->in(MemNode::Address)->is_AddP()) {
              adr = base->in(MemNode::Address);
              Node* tls = get_addp_base(adr);
              if (tls->Opcode() == Op_ThreadLocal) {
                int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
                if (offs == in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset())) {
                  break; // G1 pre barrier previous oop value store.
                }
                if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
                  break; // G1 post barrier card address store.
                }
              }
            }
          }
          delayed_worklist->push(n); // Process unsafe access later.
          break;
        }
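        // Editorial sketch of the G1 pattern matched above (assumed shape):
        //
        //   StoreP(AddP(LoadP(AddP(ThreadLocal, queue_buffer_offset)), idx), oop)
        //
        // i.e. a raw store into the thread-local SATB or dirty card queue
        // buffer. Raw stores that are neither unsafe accesses nor such
        // barrier stores fall through to the assert below.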
#ifdef ASSERT
        n->dump(1);
        assert(false, "not unsafe or G1 barrier raw StoreP");
#endif
      }
      break;
    }
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      add_local_var(n, PointsToNode::ArgEscape);
      delayed_worklist->push(n); // Process it later.
      break;
    }
    case Op_ThreadLocal: {
      add_java_object(n, PointsToNode::ArgEscape);
      break;
    }
    default:
      ; // Do nothing for nodes not related to EA.
  }
  return;
}

#ifdef ASSERT
#define ELSE_FAIL(name)                                \
      /* Should not be called for non-pointer type. */ \
      n->dump(1);                                      \
      assert(false, name);                             \
      break;
#else
#define ELSE_FAIL(name) \
      break;
#endif

// Add final simple edges to graph.
void ConnectionGraph::add_final_edges(Node *n) {
  PointsToNode* n_ptn = ptnode_adr(n->_idx);
#ifdef ASSERT
  if (_verify && n_ptn->is_JavaObject())
    return; // This method does not change graph for JavaObject.
#endif

  if (n->is_Call()) {
    process_call_arguments(n->as_Call());
    return;
  }
  assert(n->is_Store() || n->is_LoadStore() ||
         (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
         "node should be registered already");
  int opcode = n->Opcode();
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      assert(ptn_base != NULL, "field's base should be registered");
      add_base(n_ptn->as_Field(), ptn_base);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), NULL);
      break;
    }
    case Op_CMoveP: {
      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == NULL)
          continue;  // ignore NULL
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n)
          continue;  // ignore top or inputs which go back to this node
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadPLocked: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = _igvn->type(n);
      if (t->make_ptr() != NULL) {
        Node* adr = n->in(MemNode::Address);
        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
        break;
      }
      ELSE_FAIL("Op_LoadP");
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        for (uint i = 1; i < n->req(); i++) {
          Node* in = n->in(i);
          if (in == NULL)
            continue;  // ignore NULL
          Node* uncast_in = in->uncast();
          if (uncast_in->is_top() || uncast_in == n)
            continue;  // ignore top or inputs which go back to this node
          PointsToNode* ptn = ptnode_adr(in->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
        break;
      }
      ELSE_FAIL("Op_Phi");
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
        break;
      }
      ELSE_FAIL("Op_Proj");
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), NULL);
        break;
      }
      ELSE_FAIL("Op_Return");
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_StorePConditional:
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      Node* adr = n->in(MemNode::Address);
      const Type *adr_type = _igvn->type(adr);
      adr_type = adr_type->make_ptr();
#ifdef ASSERT
      if (adr_type == NULL) {
        n->dump(1);
        assert(adr_type != NULL, "dead node should not be on list");
        break;
      }
#endif
      if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
          opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
      }
      if (   adr_type->isa_oopptr()
          || (   (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
              && adr_type == TypeRawPtr::NOTNULL
              && adr->in(AddPNode::Address)->is_Proj()
              && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
        // Point Address to Value
        PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
        assert(adr_ptn != NULL &&
               adr_ptn->as_Field()->is_oop(), "node should be registered");
        Node *val = n->in(MemNode::ValueIn);
        PointsToNode* ptn = ptnode_adr(val->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(adr_ptn, ptn);
        break;
      } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
        // Stored value escapes in unsafe access.
        Node *val = n->in(MemNode::ValueIn);
        PointsToNode* ptn = ptnode_adr(val->_idx);
        assert(ptn != NULL, "node should be registered");
        set_escape_state(ptn, PointsToNode::GlobalEscape);
        // Add edge to object for unsafe access with offset.
        PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
        assert(adr_ptn != NULL, "node should be registered");
        if (adr_ptn->is_Field()) {
          assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
          add_edge(adr_ptn, ptn);
        }
        break;
      }
      ELSE_FAIL("Op_StoreP");
    }
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      // char[]/byte[] arrays passed to string intrinsics do not escape but
      // they are not scalar replaceable. Adjust escape state for them.
      // Start from in(2) edge since in(1) is memory edge.
      for (uint i = 2; i < n->req(); i++) {
        Node* adr = n->in(i);
        const Type* at = _igvn->type(adr);
        if (!adr->is_top() && at->isa_ptr()) {
          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
                 at->isa_ptr() != NULL, "expecting a pointer");
          if (adr->is_AddP()) {
            adr = get_addp_base(adr);
          }
          PointsToNode* ptn = ptnode_adr(adr->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    default: {
      // This method should be called only for EA specific nodes which may
      // miss some edges when they were created.
#ifdef ASSERT
      n->dump(1);
#endif
      guarantee(false, "unknown node");
    }
  }
  return;
}

void ConnectionGraph::add_call_node(CallNode* call) {
  assert(call->returns_pointer(), "only for call which returns pointer");
  uint call_idx = call->_idx;
  if (call->is_Allocate()) {
    Node* k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != NULL, "TypeKlassPtr required.");
    ciKlass* cik = kt->klass();
    PointsToNode::EscapeState es = PointsToNode::NoEscape;
    bool scalar_replaceable = true;
    if (call->is_AllocateArray()) {
      if (!cik->is_array_klass()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is not constant or too big.
          scalar_replaceable = false;
        }
      }
    } else {  // Allocate instance
      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
          cik->is_subclass_of(_compile->env()->Reference_klass()) ||
         !cik->is_instance_klass() || // StressReflectiveCode
         !cik->as_instance_klass()->can_be_instantiated() ||
          cik->as_instance_klass()->has_finalizer()) {
        es = PointsToNode::GlobalEscape;
      }
    }
    add_java_object(call, es);
    PointsToNode* ptn = ptnode_adr(call_idx);
    if (!scalar_replaceable && ptn->scalar_replaceable()) {
      ptn->set_scalar_replaceable(false);
    }
  } else if (call->is_CallStaticJava()) {
    // Call nodes could be different types:
    //
    // 1. CallDynamicJavaNode (what happened during call is unknown):
    //
    //    - mapped to GlobalEscape JavaObject node if oop is returned;
    //
    //    - all oop arguments are escaping globally;
    //
    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
    //
    //    - the same as CallDynamicJavaNode if bytecode analysis cannot be done;
    //
    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
    //    - mapped to NoEscape JavaObject node if non-escaping object allocated
    //      during call is returned;
    //    - mapped to ArgEscape LocalVar node pointing to object arguments
    //      which are returned and do not escape during the call;
    //
    //    - oop arguments' escaping status is defined by bytecode analysis;
    //
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record whether the call's return value escapes.
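    // Editorial example (not from the original source): for a method like
    //
    //   static Foo identity(Foo f) { return f; }
    //
    // bytecode analysis reports the argument as returned, so the call is
    // mapped below to an ArgEscape LocalVar pointing to that argument.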
    ciMethod* meth = call->as_CallJava()->method();
    if (meth == NULL) {
      const char* name = call->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
      // Returns a newly allocated unescaped object.
      add_java_object(call, PointsToNode::NoEscape);
      ptnode_adr(call_idx)->set_scalar_replaceable(false);
    } else if (meth->is_boxing_method()) {
      // Returns boxing object
      PointsToNode::EscapeState es;
      vmIntrinsics::ID intr = meth->intrinsic_id();
      if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
        // It does not escape if object is always allocated.
        es = PointsToNode::NoEscape;
      } else {
        // It escapes globally if object could be loaded from cache.
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(call, es);
    } else {
      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
      call_analyzer->copy_dependencies(_compile->dependencies());
      if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated unescaped object, simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
        add_java_object(call, PointsToNode::NoEscape);
        ptnode_adr(call_idx)->set_scalar_replaceable(false);
      } else {
        // Determine whether any arguments are returned.
        const TypeTuple* d = call->tf()->domain();
        bool ret_arg = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          if (d->field_at(i)->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
            ret_arg = true;
            break;
          }
        }
        if (ret_arg) {
          add_local_var(call, PointsToNode::ArgEscape);
        } else {
          // Returns unknown object.
          map_ideal_node(call, phantom_obj);
        }
      }
    }
  } else {
    // Another type of call, assume the worst case:
    // returned value is unknown and globally escapes.
    assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
    map_ideal_node(call, phantom_obj);
  }
}
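// Editorial example (not from the original source): in add_call_node() above,
// a boxing call such as Integer.valueOf(i) is mapped to GlobalEscape because
// the returned object may come from the shared cache rather than a fresh
// allocation; Float/Double boxing always allocates, hence NoEscape.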

void ConnectionGraph::process_call_arguments(CallNode *call) {
    bool is_arraycopy = false;
    switch (call->Opcode()) {
#ifdef ASSERT
    case Op_Allocate:
    case Op_AllocateArray:
    case Op_Lock:
    case Op_Unlock:
      assert(false, "should be done already");
      break;
#endif
    case Op_ArrayCopy:
    case Op_CallLeafNoFP:
      // Most array copies are ArrayCopy nodes at this point but there
      // are still a few direct calls to the copy subroutines (See
      // PhaseStringOpts::copy_string())
      is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
        call->as_CallLeaf()->is_call_to_arraycopystub();
      // fall through
    case Op_CallLeaf: {
      // Stub calls: objects do not escape but they are not scalar replaceable.
      // Adjust escape state for outgoing arguments.
      const TypeTuple * d = call->tf()->domain();
      bool src_has_oops = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        Node *arg = call->in(i);
        if (arg == NULL) {
          continue;
        }
        const Type *aat = _igvn->type(arg);
        if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
          continue;
        if (arg->is_AddP()) {
          //
          // The inline_native_clone() case when the arraycopy stub is called
          // after the allocation before Initialize and CheckCastPP nodes.
          // Or normal arraycopy for object arrays case.
          //
          // Set AddP's base (Allocate) as not scalar replaceable since
          // pointer to the base (with offset) is passed as argument.
          //
          arg = get_addp_base(arg);
        }
        PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
        assert(arg_ptn != NULL, "should be registered");
        PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
        if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != NULL, "expecting a Ptr");
          bool arg_has_oops = aat->isa_oopptr() &&
                              (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
                               (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
          if (i == TypeFunc::Parms) {
            src_has_oops = arg_has_oops;
          }
          //
          // src or dst could be j.l.Object when the other is a basic type array:
          //
          //   arraycopy(char[],0,Object*,0,size);
          //   arraycopy(Object*,0,char[],0,size);
          //
          // Don't add edges in such cases.
          //
          bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
                                       arg_has_oops && (i > TypeFunc::Parms);
#ifdef ASSERT
          if (!(is_arraycopy ||
                BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
                (call->as_CallLeaf()->_name != NULL &&
                 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0)
                 ))) {
            call->dump();
            fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
          }
#endif
          // Always process arraycopy's destination object since
          // we need to add all possible edges to references in
          // source object.
          if (arg_esc >= PointsToNode::ArgEscape &&
              !arg_is_arraycopy_dest) {
            continue;
          }
          PointsToNode::EscapeState es = PointsToNode::ArgEscape;
          if (call->is_ArrayCopy()) {
            ArrayCopyNode* ac = call->as_ArrayCopy();
            if (ac->is_clonebasic() ||
                ac->is_arraycopy_validated() ||
                ac->is_copyof_validated() ||
                ac->is_copyofrange_validated()) {
              es = PointsToNode::NoEscape;
            }
          }
          set_escape_state(arg_ptn, es);
          if (arg_is_arraycopy_dest) {
            Node* src = call->in(TypeFunc::Parms);
            if (src->is_AddP()) {
              src = get_addp_base(src);
            }
            PointsToNode* src_ptn = ptnode_adr(src->_idx);
            assert(src_ptn != NULL, "should be registered");
            if (arg_ptn != src_ptn) {
              // Special arraycopy edge:
              // A destination object's field can't have the source object
              // as base since objects escape states are not related.
              // Only escape state of destination object's fields affects
              // escape state of fields in source object.
              add_arraycopy(call, es, src_ptn, arg_ptn);
            }
          }
        }
      }
      break;
    }
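    // Editorial example (not from the original source): for
    //
    //   System.arraycopy(src, 0, dst, 0, n);   // Object[] src, Object[] dst
    //
    // the special arraycopy edge added above lets values reachable from
    // src's fields flow into loads from dst's fields without tying src's
    // escape state to dst's.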
    case Op_CallStaticJava: {
      // For a static call, we know exactly what method is being called.
      // Use bytecode estimator to record the call's escape effects.
#ifdef ASSERT
      const char* name = call->as_CallStaticJava()->_name;
      assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
#endif
      ciMethod* meth = call->as_CallJava()->method();
      if ((meth != NULL) && meth->is_boxing_method()) {
        break; // Boxing methods do not modify any oops.
      }
      BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
      // fall-through if not a Java method or no analyzer information
      if (call_analyzer != NULL) {
        PointsToNode* call_ptn = ptnode_adr(call->_idx);
        const TypeTuple* d = call->tf()->domain();
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          int k = i - TypeFunc::Parms;
          Node* arg = call->in(i);
          PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
          if (at->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(k)) {
            // The call returns arguments.
            if (call_ptn != NULL) { // Is call's result used?
              assert(call_ptn->is_LocalVar(), "node should be registered");
              assert(arg_ptn != NULL, "node should be registered");
              add_edge(call_ptn, arg_ptn);
            }
          }
          if (at->isa_oopptr() != NULL &&
              arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
            if (!call_analyzer->is_arg_stack(k)) {
              // The argument global escapes
              set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
            } else {
              set_escape_state(arg_ptn, PointsToNode::ArgEscape);
              if (!call_analyzer->is_arg_local(k)) {
                // The argument itself doesn't escape, but any fields might
                set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
              }
            }
          }
        }
        if (call_ptn != NULL && call_ptn->is_LocalVar()) {
          // The call returns arguments.
          assert(call_ptn->edge_count() > 0, "sanity");
          if (!call_analyzer->is_return_local()) {
            // Returns also unknown object.
            add_edge(call_ptn, phantom_obj);
          }
        }
        break;
      }
    }
    default: {
      // Fall-through here if not a Java method or no analyzer information
      // or some other type of call, assume the worst case: all arguments
      // globally escape.
      const TypeTuple* d = call->tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != NULL) {
          Node* arg = call->in(i);
          if (arg->is_AddP()) {
            arg = get_addp_base(arg);
          }
          assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
          set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
        }
      }
    }
  }
}


// Finish Graph construction.
bool ConnectionGraph::complete_connection_graph(
                         GrowableArray<PointsToNode*>&   ptnodes_worklist,
                         GrowableArray<JavaObjectNode*>& non_escaped_worklist,
                         GrowableArray<JavaObjectNode*>& java_objects_worklist,
                         GrowableArray<FieldNode*>&      oop_fields_worklist) {
  // Normally only 1-3 passes are needed to build the Connection Graph,
  // depending on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
  // Set the limit to 20 to catch situations where something went wrong and
  // bail out of Escape Analysis.
  // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.
#define CG_BUILD_ITER_LIMIT 20
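// Editorial note (sketch): the do/while loop below is a fixed-point
// computation -- edges are added until an iteration produces no new ones,
// with CG_BUILD_ITER_LIMIT and EscapeAnalysisTimeout guarding against
// pathological graphs.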

  // Propagate GlobalEscape and ArgEscape escape states and check that
  // we still have non-escaping objects. The method pushes Field nodes
  // which reference phantom_object onto _worklist.
  if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
    return false; // Nothing to do.
  }
  // Now propagate references to all JavaObject nodes.
  int java_objects_length = java_objects_worklist.length();
  elapsedTimer time;
  bool timeout = false;
  int new_edges = 1;
  int iterations = 0;
  do {
    while ((new_edges > 0) &&
           (iterations++ < CG_BUILD_ITER_LIMIT)) {
      double start_time = time.seconds();
      time.start();
      new_edges = 0;
      // Propagate references to phantom_object for nodes pushed on _worklist
      // by find_non_escaped_objects() and find_field_value().
      new_edges += add_java_object_edges(phantom_obj, false);
      for (int next = 0; next < java_objects_length; ++next) {
        JavaObjectNode* ptn = java_objects_worklist.at(next);
        new_edges += add_java_object_edges(ptn, true);

#define SAMPLE_SIZE 4
        if ((next % SAMPLE_SIZE) == 0) {
          // Every SAMPLE_SIZE iterations, estimate how much time it will
          // take to complete graph construction.
          time.stop();
          // Poll for requests from shutdown mechanism to quiesce compiler
          // because Connection graph construction may take a long time.
          CompileBroker::maybe_block();
          double stop_time = time.seconds();
          double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
          double time_until_end = time_per_iter * (double)(java_objects_length - next);
          if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
            timeout = true;
            break; // Timeout
          }
          start_time = stop_time;
          time.start();
        }
#undef SAMPLE_SIZE

      }
      if (timeout) break;
      if (new_edges > 0) {
        // Update escape states on each iteration if graph was updated.
        if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
          return false; // Nothing to do.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    }
    if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) {
      time.start();
      // Find fields which have unknown value.
      int fields_length = oop_fields_worklist.length();
      for (int next = 0; next < fields_length; next++) {
        FieldNode* field = oop_fields_worklist.at(next);
        if (field->edge_count() == 0) {
          new_edges += find_field_value(field);
          // This code may have added new edges to phantom_object.
          // Need another cycle to propagate references to phantom_object.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    } else {
      new_edges = 0; // Bailout
    }
  } while (new_edges > 0);

  // Bailout if passed limits.
  if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) {
    Compile* C = _compile;
    if (C->log() != NULL) {
      C->log()->begin_elem("connectionGraph_bailout reason='reached ");
      C->log()->text("%s", timeout ? "time" : "iterations");
      C->log()->end_elem(" limit'");
    }
    assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
           time.seconds(), iterations, nodes_size(), ptnodes_worklist.length());
    // Possible infinite build_connection_graph loop,
    // bailout (no changes to ideal graph were made).
    return false;
  }
#ifdef ASSERT
  if (Verbose && PrintEscapeAnalysis) {
    tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d",
                  iterations, nodes_size(), ptnodes_worklist.length());
  }
#endif

#undef CG_BUILD_ITER_LIMIT

  // Find fields initialized by NULL for non-escaping Allocations.
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    PointsToNode::EscapeState es = ptn->escape_state();
    assert(es <= PointsToNode::ArgEscape, "sanity");
    if (es == PointsToNode::NoEscape) {
      if (find_init_values(ptn, null_obj, _igvn) > 0) {
        // Adding references to NULL object does not change escape states
        // since it does not escape. Also no fields are added to NULL object.
        add_java_object_edges(null_obj, false);
      }
    }
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      // The object allocated by this Allocate node will never be
      // seen by another thread. Mark it so that when it is
      // expanded no MemBarStoreStore is added.
      InitializeNode* ini = n->as_Allocate()->initialization();
      if (ini != NULL)
        ini->set_does_not_escape();
    }
  }
  return true; // Finished graph construction.
}

// Propagate GlobalEscape and ArgEscape escape states to all nodes
// and check that we still have non-escaping java objects.
bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
                                               GrowableArray<JavaObjectNode*>& non_escaped_worklist) {
  GrowableArray<PointsToNode*> escape_worklist;
  // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
  int ptnodes_length = ptnodes_worklist.length();
  for (int next = 0; next < ptnodes_length; ++next) {
    PointsToNode* ptn = ptnodes_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::ArgEscape ||
        ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
      escape_worklist.push(ptn);
    }
  }
  // Set escape states to referenced nodes (edges list).
  while (escape_worklist.length() > 0) {
    PointsToNode* ptn = escape_worklist.pop();
    PointsToNode::EscapeState es = ptn->escape_state();
    PointsToNode::EscapeState field_es = ptn->fields_escape_state();
    if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
        es >= PointsToNode::ArgEscape) {
      // GlobalEscape or ArgEscape state of field means it has unknown value.
      if (add_edge(ptn, phantom_obj)) {
        // New edge was added
        add_field_uses_to_worklist(ptn->as_Field());
      }
    }
    for (EdgeIterator i(ptn); i.has_next(); i.next()) {
      PointsToNode* e = i.get();
      if (e->is_Arraycopy()) {
        assert(ptn->arraycopy_dst(), "sanity");
        // Propagate only fields escape state through arraycopy edge.
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es);
          escape_worklist.push(e);
        }
      } else if (es >= field_es) {
        // fields_escape_state is also set to 'es' if it is less than 'es'.
        if (e->escape_state() < es) {
          set_escape_state(e, es);
          escape_worklist.push(e);
        }
      } else {
        // Propagate field escape state.
        bool es_changed = false;
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es);
          es_changed = true;
        }
        if ((e->escape_state() < field_es) &&
            e->is_Field() && ptn->is_JavaObject() &&
            e->as_Field()->is_oop()) {
          // Change escape state of referenced fields.
          set_escape_state(e, field_es);
          es_changed = true;
        } else if (e->escape_state() < es) {
          set_escape_state(e, es);
          es_changed = true;
        }
        if (es_changed) {
          escape_worklist.push(e);
        }
      }
    }
  }
  // Remove escaped objects from non_escaped list.
  for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
      non_escaped_worklist.delete_at(next);
    }
    if (ptn->escape_state() == PointsToNode::NoEscape) {
      // Find fields in non-escaped allocations which have unknown value.
      find_init_values(ptn, phantom_obj, NULL);
    }
  }
  return (non_escaped_worklist.length() > 0);
}

// Add all references to JavaObject node by walking over all uses.
int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
  int new_edges = 0;
  if (populate_worklist) {
    // Populate _worklist by uses of jobj's uses.
    for (UseIterator i(jobj); i.has_next(); i.next()) {
      PointsToNode* use = i.get();
      if (use->is_Arraycopy())
        continue;
      add_uses_to_worklist(use);
      if (use->is_Field() && use->as_Field()->is_oop()) {
        // Put on worklist all field's uses (loads) and
        // related field nodes (same base and offset).
        add_field_uses_to_worklist(use->as_Field());
      }
    }
  }
  for (int l = 0; l < _worklist.length(); l++) {
    PointsToNode* use = _worklist.at(l);
    if (PointsToNode::is_base_use(use)) {
      // Add reference from jobj to field and from field to jobj (field's base).
      use = PointsToNode::get_use_node(use)->as_Field();
      if (add_base(use->as_Field(), jobj)) {
        new_edges++;
      }
      continue;
    }
    assert(!use->is_JavaObject(), "sanity");
    if (use->is_Arraycopy()) {
      if (jobj == null_obj) // NULL object does not have field edges
        continue;
      // Added edge from Arraycopy node to arraycopy's source java object
      if (add_edge(use, jobj)) {
        jobj->set_arraycopy_src();
        new_edges++;
      }
      // and stop here.
      continue;
    }
    if (!add_edge(use, jobj))
      continue; // No new edge added, there was such edge already.
    new_edges++;
    if (use->is_LocalVar()) {
      add_uses_to_worklist(use);
      if (use->arraycopy_dst()) {
        for (EdgeIterator i(use); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_Arraycopy()) {
            if (jobj == null_obj) // NULL object does not have field edges
              continue;
            // Add edge from arraycopy's destination java object to Arraycopy node.
            if (add_edge(jobj, e)) {
              new_edges++;
              jobj->set_arraycopy_dst();
            }
          }
        }
      }
    } else {
      // A new edge was added to a value stored in the field.
      // Put on worklist all field's uses (loads) and
      // related field nodes (same base and offset).
      add_field_uses_to_worklist(use->as_Field());
    }
  }
  _worklist.clear();
  _in_worklist.Reset();
  return new_edges;
}
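// Editorial sketch (assumed semantics of the function above): when a new
// reference to 'jobj' is discovered, every LocalVar or Field on _worklist
// that can reach it gets a direct edge to 'jobj', and field loads are
// requeued so the new value propagates; the returned edge count lets the
// caller iterate to a fixed point.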
// Put on worklist all related field nodes.
void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
  assert(field->is_oop(), "sanity");
  int offset = field->offset();
  add_uses_to_worklist(field);
  // Loop over all bases of this field and push on worklist Field nodes
  // with the same offset and base (since they may reference the same field).
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    add_fields_to_worklist(field, base);
    // Check if the base was source object of arraycopy and go over arraycopy's
    // destination objects since values stored to a field of source object are
    // accessible by uses (loads) of fields of destination objects.
    if (base->arraycopy_src()) {
      for (UseIterator j(base); j.has_next(); j.next()) {
        PointsToNode* arycp = j.get();
        if (arycp->is_Arraycopy()) {
          for (UseIterator k(arycp); k.has_next(); k.next()) {
            PointsToNode* abase = k.get();
            if (abase->arraycopy_dst() && abase != base) {
              // Look for the same arraycopy reference.
              add_fields_to_worklist(field, abase);
            }
          }
        }
      }
    }
  }
}

// Put on worklist all related field nodes.
void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
  int offset = field->offset();
  if (base->is_LocalVar()) {
    for (UseIterator j(base); j.has_next(); j.next()) {
      PointsToNode* f = j.get();
      if (PointsToNode::is_base_use(f)) { // Field
        f = PointsToNode::get_use_node(f);
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  } else {
    assert(base->is_JavaObject(), "sanity");
    if (// Skip phantom_object since it is only used to indicate that
        // this field's content globally escapes.
        (base != phantom_obj) &&
        // NULL object node does not have fields.
        (base != null_obj)) {
      for (EdgeIterator i(base); i.has_next(); i.next()) {
        PointsToNode* f = i.get();
        // Skip arraycopy edge since store to destination object field
        // does not update value in source object field.
        if (f->is_Arraycopy()) {
          assert(base->arraycopy_dst(), "sanity");
          continue;
        }
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  }
}

// Find fields which have unknown value.
int ConnectionGraph::find_field_value(FieldNode* field) {
  // Escaped fields should have init value already.
  assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
  int new_edges = 0;
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    if (base->is_JavaObject()) {
      // Skip Allocate's fields which will be processed later.
      if (base->ideal_node()->is_Allocate())
        return 0;
      assert(base == null_obj, "only NULL ptr base expected here");
    }
  }
  if (add_edge(field, phantom_obj)) {
    // New edge was added
    new_edges++;
    add_field_uses_to_worklist(field);
  }
  return new_edges;
}
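// Editorial example (not from the original source): for
//
//   Foo f = new Foo();           // Foo declares 'Object o;' and never sets it
//   if (f.o == null) { ... }
//
// no initializing store for 'o' is captured by the Initialize node, so
// find_init_values() below adds an edge from the field to null_obj and the
// pointer-compare optimization sees the default null initialization.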
// Find fields initializing values for allocations.
int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) {
  assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
  int new_edges = 0;
  Node* alloc = pta->ideal_node();
  if (init_val == phantom_obj) {
    // Do nothing for Allocate nodes since its fields values are
    // "known" unless they are initialized by arraycopy/clone.
    if (alloc->is_Allocate() && !pta->arraycopy_dst())
      return 0;
    assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
#ifdef ASSERT
    if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) {
      const char* name = alloc->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
    }
#endif
    // A non-escaped allocation returned from a Java or runtime call has
    // unknown values in fields.
    for (EdgeIterator i(pta); i.has_next(); i.next()) {
      PointsToNode* field = i.get();
      if (field->is_Field() && field->as_Field()->is_oop()) {
        if (add_edge(field, phantom_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(field->as_Field());
        }
      }
    }
    return new_edges;
  }
  assert(init_val == null_obj, "sanity");
  // Do nothing for Call nodes since its fields values are unknown.
  if (!alloc->is_Allocate())
    return 0;

  InitializeNode* ini = alloc->as_Allocate()->initialization();
  bool visited_bottom_offset = false;
  GrowableArray<int> offsets_worklist;

  // Check if an oop field's initializing value is recorded and add
  // a corresponding NULL to the field's value if it is not recorded.
  // Connection Graph does not record a default initialization by NULL
  // captured by Initialize node.
  //
  for (EdgeIterator i(pta); i.has_next(); i.next()) {
    PointsToNode* field = i.get(); // Field (AddP)
    if (!field->is_Field() || !field->as_Field()->is_oop())
      continue; // Not oop field
    int offset = field->as_Field()->offset();
    if (offset == Type::OffsetBot) {
      if (!visited_bottom_offset) {
        // OffsetBot is used to reference array's element;
        // always add reference to NULL to all Field nodes since we don't
        // know which element is referenced.
        if (add_edge(field, null_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(field->as_Field());
          visited_bottom_offset = true;
        }
      }
    } else {
      // Check only oop fields.
      const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
      if (adr_type->isa_rawptr()) {
#ifdef ASSERT
        // Raw pointers are used for initializing stores so skip it
        // since it should be recorded already
        Node* base = get_addp_base(field->ideal_node());
        assert(adr_type->isa_rawptr() && base->is_Proj() &&
               (base->in(0) == alloc),"unexpected pointer type");
#endif
        continue;
      }
      if (!offsets_worklist.contains(offset)) {
        offsets_worklist.append(offset);
        Node* value = NULL;
        if (ini != NULL) {
          // StoreP::memory_type() == T_ADDRESS
          BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
          Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
          // Make sure the initializing store has the same type as this AddP.
          // This AddP may reference a non-existing field because it is on a
          // dead branch of a bimorphic call which is not eliminated yet.
1595           if (store != NULL && store->is_Store() &&
1596               store->as_Store()->memory_type() == ft) {
1597             value = store->in(MemNode::ValueIn);
1598 #ifdef ASSERT
1599             if (VerifyConnectionGraph) {
1600               // Verify that AddP already points to all objects the value points to.
1601               PointsToNode* val = ptnode_adr(value->_idx);
1602               assert((val != NULL), "should be processed already");
1603               PointsToNode* missed_obj = NULL;
1604               if (val->is_JavaObject()) {
1605                 if (!field->points_to(val->as_JavaObject())) {
1606                   missed_obj = val;
1607                 }
1608               } else {
1609                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
1610                   tty->print_cr("----------init store has invalid value -----");
1611                   store->dump();
1612                   val->dump();
1613                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
1614                 }
1615                 for (EdgeIterator j(val); j.has_next(); j.next()) {
1616                   PointsToNode* obj = j.get();
1617                   if (obj->is_JavaObject()) {
1618                     if (!field->points_to(obj->as_JavaObject())) {
1619                       missed_obj = obj;
1620                       break;
1621                     }
1622                   }
1623                 }
1624               }
1625               if (missed_obj != NULL) {
1626                 tty->print_cr("----------field---------------------------------");
1627                 field->dump();
1628                 tty->print_cr("----------missed reference to object------------");
1629                 missed_obj->dump();
1630                 tty->print_cr("----------object referenced by init store-------");
1631                 store->dump();
1632                 val->dump();
1633                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
1634               }
1635             }
1636 #endif
1637           } else {
1638             // There could be initializing stores which follow the allocation.
1639             // For example, a volatile field store is not collected
1640             // by the Initialize node.
1641             //
1642             // Need to check for dependent loads to separate such stores from
1643             // stores which follow loads. For now, add the initial value NULL so
1644             // that the pointer-compare optimization works correctly.
1645           }
1646         }
1647         if (value == NULL) {
1648           // A field's initializing value was not recorded. Add NULL.
1649           if (add_edge(field, null_obj)) {
1650             // New edge was added
1651             new_edges++;
1652             add_field_uses_to_worklist(field->as_Field());
1653           }
1654         }
1655       }
1656     }
1657   }
1658   return new_edges;
1659 }
1660
1661 // Adjust the scalar_replaceable state after the Connection Graph is built.
1662 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {
1663   // Search for non-escaping objects which are not scalar replaceable
1664   // and mark them to propagate the state to referenced objects.
1665
1666   // 1. An object is not scalar replaceable if the field into which it is
1667   //    stored has an unknown offset (it is stored into an unknown array element).
1668   //
1669   for (UseIterator i(jobj); i.has_next(); i.next()) {
1670     PointsToNode* use = i.get();
1671     if (use->is_Arraycopy()) {
1672       continue;
1673     }
1674     if (use->is_Field()) {
1675       FieldNode* field = use->as_Field();
1676       assert(field->is_oop() && field->scalar_replaceable(), "sanity");
1677       if (field->offset() == Type::OffsetBot) {
1678         jobj->set_scalar_replaceable(false);
1679         return;
1680       }
1681       // 2. An object is not scalar replaceable if the field into which it is
1682       //    stored has multiple bases, one of which is null.
1683       if (field->base_count() > 1) {
1684         for (BaseIterator i(field); i.has_next(); i.next()) {
1685           PointsToNode* base = i.get();
1686           if (base == null_obj) {
1687             jobj->set_scalar_replaceable(false);
1688             return;
1689           }
1690         }
1691       }
1692     }
1693     assert(use->is_Field() || use->is_LocalVar(), "sanity");
1694     // 3. An object is not scalar replaceable if it is merged with other objects.
1695     for (EdgeIterator j(use); j.has_next(); j.next()) {
1696       PointsToNode* ptn = j.get();
1697       if (ptn->is_JavaObject() && ptn != jobj) {
1698         // Mark all objects.
1699         jobj->set_scalar_replaceable(false);
1700         ptn->set_scalar_replaceable(false);
1701       }
1702     }
1703     if (!jobj->scalar_replaceable()) {
1704       return;
1705     }
1706   }
1707
1708   for (EdgeIterator j(jobj); j.has_next(); j.next()) {
1709     if (j.get()->is_Arraycopy()) {
1710       continue;
1711     }
1712
1713     // A non-escaping object node should point only to field nodes.
1714     FieldNode* field = j.get()->as_Field();
1715     int offset = field->as_Field()->offset();
1716
1717     // 4. An object is not scalar replaceable if it has a field with an unknown
1718     //    offset (an array's element is accessed in a loop).
1719     if (offset == Type::OffsetBot) {
1720       jobj->set_scalar_replaceable(false);
1721       return;
1722     }
1723     // 5. Currently an object is not scalar replaceable if a LoadStore node
1724     //    accesses its field, since the field value is unknown after it.
1725     //
1726     Node* n = field->ideal_node();
1727     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1728       if (n->fast_out(i)->is_LoadStore()) {
1729         jobj->set_scalar_replaceable(false);
1730         return;
1731       }
1732     }
1733
1734     // 6. Or the address may point to more than one object. This may produce
1735     //    a false positive result (setting not scalar replaceable),
1736     //    since the flow-insensitive escape analysis can't separate
1737     //    the case where stores overwrite the field's value from the case
1738     //    where stores happen on different control branches.
1739     //
1740     // Note: it will disable scalar replacement in some cases:
1741     //
1742     //    Point p[] = new Point[1];
1743     //    p[0] = new Point(); // Will not be scalar replaced
1744     //
1745     // but it saves us from incorrect optimizations in cases like the following:
1746     //
1747     //    Point p[] = new Point[1];
1748     //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
1749     //
1750     if (field->base_count() > 1) {
1751       for (BaseIterator i(field); i.has_next(); i.next()) {
1752         PointsToNode* base = i.get();
1753         // Don't take LocalVar nodes into account: they may point to only one
1754         // object, which should also be this field's base by now.
1755         //
1756         if (base->is_JavaObject() && base != jobj) {
1757           // Mark all bases.
1758           jobj->set_scalar_replaceable(false);
1759           base->set_scalar_replaceable(false);
1760         }
1761       }
1762     }
1763   }
1764 }
1765
1766 #ifdef ASSERT
1767 void ConnectionGraph::verify_connection_graph(
1768                       GrowableArray<PointsToNode*>& ptnodes_worklist,
1769                       GrowableArray<JavaObjectNode*>& non_escaped_worklist,
1770                       GrowableArray<JavaObjectNode*>& java_objects_worklist,
1771                       GrowableArray<Node*>& addp_worklist) {
1772   // Verify that the graph is complete - no new edges could be added.
1773   int java_objects_length = java_objects_worklist.length();
1774   int non_escaped_length = non_escaped_worklist.length();
1775   int new_edges = 0;
1776   for (int next = 0; next < java_objects_length; ++next) {
1777     JavaObjectNode* ptn = java_objects_worklist.at(next);
1778     new_edges += add_java_object_edges(ptn, true);
1779   }
1780   assert(new_edges == 0, "graph was not complete");
1781   // Verify that the escape state is final.
1782 int length = non_escaped_worklist.length(); 1783 find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist); 1784 assert((non_escaped_length == non_escaped_worklist.length()) && 1785 (non_escaped_length == length) && 1786 (_worklist.length() == 0), "escape state was not final"); 1787 1788 // Verify fields information. 1789 int addp_length = addp_worklist.length(); 1790 for (int next = 0; next < addp_length; ++next ) { 1791 Node* n = addp_worklist.at(next); 1792 FieldNode* field = ptnode_adr(n->_idx)->as_Field(); 1793 if (field->is_oop()) { 1794 // Verify that field has all bases 1795 Node* base = get_addp_base(n); 1796 PointsToNode* ptn = ptnode_adr(base->_idx); 1797 if (ptn->is_JavaObject()) { 1798 assert(field->has_base(ptn->as_JavaObject()), "sanity"); 1799 } else { 1800 assert(ptn->is_LocalVar(), "sanity"); 1801 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 1802 PointsToNode* e = i.get(); 1803 if (e->is_JavaObject()) { 1804 assert(field->has_base(e->as_JavaObject()), "sanity"); 1805 } 1806 } 1807 } 1808 // Verify that all fields have initializing values. 1809 if (field->edge_count() == 0) { 1810 tty->print_cr("----------field does not have references----------"); 1811 field->dump(); 1812 for (BaseIterator i(field); i.has_next(); i.next()) { 1813 PointsToNode* base = i.get(); 1814 tty->print_cr("----------field has next base---------------------"); 1815 base->dump(); 1816 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) { 1817 tty->print_cr("----------base has fields-------------------------"); 1818 for (EdgeIterator j(base); j.has_next(); j.next()) { 1819 j.get()->dump(); 1820 } 1821 tty->print_cr("----------base has references---------------------"); 1822 for (UseIterator j(base); j.has_next(); j.next()) { 1823 j.get()->dump(); 1824 } 1825 } 1826 } 1827 for (UseIterator i(field); i.has_next(); i.next()) { 1828 i.get()->dump(); 1829 } 1830 assert(field->edge_count() > 0, "sanity"); 1831 } 1832 } 1833 } 1834 } 1835 #endif 1836 1837 // Optimize ideal graph. 1838 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist, 1839 GrowableArray<Node*>& storestore_worklist) { 1840 Compile* C = _compile; 1841 PhaseIterGVN* igvn = _igvn; 1842 if (EliminateLocks) { 1843 // Mark locks before changing ideal graph. 1844 int cnt = C->macro_count(); 1845 for( int i=0; i < cnt; i++ ) { 1846 Node *n = C->macro_node(i); 1847 if (n->is_AbstractLock()) { // Lock and Unlock nodes 1848 AbstractLockNode* alock = n->as_AbstractLock(); 1849 if (!alock->is_non_esc_obj()) { 1850 if (not_global_escape(alock->obj_node())) { 1851 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity"); 1852 // The lock could be marked eliminated by lock coarsening 1853 // code during first IGVN before EA. Replace coarsened flag 1854 // to eliminate all associated locks/unlocks. 1855 #ifdef ASSERT 1856 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3"); 1857 #endif 1858 alock->set_non_esc_obj(); 1859 } 1860 } 1861 } 1862 } 1863 } 1864 1865 if (OptimizePtrCompare) { 1866 // Add ConI(#CC_GT) and ConI(#CC_EQ). 1867 _pcmp_neq = igvn->makecon(TypeInt::CC_GT); 1868 _pcmp_eq = igvn->makecon(TypeInt::CC_EQ); 1869 // Optimize objects compare. 1870 while (ptr_cmp_worklist.length() != 0) { 1871 Node *n = ptr_cmp_worklist.pop(); 1872 Node *res = optimize_ptr_compare(n); 1873 if (res != NULL) { 1874 #ifndef PRODUCT 1875 if (PrintOptimizePtrCompare) { 1876 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? 
"CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ")); 1877 if (Verbose) { 1878 n->dump(1); 1879 } 1880 } 1881 #endif 1882 igvn->replace_node(n, res); 1883 } 1884 } 1885 // cleanup 1886 if (_pcmp_neq->outcnt() == 0) 1887 igvn->hash_delete(_pcmp_neq); 1888 if (_pcmp_eq->outcnt() == 0) 1889 igvn->hash_delete(_pcmp_eq); 1890 } 1891 1892 // For MemBarStoreStore nodes added in library_call.cpp, check 1893 // escape status of associated AllocateNode and optimize out 1894 // MemBarStoreStore node if the allocated object never escapes. 1895 while (storestore_worklist.length() != 0) { 1896 Node *n = storestore_worklist.pop(); 1897 MemBarStoreStoreNode *storestore = n ->as_MemBarStoreStore(); 1898 Node *alloc = storestore->in(MemBarNode::Precedent)->in(0); 1899 assert (alloc->is_Allocate(), "storestore should point to AllocateNode"); 1900 if (not_global_escape(alloc)) { 1901 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot); 1902 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory)); 1903 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control)); 1904 igvn->register_new_node_with_optimizer(mb); 1905 igvn->replace_node(storestore, mb); 1906 } 1907 } 1908 } 1909 1910 // Optimize objects compare. 1911 Node* ConnectionGraph::optimize_ptr_compare(Node* n) { 1912 assert(OptimizePtrCompare, "sanity"); 1913 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx); 1914 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx); 1915 JavaObjectNode* jobj1 = unique_java_object(n->in(1)); 1916 JavaObjectNode* jobj2 = unique_java_object(n->in(2)); 1917 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity"); 1918 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity"); 1919 1920 // Check simple cases first. 1921 if (jobj1 != NULL) { 1922 if (jobj1->escape_state() == PointsToNode::NoEscape) { 1923 if (jobj1 == jobj2) { 1924 // Comparing the same not escaping object. 1925 return _pcmp_eq; 1926 } 1927 Node* obj = jobj1->ideal_node(); 1928 // Comparing not escaping allocation. 1929 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 1930 !ptn2->points_to(jobj1)) { 1931 return _pcmp_neq; // This includes nullness check. 1932 } 1933 } 1934 } 1935 if (jobj2 != NULL) { 1936 if (jobj2->escape_state() == PointsToNode::NoEscape) { 1937 Node* obj = jobj2->ideal_node(); 1938 // Comparing not escaping allocation. 1939 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 1940 !ptn1->points_to(jobj2)) { 1941 return _pcmp_neq; // This includes nullness check. 1942 } 1943 } 1944 } 1945 if (jobj1 != NULL && jobj1 != phantom_obj && 1946 jobj2 != NULL && jobj2 != phantom_obj && 1947 jobj1->ideal_node()->is_Con() && 1948 jobj2->ideal_node()->is_Con()) { 1949 // Klass or String constants compare. Need to be careful with 1950 // compressed pointers - compare types of ConN and ConP instead of nodes. 1951 const Type* t1 = jobj1->ideal_node()->get_ptr_type(); 1952 const Type* t2 = jobj2->ideal_node()->get_ptr_type(); 1953 if (t1->make_ptr() == t2->make_ptr()) { 1954 return _pcmp_eq; 1955 } else { 1956 return _pcmp_neq; 1957 } 1958 } 1959 if (ptn1->meet(ptn2)) { 1960 return NULL; // Sets are not disjoint 1961 } 1962 1963 // Sets are disjoint. 
1964   bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
1965   bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
1966   bool set1_has_null_ptr = ptn1->points_to(null_obj);
1967   bool set2_has_null_ptr = ptn2->points_to(null_obj);
1968   if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
1969       (set2_has_unknown_ptr && set1_has_null_ptr)) {
1970     // Check nullness of unknown object.
1971     return NULL;
1972   }
1973
1974   // Disjointness by itself is not sufficient since
1975   // alias analysis is not complete for escaped objects.
1976   // Disjoint sets are definitely unrelated only when
1977   // at least one set has only non-escaping allocations.
1978   if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
1979     if (ptn1->non_escaping_allocation()) {
1980       return _pcmp_neq;
1981     }
1982   }
1983   if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
1984     if (ptn2->non_escaping_allocation()) {
1985       return _pcmp_neq;
1986     }
1987   }
1988   return NULL;
1989 }
1990
1991 // Connection Graph construction functions.
1992
1993 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
1994   PointsToNode* ptadr = _nodes.at(n->_idx);
1995   if (ptadr != NULL) {
1996     assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
1997     return;
1998   }
1999   Compile* C = _compile;
2000   ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
2001   _nodes.at_put(n->_idx, ptadr);
2002 }
2003
2004 void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
2005   PointsToNode* ptadr = _nodes.at(n->_idx);
2006   if (ptadr != NULL) {
2007     assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
2008     return;
2009   }
2010   Compile* C = _compile;
2011   ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
2012   _nodes.at_put(n->_idx, ptadr);
2013 }
2014
2015 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
2016   PointsToNode* ptadr = _nodes.at(n->_idx);
2017   if (ptadr != NULL) {
2018     assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
2019     return;
2020   }
2021   bool unsafe = false;
2022   bool is_oop = is_oop_field(n, offset, &unsafe);
2023   if (unsafe) {
2024     es = PointsToNode::GlobalEscape;
2025   }
2026   Compile* C = _compile;
2027   FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
2028   _nodes.at_put(n->_idx, field);
2029 }
2030
2031 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
2032                                     PointsToNode* src, PointsToNode* dst) {
2033   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
2034   assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
2035   PointsToNode* ptadr = _nodes.at(n->_idx);
2036   if (ptadr != NULL) {
2037     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
2038     return;
2039   }
2040   Compile* C = _compile;
2041   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
2042   _nodes.at_put(n->_idx, ptadr);
2043   // Add an edge from the arraycopy node to the source object.
2044   (void)add_edge(ptadr, src);
2045   src->set_arraycopy_src();
2046   // Add an edge from the destination object to the arraycopy node.
2047   (void)add_edge(dst, ptadr);
2048   dst->set_arraycopy_dst();
2049 }
2050
2051 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2052   const Type* adr_type = n->as_AddP()->bottom_type();
2053   BasicType bt = T_INT;
2054   if (offset == Type::OffsetBot) {
2055     // Check only oop fields.
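// Illustrative note (added): an access with a variable index, e.g.
//
//   Object[] a = ...;
//   a[i] = x;   // element offset not known at compile time
//
// produces an AddP whose offset is Type::OffsetBot, so it is treated as
// an oop field below unless the array is known to hold primitives.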
2056     if (!adr_type->isa_aryptr() ||
2057         (adr_type->isa_aryptr()->klass() == NULL) ||
2058         adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
2059       // OffsetBot is used to reference an array's element. Ignore the first AddP.
2060       if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2061         bt = T_OBJECT;
2062       }
2063     }
2064   } else if (offset != oopDesc::klass_offset_in_bytes()) {
2065     if (adr_type->isa_instptr()) {
2066       ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
2067       if (field != NULL) {
2068         bt = field->layout_type();
2069       } else {
2070         // Check for unsafe oop field access
2071         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2072             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2073             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
2074           bt = T_OBJECT;
2075           (*unsafe) = true;
2076         }
2077       }
2078     } else if (adr_type->isa_aryptr()) {
2079       if (offset == arrayOopDesc::length_offset_in_bytes()) {
2080         // Ignore array length load.
2081       } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2082         // Ignore the first AddP.
2083       } else {
2084         const Type* elemtype = adr_type->isa_aryptr()->elem();
2085         bt = elemtype->array_element_basic_type();
2086       }
2087     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2088       // Allocation initialization, ThreadLocal field access, unsafe access
2089       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2090           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2091           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
2092         bt = T_OBJECT;
2093       }
2094     }
2095   }
2096   return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
2097 }
2098
2099 // Returns the unique pointed-to java object, or NULL.
2100 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2101   assert(!_collecting, "should not call during graph construction");
2102   // If the node was created after the escape computation we can't answer.
2103   uint idx = n->_idx;
2104   if (idx >= nodes_size()) {
2105     return NULL;
2106   }
2107   PointsToNode* ptn = ptnode_adr(idx);
2108   if (ptn->is_JavaObject()) {
2109     return ptn->as_JavaObject();
2110   }
2111   assert(ptn->is_LocalVar(), "sanity");
2112   // Check all java objects it points to.
2113   JavaObjectNode* jobj = NULL;
2114   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2115     PointsToNode* e = i.get();
2116     if (e->is_JavaObject()) {
2117       if (jobj == NULL) {
2118         jobj = e->as_JavaObject();
2119       } else if (jobj != e) {
2120         return NULL;
2121       }
2122     }
2123   }
2124   return jobj;
2125 }
2126
2127 // Return true if this node points only to non-escaping allocations.
2128 bool PointsToNode::non_escaping_allocation() {
2129   if (is_JavaObject()) {
2130     Node* n = ideal_node();
2131     if (n->is_Allocate() || n->is_CallStaticJava()) {
2132       return (escape_state() == PointsToNode::NoEscape);
2133     } else {
2134       return false;
2135     }
2136   }
2137   assert(is_LocalVar(), "sanity");
2138   // Check all java objects it points to.
2139   for (EdgeIterator i(this); i.has_next(); i.next()) {
2140     PointsToNode* e = i.get();
2141     if (e->is_JavaObject()) {
2142       Node* n = e->ideal_node();
2143       if ((e->escape_state() != PointsToNode::NoEscape) ||
2144           !(n->is_Allocate() || n->is_CallStaticJava())) {
2145         return false;
2146       }
2147     }
2148   }
2149   return true;
2150 }
2151
2152 // Return true if we know the node does not escape globally.
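// Illustrative note (added): lock elimination in optimize_ideal_graph()
// above relies on this query, e.g. for
//
//   synchronized (new Object()) { ... }   // lock object never escapes
//
// not_global_escape() on the lock's object node allows the associated
// Lock/Unlock nodes to be marked for elimination.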
2153 bool ConnectionGraph::not_global_escape(Node *n) {
2154   assert(!_collecting, "should not call during graph construction");
2155   // If the node was created after the escape computation we can't answer.
2156   uint idx = n->_idx;
2157   if (idx >= nodes_size()) {
2158     return false;
2159   }
2160   PointsToNode* ptn = ptnode_adr(idx);
2161   PointsToNode::EscapeState es = ptn->escape_state();
2162   // If we have already computed a value, return it.
2163   if (es >= PointsToNode::GlobalEscape)
2164     return false;
2165   if (ptn->is_JavaObject()) {
2166     return true; // (es < PointsToNode::GlobalEscape);
2167   }
2168   assert(ptn->is_LocalVar(), "sanity");
2169   // Check all java objects it points to.
2170   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2171     if (i.get()->escape_state() >= PointsToNode::GlobalEscape)
2172       return false;
2173   }
2174   return true;
2175 }
2176
2177
2178 // Helper functions
2179
2180 // Return true if this node is, or points to, the specified node.
2181 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
2182   if (is_JavaObject()) {
2183     return (this == ptn);
2184   }
2185   assert(is_LocalVar() || is_Field(), "sanity");
2186   for (EdgeIterator i(this); i.has_next(); i.next()) {
2187     if (i.get() == ptn)
2188       return true;
2189   }
2190   return false;
2191 }
2192
2193 // Return true if one node points to the other.
2194 bool PointsToNode::meet(PointsToNode* ptn) {
2195   if (this == ptn) {
2196     return true;
2197   } else if (ptn->is_JavaObject()) {
2198     return this->points_to(ptn->as_JavaObject());
2199   } else if (this->is_JavaObject()) {
2200     return ptn->points_to(this->as_JavaObject());
2201   }
2202   assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
2203   int ptn_count = ptn->edge_count();
2204   for (EdgeIterator i(this); i.has_next(); i.next()) {
2205     PointsToNode* this_e = i.get();
2206     for (int j = 0; j < ptn_count; j++) {
2207       if (this_e == ptn->edge(j))
2208         return true;
2209     }
2210   }
2211   return false;
2212 }
2213
2214 #ifdef ASSERT
2215 // Return true if this field's bases include the given java object.
2216 bool FieldNode::has_base(JavaObjectNode* jobj) const {
2217   for (BaseIterator i(this); i.has_next(); i.next()) {
2218     if (i.get() == jobj)
2219       return true;
2220   }
2221   return false;
2222 }
2223 #endif
2224
2225 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
2226   const Type *adr_type = phase->type(adr);
2227   if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
2228       adr->in(AddPNode::Address)->is_Proj() &&
2229       adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
2230     // We are computing a raw address for a store captured by an Initialize;
2231     // compute an appropriate address type. AddP cases #3 and #5 (see below).
2232     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2233     assert(offs != Type::OffsetBot ||
2234            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
2235            "offset must be a constant or it is initialization of array");
2236     return offs;
2237   }
2238   const TypePtr *t_ptr = adr_type->isa_ptr();
2239   assert(t_ptr != NULL, "must be a pointer type");
2240   return t_ptr->offset();
2241 }
2242
2243 Node* ConnectionGraph::get_addp_base(Node *addp) {
2244   assert(addp->is_AddP(), "must be AddP");
2245   //
2246   // AddP cases for Base and Address inputs:
2247   // case #1. Direct object's field reference:
2248   //     Allocate
2249   //       |
2250   //     Proj #5 ( oop result )
2251   //       |
2252   //     CheckCastPP (cast to instance type)
2253   //      | |
2254   //     AddP  ( base == address )
2255   //
2256   // case #2. Indirect object's field reference:
2257   //      Phi
2258   //       |
2259   //     CastPP (cast to instance type)
2260   //      | |
2261   //     AddP  ( base == address )
2262   //
2263   // case #3. Raw object's field reference for Initialize node:
2264   //     Allocate
2265   //       |
2266   //     Proj #5 ( oop result )
2267   //  top   |
2268   //     \  |
2269   //     AddP  ( base == top )
2270   //
2271   // case #4. Array's element reference:
2272   //   {CheckCastPP | CastPP}
2273   //     |  | |
2274   //     |  AddP ( array's element offset )
2275   //     |  |
2276   //     AddP ( array's offset )
2277   //
2278   // case #5. Raw object's field reference for arraycopy stub call:
2279   //          The inline_native_clone() case when the arraycopy stub is called
2280   //          after the allocation before Initialize and CheckCastPP nodes.
2281   //     Allocate
2282   //       |
2283   //     Proj #5 ( oop result )
2284   //       | |
2285   //     AddP  ( base == address )
2286   //
2287   // case #6. Constant Pool, ThreadLocal, CastX2P or
2288   //          Raw object's field reference:
2289   //     {ConP, ThreadLocal, CastX2P, raw Load}
2290   //  top   |
2291   //     \  |
2292   //     AddP  ( base == top )
2293   //
2294   // case #7. Klass's field reference.
2295   //     LoadKlass
2296   //       | |
2297   //     AddP  ( base == address )
2298   //
2299   // case #8. narrow Klass's field reference.
2300   //     LoadNKlass
2301   //       |
2302   //     DecodeN
2303   //       | |
2304   //     AddP  ( base == address )
2305   //
2306   // case #9. Mixed unsafe access
2307   //     {instance}
2308   //       |
2309   //     CheckCastPP (raw)
2310   //  top   |
2311   //     \  |
2312   //     AddP  ( base == top )
2313   //
2314   Node *base = addp->in(AddPNode::Base);
2315   if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.
2316     base = addp->in(AddPNode::Address);
2317     while (base->is_AddP()) {
2318       // Case #6 (unsafe access) may have several chained AddP nodes.
2319       assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
2320       base = base->in(AddPNode::Address);
2321     }
2322     if (base->Opcode() == Op_CheckCastPP &&
2323         base->bottom_type()->isa_rawptr() &&
2324         _igvn->type(base->in(1))->isa_oopptr()) {
2325       base = base->in(1); // Case #9
2326     } else {
2327       Node* uncast_base = base->uncast();
2328       int opcode = uncast_base->Opcode();
2329       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
2330              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
2331              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
2332              (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
2333     }
2334   }
2335   return base;
2336 }
2337
2338 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
2339   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
2340   Node* addp2 = addp->raw_out(0);
2341   if (addp->outcnt() == 1 && addp2->is_AddP() &&
2342       addp2->in(AddPNode::Base) == n &&
2343       addp2->in(AddPNode::Address) == addp) {
2344     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
2345     //
2346     // Find the array's offset to push it on the worklist first, and as a
2347     // result process the array's element offset first (pushed second),
2348     // to avoid a CastPP for the array's offset.
2349     // Otherwise the inserted CastPP (LocalVar) will point to what
2350     // the AddP (Field) points to, which would be wrong since
2351     // the algorithm expects that the CastPP points to the same objects as
2352     // the AddP's base CheckCastPP (LocalVar).
2353   //
2354   //     ArrayAllocation
2355   //       |
2356   //     CheckCastPP
2357   //       |
2358   //     memProj (from ArrayAllocation CheckCastPP)
2359   //       |  ||
2360   //       |  ||   Int (element index)
2361   //       |  ||    |   ConI (log(element size))
2362   //       |  ||    |   /
2363   //       |  ||   LShift
2364   //       |  ||  /
2365   //       |  AddP (array's element offset)
2366   //       |  |
2367   //       |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
2368   //       | / /
2369   //     AddP (array's offset)
2370   //       |
2371   //     Load/Store (memory operation on array's element)
2372   //
2373     return addp2;
2374   }
2375   return NULL;
2376 }
2377
2378 //
2379 // Adjust the type and inputs of an AddP which computes the
2380 // address of a field of an instance
2381 //
2382 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2383   PhaseGVN* igvn = _igvn;
2384   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
2385   assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
2386   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
2387   if (t == NULL) {
2388     // We are computing a raw address for a store captured by an Initialize;
2389     // compute an appropriate address type (cases #3 and #5).
2390     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
2391     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
2392     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
2393     assert(offs != Type::OffsetBot, "offset must be a constant");
2394     t = base_t->add_offset(offs)->is_oopptr();
2395   }
2396   int inst_id = base_t->instance_id();
2397   assert(!t->is_known_instance() || t->instance_id() == inst_id,
2398          "old type must be non-instance or match new type");
2399
2400   // The type 't' could be a subclass of 'base_t'.
2401   // As a result t->offset() could be larger than base_t's size, which would
2402   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
2403   // constructor verifies the correctness of the offset.
2404   //
2405   // This can happen on a subclass's branch (from type profile
2406   // inlining) which was not eliminated during parsing since the exactness
2407   // of the allocation type was not propagated to the subclass type check.
2408   //
2409   // Or the type 't' might not be related to 'base_t' at all.
2410   // This can happen when the CHA type differs from the MDO type on a dead path
2411   // (for example, from an instanceof check) which is not collapsed during parsing.
2412   //
2413   // Do nothing for such an AddP node and don't process its users since
2414   // this code branch will go away.
2415   //
2416   if (!t->is_known_instance() &&
2417       !base_t->klass()->is_subtype_of(t->klass())) {
2418     return false; // bail out
2419   }
2420   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
2421   // Do NOT remove the next line: ensure a new alias index is allocated
2422   // for the instance type. Note: C++ will not remove it since the call
2423   // has a side effect.
2424   int alias_idx = _compile->get_alias_index(tinst);
2425   igvn->set_type(addp, tinst);
2426   // Record the allocation in the node map.
2427   set_map(addp, get_map(base->_idx));
2428   // Set addp's Base and Address to 'base'.
2429   Node *abase = addp->in(AddPNode::Base);
2430   Node *adr = addp->in(AddPNode::Address);
2431   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2432       adr->in(0)->_idx == (uint)inst_id) {
2433     // Skip AddP cases #3 and #5.
2434 } else { 2435 assert(!abase->is_top(), "sanity"); // AddP case #3 2436 if (abase != base) { 2437 igvn->hash_delete(addp); 2438 addp->set_req(AddPNode::Base, base); 2439 if (abase == adr) { 2440 addp->set_req(AddPNode::Address, base); 2441 } else { 2442 // AddP case #4 (adr is array's element offset AddP node) 2443 #ifdef ASSERT 2444 const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr(); 2445 assert(adr->is_AddP() && atype != NULL && 2446 atype->instance_id() == inst_id, "array's element offset should be processed first"); 2447 #endif 2448 } 2449 igvn->hash_insert(addp); 2450 } 2451 } 2452 // Put on IGVN worklist since at least addp's type was changed above. 2453 record_for_optimizer(addp); 2454 return true; 2455 } 2456 2457 // 2458 // Create a new version of orig_phi if necessary. Returns either the newly 2459 // created phi or an existing phi. Sets create_new to indicate whether a new 2460 // phi was created. Cache the last newly created phi in the node map. 2461 // 2462 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) { 2463 Compile *C = _compile; 2464 PhaseGVN* igvn = _igvn; 2465 new_created = false; 2466 int phi_alias_idx = C->get_alias_index(orig_phi->adr_type()); 2467 // nothing to do if orig_phi is bottom memory or matches alias_idx 2468 if (phi_alias_idx == alias_idx) { 2469 return orig_phi; 2470 } 2471 // Have we recently created a Phi for this alias index? 2472 PhiNode *result = get_map_phi(orig_phi->_idx); 2473 if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) { 2474 return result; 2475 } 2476 // Previous check may fail when the same wide memory Phi was split into Phis 2477 // for different memory slices. Search all Phis for this region. 2478 if (result != NULL) { 2479 Node* region = orig_phi->in(0); 2480 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { 2481 Node* phi = region->fast_out(i); 2482 if (phi->is_Phi() && 2483 C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) { 2484 assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice"); 2485 return phi->as_Phi(); 2486 } 2487 } 2488 } 2489 if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) { 2490 if (C->do_escape_analysis() == true && !C->failing()) { 2491 // Retry compilation without escape analysis. 2492 // If this is the first failure, the sentinel string will "stick" 2493 // to the Compile object, and the C2Compiler will see it and retry. 2494 C->record_failure(C2Compiler::retry_no_escape_analysis()); 2495 } 2496 return NULL; 2497 } 2498 orig_phi_worklist.append_if_missing(orig_phi); 2499 const TypePtr *atype = C->get_adr_type(alias_idx); 2500 result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype); 2501 C->copy_node_notes_to(result, orig_phi); 2502 igvn->set_type(result, result->bottom_type()); 2503 record_for_optimizer(result); 2504 set_map(orig_phi, result); 2505 new_created = true; 2506 return result; 2507 } 2508 2509 // 2510 // Return a new version of Memory Phi "orig_phi" with the inputs having the 2511 // specified alias index. 
2512 //
2513 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {
2514   assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
2515   Compile *C = _compile;
2516   PhaseGVN* igvn = _igvn;
2517   bool new_phi_created;
2518   PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
2519   if (!new_phi_created) {
2520     return result;
2521   }
2522   GrowableArray<PhiNode *> phi_list;
2523   GrowableArray<uint> cur_input;
2524   PhiNode *phi = orig_phi;
2525   uint idx = 1;
2526   bool finished = false;
2527   while (!finished) {
2528     while (idx < phi->req()) {
2529       Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
2530       if (mem != NULL && mem->is_Phi()) {
2531         PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
2532         if (new_phi_created) {
2533           // Found a phi for which we created a new split; push the current
2534           // one on the worklist and begin processing the new one.
2535           phi_list.push(phi);
2536           cur_input.push(idx);
2537           phi = mem->as_Phi();
2538           result = newphi;
2539           idx = 1;
2540           continue;
2541         } else {
2542           mem = newphi;
2543         }
2544       }
2545       if (C->failing()) {
2546         return NULL;
2547       }
2548       result->set_req(idx++, mem);
2549     }
2550 #ifdef ASSERT
2551     // Verify that the new Phi has an input for each input of the original.
2552     assert( phi->req() == result->req(), "must have same number of inputs.");
2553     assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
2554 #endif
2555     // Check if all of the new phi's inputs have the specified alias index;
2556     // otherwise use the old phi.
2557     for (uint i = 1; i < phi->req(); i++) {
2558       Node* in = result->in(i);
2559       assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");
2560     }
2561     // We have finished processing a Phi; see if there are any more to do.
2562     finished = (phi_list.length() == 0);
2563     if (!finished) {
2564       phi = phi_list.pop();
2565       idx = cur_input.pop();
2566       PhiNode *prev_result = get_map_phi(phi->_idx);
2567       prev_result->set_req(idx++, result);
2568       result = prev_result;
2569     }
2570   }
2571   return result;
2572 }
2573
2574 //
2575 // The next methods are derived from methods in MemNode.
2576 //
2577 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
2578   Node *mem = mmem;
2579   // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
2580   // means an array I have not precisely typed yet. Do not do any
2581   // alias stuff with it any time soon.
2582   if (toop->base() != Type::AnyPtr &&
2583       !(toop->klass() != NULL &&
2584         toop->klass()->is_java_lang_Object() &&
2585         toop->offset() == Type::OffsetBot)) {
2586     mem = mmem->memory_at(alias_idx);
2587     // Update input if it is progress over what we have now.
2588   }
2589   return mem;
2590 }
2591
2592 //
2593 // Move memory users to their memory slices.
2594 // 2595 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) { 2596 Compile* C = _compile; 2597 PhaseGVN* igvn = _igvn; 2598 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 2599 assert(tp != NULL, "ptr type"); 2600 int alias_idx = C->get_alias_index(tp); 2601 int general_idx = C->get_general_index(alias_idx); 2602 2603 // Move users first 2604 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2605 Node* use = n->fast_out(i); 2606 if (use->is_MergeMem()) { 2607 MergeMemNode* mmem = use->as_MergeMem(); 2608 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); 2609 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 2610 continue; // Nothing to do 2611 } 2612 // Replace previous general reference to mem node. 2613 uint orig_uniq = C->unique(); 2614 Node* m = find_inst_mem(n, general_idx, orig_phis); 2615 assert(orig_uniq == C->unique(), "no new nodes"); 2616 mmem->set_memory_at(general_idx, m); 2617 --imax; 2618 --i; 2619 } else if (use->is_MemBar()) { 2620 assert(!use->is_Initialize(), "initializing stores should not be moved"); 2621 if (use->req() > MemBarNode::Precedent && 2622 use->in(MemBarNode::Precedent) == n) { 2623 // Don't move related membars. 2624 record_for_optimizer(use); 2625 continue; 2626 } 2627 tp = use->as_MemBar()->adr_type()->isa_ptr(); 2628 if ((tp != NULL && C->get_alias_index(tp) == alias_idx) || 2629 alias_idx == general_idx) { 2630 continue; // Nothing to do 2631 } 2632 // Move to general memory slice. 2633 uint orig_uniq = C->unique(); 2634 Node* m = find_inst_mem(n, general_idx, orig_phis); 2635 assert(orig_uniq == C->unique(), "no new nodes"); 2636 igvn->hash_delete(use); 2637 imax -= use->replace_edge(n, m); 2638 igvn->hash_insert(use); 2639 record_for_optimizer(use); 2640 --i; 2641 #ifdef ASSERT 2642 } else if (use->is_Mem()) { 2643 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) { 2644 // Don't move related cardmark. 2645 continue; 2646 } 2647 // Memory nodes should have new memory input. 2648 tp = igvn->type(use->in(MemNode::Address))->isa_ptr(); 2649 assert(tp != NULL, "ptr type"); 2650 int idx = C->get_alias_index(tp); 2651 assert(get_map(use->_idx) != NULL || idx == alias_idx, 2652 "Following memory nodes should have new memory input or be on the same memory slice"); 2653 } else if (use->is_Phi()) { 2654 // Phi nodes should be split and moved already. 2655 tp = use->as_Phi()->adr_type()->isa_ptr(); 2656 assert(tp != NULL, "ptr type"); 2657 int idx = C->get_alias_index(tp); 2658 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice"); 2659 } else { 2660 use->dump(); 2661 assert(false, "should not be here"); 2662 #endif 2663 } 2664 } 2665 } 2666 2667 // 2668 // Search memory chain of "mem" to find a MemNode whose address 2669 // is the specified alias index. 
2670 //
2671 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {
2672   if (orig_mem == NULL)
2673     return orig_mem;
2674   Compile* C = _compile;
2675   PhaseGVN* igvn = _igvn;
2676   const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
2677   bool is_instance = (toop != NULL) && toop->is_known_instance();
2678   Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
2679   Node *prev = NULL;
2680   Node *result = orig_mem;
2681   while (prev != result) {
2682     prev = result;
2683     if (result == start_mem)
2684       break; // hit one of our sentinels
2685     if (result->is_Mem()) {
2686       const Type *at = igvn->type(result->in(MemNode::Address));
2687       if (at == Type::TOP)
2688         break; // Dead
2689       assert (at->isa_ptr() != NULL, "pointer type required.");
2690       int idx = C->get_alias_index(at->is_ptr());
2691       if (idx == alias_idx)
2692         break; // Found
2693       if (!is_instance && (at->isa_oopptr() == NULL ||
2694                            !at->is_oopptr()->is_known_instance())) {
2695         break; // Do not skip store to general memory slice.
2696       }
2697       result = result->in(MemNode::Memory);
2698     }
2699     if (!is_instance)
2700       continue; // don't search further for non-instance types
2701     // Skip over a call which does not affect this memory slice.
2702     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
2703       Node *proj_in = result->in(0);
2704       if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
2705         break; // hit one of our sentinels
2706       } else if (proj_in->is_Call()) {
2707         // ArrayCopy nodes are processed here as well.
2708         CallNode *call = proj_in->as_Call();
2709         if (!call->may_modify(toop, igvn)) {
2710           result = call->in(TypeFunc::Memory);
2711         }
2712       } else if (proj_in->is_Initialize()) {
2713         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
2714         // Stop if this is the initialization for the object instance which
2715         // contains this memory slice, otherwise skip over it.
2716         if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) {
2717           result = proj_in->in(TypeFunc::Memory);
2718         }
2719       } else if (proj_in->is_MemBar()) {
2720         if (proj_in->in(TypeFunc::Memory)->is_MergeMem() &&
2721             proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->is_Proj() &&
2722             proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->in(0)->is_ArrayCopy()) {
2723           // clone
2724           ArrayCopyNode* ac = proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->in(0)->as_ArrayCopy();
2725           if (ac->may_modify(toop, igvn)) {
2726             break;
2727           }
2728         }
2729         result = proj_in->in(TypeFunc::Memory);
2730       }
2731     } else if (result->is_MergeMem()) {
2732       MergeMemNode *mmem = result->as_MergeMem();
2733       result = step_through_mergemem(mmem, alias_idx, toop);
2734       if (result == mmem->base_memory()) {
2735         // Didn't find instance memory; search through the general slice recursively.
2736 result = mmem->memory_at(C->get_general_index(alias_idx)); 2737 result = find_inst_mem(result, alias_idx, orig_phis); 2738 if (C->failing()) { 2739 return NULL; 2740 } 2741 mmem->set_memory_at(alias_idx, result); 2742 } 2743 } else if (result->is_Phi() && 2744 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { 2745 Node *un = result->as_Phi()->unique_input(igvn); 2746 if (un != NULL) { 2747 orig_phis.append_if_missing(result->as_Phi()); 2748 result = un; 2749 } else { 2750 break; 2751 } 2752 } else if (result->is_ClearArray()) { 2753 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) { 2754 // Can not bypass initialization of the instance 2755 // we are looking for. 2756 break; 2757 } 2758 // Otherwise skip it (the call updated 'result' value). 2759 } else if (result->Opcode() == Op_SCMemProj) { 2760 Node* mem = result->in(0); 2761 Node* adr = NULL; 2762 if (mem->is_LoadStore()) { 2763 adr = mem->in(MemNode::Address); 2764 } else { 2765 assert(mem->Opcode() == Op_EncodeISOArray || 2766 mem->Opcode() == Op_StrCompressedCopy, "sanity"); 2767 adr = mem->in(3); // Memory edge corresponds to destination array 2768 } 2769 const Type *at = igvn->type(adr); 2770 if (at != Type::TOP) { 2771 assert(at->isa_ptr() != NULL, "pointer type required."); 2772 int idx = C->get_alias_index(at->is_ptr()); 2773 if (idx == alias_idx) { 2774 // Assert in debug mode 2775 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field"); 2776 break; // In product mode return SCMemProj node 2777 } 2778 } 2779 result = mem->in(MemNode::Memory); 2780 } else if (result->Opcode() == Op_StrInflatedCopy) { 2781 Node* adr = result->in(3); // Memory edge corresponds to destination array 2782 const Type *at = igvn->type(adr); 2783 if (at != Type::TOP) { 2784 assert(at->isa_ptr() != NULL, "pointer type required."); 2785 int idx = C->get_alias_index(at->is_ptr()); 2786 if (idx == alias_idx) { 2787 // Assert in debug mode 2788 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field"); 2789 break; // In product mode return SCMemProj node 2790 } 2791 } 2792 result = result->in(MemNode::Memory); 2793 } 2794 } 2795 if (result->is_Phi()) { 2796 PhiNode *mphi = result->as_Phi(); 2797 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); 2798 const TypePtr *t = mphi->adr_type(); 2799 if (!is_instance) { 2800 // Push all non-instance Phis on the orig_phis worklist to update inputs 2801 // during Phase 4 if needed. 2802 orig_phis.append_if_missing(mphi); 2803 } else if (C->get_alias_index(t) != alias_idx) { 2804 // Create a new Phi with the specified alias index type. 2805 result = split_memory_phi(mphi, alias_idx, orig_phis); 2806 } 2807 } 2808 // the result is either MemNode, PhiNode, InitializeNode. 2809 return result; 2810 } 2811 2812 // 2813 // Convert the types of unescaped object to instance types where possible, 2814 // propagate the new type information through the graph, and update memory 2815 // edges and MergeMem inputs to reflect the new type. 2816 // 2817 // We start with allocations (and calls which may be allocations) on alloc_worklist. 2818 // The processing is done in 4 phases: 2819 // 2820 // Phase 1: Process possible allocations from alloc_worklist. Create instance 2821 // types for the CheckCastPP for allocations where possible. 
2822 //            Propagate the new types through users as follows:
2823 //               casts and Phi:  push users on alloc_worklist
2824 //               AddP:  cast Base and Address inputs to the instance type
2825 //                      push any AddP users on alloc_worklist and push any memnode
2826 //                      users onto memnode_worklist.
2827 //  Phase 2:  Process MemNodes from memnode_worklist. Compute the new address type and
2828 //            search the Memory chain for a store with the appropriate address
2829 //            type. If a Phi is found, create a new version with
2830 //            the appropriate memory slices from each of the Phi inputs.
2831 //            For stores, process the users as follows:
2832 //               MemNode:  push on memnode_worklist
2833 //               MergeMem: push on mergemem_worklist
2834 //  Phase 3:  Process MergeMem nodes from mergemem_worklist. Walk each memory slice,
2835 //            moving the first node encountered of each instance type to the
2836 //            input corresponding to its alias index (the appropriate memory
2837 //            slice).
2838 //  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
2839 //
2840 // In the following example, the CheckCastPP nodes are the casts of allocation
2841 // results and the allocation of node 29 is unescaped and eligible to be an
2842 // instance type.
2843 //
2844 // We start with:
2845 //
2846 //     7 Parm #memory
2847 //    10  ConI  "12"
2848 //    19  CheckCastPP   "Foo"
2849 //    20  AddP  _ 19 19 10   Foo+12  alias_index=4
2850 //    29  CheckCastPP   "Foo"
2851 //    30  AddP  _ 29 29 10   Foo+12  alias_index=4
2852 //
2853 //    40  StoreP  25   7  20   ... alias_index=4
2854 //    50  StoreP  35  40  30   ... alias_index=4
2855 //    60  StoreP  45  50  20   ... alias_index=4
2856 //    70  LoadP    _  60  30   ... alias_index=4
2857 //    80  Phi     75  50  60   Memory alias_index=4
2858 //    90  LoadP    _  80  30   ... alias_index=4
2859 //   100  LoadP    _  80  20   ... alias_index=4
2860 //
2861 //
2862 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24
2863 // and creating a new alias index for node 30. This gives:
2864 //
2865 //     7 Parm #memory
2866 //    10  ConI  "12"
2867 //    19  CheckCastPP   "Foo"
2868 //    20  AddP  _ 19 19 10   Foo+12  alias_index=4
2869 //    29  CheckCastPP   "Foo"  iid=24
2870 //    30  AddP  _ 29 29 10   Foo+12  alias_index=6  iid=24
2871 //
2872 //    40  StoreP  25   7  20   ... alias_index=4
2873 //    50  StoreP  35  40  30   ... alias_index=6
2874 //    60  StoreP  45  50  20   ... alias_index=4
2875 //    70  LoadP    _  60  30   ... alias_index=6
2876 //    80  Phi     75  50  60   Memory alias_index=4
2877 //    90  LoadP    _  80  30   ... alias_index=6
2878 //   100  LoadP    _  80  20   ... alias_index=4
2879 //
2880 // In phase 2, new memory inputs are computed for the loads and stores,
2881 // and a new version of the phi is created. In phase 4, the inputs to
2882 // node 80 are updated and then the memory nodes are updated with the
2883 // values computed in phase 2. This results in:
2884 //
2885 //     7 Parm #memory
2886 //    10  ConI  "12"
2887 //    19  CheckCastPP   "Foo"
2888 //    20  AddP  _ 19 19 10   Foo+12  alias_index=4
2889 //    29  CheckCastPP   "Foo"  iid=24
2890 //    30  AddP  _ 29 29 10   Foo+12  alias_index=6  iid=24
2891 //
2892 //    40  StoreP  25   7  20   ... alias_index=4
2893 //    50  StoreP  35   7  30   ... alias_index=6
2894 //    60  StoreP  45  40  20   ... alias_index=4
2895 //    70  LoadP    _  50  30   ... alias_index=6
2896 //    80  Phi     75  40  60   Memory alias_index=4
2897 //   120  Phi     75  50  50   Memory alias_index=6
2898 //    90  LoadP    _ 120  30   ... alias_index=6
2899 //   100  LoadP    _  80  20   ... alias_index=4
2900 //
2901 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, GrowableArray<ArrayCopyNode*> &arraycopy_worklist) {
2902   GrowableArray<Node *> memnode_worklist;
2903   GrowableArray<PhiNode *> orig_phis;
2904   PhaseIterGVN *igvn = _igvn;
2905   uint new_index_start = (uint) _compile->num_alias_types();
2906   Arena* arena = Thread::current()->resource_area();
2907   VectorSet visited(arena);
2908   ideal_nodes.clear(); // Reset for use with set_map/get_map.
2909   uint unique_old = _compile->unique();
2910
2911   // Phase 1: Process possible allocations from alloc_worklist.
2912   // Create instance types for the CheckCastPP for allocations where possible.
2913   //
2914   // (Note: don't forget to change the order of the second AddP node on
2915   // the alloc_worklist if the order of the worklist processing is changed,
2916   // see the comment in find_second_addp().)
2917   //
2918   while (alloc_worklist.length() != 0) {
2919     Node *n = alloc_worklist.pop();
2920     uint ni = n->_idx;
2921     if (n->is_Call()) {
2922       CallNode *alloc = n->as_Call();
2923       // Copy escape information to the call node.
2924       PointsToNode* ptn = ptnode_adr(alloc->_idx);
2925       PointsToNode::EscapeState es = ptn->escape_state();
2926       // We have an allocation or call which returns a Java object;
2927       // see if it is unescaped.
2928       if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable())
2929         continue;
2930       // Find the CheckCastPP for the allocation or for the return value of a call.
2931       n = alloc->result_cast();
2932       if (n == NULL) { // No uses except Initialize node
2933         if (alloc->is_Allocate()) {
2934           // Set the scalar_replaceable flag for the allocation
2935           // so it can be eliminated if it has no uses.
2936           alloc->as_Allocate()->_is_scalar_replaceable = true;
2937         }
2938         if (alloc->is_CallStaticJava()) {
2939           // Set the scalar_replaceable flag for the boxing method
2940           // so it can be eliminated if it has no uses.
2941           alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
2942         }
2943         continue;
2944       }
2945       if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
2946         assert(!alloc->is_Allocate(), "allocation should have unique type");
2947         continue;
2948       }
2949
2950       // The inline code for Object.clone() casts the allocation result to
2951       // java.lang.Object and then to the actual type of the allocated
2952       // object. Detect this case and use the second cast.
2953       // Also detect the j.l.reflect.Array.newInstance(jobject, jint) case when
2954       // the allocation result is cast to java.lang.Object and then
2955       // to the actual Array type.
2956       if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
2957           && (alloc->is_AllocateArray() ||
2958               igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {
2959         Node *cast2 = NULL;
2960         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2961           Node *use = n->fast_out(i);
2962           if (use->is_CheckCastPP()) {
2963             cast2 = use;
2964             break;
2965           }
2966         }
2967         if (cast2 != NULL) {
2968           n = cast2;
2969         } else {
2970           // Not scalar replaceable if the allocation type is not known
2971           // statically (reflection allocation): the object can't be restored
2972           // during deoptimization without a precise type.
2973           continue;
2974         }
2975       }
2976
2977       const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
2978       if (t == NULL)
2979         continue; // not a TypeOopPtr
2980       if (!t->klass_is_exact())
2981         continue; // not a unique type
2982
2983       if (alloc->is_Allocate()) {
2984         // Set the scalar_replaceable flag for the allocation
2985         // so it can be eliminated.
2986 alloc->as_Allocate()->_is_scalar_replaceable = true; 2987 } 2988 if (alloc->is_CallStaticJava()) { 2989 // Set the scalar_replaceable flag for boxing method 2990 // so it could be eliminated. 2991 alloc->as_CallStaticJava()->_is_scalar_replaceable = true; 2992 } 2993 set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state 2994 // in order for an object to be scalar-replaceable, it must be: 2995 // - a direct allocation (not a call returning an object) 2996 // - non-escaping 2997 // - eligible to be a unique type 2998 // - not determined to be ineligible by escape analysis 2999 set_map(alloc, n); 3000 set_map(n, alloc); 3001 const TypeOopPtr* tinst = t->cast_to_instance_id(ni); 3002 igvn->hash_delete(n); 3003 igvn->set_type(n, tinst); 3004 n->raise_bottom_type(tinst); 3005 igvn->hash_insert(n); 3006 record_for_optimizer(n); 3007 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) { 3008 3009 // First, put on the worklist all Field edges from Connection Graph 3010 // which is more accurate than putting immediate users from Ideal Graph. 3011 for (EdgeIterator e(ptn); e.has_next(); e.next()) { 3012 PointsToNode* tgt = e.get(); 3013 if (tgt->is_Arraycopy()) { 3014 continue; 3015 } 3016 Node* use = tgt->ideal_node(); 3017 assert(tgt->is_Field() && use->is_AddP(), 3018 "only AddP nodes are Field edges in CG"); 3019 if (use->outcnt() > 0) { // Don't process dead nodes 3020 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base)); 3021 if (addp2 != NULL) { 3022 assert(alloc->is_AllocateArray(),"array allocation was expected"); 3023 alloc_worklist.append_if_missing(addp2); 3024 } 3025 alloc_worklist.append_if_missing(use); 3026 } 3027 } 3028 3029 // An allocation may have an Initialize which has raw stores. Scan 3030 // the users of the raw allocation result and push AddP users 3031 // on alloc_worklist. 
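// (Added, illustrative: these raw stores are addressed off the raw
// allocation result before a CheckCastPP exists, i.e. AddP case #3
// in get_addp_base(), such as the captured field initializations
// recorded by the Initialize node.)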
3032 Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms); 3033 assert (raw_result != NULL, "must have an allocation result"); 3034 for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) { 3035 Node *use = raw_result->fast_out(i); 3036 if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes 3037 Node* addp2 = find_second_addp(use, raw_result); 3038 if (addp2 != NULL) { 3039 assert(alloc->is_AllocateArray(),"array allocation was expected"); 3040 alloc_worklist.append_if_missing(addp2); 3041 } 3042 alloc_worklist.append_if_missing(use); 3043 } else if (use->is_MemBar()) { 3044 memnode_worklist.append_if_missing(use); 3045 } 3046 } 3047 } 3048 } else if (n->is_AddP()) { 3049 JavaObjectNode* jobj = unique_java_object(get_addp_base(n)); 3050 if (jobj == NULL || jobj == phantom_obj) { 3051 #ifdef ASSERT 3052 ptnode_adr(get_addp_base(n)->_idx)->dump(); 3053 ptnode_adr(n->_idx)->dump(); 3054 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation"); 3055 #endif 3056 _compile->record_failure(C2Compiler::retry_no_escape_analysis()); 3057 return; 3058 } 3059 Node *base = get_map(jobj->idx()); // CheckCastPP node 3060 if (!split_AddP(n, base)) continue; // wrong type from dead path 3061 } else if (n->is_Phi() || 3062 n->is_CheckCastPP() || 3063 n->is_EncodeP() || 3064 n->is_DecodeN() || 3065 (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) { 3066 if (visited.test_set(n->_idx)) { 3067 assert(n->is_Phi(), "loops only through Phi's"); 3068 continue; // already processed 3069 } 3070 JavaObjectNode* jobj = unique_java_object(n); 3071 if (jobj == NULL || jobj == phantom_obj) { 3072 #ifdef ASSERT 3073 ptnode_adr(n->_idx)->dump(); 3074 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation"); 3075 #endif 3076 _compile->record_failure(C2Compiler::retry_no_escape_analysis()); 3077 return; 3078 } else { 3079 Node *val = get_map(jobj->idx()); // CheckCastPP node 3080 TypeNode *tn = n->as_Type(); 3081 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr(); 3082 assert(tinst != NULL && tinst->is_known_instance() && 3083 tinst->instance_id() == jobj->idx() , "instance type expected."); 3084 3085 const Type *tn_type = igvn->type(tn); 3086 const TypeOopPtr *tn_t; 3087 if (tn_type->isa_narrowoop()) { 3088 tn_t = tn_type->make_ptr()->isa_oopptr(); 3089 } else { 3090 tn_t = tn_type->isa_oopptr(); 3091 } 3092 if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) { 3093 if (tn_type->isa_narrowoop()) { 3094 tn_type = tinst->make_narrowoop(); 3095 } else { 3096 tn_type = tinst; 3097 } 3098 igvn->hash_delete(tn); 3099 igvn->set_type(tn, tn_type); 3100 tn->set_type(tn_type); 3101 igvn->hash_insert(tn); 3102 record_for_optimizer(n); 3103 } else { 3104 assert(tn_type == TypePtr::NULL_PTR || 3105 tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()), 3106 "unexpected type"); 3107 continue; // Skip dead path with different type 3108 } 3109 } 3110 } else { 3111 debug_only(n->dump();) 3112 assert(false, "EA: unexpected node"); 3113 continue; 3114 } 3115 // push allocation's users on appropriate worklist 3116 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3117 Node *use = n->fast_out(i); 3118 if(use->is_Mem() && use->in(MemNode::Address) == n) { 3119 // Load/store to instance's field 3120 memnode_worklist.append_if_missing(use); 3121 } else if (use->is_MemBar()) { 3122 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge 3123 memnode_worklist.append_if_missing(use); 3124 } 3125 } else if (use->is_AddP() && use->outcnt() 
        if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
          if (tn_type->isa_narrowoop()) {
            tn_type = tinst->make_narrowoop();
          } else {
            tn_type = tinst;
          }
          igvn->hash_delete(tn);
          igvn->set_type(tn, tn_type);
          tn->set_type(tn_type);
          igvn->hash_insert(tn);
          record_for_optimizer(n);
        } else {
          assert(tn_type == TypePtr::NULL_PTR ||
                 (tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass())),
                 "unexpected type");
          continue; // Skip dead path with different type
        }
      }
    } else {
      debug_only(n->dump();)
      assert(false, "EA: unexpected node");
      continue;
    }
    // push allocation's users on the appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* use = n->fast_out(i);
      if (use->is_Mem() && use->in(MemNode::Address) == n) {
        // Load/store to instance's field
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
      } else if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
        Node* addp2 = find_second_addp(use, n);
        if (addp2 != NULL) {
          alloc_worklist.append_if_missing(addp2);
        }
        alloc_worklist.append_if_missing(use);
      } else if (use->is_Phi() ||
                 use->is_CheckCastPP() ||
                 use->is_EncodeNarrowPtr() ||
                 use->is_DecodeNarrowPtr() ||
                 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
        alloc_worklist.append_if_missing(use);
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->is_SafePoint()) {
        // Look for MergeMem nodes for calls which reference unique allocation
        // (through CheckCastPP nodes) even for debug info.
        Node* m = use->in(TypeFunc::Memory);
        if (m->is_MergeMem()) {
          assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
        }
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites destination array
          memnode_worklist.append_if_missing(use);
        }
      } else {
        uint op = use->Opcode();
        if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
            (use->in(MemNode::Memory) == n)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(op == Op_CmpP || op == Op_Conv2B ||
                     op == Op_CastP2X || op == Op_StoreCM ||
                     op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing allocation reference path");
        }
#endif
      }
    }

  }

  // Go over all ArrayCopy nodes and if one of the inputs has a unique
  // type, record it in the ArrayCopy node so we know what memory this
  // node uses/modifies.
  for (int next = 0; next < arraycopy_worklist.length(); next++) {
    ArrayCopyNode* ac = arraycopy_worklist.at(next);
    Node* dest = ac->in(ArrayCopyNode::Dest);
    if (dest->is_AddP()) {
      dest = get_addp_base(dest);
    }
    JavaObjectNode* jobj = unique_java_object(dest);
    if (jobj != NULL) {
      Node* base = get_map(jobj->idx());
      if (base != NULL) {
        const TypeOopPtr* base_t = _igvn->type(base)->isa_oopptr();
        ac->_dest_type = base_t;
      }
    }
    Node* src = ac->in(ArrayCopyNode::Src);
    if (src->is_AddP()) {
      src = get_addp_base(src);
    }
    jobj = unique_java_object(src);
    if (jobj != NULL) {
      Node* base = get_map(jobj->idx());
      if (base != NULL) {
        const TypeOopPtr* base_t = _igvn->type(base)->isa_oopptr();
        ac->_src_type = base_t;
      }
    }
  }

  // New alias types were created in split_AddP().
  uint new_index_end = (uint) _compile->num_alias_types();
  assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");

  // Phase 2: Process MemNodes from memnode_worklist. Compute new address types and
  //          new values for the Memory inputs (the Memory inputs are not actually
  //          updated until Phase 4).
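  // Note: find_inst_mem() walks up the memory chain looking for the memory
  // state that corresponds to a given alias index, splitting memory Phis
  // into instance Phis where needed (see split_memory_phi()); the original
  // Phis are collected in orig_phis and fixed up in Phase 4.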
  if (memnode_worklist.length() == 0)
    return; // nothing to do
  while (memnode_worklist.length() != 0) {
    Node* n = memnode_worklist.pop();
    if (visited.test_set(n->_idx))
      continue;
    if (n->is_Phi() || n->is_ClearArray()) {
      // we don't need to do anything, but the users must be pushed
    } else if (n->is_MemBar()) { // Initialize, MemBar nodes
      // we don't need to do anything, but the users must be pushed
      n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
      if (n == NULL)
        continue;
    } else if (n->Opcode() == Op_StrCompressedCopy ||
               n->Opcode() == Op_EncodeISOArray) {
      // get the memory projection
      n = n->find_out_with(Op_SCMemProj);
      assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
    } else {
      assert(n->is_Mem(), "memory node required.");
      Node* addr = n->in(MemNode::Address);
      const Type* addr_t = igvn->type(addr);
      if (addr_t == Type::TOP)
        continue;
      assert(addr_t->isa_ptr() != NULL, "pointer type required.");
      int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
      assert((uint)alias_idx < new_index_end, "wrong alias index");
      Node* mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != n->in(MemNode::Memory)) {
        // We delay the memory edge update since we need the old one in
        // the MergeMem code below when instance memory slices are separated.
        set_map(n, mem);
      }
      if (n->is_Load()) {
        continue; // don't push users
      } else if (n->is_LoadStore()) {
        // get the memory projection
        n = n->find_out_with(Op_SCMemProj);
        assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
      }
    }
    // push users on the appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* use = n->fast_out(i);
      if (use->is_Phi() || use->is_ClearArray()) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
        if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
          continue;
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites destination array
          memnode_worklist.append_if_missing(use);
        }
      } else {
        uint op = use->Opcode();
        if ((use->in(MemNode::Memory) == n) &&
            (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
                     op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing memory path");
        }
#endif
      }
    }
  }
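
  // A MergeMem node keeps one memory input per alias index (memory_at()
  // falls back to the base memory input for slices which are empty), so the
  // newly created instance alias indexes now need inputs of their own.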
  // Phase 3: Process MergeMem nodes from mergemem_worklist.
  //          Walk each memory slice moving the first node encountered of each
  //          instance type to the input corresponding to its alias index.
  uint length = _mergemem_worklist.length();
  for (uint next = 0; next < length; ++next) {
    MergeMemNode* nmm = _mergemem_worklist.at(next);
    assert(!visited.test_set(nmm->_idx), "should not be visited before");
    // Note: we don't want to use MergeMemStream here because we only want to
    // scan inputs which exist at the start, not ones we add during processing.
    // Note 2: MergeMem may already contain instance memory slices added
    // during find_inst_mem() call when memory nodes were processed above.
    igvn->hash_delete(nmm);
    uint nslices = MIN2(nmm->req(), new_index_start);
    for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
      Node* mem = nmm->in(i);
      Node* cur = NULL;
      if (mem == NULL || mem->is_top())
        continue;
      // First, update mergemem by moving memory nodes to corresponding slices
      // if their type became more precise since this mergemem was created.
      while (mem->is_Mem()) {
        const Type* at = igvn->type(mem->in(MemNode::Address));
        if (at != Type::TOP) {
          assert(at->isa_ptr() != NULL, "pointer type required.");
          uint idx = (uint)_compile->get_alias_index(at->is_ptr());
          if (idx == i) {
            if (cur == NULL)
              cur = mem;
          } else {
            if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
              nmm->set_memory_at(idx, mem);
            }
          }
        }
        mem = mem->in(MemNode::Memory);
      }
      nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
      // Find any instance of the current type if we haven't already encountered
      // a memory slice of the instance along the memory chain.
      for (uint ni = new_index_start; ni < new_index_end; ni++) {
        if ((uint)_compile->get_general_index(ni) == i) {
          Node* m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
          if (nmm->is_empty_memory(m)) {
            Node* result = find_inst_mem(mem, ni, orig_phis);
            if (_compile->failing()) {
              return;
            }
            nmm->set_memory_at(ni, result);
          }
        }
      }
    }
    // Find the rest of the instance values.
    for (uint ni = new_index_start; ni < new_index_end; ni++) {
      const TypeOopPtr* tinst = _compile->get_adr_type(ni)->isa_oopptr();
      Node* result = step_through_mergemem(nmm, ni, tinst);
      if (result == nmm->base_memory()) {
        // Didn't find instance memory, search through general slice recursively.
        result = nmm->memory_at(_compile->get_general_index(ni));
        result = find_inst_mem(result, ni, orig_phis);
        if (_compile->failing()) {
          return;
        }
        nmm->set_memory_at(ni, result);
      }
    }
    igvn->hash_insert(nmm);
    record_for_optimizer(nmm);
  }

  // Phase 4: Update the inputs of non-instance memory Phis and
  //          the Memory input of memnodes.
  // First update the inputs of any non-instance Phi's from
  // which we split out an instance Phi.  Note we don't have
  // to recursively process Phi's encountered on the input memory
  // chains as is done in split_memory_phi() since they will
  // also be processed here.
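  // (orig_phis was filled in by split_memory_phi(), called from
  // find_inst_mem(), with the original non-instance memory Phis.)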
  for (int j = 0; j < orig_phis.length(); j++) {
    PhiNode* phi = orig_phis.at(j);
    int alias_idx = _compile->get_alias_index(phi->adr_type());
    igvn->hash_delete(phi);
    for (uint i = 1; i < phi->req(); i++) {
      Node* mem = phi->in(i);
      Node* new_mem = find_inst_mem(mem, alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != new_mem) {
        phi->set_req(i, new_mem);
      }
    }
    igvn->hash_insert(phi);
    record_for_optimizer(phi);
  }

  // Update the memory inputs of MemNodes with the value we computed in
  // Phase 2 and move stores' memory users to the corresponding memory slices.
  // Disable memory split verification code until the fix for 6984348.
  // Currently it produces false negative results since it does not cover all cases.
#if 0 // ifdef ASSERT
  visited.Reset();
  Node_Stack old_mems(arena, _compile->unique() >> 2);
#endif
  for (uint i = 0; i < ideal_nodes.size(); i++) {
    Node* n = ideal_nodes.at(i);
    Node* nmem = get_map(n->_idx);
    assert(nmem != NULL, "sanity");
    if (n->is_Mem()) {
#if 0 // ifdef ASSERT
      Node* old_mem = n->in(MemNode::Memory);
      if (!visited.test_set(old_mem->_idx)) {
        old_mems.push(old_mem, old_mem->outcnt());
      }
#endif
      assert(n->in(MemNode::Memory) != nmem, "sanity");
      if (!n->is_Load()) {
        // Move memory users of a store first.
        move_inst_mem(n, orig_phis);
      }
      // Now update the memory input
      igvn->hash_delete(n);
      n->set_req(MemNode::Memory, nmem);
      igvn->hash_insert(n);
      record_for_optimizer(n);
    } else {
      assert(n->is_Allocate() || n->is_CheckCastPP() ||
             n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
    }
  }
#if 0 // ifdef ASSERT
  // Verify that memory was split correctly
  while (old_mems.is_nonempty()) {
    Node* old_mem = old_mems.node();
    uint old_cnt = old_mems.index();
    old_mems.pop();
    assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
  }
#endif
}

#ifndef PRODUCT
static const char *node_type_names[] = {
  "UnknownType",
  "JavaObject",
  "LocalVar",
  "Field",
  "Arraycopy"
};

static const char *esc_names[] = {
  "UnknownEscape",
  "NoEscape",
  "ArgEscape",
  "GlobalEscape"
};
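
// Dump a PointsToNode and its ideal node: node type, escape states ("NSR"
// marks a JavaObject that is not scalar replaceable), the bases for fields,
// then edges and uses. In the lists, "P" tags a JavaObject, "F" a Field,
// "cp" an Arraycopy, and "b" a use through a base edge.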
"cp" : ""); 3482 } 3483 tty->print(" ["); 3484 for (UseIterator i(this); i.has_next(); i.next()) { 3485 PointsToNode* u = i.get(); 3486 bool is_base = false; 3487 if (PointsToNode::is_base_use(u)) { 3488 is_base = true; 3489 u = PointsToNode::get_use_node(u)->as_Field(); 3490 } 3491 tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : ""); 3492 } 3493 tty->print(" ]] "); 3494 if (_node == NULL) 3495 tty->print_cr("<null>"); 3496 else 3497 _node->dump(); 3498 } 3499 3500 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { 3501 bool first = true; 3502 int ptnodes_length = ptnodes_worklist.length(); 3503 for (int i = 0; i < ptnodes_length; i++) { 3504 PointsToNode *ptn = ptnodes_worklist.at(i); 3505 if (ptn == NULL || !ptn->is_JavaObject()) 3506 continue; 3507 PointsToNode::EscapeState es = ptn->escape_state(); 3508 if ((es != PointsToNode::NoEscape) && !Verbose) { 3509 continue; 3510 } 3511 Node* n = ptn->ideal_node(); 3512 if (n->is_Allocate() || (n->is_CallStaticJava() && 3513 n->as_CallStaticJava()->is_boxing_method())) { 3514 if (first) { 3515 tty->cr(); 3516 tty->print("======== Connection graph for "); 3517 _compile->method()->print_short_name(); 3518 tty->cr(); 3519 first = false; 3520 } 3521 ptn->dump(); 3522 // Print all locals and fields which reference this allocation 3523 for (UseIterator j(ptn); j.has_next(); j.next()) { 3524 PointsToNode* use = j.get(); 3525 if (use->is_LocalVar()) { 3526 use->dump(Verbose); 3527 } else if (Verbose) { 3528 use->dump(); 3529 } 3530 } 3531 tty->cr(); 3532 } 3533 } 3534 } 3535 #endif