/*
 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  // Add ConP(#NULL) and ConN(#NULL) nodes.
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
  _pcmp_neq = NULL; // Should be initialized
  _pcmp_eq  = NULL;
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate())
      return true;
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con()))
        return true;
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  ResourceMark rm;

  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
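  // (zerocon() returns the cached null constant of the given type,
  // creating and registering it on first use.)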
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0)
    igvn->hash_delete(oop_null);
  if (noop_null->outcnt() == 0)
    igvn->hash_delete(noop_null);
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<Node*> storestore_worklist;
  GrowableArray<ArrayCopyNode*>  arraycopy_worklist;
  GrowableArray<PointsToNode*>   ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_worklist;
  GrowableArray<FieldNode*>      oop_fields_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), NULL); // preallocate space
  // Initialize worklist
  if (C->root() != NULL) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != NULL && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only the results of allocations and Java static calls are interesting.
          non_escaped_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    if (n->is_MergeMem()) {
      // Collect all MergeMem nodes to add memory slices for
      // scalar replaceable objects in split_unique_types().
      _mergemem_worklist.append(n->as_MergeMem());
    } else if (OptimizePtrCompare && n->is_Cmp() &&
               (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
      // Collect compare pointers nodes.
      ptr_cmp_worklist.append(n);
    } else if (n->is_MemBarStoreStore()) {
      // Collect all MemBarStoreStore nodes so that depending on the
      // escape status of the associated Allocate node some of them
      // may be eliminated.
      storestore_worklist.append(n);
    } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
               (n->req() > MemBarNode::Precedent)) {
      record_for_optimizer(n);
#ifdef ASSERT
    } else if (n->is_AddP()) {
      // Collect address nodes for graph verification.
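      // (each AddP gets a Field node in the graph; the collected list
      // is consumed by verify_connection_graph() below.)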
      addp_worklist.append(n);
#endif
    } else if (n->is_ArrayCopy()) {
      // Keep a list of ArrayCopy nodes so that if one of their inputs is
      // non-escaping we can record a unique type.
      arraycopy_worklist.append(n->as_ArrayCopy());
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i); // Get user
      ideal_nodes.push(m);
    }
  }
  if (non_escaped_worklist.length() == 0) {
    _collecting = false;
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while (delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }
  int ptnodes_length = ptnodes_worklist.length();

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and all
    // local vars have edges.
    _verify = true;
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // The bytecode analyzer BCEscapeAnalyzer, used for Call nodes
  // processing, calls CI to resolve symbols (types, fields, methods)
  // referenced in bytecode. During symbol resolution the VM may throw
  // an exception which CI cleans and converts to a compilation failure.
  if (C->failing()) return false;

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or hit time or iterations limits.
    _collecting = false;
    return false;
  }

  // 3. Adjust scalar_replaceable state of nonescaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (n->is_CallStaticJava()) {
      n->as_CallStaticJava()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn);
      if (ptn->scalar_replaceable()) {
        alloc_worklist.append(ptn->ideal_node());
      }
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
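  //    (uses the pointer compares collected in ptr_cmp_worklist and the
  //    MemBarStoreStore barriers collected in storestore_worklist above.)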
  bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  if (has_scalar_replaceable_candidates &&
      C->AliasLevel() >= 3 && EliminateAllocations) {
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist);
    if (C->failing()) return false;
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    } else if (C->AliasLevel() < 3) {
      tty->print(" since AliasLevel < 3 ===");
    }
    tty->cr();
#endif
  }
  return has_non_escaping_obj;
}

// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != NULL) {
    Node* adr = n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == NULL ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape,
                           adr, delayed_worklist);
  }
}

// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method should not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != NULL)
    return; // No need to redefine PointsTo node during first iteration.

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on IGVN worklist to process them during
      // first IGVN optimization when escape information is still available.
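      // (locks on objects which are found to be non-escaping can then
      // be eliminated.)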
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != NULL && strcmp(name, "uncommon_trap") == 0)
          return; // Skip uncommon traps
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
      if ((n->as_Call()->returns_pointer() &&
           n->as_Call()->proj_out(TypeFunc::Parms) != NULL) ||
          (n->is_CallStaticJava() &&
           n->as_CallStaticJava()->is_boxing_method())) {
        add_call_node(n->as_Call());
      }
    }
    return;
  }
  // Put this check here to process call arguments since some call nodes
  // point to phantom_obj.
  if (n_ptn == phantom_obj || n_ptn == null_obj)
    return; // Skip predefined nodes.

  int opcode = n->Opcode();
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      // Field nodes are created for all field types. They are used in
      // adjust_scalar_replaceable_state() and split_unique_types().
      // Note, non-oop fields will have only base edges in Connection
      // Graph because such fields are not used for oop loads and stores.
      int offset = address_offset(n, igvn);
      add_field(n, PointsToNode::NoEscape, offset);
      if (ptn_base == NULL) {
        delayed_worklist->push(n); // Process it later.
      } else {
        n_ptn = ptnode_adr(n_idx);
        add_base(n_ptn->as_Field(), ptn_base);
      }
      break;
    }
    case Op_CastX2P: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), delayed_worklist);
      break;
    }
    case Op_CMoveP: {
      add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during first iteration because some inputs
      // may not be defined yet.
      delayed_worklist->push(n);
      break;
    }
    case Op_ConP:
    case Op_ConN:
    case Op_ConNKlass: {
      // assume all oop constants globally escape except for null
      PointsToNode::EscapeState es;
      const Type* t = igvn->type(n);
      if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
        es = PointsToNode::NoEscape;
      } else {
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(n, es);
      break;
    }
    case Op_CreateEx: {
      // assume that all exception objects globally escape
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadKlass:
    case Op_LoadNKlass: {
      // Unknown class is loaded
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadPLocked: {
      add_objload_to_connection_graph(n, delayed_worklist);
      break;
    }
    case Op_Parm: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_PartialSubtypeCheck: {
      // Produces Null or notNull and is used only in CmpP so
      // phantom_obj could be used.
      map_ideal_node(n, phantom_obj); // Result is unknown
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
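      // (make_ptr() maps narrow oop types to their regular pointer
      // equivalents and returns NULL for non-pointer types.)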
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during first iteration because some inputs
        // may not be defined yet.
        delayed_worklist->push(n);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape,
                               n->in(0), delayed_worklist);
      }
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), delayed_worklist);
      }
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      // fallthrough
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_StorePConditional:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN: {
      Node* adr = n->in(MemNode::Address);
      const Type *adr_type = igvn->type(adr);
      adr_type = adr_type->make_ptr();
      if (adr_type == NULL) {
        break; // skip dead nodes
      }
      if (adr_type->isa_oopptr() ||
          (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) &&
          (adr_type == TypeRawPtr::NOTNULL &&
           adr->in(AddPNode::Address)->is_Proj() &&
           adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
        delayed_worklist->push(n); // Process it later.
#ifdef ASSERT
        assert(adr->is_AddP(), "expecting an AddP");
        if (adr_type == TypeRawPtr::NOTNULL) {
          // Verify a raw address for a store captured by Initialize node.
          int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
          assert(offs != Type::OffsetBot, "offset must be a constant");
        }
#endif
      } else {
        // Ignore copying the displaced header to the BoxNode (OSR compilation).
        if (adr->is_BoxLock())
          break;
        // Stored value escapes in unsafe access.
        if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
          // Pointer stores in G1 barriers look like unsafe access.
          // Ignore such stores to be able to scalar replace non-escaping
          // allocations.
          if (UseG1GC && adr->is_AddP()) {
            Node* base = get_addp_base(adr);
            if (base->Opcode() == Op_LoadP &&
                base->in(MemNode::Address)->is_AddP()) {
              adr = base->in(MemNode::Address);
              Node* tls = get_addp_base(adr);
              if (tls->Opcode() == Op_ThreadLocal) {
                int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
                if (offs == in_bytes(JavaThread::satb_mark_queue_offset() +
                                     SATBMarkQueue::byte_offset_of_buf())) {
                  break; // G1 pre barrier previous oop value store.
                }
                if (offs == in_bytes(JavaThread::dirty_card_queue_offset() +
                                     DirtyCardQueue::byte_offset_of_buf())) {
                  break; // G1 post barrier card address store.
                }
              }
            }
          }
          delayed_worklist->push(n); // Process unsafe access later.
          break;
        }
#ifdef ASSERT
        n->dump(1);
        assert(false, "not unsafe or G1 barrier raw StoreP");
#endif
      }
      break;
    }
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      add_local_var(n, PointsToNode::ArgEscape);
      delayed_worklist->push(n); // Process it later.
      break;
    }
    case Op_ThreadLocal: {
      add_java_object(n, PointsToNode::ArgEscape);
      break;
    }
    default:
      ; // Do nothing for nodes not related to EA.
  }
  return;
}

#ifdef ASSERT
#define ELSE_FAIL(name) \
  /* Should not be called for non-pointer type. */ \
  n->dump(1);           \
  assert(false, name);  \
  break;
#else
#define ELSE_FAIL(name) \
  break;
#endif

// Add final simple edges to graph.
void ConnectionGraph::add_final_edges(Node *n) {
  PointsToNode* n_ptn = ptnode_adr(n->_idx);
#ifdef ASSERT
  if (_verify && n_ptn->is_JavaObject())
    return; // This method does not change graph for JavaObject.
#endif

  if (n->is_Call()) {
    process_call_arguments(n->as_Call());
    return;
  }
  assert(n->is_Store() || n->is_LoadStore() ||
         (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
         "node should be registered already");
  int opcode = n->Opcode();
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      assert(ptn_base != NULL, "field's base should be registered");
      add_base(n_ptn->as_Field(), ptn_base);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), NULL);
      break;
    }
    case Op_CMoveP: {
      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == NULL)
          continue; // ignore NULL
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n)
          continue; // ignore top or inputs which go back to this node
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadPLocked: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = _igvn->type(n);
      if (t->make_ptr() != NULL) {
        Node* adr = n->in(MemNode::Address);
        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
        break;
      }
      ELSE_FAIL("Op_LoadP");
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        for (uint i = 1; i < n->req(); i++) {
          Node* in = n->in(i);
          if (in == NULL)
            continue; // ignore NULL
          Node* uncast_in = in->uncast();
          if (uncast_in->is_top() || uncast_in == n)
            continue; // ignore top or inputs which go back to this node
          PointsToNode* ptn = ptnode_adr(in->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
        break;
      }
      ELSE_FAIL("Op_Phi");
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
        break;
      }
      ELSE_FAIL("Op_Proj");
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), NULL);
        break;
      }
      ELSE_FAIL("Op_Return");
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_StorePConditional:
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      Node* adr = n->in(MemNode::Address);
      const Type *adr_type = _igvn->type(adr);
      adr_type = adr_type->make_ptr();
#ifdef ASSERT
      if (adr_type == NULL) {
        n->dump(1);
        assert(adr_type != NULL, "dead node should not be on list");
        break;
      }
#endif
      if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN) {
        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
      }
      if (adr_type->isa_oopptr() ||
          (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) &&
          (adr_type == TypeRawPtr::NOTNULL &&
           adr->in(AddPNode::Address)->is_Proj() &&
           adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
        // Point Address to Value
        PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
        assert(adr_ptn != NULL &&
               adr_ptn->as_Field()->is_oop(), "node should be registered");
        Node *val = n->in(MemNode::ValueIn);
        PointsToNode* ptn = ptnode_adr(val->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(adr_ptn, ptn);
        break;
      } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
        // Stored value escapes in unsafe access.
        Node *val = n->in(MemNode::ValueIn);
        PointsToNode* ptn = ptnode_adr(val->_idx);
        assert(ptn != NULL, "node should be registered");
        set_escape_state(ptn, PointsToNode::GlobalEscape);
        // Add edge to object for unsafe access with offset.
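        // (if the raw address was registered as a Field node, a load
        // through it may observe the stored value.)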
        PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
        assert(adr_ptn != NULL, "node should be registered");
        if (adr_ptn->is_Field()) {
          assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
          add_edge(adr_ptn, ptn);
        }
        break;
      }
      ELSE_FAIL("Op_StoreP");
    }
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      // char[]/byte[] arrays passed to string intrinsics do not escape but
      // they are not scalar replaceable. Adjust escape state for them.
      // Start from in(2) edge since in(1) is memory edge.
      for (uint i = 2; i < n->req(); i++) {
        Node* adr = n->in(i);
        const Type* at = _igvn->type(adr);
        if (!adr->is_top() && at->isa_ptr()) {
          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
                 at->isa_ptr() != NULL, "expecting a pointer");
          if (adr->is_AddP()) {
            adr = get_addp_base(adr);
          }
          PointsToNode* ptn = ptnode_adr(adr->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    default: {
      // This method should be called only for EA specific nodes which may
      // miss some edges when they were created.
#ifdef ASSERT
      n->dump(1);
#endif
      guarantee(false, "unknown node");
    }
  }
  return;
}

void ConnectionGraph::add_call_node(CallNode* call) {
  assert(call->returns_pointer(), "only for call which returns pointer");
  uint call_idx = call->_idx;
  if (call->is_Allocate()) {
    Node* k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != NULL, "TypeKlassPtr required.");
    ciKlass* cik = kt->klass();
    PointsToNode::EscapeState es = PointsToNode::NoEscape;
    bool scalar_replaceable = true;
    if (call->is_AllocateArray()) {
      if (!cik->is_array_klass()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is not constant or too big.
          scalar_replaceable = false;
        }
      }
    } else { // Allocate instance
      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
          cik->is_subclass_of(_compile->env()->Reference_klass()) ||
          !cik->is_instance_klass() || // StressReflectiveCode
          !cik->as_instance_klass()->can_be_instantiated() ||
          cik->as_instance_klass()->has_finalizer()) {
        es = PointsToNode::GlobalEscape;
      }
    }
    add_java_object(call, es);
    PointsToNode* ptn = ptnode_adr(call_idx);
    if (!scalar_replaceable && ptn->scalar_replaceable()) {
      ptn->set_scalar_replaceable(false);
    }
  } else if (call->is_CallStaticJava()) {
    // Call nodes could be different types:
    //
    // 1. CallDynamicJavaNode (what happened during call is unknown):
    //
    //    - mapped to GlobalEscape JavaObject node if oop is returned;
    //
    //    - all oop arguments are escaping globally;
    //
    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
    //
    //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
    //
    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
    //    - mapped to NoEscape JavaObject node if non-escaping object allocated
    //      during call is returned;
    //    - mapped to ArgEscape LocalVar node pointing to object arguments
    //      which are returned and do not escape during the call;
    //
    //    - oop arguments' escaping status is defined by bytecode analysis;
    //
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record whether the call's return value escapes.
    ciMethod* meth = call->as_CallJava()->method();
    if (meth == NULL) {
      const char* name = call->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
      // Returns a newly allocated unescaped object.
      add_java_object(call, PointsToNode::NoEscape);
      ptnode_adr(call_idx)->set_scalar_replaceable(false);
    } else if (meth->is_boxing_method()) {
      // Returns boxing object
      PointsToNode::EscapeState es;
      vmIntrinsics::ID intr = meth->intrinsic_id();
      if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
        // It does not escape if object is always allocated.
        es = PointsToNode::NoEscape;
      } else {
        // It escapes globally if object could be loaded from cache.
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(call, es);
    } else {
      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
      call_analyzer->copy_dependencies(_compile->dependencies());
      if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated unescaped object, simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
        add_java_object(call, PointsToNode::NoEscape);
        ptnode_adr(call_idx)->set_scalar_replaceable(false);
      } else {
        // Determine whether any arguments are returned.
        const TypeTuple* d = call->tf()->domain_sig();
        bool ret_arg = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          if (d->field_at(i)->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
            ret_arg = true;
            break;
          }
        }
        if (ret_arg) {
          add_local_var(call, PointsToNode::ArgEscape);
        } else {
          // Returns unknown object.
          map_ideal_node(call, phantom_obj);
        }
      }
    }
  } else {
    // Another type of call, assume the worst case:
    // returned value is unknown and globally escapes.
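    // (currently only CallDynamicJava is expected here; the assert below
    // catches any new call type that needs its own handling.)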
    assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
    map_ideal_node(call, phantom_obj);
  }
}

void ConnectionGraph::process_call_arguments(CallNode *call) {
  bool is_arraycopy = false;
  switch (call->Opcode()) {
#ifdef ASSERT
    case Op_Allocate:
    case Op_AllocateArray:
    case Op_Lock:
    case Op_Unlock:
      assert(false, "should be done already");
      break;
#endif
    case Op_ArrayCopy:
    case Op_CallLeafNoFP:
      // Most array copies are ArrayCopy nodes at this point but there
      // are still a few direct calls to the copy subroutines (See
      // PhaseStringOpts::copy_string())
      is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
                     call->as_CallLeaf()->is_call_to_arraycopystub();
      // fall through
    case Op_CallLeaf: {
      // Stub calls, objects do not escape but they are not scalar replaceable.
      // Adjust escape state for outgoing arguments.
      const TypeTuple * d = call->tf()->domain_sig();
      bool src_has_oops = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        Node *arg = call->in(i);
        if (arg == NULL) {
          continue;
        }
        const Type *aat = _igvn->type(arg);
        if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
          continue;
        if (arg->is_AddP()) {
          //
          // The inline_native_clone() case when the arraycopy stub is called
          // after the allocation before Initialize and CheckCastPP nodes.
          // Or normal arraycopy for object arrays case.
          //
          // Set AddP's base (Allocate) as not scalar replaceable since
          // pointer to the base (with offset) is passed as argument.
          //
          arg = get_addp_base(arg);
        }
        PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
        assert(arg_ptn != NULL, "should be registered");
        PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
        if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != NULL, "expecting a pointer");
          bool arg_has_oops = aat->isa_oopptr() &&
                              (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
                               (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
          if (i == TypeFunc::Parms) {
            src_has_oops = arg_has_oops;
          }
          //
          // src or dst could be j.l.Object when the other is a basic type array:
          //
          //   arraycopy(char[],0,Object*,0,size);
          //   arraycopy(Object*,0,char[],0,size);
          //
          // Don't add edges in such cases.
          //
          bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
                                       arg_has_oops && (i > TypeFunc::Parms);
#ifdef ASSERT
          if (!(is_arraycopy ||
                (call->as_CallLeaf()->_name != NULL &&
                 (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0)
                 ))) {
            call->dump();
            fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
          }
#endif
          // Always process arraycopy's destination object since
          // we need to add all possible edges to references in
          // source object.
          if (arg_esc >= PointsToNode::ArgEscape &&
              !arg_is_arraycopy_dest) {
            continue;
          }
          PointsToNode::EscapeState es = PointsToNode::ArgEscape;
          if (call->is_ArrayCopy()) {
            ArrayCopyNode* ac = call->as_ArrayCopy();
            if (ac->is_clonebasic() ||
                ac->is_arraycopy_validated() ||
                ac->is_copyof_validated() ||
                ac->is_copyofrange_validated()) {
              es = PointsToNode::NoEscape;
            }
          }
          set_escape_state(arg_ptn, es);
          if (arg_is_arraycopy_dest) {
            Node* src = call->in(TypeFunc::Parms);
            if (src->is_AddP()) {
              src = get_addp_base(src);
            }
            PointsToNode* src_ptn = ptnode_adr(src->_idx);
            assert(src_ptn != NULL, "should be registered");
            if (arg_ptn != src_ptn) {
              // Special arraycopy edge:
              // A destination object's field can't have the source object
              // as base since the objects' escape states are not related.
              // Only escape state of destination object's fields affects
              // escape state of fields in source object.
              add_arraycopy(call, es, src_ptn, arg_ptn);
            }
          }
        }
      }
      break;
    }
    case Op_CallStaticJava: {
      // For a static call, we know exactly what method is being called.
      // Use bytecode estimator to record the call's escape effects
#ifdef ASSERT
      const char* name = call->as_CallStaticJava()->_name;
      assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
#endif
      ciMethod* meth = call->as_CallJava()->method();
      if ((meth != NULL) && meth->is_boxing_method()) {
        break; // Boxing methods do not modify any oops.
      }
      BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
      // fall-through if not a Java method or no analyzer information
      if (call_analyzer != NULL) {
        PointsToNode* call_ptn = ptnode_adr(call->_idx);
        const TypeTuple* d = call->tf()->domain_sig();
        int extra = 0;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          if (at->isa_valuetypeptr()) {
            extra += at->is_valuetypeptr()->value_type()->value_klass()->field_count() - 1;
            continue;
          }
          int k = i - TypeFunc::Parms;
          Node* arg = call->in(i + extra);
          PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
          if (at->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(k)) {
            // The call returns arguments.
            if (call_ptn != NULL) { // Is call's result used?
              assert(call_ptn->is_LocalVar(), "node should be registered");
              assert(arg_ptn != NULL, "node should be registered");
              add_edge(call_ptn, arg_ptn);
            }
          }
          if (at->isa_oopptr() != NULL &&
              arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
            if (!call_analyzer->is_arg_stack(k)) {
              // The argument globally escapes
              set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
            } else {
              set_escape_state(arg_ptn, PointsToNode::ArgEscape);
              if (!call_analyzer->is_arg_local(k)) {
                // The argument itself doesn't escape, but any fields might
                set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
              }
            }
          }
        }
        if (call_ptn != NULL && call_ptn->is_LocalVar()) {
          // The call returns arguments.
          assert(call_ptn->edge_count() > 0, "sanity");
          if (!call_analyzer->is_return_local()) {
            // Also returns unknown object.
            add_edge(call_ptn, phantom_obj);
          }
        }
        break;
      }
    }
    default: {
      // Fall-through here if not a Java method or no analyzer information
      // or some other type of call, assume the worst case: all arguments
      // globally escape.
      const TypeTuple* d = call->tf()->domain_sig();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != NULL) {
          Node* arg = call->in(i);
          if (arg->is_AddP()) {
            arg = get_addp_base(arg);
          }
          assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
          set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
        }
      }
    }
  }
}

// Finish Graph construction.
bool ConnectionGraph::complete_connection_graph(
                         GrowableArray<PointsToNode*>&   ptnodes_worklist,
                         GrowableArray<JavaObjectNode*>& non_escaped_worklist,
                         GrowableArray<JavaObjectNode*>& java_objects_worklist,
                         GrowableArray<FieldNode*>&      oop_fields_worklist) {
  // Normally only 1-3 passes are needed to build the Connection Graph,
  // depending on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
  // Set the limit to 20 to catch the situation when something goes wrong
  // and bail out of Escape Analysis.
  // Also limit the build time to 20 sec (60 in debug VM); see the
  // EscapeAnalysisTimeout flag.
#define CG_BUILD_ITER_LIMIT 20

  // Propagate GlobalEscape and ArgEscape escape states and check that
  // we still have non-escaping objects. The method pushes on _worklist
  // Field nodes which reference phantom_object.
  if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
    return false; // Nothing to do.
  }
  // Now propagate references to all JavaObject nodes.
  int java_objects_length = java_objects_worklist.length();
  elapsedTimer time;
  bool timeout = false;
  int new_edges = 1;
  int iterations = 0;
  do {
    while ((new_edges > 0) &&
           (iterations++ < CG_BUILD_ITER_LIMIT)) {
      double start_time = time.seconds();
      time.start();
      new_edges = 0;
      // Propagate references to phantom_object for nodes pushed on _worklist
      // by find_non_escaped_objects() and find_field_value().
      new_edges += add_java_object_edges(phantom_obj, false);
      for (int next = 0; next < java_objects_length; ++next) {
        JavaObjectNode* ptn = java_objects_worklist.at(next);
        new_edges += add_java_object_edges(ptn, true);

#define SAMPLE_SIZE 4
        if ((next % SAMPLE_SIZE) == 0) {
          // Every SAMPLE_SIZE iterations estimate how much time it will
          // take to complete graph construction.
          time.stop();
          // Poll for requests from shutdown mechanism to quiesce compiler
          // because Connection graph construction may take a long time.
          CompileBroker::maybe_block();
          double stop_time = time.seconds();
          double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
          double time_until_end = time_per_iter * (double)(java_objects_length - next);
          if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
            timeout = true;
            break; // Timeout
          }
          start_time = stop_time;
          time.start();
        }
#undef SAMPLE_SIZE

      }
      if (timeout) break;
      if (new_edges > 0) {
        // Update escape states on each iteration if graph was updated.
        if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
          return false; // Nothing to do.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    }
    if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) {
      time.start();
      // Find fields which have unknown value.
      int fields_length = oop_fields_worklist.length();
      for (int next = 0; next < fields_length; next++) {
        FieldNode* field = oop_fields_worklist.at(next);
        if (field->edge_count() == 0) {
          new_edges += find_field_value(field);
          // This code may add new edges to phantom_object.
          // Another cycle is needed to propagate references to phantom_object.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    } else {
      new_edges = 0; // Bailout
    }
  } while (new_edges > 0);

  // Bailout if passed limits.
  if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) {
    Compile* C = _compile;
    if (C->log() != NULL) {
      C->log()->begin_elem("connectionGraph_bailout reason='reached ");
      C->log()->text("%s", timeout ?
"time" : "iterations"); 1226 C->log()->end_elem(" limit'"); 1227 } 1228 assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d", 1229 time.seconds(), iterations, nodes_size(), ptnodes_worklist.length()); 1230 // Possible infinite build_connection_graph loop, 1231 // bailout (no changes to ideal graph were made). 1232 return false; 1233 } 1234 #ifdef ASSERT 1235 if (Verbose && PrintEscapeAnalysis) { 1236 tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d", 1237 iterations, nodes_size(), ptnodes_worklist.length()); 1238 } 1239 #endif 1240 1241 #undef CG_BUILD_ITER_LIMIT 1242 1243 // Find fields initialized by NULL for non-escaping Allocations. 1244 int non_escaped_length = non_escaped_worklist.length(); 1245 for (int next = 0; next < non_escaped_length; next++) { 1246 JavaObjectNode* ptn = non_escaped_worklist.at(next); 1247 PointsToNode::EscapeState es = ptn->escape_state(); 1248 assert(es <= PointsToNode::ArgEscape, "sanity"); 1249 if (es == PointsToNode::NoEscape) { 1250 if (find_init_values(ptn, null_obj, _igvn) > 0) { 1251 // Adding references to NULL object does not change escape states 1252 // since it does not escape. Also no fields are added to NULL object. 1253 add_java_object_edges(null_obj, false); 1254 } 1255 } 1256 Node* n = ptn->ideal_node(); 1257 if (n->is_Allocate()) { 1258 // The object allocated by this Allocate node will never be 1259 // seen by an other thread. Mark it so that when it is 1260 // expanded no MemBarStoreStore is added. 1261 InitializeNode* ini = n->as_Allocate()->initialization(); 1262 if (ini != NULL) 1263 ini->set_does_not_escape(); 1264 } 1265 } 1266 return true; // Finished graph construction. 1267 } 1268 1269 // Propagate GlobalEscape and ArgEscape escape states to all nodes 1270 // and check that we still have non-escaping java objects. 1271 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist, 1272 GrowableArray<JavaObjectNode*>& non_escaped_worklist) { 1273 GrowableArray<PointsToNode*> escape_worklist; 1274 // First, put all nodes with GlobalEscape and ArgEscape states on worklist. 1275 int ptnodes_length = ptnodes_worklist.length(); 1276 for (int next = 0; next < ptnodes_length; ++next) { 1277 PointsToNode* ptn = ptnodes_worklist.at(next); 1278 if (ptn->escape_state() >= PointsToNode::ArgEscape || 1279 ptn->fields_escape_state() >= PointsToNode::ArgEscape) { 1280 escape_worklist.push(ptn); 1281 } 1282 } 1283 // Set escape states to referenced nodes (edges list). 1284 while (escape_worklist.length() > 0) { 1285 PointsToNode* ptn = escape_worklist.pop(); 1286 PointsToNode::EscapeState es = ptn->escape_state(); 1287 PointsToNode::EscapeState field_es = ptn->fields_escape_state(); 1288 if (ptn->is_Field() && ptn->as_Field()->is_oop() && 1289 es >= PointsToNode::ArgEscape) { 1290 // GlobalEscape or ArgEscape state of field means it has unknown value. 1291 if (add_edge(ptn, phantom_obj)) { 1292 // New edge was added 1293 add_field_uses_to_worklist(ptn->as_Field()); 1294 } 1295 } 1296 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 1297 PointsToNode* e = i.get(); 1298 if (e->is_Arraycopy()) { 1299 assert(ptn->arraycopy_dst(), "sanity"); 1300 // Propagate only fields escape state through arraycopy edge. 
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es);
          escape_worklist.push(e);
        }
      } else if (es >= field_es) {
        // fields_escape_state is also set to 'es' if it is less than 'es'.
        if (e->escape_state() < es) {
          set_escape_state(e, es);
          escape_worklist.push(e);
        }
      } else {
        // Propagate field escape state.
        bool es_changed = false;
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es);
          es_changed = true;
        }
        if ((e->escape_state() < field_es) &&
            e->is_Field() && ptn->is_JavaObject() &&
            e->as_Field()->is_oop()) {
          // Change escape state of referenced fields.
          set_escape_state(e, field_es);
          es_changed = true;
        } else if (e->escape_state() < es) {
          set_escape_state(e, es);
          es_changed = true;
        }
        if (es_changed) {
          escape_worklist.push(e);
        }
      }
    }
  }
  // Remove escaped objects from non_escaped list.
  for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
      non_escaped_worklist.delete_at(next);
    }
    if (ptn->escape_state() == PointsToNode::NoEscape) {
      // Find fields in non-escaped allocations which have unknown value.
      find_init_values(ptn, phantom_obj, NULL);
    }
  }
  return (non_escaped_worklist.length() > 0);
}

// Add all references to JavaObject node by walking over all uses.
int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
  int new_edges = 0;
  if (populate_worklist) {
    // Populate _worklist by uses of jobj's uses.
    for (UseIterator i(jobj); i.has_next(); i.next()) {
      PointsToNode* use = i.get();
      if (use->is_Arraycopy())
        continue;
      add_uses_to_worklist(use);
      if (use->is_Field() && use->as_Field()->is_oop()) {
        // Put on worklist all field's uses (loads) and
        // related field nodes (same base and offset).
        add_field_uses_to_worklist(use->as_Field());
      }
    }
  }
  for (int l = 0; l < _worklist.length(); l++) {
    PointsToNode* use = _worklist.at(l);
    if (PointsToNode::is_base_use(use)) {
      // Add reference from jobj to field and from field to jobj (field's base).
      use = PointsToNode::get_use_node(use)->as_Field();
      if (add_base(use->as_Field(), jobj)) {
        new_edges++;
      }
      continue;
    }
    assert(!use->is_JavaObject(), "sanity");
    if (use->is_Arraycopy()) {
      if (jobj == null_obj) // NULL object does not have field edges
        continue;
      // Add edge from Arraycopy node to arraycopy's source java object
      if (add_edge(use, jobj)) {
        jobj->set_arraycopy_src();
        new_edges++;
      }
      // and stop here.
      continue;
    }
    if (!add_edge(use, jobj))
      continue; // No new edge added, there was such edge already.
    new_edges++;
    if (use->is_LocalVar()) {
      add_uses_to_worklist(use);
      if (use->arraycopy_dst()) {
        for (EdgeIterator i(use); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_Arraycopy()) {
            if (jobj == null_obj) // NULL object does not have field edges
              continue;
            // Add edge from arraycopy's destination java object to Arraycopy node.
            if (add_edge(jobj, e)) {
              new_edges++;
              jobj->set_arraycopy_dst();
            }
          }
        }
      }
    } else {
      // Added a new edge to the values stored in the field.
      // Put on worklist all field's uses (loads) and
      // related field nodes (same base and offset).
      add_field_uses_to_worklist(use->as_Field());
    }
  }
  _worklist.clear();
  _in_worklist.Reset();
  return new_edges;
}

// Put on worklist all related field nodes.
void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
  assert(field->is_oop(), "sanity");
  int offset = field->offset();
  add_uses_to_worklist(field);
  // Loop over all bases of this field and push on worklist Field nodes
  // with the same offset and base (since they may reference the same field).
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    add_fields_to_worklist(field, base);
    // Check if the base was source object of arraycopy and go over arraycopy's
    // destination objects since values stored to a field of the source object
    // are accessible by uses (loads) of fields of the destination objects.
    if (base->arraycopy_src()) {
      for (UseIterator j(base); j.has_next(); j.next()) {
        PointsToNode* arycp = j.get();
        if (arycp->is_Arraycopy()) {
          for (UseIterator k(arycp); k.has_next(); k.next()) {
            PointsToNode* abase = k.get();
            if (abase->arraycopy_dst() && abase != base) {
              // Look for the same arraycopy reference.
              add_fields_to_worklist(field, abase);
            }
          }
        }
      }
    }
  }
}

// Put on worklist all related field nodes.
void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
  int offset = field->offset();
  if (base->is_LocalVar()) {
    for (UseIterator j(base); j.has_next(); j.next()) {
      PointsToNode* f = j.get();
      if (PointsToNode::is_base_use(f)) { // Field
        f = PointsToNode::get_use_node(f);
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  } else {
    assert(base->is_JavaObject(), "sanity");
    if (// Skip phantom_object since it is only used to indicate that
        // this field's content globally escapes.
        (base != phantom_obj) &&
        // NULL object node does not have fields.
        (base != null_obj)) {
      for (EdgeIterator i(base); i.has_next(); i.next()) {
        PointsToNode* f = i.get();
        // Skip arraycopy edge since store to destination object field
        // does not update value in source object field.
        if (f->is_Arraycopy()) {
          assert(base->arraycopy_dst(), "sanity");
          continue;
        }
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  }
}

// Find fields which have unknown value.
int ConnectionGraph::find_field_value(FieldNode* field) {
  // Escaped fields should have init value already.
  assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
  int new_edges = 0;
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    if (base->is_JavaObject()) {
      // Skip Allocate's fields which will be processed later.
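      // (by find_init_values(), invoked for non-escaped allocations from
      // complete_connection_graph() and find_non_escaped_objects().)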
      if (base->ideal_node()->is_Allocate())
        return 0;
      assert(base == null_obj, "only NULL ptr base expected here");
    }
  }
  if (add_edge(field, phantom_obj)) {
    // New edge was added
    new_edges++;
    add_field_uses_to_worklist(field);
  }
  return new_edges;
}

// Find fields initializing values for allocations.
int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) {
  assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
  int new_edges = 0;
  Node* alloc = pta->ideal_node();
  if (init_val == phantom_obj) {
    // Do nothing for Allocate nodes since their field values are
    // "known" unless they are initialized by arraycopy/clone.
    if (alloc->is_Allocate() && !pta->arraycopy_dst())
      return 0;
    assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
#ifdef ASSERT
    if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) {
      const char* name = alloc->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
    }
#endif
    // Non-escaped allocations returned from Java or runtime calls have
    // unknown values in fields.
    for (EdgeIterator i(pta); i.has_next(); i.next()) {
      PointsToNode* field = i.get();
      if (field->is_Field() && field->as_Field()->is_oop()) {
        if (add_edge(field, phantom_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(field->as_Field());
        }
      }
    }
    return new_edges;
  }
  assert(init_val == null_obj, "sanity");
  // Do nothing for Call nodes since their field values are unknown.
  if (!alloc->is_Allocate())
    return 0;

  InitializeNode* ini = alloc->as_Allocate()->initialization();
  bool visited_bottom_offset = false;
  GrowableArray<int> offsets_worklist;

  // Check if an oop field's initializing value is recorded and add
  // a corresponding NULL value if it is not recorded.
  // The Connection Graph does not record a default initialization by NULL
  // captured by an Initialize node.
  //
  for (EdgeIterator i(pta); i.has_next(); i.next()) {
    PointsToNode* field = i.get(); // Field (AddP)
    if (!field->is_Field() || !field->as_Field()->is_oop())
      continue; // Not oop field
    int offset = field->as_Field()->offset();
    if (offset == Type::OffsetBot) {
      if (!visited_bottom_offset) {
        // OffsetBot is used to reference array's element,
        // always add reference to NULL to all Field nodes since we don't
        // know which element is referenced.
        if (add_edge(field, null_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(field->as_Field());
          visited_bottom_offset = true;
        }
      }
    } else {
      // Check only oop fields.
1576 const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type(); 1577 if (adr_type->isa_rawptr()) { 1578 #ifdef ASSERT 1579 // Raw pointers are used for initializing stores so skip it 1580 // since it should be recorded already 1581 Node* base = get_addp_base(field->ideal_node()); 1582 assert(adr_type->isa_rawptr() && base->is_Proj() && 1583 (base->in(0) == alloc),"unexpected pointer type"); 1584 #endif 1585 continue; 1586 } 1587 if (!offsets_worklist.contains(offset)) { 1588 offsets_worklist.append(offset); 1589 Node* value = NULL; 1590 if (ini != NULL) { 1591 // StoreP::memory_type() == T_ADDRESS 1592 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS; 1593 Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase); 1594 // Make sure initializing store has the same type as this AddP. 1595 // This AddP may reference non existing field because it is on a 1596 // dead branch of bimorphic call which is not eliminated yet. 1597 if (store != NULL && store->is_Store() && 1598 store->as_Store()->memory_type() == ft) { 1599 value = store->in(MemNode::ValueIn); 1600 #ifdef ASSERT 1601 if (VerifyConnectionGraph) { 1602 // Verify that AddP already points to all objects the value points to. 1603 PointsToNode* val = ptnode_adr(value->_idx); 1604 assert((val != NULL), "should be processed already"); 1605 PointsToNode* missed_obj = NULL; 1606 if (val->is_JavaObject()) { 1607 if (!field->points_to(val->as_JavaObject())) { 1608 missed_obj = val; 1609 } 1610 } else { 1611 if (!val->is_LocalVar() || (val->edge_count() == 0)) { 1612 tty->print_cr("----------init store has invalid value -----"); 1613 store->dump(); 1614 val->dump(); 1615 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already"); 1616 } 1617 for (EdgeIterator j(val); j.has_next(); j.next()) { 1618 PointsToNode* obj = j.get(); 1619 if (obj->is_JavaObject()) { 1620 if (!field->points_to(obj->as_JavaObject())) { 1621 missed_obj = obj; 1622 break; 1623 } 1624 } 1625 } 1626 } 1627 if (missed_obj != NULL) { 1628 tty->print_cr("----------field---------------------------------"); 1629 field->dump(); 1630 tty->print_cr("----------missed referernce to object-----------"); 1631 missed_obj->dump(); 1632 tty->print_cr("----------object referernced by init store -----"); 1633 store->dump(); 1634 val->dump(); 1635 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference"); 1636 } 1637 } 1638 #endif 1639 } else { 1640 // There could be initializing stores which follow allocation. 1641 // For example, a volatile field store is not collected 1642 // by Initialize node. 1643 // 1644 // Need to check for dependent loads to separate such stores from 1645 // stores which follow loads. For now, add initial value NULL so 1646 // that compare pointers optimization works correctly. 1647 } 1648 } 1649 if (value == NULL) { 1650 // A field's initializing value was not recorded. Add NULL. 1651 if (add_edge(field, null_obj)) { 1652 // New edge was added 1653 new_edges++; 1654 add_field_uses_to_worklist(field->as_Field()); 1655 } 1656 } 1657 } 1658 } 1659 } 1660 return new_edges; 1661 } 1662 1663 // Adjust scalar_replaceable state after Connection Graph is built. 1664 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) { 1665 // Search for non-escaping objects which are not scalar replaceable 1666 // and mark them to propagate the state to referenced objects. 1667 1668 // 1. 
An object is not scalar replaceable if the field into which it is 1669 // stored has unknown offset (stored into unknown element of an array). 1670 // 1671 for (UseIterator i(jobj); i.has_next(); i.next()) { 1672 PointsToNode* use = i.get(); 1673 if (use->is_Arraycopy()) { 1674 continue; 1675 } 1676 if (use->is_Field()) { 1677 FieldNode* field = use->as_Field(); 1678 assert(field->is_oop() && field->scalar_replaceable(), "sanity"); 1679 if (field->offset() == Type::OffsetBot) { 1680 jobj->set_scalar_replaceable(false); 1681 return; 1682 } 1683 // 2. An object is not scalar replaceable if the field into which it is 1684 // stored has multiple bases one of which is null. 1685 if (field->base_count() > 1) { 1686 for (BaseIterator i(field); i.has_next(); i.next()) { 1687 PointsToNode* base = i.get(); 1688 if (base == null_obj) { 1689 jobj->set_scalar_replaceable(false); 1690 return; 1691 } 1692 } 1693 } 1694 } 1695 assert(use->is_Field() || use->is_LocalVar(), "sanity"); 1696 // 3. An object is not scalar replaceable if it is merged with other objects. 1697 for (EdgeIterator j(use); j.has_next(); j.next()) { 1698 PointsToNode* ptn = j.get(); 1699 if (ptn->is_JavaObject() && ptn != jobj) { 1700 // Mark all objects. 1701 jobj->set_scalar_replaceable(false); 1702 ptn->set_scalar_replaceable(false); 1703 } 1704 } 1705 if (!jobj->scalar_replaceable()) { 1706 return; 1707 } 1708 } 1709 1710 for (EdgeIterator j(jobj); j.has_next(); j.next()) { 1711 if (j.get()->is_Arraycopy()) { 1712 continue; 1713 } 1714 1715 // Non-escaping object node should point only to field nodes. 1716 FieldNode* field = j.get()->as_Field(); 1717 int offset = field->as_Field()->offset(); 1718 1719 // 4. An object is not scalar replaceable if it has a field with unknown 1720 // offset (array's element is accessed in loop). 1721 if (offset == Type::OffsetBot) { 1722 jobj->set_scalar_replaceable(false); 1723 return; 1724 } 1725 // 5. Currently an object is not scalar replaceable if a LoadStore node 1726 // access its field since the field value is unknown after it. 1727 // 1728 Node* n = field->ideal_node(); 1729 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1730 if (n->fast_out(i)->is_LoadStore()) { 1731 jobj->set_scalar_replaceable(false); 1732 return; 1733 } 1734 } 1735 1736 // 6. Or the address may point to more then one object. This may produce 1737 // the false positive result (set not scalar replaceable) 1738 // since the flow-insensitive escape analysis can't separate 1739 // the case when stores overwrite the field's value from the case 1740 // when stores happened on different control branches. 1741 // 1742 // Note: it will disable scalar replacement in some cases: 1743 // 1744 // Point p[] = new Point[1]; 1745 // p[0] = new Point(); // Will be not scalar replaced 1746 // 1747 // but it will save us from incorrect optimizations in next cases: 1748 // 1749 // Point p[] = new Point[1]; 1750 // if ( x ) p[0] = new Point(); // Will be not scalar replaced 1751 // 1752 if (field->base_count() > 1) { 1753 for (BaseIterator i(field); i.has_next(); i.next()) { 1754 PointsToNode* base = i.get(); 1755 // Don't take into account LocalVar nodes which 1756 // may point to only one object which should be also 1757 // this field's base by now. 1758 if (base->is_JavaObject() && base != jobj) { 1759 // Mark all bases. 
1760 jobj->set_scalar_replaceable(false); 1761 base->set_scalar_replaceable(false); 1762 } 1763 } 1764 } 1765 } 1766 } 1767 1768 #ifdef ASSERT 1769 void ConnectionGraph::verify_connection_graph( 1770 GrowableArray<PointsToNode*>& ptnodes_worklist, 1771 GrowableArray<JavaObjectNode*>& non_escaped_worklist, 1772 GrowableArray<JavaObjectNode*>& java_objects_worklist, 1773 GrowableArray<Node*>& addp_worklist) { 1774 // Verify that graph is complete - no new edges could be added. 1775 int java_objects_length = java_objects_worklist.length(); 1776 int non_escaped_length = non_escaped_worklist.length(); 1777 int new_edges = 0; 1778 for (int next = 0; next < java_objects_length; ++next) { 1779 JavaObjectNode* ptn = java_objects_worklist.at(next); 1780 new_edges += add_java_object_edges(ptn, true); 1781 } 1782 assert(new_edges == 0, "graph was not complete"); 1783 // Verify that escape state is final. 1784 int length = non_escaped_worklist.length(); 1785 find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist); 1786 assert((non_escaped_length == non_escaped_worklist.length()) && 1787 (non_escaped_length == length) && 1788 (_worklist.length() == 0), "escape state was not final"); 1789 1790 // Verify fields information. 1791 int addp_length = addp_worklist.length(); 1792 for (int next = 0; next < addp_length; ++next ) { 1793 Node* n = addp_worklist.at(next); 1794 FieldNode* field = ptnode_adr(n->_idx)->as_Field(); 1795 if (field->is_oop()) { 1796 // Verify that field has all bases 1797 Node* base = get_addp_base(n); 1798 PointsToNode* ptn = ptnode_adr(base->_idx); 1799 if (ptn->is_JavaObject()) { 1800 assert(field->has_base(ptn->as_JavaObject()), "sanity"); 1801 } else { 1802 assert(ptn->is_LocalVar(), "sanity"); 1803 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 1804 PointsToNode* e = i.get(); 1805 if (e->is_JavaObject()) { 1806 assert(field->has_base(e->as_JavaObject()), "sanity"); 1807 } 1808 } 1809 } 1810 // Verify that all fields have initializing values. 1811 if (field->edge_count() == 0) { 1812 tty->print_cr("----------field does not have references----------"); 1813 field->dump(); 1814 for (BaseIterator i(field); i.has_next(); i.next()) { 1815 PointsToNode* base = i.get(); 1816 tty->print_cr("----------field has next base---------------------"); 1817 base->dump(); 1818 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) { 1819 tty->print_cr("----------base has fields-------------------------"); 1820 for (EdgeIterator j(base); j.has_next(); j.next()) { 1821 j.get()->dump(); 1822 } 1823 tty->print_cr("----------base has references---------------------"); 1824 for (UseIterator j(base); j.has_next(); j.next()) { 1825 j.get()->dump(); 1826 } 1827 } 1828 } 1829 for (UseIterator i(field); i.has_next(); i.next()) { 1830 i.get()->dump(); 1831 } 1832 assert(field->edge_count() > 0, "sanity"); 1833 } 1834 } 1835 } 1836 } 1837 #endif 1838 1839 // Optimize ideal graph. 1840 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist, 1841 GrowableArray<Node*>& storestore_worklist) { 1842 Compile* C = _compile; 1843 PhaseIterGVN* igvn = _igvn; 1844 if (EliminateLocks) { 1845 // Mark locks before changing ideal graph. 
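    // Illustrative example (assumption, not taken from this code): a lock
    // on an object that is proven non-escaping can be elided entirely:
    //
    //   Object lock = new Object();      // allocation does not escape
    //   synchronized (lock) { count++; } // Lock/Unlock marked non_esc_obj
    //
    // The loop below only marks such locks; the actual removal of the
    // Lock/Unlock macro nodes is left to later phases.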
1846 int cnt = C->macro_count(); 1847 for( int i=0; i < cnt; i++ ) { 1848 Node *n = C->macro_node(i); 1849 if (n->is_AbstractLock()) { // Lock and Unlock nodes 1850 AbstractLockNode* alock = n->as_AbstractLock(); 1851 if (!alock->is_non_esc_obj()) { 1852 if (not_global_escape(alock->obj_node())) { 1853 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity"); 1854 // The lock could be marked eliminated by lock coarsening 1855 // code during first IGVN before EA. Replace coarsened flag 1856 // to eliminate all associated locks/unlocks. 1857 #ifdef ASSERT 1858 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3"); 1859 #endif 1860 alock->set_non_esc_obj(); 1861 } 1862 } 1863 } 1864 } 1865 } 1866 1867 if (OptimizePtrCompare) { 1868 // Add ConI(#CC_GT) and ConI(#CC_EQ). 1869 _pcmp_neq = igvn->makecon(TypeInt::CC_GT); 1870 _pcmp_eq = igvn->makecon(TypeInt::CC_EQ); 1871 // Optimize objects compare. 1872 while (ptr_cmp_worklist.length() != 0) { 1873 Node *n = ptr_cmp_worklist.pop(); 1874 Node *res = optimize_ptr_compare(n); 1875 if (res != NULL) { 1876 #ifndef PRODUCT 1877 if (PrintOptimizePtrCompare) { 1878 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ")); 1879 if (Verbose) { 1880 n->dump(1); 1881 } 1882 } 1883 #endif 1884 igvn->replace_node(n, res); 1885 } 1886 } 1887 // cleanup 1888 if (_pcmp_neq->outcnt() == 0) 1889 igvn->hash_delete(_pcmp_neq); 1890 if (_pcmp_eq->outcnt() == 0) 1891 igvn->hash_delete(_pcmp_eq); 1892 } 1893 1894 // For MemBarStoreStore nodes added in library_call.cpp, check 1895 // escape status of associated AllocateNode and optimize out 1896 // MemBarStoreStore node if the allocated object never escapes. 1897 while (storestore_worklist.length() != 0) { 1898 Node *n = storestore_worklist.pop(); 1899 MemBarStoreStoreNode *storestore = n ->as_MemBarStoreStore(); 1900 Node *alloc = storestore->in(MemBarNode::Precedent)->in(0); 1901 assert (alloc->is_Allocate(), "storestore should point to AllocateNode"); 1902 if (not_global_escape(alloc)) { 1903 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot); 1904 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory)); 1905 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control)); 1906 igvn->register_new_node_with_optimizer(mb); 1907 igvn->replace_node(storestore, mb); 1908 } 1909 } 1910 } 1911 1912 // Optimize objects compare. 1913 Node* ConnectionGraph::optimize_ptr_compare(Node* n) { 1914 assert(OptimizePtrCompare, "sanity"); 1915 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx); 1916 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx); 1917 JavaObjectNode* jobj1 = unique_java_object(n->in(1)); 1918 JavaObjectNode* jobj2 = unique_java_object(n->in(2)); 1919 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity"); 1920 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity"); 1921 1922 // Check simple cases first. 1923 if (jobj1 != NULL) { 1924 if (jobj1->escape_state() == PointsToNode::NoEscape) { 1925 if (jobj1 == jobj2) { 1926 // Comparing the same not escaping object. 1927 return _pcmp_eq; 1928 } 1929 Node* obj = jobj1->ideal_node(); 1930 // Comparing not escaping allocation. 1931 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 1932 !ptn2->points_to(jobj1)) { 1933 return _pcmp_neq; // This includes nullness check. 
1934       }
1935     }
1936   }
1937   if (jobj2 != NULL) {
1938     if (jobj2->escape_state() == PointsToNode::NoEscape) {
1939       Node* obj = jobj2->ideal_node();
1940       // Comparing not escaping allocation.
1941       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
1942           !ptn1->points_to(jobj2)) {
1943         return _pcmp_neq; // This includes nullness check.
1944       }
1945     }
1946   }
1947   if (jobj1 != NULL && jobj1 != phantom_obj &&
1948       jobj2 != NULL && jobj2 != phantom_obj &&
1949       jobj1->ideal_node()->is_Con() &&
1950       jobj2->ideal_node()->is_Con()) {
1951     // Klass or String constants compare. Need to be careful with
1952     // compressed pointers - compare types of ConN and ConP instead of nodes.
1953     const Type* t1 = jobj1->ideal_node()->get_ptr_type();
1954     const Type* t2 = jobj2->ideal_node()->get_ptr_type();
1955     if (t1->make_ptr() == t2->make_ptr()) {
1956       return _pcmp_eq;
1957     } else {
1958       return _pcmp_neq;
1959     }
1960   }
1961   if (ptn1->meet(ptn2)) {
1962     return NULL; // Sets are not disjoint
1963   }
1964
1965   // Sets are disjoint.
1966   bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
1967   bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
1968   bool set1_has_null_ptr = ptn1->points_to(null_obj);
1969   bool set2_has_null_ptr = ptn2->points_to(null_obj);
1970   if (set1_has_unknown_ptr && set2_has_null_ptr ||
1971       set2_has_unknown_ptr && set1_has_null_ptr) {
1972     // Check nullness of unknown object.
1973     return NULL;
1974   }
1975
1976   // Disjointness by itself is not sufficient since
1977   // alias analysis is not complete for escaped objects.
1978   // Disjoint sets are definitely unrelated only when
1979   // at least one set has only non-escaping allocations.
1980   if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
1981     if (ptn1->non_escaping_allocation()) {
1982       return _pcmp_neq;
1983     }
1984   }
1985   if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
1986     if (ptn2->non_escaping_allocation()) {
1987       return _pcmp_neq;
1988     }
1989   }
1990   return NULL;
1991 }
1992
1993 // Connection Graph construction functions.
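// For illustration (hypothetical Java fragment, not from this file), code
// such as
//
//   Point p = new Point();  // Allocate            -> JavaObject node
//   p.next = q;             // AddP for field next -> Field node (is_oop)
//   Point r = b ? p : q;    // Phi                 -> LocalVar node
//
// is modeled by the constructors below, each storing its PointsToNode
// into _nodes at the ideal node's _idx.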
1994 1995 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) { 1996 PointsToNode* ptadr = _nodes.at(n->_idx); 1997 if (ptadr != NULL) { 1998 assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity"); 1999 return; 2000 } 2001 Compile* C = _compile; 2002 ptadr = new (C->comp_arena()) LocalVarNode(this, n, es); 2003 _nodes.at_put(n->_idx, ptadr); 2004 } 2005 2006 void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) { 2007 PointsToNode* ptadr = _nodes.at(n->_idx); 2008 if (ptadr != NULL) { 2009 assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity"); 2010 return; 2011 } 2012 Compile* C = _compile; 2013 ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es); 2014 _nodes.at_put(n->_idx, ptadr); 2015 } 2016 2017 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) { 2018 PointsToNode* ptadr = _nodes.at(n->_idx); 2019 if (ptadr != NULL) { 2020 assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity"); 2021 return; 2022 } 2023 bool unsafe = false; 2024 bool is_oop = is_oop_field(n, offset, &unsafe); 2025 if (unsafe) { 2026 es = PointsToNode::GlobalEscape; 2027 } 2028 Compile* C = _compile; 2029 FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop); 2030 _nodes.at_put(n->_idx, field); 2031 } 2032 2033 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es, 2034 PointsToNode* src, PointsToNode* dst) { 2035 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar"); 2036 assert((src != null_obj) && (dst != null_obj), "not for ConP NULL"); 2037 PointsToNode* ptadr = _nodes.at(n->_idx); 2038 if (ptadr != NULL) { 2039 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity"); 2040 return; 2041 } 2042 Compile* C = _compile; 2043 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es); 2044 _nodes.at_put(n->_idx, ptadr); 2045 // Add edge from arraycopy node to source object. 2046 (void)add_edge(ptadr, src); 2047 src->set_arraycopy_src(); 2048 // Add edge from destination object to arraycopy node. 2049 (void)add_edge(dst, ptadr); 2050 dst->set_arraycopy_dst(); 2051 } 2052 2053 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) { 2054 const Type* adr_type = n->as_AddP()->bottom_type(); 2055 BasicType bt = T_INT; 2056 if (offset == Type::OffsetBot) { 2057 // Check only oop fields. 2058 if (!adr_type->isa_aryptr() || 2059 (adr_type->isa_aryptr()->klass() == NULL) || 2060 adr_type->isa_aryptr()->klass()->is_obj_array_klass()) { 2061 // OffsetBot is used to reference array's element. Ignore first AddP. 2062 if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) { 2063 bt = T_OBJECT; 2064 } 2065 } 2066 } else if (offset != oopDesc::klass_offset_in_bytes()) { 2067 if (adr_type->isa_instptr()) { 2068 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field(); 2069 if (field != NULL) { 2070 bt = field->layout_type(); 2071 } else { 2072 // Check for unsafe oop field access 2073 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN)) { 2074 bt = T_OBJECT; 2075 (*unsafe) = true; 2076 } 2077 } 2078 } else if (adr_type->isa_aryptr()) { 2079 if (offset == arrayOopDesc::length_offset_in_bytes()) { 2080 // Ignore array length load. 2081 } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) { 2082 // Ignore first AddP. 
2083       } else {
2084         const Type* elemtype = adr_type->isa_aryptr()->elem();
2085         bt = elemtype->array_element_basic_type();
2086       }
2087     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2088       // Allocation initialization, ThreadLocal field access, unsafe access
2089       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN)) {
2090         bt = T_OBJECT;
2091       }
2092     }
2093   }
2094   return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
2095 }
2096
2097 // Returns the unique java object pointed to, or NULL.
2098 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2099   assert(!_collecting, "should not call during graph construction");
2100   // If the node was created after the escape computation we can't answer.
2101   uint idx = n->_idx;
2102   if (idx >= nodes_size()) {
2103     return NULL;
2104   }
2105   PointsToNode* ptn = ptnode_adr(idx);
2106   if (ptn->is_JavaObject()) {
2107     return ptn->as_JavaObject();
2108   }
2109   assert(ptn->is_LocalVar(), "sanity");
2110   // Check all java objects it points to.
2111   JavaObjectNode* jobj = NULL;
2112   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2113     PointsToNode* e = i.get();
2114     if (e->is_JavaObject()) {
2115       if (jobj == NULL) {
2116         jobj = e->as_JavaObject();
2117       } else if (jobj != e) {
2118         return NULL;
2119       }
2120     }
2121   }
2122   return jobj;
2123 }
2124
2125 // Return true if this node points only to non-escaping allocations.
2126 bool PointsToNode::non_escaping_allocation() {
2127   if (is_JavaObject()) {
2128     Node* n = ideal_node();
2129     if (n->is_Allocate() || n->is_CallStaticJava()) {
2130       return (escape_state() == PointsToNode::NoEscape);
2131     } else {
2132       return false;
2133     }
2134   }
2135   assert(is_LocalVar(), "sanity");
2136   // Check all java objects it points to.
2137   for (EdgeIterator i(this); i.has_next(); i.next()) {
2138     PointsToNode* e = i.get();
2139     if (e->is_JavaObject()) {
2140       Node* n = e->ideal_node();
2141       if ((e->escape_state() != PointsToNode::NoEscape) ||
2142           !(n->is_Allocate() || n->is_CallStaticJava())) {
2143         return false;
2144       }
2145     }
2146   }
2147   return true;
2148 }
2149
2150 // Return true if we know the node does not escape globally.
2151 bool ConnectionGraph::not_global_escape(Node *n) {
2152   assert(!_collecting, "should not call during graph construction");
2153   // If the node was created after the escape computation we can't answer.
2154   uint idx = n->_idx;
2155   if (idx >= nodes_size()) {
2156     return false;
2157   }
2158   PointsToNode* ptn = ptnode_adr(idx);
2159   PointsToNode::EscapeState es = ptn->escape_state();
2160   // If we have already computed a value, return it.
2161   if (es >= PointsToNode::GlobalEscape)
2162     return false;
2163   if (ptn->is_JavaObject()) {
2164     return true; // (es < PointsToNode::GlobalEscape);
2165   }
2166   assert(ptn->is_LocalVar(), "sanity");
2167   // Check all java objects it points to.
2168   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2169     if (i.get()->escape_state() >= PointsToNode::GlobalEscape)
2170       return false;
2171   }
2172   return true;
2173 }
2174
2175
2176 // Helper functions
2177
2178 // Return true if this node is the specified java object or points to it.
2179 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
2180   if (is_JavaObject()) {
2181     return (this == ptn);
2182   }
2183   assert(is_LocalVar() || is_Field(), "sanity");
2184   for (EdgeIterator i(this); i.has_next(); i.next()) {
2185     if (i.get() == ptn)
2186       return true;
2187   }
2188   return false;
2189 }
2190
2191 // Return true if one node points to another.
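// For example (illustration only): if LocalVar A points to {O1, O2} and
// LocalVar B points to {O2, O3}, then A->meet(B) returns true because the
// points-to sets intersect at O2; for disjoint sets it returns false.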
2192 bool PointsToNode::meet(PointsToNode* ptn) { 2193 if (this == ptn) { 2194 return true; 2195 } else if (ptn->is_JavaObject()) { 2196 return this->points_to(ptn->as_JavaObject()); 2197 } else if (this->is_JavaObject()) { 2198 return ptn->points_to(this->as_JavaObject()); 2199 } 2200 assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity"); 2201 int ptn_count = ptn->edge_count(); 2202 for (EdgeIterator i(this); i.has_next(); i.next()) { 2203 PointsToNode* this_e = i.get(); 2204 for (int j = 0; j < ptn_count; j++) { 2205 if (this_e == ptn->edge(j)) 2206 return true; 2207 } 2208 } 2209 return false; 2210 } 2211 2212 #ifdef ASSERT 2213 // Return true if bases point to this java object. 2214 bool FieldNode::has_base(JavaObjectNode* jobj) const { 2215 for (BaseIterator i(this); i.has_next(); i.next()) { 2216 if (i.get() == jobj) 2217 return true; 2218 } 2219 return false; 2220 } 2221 #endif 2222 2223 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) { 2224 const Type *adr_type = phase->type(adr); 2225 if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && 2226 adr->in(AddPNode::Address)->is_Proj() && 2227 adr->in(AddPNode::Address)->in(0)->is_Allocate()) { 2228 // We are computing a raw address for a store captured by an Initialize 2229 // compute an appropriate address type. AddP cases #3 and #5 (see below). 2230 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 2231 assert(offs != Type::OffsetBot || 2232 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), 2233 "offset must be a constant or it is initialization of array"); 2234 return offs; 2235 } 2236 const TypePtr *t_ptr = adr_type->isa_ptr(); 2237 assert(t_ptr != NULL, "must be a pointer type"); 2238 return t_ptr->offset(); 2239 } 2240 2241 Node* ConnectionGraph::get_addp_base(Node *addp) { 2242 assert(addp->is_AddP(), "must be AddP"); 2243 // 2244 // AddP cases for Base and Address inputs: 2245 // case #1. Direct object's field reference: 2246 // Allocate 2247 // | 2248 // Proj #5 ( oop result ) 2249 // | 2250 // CheckCastPP (cast to instance type) 2251 // | | 2252 // AddP ( base == address ) 2253 // 2254 // case #2. Indirect object's field reference: 2255 // Phi 2256 // | 2257 // CastPP (cast to instance type) 2258 // | | 2259 // AddP ( base == address ) 2260 // 2261 // case #3. Raw object's field reference for Initialize node: 2262 // Allocate 2263 // | 2264 // Proj #5 ( oop result ) 2265 // top | 2266 // \ | 2267 // AddP ( base == top ) 2268 // 2269 // case #4. Array's element reference: 2270 // {CheckCastPP | CastPP} 2271 // | | | 2272 // | AddP ( array's element offset ) 2273 // | | 2274 // AddP ( array's offset ) 2275 // 2276 // case #5. Raw object's field reference for arraycopy stub call: 2277 // The inline_native_clone() case when the arraycopy stub is called 2278 // after the allocation before Initialize and CheckCastPP nodes. 2279 // Allocate 2280 // | 2281 // Proj #5 ( oop result ) 2282 // | | 2283 // AddP ( base == address ) 2284 // 2285 // case #6. Constant Pool, ThreadLocal, CastX2P or 2286 // Raw object's field reference: 2287 // {ConP, ThreadLocal, CastX2P, raw Load} 2288 // top | 2289 // \ | 2290 // AddP ( base == top ) 2291 // 2292 // case #7. Klass's field reference. 2293 // LoadKlass 2294 // | | 2295 // AddP ( base == address ) 2296 // 2297 // case #8. narrow Klass's field reference. 
2298 // LoadNKlass 2299 // | 2300 // DecodeN 2301 // | | 2302 // AddP ( base == address ) 2303 // 2304 Node *base = addp->in(AddPNode::Base); 2305 if (base->uncast()->is_top()) { // The AddP case #3 and #6. 2306 base = addp->in(AddPNode::Address); 2307 while (base->is_AddP()) { 2308 // Case #6 (unsafe access) may have several chained AddP nodes. 2309 assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only"); 2310 base = base->in(AddPNode::Address); 2311 } 2312 Node* uncast_base = base->uncast(); 2313 int opcode = uncast_base->Opcode(); 2314 assert(opcode == Op_ConP || opcode == Op_ThreadLocal || 2315 opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() || 2316 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) || 2317 (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity"); 2318 } 2319 return base; 2320 } 2321 2322 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) { 2323 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes"); 2324 Node* addp2 = addp->raw_out(0); 2325 if (addp->outcnt() == 1 && addp2->is_AddP() && 2326 addp2->in(AddPNode::Base) == n && 2327 addp2->in(AddPNode::Address) == addp) { 2328 assert(addp->in(AddPNode::Base) == n, "expecting the same base"); 2329 // 2330 // Find array's offset to push it on worklist first and 2331 // as result process an array's element offset first (pushed second) 2332 // to avoid CastPP for the array's offset. 2333 // Otherwise the inserted CastPP (LocalVar) will point to what 2334 // the AddP (Field) points to. Which would be wrong since 2335 // the algorithm expects the CastPP has the same point as 2336 // as AddP's base CheckCastPP (LocalVar). 2337 // 2338 // ArrayAllocation 2339 // | 2340 // CheckCastPP 2341 // | 2342 // memProj (from ArrayAllocation CheckCastPP) 2343 // | || 2344 // | || Int (element index) 2345 // | || | ConI (log(element size)) 2346 // | || | / 2347 // | || LShift 2348 // | || / 2349 // | AddP (array's element offset) 2350 // | | 2351 // | | ConI (array's offset: #12(32-bits) or #24(64-bits)) 2352 // | / / 2353 // AddP (array's offset) 2354 // | 2355 // Load/Store (memory operation on array's element) 2356 // 2357 return addp2; 2358 } 2359 return NULL; 2360 } 2361 2362 // 2363 // Adjust the type and inputs of an AddP which computes the 2364 // address of a field of an instance 2365 // 2366 bool ConnectionGraph::split_AddP(Node *addp, Node *base) { 2367 PhaseGVN* igvn = _igvn; 2368 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr(); 2369 assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr"); 2370 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr(); 2371 if (t == NULL) { 2372 // We are computing a raw address for a store captured by an Initialize 2373 // compute an appropriate address type (cases #3 and #5). 2374 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer"); 2375 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation"); 2376 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot); 2377 assert(offs != Type::OffsetBot, "offset must be a constant"); 2378 t = base_t->add_offset(offs)->is_oopptr(); 2379 } 2380 int inst_id = base_t->instance_id(); 2381 assert(!t->is_known_instance() || t->instance_id() == inst_id, 2382 "old type must be non-instance or match new type"); 2383 2384 // The type 't' could be subclass of 'base_t'. 
2385   // As a result t->offset() could be larger than base_t's size and it will
2386   // cause the failure in add_offset() with narrow oops since TypeOopPtr()
2387   // constructor verifies correctness of the offset.
2388   //
2389   // It could happen on a subclass's branch (from the type profiling
2390   // inlining) which was not eliminated during parsing since the exactness
2391   // of the allocation type was not propagated to the subclass type check.
2392   //
2393   // Or the type 't' might not be related to 'base_t' at all.
2394   // It could happen when the CHA type differs from the MDO type on a dead path
2395   // (for example, from instanceof check) which is not collapsed during parsing.
2396   //
2397   // Do nothing for such AddP node and don't process its users since
2398   // this code branch will go away.
2399   //
2400   if (!t->is_known_instance() &&
2401       !base_t->klass()->is_subtype_of(t->klass())) {
2402     return false; // bail out
2403   }
2404   const TypePtr* tinst = base_t->add_offset(t->offset());
2405   if (tinst->isa_aryptr()) {
2406     // In the case of a flattened value type array, each field has its
2407     // own slice so we need to keep track of the field being accessed.
2408     tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset());
2409   }
2410
2411   // Do NOT remove the next line: ensure a new alias index is allocated
2412   // for the instance type. Note: C++ will not remove it since the call
2413   // has side effect.
2414   int alias_idx = _compile->get_alias_index(tinst);
2415   igvn->set_type(addp, tinst);
2416   // record the allocation in the node map
2417   set_map(addp, get_map(base->_idx));
2418   // Set addp's Base and Address to 'base'.
2419   Node *abase = addp->in(AddPNode::Base);
2420   Node *adr = addp->in(AddPNode::Address);
2421   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2422       adr->in(0)->_idx == (uint)inst_id) {
2423     // Skip AddP cases #3 and #5.
2424   } else {
2425     assert(!abase->is_top(), "sanity"); // AddP case #3
2426     if (abase != base) {
2427       igvn->hash_delete(addp);
2428       addp->set_req(AddPNode::Base, base);
2429       if (abase == adr) {
2430         addp->set_req(AddPNode::Address, base);
2431       } else {
2432         // AddP case #4 (adr is array's element offset AddP node)
2433 #ifdef ASSERT
2434         const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
2435         assert(adr->is_AddP() && atype != NULL &&
2436                atype->instance_id() == inst_id, "array's element offset should be processed first");
2437 #endif
2438       }
2439       igvn->hash_insert(addp);
2440     }
2441   }
2442   // Put on IGVN worklist since at least addp's type was changed above.
2443   record_for_optimizer(addp);
2444   return true;
2445 }
2446
2447 //
2448 // Create a new version of orig_phi if necessary. Returns either the newly
2449 // created phi or an existing phi. Sets new_created to indicate whether a new
2450 // phi was created. Cache the last newly created phi in the node map.
2451 //
2452 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {
2453   Compile *C = _compile;
2454   PhaseGVN* igvn = _igvn;
2455   new_created = false;
2456   int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
2457   // nothing to do if orig_phi is bottom memory or matches alias_idx
2458   if (phi_alias_idx == alias_idx) {
2459     return orig_phi;
2460   }
2461   // Have we recently created a Phi for this alias index?
2462   PhiNode *result = get_map_phi(orig_phi->_idx);
2463   if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
2464     return result;
2465   }
2466   // Previous check may fail when the same wide memory Phi was split into Phis
2467   // for different memory slices. Search all Phis for this region.
2468   if (result != NULL) {
2469     Node* region = orig_phi->in(0);
2470     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2471       Node* phi = region->fast_out(i);
2472       if (phi->is_Phi() &&
2473           C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
2474         assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
2475         return phi->as_Phi();
2476       }
2477     }
2478   }
2479   if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
2480     if (C->do_escape_analysis() == true && !C->failing()) {
2481       // Retry compilation without escape analysis.
2482       // If this is the first failure, the sentinel string will "stick"
2483       // to the Compile object, and the C2Compiler will see it and retry.
2484       C->record_failure(C2Compiler::retry_no_escape_analysis());
2485     }
2486     return NULL;
2487   }
2488   orig_phi_worklist.append_if_missing(orig_phi);
2489   const TypePtr *atype = C->get_adr_type(alias_idx);
2490   result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
2491   C->copy_node_notes_to(result, orig_phi);
2492   igvn->set_type(result, result->bottom_type());
2493   record_for_optimizer(result);
2494   set_map(orig_phi, result);
2495   new_created = true;
2496   return result;
2497 }
2498
2499 //
2500 // Return a new version of Memory Phi "orig_phi" with the inputs having the
2501 // specified alias index.
2502 //
2503 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {
2504   assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
2505   Compile *C = _compile;
2506   PhaseGVN* igvn = _igvn;
2507   bool new_phi_created;
2508   PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
2509   if (!new_phi_created) {
2510     return result;
2511   }
2512   GrowableArray<PhiNode *> phi_list;
2513   GrowableArray<uint> cur_input;
2514   PhiNode *phi = orig_phi;
2515   uint idx = 1;
2516   bool finished = false;
2517   while(!finished) {
2518     while (idx < phi->req()) {
2519       Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
2520       if (mem != NULL && mem->is_Phi()) {
2521         PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
2522         if (new_phi_created) {
2523           // Found a phi for which we created a new split; push the current
2524           // one on the worklist and begin processing the new one.
2525           phi_list.push(phi);
2526           cur_input.push(idx);
2527           phi = mem->as_Phi();
2528           result = newphi;
2529           idx = 1;
2530           continue;
2531         } else {
2532           mem = newphi;
2533         }
2534       }
2535       if (C->failing()) {
2536         return NULL;
2537       }
2538       result->set_req(idx++, mem);
2539     }
2540 #ifdef ASSERT
2541     // verify that the new Phi has an input for each input of the original
2542     assert( phi->req() == result->req(), "must have same number of inputs.");
2543     assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
2544 #endif
2545     // Check if all new phi's inputs have the specified alias index.
2546     // Otherwise use the old phi.
2547 for (uint i = 1; i < phi->req(); i++) { 2548 Node* in = result->in(i); 2549 assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond."); 2550 } 2551 // we have finished processing a Phi, see if there are any more to do 2552 finished = (phi_list.length() == 0 ); 2553 if (!finished) { 2554 phi = phi_list.pop(); 2555 idx = cur_input.pop(); 2556 PhiNode *prev_result = get_map_phi(phi->_idx); 2557 prev_result->set_req(idx++, result); 2558 result = prev_result; 2559 } 2560 } 2561 return result; 2562 } 2563 2564 // 2565 // The next methods are derived from methods in MemNode. 2566 // 2567 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) { 2568 Node *mem = mmem; 2569 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally 2570 // means an array I have not precisely typed yet. Do not do any 2571 // alias stuff with it any time soon. 2572 if (toop->base() != Type::AnyPtr && 2573 !(toop->klass() != NULL && 2574 toop->klass()->is_java_lang_Object() && 2575 toop->offset() == Type::OffsetBot)) { 2576 mem = mmem->memory_at(alias_idx); 2577 // Update input if it is progress over what we have now 2578 } 2579 return mem; 2580 } 2581 2582 // 2583 // Move memory users to their memory slices. 2584 // 2585 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) { 2586 Compile* C = _compile; 2587 PhaseGVN* igvn = _igvn; 2588 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 2589 assert(tp != NULL, "ptr type"); 2590 int alias_idx = C->get_alias_index(tp); 2591 int general_idx = C->get_general_index(alias_idx); 2592 2593 // Move users first 2594 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2595 Node* use = n->fast_out(i); 2596 if (use->is_MergeMem()) { 2597 MergeMemNode* mmem = use->as_MergeMem(); 2598 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); 2599 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 2600 continue; // Nothing to do 2601 } 2602 // Replace previous general reference to mem node. 2603 uint orig_uniq = C->unique(); 2604 Node* m = find_inst_mem(n, general_idx, orig_phis); 2605 assert(orig_uniq == C->unique(), "no new nodes"); 2606 mmem->set_memory_at(general_idx, m); 2607 --imax; 2608 --i; 2609 } else if (use->is_MemBar()) { 2610 assert(!use->is_Initialize(), "initializing stores should not be moved"); 2611 if (use->req() > MemBarNode::Precedent && 2612 use->in(MemBarNode::Precedent) == n) { 2613 // Don't move related membars. 2614 record_for_optimizer(use); 2615 continue; 2616 } 2617 tp = use->as_MemBar()->adr_type()->isa_ptr(); 2618 if (tp != NULL && C->get_alias_index(tp) == alias_idx || 2619 alias_idx == general_idx) { 2620 continue; // Nothing to do 2621 } 2622 // Move to general memory slice. 2623 uint orig_uniq = C->unique(); 2624 Node* m = find_inst_mem(n, general_idx, orig_phis); 2625 assert(orig_uniq == C->unique(), "no new nodes"); 2626 igvn->hash_delete(use); 2627 imax -= use->replace_edge(n, m); 2628 igvn->hash_insert(use); 2629 record_for_optimizer(use); 2630 --i; 2631 #ifdef ASSERT 2632 } else if (use->is_Mem()) { 2633 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) { 2634 // Don't move related cardmark. 2635 continue; 2636 } 2637 // Memory nodes should have new memory input. 
2638       tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
2639       assert(tp != NULL, "ptr type");
2640       int idx = C->get_alias_index(tp);
2641       assert(get_map(use->_idx) != NULL || idx == alias_idx,
2642              "Following memory nodes should have new memory input or be on the same memory slice");
2643     } else if (use->is_Phi()) {
2644       // Phi nodes should be split and moved already.
2645       tp = use->as_Phi()->adr_type()->isa_ptr();
2646       assert(tp != NULL, "ptr type");
2647       int idx = C->get_alias_index(tp);
2648       assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
2649     } else {
2650       use->dump();
2651       assert(false, "should not be here");
2652 #endif
2653     }
2654   }
2655 }
2656
2657 //
2658 // Search the memory chain of "mem" to find a MemNode whose address
2659 // corresponds to the specified alias index.
2660 //
2661 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {
2662   if (orig_mem == NULL)
2663     return orig_mem;
2664   Compile* C = _compile;
2665   PhaseGVN* igvn = _igvn;
2666   const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
2667   bool is_instance = (toop != NULL) && toop->is_known_instance();
2668   Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
2669   Node *prev = NULL;
2670   Node *result = orig_mem;
2671   while (prev != result) {
2672     prev = result;
2673     if (result == start_mem)
2674       break; // hit one of our sentinels
2675     if (result->is_Mem()) {
2676       const Type *at = igvn->type(result->in(MemNode::Address));
2677       if (at == Type::TOP)
2678         break; // Dead
2679       assert (at->isa_ptr() != NULL, "pointer type required.");
2680       int idx = C->get_alias_index(at->is_ptr());
2681       if (idx == alias_idx)
2682         break; // Found
2683       if (!is_instance && (at->isa_oopptr() == NULL ||
2684                            !at->is_oopptr()->is_known_instance())) {
2685         break; // Do not skip store to general memory slice.
2686       }
2687       result = result->in(MemNode::Memory);
2688     }
2689     if (!is_instance)
2690       continue; // don't search further for non-instance types
2691     // skip over a call which does not affect this memory slice
2692     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
2693       Node *proj_in = result->in(0);
2694       if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
2695         break; // hit one of our sentinels
2696       } else if (proj_in->is_Call()) {
2697         // ArrayCopy node processed here as well
2698         CallNode *call = proj_in->as_Call();
2699         if (!call->may_modify(toop, igvn)) {
2700           result = call->in(TypeFunc::Memory);
2701         }
2702       } else if (proj_in->is_Initialize()) {
2703         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
2704         // Stop if this is the initialization for the object instance
2705         // which contains this memory slice, otherwise skip over it.
2706 if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) { 2707 result = proj_in->in(TypeFunc::Memory); 2708 } 2709 } else if (proj_in->is_MemBar()) { 2710 if (proj_in->in(TypeFunc::Memory)->is_MergeMem() && 2711 proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->is_Proj() && 2712 proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->in(0)->is_ArrayCopy()) { 2713 // clone 2714 ArrayCopyNode* ac = proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->in(0)->as_ArrayCopy(); 2715 if (ac->may_modify(toop, igvn)) { 2716 break; 2717 } 2718 } 2719 result = proj_in->in(TypeFunc::Memory); 2720 } 2721 } else if (result->is_MergeMem()) { 2722 MergeMemNode *mmem = result->as_MergeMem(); 2723 result = step_through_mergemem(mmem, alias_idx, toop); 2724 if (result == mmem->base_memory()) { 2725 // Didn't find instance memory, search through general slice recursively. 2726 result = mmem->memory_at(C->get_general_index(alias_idx)); 2727 result = find_inst_mem(result, alias_idx, orig_phis); 2728 if (C->failing()) { 2729 return NULL; 2730 } 2731 mmem->set_memory_at(alias_idx, result); 2732 } 2733 } else if (result->is_Phi() && 2734 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { 2735 Node *un = result->as_Phi()->unique_input(igvn); 2736 if (un != NULL) { 2737 orig_phis.append_if_missing(result->as_Phi()); 2738 result = un; 2739 } else { 2740 break; 2741 } 2742 } else if (result->is_ClearArray()) { 2743 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) { 2744 // Can not bypass initialization of the instance 2745 // we are looking for. 2746 break; 2747 } 2748 // Otherwise skip it (the call updated 'result' value). 2749 } else if (result->Opcode() == Op_SCMemProj) { 2750 Node* mem = result->in(0); 2751 Node* adr = NULL; 2752 if (mem->is_LoadStore()) { 2753 adr = mem->in(MemNode::Address); 2754 } else { 2755 assert(mem->Opcode() == Op_EncodeISOArray || 2756 mem->Opcode() == Op_StrCompressedCopy, "sanity"); 2757 adr = mem->in(3); // Memory edge corresponds to destination array 2758 } 2759 const Type *at = igvn->type(adr); 2760 if (at != Type::TOP) { 2761 assert(at->isa_ptr() != NULL, "pointer type required."); 2762 int idx = C->get_alias_index(at->is_ptr()); 2763 if (idx == alias_idx) { 2764 // Assert in debug mode 2765 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field"); 2766 break; // In product mode return SCMemProj node 2767 } 2768 } 2769 result = mem->in(MemNode::Memory); 2770 } else if (result->Opcode() == Op_StrInflatedCopy) { 2771 Node* adr = result->in(3); // Memory edge corresponds to destination array 2772 const Type *at = igvn->type(adr); 2773 if (at != Type::TOP) { 2774 assert(at->isa_ptr() != NULL, "pointer type required."); 2775 int idx = C->get_alias_index(at->is_ptr()); 2776 if (idx == alias_idx) { 2777 // Assert in debug mode 2778 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field"); 2779 break; // In product mode return SCMemProj node 2780 } 2781 } 2782 result = result->in(MemNode::Memory); 2783 } 2784 } 2785 if (result->is_Phi()) { 2786 PhiNode *mphi = result->as_Phi(); 2787 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); 2788 const TypePtr *t = mphi->adr_type(); 2789 if (!is_instance) { 2790 // Push all non-instance Phis on the orig_phis worklist to update inputs 2791 // during Phase 4 if needed. 
2792       orig_phis.append_if_missing(mphi);
2793     } else if (C->get_alias_index(t) != alias_idx) {
2794       // Create a new Phi with the specified alias index type.
2795       result = split_memory_phi(mphi, alias_idx, orig_phis);
2796     }
2797   }
2798   // the result is either a MemNode, PhiNode or InitializeNode.
2799   return result;
2800 }
2801
2802 //
2803 // Convert the types of unescaped objects to instance types where possible,
2804 // propagate the new type information through the graph, and update memory
2805 // edges and MergeMem inputs to reflect the new type.
2806 //
2807 // We start with allocations (and calls which may be allocations) on alloc_worklist.
2808 // The processing is done in 4 phases:
2809 //
2810 // Phase 1:  Process possible allocations from alloc_worklist.  Create instance
2811 //           types for the CheckCastPP for allocations where possible.
2812 //           Propagate the new types through users as follows:
2813 //              casts and Phi:  push users on alloc_worklist
2814 //              AddP:  cast Base and Address inputs to the instance type
2815 //                     push any AddP users on alloc_worklist and push any memnode
2816 //                     users onto memnode_worklist.
2817 // Phase 2:  Process MemNode's from memnode_worklist. Compute new address type and
2818 //           search the Memory chain for a store with the appropriate
2819 //           address type.  If a Phi is found, create a new version with
2820 //           the appropriate memory slices from each of the Phi inputs.
2821 //           For stores, process the users as follows:
2822 //              MemNode:  push on memnode_worklist
2823 //              MergeMem: push on mergemem_worklist
2824 // Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory slice
2825 //           moving the first node encountered of each instance type to the
2826 //           input corresponding to its alias index (i.e. to the appropriate
2827 //           memory slice).
2828 // Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
2829 //
2830 // In the following example, the CheckCastPP nodes are the cast of allocation
2831 // results and the allocation of node 29 is unescaped and eligible to be an
2832 // instance type.
2833 //
2834 // We start with:
2835 //
2836 //     7 Parm #memory
2837 //    10 ConI "12"
2838 //    19 CheckCastPP "Foo"
2839 //    20 AddP _ 19 19 10 Foo+12 alias_index=4
2840 //    29 CheckCastPP "Foo"
2841 //    30 AddP _ 29 29 10 Foo+12 alias_index=4
2842 //
2843 //    40 StoreP 25 7 20 ... alias_index=4
2844 //    50 StoreP 35 40 30 ... alias_index=4
2845 //    60 StoreP 45 50 20 ... alias_index=4
2846 //    70 LoadP _ 60 30 ... alias_index=4
2847 //    80 Phi 75 50 60 Memory alias_index=4
2848 //    90 LoadP _ 80 30 ... alias_index=4
2849 //   100 LoadP _ 80 20 ... alias_index=4
2850 //
2851 //
2852 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24
2853 // and creating a new alias index for node 30. This gives:
2854 //
2855 //     7 Parm #memory
2856 //    10 ConI "12"
2857 //    19 CheckCastPP "Foo"
2858 //    20 AddP _ 19 19 10 Foo+12 alias_index=4
2859 //    29 CheckCastPP "Foo" iid=24
2860 //    30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
2861 //
2862 //    40 StoreP 25 7 20 ... alias_index=4
2863 //    50 StoreP 35 40 30 ... alias_index=6
2864 //    60 StoreP 45 50 20 ... alias_index=4
2865 //    70 LoadP _ 60 30 ... alias_index=6
2866 //    80 Phi 75 50 60 Memory alias_index=4
2867 //    90 LoadP _ 80 30 ... alias_index=6
2868 //   100 LoadP _ 80 20 ... alias_index=4
2869 //
2870 // In phase 2, new memory inputs are computed for the loads and stores,
2871 // and a new version of the phi is created.
In phase 4, the inputs to 2872 // node 80 are updated and then the memory nodes are updated with the 2873 // values computed in phase 2. This results in: 2874 // 2875 // 7 Parm #memory 2876 // 10 ConI "12" 2877 // 19 CheckCastPP "Foo" 2878 // 20 AddP _ 19 19 10 Foo+12 alias_index=4 2879 // 29 CheckCastPP "Foo" iid=24 2880 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24 2881 // 2882 // 40 StoreP 25 7 20 ... alias_index=4 2883 // 50 StoreP 35 7 30 ... alias_index=6 2884 // 60 StoreP 45 40 20 ... alias_index=4 2885 // 70 LoadP _ 50 30 ... alias_index=6 2886 // 80 Phi 75 40 60 Memory alias_index=4 2887 // 120 Phi 75 50 50 Memory alias_index=6 2888 // 90 LoadP _ 120 30 ... alias_index=6 2889 // 100 LoadP _ 80 20 ... alias_index=4 2890 // 2891 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, GrowableArray<ArrayCopyNode*> &arraycopy_worklist) { 2892 GrowableArray<Node *> memnode_worklist; 2893 GrowableArray<PhiNode *> orig_phis; 2894 PhaseIterGVN *igvn = _igvn; 2895 uint new_index_start = (uint) _compile->num_alias_types(); 2896 Arena* arena = Thread::current()->resource_area(); 2897 VectorSet visited(arena); 2898 ideal_nodes.clear(); // Reset for use with set_map/get_map. 2899 uint unique_old = _compile->unique(); 2900 2901 // Phase 1: Process possible allocations from alloc_worklist. 2902 // Create instance types for the CheckCastPP for allocations where possible. 2903 // 2904 // (Note: don't forget to change the order of the second AddP node on 2905 // the alloc_worklist if the order of the worklist processing is changed, 2906 // see the comment in find_second_addp().) 2907 // 2908 while (alloc_worklist.length() != 0) { 2909 Node *n = alloc_worklist.pop(); 2910 uint ni = n->_idx; 2911 if (n->is_Call()) { 2912 CallNode *alloc = n->as_Call(); 2913 // copy escape information to call node 2914 PointsToNode* ptn = ptnode_adr(alloc->_idx); 2915 PointsToNode::EscapeState es = ptn->escape_state(); 2916 // We have an allocation or call which returns a Java object, 2917 // see if it is unescaped. 2918 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) 2919 continue; 2920 // Find CheckCastPP for the allocate or for the return value of a call 2921 n = alloc->result_cast(); 2922 if (n == NULL) { // No uses except Initialize node 2923 if (alloc->is_Allocate()) { 2924 // Set the scalar_replaceable flag for allocation 2925 // so it could be eliminated if it has no uses. 2926 alloc->as_Allocate()->_is_scalar_replaceable = true; 2927 } 2928 if (alloc->is_CallStaticJava()) { 2929 // Set the scalar_replaceable flag for boxing method 2930 // so it could be eliminated if it has no uses. 2931 alloc->as_CallStaticJava()->_is_scalar_replaceable = true; 2932 } 2933 continue; 2934 } 2935 if (!n->is_CheckCastPP()) { // not unique CheckCastPP. 2936 assert(!alloc->is_Allocate(), "allocation should have unique type"); 2937 continue; 2938 } 2939 2940 // The inline code for Object.clone() casts the allocation result to 2941 // java.lang.Object and then to the actual type of the allocated 2942 // object. Detect this case and use the second cast. 2943 // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when 2944 // the allocation result is cast to java.lang.Object and then 2945 // to the actual Array type. 
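      // Illustrative Java shapes for these two cases (assumption, shown
      // only to clarify the IR, not taken from this code):
      //
      //   Point p = (Point) q.clone();   // cast to Object, then to Point
      //   int[] a = (int[]) Array.newInstance(int.class, n);
      //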
2946 if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL 2947 && (alloc->is_AllocateArray() || 2948 igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) { 2949 Node *cast2 = NULL; 2950 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2951 Node *use = n->fast_out(i); 2952 if (use->is_CheckCastPP()) { 2953 cast2 = use; 2954 break; 2955 } 2956 } 2957 if (cast2 != NULL) { 2958 n = cast2; 2959 } else { 2960 // Non-scalar replaceable if the allocation type is unknown statically 2961 // (reflection allocation), the object can't be restored during 2962 // deoptimization without precise type. 2963 continue; 2964 } 2965 } 2966 2967 const TypeOopPtr *t = igvn->type(n)->isa_oopptr(); 2968 if (t == NULL) 2969 continue; // not a TypeOopPtr 2970 if (!t->klass_is_exact()) 2971 continue; // not an unique type 2972 2973 if (alloc->is_Allocate()) { 2974 // Set the scalar_replaceable flag for allocation 2975 // so it could be eliminated. 2976 alloc->as_Allocate()->_is_scalar_replaceable = true; 2977 } 2978 if (alloc->is_CallStaticJava()) { 2979 // Set the scalar_replaceable flag for boxing method 2980 // so it could be eliminated. 2981 alloc->as_CallStaticJava()->_is_scalar_replaceable = true; 2982 } 2983 set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state 2984 // in order for an object to be scalar-replaceable, it must be: 2985 // - a direct allocation (not a call returning an object) 2986 // - non-escaping 2987 // - eligible to be a unique type 2988 // - not determined to be ineligible by escape analysis 2989 set_map(alloc, n); 2990 set_map(n, alloc); 2991 const TypeOopPtr* tinst = t->cast_to_instance_id(ni); 2992 igvn->hash_delete(n); 2993 igvn->set_type(n, tinst); 2994 n->raise_bottom_type(tinst); 2995 igvn->hash_insert(n); 2996 record_for_optimizer(n); 2997 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) { 2998 2999 // First, put on the worklist all Field edges from Connection Graph 3000 // which is more accurate than putting immediate users from Ideal Graph. 3001 for (EdgeIterator e(ptn); e.has_next(); e.next()) { 3002 PointsToNode* tgt = e.get(); 3003 if (tgt->is_Arraycopy()) { 3004 continue; 3005 } 3006 Node* use = tgt->ideal_node(); 3007 assert(tgt->is_Field() && use->is_AddP(), 3008 "only AddP nodes are Field edges in CG"); 3009 if (use->outcnt() > 0) { // Don't process dead nodes 3010 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base)); 3011 if (addp2 != NULL) { 3012 assert(alloc->is_AllocateArray(),"array allocation was expected"); 3013 alloc_worklist.append_if_missing(addp2); 3014 } 3015 alloc_worklist.append_if_missing(use); 3016 } 3017 } 3018 3019 // An allocation may have an Initialize which has raw stores. Scan 3020 // the users of the raw allocation result and push AddP users 3021 // on alloc_worklist. 
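        // Typical raw-store shape (illustration; compare AddP case #3 in
        // get_addp_base() above):
        //
        //   Allocate -> Proj #5 (raw result) -> AddP(top, rawoop, off) -> StoreP
        //
        // The AddP users collected here are later retyped by split_AddP().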
3022 Node *raw_result = alloc->proj_out(TypeFunc::Parms); 3023 assert (raw_result != NULL, "must have an allocation result"); 3024 for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) { 3025 Node *use = raw_result->fast_out(i); 3026 if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes 3027 Node* addp2 = find_second_addp(use, raw_result); 3028 if (addp2 != NULL) { 3029 assert(alloc->is_AllocateArray(),"array allocation was expected"); 3030 alloc_worklist.append_if_missing(addp2); 3031 } 3032 alloc_worklist.append_if_missing(use); 3033 } else if (use->is_MemBar()) { 3034 memnode_worklist.append_if_missing(use); 3035 } 3036 } 3037 } 3038 } else if (n->is_AddP()) { 3039 JavaObjectNode* jobj = unique_java_object(get_addp_base(n)); 3040 if (jobj == NULL || jobj == phantom_obj) { 3041 #ifdef ASSERT 3042 ptnode_adr(get_addp_base(n)->_idx)->dump(); 3043 ptnode_adr(n->_idx)->dump(); 3044 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation"); 3045 #endif 3046 _compile->record_failure(C2Compiler::retry_no_escape_analysis()); 3047 return; 3048 } 3049 Node *base = get_map(jobj->idx()); // CheckCastPP node 3050 if (!split_AddP(n, base)) continue; // wrong type from dead path 3051 } else if (n->is_Phi() || 3052 n->is_CheckCastPP() || 3053 n->is_EncodeP() || 3054 n->is_DecodeN() || 3055 (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) { 3056 if (visited.test_set(n->_idx)) { 3057 assert(n->is_Phi(), "loops only through Phi's"); 3058 continue; // already processed 3059 } 3060 JavaObjectNode* jobj = unique_java_object(n); 3061 if (jobj == NULL || jobj == phantom_obj) { 3062 #ifdef ASSERT 3063 ptnode_adr(n->_idx)->dump(); 3064 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation"); 3065 #endif 3066 _compile->record_failure(C2Compiler::retry_no_escape_analysis()); 3067 return; 3068 } else { 3069 Node *val = get_map(jobj->idx()); // CheckCastPP node 3070 TypeNode *tn = n->as_Type(); 3071 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr(); 3072 assert(tinst != NULL && tinst->is_known_instance() && 3073 tinst->instance_id() == jobj->idx() , "instance type expected."); 3074 3075 const Type *tn_type = igvn->type(tn); 3076 const TypeOopPtr *tn_t; 3077 if (tn_type->isa_narrowoop()) { 3078 tn_t = tn_type->make_ptr()->isa_oopptr(); 3079 } else { 3080 tn_t = tn_type->isa_oopptr(); 3081 } 3082 if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) { 3083 if (tn_type->isa_narrowoop()) { 3084 tn_type = tinst->make_narrowoop(); 3085 } else { 3086 tn_type = tinst; 3087 } 3088 igvn->hash_delete(tn); 3089 igvn->set_type(tn, tn_type); 3090 tn->set_type(tn_type); 3091 igvn->hash_insert(tn); 3092 record_for_optimizer(n); 3093 } else { 3094 assert(tn_type == TypePtr::NULL_PTR || 3095 tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()), 3096 "unexpected type"); 3097 continue; // Skip dead path with different type 3098 } 3099 } 3100 } else { 3101 debug_only(n->dump();) 3102 assert(false, "EA: unexpected node"); 3103 continue; 3104 } 3105 // push allocation's users on appropriate worklist 3106 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3107 Node *use = n->fast_out(i); 3108 if(use->is_Mem() && use->in(MemNode::Address) == n) { 3109 // Load/store to instance's field 3110 memnode_worklist.append_if_missing(use); 3111 } else if (use->is_MemBar()) { 3112 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge 3113 memnode_worklist.append_if_missing(use); 3114 } 3115 } else if (use->is_AddP() && use->outcnt() > 0) { 
        Node* addp2 = find_second_addp(use, n);
        if (addp2 != NULL) {
          alloc_worklist.append_if_missing(addp2);
        }
        alloc_worklist.append_if_missing(use);
      } else if (use->is_Phi() ||
                 use->is_CheckCastPP() ||
                 use->is_EncodeNarrowPtr() ||
                 use->is_DecodeNarrowPtr() ||
                 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
        alloc_worklist.append_if_missing(use);
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->is_SafePoint()) {
        // Look for MergeMem nodes for calls which reference unique allocation
        // (through CheckCastPP nodes) even for debug info.
        Node* m = use->in(TypeFunc::Memory);
        if (m->is_MergeMem()) {
          assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
        }
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites destination array
          memnode_worklist.append_if_missing(use);
        }
      } else {
        uint op = use->Opcode();
        if ((use->in(MemNode::Memory) == n) &&
            (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
          // These overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(op == Op_CmpP || op == Op_Conv2B ||
                     op == Op_CastP2X || op == Op_StoreCM ||
                     op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
                     op == Op_ValueType)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing allocation reference path");
        }
#endif
      }
    }

  }

  // Go over all ArrayCopy nodes and if one of the inputs has a unique
  // type, record it in the ArrayCopy node so we know what memory this
  // node uses/modifies.
  for (int next = 0; next < arraycopy_worklist.length(); next++) {
    ArrayCopyNode* ac = arraycopy_worklist.at(next);
    Node* dest = ac->in(ArrayCopyNode::Dest);
    if (dest->is_AddP()) {
      dest = get_addp_base(dest);
    }
    JavaObjectNode* jobj = unique_java_object(dest);
    if (jobj != NULL) {
      Node *base = get_map(jobj->idx());
      if (base != NULL) {
        const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
        ac->_dest_type = base_t;
      }
    }
    Node* src = ac->in(ArrayCopyNode::Src);
    if (src->is_AddP()) {
      src = get_addp_base(src);
    }
    jobj = unique_java_object(src);
    if (jobj != NULL) {
      Node* base = get_map(jobj->idx());
      if (base != NULL) {
        const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
        ac->_src_type = base_t;
      }
    }
  }

  // New alias types were created in split_AddP().
  uint new_index_end = (uint) _compile->num_alias_types();
  assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");

  // Phase 2: Process MemNode's from memnode_worklist. Compute a new address
  //          type and new values for the Memory inputs (the Memory inputs are
  //          not actually updated until Phase 4).
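  // (Note, inferred from its uses below: find_inst_mem() walks up a memory
  // chain to find the memory state matching the given alias index, splitting
  // memory Phis along the way and recording the original Phis in orig_phis so
  // Phase 4 can fix up their non-instance inputs.)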
  if (memnode_worklist.length() == 0)
    return;  // nothing to do
  while (memnode_worklist.length() != 0) {
    Node *n = memnode_worklist.pop();
    if (visited.test_set(n->_idx))
      continue;
    if (n->is_Phi() || n->is_ClearArray()) {
      // we don't need to do anything, but the users must be pushed
    } else if (n->is_MemBar()) { // Initialize, MemBar nodes
      // we don't need to do anything, but the users must be pushed
      n = n->as_MemBar()->proj_out(TypeFunc::Memory);
      if (n == NULL)
        continue;
    } else if (n->Opcode() == Op_StrCompressedCopy ||
               n->Opcode() == Op_EncodeISOArray) {
      // get the memory projection
      n = n->find_out_with(Op_SCMemProj);
      assert(n->Opcode() == Op_SCMemProj, "memory projection required");
    } else {
      assert(n->is_Mem(), "memory node required.");
      Node *addr = n->in(MemNode::Address);
      const Type *addr_t = igvn->type(addr);
      if (addr_t == Type::TOP)
        continue;
      assert(addr_t->isa_ptr() != NULL, "pointer type required.");
      int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
      assert((uint)alias_idx < new_index_end, "wrong alias index");
      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != n->in(MemNode::Memory)) {
        // We delay the memory edge update since we need the old one in
        // the MergeMem code below when instance memory slices are separated.
        set_map(n, mem);
      }
      if (n->is_Load()) {
        continue;  // don't push users
      } else if (n->is_LoadStore()) {
        // get the memory projection
        n = n->find_out_with(Op_SCMemProj);
        assert(n->Opcode() == Op_SCMemProj, "memory projection required");
      }
    }
    // Push the node's users onto the appropriate worklist.
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Phi() || use->is_ClearArray()) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
        if (use->Opcode() == Op_StoreCM)  // Ignore cardmark stores
          continue;
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites destination array
          memnode_worklist.append_if_missing(use);
        }
      } else {
        uint op = use->Opcode();
        if ((use->in(MemNode::Memory) == n) &&
            (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
          // These overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(op == Op_StoreCM ||
                     (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
                      strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
                     op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing memory path");
        }
#endif
      }
    }
  }

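  // (Background note, assumption about MergeMem invariants: a MergeMemNode
  // keeps one memory input per alias class, indexed by alias index; inputs at
  // or above new_index_start are the per-instance slices introduced by this
  // pass, while lower inputs are the original slices.)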
  // Phase 3: Process MergeMem nodes from mergemem_worklist.
  //          Walk each memory slice moving the first node encountered of each
  //          instance type to the input corresponding to its alias index.
  uint length = _mergemem_worklist.length();
  for (uint next = 0; next < length; ++next) {
    MergeMemNode* nmm = _mergemem_worklist.at(next);
    assert(!visited.test_set(nmm->_idx), "should not be visited before");
    // Note: we don't want to use MergeMemStream here because we only want to
    // scan inputs which exist at the start, not ones we add during processing.
    // Note 2: MergeMem may already contain instance memory slices added
    // during the find_inst_mem() calls when memory nodes were processed above.
    igvn->hash_delete(nmm);
    uint nslices = MIN2(nmm->req(), new_index_start);
    for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
      Node* mem = nmm->in(i);
      Node* cur = NULL;
      if (mem == NULL || mem->is_top())
        continue;
      // First, update mergemem by moving memory nodes to corresponding slices
      // if their type became more precise since this mergemem was created.
      while (mem->is_Mem()) {
        const Type *at = igvn->type(mem->in(MemNode::Address));
        if (at != Type::TOP) {
          assert(at->isa_ptr() != NULL, "pointer type required.");
          uint idx = (uint)_compile->get_alias_index(at->is_ptr());
          if (idx == i) {
            if (cur == NULL)
              cur = mem;
          } else {
            if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
              nmm->set_memory_at(idx, mem);
            }
          }
        }
        mem = mem->in(MemNode::Memory);
      }
      nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
      // Find any instance of the current type if we haven't already
      // encountered a memory slice of the instance along the memory chain.
      for (uint ni = new_index_start; ni < new_index_end; ni++) {
        if ((uint)_compile->get_general_index(ni) == i) {
          Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
          if (nmm->is_empty_memory(m)) {
            Node* result = find_inst_mem(mem, ni, orig_phis);
            if (_compile->failing()) {
              return;
            }
            nmm->set_memory_at(ni, result);
          }
        }
      }
    }
    // Find the rest of the instance values.
    for (uint ni = new_index_start; ni < new_index_end; ni++) {
      const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
      Node* result = step_through_mergemem(nmm, ni, tinst);
      if (result == nmm->base_memory()) {
        // Didn't find instance memory, search through general slice recursively.
        result = nmm->memory_at(_compile->get_general_index(ni));
        result = find_inst_mem(result, ni, orig_phis);
        if (_compile->failing()) {
          return;
        }
        nmm->set_memory_at(ni, result);
      }
    }
    igvn->hash_insert(nmm);
    record_for_optimizer(nmm);
  }

  // Phase 4: Update the inputs of non-instance memory Phis and
  //          the Memory input of memnodes.
  // First update the inputs of any non-instance Phi's from
  // which we split out an instance Phi. Note we don't have
  // to recursively process Phi's encountered on the input memory
  // chains as is done in split_memory_phi() since they will
  // also be processed here.
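  // (Illustrative sketch, not taken from this file: after the split, a load
  // from a unique instance's field is wired to that instance's own slice,
  //   before: LoadI(addr = p.f, mem = Phi(general memory))
  //   after:  LoadI(addr = p.f, mem = instance #N's memory for field f)
  // so IGVN can fold the load against the dominating store.)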
  for (int j = 0; j < orig_phis.length(); j++) {
    PhiNode *phi = orig_phis.at(j);
    int alias_idx = _compile->get_alias_index(phi->adr_type());
    igvn->hash_delete(phi);
    for (uint i = 1; i < phi->req(); i++) {
      Node *mem = phi->in(i);
      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != new_mem) {
        phi->set_req(i, new_mem);
      }
    }
    igvn->hash_insert(phi);
    record_for_optimizer(phi);
  }

  // Update the memory inputs of MemNodes with the value we computed
  // in Phase 2 and move the memory users of stores to the corresponding
  // memory slices.
  // Disable the memory split verification code until the fix for 6984348.
  // Currently it produces false negative results since it does not cover all cases.
#if 0 // ifdef ASSERT
  visited.Reset();
  Node_Stack old_mems(arena, _compile->unique() >> 2);
#endif
  for (uint i = 0; i < ideal_nodes.size(); i++) {
    Node* n = ideal_nodes.at(i);
    Node* nmem = get_map(n->_idx);
    assert(nmem != NULL, "sanity");
    if (n->is_Mem()) {
#if 0 // ifdef ASSERT
      Node* old_mem = n->in(MemNode::Memory);
      if (!visited.test_set(old_mem->_idx)) {
        old_mems.push(old_mem, old_mem->outcnt());
      }
#endif
      assert(n->in(MemNode::Memory) != nmem, "sanity");
      if (!n->is_Load()) {
        // Move the memory users of a store first.
        move_inst_mem(n, orig_phis);
      }
      // Now update the memory input.
      igvn->hash_delete(n);
      n->set_req(MemNode::Memory, nmem);
      igvn->hash_insert(n);
      record_for_optimizer(n);
    } else {
      assert(n->is_Allocate() || n->is_CheckCastPP() ||
             n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
    }
  }
#if 0 // ifdef ASSERT
  // Verify that memory was split correctly.
  while (old_mems.is_nonempty()) {
    Node* old_mem = old_mems.node();
    uint old_cnt = old_mems.index();
    old_mems.pop();
    assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
  }
#endif
}
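
// (Summary note, inferred from the phases above: after split_unique_types()
// each memory access to a unique non-escaping instance sits on its own alias
// slice, which is what later lets scalar replacement and lock elimination
// treat that instance independently of the general heap state.)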
"cp" : ""); 3475 } 3476 tty->print(" ["); 3477 for (UseIterator i(this); i.has_next(); i.next()) { 3478 PointsToNode* u = i.get(); 3479 bool is_base = false; 3480 if (PointsToNode::is_base_use(u)) { 3481 is_base = true; 3482 u = PointsToNode::get_use_node(u)->as_Field(); 3483 } 3484 tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : ""); 3485 } 3486 tty->print(" ]] "); 3487 if (_node == NULL) 3488 tty->print_cr("<null>"); 3489 else 3490 _node->dump(); 3491 } 3492 3493 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { 3494 bool first = true; 3495 int ptnodes_length = ptnodes_worklist.length(); 3496 for (int i = 0; i < ptnodes_length; i++) { 3497 PointsToNode *ptn = ptnodes_worklist.at(i); 3498 if (ptn == NULL || !ptn->is_JavaObject()) 3499 continue; 3500 PointsToNode::EscapeState es = ptn->escape_state(); 3501 if ((es != PointsToNode::NoEscape) && !Verbose) { 3502 continue; 3503 } 3504 Node* n = ptn->ideal_node(); 3505 if (n->is_Allocate() || (n->is_CallStaticJava() && 3506 n->as_CallStaticJava()->is_boxing_method())) { 3507 if (first) { 3508 tty->cr(); 3509 tty->print("======== Connection graph for "); 3510 _compile->method()->print_short_name(); 3511 tty->cr(); 3512 first = false; 3513 } 3514 ptn->dump(); 3515 // Print all locals and fields which reference this allocation 3516 for (UseIterator j(ptn); j.has_next(); j.next()) { 3517 PointsToNode* use = j.get(); 3518 if (use->is_LocalVar()) { 3519 use->dump(Verbose); 3520 } else if (Verbose) { 3521 use->dump(); 3522 } 3523 } 3524 tty->cr(); 3525 } 3526 } 3527 } 3528 #endif