/*
 * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  // Add ConP(#NULL) and ConN(#NULL) nodes.
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
  _pcmp_neq = NULL; // Should be initialized
  _pcmp_eq  = NULL;
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate())
      return true;
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con()))
        return true;
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  ResourceMark rm;

  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
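  // (ConnectionGraph's constructor asserts that these constants already have
  // ideal node indexes below nodes_size(), so they must exist before the
  // graph's arrays are sized from C->unique().)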
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non-escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0)
    igvn->hash_delete(oop_null);
  if (noop_null->outcnt() == 0)
    igvn->hash_delete(noop_null);
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<Node*> storestore_worklist;
  GrowableArray<ArrayCopyNode*> arraycopy_worklist;
  GrowableArray<PointsToNode*> ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_worklist;
  GrowableArray<FieldNode*> oop_fields_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), NULL); // preallocate space
  // Initialize worklist
  if (C->root() != NULL) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on the ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);
    // Create PointsTo nodes and add them to the Connection Graph. Called
    // only once per ideal node since ideal_nodes is a Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != NULL && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only allocation and java static call results are interesting.
          non_escaped_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    if (n->is_MergeMem()) {
      // Collect all MergeMem nodes to add memory slices for
      // scalar replaceable objects in split_unique_types().
      _mergemem_worklist.append(n->as_MergeMem());
    } else if (OptimizePtrCompare && n->is_Cmp() &&
               (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
      // Collect pointer-compare nodes.
      ptr_cmp_worklist.append(n);
    } else if (n->is_MemBarStoreStore()) {
      // Collect all MemBarStoreStore nodes so that, depending on the
      // escape status of the associated Allocate node, some of them
      // may be eliminated.
      storestore_worklist.append(n);
    } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
               (n->req() > MemBarNode::Precedent)) {
      record_for_optimizer(n);
#ifdef ASSERT
    } else if (n->is_AddP()) {
      // Collect address nodes for graph verification.
      addp_worklist.append(n);
#endif
    } else if (n->is_ArrayCopy()) {
      // Keep a list of ArrayCopy nodes so that, if one of their inputs is
      // non-escaping, we can record a unique type.
      arraycopy_worklist.append(n->as_ArrayCopy());
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i); // Get user
      ideal_nodes.push(m);
    }
  }
  if (non_escaped_worklist.length() == 0) {
    _collecting = false;
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while (delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }
  int ptnodes_length = ptnodes_worklist.length();

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and that all
    // local vars have edges.
    _verify = true;
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // The bytecode analyzer BCEscapeAnalyzer, used for Call node processing,
  // calls into CI to resolve symbols (types, fields, methods) referenced in
  // bytecode. During symbol resolution the VM may throw an exception, which
  // CI cleans up and converts to a compilation failure.
  if (C->failing())  return false;

  // 2. Finish Graph construction by propagating references to all
  //    java objects through the graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or we hit time or iteration limits.
    _collecting = false;
    return false;
  }

  // 3. Adjust scalar_replaceable state of non-escaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (n->is_CallStaticJava()) {
      n->as_CallStaticJava()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn);
      if (ptn->scalar_replaceable()) {
        alloc_worklist.append(ptn->ideal_node());
      }
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
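  //    (This folds pointer compares whose outcome EA can prove and removes
  //    MemBarStoreStore barriers for allocations which do not escape; see
  //    the ptr_cmp_worklist and storestore_worklist collected above.)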
  bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  if (has_scalar_replaceable_candidates &&
      C->AliasLevel() >= 3 && EliminateAllocations) {
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist);
    if (C->failing())  return false;
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    } else if (C->AliasLevel() < 3) {
      tty->print(" since AliasLevel < 3 ===");
    }
    tty->cr();
#endif
  }
  return has_non_escaping_obj;
}

// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != NULL) {
    Node* adr = n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == NULL ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape,
                           adr, delayed_worklist);
  }
}

// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method should not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != NULL)
    return; // No need to redefine PointsTo node during first iteration.

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on the IGVN worklist to process them during
      // the first IGVN optimization when escape information is still available.
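      // (Locks on objects which are proven non-escaping can then be
      // eliminated while the escape information is still valid.)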
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != NULL && strcmp(name, "uncommon_trap") == 0)
          return; // Skip uncommon traps
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
      if ((n->as_Call()->returns_pointer() &&
           n->as_Call()->proj_out(TypeFunc::Parms) != NULL) ||
          (n->is_CallStaticJava() &&
           n->as_CallStaticJava()->is_boxing_method())) {
        add_call_node(n->as_Call());
      } else if (n->as_Call()->tf()->returns_value_type_as_fields()) {
        bool returns_oop = false;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
          ProjNode* pn = n->fast_out(i)->as_Proj();
          if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
            returns_oop = true;
          }
        }
        if (returns_oop) {
          add_call_node(n->as_Call());
        }
      }
    }
    return;
  }
  // Put this check here to process call arguments since some call nodes
  // point to phantom_obj.
  if (n_ptn == phantom_obj || n_ptn == null_obj)
    return; // Skip predefined nodes.

  int opcode = n->Opcode();
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      // Field nodes are created for all field types. They are used in
      // adjust_scalar_replaceable_state() and split_unique_types().
      // Note, non-oop fields will have only base edges in the Connection
      // Graph because such fields are not used for oop loads and stores.
      int offset = address_offset(n, igvn);
      add_field(n, PointsToNode::NoEscape, offset);
      if (ptn_base == NULL) {
        delayed_worklist->push(n); // Process it later.
      } else {
        n_ptn = ptnode_adr(n_idx);
        add_base(n_ptn->as_Field(), ptn_base);
      }
      break;
    }
    case Op_CastX2P: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), delayed_worklist);
      break;
    }
    case Op_CMoveP: {
      add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during the first iteration because some may not
      // be defined yet.
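      // (The edges are added later in add_final_edges(), once all inputs
      // have PointsTo nodes.)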
      delayed_worklist->push(n);
      break;
    }
    case Op_ConP:
    case Op_ConN:
    case Op_ConNKlass: {
      // assume all oop constants globally escape except for null
      PointsToNode::EscapeState es;
      const Type* t = igvn->type(n);
      if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
        es = PointsToNode::NoEscape;
      } else {
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(n, es);
      break;
    }
    case Op_CreateEx: {
      // assume that all exception objects globally escape
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadKlass:
    case Op_LoadNKlass: {
      // Unknown class is loaded
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadPLocked: {
      add_objload_to_connection_graph(n, delayed_worklist);
      break;
    }
    case Op_Parm: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_PartialSubtypeCheck: {
      // Produces Null or notNull and is used only in CmpP, so
      // phantom_obj could be used.
      map_ideal_node(n, phantom_obj); // Result is unknown
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during the first iteration because some may not
        // be defined yet.
        delayed_worklist->push(n);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
          (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
        assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
               n->in(0)->as_Call()->tf()->returns_value_type_as_fields(), "what kind of oop return is it?");
        add_local_var_and_edge(n, PointsToNode::NoEscape,
                               n->in(0), delayed_worklist);
      }
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), delayed_worklist);
      }
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      // fallthrough
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_StorePConditional:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN: {
      Node* adr = n->in(MemNode::Address);
      const Type* adr_type = igvn->type(adr);
      adr_type = adr_type->make_ptr();
      if (adr_type == NULL) {
        break; // skip dead nodes
      }
      if (   adr_type->isa_oopptr()
          || (   (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
              && adr_type == TypeRawPtr::NOTNULL
              && adr->in(AddPNode::Address)->is_Proj()
              && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
        delayed_worklist->push(n); // Process it later.
#ifdef ASSERT
        assert(adr->is_AddP(), "expecting an AddP");
        if (adr_type == TypeRawPtr::NOTNULL) {
          // Verify a raw address for a store captured by Initialize node.
          int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
          assert(offs != Type::OffsetBot, "offset must be a constant");
        }
#endif
      } else {
        // Ignore copying the displaced header to the BoxNode (OSR compilation).
        if (adr->is_BoxLock())
          break;
        // Stored value escapes in unsafe access.
        if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
          // Pointer stores in G1 barriers look like unsafe access.
          // Ignore such stores to be able to scalar replace non-escaping
          // allocations.
          if (UseG1GC && adr->is_AddP()) {
            Node* base = get_addp_base(adr);
            if (base->Opcode() == Op_LoadP &&
                base->in(MemNode::Address)->is_AddP()) {
              adr = base->in(MemNode::Address);
              Node* tls = get_addp_base(adr);
              if (tls->Opcode() == Op_ThreadLocal) {
                int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
                if (offs == in_bytes(JavaThread::satb_mark_queue_offset() +
                                     SATBMarkQueue::byte_offset_of_buf())) {
                  break; // G1 pre barrier previous oop value store.
                }
                if (offs == in_bytes(JavaThread::dirty_card_queue_offset() +
                                     DirtyCardQueue::byte_offset_of_buf())) {
                  break; // G1 post barrier card address store.
                }
              }
            }
          }
          delayed_worklist->push(n); // Process unsafe access later.
          break;
        }
#ifdef ASSERT
        n->dump(1);
        assert(false, "not unsafe or G1 barrier raw StoreP");
#endif
      }
      break;
    }
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      add_local_var(n, PointsToNode::ArgEscape);
      delayed_worklist->push(n); // Process it later.
      break;
    }
    case Op_ThreadLocal: {
      add_java_object(n, PointsToNode::ArgEscape);
      break;
    }
    default:
      ; // Do nothing for nodes not related to EA.
  }
  return;
}

#ifdef ASSERT
#define ELSE_FAIL(name) \
  /* Should not be called for non-pointer types. */ \
  n->dump(1); \
  assert(false, name); \
  break;
#else
#define ELSE_FAIL(name) \
  break;
#endif

// Add final simple edges to graph.
void ConnectionGraph::add_final_edges(Node *n) {
  PointsToNode* n_ptn = ptnode_adr(n->_idx);
#ifdef ASSERT
  if (_verify && n_ptn->is_JavaObject())
    return; // This method does not change graph for JavaObject.
#endif

  if (n->is_Call()) {
    process_call_arguments(n->as_Call());
    return;
  }
  assert(n->is_Store() || n->is_LoadStore() ||
         ((n_ptn != NULL) && (n_ptn->ideal_node() != NULL)),
         "node should be registered already");
  int opcode = n->Opcode();
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      assert(ptn_base != NULL, "field's base should be registered");
      add_base(n_ptn->as_Field(), ptn_base);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), NULL);
      break;
    }
    case Op_CMoveP: {
      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == NULL)
          continue; // ignore NULL
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n)
          continue; // ignore top or inputs which go back to this node
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadPLocked: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = _igvn->type(n);
      if (t->make_ptr() != NULL) {
        Node* adr = n->in(MemNode::Address);
        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
        break;
      }
      ELSE_FAIL("Op_LoadP");
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        for (uint i = 1; i < n->req(); i++) {
          Node* in = n->in(i);
          if (in == NULL)
            continue; // ignore NULL
          Node* uncast_in = in->uncast();
          if (uncast_in->is_top() || uncast_in == n)
            continue; // ignore top or inputs which go back to this node
          PointsToNode* ptn = ptnode_adr(in->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
        break;
      }
      ELSE_FAIL("Op_Phi");
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
          (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
        assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
               n->in(0)->as_Call()->tf()->returns_value_type_as_fields(), "what kind of oop return is it?");
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
        break;
      }
      ELSE_FAIL("Op_Proj");
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
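        // (find_non_escaped_objects() later propagates this state to every
        // object the returned value may point to.)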
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), NULL);
        break;
      }
      ELSE_FAIL("Op_Return");
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_StorePConditional:
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      Node* adr = n->in(MemNode::Address);
      const Type* adr_type = _igvn->type(adr);
      adr_type = adr_type->make_ptr();
#ifdef ASSERT
      if (adr_type == NULL) {
        n->dump(1);
        assert(adr_type != NULL, "dead node should not be on list");
        break;
      }
#endif
      if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
          opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
      }
      if (   adr_type->isa_oopptr()
          || (   (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
              && adr_type == TypeRawPtr::NOTNULL
              && adr->in(AddPNode::Address)->is_Proj()
              && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
        // Point Address to Value
        PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
        assert(adr_ptn != NULL &&
               adr_ptn->as_Field()->is_oop(), "node should be registered");
        Node* val = n->in(MemNode::ValueIn);
        PointsToNode* ptn = ptnode_adr(val->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(adr_ptn, ptn);
        break;
      } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
        // Stored value escapes in unsafe access.
        Node* val = n->in(MemNode::ValueIn);
        PointsToNode* ptn = ptnode_adr(val->_idx);
        assert(ptn != NULL, "node should be registered");
        set_escape_state(ptn, PointsToNode::GlobalEscape);
        // Add edge to object for unsafe access with offset.
        PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
        assert(adr_ptn != NULL, "node should be registered");
        if (adr_ptn->is_Field()) {
          assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
          add_edge(adr_ptn, ptn);
        }
        break;
      }
      ELSE_FAIL("Op_StoreP");
    }
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      // char[]/byte[] arrays passed to string intrinsics do not escape but
      // they are not scalar replaceable. Adjust escape state for them.
      // Start from the in(2) edge since in(1) is the memory edge.
      for (uint i = 2; i < n->req(); i++) {
        Node* adr = n->in(i);
        const Type* at = _igvn->type(adr);
        if (!adr->is_top() && at->isa_ptr()) {
          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
                 at->isa_ptr() != NULL, "expecting a pointer");
          if (adr->is_AddP()) {
            adr = get_addp_base(adr);
          }
          PointsToNode* ptn = ptnode_adr(adr->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    default: {
      // This method should be called only for EA-specific nodes which may
      // miss some edges when they were created.
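      // (Reaching this point means a node kind reached add_final_edges()
      // which this switch does not handle.)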
#ifdef ASSERT
      n->dump(1);
#endif
      guarantee(false, "unknown node");
    }
  }
  return;
}

void ConnectionGraph::add_call_node(CallNode* call) {
  assert(call->returns_pointer() || call->tf()->returns_value_type_as_fields(), "only for call which returns pointer");
  uint call_idx = call->_idx;
  if (call->is_Allocate()) {
    Node* k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != NULL, "TypeKlassPtr required.");
    ciKlass* cik = kt->klass();
    PointsToNode::EscapeState es = PointsToNode::NoEscape;
    bool scalar_replaceable = true;
    if (call->is_AllocateArray()) {
      if (!cik->is_array_klass()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is not constant or too big.
          scalar_replaceable = false;
        }
      }
    } else { // Allocate instance
      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
          cik->is_subclass_of(_compile->env()->Reference_klass()) ||
          !cik->is_instance_klass() || // StressReflectiveCode
          !cik->as_instance_klass()->can_be_instantiated() ||
          cik->as_instance_klass()->has_finalizer()) {
        es = PointsToNode::GlobalEscape;
      }
    }
    add_java_object(call, es);
    PointsToNode* ptn = ptnode_adr(call_idx);
    if (!scalar_replaceable && ptn->scalar_replaceable()) {
      ptn->set_scalar_replaceable(false);
    }
  } else if (call->is_CallStaticJava()) {
    // Call nodes could be different types:
    //
    // 1. CallDynamicJavaNode (what happened during call is unknown):
    //
    //    - mapped to GlobalEscape JavaObject node if oop is returned;
    //
    //    - all oop arguments are escaping globally;
    //
    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
    //
    //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
    //
    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
    //
    //    - mapped to NoEscape JavaObject node if a non-escaping object
    //      allocated during the call is returned;
    //
    //    - mapped to ArgEscape LocalVar node pointed to object arguments
    //      which are returned and do not escape during the call;
    //
    //    - oop arguments' escaping status is defined by bytecode analysis;
    //
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record whether the call's return value escapes.
    ciMethod* meth = call->as_CallJava()->method();
    if (meth == NULL) {
      const char* name = call->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
      // Returns a newly allocated unescaped object.
      add_java_object(call, PointsToNode::NoEscape);
      ptnode_adr(call_idx)->set_scalar_replaceable(false);
    } else if (meth->is_boxing_method()) {
      // Returns boxing object
      PointsToNode::EscapeState es;
      vmIntrinsics::ID intr = meth->intrinsic_id();
      if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
        // It does not escape if object is always allocated.
        es = PointsToNode::NoEscape;
      } else {
        // It escapes globally if object could be loaded from cache.
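        // (For example, Integer.valueOf() may return a shared instance from
        // the integer cache rather than a fresh allocation, so the result
        // must be treated as globally escaping.)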
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(call, es);
    } else {
      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
      call_analyzer->copy_dependencies(_compile->dependencies());
      if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated unescaped object, simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
        add_java_object(call, PointsToNode::NoEscape);
        ptnode_adr(call_idx)->set_scalar_replaceable(false);
      } else {
        // Determine whether any arguments are returned.
        const TypeTuple* d = call->tf()->domain_cc();
        bool ret_arg = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          if (d->field_at(i)->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
            ret_arg = true;
            break;
          }
        }
        if (ret_arg) {
          add_local_var(call, PointsToNode::ArgEscape);
        } else {
          // Returns unknown object.
          map_ideal_node(call, phantom_obj);
        }
      }
    }
  } else {
    // Another type of call, assume the worst case:
    // returned value is unknown and globally escapes.
    assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
    map_ideal_node(call, phantom_obj);
  }
}

void ConnectionGraph::process_call_arguments(CallNode *call) {
  bool is_arraycopy = false;
  switch (call->Opcode()) {
#ifdef ASSERT
    case Op_Allocate:
    case Op_AllocateArray:
    case Op_Lock:
    case Op_Unlock:
      assert(false, "should be done already");
      break;
#endif
    case Op_ArrayCopy:
    case Op_CallLeafNoFP:
      // Most array copies are ArrayCopy nodes at this point but there
      // are still a few direct calls to the copy subroutines (See
      // PhaseStringOpts::copy_string())
      is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
                     call->as_CallLeaf()->is_call_to_arraycopystub();
      // fall through
    case Op_CallLeaf: {
      // Stub calls, objects do not escape but they are not scalar replaceable.
      // Adjust escape state for outgoing arguments.
      const TypeTuple* d = call->tf()->domain_sig();
      bool src_has_oops = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        Node* arg = call->in(i);
        if (arg == NULL) {
          continue;
        }
        const Type* aat = _igvn->type(arg);
        if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
          continue;
        if (arg->is_AddP()) {
          //
          // The inline_native_clone() case when the arraycopy stub is called
          // after the allocation before Initialize and CheckCastPP nodes.
          // Or the normal arraycopy for object arrays case.
          //
          // Set AddP's base (Allocate) as not scalar replaceable since
          // a pointer to the base (with offset) is passed as argument.
          //
          arg = get_addp_base(arg);
        }
        PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
        assert(arg_ptn != NULL, "should be registered");
        PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
        if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != NULL, "expecting a Ptr");
          bool arg_has_oops = aat->isa_oopptr() &&
                              (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
                               (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
          if (i == TypeFunc::Parms) {
            src_has_oops = arg_has_oops;
          }
          //
          // src or dst could be j.l.Object when the other is a basic type array:
          //
          //   arraycopy(char[],0,Object*,0,size);
          //   arraycopy(Object*,0,char[],0,size);
          //
          // Don't add edges in such cases.
          //
          bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
                                       arg_has_oops && (i > TypeFunc::Parms);
#ifdef ASSERT
          if (!(is_arraycopy ||
                (call->as_CallLeaf()->_name != NULL &&
                 (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0)
                ))) {
            call->dump();
            fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
          }
#endif
          // Always process arraycopy's destination object since
          // we need to add all possible edges to references in
          // the source object.
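          // (Other arguments which already escape at least as ArgEscape can
          // be skipped here.)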
          if (arg_esc >= PointsToNode::ArgEscape &&
              !arg_is_arraycopy_dest) {
            continue;
          }
          PointsToNode::EscapeState es = PointsToNode::ArgEscape;
          if (call->is_ArrayCopy()) {
            ArrayCopyNode* ac = call->as_ArrayCopy();
            if (ac->is_clonebasic() ||
                ac->is_arraycopy_validated() ||
                ac->is_copyof_validated() ||
                ac->is_copyofrange_validated()) {
              es = PointsToNode::NoEscape;
            }
          }
          set_escape_state(arg_ptn, es);
          if (arg_is_arraycopy_dest) {
            Node* src = call->in(TypeFunc::Parms);
            if (src->is_AddP()) {
              src = get_addp_base(src);
            }
            PointsToNode* src_ptn = ptnode_adr(src->_idx);
            assert(src_ptn != NULL, "should be registered");
            if (arg_ptn != src_ptn) {
              // Special arraycopy edge:
              // A destination object's field can't have the source object
              // as base since the objects' escape states are not related.
              // Only the escape state of the destination object's fields
              // affects the escape state of fields in the source object.
              add_arraycopy(call, es, src_ptn, arg_ptn);
            }
          }
        }
      }
      break;
    }
    case Op_CallStaticJava: {
      // For a static call, we know exactly what method is being called.
      // Use bytecode estimator to record the call's escape effects.
#ifdef ASSERT
      const char* name = call->as_CallStaticJava()->_name;
      assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
#endif
      ciMethod* meth = call->as_CallJava()->method();
      if ((meth != NULL) && meth->is_boxing_method()) {
        break; // Boxing methods do not modify any oops.
      }
      BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
      // fall-through if not a Java method or no analyzer information
      if (call_analyzer != NULL) {
        PointsToNode* call_ptn = ptnode_adr(call->_idx);
        const TypeTuple* d = call->tf()->domain_cc();
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          int k = i - TypeFunc::Parms;
          Node* arg = call->in(i);
          PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
          if (at->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(k)) {
            // The call returns arguments.
            if (call_ptn != NULL) { // Is call's result used?
              assert(call_ptn->is_LocalVar(), "node should be registered");
              assert(arg_ptn != NULL, "node should be registered");
              add_edge(call_ptn, arg_ptn);
            }
          }
          if (at->isa_oopptr() != NULL &&
              arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
            if (!call_analyzer->is_arg_stack(k)) {
              // The argument global escapes
              set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
            } else {
              set_escape_state(arg_ptn, PointsToNode::ArgEscape);
              if (!call_analyzer->is_arg_local(k)) {
                // The argument itself doesn't escape, but any fields might
                set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
              }
            }
          }
        }
        if (call_ptn != NULL && call_ptn->is_LocalVar()) {
          // The call returns arguments.
          assert(call_ptn->edge_count() > 0, "sanity");
          if (!call_analyzer->is_return_local()) {
            // Returns also unknown object.
            add_edge(call_ptn, phantom_obj);
          }
        }
        break;
      }
    }
    default: {
      // Fall through here if not a Java method or no analyzer information,
      // or some other type of call: assume the worst case, all arguments
      // globally escape.
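      // (This covers, for example, CallDynamicJava nodes where the target
      // method is unknown.)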
      const TypeTuple* d = call->tf()->domain_cc();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != NULL) {
          Node* arg = call->in(i);
          if (arg->is_AddP()) {
            arg = get_addp_base(arg);
          }
          assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
          set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
        }
      }
    }
  }
}


// Finish Graph construction.
bool ConnectionGraph::complete_connection_graph(
                         GrowableArray<PointsToNode*>&   ptnodes_worklist,
                         GrowableArray<JavaObjectNode*>& non_escaped_worklist,
                         GrowableArray<JavaObjectNode*>& java_objects_worklist,
                         GrowableArray<FieldNode*>&      oop_fields_worklist) {
  // Normally only 1-3 passes are needed to build the Connection Graph,
  // depending on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
  // Set the limit to 20 to catch situations when something did go wrong and
  // bail out Escape Analysis.
  // Also limit build time to 20 sec (60 in debug VM); see the
  // EscapeAnalysisTimeout flag.
#define CG_BUILD_ITER_LIMIT 20

  // Propagate GlobalEscape and ArgEscape escape states and check that
  // we still have non-escaping objects. The method pushes Field nodes
  // which reference phantom_object on _worklist.
  if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
    return false; // Nothing to do.
  }
  // Now propagate references to all JavaObject nodes.
  int java_objects_length = java_objects_worklist.length();
  elapsedTimer time;
  bool timeout = false;
  int new_edges = 1;
  int iterations = 0;
  do {
    while ((new_edges > 0) &&
           (iterations++ < CG_BUILD_ITER_LIMIT)) {
      double start_time = time.seconds();
      time.start();
      new_edges = 0;
      // Propagate references to phantom_object for nodes pushed on _worklist
      // by find_non_escaped_objects() and find_field_value().
      new_edges += add_java_object_edges(phantom_obj, false);
      for (int next = 0; next < java_objects_length; ++next) {
        JavaObjectNode* ptn = java_objects_worklist.at(next);
        new_edges += add_java_object_edges(ptn, true);

#define SAMPLE_SIZE 4
        if ((next % SAMPLE_SIZE) == 0) {
          // Every SAMPLE_SIZE iterations calculate how much time it will
          // take to complete graph construction.
          time.stop();
          // Poll for requests from the shutdown mechanism to quiesce the
          // compiler because Connection Graph construction may take a long time.
          CompileBroker::maybe_block();
          double stop_time = time.seconds();
          double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
          double time_until_end = time_per_iter * (double)(java_objects_length - next);
          if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
            timeout = true;
            break; // Timeout
          }
          start_time = stop_time;
          time.start();
        }
#undef SAMPLE_SIZE

      }
      if (timeout) break;
      if (new_edges > 0) {
        // Update escape states on each iteration if graph was updated.
        if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
          return false; // Nothing to do.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    }
    if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) {
      time.start();
      // Find fields which have unknown value.
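      // (An oop field with no outgoing edges has no recorded stored value;
      // find_field_value() conservatively points it to phantom_obj.)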
      int fields_length = oop_fields_worklist.length();
      for (int next = 0; next < fields_length; next++) {
        FieldNode* field = oop_fields_worklist.at(next);
        if (field->edge_count() == 0) {
          new_edges += find_field_value(field);
          // This code may add new edges to phantom_object.
          // Need another cycle to propagate references to phantom_object.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    } else {
      new_edges = 0; // Bailout
    }
  } while (new_edges > 0);

  // Bail out if limits were exceeded.
  if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) {
    Compile* C = _compile;
    if (C->log() != NULL) {
      C->log()->begin_elem("connectionGraph_bailout reason='reached ");
      C->log()->text("%s", timeout ? "time" : "iterations");
      C->log()->end_elem(" limit'");
    }
    assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
           time.seconds(), iterations, nodes_size(), ptnodes_worklist.length());
    // Possible infinite build_connection_graph loop,
    // bail out (no changes to the ideal graph were made).
    return false;
  }
#ifdef ASSERT
  if (Verbose && PrintEscapeAnalysis) {
    tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d",
                  iterations, nodes_size(), ptnodes_worklist.length());
  }
#endif

#undef CG_BUILD_ITER_LIMIT

  // Find fields initialized by NULL for non-escaping Allocations.
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    PointsToNode::EscapeState es = ptn->escape_state();
    assert(es <= PointsToNode::ArgEscape, "sanity");
    if (es == PointsToNode::NoEscape) {
      if (find_init_values(ptn, null_obj, _igvn) > 0) {
        // Adding references to the NULL object does not change escape states
        // since it does not escape. Also no fields are added to the NULL object.
        add_java_object_edges(null_obj, false);
      }
    }
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      // The object allocated by this Allocate node will never be
      // seen by another thread. Mark it so that when it is
      // expanded no MemBarStoreStore is added.
      InitializeNode* ini = n->as_Allocate()->initialization();
      if (ini != NULL)
        ini->set_does_not_escape();
    }
  }
  return true; // Finished graph construction.
}

// Propagate GlobalEscape and ArgEscape escape states to all nodes
// and check that we still have non-escaping java objects.
bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
                                               GrowableArray<JavaObjectNode*>& non_escaped_worklist) {
  GrowableArray<PointsToNode*> escape_worklist;
  // First, put all nodes with GlobalEscape and ArgEscape states on the worklist.
  int ptnodes_length = ptnodes_worklist.length();
  for (int next = 0; next < ptnodes_length; ++next) {
    PointsToNode* ptn = ptnodes_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::ArgEscape ||
        ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
      escape_worklist.push(ptn);
    }
  }
  // Set escape states to referenced nodes (edges list).
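  // (A standard worklist fixpoint: states only move up the
  // NoEscape < ArgEscape < GlobalEscape lattice, so this terminates.)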
  while (escape_worklist.length() > 0) {
    PointsToNode* ptn = escape_worklist.pop();
    PointsToNode::EscapeState es = ptn->escape_state();
    PointsToNode::EscapeState field_es = ptn->fields_escape_state();
    if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
        es >= PointsToNode::ArgEscape) {
      // GlobalEscape or ArgEscape state of field means it has unknown value.
      if (add_edge(ptn, phantom_obj)) {
        // New edge was added
        add_field_uses_to_worklist(ptn->as_Field());
      }
    }
    for (EdgeIterator i(ptn); i.has_next(); i.next()) {
      PointsToNode* e = i.get();
      if (e->is_Arraycopy()) {
        assert(ptn->arraycopy_dst(), "sanity");
        // Propagate only fields escape state through arraycopy edge.
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es);
          escape_worklist.push(e);
        }
      } else if (es >= field_es) {
        // fields_escape_state is also set to 'es' if it is less than 'es'.
        if (e->escape_state() < es) {
          set_escape_state(e, es);
          escape_worklist.push(e);
        }
      } else {
        // Propagate field escape state.
        bool es_changed = false;
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es);
          es_changed = true;
        }
        if ((e->escape_state() < field_es) &&
            e->is_Field() && ptn->is_JavaObject() &&
            e->as_Field()->is_oop()) {
          // Change escape state of referenced fields.
          set_escape_state(e, field_es);
          es_changed = true;
        } else if (e->escape_state() < es) {
          set_escape_state(e, es);
          es_changed = true;
        }
        if (es_changed) {
          escape_worklist.push(e);
        }
      }
    }
  }
  // Remove escaped objects from non_escaped list.
  for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
      non_escaped_worklist.delete_at(next);
    }
    if (ptn->escape_state() == PointsToNode::NoEscape) {
      // Find fields in non-escaped allocations which have unknown value.
      find_init_values(ptn, phantom_obj, NULL);
    }
  }
  return (non_escaped_worklist.length() > 0);
}

// Add all references to JavaObject node by walking over all uses.
int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
  int new_edges = 0;
  if (populate_worklist) {
    // Populate _worklist by uses of jobj's uses.
    for (UseIterator i(jobj); i.has_next(); i.next()) {
      PointsToNode* use = i.get();
      if (use->is_Arraycopy())
        continue;
      add_uses_to_worklist(use);
      if (use->is_Field() && use->as_Field()->is_oop()) {
        // Put on worklist all field's uses (loads) and
        // related field nodes (same base and offset).
        add_field_uses_to_worklist(use->as_Field());
      }
    }
  }
  for (int l = 0; l < _worklist.length(); l++) {
    PointsToNode* use = _worklist.at(l);
    if (PointsToNode::is_base_use(use)) {
      // Add reference from jobj to field and from field to jobj (field's base).
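      // (Base uses are encoded with a tag on the use pointer;
      // get_use_node() recovers the actual Field node.)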
      use = PointsToNode::get_use_node(use)->as_Field();
      if (add_base(use->as_Field(), jobj)) {
        new_edges++;
      }
      continue;
    }
    assert(!use->is_JavaObject(), "sanity");
    if (use->is_Arraycopy()) {
      if (jobj == null_obj) // NULL object does not have field edges
        continue;
      // Add edge from Arraycopy node to arraycopy's source java object
      if (add_edge(use, jobj)) {
        jobj->set_arraycopy_src();
        new_edges++;
      }
      // and stop here.
      continue;
    }
    if (!add_edge(use, jobj))
      continue; // No new edge added, there was such edge already.
    new_edges++;
    if (use->is_LocalVar()) {
      add_uses_to_worklist(use);
      if (use->arraycopy_dst()) {
        for (EdgeIterator i(use); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_Arraycopy()) {
            if (jobj == null_obj) // NULL object does not have field edges
              continue;
            // Add edge from arraycopy's destination java object to Arraycopy node.
            if (add_edge(jobj, e)) {
              new_edges++;
              jobj->set_arraycopy_dst();
            }
          }
        }
      }
    } else {
      // Added a new edge to values stored in fields.
      // Put on worklist all field's uses (loads) and
      // related field nodes (same base and offset).
      add_field_uses_to_worklist(use->as_Field());
    }
  }
  _worklist.clear();
  _in_worklist.Reset();
  return new_edges;
}

// Put on worklist all related field nodes.
void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
  assert(field->is_oop(), "sanity");
  int offset = field->offset();
  add_uses_to_worklist(field);
  // Loop over all bases of this field and push on worklist Field nodes
  // with the same offset and base (since they may reference the same field).
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    add_fields_to_worklist(field, base);
    // Check if the base was the source object of an arraycopy and go over the
    // arraycopy's destination objects since values stored to a field of the
    // source object are accessible by uses (loads) of fields of the
    // destination objects.
    if (base->arraycopy_src()) {
      for (UseIterator j(base); j.has_next(); j.next()) {
        PointsToNode* arycp = j.get();
        if (arycp->is_Arraycopy()) {
          for (UseIterator k(arycp); k.has_next(); k.next()) {
            PointsToNode* abase = k.get();
            if (abase->arraycopy_dst() && abase != base) {
              // Look for the same arraycopy reference.
              add_fields_to_worklist(field, abase);
            }
          }
        }
      }
    }
  }
}

// Put on worklist all related field nodes.
void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
  int offset = field->offset();
  if (base->is_LocalVar()) {
    for (UseIterator j(base); j.has_next(); j.next()) {
      PointsToNode* f = j.get();
      if (PointsToNode::is_base_use(f)) { // Field
        f = PointsToNode::get_use_node(f);
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  } else {
    assert(base->is_JavaObject(), "sanity");
    if (// Skip phantom_object since it is only used to indicate that
        // this field's content globally escapes.
        (base != phantom_obj) &&
        // NULL object node does not have fields.
        (base != null_obj)) {
      for (EdgeIterator i(base); i.has_next(); i.next()) {
        PointsToNode* f = i.get();
        // Skip arraycopy edges since a store to the destination object's field
        // does not update the value in the source object's field.
        if (f->is_Arraycopy()) {
          assert(base->arraycopy_dst(), "sanity");
          continue;
        }
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  }
}

// Find fields which have unknown value.
int ConnectionGraph::find_field_value(FieldNode* field) {
  // Escaped fields should have init value already.
  assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
  int new_edges = 0;
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    if (base->is_JavaObject()) {
      // Skip Allocate's fields which will be processed later.
      if (base->ideal_node()->is_Allocate())
        return 0;
      assert(base == null_obj, "only NULL ptr base expected here");
    }
  }
  if (add_edge(field, phantom_obj)) {
    // New edge was added
    new_edges++;
    add_field_uses_to_worklist(field);
  }
  return new_edges;
}

// Find the fields' initializing values for allocations.
int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) {
  assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
  int new_edges = 0;
  Node* alloc = pta->ideal_node();
  if (init_val == phantom_obj) {
    // Do nothing for Allocate nodes since their field values are
    // "known" unless they are initialized by arraycopy/clone.
    if (alloc->is_Allocate() && !pta->arraycopy_dst())
      return 0;
    assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
#ifdef ASSERT
    if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) {
      const char* name = alloc->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
    }
#endif
    // Non-escaped allocations returned from Java or runtime calls have
    // unknown values in fields.
    for (EdgeIterator i(pta); i.has_next(); i.next()) {
      PointsToNode* field = i.get();
      if (field->is_Field() && field->as_Field()->is_oop()) {
        if (add_edge(field, phantom_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(field->as_Field());
        }
      }
    }
    return new_edges;
  }
  assert(init_val == null_obj, "sanity");
  // Do nothing for Call nodes since their field values are unknown.
  if (!alloc->is_Allocate())
    return 0;

  InitializeNode* ini = alloc->as_Allocate()->initialization();
  bool visited_bottom_offset = false;
  GrowableArray<int> offsets_worklist;

  // Check if an oop field's initializing value is recorded and add
  // a corresponding NULL value to the field if it is not recorded.
  // The Connection Graph does not record a default initialization by NULL
  // captured by the Initialize node.
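  //
  // A hypothetical example:
  //
  //   Foo f = new Foo();    // f.bar defaults to null; this default
  //                         // initialization is not captured
  //   if (cond) f.bar = x;  // a later store, not part of Initialize
  //
  // Without the NULL edge added below, the compare-pointers optimization
  // could wrongly conclude that f.bar is never null.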
1568   //
1569   for (EdgeIterator i(pta); i.has_next(); i.next()) {
1570     PointsToNode* field = i.get(); // Field (AddP)
1571     if (!field->is_Field() || !field->as_Field()->is_oop())
1572       continue; // Not oop field
1573     int offset = field->as_Field()->offset();
1574     if (offset == Type::OffsetBot) {
1575       if (!visited_bottom_offset) {
1576         // OffsetBot is used to reference an array's element;
1577         // always add a reference to NULL to all Field nodes since we don't
1578         // know which element is referenced.
1579         if (add_edge(field, null_obj)) {
1580           // New edge was added
1581           new_edges++;
1582           add_field_uses_to_worklist(field->as_Field());
1583           visited_bottom_offset = true;
1584         }
1585       }
1586     } else {
1587       // Check only oop fields.
1588       const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
1589       if (adr_type->isa_rawptr()) {
1590 #ifdef ASSERT
1591         // Raw pointers are used for initializing stores, so skip this field
1592         // since its value should be recorded already.
1593         Node* base = get_addp_base(field->ideal_node());
1594         assert(adr_type->isa_rawptr() && base->is_Proj() &&
1595                (base->in(0) == alloc), "unexpected pointer type");
1596 #endif
1597         continue;
1598       }
1599       if (!offsets_worklist.contains(offset)) {
1600         offsets_worklist.append(offset);
1601         Node* value = NULL;
1602         if (ini != NULL) {
1603           // StoreP::memory_type() == T_ADDRESS
1604           BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
1605           Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
1606           // Make sure the initializing store has the same type as this AddP.
1607           // This AddP may reference a non-existing field because it is on a
1608           // dead branch of a bimorphic call which is not eliminated yet.
1609           if (store != NULL && store->is_Store() &&
1610               store->as_Store()->memory_type() == ft) {
1611             value = store->in(MemNode::ValueIn);
1612 #ifdef ASSERT
1613             if (VerifyConnectionGraph) {
1614               // Verify that AddP already points to all objects the value points to.
1615               PointsToNode* val = ptnode_adr(value->_idx);
1616               assert((val != NULL), "should be processed already");
1617               PointsToNode* missed_obj = NULL;
1618               if (val->is_JavaObject()) {
1619                 if (!field->points_to(val->as_JavaObject())) {
1620                   missed_obj = val;
1621                 }
1622               } else {
1623                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
1624                   tty->print_cr("----------init store has invalid value -----");
1625                   store->dump();
1626                   val->dump();
1627                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
1628                 }
1629                 for (EdgeIterator j(val); j.has_next(); j.next()) {
1630                   PointsToNode* obj = j.get();
1631                   if (obj->is_JavaObject()) {
1632                     if (!field->points_to(obj->as_JavaObject())) {
1633                       missed_obj = obj;
1634                       break;
1635                     }
1636                   }
1637                 }
1638               }
1639               if (missed_obj != NULL) {
1640                 tty->print_cr("----------field---------------------------------");
1641                 field->dump();
1642                 tty->print_cr("----------missed reference to object------------");
1643                 missed_obj->dump();
1644                 tty->print_cr("----------object referenced by init store-------");
1645                 store->dump();
1646                 val->dump();
1647                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
1648               }
1649             }
1650 #endif
1651           } else {
1652             // There could be initializing stores which follow the allocation.
1653             // For example, a volatile field store is not collected
1654             // by the Initialize node.
1655             //
1656             // Need to check for dependent loads to separate such stores from
1657             // stores which follow loads. For now, add initial value NULL so
1658             // that the compare pointers optimization works correctly.
1659           }
1660         }
1661         if (value == NULL) {
1662           // A field's initializing value was not recorded. Add NULL.
1663           if (add_edge(field, null_obj)) {
1664             // New edge was added
1665             new_edges++;
1666             add_field_uses_to_worklist(field->as_Field());
1667           }
1668         }
1669       }
1670     }
1671   }
1672   return new_edges;
1673 }
1674
1675 // Adjust the scalar_replaceable state after the Connection Graph is built.
1676 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {
1677   // Search for non-escaping objects which are not scalar replaceable
1678   // and mark them to propagate the state to referenced objects.
1679
1680   // 1. An object is not scalar replaceable if the field into which it is
1681   //    stored has unknown offset (stored into an unknown element of an array).
1682   //
1683   for (UseIterator i(jobj); i.has_next(); i.next()) {
1684     PointsToNode* use = i.get();
1685     if (use->is_Arraycopy()) {
1686       continue;
1687     }
1688     if (use->is_Field()) {
1689       FieldNode* field = use->as_Field();
1690       assert(field->is_oop() && field->scalar_replaceable(), "sanity");
1691       if (field->offset() == Type::OffsetBot) {
1692         jobj->set_scalar_replaceable(false);
1693         return;
1694       }
1695       // 2. An object is not scalar replaceable if the field into which it is
1696       //    stored has multiple bases, one of which is null.
1697       if (field->base_count() > 1) {
1698         for (BaseIterator i(field); i.has_next(); i.next()) {
1699           PointsToNode* base = i.get();
1700           if (base == null_obj) {
1701             jobj->set_scalar_replaceable(false);
1702             return;
1703           }
1704         }
1705       }
1706     }
1707     assert(use->is_Field() || use->is_LocalVar(), "sanity");
1708     // 3. An object is not scalar replaceable if it is merged with other objects.
1709     for (EdgeIterator j(use); j.has_next(); j.next()) {
1710       PointsToNode* ptn = j.get();
1711       if (ptn->is_JavaObject() && ptn != jobj) {
1712         // Mark all objects.
1713         jobj->set_scalar_replaceable(false);
1714         ptn->set_scalar_replaceable(false);
1715       }
1716     }
1717     if (!jobj->scalar_replaceable()) {
1718       return;
1719     }
1720   }
1721
1722   for (EdgeIterator j(jobj); j.has_next(); j.next()) {
1723     if (j.get()->is_Arraycopy()) {
1724       continue;
1725     }
1726
1727     // A non-escaping object node should point only to field nodes.
1728     FieldNode* field = j.get()->as_Field();
1729     int offset = field->as_Field()->offset();
1730
1731     // 4. An object is not scalar replaceable if it has a field with unknown
1732     //    offset (an array's element is accessed in a loop).
1733     if (offset == Type::OffsetBot) {
1734       jobj->set_scalar_replaceable(false);
1735       return;
1736     }
1737     // 5. Currently an object is not scalar replaceable if a LoadStore node
1738     //    accesses its field since the field's value is unknown after it.
1739     //
1740     Node* n = field->ideal_node();
1741     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1742       if (n->fast_out(i)->is_LoadStore()) {
1743         jobj->set_scalar_replaceable(false);
1744         return;
1745       }
1746     }
1747
1748     // 6. Or the address may point to more than one object. This may produce
1749     //    a false positive result (marking an object not scalar replaceable)
1750     //    since the flow-insensitive escape analysis can't separate
1751     //    the case when stores overwrite the field's value from the case
1752     //    when stores happen on different control branches.
1753     //
1754     // Note: it will disable scalar replacement in some cases:
1755     //
1756     //    Point p[] = new Point[1];
1757     //    p[0] = new Point(); // Will not be scalar replaced
1758     //
1759     // but it will save us from incorrect optimizations in cases such as:
1760     //
1761     //    Point p[] = new Point[1];
1762     //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
1763     //
1764     if (field->base_count() > 1) {
1765       for (BaseIterator i(field); i.has_next(); i.next()) {
1766         PointsToNode* base = i.get();
1767         // Don't take into account LocalVar nodes which
1768         // may point to only one object which should also be
1769         // this field's base by now.
1770         if (base->is_JavaObject() && base != jobj) {
1771           // Mark all bases.
1772           jobj->set_scalar_replaceable(false);
1773           base->set_scalar_replaceable(false);
1774         }
1775       }
1776     }
1777   }
1778 }
1779
1780 #ifdef ASSERT
1781 void ConnectionGraph::verify_connection_graph(
1782                  GrowableArray<PointsToNode*>&   ptnodes_worklist,
1783                  GrowableArray<JavaObjectNode*>& non_escaped_worklist,
1784                  GrowableArray<JavaObjectNode*>& java_objects_worklist,
1785                  GrowableArray<Node*>& addp_worklist) {
1786   // Verify that the graph is complete - no new edges could be added.
1787   int java_objects_length = java_objects_worklist.length();
1788   int non_escaped_length  = non_escaped_worklist.length();
1789   int new_edges = 0;
1790   for (int next = 0; next < java_objects_length; ++next) {
1791     JavaObjectNode* ptn = java_objects_worklist.at(next);
1792     new_edges += add_java_object_edges(ptn, true);
1793   }
1794   assert(new_edges == 0, "graph was not complete");
1795   // Verify that the escape state is final.
1796   int length = non_escaped_worklist.length();
1797   find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist);
1798   assert((non_escaped_length == non_escaped_worklist.length()) &&
1799          (non_escaped_length == length) &&
1800          (_worklist.length() == 0), "escape state was not final");
1801
1802   // Verify fields information.
1803   int addp_length = addp_worklist.length();
1804   for (int next = 0; next < addp_length; ++next) {
1805     Node* n = addp_worklist.at(next);
1806     FieldNode* field = ptnode_adr(n->_idx)->as_Field();
1807     if (field->is_oop()) {
1808       // Verify that the field has all bases.
1809       Node* base = get_addp_base(n);
1810       PointsToNode* ptn = ptnode_adr(base->_idx);
1811       if (ptn->is_JavaObject()) {
1812         assert(field->has_base(ptn->as_JavaObject()), "sanity");
1813       } else {
1814         assert(ptn->is_LocalVar(), "sanity");
1815         for (EdgeIterator i(ptn); i.has_next(); i.next()) {
1816           PointsToNode* e = i.get();
1817           if (e->is_JavaObject()) {
1818             assert(field->has_base(e->as_JavaObject()), "sanity");
1819           }
1820         }
1821       }
1822       // Verify that all fields have initializing values.
1823 if (field->edge_count() == 0) { 1824 tty->print_cr("----------field does not have references----------"); 1825 field->dump(); 1826 for (BaseIterator i(field); i.has_next(); i.next()) { 1827 PointsToNode* base = i.get(); 1828 tty->print_cr("----------field has next base---------------------"); 1829 base->dump(); 1830 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) { 1831 tty->print_cr("----------base has fields-------------------------"); 1832 for (EdgeIterator j(base); j.has_next(); j.next()) { 1833 j.get()->dump(); 1834 } 1835 tty->print_cr("----------base has references---------------------"); 1836 for (UseIterator j(base); j.has_next(); j.next()) { 1837 j.get()->dump(); 1838 } 1839 } 1840 } 1841 for (UseIterator i(field); i.has_next(); i.next()) { 1842 i.get()->dump(); 1843 } 1844 assert(field->edge_count() > 0, "sanity"); 1845 } 1846 } 1847 } 1848 } 1849 #endif 1850 1851 // Optimize ideal graph. 1852 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist, 1853 GrowableArray<Node*>& storestore_worklist) { 1854 Compile* C = _compile; 1855 PhaseIterGVN* igvn = _igvn; 1856 if (EliminateLocks) { 1857 // Mark locks before changing ideal graph. 1858 int cnt = C->macro_count(); 1859 for( int i=0; i < cnt; i++ ) { 1860 Node *n = C->macro_node(i); 1861 if (n->is_AbstractLock()) { // Lock and Unlock nodes 1862 AbstractLockNode* alock = n->as_AbstractLock(); 1863 if (!alock->is_non_esc_obj()) { 1864 if (not_global_escape(alock->obj_node())) { 1865 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity"); 1866 // The lock could be marked eliminated by lock coarsening 1867 // code during first IGVN before EA. Replace coarsened flag 1868 // to eliminate all associated locks/unlocks. 1869 #ifdef ASSERT 1870 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3"); 1871 #endif 1872 alock->set_non_esc_obj(); 1873 } 1874 } 1875 } 1876 } 1877 } 1878 1879 if (OptimizePtrCompare) { 1880 // Add ConI(#CC_GT) and ConI(#CC_EQ). 1881 _pcmp_neq = igvn->makecon(TypeInt::CC_GT); 1882 _pcmp_eq = igvn->makecon(TypeInt::CC_EQ); 1883 // Optimize objects compare. 1884 while (ptr_cmp_worklist.length() != 0) { 1885 Node *n = ptr_cmp_worklist.pop(); 1886 Node *res = optimize_ptr_compare(n); 1887 if (res != NULL) { 1888 #ifndef PRODUCT 1889 if (PrintOptimizePtrCompare) { 1890 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ")); 1891 if (Verbose) { 1892 n->dump(1); 1893 } 1894 } 1895 #endif 1896 igvn->replace_node(n, res); 1897 } 1898 } 1899 // cleanup 1900 if (_pcmp_neq->outcnt() == 0) 1901 igvn->hash_delete(_pcmp_neq); 1902 if (_pcmp_eq->outcnt() == 0) 1903 igvn->hash_delete(_pcmp_eq); 1904 } 1905 1906 // For MemBarStoreStore nodes added in library_call.cpp, check 1907 // escape status of associated AllocateNode and optimize out 1908 // MemBarStoreStore node if the allocated object never escapes. 
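  // The barrier was emitted only to order the new object's initializing
  // stores with a later publication of its reference. If the object never
  // globally escapes, no other thread can observe it, so the barrier can be
  // relaxed to a plain MemBarCPUOrder that just keeps the memory and control
  // edges (illustrative reading; e.g. a clone() whose result stays local).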
1909   while (storestore_worklist.length() != 0) {
1910     Node* n = storestore_worklist.pop();
1911     MemBarStoreStoreNode* storestore = n->as_MemBarStoreStore();
1912     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
1913     assert(alloc->is_Allocate(), "storestore should point to AllocateNode");
1914     if (not_global_escape(alloc)) {
1915       MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
1916       mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
1917       mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
1918       igvn->register_new_node_with_optimizer(mb);
1919       igvn->replace_node(storestore, mb);
1920     }
1921   }
1922 }
1923
1924 // Optimize objects compare.
1925 Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
1926   assert(OptimizePtrCompare, "sanity");
1927   PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
1928   PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
1929   JavaObjectNode* jobj1 = unique_java_object(n->in(1));
1930   JavaObjectNode* jobj2 = unique_java_object(n->in(2));
1931   assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
1932   assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
1933
1934   // Check simple cases first.
1935   if (jobj1 != NULL) {
1936     if (jobj1->escape_state() == PointsToNode::NoEscape) {
1937       if (jobj1 == jobj2) {
1938         // Comparing the same non-escaping object.
1939         return _pcmp_eq;
1940       }
1941       Node* obj = jobj1->ideal_node();
1942       // Comparing a non-escaping allocation.
1943       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
1944           !ptn2->points_to(jobj1)) {
1945         return _pcmp_neq; // This includes nullness check.
1946       }
1947     }
1948   }
1949   if (jobj2 != NULL) {
1950     if (jobj2->escape_state() == PointsToNode::NoEscape) {
1951       Node* obj = jobj2->ideal_node();
1952       // Comparing a non-escaping allocation.
1953       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
1954           !ptn1->points_to(jobj2)) {
1955         return _pcmp_neq; // This includes nullness check.
1956       }
1957     }
1958   }
1959   if (jobj1 != NULL && jobj1 != phantom_obj &&
1960       jobj2 != NULL && jobj2 != phantom_obj &&
1961       jobj1->ideal_node()->is_Con() &&
1962       jobj2->ideal_node()->is_Con()) {
1963     // Klass or String constants compare. Need to be careful with
1964     // compressed pointers - compare types of ConN and ConP instead of nodes.
1965     const Type* t1 = jobj1->ideal_node()->get_ptr_type();
1966     const Type* t2 = jobj2->ideal_node()->get_ptr_type();
1967     if (t1->make_ptr() == t2->make_ptr()) {
1968       return _pcmp_eq;
1969     } else {
1970       return _pcmp_neq;
1971     }
1972   }
1973   if (ptn1->meet(ptn2)) {
1974     return NULL; // Sets are not disjoint
1975   }
1976
1977   // Sets are disjoint.
1978   bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
1979   bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
1980   bool set1_has_null_ptr    = ptn1->points_to(null_obj);
1981   bool set2_has_null_ptr    = ptn2->points_to(null_obj);
1982   if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
1983       (set2_has_unknown_ptr && set1_has_null_ptr)) {
1984     // Check nullness of unknown object.
1985     return NULL;
1986   }
1987
1988   // Disjointness by itself is not sufficient since
1989   // alias analysis is not complete for escaped objects.
1990   // Disjoint sets are definitely unrelated only when
1991   // at least one set has only non-escaping allocations.
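  // Illustrative example (hypothetical Java shape, not from this code):
  //
  //   Object a = new Object();   // non-escaping, unique allocation
  //   if (a == param) { ... }    // 'param' cannot point to 'a', so the
  //                              // CmpP folds to "not equal" (_pcmp_neq)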
1992   if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
1993     if (ptn1->non_escaping_allocation()) {
1994       return _pcmp_neq;
1995     }
1996   }
1997   if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
1998     if (ptn2->non_escaping_allocation()) {
1999       return _pcmp_neq;
2000     }
2001   }
2002   return NULL;
2003 }
2004
2005 // Connection Graph construction functions.
2006
2007 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
2008   PointsToNode* ptadr = _nodes.at(n->_idx);
2009   if (ptadr != NULL) {
2010     assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
2011     return;
2012   }
2013   Compile* C = _compile;
2014   ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
2015   _nodes.at_put(n->_idx, ptadr);
2016 }
2017
2018 void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
2019   PointsToNode* ptadr = _nodes.at(n->_idx);
2020   if (ptadr != NULL) {
2021     assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
2022     return;
2023   }
2024   Compile* C = _compile;
2025   ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
2026   _nodes.at_put(n->_idx, ptadr);
2027 }
2028
2029 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
2030   PointsToNode* ptadr = _nodes.at(n->_idx);
2031   if (ptadr != NULL) {
2032     assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
2033     return;
2034   }
2035   bool unsafe = false;
2036   bool is_oop = is_oop_field(n, offset, &unsafe);
2037   if (unsafe) {
2038     es = PointsToNode::GlobalEscape;
2039   }
2040   Compile* C = _compile;
2041   FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
2042   _nodes.at_put(n->_idx, field);
2043 }
2044
2045 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
2046                                     PointsToNode* src, PointsToNode* dst) {
2047   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
2048   assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
2049   PointsToNode* ptadr = _nodes.at(n->_idx);
2050   if (ptadr != NULL) {
2051     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
2052     return;
2053   }
2054   Compile* C = _compile;
2055   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
2056   _nodes.at_put(n->_idx, ptadr);
2057   // Add edge from arraycopy node to source object.
2058   (void)add_edge(ptadr, src);
2059   src->set_arraycopy_src();
2060   // Add edge from destination object to arraycopy node.
2061   (void)add_edge(dst, ptadr);
2062   dst->set_arraycopy_dst();
2063 }
2064
2065 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2066   const Type* adr_type = n->as_AddP()->bottom_type();
2067   int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
2068   BasicType bt = T_INT;
2069   if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
2070     // Check only oop fields.
2071     if (!adr_type->isa_aryptr() ||
2072         (adr_type->isa_aryptr()->klass() == NULL) ||
2073         adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
2074       // OffsetBot is used to reference an array's element. Ignore first AddP.
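      // (An array element access typically produces two chained AddP nodes:
      // one for the array's header offset and one for the element offset;
      // see the diagram in find_second_addp() below.)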
2075       if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2076         bt = T_OBJECT;
2077       }
2078     }
2079   } else if (offset != oopDesc::klass_offset_in_bytes()) {
2080     if (adr_type->isa_instptr() || adr_type->isa_valuetypeptr()) {
2081       ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
2082       if (field != NULL) {
2083         bt = field->layout_type();
2084       } else {
2085         // Check for unsafe oop field access
2086         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2087             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2088             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
2089           bt = T_OBJECT;
2090           (*unsafe) = true;
2091         }
2092       }
2093     } else if (adr_type->isa_aryptr()) {
2094       if (offset == arrayOopDesc::length_offset_in_bytes()) {
2095         // Ignore array length load.
2096       } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2097         // Ignore first AddP.
2098       } else {
2099         const Type* elemtype = adr_type->isa_aryptr()->elem();
2100         if (elemtype->isa_valuetype() && field_offset != Type::OffsetBot) {
2101           ciValueKlass* vk = elemtype->is_valuetype()->value_klass();
2102           field_offset += vk->first_field_offset();
2103           bt = vk->get_field_by_offset(field_offset, false)->layout_type();
2104         } else {
2105           bt = elemtype->array_element_basic_type();
2106         }
2107       }
2108     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2109       // Allocation initialization, ThreadLocal field access, unsafe access
2110       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2111           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2112           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
2113         bt = T_OBJECT;
2114       }
2115     }
2116   }
2117   // TODO enable when using T_VALUETYPEPTR
2118   //assert(bt != T_VALUETYPE, "should not have valuetype here");
2119   return (bt == T_OBJECT || bt == T_VALUETYPE || bt == T_VALUETYPEPTR || bt == T_NARROWOOP || bt == T_ARRAY);
2120 }
2121
2122 // Returns the unique java object this node points to, or NULL.
2123 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2124   assert(!_collecting, "should not call on a constructed graph");
2125   // If the node was created after the escape computation we can't answer.
2126   uint idx = n->_idx;
2127   if (idx >= nodes_size()) {
2128     return NULL;
2129   }
2130   PointsToNode* ptn = ptnode_adr(idx);
2131   if (ptn->is_JavaObject()) {
2132     return ptn->as_JavaObject();
2133   }
2134   assert(ptn->is_LocalVar(), "sanity");
2135   // Check all java objects it points to.
2136   JavaObjectNode* jobj = NULL;
2137   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2138     PointsToNode* e = i.get();
2139     if (e->is_JavaObject()) {
2140       if (jobj == NULL) {
2141         jobj = e->as_JavaObject();
2142       } else if (jobj != e) {
2143         return NULL;
2144       }
2145     }
2146   }
2147   return jobj;
2148 }
2149
2150 // Return true if this node points only to non-escaping allocations.
2151 bool PointsToNode::non_escaping_allocation() {
2152   if (is_JavaObject()) {
2153     Node* n = ideal_node();
2154     if (n->is_Allocate() || n->is_CallStaticJava()) {
2155       return (escape_state() == PointsToNode::NoEscape);
2156     } else {
2157       return false;
2158     }
2159   }
2160   assert(is_LocalVar(), "sanity");
2161   // Check all java objects it points to.
2162   for (EdgeIterator i(this); i.has_next(); i.next()) {
2163     PointsToNode* e = i.get();
2164     if (e->is_JavaObject()) {
2165       Node* n = e->ideal_node();
2166       if ((e->escape_state() != PointsToNode::NoEscape) ||
2167           !(n->is_Allocate() || n->is_CallStaticJava())) {
2168         return false;
2169       }
2170     }
2171   }
2172   return true;
2173 }
2174
2175 // Return true if we know the node does not escape globally.
2176 bool ConnectionGraph::not_global_escape(Node *n) {
2177   assert(!_collecting, "should not call during graph construction");
2178   // If the node was created after the escape computation we can't answer.
2179   uint idx = n->_idx;
2180   if (idx >= nodes_size()) {
2181     return false;
2182   }
2183   PointsToNode* ptn = ptnode_adr(idx);
2184   PointsToNode::EscapeState es = ptn->escape_state();
2185   // If we have already computed a value, return it.
2186   if (es >= PointsToNode::GlobalEscape)
2187     return false;
2188   if (ptn->is_JavaObject()) {
2189     return true; // (es < PointsToNode::GlobalEscape);
2190   }
2191   assert(ptn->is_LocalVar(), "sanity");
2192   // Check all java objects it points to.
2193   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2194     if (i.get()->escape_state() >= PointsToNode::GlobalEscape)
2195       return false;
2196   }
2197   return true;
2198 }
2199
2200
2201 // Helper functions
2202
2203 // Return true if this node is the specified node or points to it.
2204 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
2205   if (is_JavaObject()) {
2206     return (this == ptn);
2207   }
2208   assert(is_LocalVar() || is_Field(), "sanity");
2209   for (EdgeIterator i(this); i.has_next(); i.next()) {
2210     if (i.get() == ptn)
2211       return true;
2212   }
2213   return false;
2214 }
2215
2216 // Return true if one node points to another.
2217 bool PointsToNode::meet(PointsToNode* ptn) {
2218   if (this == ptn) {
2219     return true;
2220   } else if (ptn->is_JavaObject()) {
2221     return this->points_to(ptn->as_JavaObject());
2222   } else if (this->is_JavaObject()) {
2223     return ptn->points_to(this->as_JavaObject());
2224   }
2225   assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
2226   int ptn_count = ptn->edge_count();
2227   for (EdgeIterator i(this); i.has_next(); i.next()) {
2228     PointsToNode* this_e = i.get();
2229     for (int j = 0; j < ptn_count; j++) {
2230       if (this_e == ptn->edge(j))
2231         return true;
2232     }
2233   }
2234   return false;
2235 }
2236
2237 #ifdef ASSERT
2238 // Return true if the given java object is one of this field's bases.
2239 bool FieldNode::has_base(JavaObjectNode* jobj) const {
2240   for (BaseIterator i(this); i.has_next(); i.next()) {
2241     if (i.get() == jobj)
2242       return true;
2243   }
2244   return false;
2245 }
2246 #endif
2247
2248 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
2249   const Type *adr_type = phase->type(adr);
2250   if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
2251       adr->in(AddPNode::Address)->is_Proj() &&
2252       adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
2253     // We are computing a raw address for a store captured by an Initialize;
2254     // compute an appropriate address type. AddP cases #3 and #5 (see below).
2255 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 2256 assert(offs != Type::OffsetBot || 2257 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), 2258 "offset must be a constant or it is initialization of array"); 2259 return offs; 2260 } 2261 const TypePtr *t_ptr = adr_type->isa_ptr(); 2262 assert(t_ptr != NULL, "must be a pointer type"); 2263 return t_ptr->offset(); 2264 } 2265 2266 Node* ConnectionGraph::get_addp_base(Node *addp) { 2267 assert(addp->is_AddP(), "must be AddP"); 2268 // 2269 // AddP cases for Base and Address inputs: 2270 // case #1. Direct object's field reference: 2271 // Allocate 2272 // | 2273 // Proj #5 ( oop result ) 2274 // | 2275 // CheckCastPP (cast to instance type) 2276 // | | 2277 // AddP ( base == address ) 2278 // 2279 // case #2. Indirect object's field reference: 2280 // Phi 2281 // | 2282 // CastPP (cast to instance type) 2283 // | | 2284 // AddP ( base == address ) 2285 // 2286 // case #3. Raw object's field reference for Initialize node: 2287 // Allocate 2288 // | 2289 // Proj #5 ( oop result ) 2290 // top | 2291 // \ | 2292 // AddP ( base == top ) 2293 // 2294 // case #4. Array's element reference: 2295 // {CheckCastPP | CastPP} 2296 // | | | 2297 // | AddP ( array's element offset ) 2298 // | | 2299 // AddP ( array's offset ) 2300 // 2301 // case #5. Raw object's field reference for arraycopy stub call: 2302 // The inline_native_clone() case when the arraycopy stub is called 2303 // after the allocation before Initialize and CheckCastPP nodes. 2304 // Allocate 2305 // | 2306 // Proj #5 ( oop result ) 2307 // | | 2308 // AddP ( base == address ) 2309 // 2310 // case #6. Constant Pool, ThreadLocal, CastX2P or 2311 // Raw object's field reference: 2312 // {ConP, ThreadLocal, CastX2P, raw Load} 2313 // top | 2314 // \ | 2315 // AddP ( base == top ) 2316 // 2317 // case #7. Klass's field reference. 2318 // LoadKlass 2319 // | | 2320 // AddP ( base == address ) 2321 // 2322 // case #8. narrow Klass's field reference. 2323 // LoadNKlass 2324 // | 2325 // DecodeN 2326 // | | 2327 // AddP ( base == address ) 2328 // 2329 // case #9. Mixed unsafe access 2330 // {instance} 2331 // | 2332 // CheckCastPP (raw) 2333 // top | 2334 // \ | 2335 // AddP ( base == top ) 2336 // 2337 Node *base = addp->in(AddPNode::Base); 2338 if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9. 2339 base = addp->in(AddPNode::Address); 2340 while (base->is_AddP()) { 2341 // Case #6 (unsafe access) may have several chained AddP nodes. 
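      // (Illustrative, not from this code: a raw address built in several
      // steps, e.g. an unsafe access whose long offset is computed as
      // base + off1 + off2 in Java, may appear as nested AddPs whose Base
      // inputs are top; keep following the Address input to the real base.)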
2342       assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
2343       base = base->in(AddPNode::Address);
2344     }
2345     if (base->Opcode() == Op_CheckCastPP &&
2346         base->bottom_type()->isa_rawptr() &&
2347         _igvn->type(base->in(1))->isa_oopptr()) {
2348       base = base->in(1); // Case #9
2349     } else {
2350       Node* uncast_base = base->uncast();
2351       int opcode = uncast_base->Opcode();
2352       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
2353              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
2354              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
2355              (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
2356     }
2357   }
2358   return base;
2359 }
2360
2361 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
2362   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
2363   Node* addp2 = addp->raw_out(0);
2364   if (addp->outcnt() == 1 && addp2->is_AddP() &&
2365       addp2->in(AddPNode::Base) == n &&
2366       addp2->in(AddPNode::Address) == addp) {
2367     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
2368     //
2369     // Find the array's offset to push it on the worklist first so that
2370     // the array's element offset is processed first (pushed second),
2371     // avoiding a CastPP for the array's offset.
2372     // Otherwise the inserted CastPP (LocalVar) would point to what
2373     // the AddP (Field) points to, which would be wrong since the
2374     // algorithm expects the CastPP to point to the same node as the
2375     // AddP's base CheckCastPP (LocalVar).
2376     //
2377     //   ArrayAllocation
2378     //    |
2379     //   CheckCastPP
2380     //    |
2381     //   memProj (from ArrayAllocation CheckCastPP)
2382     //    |  ||
2383     //    |  ||   Int (element index)
2384     //    |  ||    |   ConI (log(element size))
2385     //    |  ||    |   /
2386     //    |  ||   LShift
2387     //    |  ||  /
2388     //    |  AddP (array's element offset)
2389     //    |  |
2390     //    |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
2391     //    | / /
2392     //    AddP (array's offset)
2393     //    |
2394     //   Load/Store (memory operation on array's element)
2395     //
2396     return addp2;
2397   }
2398   return NULL;
2399 }
2400
2401 //
2402 // Adjust the type and inputs of an AddP which computes the
2403 // address of a field of an instance
2404 //
2405 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2406   PhaseGVN* igvn = _igvn;
2407   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
2408   assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
2409   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
2410   if (t == NULL) {
2411     // We are computing a raw address for a store captured by an Initialize;
2412     // compute an appropriate address type (cases #3 and #5).
2413     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
2414     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
2415     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
2416     assert(offs != Type::OffsetBot, "offset must be a constant");
2417     t = base_t->add_offset(offs)->is_oopptr();
2418   }
2419   int inst_id = base_t->instance_id();
2420   assert(!t->is_known_instance() || t->instance_id() == inst_id,
2421          "old type must be non-instance or match new type");
2422
2423   // The type 't' could be a subclass of 'base_t'.
2424   // As a result t->offset() could be larger than base_t's size, which will
2425   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
2426   // constructor verifies the correctness of the offset.
2427   //
2428   // It could happen on a subclass's branch (from the type profiling
2429   // inlining) which was not eliminated during parsing since the exactness
2430   // of the allocation type was not propagated to the subclass type check.
2431   //
2432   // Or the type 't' might not be related to 'base_t' at all.
2433   // It could happen when the CHA type differs from the MDO type on a dead path
2434   // (for example, from an instanceof check) which is not collapsed during parsing.
2435   //
2436   // Do nothing for such an AddP node and don't process its users since
2437   // this code branch will go away.
2438   //
2439   if (!t->is_known_instance() &&
2440       !base_t->klass()->is_subtype_of(t->klass())) {
2441     return false; // bail out
2442   }
2443   const TypePtr* tinst = base_t->add_offset(t->offset());
2444   if (tinst->isa_aryptr() && t->isa_aryptr()) {
2445     // In the case of a flattened value type array, each field has its
2446     // own slice so we need to keep track of the field being accessed.
2447     tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
2448   }
2449
2450   // Do NOT remove the next line: ensure a new alias index is allocated
2451   // for the instance type. Note: C++ will not remove it since the call
2452   // has side effects.
2453   int alias_idx = _compile->get_alias_index(tinst);
2454   igvn->set_type(addp, tinst);
2455   // record the allocation in the node map
2456   set_map(addp, get_map(base->_idx));
2457   // Set addp's Base and Address to 'base'.
2458   Node *abase = addp->in(AddPNode::Base);
2459   Node *adr   = addp->in(AddPNode::Address);
2460   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2461       adr->in(0)->_idx == (uint)inst_id) {
2462     // Skip AddP cases #3 and #5.
2463   } else {
2464     assert(!abase->is_top(), "sanity"); // AddP case #3
2465     if (abase != base) {
2466       igvn->hash_delete(addp);
2467       addp->set_req(AddPNode::Base, base);
2468       if (abase == adr) {
2469         addp->set_req(AddPNode::Address, base);
2470       } else {
2471         // AddP case #4 (adr is array's element offset AddP node)
2472 #ifdef ASSERT
2473         const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
2474         assert(adr->is_AddP() && atype != NULL &&
2475                atype->instance_id() == inst_id, "array's element offset should be processed first");
2476 #endif
2477       }
2478       igvn->hash_insert(addp);
2479     }
2480   }
2481   // Put on IGVN worklist since at least addp's type was changed above.
2482   record_for_optimizer(addp);
2483   return true;
2484 }
2485
2486 //
2487 // Create a new version of orig_phi if necessary. Returns either the newly
2488 // created phi or an existing phi. Sets new_created to indicate whether a new
2489 // phi was created. Caches the last newly created phi in the node map.
2490 //
2491 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {
2492   Compile *C = _compile;
2493   PhaseGVN* igvn = _igvn;
2494   new_created = false;
2495   int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
2496   // nothing to do if orig_phi is bottom memory or matches alias_idx
2497   if (phi_alias_idx == alias_idx) {
2498     return orig_phi;
2499   }
2500   // Have we recently created a Phi for this alias index?
2501   PhiNode *result = get_map_phi(orig_phi->_idx);
2502   if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
2503     return result;
2504   }
2505   // The previous check may fail when the same wide memory Phi was split into Phis
2506   // for different memory slices. Search all Phis for this region.
2507   if (result != NULL) {
2508     Node* region = orig_phi->in(0);
2509     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2510       Node* phi = region->fast_out(i);
2511       if (phi->is_Phi() &&
2512           C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
2513         assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
2514         return phi->as_Phi();
2515       }
2516     }
2517   }
2518   if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
2519     if (C->do_escape_analysis() == true && !C->failing()) {
2520       // Retry compilation without escape analysis.
2521       // If this is the first failure, the sentinel string will "stick"
2522       // to the Compile object, and the C2Compiler will see it and retry.
2523       C->record_failure(C2Compiler::retry_no_escape_analysis());
2524     }
2525     return NULL;
2526   }
2527   orig_phi_worklist.append_if_missing(orig_phi);
2528   const TypePtr *atype = C->get_adr_type(alias_idx);
2529   result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
2530   C->copy_node_notes_to(result, orig_phi);
2531   igvn->set_type(result, result->bottom_type());
2532   record_for_optimizer(result);
2533   set_map(orig_phi, result);
2534   new_created = true;
2535   return result;
2536 }
2537
2538 //
2539 // Return a new version of Memory Phi "orig_phi" with the inputs having the
2540 // specified alias index.
2541 //
2542 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {
2543   assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
2544   Compile *C = _compile;
2545   PhaseGVN* igvn = _igvn;
2546   bool new_phi_created;
2547   PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
2548   if (!new_phi_created) {
2549     return result;
2550   }
2551   GrowableArray<PhiNode *> phi_list;
2552   GrowableArray<uint> cur_input;
2553   PhiNode *phi = orig_phi;
2554   uint idx = 1;
2555   bool finished = false;
2556   while (!finished) {
2557     while (idx < phi->req()) {
2558       Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
2559       if (mem != NULL && mem->is_Phi()) {
2560         PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
2561         if (new_phi_created) {
2562           // Found a phi for which we created a new split; push the current
2563           // one on the worklist and begin processing the new one.
2564           phi_list.push(phi);
2565           cur_input.push(idx);
2566           phi = mem->as_Phi();
2567           result = newphi;
2568           idx = 1;
2569           continue;
2570         } else {
2571           mem = newphi;
2572         }
2573       }
2574       if (C->failing()) {
2575         return NULL;
2576       }
2577       result->set_req(idx++, mem);
2578     }
2579 #ifdef ASSERT
2580     // verify that the new Phi has an input for each input of the original
2581     assert(phi->req() == result->req(), "must have same number of inputs.");
2582     assert(result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
2583 #endif
2584     // Check if all of the new phi's inputs have the specified alias index.
2585     // Otherwise use the old phi.
2586 for (uint i = 1; i < phi->req(); i++) { 2587 Node* in = result->in(i); 2588 assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond."); 2589 } 2590 // we have finished processing a Phi, see if there are any more to do 2591 finished = (phi_list.length() == 0 ); 2592 if (!finished) { 2593 phi = phi_list.pop(); 2594 idx = cur_input.pop(); 2595 PhiNode *prev_result = get_map_phi(phi->_idx); 2596 prev_result->set_req(idx++, result); 2597 result = prev_result; 2598 } 2599 } 2600 return result; 2601 } 2602 2603 // 2604 // The next methods are derived from methods in MemNode. 2605 // 2606 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) { 2607 Node *mem = mmem; 2608 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally 2609 // means an array I have not precisely typed yet. Do not do any 2610 // alias stuff with it any time soon. 2611 if (toop->base() != Type::AnyPtr && 2612 !(toop->klass() != NULL && 2613 toop->klass()->is_java_lang_Object() && 2614 toop->offset() == Type::OffsetBot)) { 2615 mem = mmem->memory_at(alias_idx); 2616 // Update input if it is progress over what we have now 2617 } 2618 return mem; 2619 } 2620 2621 // 2622 // Move memory users to their memory slices. 2623 // 2624 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) { 2625 Compile* C = _compile; 2626 PhaseGVN* igvn = _igvn; 2627 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 2628 assert(tp != NULL, "ptr type"); 2629 int alias_idx = C->get_alias_index(tp); 2630 int general_idx = C->get_general_index(alias_idx); 2631 2632 // Move users first 2633 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2634 Node* use = n->fast_out(i); 2635 if (use->is_MergeMem()) { 2636 MergeMemNode* mmem = use->as_MergeMem(); 2637 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); 2638 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 2639 continue; // Nothing to do 2640 } 2641 // Replace previous general reference to mem node. 2642 uint orig_uniq = C->unique(); 2643 Node* m = find_inst_mem(n, general_idx, orig_phis); 2644 assert(orig_uniq == C->unique(), "no new nodes"); 2645 mmem->set_memory_at(general_idx, m); 2646 --imax; 2647 --i; 2648 } else if (use->is_MemBar()) { 2649 assert(!use->is_Initialize(), "initializing stores should not be moved"); 2650 if (use->req() > MemBarNode::Precedent && 2651 use->in(MemBarNode::Precedent) == n) { 2652 // Don't move related membars. 2653 record_for_optimizer(use); 2654 continue; 2655 } 2656 tp = use->as_MemBar()->adr_type()->isa_ptr(); 2657 if ((tp != NULL && C->get_alias_index(tp) == alias_idx) || 2658 alias_idx == general_idx) { 2659 continue; // Nothing to do 2660 } 2661 // Move to general memory slice. 2662 uint orig_uniq = C->unique(); 2663 Node* m = find_inst_mem(n, general_idx, orig_phis); 2664 assert(orig_uniq == C->unique(), "no new nodes"); 2665 igvn->hash_delete(use); 2666 imax -= use->replace_edge(n, m); 2667 igvn->hash_insert(use); 2668 record_for_optimizer(use); 2669 --i; 2670 #ifdef ASSERT 2671 } else if (use->is_Mem()) { 2672 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) { 2673 // Don't move related cardmark. 2674 continue; 2675 } 2676 // Memory nodes should have new memory input. 
2677       tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
2678       assert(tp != NULL, "ptr type");
2679       int idx = C->get_alias_index(tp);
2680       assert(get_map(use->_idx) != NULL || idx == alias_idx,
2681              "Following memory nodes should have new memory input or be on the same memory slice");
2682     } else if (use->is_Phi()) {
2683       // Phi nodes should be split and moved already.
2684       tp = use->as_Phi()->adr_type()->isa_ptr();
2685       assert(tp != NULL, "ptr type");
2686       int idx = C->get_alias_index(tp);
2687       assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
2688     } else {
2689       use->dump();
2690       assert(false, "should not be here");
2691 #endif
2692     }
2693   }
2694 }
2695
2696 //
2697 // Search the memory chain of "mem" to find a MemNode whose address
2698 // is at the specified alias index.
2699 //
2700 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {
2701   if (orig_mem == NULL)
2702     return orig_mem;
2703   Compile* C = _compile;
2704   PhaseGVN* igvn = _igvn;
2705   const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
2706   bool is_instance = (toop != NULL) && toop->is_known_instance();
2707   Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
2708   Node *prev = NULL;
2709   Node *result = orig_mem;
2710   while (prev != result) {
2711     prev = result;
2712     if (result == start_mem)
2713       break; // hit one of our sentinels
2714     if (result->is_Mem()) {
2715       const Type *at = igvn->type(result->in(MemNode::Address));
2716       if (at == Type::TOP)
2717         break; // Dead
2718       assert(at->isa_ptr() != NULL, "pointer type required.");
2719       int idx = C->get_alias_index(at->is_ptr());
2720       if (idx == alias_idx)
2721         break; // Found
2722       if (!is_instance && (at->isa_oopptr() == NULL ||
2723                            !at->is_oopptr()->is_known_instance())) {
2724         break; // Do not skip store to general memory slice.
2725       }
2726       result = result->in(MemNode::Memory);
2727     }
2728     if (!is_instance)
2729       continue; // don't search further for non-instance types
2730     // skip over a call which does not affect this memory slice
2731     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
2732       Node *proj_in = result->in(0);
2733       if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
2734         break; // hit one of our sentinels
2735       } else if (proj_in->is_Call()) {
2736         // ArrayCopy node processed here as well
2737         CallNode *call = proj_in->as_Call();
2738         if (!call->may_modify(toop, igvn)) {
2739           result = call->in(TypeFunc::Memory);
2740         }
2741       } else if (proj_in->is_Initialize()) {
2742         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
2743         // Stop if this is the initialization for the object instance
2744         // which contains this memory slice, otherwise skip over it.
2745 if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) { 2746 result = proj_in->in(TypeFunc::Memory); 2747 } 2748 } else if (proj_in->is_MemBar()) { 2749 if (proj_in->in(TypeFunc::Memory)->is_MergeMem() && 2750 proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->is_Proj() && 2751 proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->in(0)->is_ArrayCopy()) { 2752 // clone 2753 ArrayCopyNode* ac = proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->in(0)->as_ArrayCopy(); 2754 if (ac->may_modify(toop, igvn)) { 2755 break; 2756 } 2757 } 2758 result = proj_in->in(TypeFunc::Memory); 2759 } 2760 } else if (result->is_MergeMem()) { 2761 MergeMemNode *mmem = result->as_MergeMem(); 2762 result = step_through_mergemem(mmem, alias_idx, toop); 2763 if (result == mmem->base_memory()) { 2764 // Didn't find instance memory, search through general slice recursively. 2765 result = mmem->memory_at(C->get_general_index(alias_idx)); 2766 result = find_inst_mem(result, alias_idx, orig_phis); 2767 if (C->failing()) { 2768 return NULL; 2769 } 2770 mmem->set_memory_at(alias_idx, result); 2771 } 2772 } else if (result->is_Phi() && 2773 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { 2774 Node *un = result->as_Phi()->unique_input(igvn); 2775 if (un != NULL) { 2776 orig_phis.append_if_missing(result->as_Phi()); 2777 result = un; 2778 } else { 2779 break; 2780 } 2781 } else if (result->is_ClearArray()) { 2782 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) { 2783 // Can not bypass initialization of the instance 2784 // we are looking for. 2785 break; 2786 } 2787 // Otherwise skip it (the call updated 'result' value). 2788 } else if (result->Opcode() == Op_SCMemProj) { 2789 Node* mem = result->in(0); 2790 Node* adr = NULL; 2791 if (mem->is_LoadStore()) { 2792 adr = mem->in(MemNode::Address); 2793 } else { 2794 assert(mem->Opcode() == Op_EncodeISOArray || 2795 mem->Opcode() == Op_StrCompressedCopy, "sanity"); 2796 adr = mem->in(3); // Memory edge corresponds to destination array 2797 } 2798 const Type *at = igvn->type(adr); 2799 if (at != Type::TOP) { 2800 assert(at->isa_ptr() != NULL, "pointer type required."); 2801 int idx = C->get_alias_index(at->is_ptr()); 2802 if (idx == alias_idx) { 2803 // Assert in debug mode 2804 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field"); 2805 break; // In product mode return SCMemProj node 2806 } 2807 } 2808 result = mem->in(MemNode::Memory); 2809 } else if (result->Opcode() == Op_StrInflatedCopy) { 2810 Node* adr = result->in(3); // Memory edge corresponds to destination array 2811 const Type *at = igvn->type(adr); 2812 if (at != Type::TOP) { 2813 assert(at->isa_ptr() != NULL, "pointer type required."); 2814 int idx = C->get_alias_index(at->is_ptr()); 2815 if (idx == alias_idx) { 2816 // Assert in debug mode 2817 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field"); 2818 break; // In product mode return SCMemProj node 2819 } 2820 } 2821 result = result->in(MemNode::Memory); 2822 } 2823 } 2824 if (result->is_Phi()) { 2825 PhiNode *mphi = result->as_Phi(); 2826 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); 2827 const TypePtr *t = mphi->adr_type(); 2828 if (!is_instance) { 2829 // Push all non-instance Phis on the orig_phis worklist to update inputs 2830 // during Phase 4 if needed. 
2831       orig_phis.append_if_missing(mphi);
2832     } else if (C->get_alias_index(t) != alias_idx) {
2833       // Create a new Phi with the specified alias index type.
2834       result = split_memory_phi(mphi, alias_idx, orig_phis);
2835     }
2836   }
2837   // the result is either MemNode, PhiNode, InitializeNode.
2838   return result;
2839 }
2840
2841 //
2842 // Convert the types of unescaped objects to instance types where possible,
2843 // propagate the new type information through the graph, and update memory
2844 // edges and MergeMem inputs to reflect the new type.
2845 //
2846 // We start with allocations (and calls which may be allocations) on alloc_worklist.
2847 // The processing is done in 4 phases:
2848 //
2849 // Phase 1:  Process possible allocations from alloc_worklist. Create instance
2850 //           types for the CheckCastPP for allocations where possible.
2851 //           Propagate the new types through users as follows:
2852 //              casts and Phi:  push users on alloc_worklist
2853 //              AddP:  cast Base and Address inputs to the instance type
2854 //                     push any AddP users on alloc_worklist and push any memnode
2855 //                     users onto memnode_worklist.
2856 // Phase 2:  Process MemNode's from memnode_worklist. Compute a new address type and
2857 //           search the Memory chain for a store with the appropriate address type.
2858 //           If a Phi is found, create a new version with
2859 //           the appropriate memory slices from each of the Phi inputs.
2860 //           For stores, process the users as follows:
2861 //              MemNode:   push on memnode_worklist
2862 //              MergeMem:  push on mergemem_worklist
2863 // Phase 3:  Process MergeMem nodes from mergemem_worklist. Walk each memory slice,
2864 //           moving the first node encountered of each instance type to the
2865 //           input corresponding to its alias index, i.e. to the appropriate
2866 //           memory slice.
2867 // Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
2868 //
2869 // In the following example, the CheckCastPP nodes are the casts of allocation
2870 // results and the allocation of node 29 is unescaped and eligible to be an
2871 // instance type.
2872 //
2873 // We start with:
2874 //
2875 //     7 Parm #memory
2876 //    10  ConI  "12"
2877 //    19  CheckCastPP   "Foo"
2878 //    20  AddP  _ 19 19 10   Foo+12  alias_index=4
2879 //    29  CheckCastPP   "Foo"
2880 //    30  AddP  _ 29 29 10   Foo+12  alias_index=4
2881 //
2882 //    40  StoreP  25   7  20   ... alias_index=4
2883 //    50  StoreP  35  40  30   ... alias_index=4
2884 //    60  StoreP  45  50  20   ... alias_index=4
2885 //    70  LoadP    _  60  30   ... alias_index=4
2886 //    80  Phi     75  50  60   Memory alias_index=4
2887 //    90  LoadP    _  80  30   ... alias_index=4
2888 //   100  LoadP    _  80  20   ... alias_index=4
2889 //
2890 //
2891 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24
2892 // and creating a new alias index for node 30. This gives:
2893 //
2894 //     7 Parm #memory
2895 //    10  ConI  "12"
2896 //    19  CheckCastPP   "Foo"
2897 //    20  AddP  _ 19 19 10   Foo+12  alias_index=4
2898 //    29  CheckCastPP   "Foo"  iid=24
2899 //    30  AddP  _ 29 29 10   Foo+12  alias_index=6  iid=24
2900 //
2901 //    40  StoreP  25   7  20   ... alias_index=4
2902 //    50  StoreP  35  40  30   ... alias_index=6
2903 //    60  StoreP  45  50  20   ... alias_index=4
2904 //    70  LoadP    _  60  30   ... alias_index=6
2905 //    80  Phi     75  50  60   Memory alias_index=4
2906 //    90  LoadP    _  80  30   ... alias_index=6
2907 //   100  LoadP    _  80  20   ... alias_index=4
2908 //
2909 // In phase 2, new memory inputs are computed for the loads and stores,
2910 // and a new version of the phi is created. In phase 4, the inputs to
2911 // node 80 are updated and then the memory nodes are updated with the
2912 // values computed in phase 2. This results in:
2913 //
2914 //     7 Parm #memory
2915 //    10  ConI  "12"
2916 //    19  CheckCastPP   "Foo"
2917 //    20  AddP  _ 19 19 10   Foo+12  alias_index=4
2918 //    29  CheckCastPP   "Foo"  iid=24
2919 //    30  AddP  _ 29 29 10   Foo+12  alias_index=6  iid=24
2920 //
2921 //    40  StoreP  25   7  20   ... alias_index=4
2922 //    50  StoreP  35   7  30   ... alias_index=6
2923 //    60  StoreP  45  40  20   ... alias_index=4
2924 //    70  LoadP    _  50  30   ... alias_index=6
2925 //    80  Phi     75  40  60   Memory alias_index=4
2926 //   120  Phi     75  50  50   Memory alias_index=6
2927 //    90  LoadP    _ 120  30   ... alias_index=6
2928 //   100  LoadP    _  80  20   ... alias_index=4
2929 //
2930 void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist, GrowableArray<ArrayCopyNode*> &arraycopy_worklist) {
2931   GrowableArray<Node *>  memnode_worklist;
2932   GrowableArray<PhiNode *>  orig_phis;
2933   PhaseIterGVN  *igvn = _igvn;
2934   uint new_index_start = (uint) _compile->num_alias_types();
2935   Arena* arena = Thread::current()->resource_area();
2936   VectorSet visited(arena);
2937   ideal_nodes.clear(); // Reset for use with set_map/get_map.
2938   uint unique_old = _compile->unique();
2939
2940   //  Phase 1:  Process possible allocations from alloc_worklist.
2941   //  Create instance types for the CheckCastPP for allocations where possible.
2942   //
2943   // (Note: don't forget to change the order of the second AddP node on
2944   //  the alloc_worklist if the order of the worklist processing is changed,
2945   //  see the comment in find_second_addp().)
2946   //
2947   while (alloc_worklist.length() != 0) {
2948     Node *n = alloc_worklist.pop();
2949     uint ni = n->_idx;
2950     if (n->is_Call()) {
2951       CallNode *alloc = n->as_Call();
2952       // copy escape information to call node
2953       PointsToNode* ptn = ptnode_adr(alloc->_idx);
2954       PointsToNode::EscapeState es = ptn->escape_state();
2955       // We have an allocation or call which returns a Java object,
2956       // see if it is unescaped.
2957       if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable())
2958         continue;
2959       // Find the CheckCastPP for the allocate or for the return value of a call
2960       n = alloc->result_cast();
2961       if (n == NULL) {            // No uses except Initialize node
2962         if (alloc->is_Allocate()) {
2963           // Set the scalar_replaceable flag for the allocation
2964           // so it can be eliminated if it has no uses.
2965           alloc->as_Allocate()->_is_scalar_replaceable = true;
2966         }
2967         if (alloc->is_CallStaticJava()) {
2968           // Set the scalar_replaceable flag for the boxing method
2969           // so it can be eliminated if it has no uses.
2970           alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
2971         }
2972         continue;
2973       }
2974       if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
2975         assert(!alloc->is_Allocate(), "allocation should have unique type");
2976         continue;
2977       }
2978
2979       // The inline code for Object.clone() casts the allocation result to
2980       // java.lang.Object and then to the actual type of the allocated
2981       // object. Detect this case and use the second cast.
2982       // Also detect the j.l.reflect.Array.newInstance(jobject, jint) case when
2983       // the allocation result is cast to java.lang.Object and then
2984       // to the actual Array type.
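      // Illustrative Java shapes (hypothetical, not from this code):
      //
      //   Foo f = (Foo) foo.clone();                  // cast to Object, then to Foo
      //   int[] a = (int[]) Array.newInstance(...);   // cast to Object, then to int[]
      //
      // In both cases only the second CheckCastPP carries the exact type,
      // so the code below walks to that second cast.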
2985       if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
2986           && (alloc->is_AllocateArray() ||
2987               igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {
2988         Node *cast2 = NULL;
2989         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2990           Node *use = n->fast_out(i);
2991           if (use->is_CheckCastPP()) {
2992             cast2 = use;
2993             break;
2994           }
2995         }
2996         if (cast2 != NULL) {
2997           n = cast2;
2998         } else {
2999           // Not scalar replaceable if the allocation type is unknown statically
3000           // (reflection allocation): the object can't be restored during
3001           // deoptimization without a precise type.
3002           continue;
3003         }
3004       }
3005
3006       const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
3007       if (t == NULL)
3008         continue;  // not a TypeOopPtr
3009       if (!t->klass_is_exact())
3010         continue; // not a unique type
3011
3012       if (alloc->is_Allocate()) {
3013         // Set the scalar_replaceable flag for the allocation
3014         // so it can be eliminated.
3015         alloc->as_Allocate()->_is_scalar_replaceable = true;
3016       }
3017       if (alloc->is_CallStaticJava()) {
3018         // Set the scalar_replaceable flag for the boxing method
3019         // so it can be eliminated.
3020         alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
3021       }
3022       set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state
3023       // In order for an object to be scalar-replaceable, it must be:
3024       //   - a direct allocation (not a call returning an object)
3025       //   - non-escaping
3026       //   - eligible to be a unique type
3027       //   - not determined to be ineligible by escape analysis
3028       set_map(alloc, n);
3029       set_map(n, alloc);
3030       const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
3031       igvn->hash_delete(n);
3032       igvn->set_type(n, tinst);
3033       n->raise_bottom_type(tinst);
3034       igvn->hash_insert(n);
3035       record_for_optimizer(n);
3036       if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr() || t->isa_valuetypeptr())) {
3037
3038         // First, put on the worklist all Field edges from the Connection Graph,
3039         // which is more accurate than putting the immediate users from the Ideal Graph.
3040         for (EdgeIterator e(ptn); e.has_next(); e.next()) {
3041           PointsToNode* tgt = e.get();
3042           if (tgt->is_Arraycopy()) {
3043             continue;
3044           }
3045           Node* use = tgt->ideal_node();
3046           assert(tgt->is_Field() && use->is_AddP(),
3047                  "only AddP nodes are Field edges in CG");
3048           if (use->outcnt() > 0) { // Don't process dead nodes
3049             Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
3050             if (addp2 != NULL) {
3051               assert(alloc->is_AllocateArray(), "array allocation was expected");
3052               alloc_worklist.append_if_missing(addp2);
3053             }
3054             alloc_worklist.append_if_missing(use);
3055           }
3056         }
3057
3058         // An allocation may have an Initialize which has raw stores. Scan
3059         // the users of the raw allocation result and push AddP users
3060         // on alloc_worklist.
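        // (Illustrative: initializing stores emitted between the Allocate and
        // its CheckCastPP address the object through the raw result projection,
        // so their AddP users are only reachable from that projection, which is
        // scanned below.)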
        Node *raw_result = alloc->proj_out(TypeFunc::Parms);
        assert(raw_result != NULL, "must have an allocation result");
        for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
          Node *use = raw_result->fast_out(i);
          if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, raw_result);
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(), "array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          } else if (use->is_MemBar()) {
            memnode_worklist.append_if_missing(use);
          }
        }
      }
    } else if (n->is_AddP()) {
      JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
      if (jobj == NULL || jobj == phantom_obj) {
#ifdef ASSERT
        ptnode_adr(get_addp_base(n)->_idx)->dump();
        ptnode_adr(n->_idx)->dump();
        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
#endif
        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
        return;
      }
      Node *base = get_map(jobj->idx());  // CheckCastPP node
      if (!split_AddP(n, base)) continue; // wrong type from dead path
    } else if (n->is_Phi() ||
               n->is_CheckCastPP() ||
               n->is_EncodeP() ||
               n->is_DecodeN() ||
               (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
      if (visited.test_set(n->_idx)) {
        assert(n->is_Phi(), "loops only through Phi's");
        continue; // already processed
      }
      JavaObjectNode* jobj = unique_java_object(n);
      if (jobj == NULL || jobj == phantom_obj) {
#ifdef ASSERT
        ptnode_adr(n->_idx)->dump();
        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
#endif
        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
        return;
      } else {
        Node *val = get_map(jobj->idx()); // CheckCastPP node
        TypeNode *tn = n->as_Type();
        const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
        assert(tinst != NULL && tinst->is_known_instance() &&
               tinst->instance_id() == jobj->idx(), "instance type expected.");

        const Type *tn_type = igvn->type(tn);
        const TypeOopPtr *tn_t;
        if (tn_type->isa_narrowoop()) {
          tn_t = tn_type->make_ptr()->isa_oopptr();
        } else {
          tn_t = tn_type->isa_oopptr();
        }
        if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
          if (tn_type->isa_narrowoop()) {
            tn_type = tinst->make_narrowoop();
          } else {
            tn_type = tinst;
          }
          igvn->hash_delete(tn);
          igvn->set_type(tn, tn_type);
          tn->set_type(tn_type);
          igvn->hash_insert(tn);
          record_for_optimizer(n);
        } else {
          assert(tn_type == TypePtr::NULL_PTR ||
                 (tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass())),
                 "unexpected type");
          continue; // Skip dead path with different type
        }
      }
    } else {
      debug_only(n->dump();)
      assert(false, "EA: unexpected node");
      continue;
    }
    // Push the allocation's users on the appropriate worklist.
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Mem() && use->in(MemNode::Address) == n) {
        // Load/store to instance's field
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
      } else if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
        Node* addp2 = find_second_addp(use, n);
        if (addp2 != NULL) {
          alloc_worklist.append_if_missing(addp2);
        }
        alloc_worklist.append_if_missing(use);
      } else if (use->is_Phi() ||
                 use->is_CheckCastPP() ||
                 use->is_EncodeNarrowPtr() ||
                 use->is_DecodeNarrowPtr() ||
                 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
        alloc_worklist.append_if_missing(use);
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->is_SafePoint()) {
        // Look for MergeMem nodes for calls which reference unique allocation
        // (through CheckCastPP nodes) even for debug info.
        Node* m = use->in(TypeFunc::Memory);
        if (m->is_MergeMem()) {
          assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
        }
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites the destination array.
          memnode_worklist.append_if_missing(use);
        }
      } else if (use->Opcode() == Op_Return) {
        assert(_compile->tf()->returns_value_type_as_fields(), "must return a value type");
        // Get the ValueKlass by removing the tag bit from the metadata pointer.
        Node* klass = use->in(TypeFunc::Parms);
        intptr_t ptr = igvn->type(klass)->isa_rawptr()->get_con();
        clear_nth_bit(ptr, 0);
        assert(Metaspace::contains((void*)ptr), "should be klass");
        assert(((ValueKlass*)ptr)->contains_oops(), "returned value type must contain a reference field");
      } else {
        uint op = use->Opcode();
        if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
            (use->in(MemNode::Memory) == n)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(op == Op_CmpP || op == Op_Conv2B ||
                     op == Op_CastP2X || op == Op_StoreCM ||
                     op == Op_FastLock || op == Op_AryEq ||
                     op == Op_StrComp || op == Op_HasNegatives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
                     op == Op_ValueType)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing allocation reference path");
        }
#endif
      }
    }
  }

  // Go over all ArrayCopy nodes and if one of the inputs has a unique
  // type, record it in the ArrayCopy node so we know what memory this
  // node uses/modifies.
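  //
  // For example (an illustrative sketch; names are made up), for
  //
  //   Object[] a = new Object[10];        // non-escaping, unique instance type
  //   System.arraycopy(src, 0, a, 0, 5);
  //
  // the ArrayCopy node's _dest_type is set to the instance type recorded for
  // 'a', confining the copy's memory effect to that instance's slice.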
  for (int next = 0; next < arraycopy_worklist.length(); next++) {
    ArrayCopyNode* ac = arraycopy_worklist.at(next);
    Node* dest = ac->in(ArrayCopyNode::Dest);
    if (dest->is_AddP()) {
      dest = get_addp_base(dest);
    }
    JavaObjectNode* jobj = unique_java_object(dest);
    if (jobj != NULL) {
      Node *base = get_map(jobj->idx());
      if (base != NULL) {
        const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
        ac->_dest_type = base_t;
      }
    }
    Node* src = ac->in(ArrayCopyNode::Src);
    if (src->is_AddP()) {
      src = get_addp_base(src);
    }
    jobj = unique_java_object(src);
    if (jobj != NULL) {
      Node* base = get_map(jobj->idx());
      if (base != NULL) {
        const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
        ac->_src_type = base_t;
      }
    }
  }

  // New alias types were created in split_AddP().
  uint new_index_end = (uint) _compile->num_alias_types();
  assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");

  // Phase 2: Process MemNodes from memnode_worklist. Compute the new address
  //          type and new values for the Memory inputs (the Memory inputs are
  //          not actually updated until Phase 4).
  if (memnode_worklist.length() == 0)
    return; // nothing to do
  while (memnode_worklist.length() != 0) {
    Node *n = memnode_worklist.pop();
    if (visited.test_set(n->_idx))
      continue;
    if (n->is_Phi() || n->is_ClearArray()) {
      // we don't need to do anything, but the users must be pushed
    } else if (n->is_MemBar()) { // Initialize, MemBar nodes
      // we don't need to do anything, but the users must be pushed
      n = n->as_MemBar()->proj_out(TypeFunc::Memory);
      if (n == NULL)
        continue;
    } else if (n->Opcode() == Op_StrCompressedCopy ||
               n->Opcode() == Op_EncodeISOArray) {
      // get the memory projection
      n = n->find_out_with(Op_SCMemProj);
      assert(n->Opcode() == Op_SCMemProj, "memory projection required");
    } else {
      assert(n->is_Mem(), "memory node required.");
      Node *addr = n->in(MemNode::Address);
      const Type *addr_t = igvn->type(addr);
      if (addr_t == Type::TOP)
        continue;
      assert(addr_t->isa_ptr() != NULL, "pointer type required.");
      int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
      assert((uint)alias_idx < new_index_end, "wrong alias index");
      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != n->in(MemNode::Memory)) {
        // We delay the memory edge update since we need the old one in
        // the MergeMem code below when instance memory slices are separated.
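        // (set_map() only records the new memory value here; the actual
        //  set_req(MemNode::Memory, ...) is done in Phase 4 below.)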
        set_map(n, mem);
      }
      if (n->is_Load()) {
        continue; // don't push users
      } else if (n->is_LoadStore()) {
        // get the memory projection
        n = n->find_out_with(Op_SCMemProj);
        assert(n->Opcode() == Op_SCMemProj, "memory projection required");
      }
    }
    // Push the users on the appropriate worklist.
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Phi() || use->is_ClearArray()) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
        if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
          continue;
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites the destination array.
          memnode_worklist.append_if_missing(use);
        }
      } else {
        uint op = use->Opcode();
        if ((use->in(MemNode::Memory) == n) &&
            (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(op == Op_StoreCM ||
                     (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
                      strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
                     op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing memory path");
        }
#endif
      }
    }
  }

  // Phase 3: Process MergeMem nodes from mergemem_worklist.
  //          Walk each memory slice, moving the first node encountered of each
  //          instance type to the input corresponding to its alias index.
  uint length = _mergemem_worklist.length();
  for (uint next = 0; next < length; ++next) {
    MergeMemNode* nmm = _mergemem_worklist.at(next);
    assert(!visited.test_set(nmm->_idx), "should not be visited before");
    // Note: we don't want to use MergeMemStream here because we only want to
    // scan inputs which exist at the start, not ones we add during processing.
    // Note 2: MergeMem may already contain instance memory slices added
    // during find_inst_mem() calls when memory nodes were processed above.
    igvn->hash_delete(nmm);
    uint nslices = MIN2(nmm->req(), new_index_start);
    for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
      Node* mem = nmm->in(i);
      Node* cur = NULL;
      if (mem == NULL || mem->is_top())
        continue;
      // First, update the mergemem by moving memory nodes to their corresponding
      // slices if their type became more precise since this mergemem was created.
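      // Walking up the memory chain from slice i: the first node whose alias
      // index is still i stays as this slice's input (cur); a node whose
      // address type now maps to a different index idx is moved into slice
      // idx if that slice is empty.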
      while (mem->is_Mem()) {
        const Type *at = igvn->type(mem->in(MemNode::Address));
        if (at != Type::TOP) {
          assert(at->isa_ptr() != NULL, "pointer type required.");
          uint idx = (uint)_compile->get_alias_index(at->is_ptr());
          if (idx == i) {
            if (cur == NULL)
              cur = mem;
          } else {
            if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
              nmm->set_memory_at(idx, mem);
            }
          }
        }
        mem = mem->in(MemNode::Memory);
      }
      nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
      // Find any instance of the current type if we haven't already encountered
      // a memory slice of the instance along the memory chain.
      for (uint ni = new_index_start; ni < new_index_end; ni++) {
        if ((uint)_compile->get_general_index(ni) == i) {
          Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
          if (nmm->is_empty_memory(m)) {
            Node* result = find_inst_mem(mem, ni, orig_phis);
            if (_compile->failing()) {
              return;
            }
            nmm->set_memory_at(ni, result);
          }
        }
      }
    }
    // Find the rest of the instances' values.
    for (uint ni = new_index_start; ni < new_index_end; ni++) {
      const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
      Node* result = step_through_mergemem(nmm, ni, tinst);
      if (result == nmm->base_memory()) {
        // Didn't find instance memory, search through the general slice recursively.
        result = nmm->memory_at(_compile->get_general_index(ni));
        result = find_inst_mem(result, ni, orig_phis);
        if (_compile->failing()) {
          return;
        }
        nmm->set_memory_at(ni, result);
      }
    }
    igvn->hash_insert(nmm);
    record_for_optimizer(nmm);
  }

  // Phase 4: Update the inputs of non-instance memory Phis and
  //          the Memory input of memnodes.
  // First update the inputs of any non-instance Phi's from
  // which we split out an instance Phi. Note we don't have
  // to recursively process Phi's encountered on the input memory
  // chains as is done in split_memory_phi() since they will
  // also be processed here.
  for (int j = 0; j < orig_phis.length(); j++) {
    PhiNode *phi = orig_phis.at(j);
    int alias_idx = _compile->get_alias_index(phi->adr_type());
    igvn->hash_delete(phi);
    for (uint i = 1; i < phi->req(); i++) {
      Node *mem = phi->in(i);
      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != new_mem) {
        phi->set_req(i, new_mem);
      }
    }
    igvn->hash_insert(phi);
    record_for_optimizer(phi);
  }

  // Update the memory inputs of MemNodes with the values we computed
  // in Phase 2 and move stores' memory users to the corresponding memory slices.
  // Disable the memory split verification code until the fix for 6984348:
  // currently it produces false negative results since it does not cover all cases.
#if 0 // ifdef ASSERT
  visited.Reset();
  Node_Stack old_mems(arena, _compile->unique() >> 2);
#endif
  for (uint i = 0; i < ideal_nodes.size(); i++) {
    Node* n = ideal_nodes.at(i);
    Node* nmem = get_map(n->_idx);
    assert(nmem != NULL, "sanity");
    if (n->is_Mem()) {
#if 0 // ifdef ASSERT
      Node* old_mem = n->in(MemNode::Memory);
      if (!visited.test_set(old_mem->_idx)) {
        old_mems.push(old_mem, old_mem->outcnt());
      }
#endif
      assert(n->in(MemNode::Memory) != nmem, "sanity");
      if (!n->is_Load()) {
        // Move memory users of a store first.
        move_inst_mem(n, orig_phis);
      }
      // Now update the memory input.
      igvn->hash_delete(n);
      n->set_req(MemNode::Memory, nmem);
      igvn->hash_insert(n);
      record_for_optimizer(n);
    } else {
      assert(n->is_Allocate() || n->is_CheckCastPP() ||
             n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
    }
  }
#if 0 // ifdef ASSERT
  // Verify that memory was split correctly.
  while (old_mems.is_nonempty()) {
    Node* old_mem = old_mems.node();
    uint old_cnt = old_mems.index();
    old_mems.pop();
    assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
  }
#endif
}

#ifndef PRODUCT
static const char *node_type_names[] = {
  "UnknownType",
  "JavaObject",
  "LocalVar",
  "Field",
  "Arraycopy"
};

static const char *esc_names[] = {
  "UnknownEscape",
  "NoEscape",
  "ArgEscape",
  "GlobalEscape"
};

void PointsToNode::dump(bool print_state) const {
  NodeType nt = node_type();
  tty->print("%s ", node_type_names[(int) nt]);
  if (print_state) {
    EscapeState es = escape_state();
    EscapeState fields_es = fields_escape_state();
    tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
    if (nt == PointsToNode::JavaObject && !this->scalar_replaceable())
      tty->print("NSR ");
  }
  if (is_Field()) {
    FieldNode* f = (FieldNode*)this;
    if (f->is_oop())
      tty->print("oop ");
    if (f->offset() > 0)
      tty->print("+%d ", f->offset());
    tty->print("(");
    for (BaseIterator i(f); i.has_next(); i.next()) {
      PointsToNode* b = i.get();
      tty->print(" %d%s", b->idx(), (b->is_JavaObject() ? "P" : ""));
    }
    tty->print(" )");
  }
  tty->print("[");
  for (EdgeIterator i(this); i.has_next(); i.next()) {
    PointsToNode* e = i.get();
    tty->print(" %d%s%s", e->idx(), (e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
  }
  tty->print(" [");
  for (UseIterator i(this); i.has_next(); i.next()) {
    PointsToNode* u = i.get();
    bool is_base = false;
    if (PointsToNode::is_base_use(u)) {
      is_base = true;
      u = PointsToNode::get_use_node(u)->as_Field();
    }
    tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");
"cp" : ""); 3532 } 3533 tty->print(" ]] "); 3534 if (_node == NULL) 3535 tty->print_cr("<null>"); 3536 else 3537 _node->dump(); 3538 } 3539 3540 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { 3541 bool first = true; 3542 int ptnodes_length = ptnodes_worklist.length(); 3543 for (int i = 0; i < ptnodes_length; i++) { 3544 PointsToNode *ptn = ptnodes_worklist.at(i); 3545 if (ptn == NULL || !ptn->is_JavaObject()) 3546 continue; 3547 PointsToNode::EscapeState es = ptn->escape_state(); 3548 if ((es != PointsToNode::NoEscape) && !Verbose) { 3549 continue; 3550 } 3551 Node* n = ptn->ideal_node(); 3552 if (n->is_Allocate() || (n->is_CallStaticJava() && 3553 n->as_CallStaticJava()->is_boxing_method())) { 3554 if (first) { 3555 tty->cr(); 3556 tty->print("======== Connection graph for "); 3557 _compile->method()->print_short_name(); 3558 tty->cr(); 3559 first = false; 3560 } 3561 ptn->dump(); 3562 // Print all locals and fields which reference this allocation 3563 for (UseIterator j(ptn); j.has_next(); j.next()) { 3564 PointsToNode* use = j.get(); 3565 if (use->is_LocalVar()) { 3566 use->dump(Verbose); 3567 } else if (Verbose) { 3568 use->dump(); 3569 } 3570 } 3571 tty->cr(); 3572 } 3573 } 3574 } 3575 #endif