1 /* 2 * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "ci/bcEscapeAnalyzer.hpp" 27 #include "compiler/compileLog.hpp" 28 #include "libadt/vectset.hpp" 29 #include "memory/allocation.hpp" 30 #include "opto/c2compiler.hpp" 31 #include "opto/callnode.hpp" 32 #include "opto/cfgnode.hpp" 33 #include "opto/compile.hpp" 34 #include "opto/escape.hpp" 35 #include "opto/phaseX.hpp" 36 #include "opto/rootnode.hpp" 37 38 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) : 39 _nodes(C->comp_arena(), C->unique(), C->unique(), NULL), 40 _collecting(true), 41 _verify(false), 42 _compile(C), 43 _igvn(igvn), 44 _node_map(C->comp_arena()) { 45 // Add unknown java object. 46 add_java_object(C->top(), PointsToNode::GlobalEscape); 47 phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject(); 48 // Add ConP(#NULL) and ConN(#NULL) nodes. 49 Node* oop_null = igvn->zerocon(T_OBJECT); 50 assert(oop_null->_idx < nodes_size(), "should be created already"); 51 add_java_object(oop_null, PointsToNode::NoEscape); 52 null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject(); 53 if (UseCompressedOops) { 54 Node* noop_null = igvn->zerocon(T_NARROWOOP); 55 assert(noop_null->_idx < nodes_size(), "should be created already"); 56 map_ideal_node(noop_null, null_obj); 57 } 58 _pcmp_neq = NULL; // Should be initialized 59 _pcmp_eq = NULL; 60 } 61 62 bool ConnectionGraph::has_candidates(Compile *C) { 63 // EA brings benefits only when the code has allocations and/or locks which 64 // are represented by ideal Macro nodes. 65 int cnt = C->macro_count(); 66 for (int i = 0; i < cnt; i++) { 67 Node *n = C->macro_node(i); 68 if (n->is_Allocate()) 69 return true; 70 if (n->is_Lock()) { 71 Node* obj = n->as_Lock()->obj_node()->uncast(); 72 if (!(obj->is_Parm() || obj->is_Con())) 73 return true; 74 } 75 if (n->is_CallStaticJava() && 76 n->as_CallStaticJava()->is_boxing_method()) { 77 return true; 78 } 79 } 80 return false; 81 } 82 83 void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) { 84 Compile::TracePhase t2("escapeAnalysis", &Phase::_t_escapeAnalysis, true); 85 ResourceMark rm; 86 87 // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction 88 // to create space for them in ConnectionGraph::_nodes[]. 89 Node* oop_null = igvn->zerocon(T_OBJECT); 90 Node* noop_null = igvn->zerocon(T_NARROWOOP); 91 ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn); 92 // Perform escape analysis 93 if (congraph->compute_escape()) { 94 // There are non escaping objects. 95 C->set_congraph(congraph); 96 } 97 // Cleanup. 98 if (oop_null->outcnt() == 0) 99 igvn->hash_delete(oop_null); 100 if (noop_null->outcnt() == 0) 101 igvn->hash_delete(noop_null); 102 } 103 104 bool ConnectionGraph::compute_escape() { 105 Compile* C = _compile; 106 PhaseGVN* igvn = _igvn; 107 108 // Worklists used by EA. 109 Unique_Node_List delayed_worklist; 110 GrowableArray<Node*> alloc_worklist; 111 GrowableArray<Node*> ptr_cmp_worklist; 112 GrowableArray<Node*> storestore_worklist; 113 GrowableArray<PointsToNode*> ptnodes_worklist; 114 GrowableArray<JavaObjectNode*> java_objects_worklist; 115 GrowableArray<JavaObjectNode*> non_escaped_worklist; 116 GrowableArray<FieldNode*> oop_fields_worklist; 117 DEBUG_ONLY( GrowableArray<Node*> addp_worklist; ) 118 119 { Compile::TracePhase t3("connectionGraph", &Phase::_t_connectionGraph, true); 120 121 // 1. Populate Connection Graph (CG) with PointsTo nodes. 122 ideal_nodes.map(C->live_nodes(), NULL); // preallocate space 123 // Initialize worklist 124 if (C->root() != NULL) { 125 ideal_nodes.push(C->root()); 126 } 127 for( uint next = 0; next < ideal_nodes.size(); ++next ) { 128 Node* n = ideal_nodes.at(next); 129 // Create PointsTo nodes and add them to Connection Graph. Called 130 // only once per ideal node since ideal_nodes is Unique_Node list. 131 add_node_to_connection_graph(n, &delayed_worklist); 132 PointsToNode* ptn = ptnode_adr(n->_idx); 133 if (ptn != NULL) { 134 ptnodes_worklist.append(ptn); 135 if (ptn->is_JavaObject()) { 136 java_objects_worklist.append(ptn->as_JavaObject()); 137 if ((n->is_Allocate() || n->is_CallStaticJava()) && 138 (ptn->escape_state() < PointsToNode::GlobalEscape)) { 139 // Only allocations and java static calls results are interesting. 140 non_escaped_worklist.append(ptn->as_JavaObject()); 141 } 142 } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) { 143 oop_fields_worklist.append(ptn->as_Field()); 144 } 145 } 146 if (n->is_MergeMem()) { 147 // Collect all MergeMem nodes to add memory slices for 148 // scalar replaceable objects in split_unique_types(). 149 _mergemem_worklist.append(n->as_MergeMem()); 150 } else if (OptimizePtrCompare && n->is_Cmp() && 151 (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) { 152 // Collect compare pointers nodes. 153 ptr_cmp_worklist.append(n); 154 } else if (n->is_MemBarStoreStore()) { 155 // Collect all MemBarStoreStore nodes so that depending on the 156 // escape status of the associated Allocate node some of them 157 // may be eliminated. 158 storestore_worklist.append(n); 159 } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) && 160 (n->req() > MemBarNode::Precedent)) { 161 record_for_optimizer(n); 162 #ifdef ASSERT 163 } else if (n->is_AddP()) { 164 // Collect address nodes for graph verification. 165 addp_worklist.append(n); 166 #endif 167 } 168 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 169 Node* m = n->fast_out(i); // Get user 170 ideal_nodes.push(m); 171 } 172 } 173 if (non_escaped_worklist.length() == 0) { 174 _collecting = false; 175 return false; // Nothing to do. 176 } 177 // Add final simple edges to graph. 178 while(delayed_worklist.size() > 0) { 179 Node* n = delayed_worklist.pop(); 180 add_final_edges(n); 181 } 182 int ptnodes_length = ptnodes_worklist.length(); 183 184 #ifdef ASSERT 185 if (VerifyConnectionGraph) { 186 // Verify that no new simple edges could be created and all 187 // local vars has edges. 188 _verify = true; 189 for (int next = 0; next < ptnodes_length; ++next) { 190 PointsToNode* ptn = ptnodes_worklist.at(next); 191 add_final_edges(ptn->ideal_node()); 192 if (ptn->is_LocalVar() && ptn->edge_count() == 0) { 193 ptn->dump(); 194 assert(ptn->as_LocalVar()->edge_count() > 0, "sanity"); 195 } 196 } 197 _verify = false; 198 } 199 #endif 200 201 // 2. Finish Graph construction by propagating references to all 202 // java objects through graph. 203 if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist, 204 java_objects_worklist, oop_fields_worklist)) { 205 // All objects escaped or hit time or iterations limits. 206 _collecting = false; 207 return false; 208 } 209 210 // 3. Adjust scalar_replaceable state of nonescaping objects and push 211 // scalar replaceable allocations on alloc_worklist for processing 212 // in split_unique_types(). 213 int non_escaped_length = non_escaped_worklist.length(); 214 for (int next = 0; next < non_escaped_length; next++) { 215 JavaObjectNode* ptn = non_escaped_worklist.at(next); 216 bool noescape = (ptn->escape_state() == PointsToNode::NoEscape); 217 Node* n = ptn->ideal_node(); 218 if (n->is_Allocate()) { 219 n->as_Allocate()->_is_non_escaping = noescape; 220 } 221 if (n->is_CallStaticJava()) { 222 n->as_CallStaticJava()->_is_non_escaping = noescape; 223 } 224 if (noescape && ptn->scalar_replaceable()) { 225 adjust_scalar_replaceable_state(ptn); 226 if (ptn->scalar_replaceable()) { 227 alloc_worklist.append(ptn->ideal_node()); 228 } 229 } 230 } 231 232 #ifdef ASSERT 233 if (VerifyConnectionGraph) { 234 // Verify that graph is complete - no new edges could be added or needed. 235 verify_connection_graph(ptnodes_worklist, non_escaped_worklist, 236 java_objects_worklist, addp_worklist); 237 } 238 assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build"); 239 assert(null_obj->escape_state() == PointsToNode::NoEscape && 240 null_obj->edge_count() == 0 && 241 !null_obj->arraycopy_src() && 242 !null_obj->arraycopy_dst(), "sanity"); 243 #endif 244 245 _collecting = false; 246 247 } // TracePhase t3("connectionGraph") 248 249 // 4. Optimize ideal graph based on EA information. 250 bool has_non_escaping_obj = (non_escaped_worklist.length() > 0); 251 if (has_non_escaping_obj) { 252 optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist); 253 } 254 255 #ifndef PRODUCT 256 if (PrintEscapeAnalysis) { 257 dump(ptnodes_worklist); // Dump ConnectionGraph 258 } 259 #endif 260 261 bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0); 262 #ifdef ASSERT 263 if (VerifyConnectionGraph) { 264 int alloc_length = alloc_worklist.length(); 265 for (int next = 0; next < alloc_length; ++next) { 266 Node* n = alloc_worklist.at(next); 267 PointsToNode* ptn = ptnode_adr(n->_idx); 268 assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity"); 269 } 270 } 271 #endif 272 273 // 5. Separate memory graph for scalar replaceable allcations. 274 if (has_scalar_replaceable_candidates && 275 C->AliasLevel() >= 3 && EliminateAllocations) { 276 // Now use the escape information to create unique types for 277 // scalar replaceable objects. 278 split_unique_types(alloc_worklist); 279 if (C->failing()) return false; 280 C->print_method(PHASE_AFTER_EA, 2); 281 282 #ifdef ASSERT 283 } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) { 284 tty->print("=== No allocations eliminated for "); 285 C->method()->print_short_name(); 286 if(!EliminateAllocations) { 287 tty->print(" since EliminateAllocations is off ==="); 288 } else if(!has_scalar_replaceable_candidates) { 289 tty->print(" since there are no scalar replaceable candidates ==="); 290 } else if(C->AliasLevel() < 3) { 291 tty->print(" since AliasLevel < 3 ==="); 292 } 293 tty->cr(); 294 #endif 295 } 296 return has_non_escaping_obj; 297 } 298 299 // Utility function for nodes that load an object 300 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) { 301 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 302 // ThreadLocal has RawPtr type. 303 const Type* t = _igvn->type(n); 304 if (t->make_ptr() != NULL) { 305 Node* adr = n->in(MemNode::Address); 306 #ifdef ASSERT 307 if (!adr->is_AddP()) { 308 assert(_igvn->type(adr)->isa_rawptr(), "sanity"); 309 } else { 310 assert((ptnode_adr(adr->_idx) == NULL || 311 ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity"); 312 } 313 #endif 314 add_local_var_and_edge(n, PointsToNode::NoEscape, 315 adr, delayed_worklist); 316 } 317 } 318 319 // Populate Connection Graph with PointsTo nodes and create simple 320 // connection graph edges. 321 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) { 322 assert(!_verify, "this method sould not be called for verification"); 323 PhaseGVN* igvn = _igvn; 324 uint n_idx = n->_idx; 325 PointsToNode* n_ptn = ptnode_adr(n_idx); 326 if (n_ptn != NULL) 327 return; // No need to redefine PointsTo node during first iteration. 328 329 if (n->is_Call()) { 330 // Arguments to allocation and locking don't escape. 331 if (n->is_AbstractLock()) { 332 // Put Lock and Unlock nodes on IGVN worklist to process them during 333 // first IGVN optimization when escape information is still available. 334 record_for_optimizer(n); 335 } else if (n->is_Allocate()) { 336 add_call_node(n->as_Call()); 337 record_for_optimizer(n); 338 } else { 339 if (n->is_CallStaticJava()) { 340 const char* name = n->as_CallStaticJava()->_name; 341 if (name != NULL && strcmp(name, "uncommon_trap") == 0) 342 return; // Skip uncommon traps 343 } 344 // Don't mark as processed since call's arguments have to be processed. 345 delayed_worklist->push(n); 346 // Check if a call returns an object. 347 if ((n->as_Call()->returns_pointer() && 348 n->as_Call()->proj_out(TypeFunc::Parms) != NULL) || 349 (n->is_CallStaticJava() && 350 n->as_CallStaticJava()->is_boxing_method())) { 351 add_call_node(n->as_Call()); 352 } 353 } 354 return; 355 } 356 // Put this check here to process call arguments since some call nodes 357 // point to phantom_obj. 358 if (n_ptn == phantom_obj || n_ptn == null_obj) 359 return; // Skip predefined nodes. 360 361 int opcode = n->Opcode(); 362 switch (opcode) { 363 case Op_AddP: { 364 Node* base = get_addp_base(n); 365 PointsToNode* ptn_base = ptnode_adr(base->_idx); 366 // Field nodes are created for all field types. They are used in 367 // adjust_scalar_replaceable_state() and split_unique_types(). 368 // Note, non-oop fields will have only base edges in Connection 369 // Graph because such fields are not used for oop loads and stores. 370 int offset = address_offset(n, igvn); 371 add_field(n, PointsToNode::NoEscape, offset); 372 if (ptn_base == NULL) { 373 delayed_worklist->push(n); // Process it later. 374 } else { 375 n_ptn = ptnode_adr(n_idx); 376 add_base(n_ptn->as_Field(), ptn_base); 377 } 378 break; 379 } 380 case Op_CastX2P: { 381 map_ideal_node(n, phantom_obj); 382 break; 383 } 384 case Op_CastPP: 385 case Op_CheckCastPP: 386 case Op_EncodeP: 387 case Op_DecodeN: 388 case Op_EncodePKlass: 389 case Op_DecodeNKlass: { 390 add_local_var_and_edge(n, PointsToNode::NoEscape, 391 n->in(1), delayed_worklist); 392 break; 393 } 394 case Op_CMoveP: { 395 add_local_var(n, PointsToNode::NoEscape); 396 // Do not add edges during first iteration because some could be 397 // not defined yet. 398 delayed_worklist->push(n); 399 break; 400 } 401 case Op_ConP: 402 case Op_ConN: 403 case Op_ConNKlass: { 404 // assume all oop constants globally escape except for null 405 PointsToNode::EscapeState es; 406 const Type* t = igvn->type(n); 407 if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) { 408 es = PointsToNode::NoEscape; 409 } else { 410 es = PointsToNode::GlobalEscape; 411 } 412 add_java_object(n, es); 413 break; 414 } 415 case Op_CreateEx: { 416 // assume that all exception objects globally escape 417 add_java_object(n, PointsToNode::GlobalEscape); 418 break; 419 } 420 case Op_LoadKlass: 421 case Op_LoadNKlass: { 422 // Unknown class is loaded 423 map_ideal_node(n, phantom_obj); 424 break; 425 } 426 case Op_LoadP: 427 case Op_LoadN: 428 case Op_LoadPLocked: { 429 add_objload_to_connection_graph(n, delayed_worklist); 430 break; 431 } 432 case Op_Parm: { 433 map_ideal_node(n, phantom_obj); 434 break; 435 } 436 case Op_PartialSubtypeCheck: { 437 // Produces Null or notNull and is used in only in CmpP so 438 // phantom_obj could be used. 439 map_ideal_node(n, phantom_obj); // Result is unknown 440 break; 441 } 442 case Op_Phi: { 443 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 444 // ThreadLocal has RawPtr type. 445 const Type* t = n->as_Phi()->type(); 446 if (t->make_ptr() != NULL) { 447 add_local_var(n, PointsToNode::NoEscape); 448 // Do not add edges during first iteration because some could be 449 // not defined yet. 450 delayed_worklist->push(n); 451 } 452 break; 453 } 454 case Op_Proj: { 455 // we are only interested in the oop result projection from a call 456 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() && 457 n->in(0)->as_Call()->returns_pointer()) { 458 add_local_var_and_edge(n, PointsToNode::NoEscape, 459 n->in(0), delayed_worklist); 460 } 461 break; 462 } 463 case Op_Rethrow: // Exception object escapes 464 case Op_Return: { 465 if (n->req() > TypeFunc::Parms && 466 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) { 467 // Treat Return value as LocalVar with GlobalEscape escape state. 468 add_local_var_and_edge(n, PointsToNode::GlobalEscape, 469 n->in(TypeFunc::Parms), delayed_worklist); 470 } 471 break; 472 } 473 case Op_GetAndSetP: 474 case Op_GetAndSetN: { 475 add_objload_to_connection_graph(n, delayed_worklist); 476 // fallthrough 477 } 478 case Op_StoreP: 479 case Op_StoreN: 480 case Op_StoreNKlass: 481 case Op_StorePConditional: 482 case Op_CompareAndSwapP: 483 case Op_CompareAndSwapN: { 484 Node* adr = n->in(MemNode::Address); 485 const Type *adr_type = igvn->type(adr); 486 adr_type = adr_type->make_ptr(); 487 if (adr_type == NULL) { 488 break; // skip dead nodes 489 } 490 if (adr_type->isa_oopptr() || 491 (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) && 492 (adr_type == TypeRawPtr::NOTNULL && 493 adr->in(AddPNode::Address)->is_Proj() && 494 adr->in(AddPNode::Address)->in(0)->is_Allocate())) { 495 delayed_worklist->push(n); // Process it later. 496 #ifdef ASSERT 497 assert(adr->is_AddP(), "expecting an AddP"); 498 if (adr_type == TypeRawPtr::NOTNULL) { 499 // Verify a raw address for a store captured by Initialize node. 500 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 501 assert(offs != Type::OffsetBot, "offset must be a constant"); 502 } 503 #endif 504 } else { 505 // Ignore copy the displaced header to the BoxNode (OSR compilation). 506 if (adr->is_BoxLock()) 507 break; 508 // Stored value escapes in unsafe access. 509 if ((opcode == Op_StoreP) && (adr_type == TypeRawPtr::BOTTOM)) { 510 // Pointer stores in G1 barriers looks like unsafe access. 511 // Ignore such stores to be able scalar replace non-escaping 512 // allocations. 513 if (UseG1GC && adr->is_AddP()) { 514 Node* base = get_addp_base(adr); 515 if (base->Opcode() == Op_LoadP && 516 base->in(MemNode::Address)->is_AddP()) { 517 adr = base->in(MemNode::Address); 518 Node* tls = get_addp_base(adr); 519 if (tls->Opcode() == Op_ThreadLocal) { 520 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 521 if (offs == in_bytes(JavaThread::satb_mark_queue_offset() + 522 PtrQueue::byte_offset_of_buf())) { 523 break; // G1 pre barier previous oop value store. 524 } 525 if (offs == in_bytes(JavaThread::dirty_card_queue_offset() + 526 PtrQueue::byte_offset_of_buf())) { 527 break; // G1 post barier card address store. 528 } 529 } 530 } 531 } 532 delayed_worklist->push(n); // Process unsafe access later. 533 break; 534 } 535 #ifdef ASSERT 536 n->dump(1); 537 assert(false, "not unsafe or G1 barrier raw StoreP"); 538 #endif 539 } 540 break; 541 } 542 case Op_AryEq: 543 case Op_StrComp: 544 case Op_StrEquals: 545 case Op_StrIndexOf: 546 case Op_EncodeISOArray: { 547 add_local_var(n, PointsToNode::ArgEscape); 548 delayed_worklist->push(n); // Process it later. 549 break; 550 } 551 case Op_ThreadLocal: { 552 add_java_object(n, PointsToNode::ArgEscape); 553 break; 554 } 555 default: 556 ; // Do nothing for nodes not related to EA. 557 } 558 return; 559 } 560 561 #ifdef ASSERT 562 #define ELSE_FAIL(name) \ 563 /* Should not be called for not pointer type. */ \ 564 n->dump(1); \ 565 assert(false, name); \ 566 break; 567 #else 568 #define ELSE_FAIL(name) \ 569 break; 570 #endif 571 572 // Add final simple edges to graph. 573 void ConnectionGraph::add_final_edges(Node *n) { 574 PointsToNode* n_ptn = ptnode_adr(n->_idx); 575 #ifdef ASSERT 576 if (_verify && n_ptn->is_JavaObject()) 577 return; // This method does not change graph for JavaObject. 578 #endif 579 580 if (n->is_Call()) { 581 process_call_arguments(n->as_Call()); 582 return; 583 } 584 assert(n->is_Store() || n->is_LoadStore() || 585 (n_ptn != NULL) && (n_ptn->ideal_node() != NULL), 586 "node should be registered already"); 587 int opcode = n->Opcode(); 588 switch (opcode) { 589 case Op_AddP: { 590 Node* base = get_addp_base(n); 591 PointsToNode* ptn_base = ptnode_adr(base->_idx); 592 assert(ptn_base != NULL, "field's base should be registered"); 593 add_base(n_ptn->as_Field(), ptn_base); 594 break; 595 } 596 case Op_CastPP: 597 case Op_CheckCastPP: 598 case Op_EncodeP: 599 case Op_DecodeN: 600 case Op_EncodePKlass: 601 case Op_DecodeNKlass: { 602 add_local_var_and_edge(n, PointsToNode::NoEscape, 603 n->in(1), NULL); 604 break; 605 } 606 case Op_CMoveP: { 607 for (uint i = CMoveNode::IfFalse; i < n->req(); i++) { 608 Node* in = n->in(i); 609 if (in == NULL) 610 continue; // ignore NULL 611 Node* uncast_in = in->uncast(); 612 if (uncast_in->is_top() || uncast_in == n) 613 continue; // ignore top or inputs which go back this node 614 PointsToNode* ptn = ptnode_adr(in->_idx); 615 assert(ptn != NULL, "node should be registered"); 616 add_edge(n_ptn, ptn); 617 } 618 break; 619 } 620 case Op_LoadP: 621 case Op_LoadN: 622 case Op_LoadPLocked: { 623 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 624 // ThreadLocal has RawPtr type. 625 const Type* t = _igvn->type(n); 626 if (t->make_ptr() != NULL) { 627 Node* adr = n->in(MemNode::Address); 628 add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL); 629 break; 630 } 631 ELSE_FAIL("Op_LoadP"); 632 } 633 case Op_Phi: { 634 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 635 // ThreadLocal has RawPtr type. 636 const Type* t = n->as_Phi()->type(); 637 if (t->make_ptr() != NULL) { 638 for (uint i = 1; i < n->req(); i++) { 639 Node* in = n->in(i); 640 if (in == NULL) 641 continue; // ignore NULL 642 Node* uncast_in = in->uncast(); 643 if (uncast_in->is_top() || uncast_in == n) 644 continue; // ignore top or inputs which go back this node 645 PointsToNode* ptn = ptnode_adr(in->_idx); 646 assert(ptn != NULL, "node should be registered"); 647 add_edge(n_ptn, ptn); 648 } 649 break; 650 } 651 ELSE_FAIL("Op_Phi"); 652 } 653 case Op_Proj: { 654 // we are only interested in the oop result projection from a call 655 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() && 656 n->in(0)->as_Call()->returns_pointer()) { 657 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL); 658 break; 659 } 660 ELSE_FAIL("Op_Proj"); 661 } 662 case Op_Rethrow: // Exception object escapes 663 case Op_Return: { 664 if (n->req() > TypeFunc::Parms && 665 _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) { 666 // Treat Return value as LocalVar with GlobalEscape escape state. 667 add_local_var_and_edge(n, PointsToNode::GlobalEscape, 668 n->in(TypeFunc::Parms), NULL); 669 break; 670 } 671 ELSE_FAIL("Op_Return"); 672 } 673 case Op_StoreP: 674 case Op_StoreN: 675 case Op_StoreNKlass: 676 case Op_StorePConditional: 677 case Op_CompareAndSwapP: 678 case Op_CompareAndSwapN: 679 case Op_GetAndSetP: 680 case Op_GetAndSetN: { 681 Node* adr = n->in(MemNode::Address); 682 const Type *adr_type = _igvn->type(adr); 683 adr_type = adr_type->make_ptr(); 684 #ifdef ASSERT 685 if (adr_type == NULL) { 686 n->dump(1); 687 assert(adr_type != NULL, "dead node should not be on list"); 688 break; 689 } 690 #endif 691 if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN) { 692 add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL); 693 } 694 if (adr_type->isa_oopptr() || 695 (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) && 696 (adr_type == TypeRawPtr::NOTNULL && 697 adr->in(AddPNode::Address)->is_Proj() && 698 adr->in(AddPNode::Address)->in(0)->is_Allocate())) { 699 // Point Address to Value 700 PointsToNode* adr_ptn = ptnode_adr(adr->_idx); 701 assert(adr_ptn != NULL && 702 adr_ptn->as_Field()->is_oop(), "node should be registered"); 703 Node *val = n->in(MemNode::ValueIn); 704 PointsToNode* ptn = ptnode_adr(val->_idx); 705 assert(ptn != NULL, "node should be registered"); 706 add_edge(adr_ptn, ptn); 707 break; 708 } else if ((opcode == Op_StoreP) && (adr_type == TypeRawPtr::BOTTOM)) { 709 // Stored value escapes in unsafe access. 710 Node *val = n->in(MemNode::ValueIn); 711 PointsToNode* ptn = ptnode_adr(val->_idx); 712 assert(ptn != NULL, "node should be registered"); 713 ptn->set_escape_state(PointsToNode::GlobalEscape); 714 // Add edge to object for unsafe access with offset. 715 PointsToNode* adr_ptn = ptnode_adr(adr->_idx); 716 assert(adr_ptn != NULL, "node should be registered"); 717 if (adr_ptn->is_Field()) { 718 assert(adr_ptn->as_Field()->is_oop(), "should be oop field"); 719 add_edge(adr_ptn, ptn); 720 } 721 break; 722 } 723 ELSE_FAIL("Op_StoreP"); 724 } 725 case Op_AryEq: 726 case Op_StrComp: 727 case Op_StrEquals: 728 case Op_StrIndexOf: 729 case Op_EncodeISOArray: { 730 // char[] arrays passed to string intrinsic do not escape but 731 // they are not scalar replaceable. Adjust escape state for them. 732 // Start from in(2) edge since in(1) is memory edge. 733 for (uint i = 2; i < n->req(); i++) { 734 Node* adr = n->in(i); 735 const Type* at = _igvn->type(adr); 736 if (!adr->is_top() && at->isa_ptr()) { 737 assert(at == Type::TOP || at == TypePtr::NULL_PTR || 738 at->isa_ptr() != NULL, "expecting a pointer"); 739 if (adr->is_AddP()) { 740 adr = get_addp_base(adr); 741 } 742 PointsToNode* ptn = ptnode_adr(adr->_idx); 743 assert(ptn != NULL, "node should be registered"); 744 add_edge(n_ptn, ptn); 745 } 746 } 747 break; 748 } 749 default: { 750 // This method should be called only for EA specific nodes which may 751 // miss some edges when they were created. 752 #ifdef ASSERT 753 n->dump(1); 754 #endif 755 guarantee(false, "unknown node"); 756 } 757 } 758 return; 759 } 760 761 void ConnectionGraph::add_call_node(CallNode* call) { 762 assert(call->returns_pointer(), "only for call which returns pointer"); 763 uint call_idx = call->_idx; 764 if (call->is_Allocate()) { 765 Node* k = call->in(AllocateNode::KlassNode); 766 const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr(); 767 assert(kt != NULL, "TypeKlassPtr required."); 768 ciKlass* cik = kt->klass(); 769 PointsToNode::EscapeState es = PointsToNode::NoEscape; 770 bool scalar_replaceable = true; 771 if (call->is_AllocateArray()) { 772 if (!cik->is_array_klass()) { // StressReflectiveCode 773 es = PointsToNode::GlobalEscape; 774 } else { 775 int length = call->in(AllocateNode::ALength)->find_int_con(-1); 776 if (length < 0 || length > EliminateAllocationArraySizeLimit) { 777 // Not scalar replaceable if the length is not constant or too big. 778 scalar_replaceable = false; 779 } 780 } 781 } else { // Allocate instance 782 if (cik->is_subclass_of(_compile->env()->Thread_klass()) || 783 !cik->is_instance_klass() || // StressReflectiveCode 784 cik->as_instance_klass()->has_finalizer()) { 785 es = PointsToNode::GlobalEscape; 786 } 787 } 788 add_java_object(call, es); 789 PointsToNode* ptn = ptnode_adr(call_idx); 790 if (!scalar_replaceable && ptn->scalar_replaceable()) { 791 ptn->set_scalar_replaceable(false); 792 } 793 } else if (call->is_CallStaticJava()) { 794 // Call nodes could be different types: 795 // 796 // 1. CallDynamicJavaNode (what happened during call is unknown): 797 // 798 // - mapped to GlobalEscape JavaObject node if oop is returned; 799 // 800 // - all oop arguments are escaping globally; 801 // 802 // 2. CallStaticJavaNode (execute bytecode analysis if possible): 803 // 804 // - the same as CallDynamicJavaNode if can't do bytecode analysis; 805 // 806 // - mapped to GlobalEscape JavaObject node if unknown oop is returned; 807 // - mapped to NoEscape JavaObject node if non-escaping object allocated 808 // during call is returned; 809 // - mapped to ArgEscape LocalVar node pointed to object arguments 810 // which are returned and does not escape during call; 811 // 812 // - oop arguments escaping status is defined by bytecode analysis; 813 // 814 // For a static call, we know exactly what method is being called. 815 // Use bytecode estimator to record whether the call's return value escapes. 816 ciMethod* meth = call->as_CallJava()->method(); 817 if (meth == NULL) { 818 const char* name = call->as_CallStaticJava()->_name; 819 assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check"); 820 // Returns a newly allocated unescaped object. 821 add_java_object(call, PointsToNode::NoEscape); 822 ptnode_adr(call_idx)->set_scalar_replaceable(false); 823 } else if (meth->is_boxing_method()) { 824 // Returns boxing object 825 PointsToNode::EscapeState es; 826 vmIntrinsics::ID intr = meth->intrinsic_id(); 827 if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) { 828 // It does not escape if object is always allocated. 829 es = PointsToNode::NoEscape; 830 } else { 831 // It escapes globally if object could be loaded from cache. 832 es = PointsToNode::GlobalEscape; 833 } 834 add_java_object(call, es); 835 } else { 836 BCEscapeAnalyzer* call_analyzer = meth->get_bcea(); 837 call_analyzer->copy_dependencies(_compile->dependencies()); 838 if (call_analyzer->is_return_allocated()) { 839 // Returns a newly allocated unescaped object, simply 840 // update dependency information. 841 // Mark it as NoEscape so that objects referenced by 842 // it's fields will be marked as NoEscape at least. 843 add_java_object(call, PointsToNode::NoEscape); 844 ptnode_adr(call_idx)->set_scalar_replaceable(false); 845 } else { 846 // Determine whether any arguments are returned. 847 const TypeTuple* d = call->tf()->domain(); 848 bool ret_arg = false; 849 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 850 if (d->field_at(i)->isa_ptr() != NULL && 851 call_analyzer->is_arg_returned(i - TypeFunc::Parms)) { 852 ret_arg = true; 853 break; 854 } 855 } 856 if (ret_arg) { 857 add_local_var(call, PointsToNode::ArgEscape); 858 } else { 859 // Returns unknown object. 860 map_ideal_node(call, phantom_obj); 861 } 862 } 863 } 864 } else { 865 // An other type of call, assume the worst case: 866 // returned value is unknown and globally escapes. 867 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check"); 868 map_ideal_node(call, phantom_obj); 869 } 870 } 871 872 void ConnectionGraph::process_call_arguments(CallNode *call) { 873 bool is_arraycopy = false; 874 switch (call->Opcode()) { 875 #ifdef ASSERT 876 case Op_Allocate: 877 case Op_AllocateArray: 878 case Op_Lock: 879 case Op_Unlock: 880 assert(false, "should be done already"); 881 break; 882 #endif 883 case Op_CallLeafNoFP: 884 is_arraycopy = (call->as_CallLeaf()->_name != NULL && 885 strstr(call->as_CallLeaf()->_name, "arraycopy") != 0); 886 // fall through 887 case Op_CallLeaf: { 888 // Stub calls, objects do not escape but they are not scale replaceable. 889 // Adjust escape state for outgoing arguments. 890 const TypeTuple * d = call->tf()->domain(); 891 bool src_has_oops = false; 892 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 893 const Type* at = d->field_at(i); 894 Node *arg = call->in(i); 895 const Type *aat = _igvn->type(arg); 896 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) 897 continue; 898 if (arg->is_AddP()) { 899 // 900 // The inline_native_clone() case when the arraycopy stub is called 901 // after the allocation before Initialize and CheckCastPP nodes. 902 // Or normal arraycopy for object arrays case. 903 // 904 // Set AddP's base (Allocate) as not scalar replaceable since 905 // pointer to the base (with offset) is passed as argument. 906 // 907 arg = get_addp_base(arg); 908 } 909 PointsToNode* arg_ptn = ptnode_adr(arg->_idx); 910 assert(arg_ptn != NULL, "should be registered"); 911 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state(); 912 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) { 913 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR || 914 aat->isa_ptr() != NULL, "expecting an Ptr"); 915 bool arg_has_oops = aat->isa_oopptr() && 916 (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() || 917 (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass())); 918 if (i == TypeFunc::Parms) { 919 src_has_oops = arg_has_oops; 920 } 921 // 922 // src or dst could be j.l.Object when other is basic type array: 923 // 924 // arraycopy(char[],0,Object*,0,size); 925 // arraycopy(Object*,0,char[],0,size); 926 // 927 // Don't add edges in such cases. 928 // 929 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy && 930 arg_has_oops && (i > TypeFunc::Parms); 931 #ifdef ASSERT 932 if (!(is_arraycopy || 933 (call->as_CallLeaf()->_name != NULL && 934 (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 || 935 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 || 936 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 || 937 strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 || 938 strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 || 939 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 || 940 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0) 941 ))) { 942 call->dump(); 943 fatal(err_msg_res("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name)); 944 } 945 #endif 946 // Always process arraycopy's destination object since 947 // we need to add all possible edges to references in 948 // source object. 949 if (arg_esc >= PointsToNode::ArgEscape && 950 !arg_is_arraycopy_dest) { 951 continue; 952 } 953 set_escape_state(arg_ptn, PointsToNode::ArgEscape); 954 if (arg_is_arraycopy_dest) { 955 Node* src = call->in(TypeFunc::Parms); 956 if (src->is_AddP()) { 957 src = get_addp_base(src); 958 } 959 PointsToNode* src_ptn = ptnode_adr(src->_idx); 960 assert(src_ptn != NULL, "should be registered"); 961 if (arg_ptn != src_ptn) { 962 // Special arraycopy edge: 963 // A destination object's field can't have the source object 964 // as base since objects escape states are not related. 965 // Only escape state of destination object's fields affects 966 // escape state of fields in source object. 967 add_arraycopy(call, PointsToNode::ArgEscape, src_ptn, arg_ptn); 968 } 969 } 970 } 971 } 972 break; 973 } 974 case Op_CallStaticJava: { 975 // For a static call, we know exactly what method is being called. 976 // Use bytecode estimator to record the call's escape affects 977 #ifdef ASSERT 978 const char* name = call->as_CallStaticJava()->_name; 979 assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only"); 980 #endif 981 ciMethod* meth = call->as_CallJava()->method(); 982 if ((meth != NULL) && meth->is_boxing_method()) { 983 break; // Boxing methods do not modify any oops. 984 } 985 BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL; 986 // fall-through if not a Java method or no analyzer information 987 if (call_analyzer != NULL) { 988 PointsToNode* call_ptn = ptnode_adr(call->_idx); 989 const TypeTuple* d = call->tf()->domain(); 990 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 991 const Type* at = d->field_at(i); 992 int k = i - TypeFunc::Parms; 993 Node* arg = call->in(i); 994 PointsToNode* arg_ptn = ptnode_adr(arg->_idx); 995 if (at->isa_ptr() != NULL && 996 call_analyzer->is_arg_returned(k)) { 997 // The call returns arguments. 998 if (call_ptn != NULL) { // Is call's result used? 999 assert(call_ptn->is_LocalVar(), "node should be registered"); 1000 assert(arg_ptn != NULL, "node should be registered"); 1001 add_edge(call_ptn, arg_ptn); 1002 } 1003 } 1004 if (at->isa_oopptr() != NULL && 1005 arg_ptn->escape_state() < PointsToNode::GlobalEscape) { 1006 if (!call_analyzer->is_arg_stack(k)) { 1007 // The argument global escapes 1008 set_escape_state(arg_ptn, PointsToNode::GlobalEscape); 1009 } else { 1010 set_escape_state(arg_ptn, PointsToNode::ArgEscape); 1011 if (!call_analyzer->is_arg_local(k)) { 1012 // The argument itself doesn't escape, but any fields might 1013 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape); 1014 } 1015 } 1016 } 1017 } 1018 if (call_ptn != NULL && call_ptn->is_LocalVar()) { 1019 // The call returns arguments. 1020 assert(call_ptn->edge_count() > 0, "sanity"); 1021 if (!call_analyzer->is_return_local()) { 1022 // Returns also unknown object. 1023 add_edge(call_ptn, phantom_obj); 1024 } 1025 } 1026 break; 1027 } 1028 } 1029 default: { 1030 // Fall-through here if not a Java method or no analyzer information 1031 // or some other type of call, assume the worst case: all arguments 1032 // globally escape. 1033 const TypeTuple* d = call->tf()->domain(); 1034 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 1035 const Type* at = d->field_at(i); 1036 if (at->isa_oopptr() != NULL) { 1037 Node* arg = call->in(i); 1038 if (arg->is_AddP()) { 1039 arg = get_addp_base(arg); 1040 } 1041 assert(ptnode_adr(arg->_idx) != NULL, "should be defined already"); 1042 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape); 1043 } 1044 } 1045 } 1046 } 1047 } 1048 1049 1050 // Finish Graph construction. 1051 bool ConnectionGraph::complete_connection_graph( 1052 GrowableArray<PointsToNode*>& ptnodes_worklist, 1053 GrowableArray<JavaObjectNode*>& non_escaped_worklist, 1054 GrowableArray<JavaObjectNode*>& java_objects_worklist, 1055 GrowableArray<FieldNode*>& oop_fields_worklist) { 1056 // Normally only 1-3 passes needed to build Connection Graph depending 1057 // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler. 1058 // Set limit to 20 to catch situation when something did go wrong and 1059 // bailout Escape Analysis. 1060 // Also limit build time to 30 sec (60 in debug VM). 1061 #define CG_BUILD_ITER_LIMIT 20 1062 #ifdef ASSERT 1063 #define CG_BUILD_TIME_LIMIT 60.0 1064 #else 1065 #define CG_BUILD_TIME_LIMIT 30.0 1066 #endif 1067 1068 // Propagate GlobalEscape and ArgEscape escape states and check that 1069 // we still have non-escaping objects. The method pushs on _worklist 1070 // Field nodes which reference phantom_object. 1071 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) { 1072 return false; // Nothing to do. 1073 } 1074 // Now propagate references to all JavaObject nodes. 1075 int java_objects_length = java_objects_worklist.length(); 1076 elapsedTimer time; 1077 int new_edges = 1; 1078 int iterations = 0; 1079 do { 1080 while ((new_edges > 0) && 1081 (iterations++ < CG_BUILD_ITER_LIMIT) && 1082 (time.seconds() < CG_BUILD_TIME_LIMIT)) { 1083 time.start(); 1084 new_edges = 0; 1085 // Propagate references to phantom_object for nodes pushed on _worklist 1086 // by find_non_escaped_objects() and find_field_value(). 1087 new_edges += add_java_object_edges(phantom_obj, false); 1088 for (int next = 0; next < java_objects_length; ++next) { 1089 JavaObjectNode* ptn = java_objects_worklist.at(next); 1090 new_edges += add_java_object_edges(ptn, true); 1091 } 1092 if (new_edges > 0) { 1093 // Update escape states on each iteration if graph was updated. 1094 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) { 1095 return false; // Nothing to do. 1096 } 1097 } 1098 time.stop(); 1099 } 1100 if ((iterations < CG_BUILD_ITER_LIMIT) && 1101 (time.seconds() < CG_BUILD_TIME_LIMIT)) { 1102 time.start(); 1103 // Find fields which have unknown value. 1104 int fields_length = oop_fields_worklist.length(); 1105 for (int next = 0; next < fields_length; next++) { 1106 FieldNode* field = oop_fields_worklist.at(next); 1107 if (field->edge_count() == 0) { 1108 new_edges += find_field_value(field); 1109 // This code may added new edges to phantom_object. 1110 // Need an other cycle to propagate references to phantom_object. 1111 } 1112 } 1113 time.stop(); 1114 } else { 1115 new_edges = 0; // Bailout 1116 } 1117 } while (new_edges > 0); 1118 1119 // Bailout if passed limits. 1120 if ((iterations >= CG_BUILD_ITER_LIMIT) || 1121 (time.seconds() >= CG_BUILD_TIME_LIMIT)) { 1122 Compile* C = _compile; 1123 if (C->log() != NULL) { 1124 C->log()->begin_elem("connectionGraph_bailout reason='reached "); 1125 C->log()->text("%s", (iterations >= CG_BUILD_ITER_LIMIT) ? "iterations" : "time"); 1126 C->log()->end_elem(" limit'"); 1127 } 1128 assert(ExitEscapeAnalysisOnTimeout, err_msg_res("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d", 1129 time.seconds(), iterations, nodes_size(), ptnodes_worklist.length())); 1130 // Possible infinite build_connection_graph loop, 1131 // bailout (no changes to ideal graph were made). 1132 return false; 1133 } 1134 #ifdef ASSERT 1135 if (Verbose && PrintEscapeAnalysis) { 1136 tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d", 1137 iterations, nodes_size(), ptnodes_worklist.length()); 1138 } 1139 #endif 1140 1141 #undef CG_BUILD_ITER_LIMIT 1142 #undef CG_BUILD_TIME_LIMIT 1143 1144 // Find fields initialized by NULL for non-escaping Allocations. 1145 int non_escaped_length = non_escaped_worklist.length(); 1146 for (int next = 0; next < non_escaped_length; next++) { 1147 JavaObjectNode* ptn = non_escaped_worklist.at(next); 1148 PointsToNode::EscapeState es = ptn->escape_state(); 1149 assert(es <= PointsToNode::ArgEscape, "sanity"); 1150 if (es == PointsToNode::NoEscape) { 1151 if (find_init_values(ptn, null_obj, _igvn) > 0) { 1152 // Adding references to NULL object does not change escape states 1153 // since it does not escape. Also no fields are added to NULL object. 1154 add_java_object_edges(null_obj, false); 1155 } 1156 } 1157 Node* n = ptn->ideal_node(); 1158 if (n->is_Allocate()) { 1159 // The object allocated by this Allocate node will never be 1160 // seen by an other thread. Mark it so that when it is 1161 // expanded no MemBarStoreStore is added. 1162 InitializeNode* ini = n->as_Allocate()->initialization(); 1163 if (ini != NULL) 1164 ini->set_does_not_escape(); 1165 } 1166 } 1167 return true; // Finished graph construction. 1168 } 1169 1170 // Propagate GlobalEscape and ArgEscape escape states to all nodes 1171 // and check that we still have non-escaping java objects. 1172 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist, 1173 GrowableArray<JavaObjectNode*>& non_escaped_worklist) { 1174 GrowableArray<PointsToNode*> escape_worklist; 1175 // First, put all nodes with GlobalEscape and ArgEscape states on worklist. 1176 int ptnodes_length = ptnodes_worklist.length(); 1177 for (int next = 0; next < ptnodes_length; ++next) { 1178 PointsToNode* ptn = ptnodes_worklist.at(next); 1179 if (ptn->escape_state() >= PointsToNode::ArgEscape || 1180 ptn->fields_escape_state() >= PointsToNode::ArgEscape) { 1181 escape_worklist.push(ptn); 1182 } 1183 } 1184 // Set escape states to referenced nodes (edges list). 1185 while (escape_worklist.length() > 0) { 1186 PointsToNode* ptn = escape_worklist.pop(); 1187 PointsToNode::EscapeState es = ptn->escape_state(); 1188 PointsToNode::EscapeState field_es = ptn->fields_escape_state(); 1189 if (ptn->is_Field() && ptn->as_Field()->is_oop() && 1190 es >= PointsToNode::ArgEscape) { 1191 // GlobalEscape or ArgEscape state of field means it has unknown value. 1192 if (add_edge(ptn, phantom_obj)) { 1193 // New edge was added 1194 add_field_uses_to_worklist(ptn->as_Field()); 1195 } 1196 } 1197 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 1198 PointsToNode* e = i.get(); 1199 if (e->is_Arraycopy()) { 1200 assert(ptn->arraycopy_dst(), "sanity"); 1201 // Propagate only fields escape state through arraycopy edge. 1202 if (e->fields_escape_state() < field_es) { 1203 set_fields_escape_state(e, field_es); 1204 escape_worklist.push(e); 1205 } 1206 } else if (es >= field_es) { 1207 // fields_escape_state is also set to 'es' if it is less than 'es'. 1208 if (e->escape_state() < es) { 1209 set_escape_state(e, es); 1210 escape_worklist.push(e); 1211 } 1212 } else { 1213 // Propagate field escape state. 1214 bool es_changed = false; 1215 if (e->fields_escape_state() < field_es) { 1216 set_fields_escape_state(e, field_es); 1217 es_changed = true; 1218 } 1219 if ((e->escape_state() < field_es) && 1220 e->is_Field() && ptn->is_JavaObject() && 1221 e->as_Field()->is_oop()) { 1222 // Change escape state of referenced fileds. 1223 set_escape_state(e, field_es); 1224 es_changed = true;; 1225 } else if (e->escape_state() < es) { 1226 set_escape_state(e, es); 1227 es_changed = true;; 1228 } 1229 if (es_changed) { 1230 escape_worklist.push(e); 1231 } 1232 } 1233 } 1234 } 1235 // Remove escaped objects from non_escaped list. 1236 for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) { 1237 JavaObjectNode* ptn = non_escaped_worklist.at(next); 1238 if (ptn->escape_state() >= PointsToNode::GlobalEscape) { 1239 non_escaped_worklist.delete_at(next); 1240 } 1241 if (ptn->escape_state() == PointsToNode::NoEscape) { 1242 // Find fields in non-escaped allocations which have unknown value. 1243 find_init_values(ptn, phantom_obj, NULL); 1244 } 1245 } 1246 return (non_escaped_worklist.length() > 0); 1247 } 1248 1249 // Add all references to JavaObject node by walking over all uses. 1250 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) { 1251 int new_edges = 0; 1252 if (populate_worklist) { 1253 // Populate _worklist by uses of jobj's uses. 1254 for (UseIterator i(jobj); i.has_next(); i.next()) { 1255 PointsToNode* use = i.get(); 1256 if (use->is_Arraycopy()) 1257 continue; 1258 add_uses_to_worklist(use); 1259 if (use->is_Field() && use->as_Field()->is_oop()) { 1260 // Put on worklist all field's uses (loads) and 1261 // related field nodes (same base and offset). 1262 add_field_uses_to_worklist(use->as_Field()); 1263 } 1264 } 1265 } 1266 while(_worklist.length() > 0) { 1267 PointsToNode* use = _worklist.pop(); 1268 if (PointsToNode::is_base_use(use)) { 1269 // Add reference from jobj to field and from field to jobj (field's base). 1270 use = PointsToNode::get_use_node(use)->as_Field(); 1271 if (add_base(use->as_Field(), jobj)) { 1272 new_edges++; 1273 } 1274 continue; 1275 } 1276 assert(!use->is_JavaObject(), "sanity"); 1277 if (use->is_Arraycopy()) { 1278 if (jobj == null_obj) // NULL object does not have field edges 1279 continue; 1280 // Added edge from Arraycopy node to arraycopy's source java object 1281 if (add_edge(use, jobj)) { 1282 jobj->set_arraycopy_src(); 1283 new_edges++; 1284 } 1285 // and stop here. 1286 continue; 1287 } 1288 if (!add_edge(use, jobj)) 1289 continue; // No new edge added, there was such edge already. 1290 new_edges++; 1291 if (use->is_LocalVar()) { 1292 add_uses_to_worklist(use); 1293 if (use->arraycopy_dst()) { 1294 for (EdgeIterator i(use); i.has_next(); i.next()) { 1295 PointsToNode* e = i.get(); 1296 if (e->is_Arraycopy()) { 1297 if (jobj == null_obj) // NULL object does not have field edges 1298 continue; 1299 // Add edge from arraycopy's destination java object to Arraycopy node. 1300 if (add_edge(jobj, e)) { 1301 new_edges++; 1302 jobj->set_arraycopy_dst(); 1303 } 1304 } 1305 } 1306 } 1307 } else { 1308 // Added new edge to stored in field values. 1309 // Put on worklist all field's uses (loads) and 1310 // related field nodes (same base and offset). 1311 add_field_uses_to_worklist(use->as_Field()); 1312 } 1313 } 1314 return new_edges; 1315 } 1316 1317 // Put on worklist all related field nodes. 1318 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) { 1319 assert(field->is_oop(), "sanity"); 1320 int offset = field->offset(); 1321 add_uses_to_worklist(field); 1322 // Loop over all bases of this field and push on worklist Field nodes 1323 // with the same offset and base (since they may reference the same field). 1324 for (BaseIterator i(field); i.has_next(); i.next()) { 1325 PointsToNode* base = i.get(); 1326 add_fields_to_worklist(field, base); 1327 // Check if the base was source object of arraycopy and go over arraycopy's 1328 // destination objects since values stored to a field of source object are 1329 // accessable by uses (loads) of fields of destination objects. 1330 if (base->arraycopy_src()) { 1331 for (UseIterator j(base); j.has_next(); j.next()) { 1332 PointsToNode* arycp = j.get(); 1333 if (arycp->is_Arraycopy()) { 1334 for (UseIterator k(arycp); k.has_next(); k.next()) { 1335 PointsToNode* abase = k.get(); 1336 if (abase->arraycopy_dst() && abase != base) { 1337 // Look for the same arracopy reference. 1338 add_fields_to_worklist(field, abase); 1339 } 1340 } 1341 } 1342 } 1343 } 1344 } 1345 } 1346 1347 // Put on worklist all related field nodes. 1348 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) { 1349 int offset = field->offset(); 1350 if (base->is_LocalVar()) { 1351 for (UseIterator j(base); j.has_next(); j.next()) { 1352 PointsToNode* f = j.get(); 1353 if (PointsToNode::is_base_use(f)) { // Field 1354 f = PointsToNode::get_use_node(f); 1355 if (f == field || !f->as_Field()->is_oop()) 1356 continue; 1357 int offs = f->as_Field()->offset(); 1358 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) { 1359 add_to_worklist(f); 1360 } 1361 } 1362 } 1363 } else { 1364 assert(base->is_JavaObject(), "sanity"); 1365 if (// Skip phantom_object since it is only used to indicate that 1366 // this field's content globally escapes. 1367 (base != phantom_obj) && 1368 // NULL object node does not have fields. 1369 (base != null_obj)) { 1370 for (EdgeIterator i(base); i.has_next(); i.next()) { 1371 PointsToNode* f = i.get(); 1372 // Skip arraycopy edge since store to destination object field 1373 // does not update value in source object field. 1374 if (f->is_Arraycopy()) { 1375 assert(base->arraycopy_dst(), "sanity"); 1376 continue; 1377 } 1378 if (f == field || !f->as_Field()->is_oop()) 1379 continue; 1380 int offs = f->as_Field()->offset(); 1381 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) { 1382 add_to_worklist(f); 1383 } 1384 } 1385 } 1386 } 1387 } 1388 1389 // Find fields which have unknown value. 1390 int ConnectionGraph::find_field_value(FieldNode* field) { 1391 // Escaped fields should have init value already. 1392 assert(field->escape_state() == PointsToNode::NoEscape, "sanity"); 1393 int new_edges = 0; 1394 for (BaseIterator i(field); i.has_next(); i.next()) { 1395 PointsToNode* base = i.get(); 1396 if (base->is_JavaObject()) { 1397 // Skip Allocate's fields which will be processed later. 1398 if (base->ideal_node()->is_Allocate()) 1399 return 0; 1400 assert(base == null_obj, "only NULL ptr base expected here"); 1401 } 1402 } 1403 if (add_edge(field, phantom_obj)) { 1404 // New edge was added 1405 new_edges++; 1406 add_field_uses_to_worklist(field); 1407 } 1408 return new_edges; 1409 } 1410 1411 // Find fields initializing values for allocations. 1412 int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) { 1413 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only"); 1414 int new_edges = 0; 1415 Node* alloc = pta->ideal_node(); 1416 if (init_val == phantom_obj) { 1417 // Do nothing for Allocate nodes since its fields values are "known". 1418 if (alloc->is_Allocate()) 1419 return 0; 1420 assert(alloc->as_CallStaticJava(), "sanity"); 1421 #ifdef ASSERT 1422 if (alloc->as_CallStaticJava()->method() == NULL) { 1423 const char* name = alloc->as_CallStaticJava()->_name; 1424 assert(strncmp(name, "_multianewarray", 15) == 0, "sanity"); 1425 } 1426 #endif 1427 // Non-escaped allocation returned from Java or runtime call have 1428 // unknown values in fields. 1429 for (EdgeIterator i(pta); i.has_next(); i.next()) { 1430 PointsToNode* field = i.get(); 1431 if (field->is_Field() && field->as_Field()->is_oop()) { 1432 if (add_edge(field, phantom_obj)) { 1433 // New edge was added 1434 new_edges++; 1435 add_field_uses_to_worklist(field->as_Field()); 1436 } 1437 } 1438 } 1439 return new_edges; 1440 } 1441 assert(init_val == null_obj, "sanity"); 1442 // Do nothing for Call nodes since its fields values are unknown. 1443 if (!alloc->is_Allocate()) 1444 return 0; 1445 1446 InitializeNode* ini = alloc->as_Allocate()->initialization(); 1447 Compile* C = _compile; 1448 bool visited_bottom_offset = false; 1449 GrowableArray<int> offsets_worklist; 1450 1451 // Check if an oop field's initializing value is recorded and add 1452 // a corresponding NULL if field's value if it is not recorded. 1453 // Connection Graph does not record a default initialization by NULL 1454 // captured by Initialize node. 1455 // 1456 for (EdgeIterator i(pta); i.has_next(); i.next()) { 1457 PointsToNode* field = i.get(); // Field (AddP) 1458 if (!field->is_Field() || !field->as_Field()->is_oop()) 1459 continue; // Not oop field 1460 int offset = field->as_Field()->offset(); 1461 if (offset == Type::OffsetBot) { 1462 if (!visited_bottom_offset) { 1463 // OffsetBot is used to reference array's element, 1464 // always add reference to NULL to all Field nodes since we don't 1465 // known which element is referenced. 1466 if (add_edge(field, null_obj)) { 1467 // New edge was added 1468 new_edges++; 1469 add_field_uses_to_worklist(field->as_Field()); 1470 visited_bottom_offset = true; 1471 } 1472 } 1473 } else { 1474 // Check only oop fields. 1475 const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type(); 1476 if (adr_type->isa_rawptr()) { 1477 #ifdef ASSERT 1478 // Raw pointers are used for initializing stores so skip it 1479 // since it should be recorded already 1480 Node* base = get_addp_base(field->ideal_node()); 1481 assert(adr_type->isa_rawptr() && base->is_Proj() && 1482 (base->in(0) == alloc),"unexpected pointer type"); 1483 #endif 1484 continue; 1485 } 1486 if (!offsets_worklist.contains(offset)) { 1487 offsets_worklist.append(offset); 1488 Node* value = NULL; 1489 if (ini != NULL) { 1490 // StoreP::memory_type() == T_ADDRESS 1491 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS; 1492 Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase); 1493 // Make sure initializing store has the same type as this AddP. 1494 // This AddP may reference non existing field because it is on a 1495 // dead branch of bimorphic call which is not eliminated yet. 1496 if (store != NULL && store->is_Store() && 1497 store->as_Store()->memory_type() == ft) { 1498 value = store->in(MemNode::ValueIn); 1499 #ifdef ASSERT 1500 if (VerifyConnectionGraph) { 1501 // Verify that AddP already points to all objects the value points to. 1502 PointsToNode* val = ptnode_adr(value->_idx); 1503 assert((val != NULL), "should be processed already"); 1504 PointsToNode* missed_obj = NULL; 1505 if (val->is_JavaObject()) { 1506 if (!field->points_to(val->as_JavaObject())) { 1507 missed_obj = val; 1508 } 1509 } else { 1510 if (!val->is_LocalVar() || (val->edge_count() == 0)) { 1511 tty->print_cr("----------init store has invalid value -----"); 1512 store->dump(); 1513 val->dump(); 1514 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already"); 1515 } 1516 for (EdgeIterator j(val); j.has_next(); j.next()) { 1517 PointsToNode* obj = j.get(); 1518 if (obj->is_JavaObject()) { 1519 if (!field->points_to(obj->as_JavaObject())) { 1520 missed_obj = obj; 1521 break; 1522 } 1523 } 1524 } 1525 } 1526 if (missed_obj != NULL) { 1527 tty->print_cr("----------field---------------------------------"); 1528 field->dump(); 1529 tty->print_cr("----------missed referernce to object-----------"); 1530 missed_obj->dump(); 1531 tty->print_cr("----------object referernced by init store -----"); 1532 store->dump(); 1533 val->dump(); 1534 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference"); 1535 } 1536 } 1537 #endif 1538 } else { 1539 // There could be initializing stores which follow allocation. 1540 // For example, a volatile field store is not collected 1541 // by Initialize node. 1542 // 1543 // Need to check for dependent loads to separate such stores from 1544 // stores which follow loads. For now, add initial value NULL so 1545 // that compare pointers optimization works correctly. 1546 } 1547 } 1548 if (value == NULL) { 1549 // A field's initializing value was not recorded. Add NULL. 1550 if (add_edge(field, null_obj)) { 1551 // New edge was added 1552 new_edges++; 1553 add_field_uses_to_worklist(field->as_Field()); 1554 } 1555 } 1556 } 1557 } 1558 } 1559 return new_edges; 1560 } 1561 1562 // Adjust scalar_replaceable state after Connection Graph is built. 1563 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) { 1564 // Search for non-escaping objects which are not scalar replaceable 1565 // and mark them to propagate the state to referenced objects. 1566 1567 // 1. An object is not scalar replaceable if the field into which it is 1568 // stored has unknown offset (stored into unknown element of an array). 1569 // 1570 for (UseIterator i(jobj); i.has_next(); i.next()) { 1571 PointsToNode* use = i.get(); 1572 assert(!use->is_Arraycopy(), "sanity"); 1573 if (use->is_Field()) { 1574 FieldNode* field = use->as_Field(); 1575 assert(field->is_oop() && field->scalar_replaceable() && 1576 field->fields_escape_state() == PointsToNode::NoEscape, "sanity"); 1577 if (field->offset() == Type::OffsetBot) { 1578 jobj->set_scalar_replaceable(false); 1579 return; 1580 } 1581 } 1582 assert(use->is_Field() || use->is_LocalVar(), "sanity"); 1583 // 2. An object is not scalar replaceable if it is merged with other objects. 1584 for (EdgeIterator j(use); j.has_next(); j.next()) { 1585 PointsToNode* ptn = j.get(); 1586 if (ptn->is_JavaObject() && ptn != jobj) { 1587 // Mark all objects. 1588 jobj->set_scalar_replaceable(false); 1589 ptn->set_scalar_replaceable(false); 1590 } 1591 } 1592 if (!jobj->scalar_replaceable()) { 1593 return; 1594 } 1595 } 1596 1597 for (EdgeIterator j(jobj); j.has_next(); j.next()) { 1598 // Non-escaping object node should point only to field nodes. 1599 FieldNode* field = j.get()->as_Field(); 1600 int offset = field->as_Field()->offset(); 1601 1602 // 3. An object is not scalar replaceable if it has a field with unknown 1603 // offset (array's element is accessed in loop). 1604 if (offset == Type::OffsetBot) { 1605 jobj->set_scalar_replaceable(false); 1606 return; 1607 } 1608 // 4. Currently an object is not scalar replaceable if a LoadStore node 1609 // access its field since the field value is unknown after it. 1610 // 1611 Node* n = field->ideal_node(); 1612 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1613 if (n->fast_out(i)->is_LoadStore()) { 1614 jobj->set_scalar_replaceable(false); 1615 return; 1616 } 1617 } 1618 1619 // 5. Or the address may point to more then one object. This may produce 1620 // the false positive result (set not scalar replaceable) 1621 // since the flow-insensitive escape analysis can't separate 1622 // the case when stores overwrite the field's value from the case 1623 // when stores happened on different control branches. 1624 // 1625 // Note: it will disable scalar replacement in some cases: 1626 // 1627 // Point p[] = new Point[1]; 1628 // p[0] = new Point(); // Will be not scalar replaced 1629 // 1630 // but it will save us from incorrect optimizations in next cases: 1631 // 1632 // Point p[] = new Point[1]; 1633 // if ( x ) p[0] = new Point(); // Will be not scalar replaced 1634 // 1635 if (field->base_count() > 1) { 1636 for (BaseIterator i(field); i.has_next(); i.next()) { 1637 PointsToNode* base = i.get(); 1638 // Don't take into account LocalVar nodes which 1639 // may point to only one object which should be also 1640 // this field's base by now. 1641 if (base->is_JavaObject() && base != jobj) { 1642 // Mark all bases. 1643 jobj->set_scalar_replaceable(false); 1644 base->set_scalar_replaceable(false); 1645 } 1646 } 1647 } 1648 } 1649 } 1650 1651 #ifdef ASSERT 1652 void ConnectionGraph::verify_connection_graph( 1653 GrowableArray<PointsToNode*>& ptnodes_worklist, 1654 GrowableArray<JavaObjectNode*>& non_escaped_worklist, 1655 GrowableArray<JavaObjectNode*>& java_objects_worklist, 1656 GrowableArray<Node*>& addp_worklist) { 1657 // Verify that graph is complete - no new edges could be added. 1658 int java_objects_length = java_objects_worklist.length(); 1659 int non_escaped_length = non_escaped_worklist.length(); 1660 int new_edges = 0; 1661 for (int next = 0; next < java_objects_length; ++next) { 1662 JavaObjectNode* ptn = java_objects_worklist.at(next); 1663 new_edges += add_java_object_edges(ptn, true); 1664 } 1665 assert(new_edges == 0, "graph was not complete"); 1666 // Verify that escape state is final. 1667 int length = non_escaped_worklist.length(); 1668 find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist); 1669 assert((non_escaped_length == non_escaped_worklist.length()) && 1670 (non_escaped_length == length) && 1671 (_worklist.length() == 0), "escape state was not final"); 1672 1673 // Verify fields information. 1674 int addp_length = addp_worklist.length(); 1675 for (int next = 0; next < addp_length; ++next ) { 1676 Node* n = addp_worklist.at(next); 1677 FieldNode* field = ptnode_adr(n->_idx)->as_Field(); 1678 if (field->is_oop()) { 1679 // Verify that field has all bases 1680 Node* base = get_addp_base(n); 1681 PointsToNode* ptn = ptnode_adr(base->_idx); 1682 if (ptn->is_JavaObject()) { 1683 assert(field->has_base(ptn->as_JavaObject()), "sanity"); 1684 } else { 1685 assert(ptn->is_LocalVar(), "sanity"); 1686 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 1687 PointsToNode* e = i.get(); 1688 if (e->is_JavaObject()) { 1689 assert(field->has_base(e->as_JavaObject()), "sanity"); 1690 } 1691 } 1692 } 1693 // Verify that all fields have initializing values. 1694 if (field->edge_count() == 0) { 1695 tty->print_cr("----------field does not have references----------"); 1696 field->dump(); 1697 for (BaseIterator i(field); i.has_next(); i.next()) { 1698 PointsToNode* base = i.get(); 1699 tty->print_cr("----------field has next base---------------------"); 1700 base->dump(); 1701 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) { 1702 tty->print_cr("----------base has fields-------------------------"); 1703 for (EdgeIterator j(base); j.has_next(); j.next()) { 1704 j.get()->dump(); 1705 } 1706 tty->print_cr("----------base has references---------------------"); 1707 for (UseIterator j(base); j.has_next(); j.next()) { 1708 j.get()->dump(); 1709 } 1710 } 1711 } 1712 for (UseIterator i(field); i.has_next(); i.next()) { 1713 i.get()->dump(); 1714 } 1715 assert(field->edge_count() > 0, "sanity"); 1716 } 1717 } 1718 } 1719 } 1720 #endif 1721 1722 // Optimize ideal graph. 1723 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist, 1724 GrowableArray<Node*>& storestore_worklist) { 1725 Compile* C = _compile; 1726 PhaseIterGVN* igvn = _igvn; 1727 if (EliminateLocks) { 1728 // Mark locks before changing ideal graph. 1729 int cnt = C->macro_count(); 1730 for( int i=0; i < cnt; i++ ) { 1731 Node *n = C->macro_node(i); 1732 if (n->is_AbstractLock()) { // Lock and Unlock nodes 1733 AbstractLockNode* alock = n->as_AbstractLock(); 1734 if (!alock->is_non_esc_obj()) { 1735 if (not_global_escape(alock->obj_node())) { 1736 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity"); 1737 // The lock could be marked eliminated by lock coarsening 1738 // code during first IGVN before EA. Replace coarsened flag 1739 // to eliminate all associated locks/unlocks. 1740 alock->set_non_esc_obj(); 1741 } 1742 } 1743 } 1744 } 1745 } 1746 1747 if (OptimizePtrCompare) { 1748 // Add ConI(#CC_GT) and ConI(#CC_EQ). 1749 _pcmp_neq = igvn->makecon(TypeInt::CC_GT); 1750 _pcmp_eq = igvn->makecon(TypeInt::CC_EQ); 1751 // Optimize objects compare. 1752 while (ptr_cmp_worklist.length() != 0) { 1753 Node *n = ptr_cmp_worklist.pop(); 1754 Node *res = optimize_ptr_compare(n); 1755 if (res != NULL) { 1756 #ifndef PRODUCT 1757 if (PrintOptimizePtrCompare) { 1758 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ")); 1759 if (Verbose) { 1760 n->dump(1); 1761 } 1762 } 1763 #endif 1764 igvn->replace_node(n, res); 1765 } 1766 } 1767 // cleanup 1768 if (_pcmp_neq->outcnt() == 0) 1769 igvn->hash_delete(_pcmp_neq); 1770 if (_pcmp_eq->outcnt() == 0) 1771 igvn->hash_delete(_pcmp_eq); 1772 } 1773 1774 // For MemBarStoreStore nodes added in library_call.cpp, check 1775 // escape status of associated AllocateNode and optimize out 1776 // MemBarStoreStore node if the allocated object never escapes. 1777 while (storestore_worklist.length() != 0) { 1778 Node *n = storestore_worklist.pop(); 1779 MemBarStoreStoreNode *storestore = n ->as_MemBarStoreStore(); 1780 Node *alloc = storestore->in(MemBarNode::Precedent)->in(0); 1781 assert (alloc->is_Allocate(), "storestore should point to AllocateNode"); 1782 if (not_global_escape(alloc)) { 1783 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot); 1784 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory)); 1785 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control)); 1786 igvn->register_new_node_with_optimizer(mb); 1787 igvn->replace_node(storestore, mb); 1788 } 1789 } 1790 } 1791 1792 // Optimize objects compare. 1793 Node* ConnectionGraph::optimize_ptr_compare(Node* n) { 1794 assert(OptimizePtrCompare, "sanity"); 1795 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx); 1796 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx); 1797 JavaObjectNode* jobj1 = unique_java_object(n->in(1)); 1798 JavaObjectNode* jobj2 = unique_java_object(n->in(2)); 1799 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity"); 1800 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity"); 1801 1802 // Check simple cases first. 1803 if (jobj1 != NULL) { 1804 if (jobj1->escape_state() == PointsToNode::NoEscape) { 1805 if (jobj1 == jobj2) { 1806 // Comparing the same not escaping object. 1807 return _pcmp_eq; 1808 } 1809 Node* obj = jobj1->ideal_node(); 1810 // Comparing not escaping allocation. 1811 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 1812 !ptn2->points_to(jobj1)) { 1813 return _pcmp_neq; // This includes nullness check. 1814 } 1815 } 1816 } 1817 if (jobj2 != NULL) { 1818 if (jobj2->escape_state() == PointsToNode::NoEscape) { 1819 Node* obj = jobj2->ideal_node(); 1820 // Comparing not escaping allocation. 1821 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 1822 !ptn1->points_to(jobj2)) { 1823 return _pcmp_neq; // This includes nullness check. 1824 } 1825 } 1826 } 1827 if (jobj1 != NULL && jobj1 != phantom_obj && 1828 jobj2 != NULL && jobj2 != phantom_obj && 1829 jobj1->ideal_node()->is_Con() && 1830 jobj2->ideal_node()->is_Con()) { 1831 // Klass or String constants compare. Need to be careful with 1832 // compressed pointers - compare types of ConN and ConP instead of nodes. 1833 const Type* t1 = jobj1->ideal_node()->get_ptr_type(); 1834 const Type* t2 = jobj2->ideal_node()->get_ptr_type(); 1835 if (t1->make_ptr() == t2->make_ptr()) { 1836 return _pcmp_eq; 1837 } else { 1838 return _pcmp_neq; 1839 } 1840 } 1841 if (ptn1->meet(ptn2)) { 1842 return NULL; // Sets are not disjoint 1843 } 1844 1845 // Sets are disjoint. 1846 bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj); 1847 bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj); 1848 bool set1_has_null_ptr = ptn1->points_to(null_obj); 1849 bool set2_has_null_ptr = ptn2->points_to(null_obj); 1850 if (set1_has_unknown_ptr && set2_has_null_ptr || 1851 set2_has_unknown_ptr && set1_has_null_ptr) { 1852 // Check nullness of unknown object. 1853 return NULL; 1854 } 1855 1856 // Disjointness by itself is not sufficient since 1857 // alias analysis is not complete for escaped objects. 1858 // Disjoint sets are definitely unrelated only when 1859 // at least one set has only not escaping allocations. 1860 if (!set1_has_unknown_ptr && !set1_has_null_ptr) { 1861 if (ptn1->non_escaping_allocation()) { 1862 return _pcmp_neq; 1863 } 1864 } 1865 if (!set2_has_unknown_ptr && !set2_has_null_ptr) { 1866 if (ptn2->non_escaping_allocation()) { 1867 return _pcmp_neq; 1868 } 1869 } 1870 return NULL; 1871 } 1872 1873 // Connection Graph constuction functions. 1874 1875 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) { 1876 PointsToNode* ptadr = _nodes.at(n->_idx); 1877 if (ptadr != NULL) { 1878 assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity"); 1879 return; 1880 } 1881 Compile* C = _compile; 1882 ptadr = new (C->comp_arena()) LocalVarNode(C, n, es); 1883 _nodes.at_put(n->_idx, ptadr); 1884 } 1885 1886 void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) { 1887 PointsToNode* ptadr = _nodes.at(n->_idx); 1888 if (ptadr != NULL) { 1889 assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity"); 1890 return; 1891 } 1892 Compile* C = _compile; 1893 ptadr = new (C->comp_arena()) JavaObjectNode(C, n, es); 1894 _nodes.at_put(n->_idx, ptadr); 1895 } 1896 1897 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) { 1898 PointsToNode* ptadr = _nodes.at(n->_idx); 1899 if (ptadr != NULL) { 1900 assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity"); 1901 return; 1902 } 1903 bool unsafe = false; 1904 bool is_oop = is_oop_field(n, offset, &unsafe); 1905 if (unsafe) { 1906 es = PointsToNode::GlobalEscape; 1907 } 1908 Compile* C = _compile; 1909 FieldNode* field = new (C->comp_arena()) FieldNode(C, n, es, offset, is_oop); 1910 _nodes.at_put(n->_idx, field); 1911 } 1912 1913 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es, 1914 PointsToNode* src, PointsToNode* dst) { 1915 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar"); 1916 assert((src != null_obj) && (dst != null_obj), "not for ConP NULL"); 1917 PointsToNode* ptadr = _nodes.at(n->_idx); 1918 if (ptadr != NULL) { 1919 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity"); 1920 return; 1921 } 1922 Compile* C = _compile; 1923 ptadr = new (C->comp_arena()) ArraycopyNode(C, n, es); 1924 _nodes.at_put(n->_idx, ptadr); 1925 // Add edge from arraycopy node to source object. 1926 (void)add_edge(ptadr, src); 1927 src->set_arraycopy_src(); 1928 // Add edge from destination object to arraycopy node. 1929 (void)add_edge(dst, ptadr); 1930 dst->set_arraycopy_dst(); 1931 } 1932 1933 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) { 1934 const Type* adr_type = n->as_AddP()->bottom_type(); 1935 BasicType bt = T_INT; 1936 if (offset == Type::OffsetBot) { 1937 // Check only oop fields. 1938 if (!adr_type->isa_aryptr() || 1939 (adr_type->isa_aryptr()->klass() == NULL) || 1940 adr_type->isa_aryptr()->klass()->is_obj_array_klass()) { 1941 // OffsetBot is used to reference array's element. Ignore first AddP. 1942 if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) { 1943 bt = T_OBJECT; 1944 } 1945 } 1946 } else if (offset != oopDesc::klass_offset_in_bytes()) { 1947 if (adr_type->isa_instptr()) { 1948 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field(); 1949 if (field != NULL) { 1950 bt = field->layout_type(); 1951 } else { 1952 // Check for unsafe oop field access 1953 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1954 int opcode = n->fast_out(i)->Opcode(); 1955 if (opcode == Op_StoreP || opcode == Op_LoadP || 1956 opcode == Op_StoreN || opcode == Op_LoadN) { 1957 bt = T_OBJECT; 1958 (*unsafe) = true; 1959 break; 1960 } 1961 } 1962 } 1963 } else if (adr_type->isa_aryptr()) { 1964 if (offset == arrayOopDesc::length_offset_in_bytes()) { 1965 // Ignore array length load. 1966 } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) { 1967 // Ignore first AddP. 1968 } else { 1969 const Type* elemtype = adr_type->isa_aryptr()->elem(); 1970 bt = elemtype->array_element_basic_type(); 1971 } 1972 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) { 1973 // Allocation initialization, ThreadLocal field access, unsafe access 1974 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1975 int opcode = n->fast_out(i)->Opcode(); 1976 if (opcode == Op_StoreP || opcode == Op_LoadP || 1977 opcode == Op_StoreN || opcode == Op_LoadN) { 1978 bt = T_OBJECT; 1979 break; 1980 } 1981 } 1982 } 1983 } 1984 return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY); 1985 } 1986 1987 // Returns unique pointed java object or NULL. 1988 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) { 1989 assert(!_collecting, "should not call when contructed graph"); 1990 // If the node was created after the escape computation we can't answer. 1991 uint idx = n->_idx; 1992 if (idx >= nodes_size()) { 1993 return NULL; 1994 } 1995 PointsToNode* ptn = ptnode_adr(idx); 1996 if (ptn->is_JavaObject()) { 1997 return ptn->as_JavaObject(); 1998 } 1999 assert(ptn->is_LocalVar(), "sanity"); 2000 // Check all java objects it points to. 2001 JavaObjectNode* jobj = NULL; 2002 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2003 PointsToNode* e = i.get(); 2004 if (e->is_JavaObject()) { 2005 if (jobj == NULL) { 2006 jobj = e->as_JavaObject(); 2007 } else if (jobj != e) { 2008 return NULL; 2009 } 2010 } 2011 } 2012 return jobj; 2013 } 2014 2015 // Return true if this node points only to non-escaping allocations. 2016 bool PointsToNode::non_escaping_allocation() { 2017 if (is_JavaObject()) { 2018 Node* n = ideal_node(); 2019 if (n->is_Allocate() || n->is_CallStaticJava()) { 2020 return (escape_state() == PointsToNode::NoEscape); 2021 } else { 2022 return false; 2023 } 2024 } 2025 assert(is_LocalVar(), "sanity"); 2026 // Check all java objects it points to. 2027 for (EdgeIterator i(this); i.has_next(); i.next()) { 2028 PointsToNode* e = i.get(); 2029 if (e->is_JavaObject()) { 2030 Node* n = e->ideal_node(); 2031 if ((e->escape_state() != PointsToNode::NoEscape) || 2032 !(n->is_Allocate() || n->is_CallStaticJava())) { 2033 return false; 2034 } 2035 } 2036 } 2037 return true; 2038 } 2039 2040 // Return true if we know the node does not escape globally. 2041 bool ConnectionGraph::not_global_escape(Node *n) { 2042 assert(!_collecting, "should not call during graph construction"); 2043 // If the node was created after the escape computation we can't answer. 2044 uint idx = n->_idx; 2045 if (idx >= nodes_size()) { 2046 return false; 2047 } 2048 PointsToNode* ptn = ptnode_adr(idx); 2049 PointsToNode::EscapeState es = ptn->escape_state(); 2050 // If we have already computed a value, return it. 2051 if (es >= PointsToNode::GlobalEscape) 2052 return false; 2053 if (ptn->is_JavaObject()) { 2054 return true; // (es < PointsToNode::GlobalEscape); 2055 } 2056 assert(ptn->is_LocalVar(), "sanity"); 2057 // Check all java objects it points to. 2058 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2059 if (i.get()->escape_state() >= PointsToNode::GlobalEscape) 2060 return false; 2061 } 2062 return true; 2063 } 2064 2065 2066 // Helper functions 2067 2068 // Return true if this node points to specified node or nodes it points to. 2069 bool PointsToNode::points_to(JavaObjectNode* ptn) const { 2070 if (is_JavaObject()) { 2071 return (this == ptn); 2072 } 2073 assert(is_LocalVar() || is_Field(), "sanity"); 2074 for (EdgeIterator i(this); i.has_next(); i.next()) { 2075 if (i.get() == ptn) 2076 return true; 2077 } 2078 return false; 2079 } 2080 2081 // Return true if one node points to an other. 2082 bool PointsToNode::meet(PointsToNode* ptn) { 2083 if (this == ptn) { 2084 return true; 2085 } else if (ptn->is_JavaObject()) { 2086 return this->points_to(ptn->as_JavaObject()); 2087 } else if (this->is_JavaObject()) { 2088 return ptn->points_to(this->as_JavaObject()); 2089 } 2090 assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity"); 2091 int ptn_count = ptn->edge_count(); 2092 for (EdgeIterator i(this); i.has_next(); i.next()) { 2093 PointsToNode* this_e = i.get(); 2094 for (int j = 0; j < ptn_count; j++) { 2095 if (this_e == ptn->edge(j)) 2096 return true; 2097 } 2098 } 2099 return false; 2100 } 2101 2102 #ifdef ASSERT 2103 // Return true if bases point to this java object. 2104 bool FieldNode::has_base(JavaObjectNode* jobj) const { 2105 for (BaseIterator i(this); i.has_next(); i.next()) { 2106 if (i.get() == jobj) 2107 return true; 2108 } 2109 return false; 2110 } 2111 #endif 2112 2113 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) { 2114 const Type *adr_type = phase->type(adr); 2115 if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && 2116 adr->in(AddPNode::Address)->is_Proj() && 2117 adr->in(AddPNode::Address)->in(0)->is_Allocate()) { 2118 // We are computing a raw address for a store captured by an Initialize 2119 // compute an appropriate address type. AddP cases #3 and #5 (see below). 2120 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 2121 assert(offs != Type::OffsetBot || 2122 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), 2123 "offset must be a constant or it is initialization of array"); 2124 return offs; 2125 } 2126 const TypePtr *t_ptr = adr_type->isa_ptr(); 2127 assert(t_ptr != NULL, "must be a pointer type"); 2128 return t_ptr->offset(); 2129 } 2130 2131 Node* ConnectionGraph::get_addp_base(Node *addp) { 2132 assert(addp->is_AddP(), "must be AddP"); 2133 // 2134 // AddP cases for Base and Address inputs: 2135 // case #1. Direct object's field reference: 2136 // Allocate 2137 // | 2138 // Proj #5 ( oop result ) 2139 // | 2140 // CheckCastPP (cast to instance type) 2141 // | | 2142 // AddP ( base == address ) 2143 // 2144 // case #2. Indirect object's field reference: 2145 // Phi 2146 // | 2147 // CastPP (cast to instance type) 2148 // | | 2149 // AddP ( base == address ) 2150 // 2151 // case #3. Raw object's field reference for Initialize node: 2152 // Allocate 2153 // | 2154 // Proj #5 ( oop result ) 2155 // top | 2156 // \ | 2157 // AddP ( base == top ) 2158 // 2159 // case #4. Array's element reference: 2160 // {CheckCastPP | CastPP} 2161 // | | | 2162 // | AddP ( array's element offset ) 2163 // | | 2164 // AddP ( array's offset ) 2165 // 2166 // case #5. Raw object's field reference for arraycopy stub call: 2167 // The inline_native_clone() case when the arraycopy stub is called 2168 // after the allocation before Initialize and CheckCastPP nodes. 2169 // Allocate 2170 // | 2171 // Proj #5 ( oop result ) 2172 // | | 2173 // AddP ( base == address ) 2174 // 2175 // case #6. Constant Pool, ThreadLocal, CastX2P or 2176 // Raw object's field reference: 2177 // {ConP, ThreadLocal, CastX2P, raw Load} 2178 // top | 2179 // \ | 2180 // AddP ( base == top ) 2181 // 2182 // case #7. Klass's field reference. 2183 // LoadKlass 2184 // | | 2185 // AddP ( base == address ) 2186 // 2187 // case #8. narrow Klass's field reference. 2188 // LoadNKlass 2189 // | 2190 // DecodeN 2191 // | | 2192 // AddP ( base == address ) 2193 // 2194 Node *base = addp->in(AddPNode::Base); 2195 if (base->uncast()->is_top()) { // The AddP case #3 and #6. 2196 base = addp->in(AddPNode::Address); 2197 while (base->is_AddP()) { 2198 // Case #6 (unsafe access) may have several chained AddP nodes. 2199 assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only"); 2200 base = base->in(AddPNode::Address); 2201 } 2202 Node* uncast_base = base->uncast(); 2203 int opcode = uncast_base->Opcode(); 2204 assert(opcode == Op_ConP || opcode == Op_ThreadLocal || 2205 opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() || 2206 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) || 2207 (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity"); 2208 } 2209 return base; 2210 } 2211 2212 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) { 2213 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes"); 2214 Node* addp2 = addp->raw_out(0); 2215 if (addp->outcnt() == 1 && addp2->is_AddP() && 2216 addp2->in(AddPNode::Base) == n && 2217 addp2->in(AddPNode::Address) == addp) { 2218 assert(addp->in(AddPNode::Base) == n, "expecting the same base"); 2219 // 2220 // Find array's offset to push it on worklist first and 2221 // as result process an array's element offset first (pushed second) 2222 // to avoid CastPP for the array's offset. 2223 // Otherwise the inserted CastPP (LocalVar) will point to what 2224 // the AddP (Field) points to. Which would be wrong since 2225 // the algorithm expects the CastPP has the same point as 2226 // as AddP's base CheckCastPP (LocalVar). 2227 // 2228 // ArrayAllocation 2229 // | 2230 // CheckCastPP 2231 // | 2232 // memProj (from ArrayAllocation CheckCastPP) 2233 // | || 2234 // | || Int (element index) 2235 // | || | ConI (log(element size)) 2236 // | || | / 2237 // | || LShift 2238 // | || / 2239 // | AddP (array's element offset) 2240 // | | 2241 // | | ConI (array's offset: #12(32-bits) or #24(64-bits)) 2242 // | / / 2243 // AddP (array's offset) 2244 // | 2245 // Load/Store (memory operation on array's element) 2246 // 2247 return addp2; 2248 } 2249 return NULL; 2250 } 2251 2252 // 2253 // Adjust the type and inputs of an AddP which computes the 2254 // address of a field of an instance 2255 // 2256 bool ConnectionGraph::split_AddP(Node *addp, Node *base) { 2257 PhaseGVN* igvn = _igvn; 2258 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr(); 2259 assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr"); 2260 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr(); 2261 if (t == NULL) { 2262 // We are computing a raw address for a store captured by an Initialize 2263 // compute an appropriate address type (cases #3 and #5). 2264 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer"); 2265 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation"); 2266 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot); 2267 assert(offs != Type::OffsetBot, "offset must be a constant"); 2268 t = base_t->add_offset(offs)->is_oopptr(); 2269 } 2270 int inst_id = base_t->instance_id(); 2271 assert(!t->is_known_instance() || t->instance_id() == inst_id, 2272 "old type must be non-instance or match new type"); 2273 2274 // The type 't' could be subclass of 'base_t'. 2275 // As result t->offset() could be large then base_t's size and it will 2276 // cause the failure in add_offset() with narrow oops since TypeOopPtr() 2277 // constructor verifies correctness of the offset. 2278 // 2279 // It could happened on subclass's branch (from the type profiling 2280 // inlining) which was not eliminated during parsing since the exactness 2281 // of the allocation type was not propagated to the subclass type check. 2282 // 2283 // Or the type 't' could be not related to 'base_t' at all. 2284 // It could happened when CHA type is different from MDO type on a dead path 2285 // (for example, from instanceof check) which is not collapsed during parsing. 2286 // 2287 // Do nothing for such AddP node and don't process its users since 2288 // this code branch will go away. 2289 // 2290 if (!t->is_known_instance() && 2291 !base_t->klass()->is_subtype_of(t->klass())) { 2292 return false; // bail out 2293 } 2294 const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr(); 2295 // Do NOT remove the next line: ensure a new alias index is allocated 2296 // for the instance type. Note: C++ will not remove it since the call 2297 // has side effect. 2298 int alias_idx = _compile->get_alias_index(tinst); 2299 igvn->set_type(addp, tinst); 2300 // record the allocation in the node map 2301 set_map(addp, get_map(base->_idx)); 2302 // Set addp's Base and Address to 'base'. 2303 Node *abase = addp->in(AddPNode::Base); 2304 Node *adr = addp->in(AddPNode::Address); 2305 if (adr->is_Proj() && adr->in(0)->is_Allocate() && 2306 adr->in(0)->_idx == (uint)inst_id) { 2307 // Skip AddP cases #3 and #5. 2308 } else { 2309 assert(!abase->is_top(), "sanity"); // AddP case #3 2310 if (abase != base) { 2311 igvn->hash_delete(addp); 2312 addp->set_req(AddPNode::Base, base); 2313 if (abase == adr) { 2314 addp->set_req(AddPNode::Address, base); 2315 } else { 2316 // AddP case #4 (adr is array's element offset AddP node) 2317 #ifdef ASSERT 2318 const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr(); 2319 assert(adr->is_AddP() && atype != NULL && 2320 atype->instance_id() == inst_id, "array's element offset should be processed first"); 2321 #endif 2322 } 2323 igvn->hash_insert(addp); 2324 } 2325 } 2326 // Put on IGVN worklist since at least addp's type was changed above. 2327 record_for_optimizer(addp); 2328 return true; 2329 } 2330 2331 // 2332 // Create a new version of orig_phi if necessary. Returns either the newly 2333 // created phi or an existing phi. Sets create_new to indicate whether a new 2334 // phi was created. Cache the last newly created phi in the node map. 2335 // 2336 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) { 2337 Compile *C = _compile; 2338 PhaseGVN* igvn = _igvn; 2339 new_created = false; 2340 int phi_alias_idx = C->get_alias_index(orig_phi->adr_type()); 2341 // nothing to do if orig_phi is bottom memory or matches alias_idx 2342 if (phi_alias_idx == alias_idx) { 2343 return orig_phi; 2344 } 2345 // Have we recently created a Phi for this alias index? 2346 PhiNode *result = get_map_phi(orig_phi->_idx); 2347 if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) { 2348 return result; 2349 } 2350 // Previous check may fail when the same wide memory Phi was split into Phis 2351 // for different memory slices. Search all Phis for this region. 2352 if (result != NULL) { 2353 Node* region = orig_phi->in(0); 2354 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { 2355 Node* phi = region->fast_out(i); 2356 if (phi->is_Phi() && 2357 C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) { 2358 assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice"); 2359 return phi->as_Phi(); 2360 } 2361 } 2362 } 2363 if ((int) (C->live_nodes() + 2*NodeLimitFudgeFactor) > MaxNodeLimit) { 2364 if (C->do_escape_analysis() == true && !C->failing()) { 2365 // Retry compilation without escape analysis. 2366 // If this is the first failure, the sentinel string will "stick" 2367 // to the Compile object, and the C2Compiler will see it and retry. 2368 C->record_failure(C2Compiler::retry_no_escape_analysis()); 2369 } 2370 return NULL; 2371 } 2372 orig_phi_worklist.append_if_missing(orig_phi); 2373 const TypePtr *atype = C->get_adr_type(alias_idx); 2374 result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype); 2375 C->copy_node_notes_to(result, orig_phi); 2376 igvn->set_type(result, result->bottom_type()); 2377 record_for_optimizer(result); 2378 set_map(orig_phi, result); 2379 new_created = true; 2380 return result; 2381 } 2382 2383 // 2384 // Return a new version of Memory Phi "orig_phi" with the inputs having the 2385 // specified alias index. 2386 // 2387 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) { 2388 assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory"); 2389 Compile *C = _compile; 2390 PhaseGVN* igvn = _igvn; 2391 bool new_phi_created; 2392 PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created); 2393 if (!new_phi_created) { 2394 return result; 2395 } 2396 GrowableArray<PhiNode *> phi_list; 2397 GrowableArray<uint> cur_input; 2398 PhiNode *phi = orig_phi; 2399 uint idx = 1; 2400 bool finished = false; 2401 while(!finished) { 2402 while (idx < phi->req()) { 2403 Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist); 2404 if (mem != NULL && mem->is_Phi()) { 2405 PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created); 2406 if (new_phi_created) { 2407 // found an phi for which we created a new split, push current one on worklist and begin 2408 // processing new one 2409 phi_list.push(phi); 2410 cur_input.push(idx); 2411 phi = mem->as_Phi(); 2412 result = newphi; 2413 idx = 1; 2414 continue; 2415 } else { 2416 mem = newphi; 2417 } 2418 } 2419 if (C->failing()) { 2420 return NULL; 2421 } 2422 result->set_req(idx++, mem); 2423 } 2424 #ifdef ASSERT 2425 // verify that the new Phi has an input for each input of the original 2426 assert( phi->req() == result->req(), "must have same number of inputs."); 2427 assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match"); 2428 #endif 2429 // Check if all new phi's inputs have specified alias index. 2430 // Otherwise use old phi. 2431 for (uint i = 1; i < phi->req(); i++) { 2432 Node* in = result->in(i); 2433 assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond."); 2434 } 2435 // we have finished processing a Phi, see if there are any more to do 2436 finished = (phi_list.length() == 0 ); 2437 if (!finished) { 2438 phi = phi_list.pop(); 2439 idx = cur_input.pop(); 2440 PhiNode *prev_result = get_map_phi(phi->_idx); 2441 prev_result->set_req(idx++, result); 2442 result = prev_result; 2443 } 2444 } 2445 return result; 2446 } 2447 2448 // 2449 // The next methods are derived from methods in MemNode. 2450 // 2451 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) { 2452 Node *mem = mmem; 2453 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally 2454 // means an array I have not precisely typed yet. Do not do any 2455 // alias stuff with it any time soon. 2456 if (toop->base() != Type::AnyPtr && 2457 !(toop->klass() != NULL && 2458 toop->klass()->is_java_lang_Object() && 2459 toop->offset() == Type::OffsetBot)) { 2460 mem = mmem->memory_at(alias_idx); 2461 // Update input if it is progress over what we have now 2462 } 2463 return mem; 2464 } 2465 2466 // 2467 // Move memory users to their memory slices. 2468 // 2469 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) { 2470 Compile* C = _compile; 2471 PhaseGVN* igvn = _igvn; 2472 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 2473 assert(tp != NULL, "ptr type"); 2474 int alias_idx = C->get_alias_index(tp); 2475 int general_idx = C->get_general_index(alias_idx); 2476 2477 // Move users first 2478 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2479 Node* use = n->fast_out(i); 2480 if (use->is_MergeMem()) { 2481 MergeMemNode* mmem = use->as_MergeMem(); 2482 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); 2483 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 2484 continue; // Nothing to do 2485 } 2486 // Replace previous general reference to mem node. 2487 uint orig_uniq = C->unique(); 2488 Node* m = find_inst_mem(n, general_idx, orig_phis); 2489 assert(orig_uniq == C->unique(), "no new nodes"); 2490 mmem->set_memory_at(general_idx, m); 2491 --imax; 2492 --i; 2493 } else if (use->is_MemBar()) { 2494 assert(!use->is_Initialize(), "initializing stores should not be moved"); 2495 if (use->req() > MemBarNode::Precedent && 2496 use->in(MemBarNode::Precedent) == n) { 2497 // Don't move related membars. 2498 record_for_optimizer(use); 2499 continue; 2500 } 2501 tp = use->as_MemBar()->adr_type()->isa_ptr(); 2502 if (tp != NULL && C->get_alias_index(tp) == alias_idx || 2503 alias_idx == general_idx) { 2504 continue; // Nothing to do 2505 } 2506 // Move to general memory slice. 2507 uint orig_uniq = C->unique(); 2508 Node* m = find_inst_mem(n, general_idx, orig_phis); 2509 assert(orig_uniq == C->unique(), "no new nodes"); 2510 igvn->hash_delete(use); 2511 imax -= use->replace_edge(n, m); 2512 igvn->hash_insert(use); 2513 record_for_optimizer(use); 2514 --i; 2515 #ifdef ASSERT 2516 } else if (use->is_Mem()) { 2517 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) { 2518 // Don't move related cardmark. 2519 continue; 2520 } 2521 // Memory nodes should have new memory input. 2522 tp = igvn->type(use->in(MemNode::Address))->isa_ptr(); 2523 assert(tp != NULL, "ptr type"); 2524 int idx = C->get_alias_index(tp); 2525 assert(get_map(use->_idx) != NULL || idx == alias_idx, 2526 "Following memory nodes should have new memory input or be on the same memory slice"); 2527 } else if (use->is_Phi()) { 2528 // Phi nodes should be split and moved already. 2529 tp = use->as_Phi()->adr_type()->isa_ptr(); 2530 assert(tp != NULL, "ptr type"); 2531 int idx = C->get_alias_index(tp); 2532 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice"); 2533 } else { 2534 use->dump(); 2535 assert(false, "should not be here"); 2536 #endif 2537 } 2538 } 2539 } 2540 2541 // 2542 // Search memory chain of "mem" to find a MemNode whose address 2543 // is the specified alias index. 2544 // 2545 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) { 2546 if (orig_mem == NULL) 2547 return orig_mem; 2548 Compile* C = _compile; 2549 PhaseGVN* igvn = _igvn; 2550 const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr(); 2551 bool is_instance = (toop != NULL) && toop->is_known_instance(); 2552 Node *start_mem = C->start()->proj_out(TypeFunc::Memory); 2553 Node *prev = NULL; 2554 Node *result = orig_mem; 2555 while (prev != result) { 2556 prev = result; 2557 if (result == start_mem) 2558 break; // hit one of our sentinels 2559 if (result->is_Mem()) { 2560 const Type *at = igvn->type(result->in(MemNode::Address)); 2561 if (at == Type::TOP) 2562 break; // Dead 2563 assert (at->isa_ptr() != NULL, "pointer type required."); 2564 int idx = C->get_alias_index(at->is_ptr()); 2565 if (idx == alias_idx) 2566 break; // Found 2567 if (!is_instance && (at->isa_oopptr() == NULL || 2568 !at->is_oopptr()->is_known_instance())) { 2569 break; // Do not skip store to general memory slice. 2570 } 2571 result = result->in(MemNode::Memory); 2572 } 2573 if (!is_instance) 2574 continue; // don't search further for non-instance types 2575 // skip over a call which does not affect this memory slice 2576 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) { 2577 Node *proj_in = result->in(0); 2578 if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) { 2579 break; // hit one of our sentinels 2580 } else if (proj_in->is_Call()) { 2581 CallNode *call = proj_in->as_Call(); 2582 if (!call->may_modify(toop, igvn)) { 2583 result = call->in(TypeFunc::Memory); 2584 } 2585 } else if (proj_in->is_Initialize()) { 2586 AllocateNode* alloc = proj_in->as_Initialize()->allocation(); 2587 // Stop if this is the initialization for the object instance which 2588 // which contains this memory slice, otherwise skip over it. 2589 if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) { 2590 result = proj_in->in(TypeFunc::Memory); 2591 } 2592 } else if (proj_in->is_MemBar()) { 2593 result = proj_in->in(TypeFunc::Memory); 2594 } 2595 } else if (result->is_MergeMem()) { 2596 MergeMemNode *mmem = result->as_MergeMem(); 2597 result = step_through_mergemem(mmem, alias_idx, toop); 2598 if (result == mmem->base_memory()) { 2599 // Didn't find instance memory, search through general slice recursively. 2600 result = mmem->memory_at(C->get_general_index(alias_idx)); 2601 result = find_inst_mem(result, alias_idx, orig_phis); 2602 if (C->failing()) { 2603 return NULL; 2604 } 2605 mmem->set_memory_at(alias_idx, result); 2606 } 2607 } else if (result->is_Phi() && 2608 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { 2609 Node *un = result->as_Phi()->unique_input(igvn); 2610 if (un != NULL) { 2611 orig_phis.append_if_missing(result->as_Phi()); 2612 result = un; 2613 } else { 2614 break; 2615 } 2616 } else if (result->is_ClearArray()) { 2617 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) { 2618 // Can not bypass initialization of the instance 2619 // we are looking for. 2620 break; 2621 } 2622 // Otherwise skip it (the call updated 'result' value). 2623 } else if (result->Opcode() == Op_SCMemProj) { 2624 Node* mem = result->in(0); 2625 Node* adr = NULL; 2626 if (mem->is_LoadStore()) { 2627 adr = mem->in(MemNode::Address); 2628 } else { 2629 assert(mem->Opcode() == Op_EncodeISOArray, "sanity"); 2630 adr = mem->in(3); // Memory edge corresponds to destination array 2631 } 2632 const Type *at = igvn->type(adr); 2633 if (at != Type::TOP) { 2634 assert (at->isa_ptr() != NULL, "pointer type required."); 2635 int idx = C->get_alias_index(at->is_ptr()); 2636 assert(idx != alias_idx, "Object is not scalar replaceable if a LoadStore node access its field"); 2637 break; 2638 } 2639 result = mem->in(MemNode::Memory); 2640 } 2641 } 2642 if (result->is_Phi()) { 2643 PhiNode *mphi = result->as_Phi(); 2644 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); 2645 const TypePtr *t = mphi->adr_type(); 2646 if (!is_instance) { 2647 // Push all non-instance Phis on the orig_phis worklist to update inputs 2648 // during Phase 4 if needed. 2649 orig_phis.append_if_missing(mphi); 2650 } else if (C->get_alias_index(t) != alias_idx) { 2651 // Create a new Phi with the specified alias index type. 2652 result = split_memory_phi(mphi, alias_idx, orig_phis); 2653 } 2654 } 2655 // the result is either MemNode, PhiNode, InitializeNode. 2656 return result; 2657 } 2658 2659 // 2660 // Convert the types of unescaped object to instance types where possible, 2661 // propagate the new type information through the graph, and update memory 2662 // edges and MergeMem inputs to reflect the new type. 2663 // 2664 // We start with allocations (and calls which may be allocations) on alloc_worklist. 2665 // The processing is done in 4 phases: 2666 // 2667 // Phase 1: Process possible allocations from alloc_worklist. Create instance 2668 // types for the CheckCastPP for allocations where possible. 2669 // Propagate the the new types through users as follows: 2670 // casts and Phi: push users on alloc_worklist 2671 // AddP: cast Base and Address inputs to the instance type 2672 // push any AddP users on alloc_worklist and push any memnode 2673 // users onto memnode_worklist. 2674 // Phase 2: Process MemNode's from memnode_worklist. compute new address type and 2675 // search the Memory chain for a store with the appropriate type 2676 // address type. If a Phi is found, create a new version with 2677 // the appropriate memory slices from each of the Phi inputs. 2678 // For stores, process the users as follows: 2679 // MemNode: push on memnode_worklist 2680 // MergeMem: push on mergemem_worklist 2681 // Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice 2682 // moving the first node encountered of each instance type to the 2683 // the input corresponding to its alias index. 2684 // appropriate memory slice. 2685 // Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes. 2686 // 2687 // In the following example, the CheckCastPP nodes are the cast of allocation 2688 // results and the allocation of node 29 is unescaped and eligible to be an 2689 // instance type. 2690 // 2691 // We start with: 2692 // 2693 // 7 Parm #memory 2694 // 10 ConI "12" 2695 // 19 CheckCastPP "Foo" 2696 // 20 AddP _ 19 19 10 Foo+12 alias_index=4 2697 // 29 CheckCastPP "Foo" 2698 // 30 AddP _ 29 29 10 Foo+12 alias_index=4 2699 // 2700 // 40 StoreP 25 7 20 ... alias_index=4 2701 // 50 StoreP 35 40 30 ... alias_index=4 2702 // 60 StoreP 45 50 20 ... alias_index=4 2703 // 70 LoadP _ 60 30 ... alias_index=4 2704 // 80 Phi 75 50 60 Memory alias_index=4 2705 // 90 LoadP _ 80 30 ... alias_index=4 2706 // 100 LoadP _ 80 20 ... alias_index=4 2707 // 2708 // 2709 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24 2710 // and creating a new alias index for node 30. This gives: 2711 // 2712 // 7 Parm #memory 2713 // 10 ConI "12" 2714 // 19 CheckCastPP "Foo" 2715 // 20 AddP _ 19 19 10 Foo+12 alias_index=4 2716 // 29 CheckCastPP "Foo" iid=24 2717 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24 2718 // 2719 // 40 StoreP 25 7 20 ... alias_index=4 2720 // 50 StoreP 35 40 30 ... alias_index=6 2721 // 60 StoreP 45 50 20 ... alias_index=4 2722 // 70 LoadP _ 60 30 ... alias_index=6 2723 // 80 Phi 75 50 60 Memory alias_index=4 2724 // 90 LoadP _ 80 30 ... alias_index=6 2725 // 100 LoadP _ 80 20 ... alias_index=4 2726 // 2727 // In phase 2, new memory inputs are computed for the loads and stores, 2728 // And a new version of the phi is created. In phase 4, the inputs to 2729 // node 80 are updated and then the memory nodes are updated with the 2730 // values computed in phase 2. This results in: 2731 // 2732 // 7 Parm #memory 2733 // 10 ConI "12" 2734 // 19 CheckCastPP "Foo" 2735 // 20 AddP _ 19 19 10 Foo+12 alias_index=4 2736 // 29 CheckCastPP "Foo" iid=24 2737 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24 2738 // 2739 // 40 StoreP 25 7 20 ... alias_index=4 2740 // 50 StoreP 35 7 30 ... alias_index=6 2741 // 60 StoreP 45 40 20 ... alias_index=4 2742 // 70 LoadP _ 50 30 ... alias_index=6 2743 // 80 Phi 75 40 60 Memory alias_index=4 2744 // 120 Phi 75 50 50 Memory alias_index=6 2745 // 90 LoadP _ 120 30 ... alias_index=6 2746 // 100 LoadP _ 80 20 ... alias_index=4 2747 // 2748 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) { 2749 GrowableArray<Node *> memnode_worklist; 2750 GrowableArray<PhiNode *> orig_phis; 2751 PhaseIterGVN *igvn = _igvn; 2752 uint new_index_start = (uint) _compile->num_alias_types(); 2753 Arena* arena = Thread::current()->resource_area(); 2754 VectorSet visited(arena); 2755 ideal_nodes.clear(); // Reset for use with set_map/get_map. 2756 uint unique_old = _compile->unique(); 2757 2758 // Phase 1: Process possible allocations from alloc_worklist. 2759 // Create instance types for the CheckCastPP for allocations where possible. 2760 // 2761 // (Note: don't forget to change the order of the second AddP node on 2762 // the alloc_worklist if the order of the worklist processing is changed, 2763 // see the comment in find_second_addp().) 2764 // 2765 while (alloc_worklist.length() != 0) { 2766 Node *n = alloc_worklist.pop(); 2767 uint ni = n->_idx; 2768 if (n->is_Call()) { 2769 CallNode *alloc = n->as_Call(); 2770 // copy escape information to call node 2771 PointsToNode* ptn = ptnode_adr(alloc->_idx); 2772 PointsToNode::EscapeState es = ptn->escape_state(); 2773 // We have an allocation or call which returns a Java object, 2774 // see if it is unescaped. 2775 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) 2776 continue; 2777 // Find CheckCastPP for the allocate or for the return value of a call 2778 n = alloc->result_cast(); 2779 if (n == NULL) { // No uses except Initialize node 2780 if (alloc->is_Allocate()) { 2781 // Set the scalar_replaceable flag for allocation 2782 // so it could be eliminated if it has no uses. 2783 alloc->as_Allocate()->_is_scalar_replaceable = true; 2784 } 2785 if (alloc->is_CallStaticJava()) { 2786 // Set the scalar_replaceable flag for boxing method 2787 // so it could be eliminated if it has no uses. 2788 alloc->as_CallStaticJava()->_is_scalar_replaceable = true; 2789 } 2790 continue; 2791 } 2792 if (!n->is_CheckCastPP()) { // not unique CheckCastPP. 2793 assert(!alloc->is_Allocate(), "allocation should have unique type"); 2794 continue; 2795 } 2796 2797 // The inline code for Object.clone() casts the allocation result to 2798 // java.lang.Object and then to the actual type of the allocated 2799 // object. Detect this case and use the second cast. 2800 // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when 2801 // the allocation result is cast to java.lang.Object and then 2802 // to the actual Array type. 2803 if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL 2804 && (alloc->is_AllocateArray() || 2805 igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) { 2806 Node *cast2 = NULL; 2807 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2808 Node *use = n->fast_out(i); 2809 if (use->is_CheckCastPP()) { 2810 cast2 = use; 2811 break; 2812 } 2813 } 2814 if (cast2 != NULL) { 2815 n = cast2; 2816 } else { 2817 // Non-scalar replaceable if the allocation type is unknown statically 2818 // (reflection allocation), the object can't be restored during 2819 // deoptimization without precise type. 2820 continue; 2821 } 2822 } 2823 if (alloc->is_Allocate()) { 2824 // Set the scalar_replaceable flag for allocation 2825 // so it could be eliminated. 2826 alloc->as_Allocate()->_is_scalar_replaceable = true; 2827 } 2828 if (alloc->is_CallStaticJava()) { 2829 // Set the scalar_replaceable flag for boxing method 2830 // so it could be eliminated. 2831 alloc->as_CallStaticJava()->_is_scalar_replaceable = true; 2832 } 2833 set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state 2834 // in order for an object to be scalar-replaceable, it must be: 2835 // - a direct allocation (not a call returning an object) 2836 // - non-escaping 2837 // - eligible to be a unique type 2838 // - not determined to be ineligible by escape analysis 2839 set_map(alloc, n); 2840 set_map(n, alloc); 2841 const TypeOopPtr *t = igvn->type(n)->isa_oopptr(); 2842 if (t == NULL) 2843 continue; // not a TypeOopPtr 2844 const TypeOopPtr* tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni); 2845 igvn->hash_delete(n); 2846 igvn->set_type(n, tinst); 2847 n->raise_bottom_type(tinst); 2848 igvn->hash_insert(n); 2849 record_for_optimizer(n); 2850 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) { 2851 2852 // First, put on the worklist all Field edges from Connection Graph 2853 // which is more accurate then putting immediate users from Ideal Graph. 2854 for (EdgeIterator e(ptn); e.has_next(); e.next()) { 2855 PointsToNode* tgt = e.get(); 2856 Node* use = tgt->ideal_node(); 2857 assert(tgt->is_Field() && use->is_AddP(), 2858 "only AddP nodes are Field edges in CG"); 2859 if (use->outcnt() > 0) { // Don't process dead nodes 2860 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base)); 2861 if (addp2 != NULL) { 2862 assert(alloc->is_AllocateArray(),"array allocation was expected"); 2863 alloc_worklist.append_if_missing(addp2); 2864 } 2865 alloc_worklist.append_if_missing(use); 2866 } 2867 } 2868 2869 // An allocation may have an Initialize which has raw stores. Scan 2870 // the users of the raw allocation result and push AddP users 2871 // on alloc_worklist. 2872 Node *raw_result = alloc->proj_out(TypeFunc::Parms); 2873 assert (raw_result != NULL, "must have an allocation result"); 2874 for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) { 2875 Node *use = raw_result->fast_out(i); 2876 if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes 2877 Node* addp2 = find_second_addp(use, raw_result); 2878 if (addp2 != NULL) { 2879 assert(alloc->is_AllocateArray(),"array allocation was expected"); 2880 alloc_worklist.append_if_missing(addp2); 2881 } 2882 alloc_worklist.append_if_missing(use); 2883 } else if (use->is_MemBar()) { 2884 memnode_worklist.append_if_missing(use); 2885 } 2886 } 2887 } 2888 } else if (n->is_AddP()) { 2889 JavaObjectNode* jobj = unique_java_object(get_addp_base(n)); 2890 if (jobj == NULL || jobj == phantom_obj) { 2891 #ifdef ASSERT 2892 ptnode_adr(get_addp_base(n)->_idx)->dump(); 2893 ptnode_adr(n->_idx)->dump(); 2894 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation"); 2895 #endif 2896 _compile->record_failure(C2Compiler::retry_no_escape_analysis()); 2897 return; 2898 } 2899 Node *base = get_map(jobj->idx()); // CheckCastPP node 2900 if (!split_AddP(n, base)) continue; // wrong type from dead path 2901 } else if (n->is_Phi() || 2902 n->is_CheckCastPP() || 2903 n->is_EncodeP() || 2904 n->is_DecodeN() || 2905 (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) { 2906 if (visited.test_set(n->_idx)) { 2907 assert(n->is_Phi(), "loops only through Phi's"); 2908 continue; // already processed 2909 } 2910 JavaObjectNode* jobj = unique_java_object(n); 2911 if (jobj == NULL || jobj == phantom_obj) { 2912 #ifdef ASSERT 2913 ptnode_adr(n->_idx)->dump(); 2914 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation"); 2915 #endif 2916 _compile->record_failure(C2Compiler::retry_no_escape_analysis()); 2917 return; 2918 } else { 2919 Node *val = get_map(jobj->idx()); // CheckCastPP node 2920 TypeNode *tn = n->as_Type(); 2921 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr(); 2922 assert(tinst != NULL && tinst->is_known_instance() && 2923 tinst->instance_id() == jobj->idx() , "instance type expected."); 2924 2925 const Type *tn_type = igvn->type(tn); 2926 const TypeOopPtr *tn_t; 2927 if (tn_type->isa_narrowoop()) { 2928 tn_t = tn_type->make_ptr()->isa_oopptr(); 2929 } else { 2930 tn_t = tn_type->isa_oopptr(); 2931 } 2932 if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) { 2933 if (tn_type->isa_narrowoop()) { 2934 tn_type = tinst->make_narrowoop(); 2935 } else { 2936 tn_type = tinst; 2937 } 2938 igvn->hash_delete(tn); 2939 igvn->set_type(tn, tn_type); 2940 tn->set_type(tn_type); 2941 igvn->hash_insert(tn); 2942 record_for_optimizer(n); 2943 } else { 2944 assert(tn_type == TypePtr::NULL_PTR || 2945 tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()), 2946 "unexpected type"); 2947 continue; // Skip dead path with different type 2948 } 2949 } 2950 } else { 2951 debug_only(n->dump();) 2952 assert(false, "EA: unexpected node"); 2953 continue; 2954 } 2955 // push allocation's users on appropriate worklist 2956 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2957 Node *use = n->fast_out(i); 2958 if(use->is_Mem() && use->in(MemNode::Address) == n) { 2959 // Load/store to instance's field 2960 memnode_worklist.append_if_missing(use); 2961 } else if (use->is_MemBar()) { 2962 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge 2963 memnode_worklist.append_if_missing(use); 2964 } 2965 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes 2966 Node* addp2 = find_second_addp(use, n); 2967 if (addp2 != NULL) { 2968 alloc_worklist.append_if_missing(addp2); 2969 } 2970 alloc_worklist.append_if_missing(use); 2971 } else if (use->is_Phi() || 2972 use->is_CheckCastPP() || 2973 use->is_EncodeNarrowPtr() || 2974 use->is_DecodeNarrowPtr() || 2975 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) { 2976 alloc_worklist.append_if_missing(use); 2977 #ifdef ASSERT 2978 } else if (use->is_Mem()) { 2979 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path"); 2980 } else if (use->is_MergeMem()) { 2981 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 2982 } else if (use->is_SafePoint()) { 2983 // Look for MergeMem nodes for calls which reference unique allocation 2984 // (through CheckCastPP nodes) even for debug info. 2985 Node* m = use->in(TypeFunc::Memory); 2986 if (m->is_MergeMem()) { 2987 assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 2988 } 2989 } else if (use->Opcode() == Op_EncodeISOArray) { 2990 if (use->in(MemNode::Memory) == n || use->in(3) == n) { 2991 // EncodeISOArray overwrites destination array 2992 memnode_worklist.append_if_missing(use); 2993 } 2994 } else { 2995 uint op = use->Opcode(); 2996 if (!(op == Op_CmpP || op == Op_Conv2B || 2997 op == Op_CastP2X || op == Op_StoreCM || 2998 op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || 2999 op == Op_StrEquals || op == Op_StrIndexOf)) { 3000 n->dump(); 3001 use->dump(); 3002 assert(false, "EA: missing allocation reference path"); 3003 } 3004 #endif 3005 } 3006 } 3007 3008 } 3009 // New alias types were created in split_AddP(). 3010 uint new_index_end = (uint) _compile->num_alias_types(); 3011 assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1"); 3012 3013 // Phase 2: Process MemNode's from memnode_worklist. compute new address type and 3014 // compute new values for Memory inputs (the Memory inputs are not 3015 // actually updated until phase 4.) 3016 if (memnode_worklist.length() == 0) 3017 return; // nothing to do 3018 while (memnode_worklist.length() != 0) { 3019 Node *n = memnode_worklist.pop(); 3020 if (visited.test_set(n->_idx)) 3021 continue; 3022 if (n->is_Phi() || n->is_ClearArray()) { 3023 // we don't need to do anything, but the users must be pushed 3024 } else if (n->is_MemBar()) { // Initialize, MemBar nodes 3025 // we don't need to do anything, but the users must be pushed 3026 n = n->as_MemBar()->proj_out(TypeFunc::Memory); 3027 if (n == NULL) 3028 continue; 3029 } else if (n->Opcode() == Op_EncodeISOArray) { 3030 // get the memory projection 3031 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3032 Node *use = n->fast_out(i); 3033 if (use->Opcode() == Op_SCMemProj) { 3034 n = use; 3035 break; 3036 } 3037 } 3038 assert(n->Opcode() == Op_SCMemProj, "memory projection required"); 3039 } else { 3040 assert(n->is_Mem(), "memory node required."); 3041 Node *addr = n->in(MemNode::Address); 3042 const Type *addr_t = igvn->type(addr); 3043 if (addr_t == Type::TOP) 3044 continue; 3045 assert (addr_t->isa_ptr() != NULL, "pointer type required."); 3046 int alias_idx = _compile->get_alias_index(addr_t->is_ptr()); 3047 assert ((uint)alias_idx < new_index_end, "wrong alias index"); 3048 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis); 3049 if (_compile->failing()) { 3050 return; 3051 } 3052 if (mem != n->in(MemNode::Memory)) { 3053 // We delay the memory edge update since we need old one in 3054 // MergeMem code below when instances memory slices are separated. 3055 set_map(n, mem); 3056 } 3057 if (n->is_Load()) { 3058 continue; // don't push users 3059 } else if (n->is_LoadStore()) { 3060 // get the memory projection 3061 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3062 Node *use = n->fast_out(i); 3063 if (use->Opcode() == Op_SCMemProj) { 3064 n = use; 3065 break; 3066 } 3067 } 3068 assert(n->Opcode() == Op_SCMemProj, "memory projection required"); 3069 } 3070 } 3071 // push user on appropriate worklist 3072 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3073 Node *use = n->fast_out(i); 3074 if (use->is_Phi() || use->is_ClearArray()) { 3075 memnode_worklist.append_if_missing(use); 3076 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) { 3077 if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores 3078 continue; 3079 memnode_worklist.append_if_missing(use); 3080 } else if (use->is_MemBar()) { 3081 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge 3082 memnode_worklist.append_if_missing(use); 3083 } 3084 #ifdef ASSERT 3085 } else if(use->is_Mem()) { 3086 assert(use->in(MemNode::Memory) != n, "EA: missing memory path"); 3087 } else if (use->is_MergeMem()) { 3088 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 3089 } else if (use->Opcode() == Op_EncodeISOArray) { 3090 if (use->in(MemNode::Memory) == n || use->in(3) == n) { 3091 // EncodeISOArray overwrites destination array 3092 memnode_worklist.append_if_missing(use); 3093 } 3094 } else { 3095 uint op = use->Opcode(); 3096 if (!(op == Op_StoreCM || 3097 (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL && 3098 strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) || 3099 op == Op_AryEq || op == Op_StrComp || 3100 op == Op_StrEquals || op == Op_StrIndexOf)) { 3101 n->dump(); 3102 use->dump(); 3103 assert(false, "EA: missing memory path"); 3104 } 3105 #endif 3106 } 3107 } 3108 } 3109 3110 // Phase 3: Process MergeMem nodes from mergemem_worklist. 3111 // Walk each memory slice moving the first node encountered of each 3112 // instance type to the the input corresponding to its alias index. 3113 uint length = _mergemem_worklist.length(); 3114 for( uint next = 0; next < length; ++next ) { 3115 MergeMemNode* nmm = _mergemem_worklist.at(next); 3116 assert(!visited.test_set(nmm->_idx), "should not be visited before"); 3117 // Note: we don't want to use MergeMemStream here because we only want to 3118 // scan inputs which exist at the start, not ones we add during processing. 3119 // Note 2: MergeMem may already contains instance memory slices added 3120 // during find_inst_mem() call when memory nodes were processed above. 3121 igvn->hash_delete(nmm); 3122 uint nslices = nmm->req(); 3123 for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) { 3124 Node* mem = nmm->in(i); 3125 Node* cur = NULL; 3126 if (mem == NULL || mem->is_top()) 3127 continue; 3128 // First, update mergemem by moving memory nodes to corresponding slices 3129 // if their type became more precise since this mergemem was created. 3130 while (mem->is_Mem()) { 3131 const Type *at = igvn->type(mem->in(MemNode::Address)); 3132 if (at != Type::TOP) { 3133 assert (at->isa_ptr() != NULL, "pointer type required."); 3134 uint idx = (uint)_compile->get_alias_index(at->is_ptr()); 3135 if (idx == i) { 3136 if (cur == NULL) 3137 cur = mem; 3138 } else { 3139 if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) { 3140 nmm->set_memory_at(idx, mem); 3141 } 3142 } 3143 } 3144 mem = mem->in(MemNode::Memory); 3145 } 3146 nmm->set_memory_at(i, (cur != NULL) ? cur : mem); 3147 // Find any instance of the current type if we haven't encountered 3148 // already a memory slice of the instance along the memory chain. 3149 for (uint ni = new_index_start; ni < new_index_end; ni++) { 3150 if((uint)_compile->get_general_index(ni) == i) { 3151 Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni); 3152 if (nmm->is_empty_memory(m)) { 3153 Node* result = find_inst_mem(mem, ni, orig_phis); 3154 if (_compile->failing()) { 3155 return; 3156 } 3157 nmm->set_memory_at(ni, result); 3158 } 3159 } 3160 } 3161 } 3162 // Find the rest of instances values 3163 for (uint ni = new_index_start; ni < new_index_end; ni++) { 3164 const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr(); 3165 Node* result = step_through_mergemem(nmm, ni, tinst); 3166 if (result == nmm->base_memory()) { 3167 // Didn't find instance memory, search through general slice recursively. 3168 result = nmm->memory_at(_compile->get_general_index(ni)); 3169 result = find_inst_mem(result, ni, orig_phis); 3170 if (_compile->failing()) { 3171 return; 3172 } 3173 nmm->set_memory_at(ni, result); 3174 } 3175 } 3176 igvn->hash_insert(nmm); 3177 record_for_optimizer(nmm); 3178 } 3179 3180 // Phase 4: Update the inputs of non-instance memory Phis and 3181 // the Memory input of memnodes 3182 // First update the inputs of any non-instance Phi's from 3183 // which we split out an instance Phi. Note we don't have 3184 // to recursively process Phi's encounted on the input memory 3185 // chains as is done in split_memory_phi() since they will 3186 // also be processed here. 3187 for (int j = 0; j < orig_phis.length(); j++) { 3188 PhiNode *phi = orig_phis.at(j); 3189 int alias_idx = _compile->get_alias_index(phi->adr_type()); 3190 igvn->hash_delete(phi); 3191 for (uint i = 1; i < phi->req(); i++) { 3192 Node *mem = phi->in(i); 3193 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis); 3194 if (_compile->failing()) { 3195 return; 3196 } 3197 if (mem != new_mem) { 3198 phi->set_req(i, new_mem); 3199 } 3200 } 3201 igvn->hash_insert(phi); 3202 record_for_optimizer(phi); 3203 } 3204 3205 // Update the memory inputs of MemNodes with the value we computed 3206 // in Phase 2 and move stores memory users to corresponding memory slices. 3207 // Disable memory split verification code until the fix for 6984348. 3208 // Currently it produces false negative results since it does not cover all cases. 3209 #if 0 // ifdef ASSERT 3210 visited.Reset(); 3211 Node_Stack old_mems(arena, _compile->unique() >> 2); 3212 #endif 3213 for (uint i = 0; i < ideal_nodes.size(); i++) { 3214 Node* n = ideal_nodes.at(i); 3215 Node* nmem = get_map(n->_idx); 3216 assert(nmem != NULL, "sanity"); 3217 if (n->is_Mem()) { 3218 #if 0 // ifdef ASSERT 3219 Node* old_mem = n->in(MemNode::Memory); 3220 if (!visited.test_set(old_mem->_idx)) { 3221 old_mems.push(old_mem, old_mem->outcnt()); 3222 } 3223 #endif 3224 assert(n->in(MemNode::Memory) != nmem, "sanity"); 3225 if (!n->is_Load()) { 3226 // Move memory users of a store first. 3227 move_inst_mem(n, orig_phis); 3228 } 3229 // Now update memory input 3230 igvn->hash_delete(n); 3231 n->set_req(MemNode::Memory, nmem); 3232 igvn->hash_insert(n); 3233 record_for_optimizer(n); 3234 } else { 3235 assert(n->is_Allocate() || n->is_CheckCastPP() || 3236 n->is_AddP() || n->is_Phi(), "unknown node used for set_map()"); 3237 } 3238 } 3239 #if 0 // ifdef ASSERT 3240 // Verify that memory was split correctly 3241 while (old_mems.is_nonempty()) { 3242 Node* old_mem = old_mems.node(); 3243 uint old_cnt = old_mems.index(); 3244 old_mems.pop(); 3245 assert(old_cnt == old_mem->outcnt(), "old mem could be lost"); 3246 } 3247 #endif 3248 } 3249 3250 #ifndef PRODUCT 3251 static const char *node_type_names[] = { 3252 "UnknownType", 3253 "JavaObject", 3254 "LocalVar", 3255 "Field", 3256 "Arraycopy" 3257 }; 3258 3259 static const char *esc_names[] = { 3260 "UnknownEscape", 3261 "NoEscape", 3262 "ArgEscape", 3263 "GlobalEscape" 3264 }; 3265 3266 void PointsToNode::dump(bool print_state) const { 3267 NodeType nt = node_type(); 3268 tty->print("%s ", node_type_names[(int) nt]); 3269 if (print_state) { 3270 EscapeState es = escape_state(); 3271 EscapeState fields_es = fields_escape_state(); 3272 tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]); 3273 if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) 3274 tty->print("NSR "); 3275 } 3276 if (is_Field()) { 3277 FieldNode* f = (FieldNode*)this; 3278 if (f->is_oop()) 3279 tty->print("oop "); 3280 if (f->offset() > 0) 3281 tty->print("+%d ", f->offset()); 3282 tty->print("("); 3283 for (BaseIterator i(f); i.has_next(); i.next()) { 3284 PointsToNode* b = i.get(); 3285 tty->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : "")); 3286 } 3287 tty->print(" )"); 3288 } 3289 tty->print("["); 3290 for (EdgeIterator i(this); i.has_next(); i.next()) { 3291 PointsToNode* e = i.get(); 3292 tty->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : ""); 3293 } 3294 tty->print(" ["); 3295 for (UseIterator i(this); i.has_next(); i.next()) { 3296 PointsToNode* u = i.get(); 3297 bool is_base = false; 3298 if (PointsToNode::is_base_use(u)) { 3299 is_base = true; 3300 u = PointsToNode::get_use_node(u)->as_Field(); 3301 } 3302 tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : ""); 3303 } 3304 tty->print(" ]] "); 3305 if (_node == NULL) 3306 tty->print_cr("<null>"); 3307 else 3308 _node->dump(); 3309 } 3310 3311 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { 3312 bool first = true; 3313 int ptnodes_length = ptnodes_worklist.length(); 3314 for (int i = 0; i < ptnodes_length; i++) { 3315 PointsToNode *ptn = ptnodes_worklist.at(i); 3316 if (ptn == NULL || !ptn->is_JavaObject()) 3317 continue; 3318 PointsToNode::EscapeState es = ptn->escape_state(); 3319 if ((es != PointsToNode::NoEscape) && !Verbose) { 3320 continue; 3321 } 3322 Node* n = ptn->ideal_node(); 3323 if (n->is_Allocate() || (n->is_CallStaticJava() && 3324 n->as_CallStaticJava()->is_boxing_method())) { 3325 if (first) { 3326 tty->cr(); 3327 tty->print("======== Connection graph for "); 3328 _compile->method()->print_short_name(); 3329 tty->cr(); 3330 first = false; 3331 } 3332 ptn->dump(); 3333 // Print all locals and fields which reference this allocation 3334 for (UseIterator j(ptn); j.has_next(); j.next()) { 3335 PointsToNode* use = j.get(); 3336 if (use->is_LocalVar()) { 3337 use->dump(Verbose); 3338 } else if (Verbose) { 3339 use->dump(); 3340 } 3341 } 3342 tty->cr(); 3343 } 3344 } 3345 } 3346 #endif