1 /* 2 * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "ci/bcEscapeAnalyzer.hpp" 27 #include "compiler/compileLog.hpp" 28 #include "libadt/vectset.hpp" 29 #include "memory/allocation.hpp" 30 #include "opto/c2compiler.hpp" 31 #include "opto/callnode.hpp" 32 #include "opto/cfgnode.hpp" 33 #include "opto/compile.hpp" 34 #include "opto/escape.hpp" 35 #include "opto/phaseX.hpp" 36 #include "opto/movenode.hpp" 37 #include "opto/rootnode.hpp" 38 39 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) : 40 _nodes(C->comp_arena(), C->unique(), C->unique(), NULL), 41 _in_worklist(C->comp_arena()), 42 _next_pidx(0), 43 _collecting(true), 44 _verify(false), 45 _compile(C), 46 _igvn(igvn), 47 _node_map(C->comp_arena()) { 48 // Add unknown java object. 49 add_java_object(C->top(), PointsToNode::GlobalEscape); 50 phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject(); 51 // Add ConP(#NULL) and ConN(#NULL) nodes. 52 Node* oop_null = igvn->zerocon(T_OBJECT); 53 assert(oop_null->_idx < nodes_size(), "should be created already"); 54 add_java_object(oop_null, PointsToNode::NoEscape); 55 null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject(); 56 if (UseCompressedOops) { 57 Node* noop_null = igvn->zerocon(T_NARROWOOP); 58 assert(noop_null->_idx < nodes_size(), "should be created already"); 59 map_ideal_node(noop_null, null_obj); 60 } 61 _pcmp_neq = NULL; // Should be initialized 62 _pcmp_eq = NULL; 63 } 64 65 bool ConnectionGraph::has_candidates(Compile *C) { 66 // EA brings benefits only when the code has allocations and/or locks which 67 // are represented by ideal Macro nodes. 68 int cnt = C->macro_count(); 69 for (int i = 0; i < cnt; i++) { 70 Node *n = C->macro_node(i); 71 if (n->is_Allocate()) 72 return true; 73 if (n->is_Lock()) { 74 Node* obj = n->as_Lock()->obj_node()->uncast(); 75 if (!(obj->is_Parm() || obj->is_Con())) 76 return true; 77 } 78 if (n->is_CallStaticJava() && 79 n->as_CallStaticJava()->is_boxing_method()) { 80 return true; 81 } 82 } 83 return false; 84 } 85 86 void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) { 87 Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]); 88 ResourceMark rm; 89 90 // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction 91 // to create space for them in ConnectionGraph::_nodes[]. 92 Node* oop_null = igvn->zerocon(T_OBJECT); 93 Node* noop_null = igvn->zerocon(T_NARROWOOP); 94 ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn); 95 // Perform escape analysis 96 if (congraph->compute_escape()) { 97 // There are non escaping objects. 98 C->set_congraph(congraph); 99 } 100 // Cleanup. 101 if (oop_null->outcnt() == 0) 102 igvn->hash_delete(oop_null); 103 if (noop_null->outcnt() == 0) 104 igvn->hash_delete(noop_null); 105 } 106 107 bool ConnectionGraph::compute_escape() { 108 Compile* C = _compile; 109 PhaseGVN* igvn = _igvn; 110 111 // Worklists used by EA. 112 Unique_Node_List delayed_worklist; 113 GrowableArray<Node*> alloc_worklist; 114 GrowableArray<Node*> ptr_cmp_worklist; 115 GrowableArray<Node*> storestore_worklist; 116 GrowableArray<PointsToNode*> ptnodes_worklist; 117 GrowableArray<JavaObjectNode*> java_objects_worklist; 118 GrowableArray<JavaObjectNode*> non_escaped_worklist; 119 GrowableArray<FieldNode*> oop_fields_worklist; 120 DEBUG_ONLY( GrowableArray<Node*> addp_worklist; ) 121 122 { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]); 123 124 // 1. Populate Connection Graph (CG) with PointsTo nodes. 125 ideal_nodes.map(C->live_nodes(), NULL); // preallocate space 126 // Initialize worklist 127 if (C->root() != NULL) { 128 ideal_nodes.push(C->root()); 129 } 130 // Processed ideal nodes are unique on ideal_nodes list 131 // but several ideal nodes are mapped to the phantom_obj. 132 // To avoid duplicated entries on the following worklists 133 // add the phantom_obj only once to them. 134 ptnodes_worklist.append(phantom_obj); 135 java_objects_worklist.append(phantom_obj); 136 for( uint next = 0; next < ideal_nodes.size(); ++next ) { 137 Node* n = ideal_nodes.at(next); 138 // Create PointsTo nodes and add them to Connection Graph. Called 139 // only once per ideal node since ideal_nodes is Unique_Node list. 140 add_node_to_connection_graph(n, &delayed_worklist); 141 PointsToNode* ptn = ptnode_adr(n->_idx); 142 if (ptn != NULL && ptn != phantom_obj) { 143 ptnodes_worklist.append(ptn); 144 if (ptn->is_JavaObject()) { 145 java_objects_worklist.append(ptn->as_JavaObject()); 146 if ((n->is_Allocate() || n->is_CallStaticJava()) && 147 (ptn->escape_state() < PointsToNode::GlobalEscape)) { 148 // Only allocations and java static calls results are interesting. 149 non_escaped_worklist.append(ptn->as_JavaObject()); 150 } 151 } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) { 152 oop_fields_worklist.append(ptn->as_Field()); 153 } 154 } 155 if (n->is_MergeMem()) { 156 // Collect all MergeMem nodes to add memory slices for 157 // scalar replaceable objects in split_unique_types(). 158 _mergemem_worklist.append(n->as_MergeMem()); 159 } else if (OptimizePtrCompare && n->is_Cmp() && 160 (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) { 161 // Collect compare pointers nodes. 162 ptr_cmp_worklist.append(n); 163 } else if (n->is_MemBarStoreStore()) { 164 // Collect all MemBarStoreStore nodes so that depending on the 165 // escape status of the associated Allocate node some of them 166 // may be eliminated. 167 storestore_worklist.append(n); 168 } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) && 169 (n->req() > MemBarNode::Precedent)) { 170 record_for_optimizer(n); 171 #ifdef ASSERT 172 } else if (n->is_AddP()) { 173 // Collect address nodes for graph verification. 174 addp_worklist.append(n); 175 #endif 176 } 177 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 178 Node* m = n->fast_out(i); // Get user 179 ideal_nodes.push(m); 180 } 181 } 182 if (non_escaped_worklist.length() == 0) { 183 _collecting = false; 184 return false; // Nothing to do. 185 } 186 // Add final simple edges to graph. 187 while(delayed_worklist.size() > 0) { 188 Node* n = delayed_worklist.pop(); 189 add_final_edges(n); 190 } 191 int ptnodes_length = ptnodes_worklist.length(); 192 193 #ifdef ASSERT 194 if (VerifyConnectionGraph) { 195 // Verify that no new simple edges could be created and all 196 // local vars has edges. 197 _verify = true; 198 for (int next = 0; next < ptnodes_length; ++next) { 199 PointsToNode* ptn = ptnodes_worklist.at(next); 200 add_final_edges(ptn->ideal_node()); 201 if (ptn->is_LocalVar() && ptn->edge_count() == 0) { 202 ptn->dump(); 203 assert(ptn->as_LocalVar()->edge_count() > 0, "sanity"); 204 } 205 } 206 _verify = false; 207 } 208 #endif 209 210 // 2. Finish Graph construction by propagating references to all 211 // java objects through graph. 212 if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist, 213 java_objects_worklist, oop_fields_worklist)) { 214 // All objects escaped or hit time or iterations limits. 215 _collecting = false; 216 return false; 217 } 218 219 // 3. Adjust scalar_replaceable state of nonescaping objects and push 220 // scalar replaceable allocations on alloc_worklist for processing 221 // in split_unique_types(). 222 int non_escaped_length = non_escaped_worklist.length(); 223 for (int next = 0; next < non_escaped_length; next++) { 224 JavaObjectNode* ptn = non_escaped_worklist.at(next); 225 bool noescape = (ptn->escape_state() == PointsToNode::NoEscape); 226 Node* n = ptn->ideal_node(); 227 if (n->is_Allocate()) { 228 n->as_Allocate()->_is_non_escaping = noescape; 229 } 230 if (n->is_CallStaticJava()) { 231 n->as_CallStaticJava()->_is_non_escaping = noescape; 232 } 233 if (noescape && ptn->scalar_replaceable()) { 234 adjust_scalar_replaceable_state(ptn); 235 if (ptn->scalar_replaceable()) { 236 alloc_worklist.append(ptn->ideal_node()); 237 } 238 } 239 } 240 241 #ifdef ASSERT 242 if (VerifyConnectionGraph) { 243 // Verify that graph is complete - no new edges could be added or needed. 244 verify_connection_graph(ptnodes_worklist, non_escaped_worklist, 245 java_objects_worklist, addp_worklist); 246 } 247 assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build"); 248 assert(null_obj->escape_state() == PointsToNode::NoEscape && 249 null_obj->edge_count() == 0 && 250 !null_obj->arraycopy_src() && 251 !null_obj->arraycopy_dst(), "sanity"); 252 #endif 253 254 _collecting = false; 255 256 } // TracePhase t3("connectionGraph") 257 258 // 4. Optimize ideal graph based on EA information. 259 bool has_non_escaping_obj = (non_escaped_worklist.length() > 0); 260 if (has_non_escaping_obj) { 261 optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist); 262 } 263 264 #ifndef PRODUCT 265 if (PrintEscapeAnalysis) { 266 dump(ptnodes_worklist); // Dump ConnectionGraph 267 } 268 #endif 269 270 bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0); 271 #ifdef ASSERT 272 if (VerifyConnectionGraph) { 273 int alloc_length = alloc_worklist.length(); 274 for (int next = 0; next < alloc_length; ++next) { 275 Node* n = alloc_worklist.at(next); 276 PointsToNode* ptn = ptnode_adr(n->_idx); 277 assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity"); 278 } 279 } 280 #endif 281 282 // 5. Separate memory graph for scalar replaceable allcations. 283 if (has_scalar_replaceable_candidates && 284 C->AliasLevel() >= 3 && EliminateAllocations) { 285 // Now use the escape information to create unique types for 286 // scalar replaceable objects. 287 split_unique_types(alloc_worklist); 288 if (C->failing()) return false; 289 C->print_method(PHASE_AFTER_EA, 2); 290 291 #ifdef ASSERT 292 } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) { 293 tty->print("=== No allocations eliminated for "); 294 C->method()->print_short_name(); 295 if(!EliminateAllocations) { 296 tty->print(" since EliminateAllocations is off ==="); 297 } else if(!has_scalar_replaceable_candidates) { 298 tty->print(" since there are no scalar replaceable candidates ==="); 299 } else if(C->AliasLevel() < 3) { 300 tty->print(" since AliasLevel < 3 ==="); 301 } 302 tty->cr(); 303 #endif 304 } 305 return has_non_escaping_obj; 306 } 307 308 // Utility function for nodes that load an object 309 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) { 310 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 311 // ThreadLocal has RawPtr type. 312 const Type* t = _igvn->type(n); 313 if (t->make_ptr() != NULL) { 314 Node* adr = n->in(MemNode::Address); 315 #ifdef ASSERT 316 if (!adr->is_AddP()) { 317 assert(_igvn->type(adr)->isa_rawptr(), "sanity"); 318 } else { 319 assert((ptnode_adr(adr->_idx) == NULL || 320 ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity"); 321 } 322 #endif 323 add_local_var_and_edge(n, PointsToNode::NoEscape, 324 adr, delayed_worklist); 325 } 326 } 327 328 // Populate Connection Graph with PointsTo nodes and create simple 329 // connection graph edges. 330 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) { 331 assert(!_verify, "this method sould not be called for verification"); 332 PhaseGVN* igvn = _igvn; 333 uint n_idx = n->_idx; 334 PointsToNode* n_ptn = ptnode_adr(n_idx); 335 if (n_ptn != NULL) 336 return; // No need to redefine PointsTo node during first iteration. 337 338 if (n->is_Call()) { 339 // Arguments to allocation and locking don't escape. 340 if (n->is_AbstractLock()) { 341 // Put Lock and Unlock nodes on IGVN worklist to process them during 342 // first IGVN optimization when escape information is still available. 343 record_for_optimizer(n); 344 } else if (n->is_Allocate()) { 345 add_call_node(n->as_Call()); 346 record_for_optimizer(n); 347 } else { 348 if (n->is_CallStaticJava()) { 349 const char* name = n->as_CallStaticJava()->_name; 350 if (name != NULL && strcmp(name, "uncommon_trap") == 0) 351 return; // Skip uncommon traps 352 } 353 // Don't mark as processed since call's arguments have to be processed. 354 delayed_worklist->push(n); 355 // Check if a call returns an object. 356 if ((n->as_Call()->returns_pointer() && 357 n->as_Call()->proj_out(TypeFunc::Parms) != NULL) || 358 (n->is_CallStaticJava() && 359 n->as_CallStaticJava()->is_boxing_method())) { 360 add_call_node(n->as_Call()); 361 } 362 } 363 return; 364 } 365 // Put this check here to process call arguments since some call nodes 366 // point to phantom_obj. 367 if (n_ptn == phantom_obj || n_ptn == null_obj) 368 return; // Skip predefined nodes. 369 370 int opcode = n->Opcode(); 371 switch (opcode) { 372 case Op_AddP: { 373 Node* base = get_addp_base(n); 374 PointsToNode* ptn_base = ptnode_adr(base->_idx); 375 // Field nodes are created for all field types. They are used in 376 // adjust_scalar_replaceable_state() and split_unique_types(). 377 // Note, non-oop fields will have only base edges in Connection 378 // Graph because such fields are not used for oop loads and stores. 379 int offset = address_offset(n, igvn); 380 add_field(n, PointsToNode::NoEscape, offset); 381 if (ptn_base == NULL) { 382 delayed_worklist->push(n); // Process it later. 383 } else { 384 n_ptn = ptnode_adr(n_idx); 385 add_base(n_ptn->as_Field(), ptn_base); 386 } 387 break; 388 } 389 case Op_CastX2P: { 390 map_ideal_node(n, phantom_obj); 391 break; 392 } 393 case Op_CastPP: 394 case Op_CheckCastPP: 395 case Op_EncodeP: 396 case Op_DecodeN: 397 case Op_EncodePKlass: 398 case Op_DecodeNKlass: { 399 add_local_var_and_edge(n, PointsToNode::NoEscape, 400 n->in(1), delayed_worklist); 401 break; 402 } 403 case Op_CMoveP: { 404 add_local_var(n, PointsToNode::NoEscape); 405 // Do not add edges during first iteration because some could be 406 // not defined yet. 407 delayed_worklist->push(n); 408 break; 409 } 410 case Op_ConP: 411 case Op_ConN: 412 case Op_ConNKlass: { 413 // assume all oop constants globally escape except for null 414 PointsToNode::EscapeState es; 415 const Type* t = igvn->type(n); 416 if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) { 417 es = PointsToNode::NoEscape; 418 } else { 419 es = PointsToNode::GlobalEscape; 420 } 421 add_java_object(n, es); 422 break; 423 } 424 case Op_CreateEx: { 425 // assume that all exception objects globally escape 426 map_ideal_node(n, phantom_obj); 427 break; 428 } 429 case Op_LoadKlass: 430 case Op_LoadNKlass: { 431 // Unknown class is loaded 432 map_ideal_node(n, phantom_obj); 433 break; 434 } 435 case Op_LoadP: 436 case Op_LoadN: 437 case Op_LoadPLocked: { 438 add_objload_to_connection_graph(n, delayed_worklist); 439 break; 440 } 441 case Op_Parm: { 442 map_ideal_node(n, phantom_obj); 443 break; 444 } 445 case Op_PartialSubtypeCheck: { 446 // Produces Null or notNull and is used in only in CmpP so 447 // phantom_obj could be used. 448 map_ideal_node(n, phantom_obj); // Result is unknown 449 break; 450 } 451 case Op_Phi: { 452 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 453 // ThreadLocal has RawPtr type. 454 const Type* t = n->as_Phi()->type(); 455 if (t->make_ptr() != NULL) { 456 add_local_var(n, PointsToNode::NoEscape); 457 // Do not add edges during first iteration because some could be 458 // not defined yet. 459 delayed_worklist->push(n); 460 } 461 break; 462 } 463 case Op_Proj: { 464 // we are only interested in the oop result projection from a call 465 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() && 466 n->in(0)->as_Call()->returns_pointer()) { 467 add_local_var_and_edge(n, PointsToNode::NoEscape, 468 n->in(0), delayed_worklist); 469 } 470 break; 471 } 472 case Op_Rethrow: // Exception object escapes 473 case Op_Return: { 474 if (n->req() > TypeFunc::Parms && 475 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) { 476 // Treat Return value as LocalVar with GlobalEscape escape state. 477 add_local_var_and_edge(n, PointsToNode::GlobalEscape, 478 n->in(TypeFunc::Parms), delayed_worklist); 479 } 480 break; 481 } 482 case Op_GetAndSetP: 483 case Op_GetAndSetN: { 484 add_objload_to_connection_graph(n, delayed_worklist); 485 // fallthrough 486 } 487 case Op_StoreP: 488 case Op_StoreN: 489 case Op_StoreNKlass: 490 case Op_StorePConditional: 491 case Op_CompareAndSwapP: 492 case Op_CompareAndSwapN: { 493 Node* adr = n->in(MemNode::Address); 494 const Type *adr_type = igvn->type(adr); 495 adr_type = adr_type->make_ptr(); 496 if (adr_type == NULL) { 497 break; // skip dead nodes 498 } 499 if (adr_type->isa_oopptr() || 500 (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) && 501 (adr_type == TypeRawPtr::NOTNULL && 502 adr->in(AddPNode::Address)->is_Proj() && 503 adr->in(AddPNode::Address)->in(0)->is_Allocate())) { 504 delayed_worklist->push(n); // Process it later. 505 #ifdef ASSERT 506 assert(adr->is_AddP(), "expecting an AddP"); 507 if (adr_type == TypeRawPtr::NOTNULL) { 508 // Verify a raw address for a store captured by Initialize node. 509 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 510 assert(offs != Type::OffsetBot, "offset must be a constant"); 511 } 512 #endif 513 } else { 514 // Ignore copy the displaced header to the BoxNode (OSR compilation). 515 if (adr->is_BoxLock()) 516 break; 517 // Stored value escapes in unsafe access. 518 if ((opcode == Op_StoreP) && (adr_type == TypeRawPtr::BOTTOM)) { 519 // Pointer stores in G1 barriers looks like unsafe access. 520 // Ignore such stores to be able scalar replace non-escaping 521 // allocations. 522 if (UseG1GC && adr->is_AddP()) { 523 Node* base = get_addp_base(adr); 524 if (base->Opcode() == Op_LoadP && 525 base->in(MemNode::Address)->is_AddP()) { 526 adr = base->in(MemNode::Address); 527 Node* tls = get_addp_base(adr); 528 if (tls->Opcode() == Op_ThreadLocal) { 529 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 530 if (offs == in_bytes(JavaThread::satb_mark_queue_offset() + 531 PtrQueue::byte_offset_of_buf())) { 532 break; // G1 pre barier previous oop value store. 533 } 534 if (offs == in_bytes(JavaThread::dirty_card_queue_offset() + 535 PtrQueue::byte_offset_of_buf())) { 536 break; // G1 post barier card address store. 537 } 538 } 539 } 540 } 541 delayed_worklist->push(n); // Process unsafe access later. 542 break; 543 } 544 #ifdef ASSERT 545 n->dump(1); 546 assert(false, "not unsafe or G1 barrier raw StoreP"); 547 #endif 548 } 549 break; 550 } 551 case Op_AryEq: 552 case Op_StrComp: 553 case Op_StrEquals: 554 case Op_StrIndexOf: 555 case Op_EncodeISOArray: { 556 add_local_var(n, PointsToNode::ArgEscape); 557 delayed_worklist->push(n); // Process it later. 558 break; 559 } 560 case Op_ThreadLocal: { 561 add_java_object(n, PointsToNode::ArgEscape); 562 break; 563 } 564 default: 565 ; // Do nothing for nodes not related to EA. 566 } 567 return; 568 } 569 570 #ifdef ASSERT 571 #define ELSE_FAIL(name) \ 572 /* Should not be called for not pointer type. */ \ 573 n->dump(1); \ 574 assert(false, name); \ 575 break; 576 #else 577 #define ELSE_FAIL(name) \ 578 break; 579 #endif 580 581 // Add final simple edges to graph. 582 void ConnectionGraph::add_final_edges(Node *n) { 583 PointsToNode* n_ptn = ptnode_adr(n->_idx); 584 #ifdef ASSERT 585 if (_verify && n_ptn->is_JavaObject()) 586 return; // This method does not change graph for JavaObject. 587 #endif 588 589 if (n->is_Call()) { 590 process_call_arguments(n->as_Call()); 591 return; 592 } 593 assert(n->is_Store() || n->is_LoadStore() || 594 (n_ptn != NULL) && (n_ptn->ideal_node() != NULL), 595 "node should be registered already"); 596 int opcode = n->Opcode(); 597 switch (opcode) { 598 case Op_AddP: { 599 Node* base = get_addp_base(n); 600 PointsToNode* ptn_base = ptnode_adr(base->_idx); 601 assert(ptn_base != NULL, "field's base should be registered"); 602 add_base(n_ptn->as_Field(), ptn_base); 603 break; 604 } 605 case Op_CastPP: 606 case Op_CheckCastPP: 607 case Op_EncodeP: 608 case Op_DecodeN: 609 case Op_EncodePKlass: 610 case Op_DecodeNKlass: { 611 add_local_var_and_edge(n, PointsToNode::NoEscape, 612 n->in(1), NULL); 613 break; 614 } 615 case Op_CMoveP: { 616 for (uint i = CMoveNode::IfFalse; i < n->req(); i++) { 617 Node* in = n->in(i); 618 if (in == NULL) 619 continue; // ignore NULL 620 Node* uncast_in = in->uncast(); 621 if (uncast_in->is_top() || uncast_in == n) 622 continue; // ignore top or inputs which go back this node 623 PointsToNode* ptn = ptnode_adr(in->_idx); 624 assert(ptn != NULL, "node should be registered"); 625 add_edge(n_ptn, ptn); 626 } 627 break; 628 } 629 case Op_LoadP: 630 case Op_LoadN: 631 case Op_LoadPLocked: { 632 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 633 // ThreadLocal has RawPtr type. 634 const Type* t = _igvn->type(n); 635 if (t->make_ptr() != NULL) { 636 Node* adr = n->in(MemNode::Address); 637 add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL); 638 break; 639 } 640 ELSE_FAIL("Op_LoadP"); 641 } 642 case Op_Phi: { 643 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 644 // ThreadLocal has RawPtr type. 645 const Type* t = n->as_Phi()->type(); 646 if (t->make_ptr() != NULL) { 647 for (uint i = 1; i < n->req(); i++) { 648 Node* in = n->in(i); 649 if (in == NULL) 650 continue; // ignore NULL 651 Node* uncast_in = in->uncast(); 652 if (uncast_in->is_top() || uncast_in == n) 653 continue; // ignore top or inputs which go back this node 654 PointsToNode* ptn = ptnode_adr(in->_idx); 655 assert(ptn != NULL, "node should be registered"); 656 add_edge(n_ptn, ptn); 657 } 658 break; 659 } 660 ELSE_FAIL("Op_Phi"); 661 } 662 case Op_Proj: { 663 // we are only interested in the oop result projection from a call 664 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() && 665 n->in(0)->as_Call()->returns_pointer()) { 666 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL); 667 break; 668 } 669 ELSE_FAIL("Op_Proj"); 670 } 671 case Op_Rethrow: // Exception object escapes 672 case Op_Return: { 673 if (n->req() > TypeFunc::Parms && 674 _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) { 675 // Treat Return value as LocalVar with GlobalEscape escape state. 676 add_local_var_and_edge(n, PointsToNode::GlobalEscape, 677 n->in(TypeFunc::Parms), NULL); 678 break; 679 } 680 ELSE_FAIL("Op_Return"); 681 } 682 case Op_StoreP: 683 case Op_StoreN: 684 case Op_StoreNKlass: 685 case Op_StorePConditional: 686 case Op_CompareAndSwapP: 687 case Op_CompareAndSwapN: 688 case Op_GetAndSetP: 689 case Op_GetAndSetN: { 690 Node* adr = n->in(MemNode::Address); 691 const Type *adr_type = _igvn->type(adr); 692 adr_type = adr_type->make_ptr(); 693 #ifdef ASSERT 694 if (adr_type == NULL) { 695 n->dump(1); 696 assert(adr_type != NULL, "dead node should not be on list"); 697 break; 698 } 699 #endif 700 if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN) { 701 add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL); 702 } 703 if (adr_type->isa_oopptr() || 704 (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) && 705 (adr_type == TypeRawPtr::NOTNULL && 706 adr->in(AddPNode::Address)->is_Proj() && 707 adr->in(AddPNode::Address)->in(0)->is_Allocate())) { 708 // Point Address to Value 709 PointsToNode* adr_ptn = ptnode_adr(adr->_idx); 710 assert(adr_ptn != NULL && 711 adr_ptn->as_Field()->is_oop(), "node should be registered"); 712 Node *val = n->in(MemNode::ValueIn); 713 PointsToNode* ptn = ptnode_adr(val->_idx); 714 assert(ptn != NULL, "node should be registered"); 715 add_edge(adr_ptn, ptn); 716 break; 717 } else if ((opcode == Op_StoreP) && (adr_type == TypeRawPtr::BOTTOM)) { 718 // Stored value escapes in unsafe access. 719 Node *val = n->in(MemNode::ValueIn); 720 PointsToNode* ptn = ptnode_adr(val->_idx); 721 assert(ptn != NULL, "node should be registered"); 722 set_escape_state(ptn, PointsToNode::GlobalEscape); 723 // Add edge to object for unsafe access with offset. 724 PointsToNode* adr_ptn = ptnode_adr(adr->_idx); 725 assert(adr_ptn != NULL, "node should be registered"); 726 if (adr_ptn->is_Field()) { 727 assert(adr_ptn->as_Field()->is_oop(), "should be oop field"); 728 add_edge(adr_ptn, ptn); 729 } 730 break; 731 } 732 ELSE_FAIL("Op_StoreP"); 733 } 734 case Op_AryEq: 735 case Op_StrComp: 736 case Op_StrEquals: 737 case Op_StrIndexOf: 738 case Op_EncodeISOArray: { 739 // char[] arrays passed to string intrinsic do not escape but 740 // they are not scalar replaceable. Adjust escape state for them. 741 // Start from in(2) edge since in(1) is memory edge. 742 for (uint i = 2; i < n->req(); i++) { 743 Node* adr = n->in(i); 744 const Type* at = _igvn->type(adr); 745 if (!adr->is_top() && at->isa_ptr()) { 746 assert(at == Type::TOP || at == TypePtr::NULL_PTR || 747 at->isa_ptr() != NULL, "expecting a pointer"); 748 if (adr->is_AddP()) { 749 adr = get_addp_base(adr); 750 } 751 PointsToNode* ptn = ptnode_adr(adr->_idx); 752 assert(ptn != NULL, "node should be registered"); 753 add_edge(n_ptn, ptn); 754 } 755 } 756 break; 757 } 758 default: { 759 // This method should be called only for EA specific nodes which may 760 // miss some edges when they were created. 761 #ifdef ASSERT 762 n->dump(1); 763 #endif 764 guarantee(false, "unknown node"); 765 } 766 } 767 return; 768 } 769 770 void ConnectionGraph::add_call_node(CallNode* call) { 771 assert(call->returns_pointer(), "only for call which returns pointer"); 772 uint call_idx = call->_idx; 773 if (call->is_Allocate()) { 774 Node* k = call->in(AllocateNode::KlassNode); 775 const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr(); 776 assert(kt != NULL, "TypeKlassPtr required."); 777 ciKlass* cik = kt->klass(); 778 PointsToNode::EscapeState es = PointsToNode::NoEscape; 779 bool scalar_replaceable = true; 780 if (call->is_AllocateArray()) { 781 if (!cik->is_array_klass()) { // StressReflectiveCode 782 es = PointsToNode::GlobalEscape; 783 } else { 784 int length = call->in(AllocateNode::ALength)->find_int_con(-1); 785 if (length < 0 || length > EliminateAllocationArraySizeLimit) { 786 // Not scalar replaceable if the length is not constant or too big. 787 scalar_replaceable = false; 788 } 789 } 790 } else { // Allocate instance 791 if (cik->is_subclass_of(_compile->env()->Thread_klass()) || 792 cik->is_subclass_of(_compile->env()->Reference_klass()) || 793 !cik->is_instance_klass() || // StressReflectiveCode 794 cik->as_instance_klass()->has_finalizer()) { 795 es = PointsToNode::GlobalEscape; 796 } 797 } 798 add_java_object(call, es); 799 PointsToNode* ptn = ptnode_adr(call_idx); 800 if (!scalar_replaceable && ptn->scalar_replaceable()) { 801 ptn->set_scalar_replaceable(false); 802 } 803 } else if (call->is_CallStaticJava()) { 804 // Call nodes could be different types: 805 // 806 // 1. CallDynamicJavaNode (what happened during call is unknown): 807 // 808 // - mapped to GlobalEscape JavaObject node if oop is returned; 809 // 810 // - all oop arguments are escaping globally; 811 // 812 // 2. CallStaticJavaNode (execute bytecode analysis if possible): 813 // 814 // - the same as CallDynamicJavaNode if can't do bytecode analysis; 815 // 816 // - mapped to GlobalEscape JavaObject node if unknown oop is returned; 817 // - mapped to NoEscape JavaObject node if non-escaping object allocated 818 // during call is returned; 819 // - mapped to ArgEscape LocalVar node pointed to object arguments 820 // which are returned and does not escape during call; 821 // 822 // - oop arguments escaping status is defined by bytecode analysis; 823 // 824 // For a static call, we know exactly what method is being called. 825 // Use bytecode estimator to record whether the call's return value escapes. 826 ciMethod* meth = call->as_CallJava()->method(); 827 if (meth == NULL) { 828 const char* name = call->as_CallStaticJava()->_name; 829 assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check"); 830 // Returns a newly allocated unescaped object. 831 add_java_object(call, PointsToNode::NoEscape); 832 ptnode_adr(call_idx)->set_scalar_replaceable(false); 833 } else if (meth->is_boxing_method()) { 834 // Returns boxing object 835 PointsToNode::EscapeState es; 836 vmIntrinsics::ID intr = meth->intrinsic_id(); 837 if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) { 838 // It does not escape if object is always allocated. 839 es = PointsToNode::NoEscape; 840 } else { 841 // It escapes globally if object could be loaded from cache. 842 es = PointsToNode::GlobalEscape; 843 } 844 add_java_object(call, es); 845 } else { 846 BCEscapeAnalyzer* call_analyzer = meth->get_bcea(); 847 call_analyzer->copy_dependencies(_compile->dependencies()); 848 if (call_analyzer->is_return_allocated()) { 849 // Returns a newly allocated unescaped object, simply 850 // update dependency information. 851 // Mark it as NoEscape so that objects referenced by 852 // it's fields will be marked as NoEscape at least. 853 add_java_object(call, PointsToNode::NoEscape); 854 ptnode_adr(call_idx)->set_scalar_replaceable(false); 855 } else { 856 // Determine whether any arguments are returned. 857 const TypeTuple* d = call->tf()->domain(); 858 bool ret_arg = false; 859 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 860 if (d->field_at(i)->isa_ptr() != NULL && 861 call_analyzer->is_arg_returned(i - TypeFunc::Parms)) { 862 ret_arg = true; 863 break; 864 } 865 } 866 if (ret_arg) { 867 add_local_var(call, PointsToNode::ArgEscape); 868 } else { 869 // Returns unknown object. 870 map_ideal_node(call, phantom_obj); 871 } 872 } 873 } 874 } else { 875 // An other type of call, assume the worst case: 876 // returned value is unknown and globally escapes. 877 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check"); 878 map_ideal_node(call, phantom_obj); 879 } 880 } 881 882 void ConnectionGraph::process_call_arguments(CallNode *call) { 883 bool is_arraycopy = false; 884 switch (call->Opcode()) { 885 #ifdef ASSERT 886 case Op_Allocate: 887 case Op_AllocateArray: 888 case Op_Lock: 889 case Op_Unlock: 890 assert(false, "should be done already"); 891 break; 892 #endif 893 case Op_ArrayCopy: 894 case Op_CallLeafNoFP: 895 // Most array copies are ArrayCopy nodes at this point but there 896 // are still a few direct calls to the copy subroutines (See 897 // PhaseStringOpts::copy_string()) 898 is_arraycopy = (call->Opcode() == Op_ArrayCopy) || 899 (call->as_CallLeaf()->_name != NULL && 900 strstr(call->as_CallLeaf()->_name, "arraycopy") != 0); 901 // fall through 902 case Op_CallLeaf: { 903 // Stub calls, objects do not escape but they are not scale replaceable. 904 // Adjust escape state for outgoing arguments. 905 const TypeTuple * d = call->tf()->domain(); 906 bool src_has_oops = false; 907 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 908 const Type* at = d->field_at(i); 909 Node *arg = call->in(i); 910 if (arg == NULL) { 911 continue; 912 } 913 const Type *aat = _igvn->type(arg); 914 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) 915 continue; 916 if (arg->is_AddP()) { 917 // 918 // The inline_native_clone() case when the arraycopy stub is called 919 // after the allocation before Initialize and CheckCastPP nodes. 920 // Or normal arraycopy for object arrays case. 921 // 922 // Set AddP's base (Allocate) as not scalar replaceable since 923 // pointer to the base (with offset) is passed as argument. 924 // 925 arg = get_addp_base(arg); 926 } 927 PointsToNode* arg_ptn = ptnode_adr(arg->_idx); 928 assert(arg_ptn != NULL, "should be registered"); 929 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state(); 930 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) { 931 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR || 932 aat->isa_ptr() != NULL, "expecting an Ptr"); 933 bool arg_has_oops = aat->isa_oopptr() && 934 (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() || 935 (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass())); 936 if (i == TypeFunc::Parms) { 937 src_has_oops = arg_has_oops; 938 } 939 // 940 // src or dst could be j.l.Object when other is basic type array: 941 // 942 // arraycopy(char[],0,Object*,0,size); 943 // arraycopy(Object*,0,char[],0,size); 944 // 945 // Don't add edges in such cases. 946 // 947 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy && 948 arg_has_oops && (i > TypeFunc::Parms); 949 #ifdef ASSERT 950 if (!(is_arraycopy || 951 (call->as_CallLeaf()->_name != NULL && 952 (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 || 953 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 || 954 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 || 955 strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 || 956 strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 || 957 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 || 958 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 || 959 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 || 960 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 || 961 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 || 962 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 || 963 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 || 964 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 || 965 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0) 966 ))) { 967 call->dump(); 968 fatal(err_msg_res("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name)); 969 } 970 #endif 971 // Always process arraycopy's destination object since 972 // we need to add all possible edges to references in 973 // source object. 974 if (arg_esc >= PointsToNode::ArgEscape && 975 !arg_is_arraycopy_dest) { 976 continue; 977 } 978 set_escape_state(arg_ptn, PointsToNode::ArgEscape); 979 if (arg_is_arraycopy_dest) { 980 Node* src = call->in(TypeFunc::Parms); 981 if (src->is_AddP()) { 982 src = get_addp_base(src); 983 } 984 PointsToNode* src_ptn = ptnode_adr(src->_idx); 985 assert(src_ptn != NULL, "should be registered"); 986 if (arg_ptn != src_ptn) { 987 // Special arraycopy edge: 988 // A destination object's field can't have the source object 989 // as base since objects escape states are not related. 990 // Only escape state of destination object's fields affects 991 // escape state of fields in source object. 992 add_arraycopy(call, PointsToNode::ArgEscape, src_ptn, arg_ptn); 993 } 994 } 995 } 996 } 997 break; 998 } 999 case Op_CallStaticJava: { 1000 // For a static call, we know exactly what method is being called. 1001 // Use bytecode estimator to record the call's escape affects 1002 #ifdef ASSERT 1003 const char* name = call->as_CallStaticJava()->_name; 1004 assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only"); 1005 #endif 1006 ciMethod* meth = call->as_CallJava()->method(); 1007 if ((meth != NULL) && meth->is_boxing_method()) { 1008 break; // Boxing methods do not modify any oops. 1009 } 1010 BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL; 1011 // fall-through if not a Java method or no analyzer information 1012 if (call_analyzer != NULL) { 1013 PointsToNode* call_ptn = ptnode_adr(call->_idx); 1014 const TypeTuple* d = call->tf()->domain(); 1015 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 1016 const Type* at = d->field_at(i); 1017 int k = i - TypeFunc::Parms; 1018 Node* arg = call->in(i); 1019 PointsToNode* arg_ptn = ptnode_adr(arg->_idx); 1020 if (at->isa_ptr() != NULL && 1021 call_analyzer->is_arg_returned(k)) { 1022 // The call returns arguments. 1023 if (call_ptn != NULL) { // Is call's result used? 1024 assert(call_ptn->is_LocalVar(), "node should be registered"); 1025 assert(arg_ptn != NULL, "node should be registered"); 1026 add_edge(call_ptn, arg_ptn); 1027 } 1028 } 1029 if (at->isa_oopptr() != NULL && 1030 arg_ptn->escape_state() < PointsToNode::GlobalEscape) { 1031 if (!call_analyzer->is_arg_stack(k)) { 1032 // The argument global escapes 1033 set_escape_state(arg_ptn, PointsToNode::GlobalEscape); 1034 } else { 1035 set_escape_state(arg_ptn, PointsToNode::ArgEscape); 1036 if (!call_analyzer->is_arg_local(k)) { 1037 // The argument itself doesn't escape, but any fields might 1038 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape); 1039 } 1040 } 1041 } 1042 } 1043 if (call_ptn != NULL && call_ptn->is_LocalVar()) { 1044 // The call returns arguments. 1045 assert(call_ptn->edge_count() > 0, "sanity"); 1046 if (!call_analyzer->is_return_local()) { 1047 // Returns also unknown object. 1048 add_edge(call_ptn, phantom_obj); 1049 } 1050 } 1051 break; 1052 } 1053 } 1054 default: { 1055 // Fall-through here if not a Java method or no analyzer information 1056 // or some other type of call, assume the worst case: all arguments 1057 // globally escape. 1058 const TypeTuple* d = call->tf()->domain(); 1059 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 1060 const Type* at = d->field_at(i); 1061 if (at->isa_oopptr() != NULL) { 1062 Node* arg = call->in(i); 1063 if (arg->is_AddP()) { 1064 arg = get_addp_base(arg); 1065 } 1066 assert(ptnode_adr(arg->_idx) != NULL, "should be defined already"); 1067 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape); 1068 } 1069 } 1070 } 1071 } 1072 } 1073 1074 1075 // Finish Graph construction. 1076 bool ConnectionGraph::complete_connection_graph( 1077 GrowableArray<PointsToNode*>& ptnodes_worklist, 1078 GrowableArray<JavaObjectNode*>& non_escaped_worklist, 1079 GrowableArray<JavaObjectNode*>& java_objects_worklist, 1080 GrowableArray<FieldNode*>& oop_fields_worklist) { 1081 // Normally only 1-3 passes needed to build Connection Graph depending 1082 // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler. 1083 // Set limit to 20 to catch situation when something did go wrong and 1084 // bailout Escape Analysis. 1085 // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag. 1086 #define CG_BUILD_ITER_LIMIT 20 1087 1088 // Propagate GlobalEscape and ArgEscape escape states and check that 1089 // we still have non-escaping objects. The method pushs on _worklist 1090 // Field nodes which reference phantom_object. 1091 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) { 1092 return false; // Nothing to do. 1093 } 1094 // Now propagate references to all JavaObject nodes. 1095 int java_objects_length = java_objects_worklist.length(); 1096 elapsedTimer time; 1097 bool timeout = false; 1098 int new_edges = 1; 1099 int iterations = 0; 1100 do { 1101 while ((new_edges > 0) && 1102 (iterations++ < CG_BUILD_ITER_LIMIT)) { 1103 double start_time = time.seconds(); 1104 time.start(); 1105 new_edges = 0; 1106 // Propagate references to phantom_object for nodes pushed on _worklist 1107 // by find_non_escaped_objects() and find_field_value(). 1108 new_edges += add_java_object_edges(phantom_obj, false); 1109 for (int next = 0; next < java_objects_length; ++next) { 1110 JavaObjectNode* ptn = java_objects_worklist.at(next); 1111 new_edges += add_java_object_edges(ptn, true); 1112 1113 #define SAMPLE_SIZE 4 1114 if ((next % SAMPLE_SIZE) == 0) { 1115 // Each 4 iterations calculate how much time it will take 1116 // to complete graph construction. 1117 time.stop(); 1118 // Poll for requests from shutdown mechanism to quiesce compiler 1119 // because Connection graph construction may take long time. 1120 CompileBroker::maybe_block(); 1121 double stop_time = time.seconds(); 1122 double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE; 1123 double time_until_end = time_per_iter * (double)(java_objects_length - next); 1124 if ((start_time + time_until_end) >= EscapeAnalysisTimeout) { 1125 timeout = true; 1126 break; // Timeout 1127 } 1128 start_time = stop_time; 1129 time.start(); 1130 } 1131 #undef SAMPLE_SIZE 1132 1133 } 1134 if (timeout) break; 1135 if (new_edges > 0) { 1136 // Update escape states on each iteration if graph was updated. 1137 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) { 1138 return false; // Nothing to do. 1139 } 1140 } 1141 time.stop(); 1142 if (time.seconds() >= EscapeAnalysisTimeout) { 1143 timeout = true; 1144 break; 1145 } 1146 } 1147 if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) { 1148 time.start(); 1149 // Find fields which have unknown value. 1150 int fields_length = oop_fields_worklist.length(); 1151 for (int next = 0; next < fields_length; next++) { 1152 FieldNode* field = oop_fields_worklist.at(next); 1153 if (field->edge_count() == 0) { 1154 new_edges += find_field_value(field); 1155 // This code may added new edges to phantom_object. 1156 // Need an other cycle to propagate references to phantom_object. 1157 } 1158 } 1159 time.stop(); 1160 if (time.seconds() >= EscapeAnalysisTimeout) { 1161 timeout = true; 1162 break; 1163 } 1164 } else { 1165 new_edges = 0; // Bailout 1166 } 1167 } while (new_edges > 0); 1168 1169 // Bailout if passed limits. 1170 if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) { 1171 Compile* C = _compile; 1172 if (C->log() != NULL) { 1173 C->log()->begin_elem("connectionGraph_bailout reason='reached "); 1174 C->log()->text("%s", timeout ? "time" : "iterations"); 1175 C->log()->end_elem(" limit'"); 1176 } 1177 assert(ExitEscapeAnalysisOnTimeout, err_msg_res("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d", 1178 time.seconds(), iterations, nodes_size(), ptnodes_worklist.length())); 1179 // Possible infinite build_connection_graph loop, 1180 // bailout (no changes to ideal graph were made). 1181 return false; 1182 } 1183 #ifdef ASSERT 1184 if (Verbose && PrintEscapeAnalysis) { 1185 tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d", 1186 iterations, nodes_size(), ptnodes_worklist.length()); 1187 } 1188 #endif 1189 1190 #undef CG_BUILD_ITER_LIMIT 1191 1192 // Find fields initialized by NULL for non-escaping Allocations. 1193 int non_escaped_length = non_escaped_worklist.length(); 1194 for (int next = 0; next < non_escaped_length; next++) { 1195 JavaObjectNode* ptn = non_escaped_worklist.at(next); 1196 PointsToNode::EscapeState es = ptn->escape_state(); 1197 assert(es <= PointsToNode::ArgEscape, "sanity"); 1198 if (es == PointsToNode::NoEscape) { 1199 if (find_init_values(ptn, null_obj, _igvn) > 0) { 1200 // Adding references to NULL object does not change escape states 1201 // since it does not escape. Also no fields are added to NULL object. 1202 add_java_object_edges(null_obj, false); 1203 } 1204 } 1205 Node* n = ptn->ideal_node(); 1206 if (n->is_Allocate()) { 1207 // The object allocated by this Allocate node will never be 1208 // seen by an other thread. Mark it so that when it is 1209 // expanded no MemBarStoreStore is added. 1210 InitializeNode* ini = n->as_Allocate()->initialization(); 1211 if (ini != NULL) 1212 ini->set_does_not_escape(); 1213 } 1214 } 1215 return true; // Finished graph construction. 1216 } 1217 1218 // Propagate GlobalEscape and ArgEscape escape states to all nodes 1219 // and check that we still have non-escaping java objects. 1220 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist, 1221 GrowableArray<JavaObjectNode*>& non_escaped_worklist) { 1222 GrowableArray<PointsToNode*> escape_worklist; 1223 // First, put all nodes with GlobalEscape and ArgEscape states on worklist. 1224 int ptnodes_length = ptnodes_worklist.length(); 1225 for (int next = 0; next < ptnodes_length; ++next) { 1226 PointsToNode* ptn = ptnodes_worklist.at(next); 1227 if (ptn->escape_state() >= PointsToNode::ArgEscape || 1228 ptn->fields_escape_state() >= PointsToNode::ArgEscape) { 1229 escape_worklist.push(ptn); 1230 } 1231 } 1232 // Set escape states to referenced nodes (edges list). 1233 while (escape_worklist.length() > 0) { 1234 PointsToNode* ptn = escape_worklist.pop(); 1235 PointsToNode::EscapeState es = ptn->escape_state(); 1236 PointsToNode::EscapeState field_es = ptn->fields_escape_state(); 1237 if (ptn->is_Field() && ptn->as_Field()->is_oop() && 1238 es >= PointsToNode::ArgEscape) { 1239 // GlobalEscape or ArgEscape state of field means it has unknown value. 1240 if (add_edge(ptn, phantom_obj)) { 1241 // New edge was added 1242 add_field_uses_to_worklist(ptn->as_Field()); 1243 } 1244 } 1245 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 1246 PointsToNode* e = i.get(); 1247 if (e->is_Arraycopy()) { 1248 assert(ptn->arraycopy_dst(), "sanity"); 1249 // Propagate only fields escape state through arraycopy edge. 1250 if (e->fields_escape_state() < field_es) { 1251 set_fields_escape_state(e, field_es); 1252 escape_worklist.push(e); 1253 } 1254 } else if (es >= field_es) { 1255 // fields_escape_state is also set to 'es' if it is less than 'es'. 1256 if (e->escape_state() < es) { 1257 set_escape_state(e, es); 1258 escape_worklist.push(e); 1259 } 1260 } else { 1261 // Propagate field escape state. 1262 bool es_changed = false; 1263 if (e->fields_escape_state() < field_es) { 1264 set_fields_escape_state(e, field_es); 1265 es_changed = true; 1266 } 1267 if ((e->escape_state() < field_es) && 1268 e->is_Field() && ptn->is_JavaObject() && 1269 e->as_Field()->is_oop()) { 1270 // Change escape state of referenced fileds. 1271 set_escape_state(e, field_es); 1272 es_changed = true;; 1273 } else if (e->escape_state() < es) { 1274 set_escape_state(e, es); 1275 es_changed = true;; 1276 } 1277 if (es_changed) { 1278 escape_worklist.push(e); 1279 } 1280 } 1281 } 1282 } 1283 // Remove escaped objects from non_escaped list. 1284 for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) { 1285 JavaObjectNode* ptn = non_escaped_worklist.at(next); 1286 if (ptn->escape_state() >= PointsToNode::GlobalEscape) { 1287 non_escaped_worklist.delete_at(next); 1288 } 1289 if (ptn->escape_state() == PointsToNode::NoEscape) { 1290 // Find fields in non-escaped allocations which have unknown value. 1291 find_init_values(ptn, phantom_obj, NULL); 1292 } 1293 } 1294 return (non_escaped_worklist.length() > 0); 1295 } 1296 1297 // Add all references to JavaObject node by walking over all uses. 1298 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) { 1299 int new_edges = 0; 1300 if (populate_worklist) { 1301 // Populate _worklist by uses of jobj's uses. 1302 for (UseIterator i(jobj); i.has_next(); i.next()) { 1303 PointsToNode* use = i.get(); 1304 if (use->is_Arraycopy()) 1305 continue; 1306 add_uses_to_worklist(use); 1307 if (use->is_Field() && use->as_Field()->is_oop()) { 1308 // Put on worklist all field's uses (loads) and 1309 // related field nodes (same base and offset). 1310 add_field_uses_to_worklist(use->as_Field()); 1311 } 1312 } 1313 } 1314 for (int l = 0; l < _worklist.length(); l++) { 1315 PointsToNode* use = _worklist.at(l); 1316 if (PointsToNode::is_base_use(use)) { 1317 // Add reference from jobj to field and from field to jobj (field's base). 1318 use = PointsToNode::get_use_node(use)->as_Field(); 1319 if (add_base(use->as_Field(), jobj)) { 1320 new_edges++; 1321 } 1322 continue; 1323 } 1324 assert(!use->is_JavaObject(), "sanity"); 1325 if (use->is_Arraycopy()) { 1326 if (jobj == null_obj) // NULL object does not have field edges 1327 continue; 1328 // Added edge from Arraycopy node to arraycopy's source java object 1329 if (add_edge(use, jobj)) { 1330 jobj->set_arraycopy_src(); 1331 new_edges++; 1332 } 1333 // and stop here. 1334 continue; 1335 } 1336 if (!add_edge(use, jobj)) 1337 continue; // No new edge added, there was such edge already. 1338 new_edges++; 1339 if (use->is_LocalVar()) { 1340 add_uses_to_worklist(use); 1341 if (use->arraycopy_dst()) { 1342 for (EdgeIterator i(use); i.has_next(); i.next()) { 1343 PointsToNode* e = i.get(); 1344 if (e->is_Arraycopy()) { 1345 if (jobj == null_obj) // NULL object does not have field edges 1346 continue; 1347 // Add edge from arraycopy's destination java object to Arraycopy node. 1348 if (add_edge(jobj, e)) { 1349 new_edges++; 1350 jobj->set_arraycopy_dst(); 1351 } 1352 } 1353 } 1354 } 1355 } else { 1356 // Added new edge to stored in field values. 1357 // Put on worklist all field's uses (loads) and 1358 // related field nodes (same base and offset). 1359 add_field_uses_to_worklist(use->as_Field()); 1360 } 1361 } 1362 _worklist.clear(); 1363 _in_worklist.Reset(); 1364 return new_edges; 1365 } 1366 1367 // Put on worklist all related field nodes. 1368 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) { 1369 assert(field->is_oop(), "sanity"); 1370 int offset = field->offset(); 1371 add_uses_to_worklist(field); 1372 // Loop over all bases of this field and push on worklist Field nodes 1373 // with the same offset and base (since they may reference the same field). 1374 for (BaseIterator i(field); i.has_next(); i.next()) { 1375 PointsToNode* base = i.get(); 1376 add_fields_to_worklist(field, base); 1377 // Check if the base was source object of arraycopy and go over arraycopy's 1378 // destination objects since values stored to a field of source object are 1379 // accessable by uses (loads) of fields of destination objects. 1380 if (base->arraycopy_src()) { 1381 for (UseIterator j(base); j.has_next(); j.next()) { 1382 PointsToNode* arycp = j.get(); 1383 if (arycp->is_Arraycopy()) { 1384 for (UseIterator k(arycp); k.has_next(); k.next()) { 1385 PointsToNode* abase = k.get(); 1386 if (abase->arraycopy_dst() && abase != base) { 1387 // Look for the same arracopy reference. 1388 add_fields_to_worklist(field, abase); 1389 } 1390 } 1391 } 1392 } 1393 } 1394 } 1395 } 1396 1397 // Put on worklist all related field nodes. 1398 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) { 1399 int offset = field->offset(); 1400 if (base->is_LocalVar()) { 1401 for (UseIterator j(base); j.has_next(); j.next()) { 1402 PointsToNode* f = j.get(); 1403 if (PointsToNode::is_base_use(f)) { // Field 1404 f = PointsToNode::get_use_node(f); 1405 if (f == field || !f->as_Field()->is_oop()) 1406 continue; 1407 int offs = f->as_Field()->offset(); 1408 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) { 1409 add_to_worklist(f); 1410 } 1411 } 1412 } 1413 } else { 1414 assert(base->is_JavaObject(), "sanity"); 1415 if (// Skip phantom_object since it is only used to indicate that 1416 // this field's content globally escapes. 1417 (base != phantom_obj) && 1418 // NULL object node does not have fields. 1419 (base != null_obj)) { 1420 for (EdgeIterator i(base); i.has_next(); i.next()) { 1421 PointsToNode* f = i.get(); 1422 // Skip arraycopy edge since store to destination object field 1423 // does not update value in source object field. 1424 if (f->is_Arraycopy()) { 1425 assert(base->arraycopy_dst(), "sanity"); 1426 continue; 1427 } 1428 if (f == field || !f->as_Field()->is_oop()) 1429 continue; 1430 int offs = f->as_Field()->offset(); 1431 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) { 1432 add_to_worklist(f); 1433 } 1434 } 1435 } 1436 } 1437 } 1438 1439 // Find fields which have unknown value. 1440 int ConnectionGraph::find_field_value(FieldNode* field) { 1441 // Escaped fields should have init value already. 1442 assert(field->escape_state() == PointsToNode::NoEscape, "sanity"); 1443 int new_edges = 0; 1444 for (BaseIterator i(field); i.has_next(); i.next()) { 1445 PointsToNode* base = i.get(); 1446 if (base->is_JavaObject()) { 1447 // Skip Allocate's fields which will be processed later. 1448 if (base->ideal_node()->is_Allocate()) 1449 return 0; 1450 assert(base == null_obj, "only NULL ptr base expected here"); 1451 } 1452 } 1453 if (add_edge(field, phantom_obj)) { 1454 // New edge was added 1455 new_edges++; 1456 add_field_uses_to_worklist(field); 1457 } 1458 return new_edges; 1459 } 1460 1461 // Find fields initializing values for allocations. 1462 int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) { 1463 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only"); 1464 int new_edges = 0; 1465 Node* alloc = pta->ideal_node(); 1466 if (init_val == phantom_obj) { 1467 // Do nothing for Allocate nodes since its fields values are "known". 1468 if (alloc->is_Allocate()) 1469 return 0; 1470 assert(alloc->as_CallStaticJava(), "sanity"); 1471 #ifdef ASSERT 1472 if (alloc->as_CallStaticJava()->method() == NULL) { 1473 const char* name = alloc->as_CallStaticJava()->_name; 1474 assert(strncmp(name, "_multianewarray", 15) == 0, "sanity"); 1475 } 1476 #endif 1477 // Non-escaped allocation returned from Java or runtime call have 1478 // unknown values in fields. 1479 for (EdgeIterator i(pta); i.has_next(); i.next()) { 1480 PointsToNode* field = i.get(); 1481 if (field->is_Field() && field->as_Field()->is_oop()) { 1482 if (add_edge(field, phantom_obj)) { 1483 // New edge was added 1484 new_edges++; 1485 add_field_uses_to_worklist(field->as_Field()); 1486 } 1487 } 1488 } 1489 return new_edges; 1490 } 1491 assert(init_val == null_obj, "sanity"); 1492 // Do nothing for Call nodes since its fields values are unknown. 1493 if (!alloc->is_Allocate()) 1494 return 0; 1495 1496 InitializeNode* ini = alloc->as_Allocate()->initialization(); 1497 bool visited_bottom_offset = false; 1498 GrowableArray<int> offsets_worklist; 1499 1500 // Check if an oop field's initializing value is recorded and add 1501 // a corresponding NULL if field's value if it is not recorded. 1502 // Connection Graph does not record a default initialization by NULL 1503 // captured by Initialize node. 1504 // 1505 for (EdgeIterator i(pta); i.has_next(); i.next()) { 1506 PointsToNode* field = i.get(); // Field (AddP) 1507 if (!field->is_Field() || !field->as_Field()->is_oop()) 1508 continue; // Not oop field 1509 int offset = field->as_Field()->offset(); 1510 if (offset == Type::OffsetBot) { 1511 if (!visited_bottom_offset) { 1512 // OffsetBot is used to reference array's element, 1513 // always add reference to NULL to all Field nodes since we don't 1514 // known which element is referenced. 1515 if (add_edge(field, null_obj)) { 1516 // New edge was added 1517 new_edges++; 1518 add_field_uses_to_worklist(field->as_Field()); 1519 visited_bottom_offset = true; 1520 } 1521 } 1522 } else { 1523 // Check only oop fields. 1524 const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type(); 1525 if (adr_type->isa_rawptr()) { 1526 #ifdef ASSERT 1527 // Raw pointers are used for initializing stores so skip it 1528 // since it should be recorded already 1529 Node* base = get_addp_base(field->ideal_node()); 1530 assert(adr_type->isa_rawptr() && base->is_Proj() && 1531 (base->in(0) == alloc),"unexpected pointer type"); 1532 #endif 1533 continue; 1534 } 1535 if (!offsets_worklist.contains(offset)) { 1536 offsets_worklist.append(offset); 1537 Node* value = NULL; 1538 if (ini != NULL) { 1539 // StoreP::memory_type() == T_ADDRESS 1540 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS; 1541 Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase); 1542 // Make sure initializing store has the same type as this AddP. 1543 // This AddP may reference non existing field because it is on a 1544 // dead branch of bimorphic call which is not eliminated yet. 1545 if (store != NULL && store->is_Store() && 1546 store->as_Store()->memory_type() == ft) { 1547 value = store->in(MemNode::ValueIn); 1548 #ifdef ASSERT 1549 if (VerifyConnectionGraph) { 1550 // Verify that AddP already points to all objects the value points to. 1551 PointsToNode* val = ptnode_adr(value->_idx); 1552 assert((val != NULL), "should be processed already"); 1553 PointsToNode* missed_obj = NULL; 1554 if (val->is_JavaObject()) { 1555 if (!field->points_to(val->as_JavaObject())) { 1556 missed_obj = val; 1557 } 1558 } else { 1559 if (!val->is_LocalVar() || (val->edge_count() == 0)) { 1560 tty->print_cr("----------init store has invalid value -----"); 1561 store->dump(); 1562 val->dump(); 1563 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already"); 1564 } 1565 for (EdgeIterator j(val); j.has_next(); j.next()) { 1566 PointsToNode* obj = j.get(); 1567 if (obj->is_JavaObject()) { 1568 if (!field->points_to(obj->as_JavaObject())) { 1569 missed_obj = obj; 1570 break; 1571 } 1572 } 1573 } 1574 } 1575 if (missed_obj != NULL) { 1576 tty->print_cr("----------field---------------------------------"); 1577 field->dump(); 1578 tty->print_cr("----------missed referernce to object-----------"); 1579 missed_obj->dump(); 1580 tty->print_cr("----------object referernced by init store -----"); 1581 store->dump(); 1582 val->dump(); 1583 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference"); 1584 } 1585 } 1586 #endif 1587 } else { 1588 // There could be initializing stores which follow allocation. 1589 // For example, a volatile field store is not collected 1590 // by Initialize node. 1591 // 1592 // Need to check for dependent loads to separate such stores from 1593 // stores which follow loads. For now, add initial value NULL so 1594 // that compare pointers optimization works correctly. 1595 } 1596 } 1597 if (value == NULL) { 1598 // A field's initializing value was not recorded. Add NULL. 1599 if (add_edge(field, null_obj)) { 1600 // New edge was added 1601 new_edges++; 1602 add_field_uses_to_worklist(field->as_Field()); 1603 } 1604 } 1605 } 1606 } 1607 } 1608 return new_edges; 1609 } 1610 1611 // Adjust scalar_replaceable state after Connection Graph is built. 1612 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) { 1613 // Search for non-escaping objects which are not scalar replaceable 1614 // and mark them to propagate the state to referenced objects. 1615 1616 // 1. An object is not scalar replaceable if the field into which it is 1617 // stored has unknown offset (stored into unknown element of an array). 1618 // 1619 for (UseIterator i(jobj); i.has_next(); i.next()) { 1620 PointsToNode* use = i.get(); 1621 assert(!use->is_Arraycopy(), "sanity"); 1622 if (use->is_Field()) { 1623 FieldNode* field = use->as_Field(); 1624 assert(field->is_oop() && field->scalar_replaceable() && 1625 field->fields_escape_state() == PointsToNode::NoEscape, "sanity"); 1626 if (field->offset() == Type::OffsetBot) { 1627 jobj->set_scalar_replaceable(false); 1628 return; 1629 } 1630 // 2. An object is not scalar replaceable if the field into which it is 1631 // stored has multiple bases one of which is null. 1632 if (field->base_count() > 1) { 1633 for (BaseIterator i(field); i.has_next(); i.next()) { 1634 PointsToNode* base = i.get(); 1635 if (base == null_obj) { 1636 jobj->set_scalar_replaceable(false); 1637 return; 1638 } 1639 } 1640 } 1641 } 1642 assert(use->is_Field() || use->is_LocalVar(), "sanity"); 1643 // 3. An object is not scalar replaceable if it is merged with other objects. 1644 for (EdgeIterator j(use); j.has_next(); j.next()) { 1645 PointsToNode* ptn = j.get(); 1646 if (ptn->is_JavaObject() && ptn != jobj) { 1647 // Mark all objects. 1648 jobj->set_scalar_replaceable(false); 1649 ptn->set_scalar_replaceable(false); 1650 } 1651 } 1652 if (!jobj->scalar_replaceable()) { 1653 return; 1654 } 1655 } 1656 1657 for (EdgeIterator j(jobj); j.has_next(); j.next()) { 1658 // Non-escaping object node should point only to field nodes. 1659 FieldNode* field = j.get()->as_Field(); 1660 int offset = field->as_Field()->offset(); 1661 1662 // 4. An object is not scalar replaceable if it has a field with unknown 1663 // offset (array's element is accessed in loop). 1664 if (offset == Type::OffsetBot) { 1665 jobj->set_scalar_replaceable(false); 1666 return; 1667 } 1668 // 5. Currently an object is not scalar replaceable if a LoadStore node 1669 // access its field since the field value is unknown after it. 1670 // 1671 Node* n = field->ideal_node(); 1672 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1673 if (n->fast_out(i)->is_LoadStore()) { 1674 jobj->set_scalar_replaceable(false); 1675 return; 1676 } 1677 } 1678 1679 // 6. Or the address may point to more then one object. This may produce 1680 // the false positive result (set not scalar replaceable) 1681 // since the flow-insensitive escape analysis can't separate 1682 // the case when stores overwrite the field's value from the case 1683 // when stores happened on different control branches. 1684 // 1685 // Note: it will disable scalar replacement in some cases: 1686 // 1687 // Point p[] = new Point[1]; 1688 // p[0] = new Point(); // Will be not scalar replaced 1689 // 1690 // but it will save us from incorrect optimizations in next cases: 1691 // 1692 // Point p[] = new Point[1]; 1693 // if ( x ) p[0] = new Point(); // Will be not scalar replaced 1694 // 1695 if (field->base_count() > 1) { 1696 for (BaseIterator i(field); i.has_next(); i.next()) { 1697 PointsToNode* base = i.get(); 1698 // Don't take into account LocalVar nodes which 1699 // may point to only one object which should be also 1700 // this field's base by now. 1701 if (base->is_JavaObject() && base != jobj) { 1702 // Mark all bases. 1703 jobj->set_scalar_replaceable(false); 1704 base->set_scalar_replaceable(false); 1705 } 1706 } 1707 } 1708 } 1709 } 1710 1711 #ifdef ASSERT 1712 void ConnectionGraph::verify_connection_graph( 1713 GrowableArray<PointsToNode*>& ptnodes_worklist, 1714 GrowableArray<JavaObjectNode*>& non_escaped_worklist, 1715 GrowableArray<JavaObjectNode*>& java_objects_worklist, 1716 GrowableArray<Node*>& addp_worklist) { 1717 // Verify that graph is complete - no new edges could be added. 1718 int java_objects_length = java_objects_worklist.length(); 1719 int non_escaped_length = non_escaped_worklist.length(); 1720 int new_edges = 0; 1721 for (int next = 0; next < java_objects_length; ++next) { 1722 JavaObjectNode* ptn = java_objects_worklist.at(next); 1723 new_edges += add_java_object_edges(ptn, true); 1724 } 1725 assert(new_edges == 0, "graph was not complete"); 1726 // Verify that escape state is final. 1727 int length = non_escaped_worklist.length(); 1728 find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist); 1729 assert((non_escaped_length == non_escaped_worklist.length()) && 1730 (non_escaped_length == length) && 1731 (_worklist.length() == 0), "escape state was not final"); 1732 1733 // Verify fields information. 1734 int addp_length = addp_worklist.length(); 1735 for (int next = 0; next < addp_length; ++next ) { 1736 Node* n = addp_worklist.at(next); 1737 FieldNode* field = ptnode_adr(n->_idx)->as_Field(); 1738 if (field->is_oop()) { 1739 // Verify that field has all bases 1740 Node* base = get_addp_base(n); 1741 PointsToNode* ptn = ptnode_adr(base->_idx); 1742 if (ptn->is_JavaObject()) { 1743 assert(field->has_base(ptn->as_JavaObject()), "sanity"); 1744 } else { 1745 assert(ptn->is_LocalVar(), "sanity"); 1746 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 1747 PointsToNode* e = i.get(); 1748 if (e->is_JavaObject()) { 1749 assert(field->has_base(e->as_JavaObject()), "sanity"); 1750 } 1751 } 1752 } 1753 // Verify that all fields have initializing values. 1754 if (field->edge_count() == 0) { 1755 tty->print_cr("----------field does not have references----------"); 1756 field->dump(); 1757 for (BaseIterator i(field); i.has_next(); i.next()) { 1758 PointsToNode* base = i.get(); 1759 tty->print_cr("----------field has next base---------------------"); 1760 base->dump(); 1761 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) { 1762 tty->print_cr("----------base has fields-------------------------"); 1763 for (EdgeIterator j(base); j.has_next(); j.next()) { 1764 j.get()->dump(); 1765 } 1766 tty->print_cr("----------base has references---------------------"); 1767 for (UseIterator j(base); j.has_next(); j.next()) { 1768 j.get()->dump(); 1769 } 1770 } 1771 } 1772 for (UseIterator i(field); i.has_next(); i.next()) { 1773 i.get()->dump(); 1774 } 1775 assert(field->edge_count() > 0, "sanity"); 1776 } 1777 } 1778 } 1779 } 1780 #endif 1781 1782 // Optimize ideal graph. 1783 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist, 1784 GrowableArray<Node*>& storestore_worklist) { 1785 Compile* C = _compile; 1786 PhaseIterGVN* igvn = _igvn; 1787 if (EliminateLocks) { 1788 // Mark locks before changing ideal graph. 1789 int cnt = C->macro_count(); 1790 for( int i=0; i < cnt; i++ ) { 1791 Node *n = C->macro_node(i); 1792 if (n->is_AbstractLock()) { // Lock and Unlock nodes 1793 AbstractLockNode* alock = n->as_AbstractLock(); 1794 if (!alock->is_non_esc_obj()) { 1795 if (not_global_escape(alock->obj_node())) { 1796 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity"); 1797 // The lock could be marked eliminated by lock coarsening 1798 // code during first IGVN before EA. Replace coarsened flag 1799 // to eliminate all associated locks/unlocks. 1800 alock->set_non_esc_obj(); 1801 } 1802 } 1803 } 1804 } 1805 } 1806 1807 if (OptimizePtrCompare) { 1808 // Add ConI(#CC_GT) and ConI(#CC_EQ). 1809 _pcmp_neq = igvn->makecon(TypeInt::CC_GT); 1810 _pcmp_eq = igvn->makecon(TypeInt::CC_EQ); 1811 // Optimize objects compare. 1812 while (ptr_cmp_worklist.length() != 0) { 1813 Node *n = ptr_cmp_worklist.pop(); 1814 Node *res = optimize_ptr_compare(n); 1815 if (res != NULL) { 1816 #ifndef PRODUCT 1817 if (PrintOptimizePtrCompare) { 1818 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ")); 1819 if (Verbose) { 1820 n->dump(1); 1821 } 1822 } 1823 #endif 1824 igvn->replace_node(n, res); 1825 } 1826 } 1827 // cleanup 1828 if (_pcmp_neq->outcnt() == 0) 1829 igvn->hash_delete(_pcmp_neq); 1830 if (_pcmp_eq->outcnt() == 0) 1831 igvn->hash_delete(_pcmp_eq); 1832 } 1833 1834 // For MemBarStoreStore nodes added in library_call.cpp, check 1835 // escape status of associated AllocateNode and optimize out 1836 // MemBarStoreStore node if the allocated object never escapes. 1837 while (storestore_worklist.length() != 0) { 1838 Node *n = storestore_worklist.pop(); 1839 MemBarStoreStoreNode *storestore = n ->as_MemBarStoreStore(); 1840 Node *alloc = storestore->in(MemBarNode::Precedent)->in(0); 1841 assert (alloc->is_Allocate(), "storestore should point to AllocateNode"); 1842 if (not_global_escape(alloc)) { 1843 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot); 1844 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory)); 1845 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control)); 1846 igvn->register_new_node_with_optimizer(mb); 1847 igvn->replace_node(storestore, mb); 1848 } 1849 } 1850 } 1851 1852 // Optimize objects compare. 1853 Node* ConnectionGraph::optimize_ptr_compare(Node* n) { 1854 assert(OptimizePtrCompare, "sanity"); 1855 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx); 1856 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx); 1857 JavaObjectNode* jobj1 = unique_java_object(n->in(1)); 1858 JavaObjectNode* jobj2 = unique_java_object(n->in(2)); 1859 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity"); 1860 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity"); 1861 1862 // Check simple cases first. 1863 if (jobj1 != NULL) { 1864 if (jobj1->escape_state() == PointsToNode::NoEscape) { 1865 if (jobj1 == jobj2) { 1866 // Comparing the same not escaping object. 1867 return _pcmp_eq; 1868 } 1869 Node* obj = jobj1->ideal_node(); 1870 // Comparing not escaping allocation. 1871 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 1872 !ptn2->points_to(jobj1)) { 1873 return _pcmp_neq; // This includes nullness check. 1874 } 1875 } 1876 } 1877 if (jobj2 != NULL) { 1878 if (jobj2->escape_state() == PointsToNode::NoEscape) { 1879 Node* obj = jobj2->ideal_node(); 1880 // Comparing not escaping allocation. 1881 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 1882 !ptn1->points_to(jobj2)) { 1883 return _pcmp_neq; // This includes nullness check. 1884 } 1885 } 1886 } 1887 if (jobj1 != NULL && jobj1 != phantom_obj && 1888 jobj2 != NULL && jobj2 != phantom_obj && 1889 jobj1->ideal_node()->is_Con() && 1890 jobj2->ideal_node()->is_Con()) { 1891 // Klass or String constants compare. Need to be careful with 1892 // compressed pointers - compare types of ConN and ConP instead of nodes. 1893 const Type* t1 = jobj1->ideal_node()->get_ptr_type(); 1894 const Type* t2 = jobj2->ideal_node()->get_ptr_type(); 1895 if (t1->make_ptr() == t2->make_ptr()) { 1896 return _pcmp_eq; 1897 } else { 1898 return _pcmp_neq; 1899 } 1900 } 1901 if (ptn1->meet(ptn2)) { 1902 return NULL; // Sets are not disjoint 1903 } 1904 1905 // Sets are disjoint. 1906 bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj); 1907 bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj); 1908 bool set1_has_null_ptr = ptn1->points_to(null_obj); 1909 bool set2_has_null_ptr = ptn2->points_to(null_obj); 1910 if (set1_has_unknown_ptr && set2_has_null_ptr || 1911 set2_has_unknown_ptr && set1_has_null_ptr) { 1912 // Check nullness of unknown object. 1913 return NULL; 1914 } 1915 1916 // Disjointness by itself is not sufficient since 1917 // alias analysis is not complete for escaped objects. 1918 // Disjoint sets are definitely unrelated only when 1919 // at least one set has only not escaping allocations. 1920 if (!set1_has_unknown_ptr && !set1_has_null_ptr) { 1921 if (ptn1->non_escaping_allocation()) { 1922 return _pcmp_neq; 1923 } 1924 } 1925 if (!set2_has_unknown_ptr && !set2_has_null_ptr) { 1926 if (ptn2->non_escaping_allocation()) { 1927 return _pcmp_neq; 1928 } 1929 } 1930 return NULL; 1931 } 1932 1933 // Connection Graph constuction functions. 1934 1935 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) { 1936 PointsToNode* ptadr = _nodes.at(n->_idx); 1937 if (ptadr != NULL) { 1938 assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity"); 1939 return; 1940 } 1941 Compile* C = _compile; 1942 ptadr = new (C->comp_arena()) LocalVarNode(this, n, es); 1943 _nodes.at_put(n->_idx, ptadr); 1944 } 1945 1946 void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) { 1947 PointsToNode* ptadr = _nodes.at(n->_idx); 1948 if (ptadr != NULL) { 1949 assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity"); 1950 return; 1951 } 1952 Compile* C = _compile; 1953 ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es); 1954 _nodes.at_put(n->_idx, ptadr); 1955 } 1956 1957 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) { 1958 PointsToNode* ptadr = _nodes.at(n->_idx); 1959 if (ptadr != NULL) { 1960 assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity"); 1961 return; 1962 } 1963 bool unsafe = false; 1964 bool is_oop = is_oop_field(n, offset, &unsafe); 1965 if (unsafe) { 1966 es = PointsToNode::GlobalEscape; 1967 } 1968 Compile* C = _compile; 1969 FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop); 1970 _nodes.at_put(n->_idx, field); 1971 } 1972 1973 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es, 1974 PointsToNode* src, PointsToNode* dst) { 1975 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar"); 1976 assert((src != null_obj) && (dst != null_obj), "not for ConP NULL"); 1977 PointsToNode* ptadr = _nodes.at(n->_idx); 1978 if (ptadr != NULL) { 1979 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity"); 1980 return; 1981 } 1982 Compile* C = _compile; 1983 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es); 1984 _nodes.at_put(n->_idx, ptadr); 1985 // Add edge from arraycopy node to source object. 1986 (void)add_edge(ptadr, src); 1987 src->set_arraycopy_src(); 1988 // Add edge from destination object to arraycopy node. 1989 (void)add_edge(dst, ptadr); 1990 dst->set_arraycopy_dst(); 1991 } 1992 1993 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) { 1994 const Type* adr_type = n->as_AddP()->bottom_type(); 1995 BasicType bt = T_INT; 1996 if (offset == Type::OffsetBot) { 1997 // Check only oop fields. 1998 if (!adr_type->isa_aryptr() || 1999 (adr_type->isa_aryptr()->klass() == NULL) || 2000 adr_type->isa_aryptr()->klass()->is_obj_array_klass()) { 2001 // OffsetBot is used to reference array's element. Ignore first AddP. 2002 if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) { 2003 bt = T_OBJECT; 2004 } 2005 } 2006 } else if (offset != oopDesc::klass_offset_in_bytes()) { 2007 if (adr_type->isa_instptr()) { 2008 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field(); 2009 if (field != NULL) { 2010 bt = field->layout_type(); 2011 } else { 2012 // Check for unsafe oop field access 2013 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN)) { 2014 bt = T_OBJECT; 2015 (*unsafe) = true; 2016 } 2017 } 2018 } else if (adr_type->isa_aryptr()) { 2019 if (offset == arrayOopDesc::length_offset_in_bytes()) { 2020 // Ignore array length load. 2021 } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) { 2022 // Ignore first AddP. 2023 } else { 2024 const Type* elemtype = adr_type->isa_aryptr()->elem(); 2025 bt = elemtype->array_element_basic_type(); 2026 } 2027 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) { 2028 // Allocation initialization, ThreadLocal field access, unsafe access 2029 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN)) { 2030 bt = T_OBJECT; 2031 } 2032 } 2033 } 2034 return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY); 2035 } 2036 2037 // Returns unique pointed java object or NULL. 2038 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) { 2039 assert(!_collecting, "should not call when contructed graph"); 2040 // If the node was created after the escape computation we can't answer. 2041 uint idx = n->_idx; 2042 if (idx >= nodes_size()) { 2043 return NULL; 2044 } 2045 PointsToNode* ptn = ptnode_adr(idx); 2046 if (ptn->is_JavaObject()) { 2047 return ptn->as_JavaObject(); 2048 } 2049 assert(ptn->is_LocalVar(), "sanity"); 2050 // Check all java objects it points to. 2051 JavaObjectNode* jobj = NULL; 2052 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2053 PointsToNode* e = i.get(); 2054 if (e->is_JavaObject()) { 2055 if (jobj == NULL) { 2056 jobj = e->as_JavaObject(); 2057 } else if (jobj != e) { 2058 return NULL; 2059 } 2060 } 2061 } 2062 return jobj; 2063 } 2064 2065 // Return true if this node points only to non-escaping allocations. 2066 bool PointsToNode::non_escaping_allocation() { 2067 if (is_JavaObject()) { 2068 Node* n = ideal_node(); 2069 if (n->is_Allocate() || n->is_CallStaticJava()) { 2070 return (escape_state() == PointsToNode::NoEscape); 2071 } else { 2072 return false; 2073 } 2074 } 2075 assert(is_LocalVar(), "sanity"); 2076 // Check all java objects it points to. 2077 for (EdgeIterator i(this); i.has_next(); i.next()) { 2078 PointsToNode* e = i.get(); 2079 if (e->is_JavaObject()) { 2080 Node* n = e->ideal_node(); 2081 if ((e->escape_state() != PointsToNode::NoEscape) || 2082 !(n->is_Allocate() || n->is_CallStaticJava())) { 2083 return false; 2084 } 2085 } 2086 } 2087 return true; 2088 } 2089 2090 // Return true if we know the node does not escape globally. 2091 bool ConnectionGraph::not_global_escape(Node *n) { 2092 assert(!_collecting, "should not call during graph construction"); 2093 // If the node was created after the escape computation we can't answer. 2094 uint idx = n->_idx; 2095 if (idx >= nodes_size()) { 2096 return false; 2097 } 2098 PointsToNode* ptn = ptnode_adr(idx); 2099 PointsToNode::EscapeState es = ptn->escape_state(); 2100 // If we have already computed a value, return it. 2101 if (es >= PointsToNode::GlobalEscape) 2102 return false; 2103 if (ptn->is_JavaObject()) { 2104 return true; // (es < PointsToNode::GlobalEscape); 2105 } 2106 assert(ptn->is_LocalVar(), "sanity"); 2107 // Check all java objects it points to. 2108 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2109 if (i.get()->escape_state() >= PointsToNode::GlobalEscape) 2110 return false; 2111 } 2112 return true; 2113 } 2114 2115 2116 // Helper functions 2117 2118 // Return true if this node points to specified node or nodes it points to. 2119 bool PointsToNode::points_to(JavaObjectNode* ptn) const { 2120 if (is_JavaObject()) { 2121 return (this == ptn); 2122 } 2123 assert(is_LocalVar() || is_Field(), "sanity"); 2124 for (EdgeIterator i(this); i.has_next(); i.next()) { 2125 if (i.get() == ptn) 2126 return true; 2127 } 2128 return false; 2129 } 2130 2131 // Return true if one node points to an other. 2132 bool PointsToNode::meet(PointsToNode* ptn) { 2133 if (this == ptn) { 2134 return true; 2135 } else if (ptn->is_JavaObject()) { 2136 return this->points_to(ptn->as_JavaObject()); 2137 } else if (this->is_JavaObject()) { 2138 return ptn->points_to(this->as_JavaObject()); 2139 } 2140 assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity"); 2141 int ptn_count = ptn->edge_count(); 2142 for (EdgeIterator i(this); i.has_next(); i.next()) { 2143 PointsToNode* this_e = i.get(); 2144 for (int j = 0; j < ptn_count; j++) { 2145 if (this_e == ptn->edge(j)) 2146 return true; 2147 } 2148 } 2149 return false; 2150 } 2151 2152 #ifdef ASSERT 2153 // Return true if bases point to this java object. 2154 bool FieldNode::has_base(JavaObjectNode* jobj) const { 2155 for (BaseIterator i(this); i.has_next(); i.next()) { 2156 if (i.get() == jobj) 2157 return true; 2158 } 2159 return false; 2160 } 2161 #endif 2162 2163 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) { 2164 const Type *adr_type = phase->type(adr); 2165 if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && 2166 adr->in(AddPNode::Address)->is_Proj() && 2167 adr->in(AddPNode::Address)->in(0)->is_Allocate()) { 2168 // We are computing a raw address for a store captured by an Initialize 2169 // compute an appropriate address type. AddP cases #3 and #5 (see below). 2170 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 2171 assert(offs != Type::OffsetBot || 2172 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), 2173 "offset must be a constant or it is initialization of array"); 2174 return offs; 2175 } 2176 const TypePtr *t_ptr = adr_type->isa_ptr(); 2177 assert(t_ptr != NULL, "must be a pointer type"); 2178 return t_ptr->offset(); 2179 } 2180 2181 Node* ConnectionGraph::get_addp_base(Node *addp) { 2182 assert(addp->is_AddP(), "must be AddP"); 2183 // 2184 // AddP cases for Base and Address inputs: 2185 // case #1. Direct object's field reference: 2186 // Allocate 2187 // | 2188 // Proj #5 ( oop result ) 2189 // | 2190 // CheckCastPP (cast to instance type) 2191 // | | 2192 // AddP ( base == address ) 2193 // 2194 // case #2. Indirect object's field reference: 2195 // Phi 2196 // | 2197 // CastPP (cast to instance type) 2198 // | | 2199 // AddP ( base == address ) 2200 // 2201 // case #3. Raw object's field reference for Initialize node: 2202 // Allocate 2203 // | 2204 // Proj #5 ( oop result ) 2205 // top | 2206 // \ | 2207 // AddP ( base == top ) 2208 // 2209 // case #4. Array's element reference: 2210 // {CheckCastPP | CastPP} 2211 // | | | 2212 // | AddP ( array's element offset ) 2213 // | | 2214 // AddP ( array's offset ) 2215 // 2216 // case #5. Raw object's field reference for arraycopy stub call: 2217 // The inline_native_clone() case when the arraycopy stub is called 2218 // after the allocation before Initialize and CheckCastPP nodes. 2219 // Allocate 2220 // | 2221 // Proj #5 ( oop result ) 2222 // | | 2223 // AddP ( base == address ) 2224 // 2225 // case #6. Constant Pool, ThreadLocal, CastX2P or 2226 // Raw object's field reference: 2227 // {ConP, ThreadLocal, CastX2P, raw Load} 2228 // top | 2229 // \ | 2230 // AddP ( base == top ) 2231 // 2232 // case #7. Klass's field reference. 2233 // LoadKlass 2234 // | | 2235 // AddP ( base == address ) 2236 // 2237 // case #8. narrow Klass's field reference. 2238 // LoadNKlass 2239 // | 2240 // DecodeN 2241 // | | 2242 // AddP ( base == address ) 2243 // 2244 Node *base = addp->in(AddPNode::Base); 2245 if (base->uncast()->is_top()) { // The AddP case #3 and #6. 2246 base = addp->in(AddPNode::Address); 2247 while (base->is_AddP()) { 2248 // Case #6 (unsafe access) may have several chained AddP nodes. 2249 assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only"); 2250 base = base->in(AddPNode::Address); 2251 } 2252 Node* uncast_base = base->uncast(); 2253 int opcode = uncast_base->Opcode(); 2254 assert(opcode == Op_ConP || opcode == Op_ThreadLocal || 2255 opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() || 2256 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) || 2257 (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity"); 2258 } 2259 return base; 2260 } 2261 2262 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) { 2263 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes"); 2264 Node* addp2 = addp->raw_out(0); 2265 if (addp->outcnt() == 1 && addp2->is_AddP() && 2266 addp2->in(AddPNode::Base) == n && 2267 addp2->in(AddPNode::Address) == addp) { 2268 assert(addp->in(AddPNode::Base) == n, "expecting the same base"); 2269 // 2270 // Find array's offset to push it on worklist first and 2271 // as result process an array's element offset first (pushed second) 2272 // to avoid CastPP for the array's offset. 2273 // Otherwise the inserted CastPP (LocalVar) will point to what 2274 // the AddP (Field) points to. Which would be wrong since 2275 // the algorithm expects the CastPP has the same point as 2276 // as AddP's base CheckCastPP (LocalVar). 2277 // 2278 // ArrayAllocation 2279 // | 2280 // CheckCastPP 2281 // | 2282 // memProj (from ArrayAllocation CheckCastPP) 2283 // | || 2284 // | || Int (element index) 2285 // | || | ConI (log(element size)) 2286 // | || | / 2287 // | || LShift 2288 // | || / 2289 // | AddP (array's element offset) 2290 // | | 2291 // | | ConI (array's offset: #12(32-bits) or #24(64-bits)) 2292 // | / / 2293 // AddP (array's offset) 2294 // | 2295 // Load/Store (memory operation on array's element) 2296 // 2297 return addp2; 2298 } 2299 return NULL; 2300 } 2301 2302 // 2303 // Adjust the type and inputs of an AddP which computes the 2304 // address of a field of an instance 2305 // 2306 bool ConnectionGraph::split_AddP(Node *addp, Node *base) { 2307 PhaseGVN* igvn = _igvn; 2308 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr(); 2309 assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr"); 2310 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr(); 2311 if (t == NULL) { 2312 // We are computing a raw address for a store captured by an Initialize 2313 // compute an appropriate address type (cases #3 and #5). 2314 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer"); 2315 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation"); 2316 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot); 2317 assert(offs != Type::OffsetBot, "offset must be a constant"); 2318 t = base_t->add_offset(offs)->is_oopptr(); 2319 } 2320 int inst_id = base_t->instance_id(); 2321 assert(!t->is_known_instance() || t->instance_id() == inst_id, 2322 "old type must be non-instance or match new type"); 2323 2324 // The type 't' could be subclass of 'base_t'. 2325 // As result t->offset() could be large then base_t's size and it will 2326 // cause the failure in add_offset() with narrow oops since TypeOopPtr() 2327 // constructor verifies correctness of the offset. 2328 // 2329 // It could happened on subclass's branch (from the type profiling 2330 // inlining) which was not eliminated during parsing since the exactness 2331 // of the allocation type was not propagated to the subclass type check. 2332 // 2333 // Or the type 't' could be not related to 'base_t' at all. 2334 // It could happened when CHA type is different from MDO type on a dead path 2335 // (for example, from instanceof check) which is not collapsed during parsing. 2336 // 2337 // Do nothing for such AddP node and don't process its users since 2338 // this code branch will go away. 2339 // 2340 if (!t->is_known_instance() && 2341 !base_t->klass()->is_subtype_of(t->klass())) { 2342 return false; // bail out 2343 } 2344 const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr(); 2345 // Do NOT remove the next line: ensure a new alias index is allocated 2346 // for the instance type. Note: C++ will not remove it since the call 2347 // has side effect. 2348 int alias_idx = _compile->get_alias_index(tinst); 2349 igvn->set_type(addp, tinst); 2350 // record the allocation in the node map 2351 set_map(addp, get_map(base->_idx)); 2352 // Set addp's Base and Address to 'base'. 2353 Node *abase = addp->in(AddPNode::Base); 2354 Node *adr = addp->in(AddPNode::Address); 2355 if (adr->is_Proj() && adr->in(0)->is_Allocate() && 2356 adr->in(0)->_idx == (uint)inst_id) { 2357 // Skip AddP cases #3 and #5. 2358 } else { 2359 assert(!abase->is_top(), "sanity"); // AddP case #3 2360 if (abase != base) { 2361 igvn->hash_delete(addp); 2362 addp->set_req(AddPNode::Base, base); 2363 if (abase == adr) { 2364 addp->set_req(AddPNode::Address, base); 2365 } else { 2366 // AddP case #4 (adr is array's element offset AddP node) 2367 #ifdef ASSERT 2368 const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr(); 2369 assert(adr->is_AddP() && atype != NULL && 2370 atype->instance_id() == inst_id, "array's element offset should be processed first"); 2371 #endif 2372 } 2373 igvn->hash_insert(addp); 2374 } 2375 } 2376 // Put on IGVN worklist since at least addp's type was changed above. 2377 record_for_optimizer(addp); 2378 return true; 2379 } 2380 2381 // 2382 // Create a new version of orig_phi if necessary. Returns either the newly 2383 // created phi or an existing phi. Sets create_new to indicate whether a new 2384 // phi was created. Cache the last newly created phi in the node map. 2385 // 2386 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) { 2387 Compile *C = _compile; 2388 PhaseGVN* igvn = _igvn; 2389 new_created = false; 2390 int phi_alias_idx = C->get_alias_index(orig_phi->adr_type()); 2391 // nothing to do if orig_phi is bottom memory or matches alias_idx 2392 if (phi_alias_idx == alias_idx) { 2393 return orig_phi; 2394 } 2395 // Have we recently created a Phi for this alias index? 2396 PhiNode *result = get_map_phi(orig_phi->_idx); 2397 if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) { 2398 return result; 2399 } 2400 // Previous check may fail when the same wide memory Phi was split into Phis 2401 // for different memory slices. Search all Phis for this region. 2402 if (result != NULL) { 2403 Node* region = orig_phi->in(0); 2404 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { 2405 Node* phi = region->fast_out(i); 2406 if (phi->is_Phi() && 2407 C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) { 2408 assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice"); 2409 return phi->as_Phi(); 2410 } 2411 } 2412 } 2413 if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) { 2414 if (C->do_escape_analysis() == true && !C->failing()) { 2415 // Retry compilation without escape analysis. 2416 // If this is the first failure, the sentinel string will "stick" 2417 // to the Compile object, and the C2Compiler will see it and retry. 2418 C->record_failure(C2Compiler::retry_no_escape_analysis()); 2419 } 2420 return NULL; 2421 } 2422 orig_phi_worklist.append_if_missing(orig_phi); 2423 const TypePtr *atype = C->get_adr_type(alias_idx); 2424 result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype); 2425 C->copy_node_notes_to(result, orig_phi); 2426 igvn->set_type(result, result->bottom_type()); 2427 record_for_optimizer(result); 2428 set_map(orig_phi, result); 2429 new_created = true; 2430 return result; 2431 } 2432 2433 // 2434 // Return a new version of Memory Phi "orig_phi" with the inputs having the 2435 // specified alias index. 2436 // 2437 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) { 2438 assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory"); 2439 Compile *C = _compile; 2440 PhaseGVN* igvn = _igvn; 2441 bool new_phi_created; 2442 PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created); 2443 if (!new_phi_created) { 2444 return result; 2445 } 2446 GrowableArray<PhiNode *> phi_list; 2447 GrowableArray<uint> cur_input; 2448 PhiNode *phi = orig_phi; 2449 uint idx = 1; 2450 bool finished = false; 2451 while(!finished) { 2452 while (idx < phi->req()) { 2453 Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist); 2454 if (mem != NULL && mem->is_Phi()) { 2455 PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created); 2456 if (new_phi_created) { 2457 // found an phi for which we created a new split, push current one on worklist and begin 2458 // processing new one 2459 phi_list.push(phi); 2460 cur_input.push(idx); 2461 phi = mem->as_Phi(); 2462 result = newphi; 2463 idx = 1; 2464 continue; 2465 } else { 2466 mem = newphi; 2467 } 2468 } 2469 if (C->failing()) { 2470 return NULL; 2471 } 2472 result->set_req(idx++, mem); 2473 } 2474 #ifdef ASSERT 2475 // verify that the new Phi has an input for each input of the original 2476 assert( phi->req() == result->req(), "must have same number of inputs."); 2477 assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match"); 2478 #endif 2479 // Check if all new phi's inputs have specified alias index. 2480 // Otherwise use old phi. 2481 for (uint i = 1; i < phi->req(); i++) { 2482 Node* in = result->in(i); 2483 assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond."); 2484 } 2485 // we have finished processing a Phi, see if there are any more to do 2486 finished = (phi_list.length() == 0 ); 2487 if (!finished) { 2488 phi = phi_list.pop(); 2489 idx = cur_input.pop(); 2490 PhiNode *prev_result = get_map_phi(phi->_idx); 2491 prev_result->set_req(idx++, result); 2492 result = prev_result; 2493 } 2494 } 2495 return result; 2496 } 2497 2498 // 2499 // The next methods are derived from methods in MemNode. 2500 // 2501 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) { 2502 Node *mem = mmem; 2503 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally 2504 // means an array I have not precisely typed yet. Do not do any 2505 // alias stuff with it any time soon. 2506 if (toop->base() != Type::AnyPtr && 2507 !(toop->klass() != NULL && 2508 toop->klass()->is_java_lang_Object() && 2509 toop->offset() == Type::OffsetBot)) { 2510 mem = mmem->memory_at(alias_idx); 2511 // Update input if it is progress over what we have now 2512 } 2513 return mem; 2514 } 2515 2516 // 2517 // Move memory users to their memory slices. 2518 // 2519 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) { 2520 Compile* C = _compile; 2521 PhaseGVN* igvn = _igvn; 2522 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 2523 assert(tp != NULL, "ptr type"); 2524 int alias_idx = C->get_alias_index(tp); 2525 int general_idx = C->get_general_index(alias_idx); 2526 2527 // Move users first 2528 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2529 Node* use = n->fast_out(i); 2530 if (use->is_MergeMem()) { 2531 MergeMemNode* mmem = use->as_MergeMem(); 2532 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); 2533 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 2534 continue; // Nothing to do 2535 } 2536 // Replace previous general reference to mem node. 2537 uint orig_uniq = C->unique(); 2538 Node* m = find_inst_mem(n, general_idx, orig_phis); 2539 assert(orig_uniq == C->unique(), "no new nodes"); 2540 mmem->set_memory_at(general_idx, m); 2541 --imax; 2542 --i; 2543 } else if (use->is_MemBar()) { 2544 assert(!use->is_Initialize(), "initializing stores should not be moved"); 2545 if (use->req() > MemBarNode::Precedent && 2546 use->in(MemBarNode::Precedent) == n) { 2547 // Don't move related membars. 2548 record_for_optimizer(use); 2549 continue; 2550 } 2551 tp = use->as_MemBar()->adr_type()->isa_ptr(); 2552 if (tp != NULL && C->get_alias_index(tp) == alias_idx || 2553 alias_idx == general_idx) { 2554 continue; // Nothing to do 2555 } 2556 // Move to general memory slice. 2557 uint orig_uniq = C->unique(); 2558 Node* m = find_inst_mem(n, general_idx, orig_phis); 2559 assert(orig_uniq == C->unique(), "no new nodes"); 2560 igvn->hash_delete(use); 2561 imax -= use->replace_edge(n, m); 2562 igvn->hash_insert(use); 2563 record_for_optimizer(use); 2564 --i; 2565 #ifdef ASSERT 2566 } else if (use->is_Mem()) { 2567 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) { 2568 // Don't move related cardmark. 2569 continue; 2570 } 2571 // Memory nodes should have new memory input. 2572 tp = igvn->type(use->in(MemNode::Address))->isa_ptr(); 2573 assert(tp != NULL, "ptr type"); 2574 int idx = C->get_alias_index(tp); 2575 assert(get_map(use->_idx) != NULL || idx == alias_idx, 2576 "Following memory nodes should have new memory input or be on the same memory slice"); 2577 } else if (use->is_Phi()) { 2578 // Phi nodes should be split and moved already. 2579 tp = use->as_Phi()->adr_type()->isa_ptr(); 2580 assert(tp != NULL, "ptr type"); 2581 int idx = C->get_alias_index(tp); 2582 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice"); 2583 } else { 2584 use->dump(); 2585 assert(false, "should not be here"); 2586 #endif 2587 } 2588 } 2589 } 2590 2591 // 2592 // Search memory chain of "mem" to find a MemNode whose address 2593 // is the specified alias index. 2594 // 2595 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) { 2596 if (orig_mem == NULL) 2597 return orig_mem; 2598 Compile* C = _compile; 2599 PhaseGVN* igvn = _igvn; 2600 const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr(); 2601 bool is_instance = (toop != NULL) && toop->is_known_instance(); 2602 Node *start_mem = C->start()->proj_out(TypeFunc::Memory); 2603 Node *prev = NULL; 2604 Node *result = orig_mem; 2605 while (prev != result) { 2606 prev = result; 2607 if (result == start_mem) 2608 break; // hit one of our sentinels 2609 if (result->is_Mem()) { 2610 const Type *at = igvn->type(result->in(MemNode::Address)); 2611 if (at == Type::TOP) 2612 break; // Dead 2613 assert (at->isa_ptr() != NULL, "pointer type required."); 2614 int idx = C->get_alias_index(at->is_ptr()); 2615 if (idx == alias_idx) 2616 break; // Found 2617 if (!is_instance && (at->isa_oopptr() == NULL || 2618 !at->is_oopptr()->is_known_instance())) { 2619 break; // Do not skip store to general memory slice. 2620 } 2621 result = result->in(MemNode::Memory); 2622 } 2623 if (!is_instance) 2624 continue; // don't search further for non-instance types 2625 // skip over a call which does not affect this memory slice 2626 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) { 2627 Node *proj_in = result->in(0); 2628 if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) { 2629 break; // hit one of our sentinels 2630 } else if (proj_in->is_Call()) { 2631 CallNode *call = proj_in->as_Call(); 2632 if (!call->may_modify(toop, igvn)) { 2633 result = call->in(TypeFunc::Memory); 2634 } 2635 } else if (proj_in->is_Initialize()) { 2636 AllocateNode* alloc = proj_in->as_Initialize()->allocation(); 2637 // Stop if this is the initialization for the object instance which 2638 // which contains this memory slice, otherwise skip over it. 2639 if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) { 2640 result = proj_in->in(TypeFunc::Memory); 2641 } 2642 } else if (proj_in->is_MemBar()) { 2643 result = proj_in->in(TypeFunc::Memory); 2644 } 2645 } else if (result->is_MergeMem()) { 2646 MergeMemNode *mmem = result->as_MergeMem(); 2647 result = step_through_mergemem(mmem, alias_idx, toop); 2648 if (result == mmem->base_memory()) { 2649 // Didn't find instance memory, search through general slice recursively. 2650 result = mmem->memory_at(C->get_general_index(alias_idx)); 2651 result = find_inst_mem(result, alias_idx, orig_phis); 2652 if (C->failing()) { 2653 return NULL; 2654 } 2655 mmem->set_memory_at(alias_idx, result); 2656 } 2657 } else if (result->is_Phi() && 2658 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { 2659 Node *un = result->as_Phi()->unique_input(igvn); 2660 if (un != NULL) { 2661 orig_phis.append_if_missing(result->as_Phi()); 2662 result = un; 2663 } else { 2664 break; 2665 } 2666 } else if (result->is_ClearArray()) { 2667 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) { 2668 // Can not bypass initialization of the instance 2669 // we are looking for. 2670 break; 2671 } 2672 // Otherwise skip it (the call updated 'result' value). 2673 } else if (result->Opcode() == Op_SCMemProj) { 2674 Node* mem = result->in(0); 2675 Node* adr = NULL; 2676 if (mem->is_LoadStore()) { 2677 adr = mem->in(MemNode::Address); 2678 } else { 2679 assert(mem->Opcode() == Op_EncodeISOArray, "sanity"); 2680 adr = mem->in(3); // Memory edge corresponds to destination array 2681 } 2682 const Type *at = igvn->type(adr); 2683 if (at != Type::TOP) { 2684 assert (at->isa_ptr() != NULL, "pointer type required."); 2685 int idx = C->get_alias_index(at->is_ptr()); 2686 assert(idx != alias_idx, "Object is not scalar replaceable if a LoadStore node access its field"); 2687 break; 2688 } 2689 result = mem->in(MemNode::Memory); 2690 } 2691 } 2692 if (result->is_Phi()) { 2693 PhiNode *mphi = result->as_Phi(); 2694 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); 2695 const TypePtr *t = mphi->adr_type(); 2696 if (!is_instance) { 2697 // Push all non-instance Phis on the orig_phis worklist to update inputs 2698 // during Phase 4 if needed. 2699 orig_phis.append_if_missing(mphi); 2700 } else if (C->get_alias_index(t) != alias_idx) { 2701 // Create a new Phi with the specified alias index type. 2702 result = split_memory_phi(mphi, alias_idx, orig_phis); 2703 } 2704 } 2705 // the result is either MemNode, PhiNode, InitializeNode. 2706 return result; 2707 } 2708 2709 // 2710 // Convert the types of unescaped object to instance types where possible, 2711 // propagate the new type information through the graph, and update memory 2712 // edges and MergeMem inputs to reflect the new type. 2713 // 2714 // We start with allocations (and calls which may be allocations) on alloc_worklist. 2715 // The processing is done in 4 phases: 2716 // 2717 // Phase 1: Process possible allocations from alloc_worklist. Create instance 2718 // types for the CheckCastPP for allocations where possible. 2719 // Propagate the the new types through users as follows: 2720 // casts and Phi: push users on alloc_worklist 2721 // AddP: cast Base and Address inputs to the instance type 2722 // push any AddP users on alloc_worklist and push any memnode 2723 // users onto memnode_worklist. 2724 // Phase 2: Process MemNode's from memnode_worklist. compute new address type and 2725 // search the Memory chain for a store with the appropriate type 2726 // address type. If a Phi is found, create a new version with 2727 // the appropriate memory slices from each of the Phi inputs. 2728 // For stores, process the users as follows: 2729 // MemNode: push on memnode_worklist 2730 // MergeMem: push on mergemem_worklist 2731 // Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice 2732 // moving the first node encountered of each instance type to the 2733 // the input corresponding to its alias index. 2734 // appropriate memory slice. 2735 // Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes. 2736 // 2737 // In the following example, the CheckCastPP nodes are the cast of allocation 2738 // results and the allocation of node 29 is unescaped and eligible to be an 2739 // instance type. 2740 // 2741 // We start with: 2742 // 2743 // 7 Parm #memory 2744 // 10 ConI "12" 2745 // 19 CheckCastPP "Foo" 2746 // 20 AddP _ 19 19 10 Foo+12 alias_index=4 2747 // 29 CheckCastPP "Foo" 2748 // 30 AddP _ 29 29 10 Foo+12 alias_index=4 2749 // 2750 // 40 StoreP 25 7 20 ... alias_index=4 2751 // 50 StoreP 35 40 30 ... alias_index=4 2752 // 60 StoreP 45 50 20 ... alias_index=4 2753 // 70 LoadP _ 60 30 ... alias_index=4 2754 // 80 Phi 75 50 60 Memory alias_index=4 2755 // 90 LoadP _ 80 30 ... alias_index=4 2756 // 100 LoadP _ 80 20 ... alias_index=4 2757 // 2758 // 2759 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24 2760 // and creating a new alias index for node 30. This gives: 2761 // 2762 // 7 Parm #memory 2763 // 10 ConI "12" 2764 // 19 CheckCastPP "Foo" 2765 // 20 AddP _ 19 19 10 Foo+12 alias_index=4 2766 // 29 CheckCastPP "Foo" iid=24 2767 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24 2768 // 2769 // 40 StoreP 25 7 20 ... alias_index=4 2770 // 50 StoreP 35 40 30 ... alias_index=6 2771 // 60 StoreP 45 50 20 ... alias_index=4 2772 // 70 LoadP _ 60 30 ... alias_index=6 2773 // 80 Phi 75 50 60 Memory alias_index=4 2774 // 90 LoadP _ 80 30 ... alias_index=6 2775 // 100 LoadP _ 80 20 ... alias_index=4 2776 // 2777 // In phase 2, new memory inputs are computed for the loads and stores, 2778 // And a new version of the phi is created. In phase 4, the inputs to 2779 // node 80 are updated and then the memory nodes are updated with the 2780 // values computed in phase 2. This results in: 2781 // 2782 // 7 Parm #memory 2783 // 10 ConI "12" 2784 // 19 CheckCastPP "Foo" 2785 // 20 AddP _ 19 19 10 Foo+12 alias_index=4 2786 // 29 CheckCastPP "Foo" iid=24 2787 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24 2788 // 2789 // 40 StoreP 25 7 20 ... alias_index=4 2790 // 50 StoreP 35 7 30 ... alias_index=6 2791 // 60 StoreP 45 40 20 ... alias_index=4 2792 // 70 LoadP _ 50 30 ... alias_index=6 2793 // 80 Phi 75 40 60 Memory alias_index=4 2794 // 120 Phi 75 50 50 Memory alias_index=6 2795 // 90 LoadP _ 120 30 ... alias_index=6 2796 // 100 LoadP _ 80 20 ... alias_index=4 2797 // 2798 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) { 2799 GrowableArray<Node *> memnode_worklist; 2800 GrowableArray<PhiNode *> orig_phis; 2801 PhaseIterGVN *igvn = _igvn; 2802 uint new_index_start = (uint) _compile->num_alias_types(); 2803 Arena* arena = Thread::current()->resource_area(); 2804 VectorSet visited(arena); 2805 ideal_nodes.clear(); // Reset for use with set_map/get_map. 2806 uint unique_old = _compile->unique(); 2807 2808 // Phase 1: Process possible allocations from alloc_worklist. 2809 // Create instance types for the CheckCastPP for allocations where possible. 2810 // 2811 // (Note: don't forget to change the order of the second AddP node on 2812 // the alloc_worklist if the order of the worklist processing is changed, 2813 // see the comment in find_second_addp().) 2814 // 2815 while (alloc_worklist.length() != 0) { 2816 Node *n = alloc_worklist.pop(); 2817 uint ni = n->_idx; 2818 if (n->is_Call()) { 2819 CallNode *alloc = n->as_Call(); 2820 // copy escape information to call node 2821 PointsToNode* ptn = ptnode_adr(alloc->_idx); 2822 PointsToNode::EscapeState es = ptn->escape_state(); 2823 // We have an allocation or call which returns a Java object, 2824 // see if it is unescaped. 2825 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) 2826 continue; 2827 // Find CheckCastPP for the allocate or for the return value of a call 2828 n = alloc->result_cast(); 2829 if (n == NULL) { // No uses except Initialize node 2830 if (alloc->is_Allocate()) { 2831 // Set the scalar_replaceable flag for allocation 2832 // so it could be eliminated if it has no uses. 2833 alloc->as_Allocate()->_is_scalar_replaceable = true; 2834 } 2835 if (alloc->is_CallStaticJava()) { 2836 // Set the scalar_replaceable flag for boxing method 2837 // so it could be eliminated if it has no uses. 2838 alloc->as_CallStaticJava()->_is_scalar_replaceable = true; 2839 } 2840 continue; 2841 } 2842 if (!n->is_CheckCastPP()) { // not unique CheckCastPP. 2843 assert(!alloc->is_Allocate(), "allocation should have unique type"); 2844 continue; 2845 } 2846 2847 // The inline code for Object.clone() casts the allocation result to 2848 // java.lang.Object and then to the actual type of the allocated 2849 // object. Detect this case and use the second cast. 2850 // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when 2851 // the allocation result is cast to java.lang.Object and then 2852 // to the actual Array type. 2853 if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL 2854 && (alloc->is_AllocateArray() || 2855 igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) { 2856 Node *cast2 = NULL; 2857 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2858 Node *use = n->fast_out(i); 2859 if (use->is_CheckCastPP()) { 2860 cast2 = use; 2861 break; 2862 } 2863 } 2864 if (cast2 != NULL) { 2865 n = cast2; 2866 } else { 2867 // Non-scalar replaceable if the allocation type is unknown statically 2868 // (reflection allocation), the object can't be restored during 2869 // deoptimization without precise type. 2870 continue; 2871 } 2872 } 2873 2874 const TypeOopPtr *t = igvn->type(n)->isa_oopptr(); 2875 if (t == NULL) 2876 continue; // not a TypeOopPtr 2877 if (!t->klass_is_exact()) 2878 continue; // not an unique type 2879 2880 if (alloc->is_Allocate()) { 2881 // Set the scalar_replaceable flag for allocation 2882 // so it could be eliminated. 2883 alloc->as_Allocate()->_is_scalar_replaceable = true; 2884 } 2885 if (alloc->is_CallStaticJava()) { 2886 // Set the scalar_replaceable flag for boxing method 2887 // so it could be eliminated. 2888 alloc->as_CallStaticJava()->_is_scalar_replaceable = true; 2889 } 2890 set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state 2891 // in order for an object to be scalar-replaceable, it must be: 2892 // - a direct allocation (not a call returning an object) 2893 // - non-escaping 2894 // - eligible to be a unique type 2895 // - not determined to be ineligible by escape analysis 2896 set_map(alloc, n); 2897 set_map(n, alloc); 2898 const TypeOopPtr* tinst = t->cast_to_instance_id(ni); 2899 igvn->hash_delete(n); 2900 igvn->set_type(n, tinst); 2901 n->raise_bottom_type(tinst); 2902 igvn->hash_insert(n); 2903 record_for_optimizer(n); 2904 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) { 2905 2906 // First, put on the worklist all Field edges from Connection Graph 2907 // which is more accurate then putting immediate users from Ideal Graph. 2908 for (EdgeIterator e(ptn); e.has_next(); e.next()) { 2909 PointsToNode* tgt = e.get(); 2910 Node* use = tgt->ideal_node(); 2911 assert(tgt->is_Field() && use->is_AddP(), 2912 "only AddP nodes are Field edges in CG"); 2913 if (use->outcnt() > 0) { // Don't process dead nodes 2914 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base)); 2915 if (addp2 != NULL) { 2916 assert(alloc->is_AllocateArray(),"array allocation was expected"); 2917 alloc_worklist.append_if_missing(addp2); 2918 } 2919 alloc_worklist.append_if_missing(use); 2920 } 2921 } 2922 2923 // An allocation may have an Initialize which has raw stores. Scan 2924 // the users of the raw allocation result and push AddP users 2925 // on alloc_worklist. 2926 Node *raw_result = alloc->proj_out(TypeFunc::Parms); 2927 assert (raw_result != NULL, "must have an allocation result"); 2928 for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) { 2929 Node *use = raw_result->fast_out(i); 2930 if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes 2931 Node* addp2 = find_second_addp(use, raw_result); 2932 if (addp2 != NULL) { 2933 assert(alloc->is_AllocateArray(),"array allocation was expected"); 2934 alloc_worklist.append_if_missing(addp2); 2935 } 2936 alloc_worklist.append_if_missing(use); 2937 } else if (use->is_MemBar()) { 2938 memnode_worklist.append_if_missing(use); 2939 } 2940 } 2941 } 2942 } else if (n->is_AddP()) { 2943 JavaObjectNode* jobj = unique_java_object(get_addp_base(n)); 2944 if (jobj == NULL || jobj == phantom_obj) { 2945 #ifdef ASSERT 2946 ptnode_adr(get_addp_base(n)->_idx)->dump(); 2947 ptnode_adr(n->_idx)->dump(); 2948 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation"); 2949 #endif 2950 _compile->record_failure(C2Compiler::retry_no_escape_analysis()); 2951 return; 2952 } 2953 Node *base = get_map(jobj->idx()); // CheckCastPP node 2954 if (!split_AddP(n, base)) continue; // wrong type from dead path 2955 } else if (n->is_Phi() || 2956 n->is_CheckCastPP() || 2957 n->is_EncodeP() || 2958 n->is_DecodeN() || 2959 (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) { 2960 if (visited.test_set(n->_idx)) { 2961 assert(n->is_Phi(), "loops only through Phi's"); 2962 continue; // already processed 2963 } 2964 JavaObjectNode* jobj = unique_java_object(n); 2965 if (jobj == NULL || jobj == phantom_obj) { 2966 #ifdef ASSERT 2967 ptnode_adr(n->_idx)->dump(); 2968 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation"); 2969 #endif 2970 _compile->record_failure(C2Compiler::retry_no_escape_analysis()); 2971 return; 2972 } else { 2973 Node *val = get_map(jobj->idx()); // CheckCastPP node 2974 TypeNode *tn = n->as_Type(); 2975 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr(); 2976 assert(tinst != NULL && tinst->is_known_instance() && 2977 tinst->instance_id() == jobj->idx() , "instance type expected."); 2978 2979 const Type *tn_type = igvn->type(tn); 2980 const TypeOopPtr *tn_t; 2981 if (tn_type->isa_narrowoop()) { 2982 tn_t = tn_type->make_ptr()->isa_oopptr(); 2983 } else { 2984 tn_t = tn_type->isa_oopptr(); 2985 } 2986 if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) { 2987 if (tn_type->isa_narrowoop()) { 2988 tn_type = tinst->make_narrowoop(); 2989 } else { 2990 tn_type = tinst; 2991 } 2992 igvn->hash_delete(tn); 2993 igvn->set_type(tn, tn_type); 2994 tn->set_type(tn_type); 2995 igvn->hash_insert(tn); 2996 record_for_optimizer(n); 2997 } else { 2998 assert(tn_type == TypePtr::NULL_PTR || 2999 tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()), 3000 "unexpected type"); 3001 continue; // Skip dead path with different type 3002 } 3003 } 3004 } else { 3005 debug_only(n->dump();) 3006 assert(false, "EA: unexpected node"); 3007 continue; 3008 } 3009 // push allocation's users on appropriate worklist 3010 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3011 Node *use = n->fast_out(i); 3012 if(use->is_Mem() && use->in(MemNode::Address) == n) { 3013 // Load/store to instance's field 3014 memnode_worklist.append_if_missing(use); 3015 } else if (use->is_MemBar()) { 3016 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge 3017 memnode_worklist.append_if_missing(use); 3018 } 3019 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes 3020 Node* addp2 = find_second_addp(use, n); 3021 if (addp2 != NULL) { 3022 alloc_worklist.append_if_missing(addp2); 3023 } 3024 alloc_worklist.append_if_missing(use); 3025 } else if (use->is_Phi() || 3026 use->is_CheckCastPP() || 3027 use->is_EncodeNarrowPtr() || 3028 use->is_DecodeNarrowPtr() || 3029 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) { 3030 alloc_worklist.append_if_missing(use); 3031 #ifdef ASSERT 3032 } else if (use->is_Mem()) { 3033 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path"); 3034 } else if (use->is_MergeMem()) { 3035 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 3036 } else if (use->is_SafePoint()) { 3037 // Look for MergeMem nodes for calls which reference unique allocation 3038 // (through CheckCastPP nodes) even for debug info. 3039 Node* m = use->in(TypeFunc::Memory); 3040 if (m->is_MergeMem()) { 3041 assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 3042 } 3043 } else if (use->Opcode() == Op_EncodeISOArray) { 3044 if (use->in(MemNode::Memory) == n || use->in(3) == n) { 3045 // EncodeISOArray overwrites destination array 3046 memnode_worklist.append_if_missing(use); 3047 } 3048 } else { 3049 uint op = use->Opcode(); 3050 if (!(op == Op_CmpP || op == Op_Conv2B || 3051 op == Op_CastP2X || op == Op_StoreCM || 3052 op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || 3053 op == Op_StrEquals || op == Op_StrIndexOf)) { 3054 n->dump(); 3055 use->dump(); 3056 assert(false, "EA: missing allocation reference path"); 3057 } 3058 #endif 3059 } 3060 } 3061 3062 } 3063 // New alias types were created in split_AddP(). 3064 uint new_index_end = (uint) _compile->num_alias_types(); 3065 assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1"); 3066 3067 // Phase 2: Process MemNode's from memnode_worklist. compute new address type and 3068 // compute new values for Memory inputs (the Memory inputs are not 3069 // actually updated until phase 4.) 3070 if (memnode_worklist.length() == 0) 3071 return; // nothing to do 3072 while (memnode_worklist.length() != 0) { 3073 Node *n = memnode_worklist.pop(); 3074 if (visited.test_set(n->_idx)) 3075 continue; 3076 if (n->is_Phi() || n->is_ClearArray()) { 3077 // we don't need to do anything, but the users must be pushed 3078 } else if (n->is_MemBar()) { // Initialize, MemBar nodes 3079 // we don't need to do anything, but the users must be pushed 3080 n = n->as_MemBar()->proj_out(TypeFunc::Memory); 3081 if (n == NULL) 3082 continue; 3083 } else if (n->Opcode() == Op_EncodeISOArray) { 3084 // get the memory projection 3085 n = n->find_out_with(Op_SCMemProj); 3086 assert(n->Opcode() == Op_SCMemProj, "memory projection required"); 3087 } else { 3088 assert(n->is_Mem(), "memory node required."); 3089 Node *addr = n->in(MemNode::Address); 3090 const Type *addr_t = igvn->type(addr); 3091 if (addr_t == Type::TOP) 3092 continue; 3093 assert (addr_t->isa_ptr() != NULL, "pointer type required."); 3094 int alias_idx = _compile->get_alias_index(addr_t->is_ptr()); 3095 assert ((uint)alias_idx < new_index_end, "wrong alias index"); 3096 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis); 3097 if (_compile->failing()) { 3098 return; 3099 } 3100 if (mem != n->in(MemNode::Memory)) { 3101 // We delay the memory edge update since we need old one in 3102 // MergeMem code below when instances memory slices are separated. 3103 set_map(n, mem); 3104 } 3105 if (n->is_Load()) { 3106 continue; // don't push users 3107 } else if (n->is_LoadStore()) { 3108 // get the memory projection 3109 n = n->find_out_with(Op_SCMemProj); 3110 assert(n->Opcode() == Op_SCMemProj, "memory projection required"); 3111 } 3112 } 3113 // push user on appropriate worklist 3114 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3115 Node *use = n->fast_out(i); 3116 if (use->is_Phi() || use->is_ClearArray()) { 3117 memnode_worklist.append_if_missing(use); 3118 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) { 3119 if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores 3120 continue; 3121 memnode_worklist.append_if_missing(use); 3122 } else if (use->is_MemBar()) { 3123 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge 3124 memnode_worklist.append_if_missing(use); 3125 } 3126 #ifdef ASSERT 3127 } else if(use->is_Mem()) { 3128 assert(use->in(MemNode::Memory) != n, "EA: missing memory path"); 3129 } else if (use->is_MergeMem()) { 3130 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 3131 } else if (use->Opcode() == Op_EncodeISOArray) { 3132 if (use->in(MemNode::Memory) == n || use->in(3) == n) { 3133 // EncodeISOArray overwrites destination array 3134 memnode_worklist.append_if_missing(use); 3135 } 3136 } else { 3137 uint op = use->Opcode(); 3138 if (!(op == Op_StoreCM || 3139 (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL && 3140 strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) || 3141 op == Op_AryEq || op == Op_StrComp || 3142 op == Op_StrEquals || op == Op_StrIndexOf)) { 3143 n->dump(); 3144 use->dump(); 3145 assert(false, "EA: missing memory path"); 3146 } 3147 #endif 3148 } 3149 } 3150 } 3151 3152 // Phase 3: Process MergeMem nodes from mergemem_worklist. 3153 // Walk each memory slice moving the first node encountered of each 3154 // instance type to the the input corresponding to its alias index. 3155 uint length = _mergemem_worklist.length(); 3156 for( uint next = 0; next < length; ++next ) { 3157 MergeMemNode* nmm = _mergemem_worklist.at(next); 3158 assert(!visited.test_set(nmm->_idx), "should not be visited before"); 3159 // Note: we don't want to use MergeMemStream here because we only want to 3160 // scan inputs which exist at the start, not ones we add during processing. 3161 // Note 2: MergeMem may already contains instance memory slices added 3162 // during find_inst_mem() call when memory nodes were processed above. 3163 igvn->hash_delete(nmm); 3164 uint nslices = nmm->req(); 3165 for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) { 3166 Node* mem = nmm->in(i); 3167 Node* cur = NULL; 3168 if (mem == NULL || mem->is_top()) 3169 continue; 3170 // First, update mergemem by moving memory nodes to corresponding slices 3171 // if their type became more precise since this mergemem was created. 3172 while (mem->is_Mem()) { 3173 const Type *at = igvn->type(mem->in(MemNode::Address)); 3174 if (at != Type::TOP) { 3175 assert (at->isa_ptr() != NULL, "pointer type required."); 3176 uint idx = (uint)_compile->get_alias_index(at->is_ptr()); 3177 if (idx == i) { 3178 if (cur == NULL) 3179 cur = mem; 3180 } else { 3181 if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) { 3182 nmm->set_memory_at(idx, mem); 3183 } 3184 } 3185 } 3186 mem = mem->in(MemNode::Memory); 3187 } 3188 nmm->set_memory_at(i, (cur != NULL) ? cur : mem); 3189 // Find any instance of the current type if we haven't encountered 3190 // already a memory slice of the instance along the memory chain. 3191 for (uint ni = new_index_start; ni < new_index_end; ni++) { 3192 if((uint)_compile->get_general_index(ni) == i) { 3193 Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni); 3194 if (nmm->is_empty_memory(m)) { 3195 Node* result = find_inst_mem(mem, ni, orig_phis); 3196 if (_compile->failing()) { 3197 return; 3198 } 3199 nmm->set_memory_at(ni, result); 3200 } 3201 } 3202 } 3203 } 3204 // Find the rest of instances values 3205 for (uint ni = new_index_start; ni < new_index_end; ni++) { 3206 const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr(); 3207 Node* result = step_through_mergemem(nmm, ni, tinst); 3208 if (result == nmm->base_memory()) { 3209 // Didn't find instance memory, search through general slice recursively. 3210 result = nmm->memory_at(_compile->get_general_index(ni)); 3211 result = find_inst_mem(result, ni, orig_phis); 3212 if (_compile->failing()) { 3213 return; 3214 } 3215 nmm->set_memory_at(ni, result); 3216 } 3217 } 3218 igvn->hash_insert(nmm); 3219 record_for_optimizer(nmm); 3220 } 3221 3222 // Phase 4: Update the inputs of non-instance memory Phis and 3223 // the Memory input of memnodes 3224 // First update the inputs of any non-instance Phi's from 3225 // which we split out an instance Phi. Note we don't have 3226 // to recursively process Phi's encounted on the input memory 3227 // chains as is done in split_memory_phi() since they will 3228 // also be processed here. 3229 for (int j = 0; j < orig_phis.length(); j++) { 3230 PhiNode *phi = orig_phis.at(j); 3231 int alias_idx = _compile->get_alias_index(phi->adr_type()); 3232 igvn->hash_delete(phi); 3233 for (uint i = 1; i < phi->req(); i++) { 3234 Node *mem = phi->in(i); 3235 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis); 3236 if (_compile->failing()) { 3237 return; 3238 } 3239 if (mem != new_mem) { 3240 phi->set_req(i, new_mem); 3241 } 3242 } 3243 igvn->hash_insert(phi); 3244 record_for_optimizer(phi); 3245 } 3246 3247 // Update the memory inputs of MemNodes with the value we computed 3248 // in Phase 2 and move stores memory users to corresponding memory slices. 3249 // Disable memory split verification code until the fix for 6984348. 3250 // Currently it produces false negative results since it does not cover all cases. 3251 #if 0 // ifdef ASSERT 3252 visited.Reset(); 3253 Node_Stack old_mems(arena, _compile->unique() >> 2); 3254 #endif 3255 for (uint i = 0; i < ideal_nodes.size(); i++) { 3256 Node* n = ideal_nodes.at(i); 3257 Node* nmem = get_map(n->_idx); 3258 assert(nmem != NULL, "sanity"); 3259 if (n->is_Mem()) { 3260 #if 0 // ifdef ASSERT 3261 Node* old_mem = n->in(MemNode::Memory); 3262 if (!visited.test_set(old_mem->_idx)) { 3263 old_mems.push(old_mem, old_mem->outcnt()); 3264 } 3265 #endif 3266 assert(n->in(MemNode::Memory) != nmem, "sanity"); 3267 if (!n->is_Load()) { 3268 // Move memory users of a store first. 3269 move_inst_mem(n, orig_phis); 3270 } 3271 // Now update memory input 3272 igvn->hash_delete(n); 3273 n->set_req(MemNode::Memory, nmem); 3274 igvn->hash_insert(n); 3275 record_for_optimizer(n); 3276 } else { 3277 assert(n->is_Allocate() || n->is_CheckCastPP() || 3278 n->is_AddP() || n->is_Phi(), "unknown node used for set_map()"); 3279 } 3280 } 3281 #if 0 // ifdef ASSERT 3282 // Verify that memory was split correctly 3283 while (old_mems.is_nonempty()) { 3284 Node* old_mem = old_mems.node(); 3285 uint old_cnt = old_mems.index(); 3286 old_mems.pop(); 3287 assert(old_cnt == old_mem->outcnt(), "old mem could be lost"); 3288 } 3289 #endif 3290 } 3291 3292 #ifndef PRODUCT 3293 static const char *node_type_names[] = { 3294 "UnknownType", 3295 "JavaObject", 3296 "LocalVar", 3297 "Field", 3298 "Arraycopy" 3299 }; 3300 3301 static const char *esc_names[] = { 3302 "UnknownEscape", 3303 "NoEscape", 3304 "ArgEscape", 3305 "GlobalEscape" 3306 }; 3307 3308 void PointsToNode::dump(bool print_state) const { 3309 NodeType nt = node_type(); 3310 tty->print("%s ", node_type_names[(int) nt]); 3311 if (print_state) { 3312 EscapeState es = escape_state(); 3313 EscapeState fields_es = fields_escape_state(); 3314 tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]); 3315 if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) 3316 tty->print("NSR "); 3317 } 3318 if (is_Field()) { 3319 FieldNode* f = (FieldNode*)this; 3320 if (f->is_oop()) 3321 tty->print("oop "); 3322 if (f->offset() > 0) 3323 tty->print("+%d ", f->offset()); 3324 tty->print("("); 3325 for (BaseIterator i(f); i.has_next(); i.next()) { 3326 PointsToNode* b = i.get(); 3327 tty->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : "")); 3328 } 3329 tty->print(" )"); 3330 } 3331 tty->print("["); 3332 for (EdgeIterator i(this); i.has_next(); i.next()) { 3333 PointsToNode* e = i.get(); 3334 tty->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : ""); 3335 } 3336 tty->print(" ["); 3337 for (UseIterator i(this); i.has_next(); i.next()) { 3338 PointsToNode* u = i.get(); 3339 bool is_base = false; 3340 if (PointsToNode::is_base_use(u)) { 3341 is_base = true; 3342 u = PointsToNode::get_use_node(u)->as_Field(); 3343 } 3344 tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : ""); 3345 } 3346 tty->print(" ]] "); 3347 if (_node == NULL) 3348 tty->print_cr("<null>"); 3349 else 3350 _node->dump(); 3351 } 3352 3353 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { 3354 bool first = true; 3355 int ptnodes_length = ptnodes_worklist.length(); 3356 for (int i = 0; i < ptnodes_length; i++) { 3357 PointsToNode *ptn = ptnodes_worklist.at(i); 3358 if (ptn == NULL || !ptn->is_JavaObject()) 3359 continue; 3360 PointsToNode::EscapeState es = ptn->escape_state(); 3361 if ((es != PointsToNode::NoEscape) && !Verbose) { 3362 continue; 3363 } 3364 Node* n = ptn->ideal_node(); 3365 if (n->is_Allocate() || (n->is_CallStaticJava() && 3366 n->as_CallStaticJava()->is_boxing_method())) { 3367 if (first) { 3368 tty->cr(); 3369 tty->print("======== Connection graph for "); 3370 _compile->method()->print_short_name(); 3371 tty->cr(); 3372 first = false; 3373 } 3374 ptn->dump(); 3375 // Print all locals and fields which reference this allocation 3376 for (UseIterator j(ptn); j.has_next(); j.next()) { 3377 PointsToNode* use = j.get(); 3378 if (use->is_LocalVar()) { 3379 use->dump(Verbose); 3380 } else if (Verbose) { 3381 use->dump(); 3382 } 3383 } 3384 tty->cr(); 3385 } 3386 } 3387 } 3388 #endif