1 /* 2 * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "ci/bcEscapeAnalyzer.hpp" 27 #include "libadt/vectset.hpp" 28 #include "memory/allocation.hpp" 29 #include "opto/c2compiler.hpp" 30 #include "opto/callnode.hpp" 31 #include "opto/cfgnode.hpp" 32 #include "opto/compile.hpp" 33 #include "opto/escape.hpp" 34 #include "opto/phaseX.hpp" 35 #include "opto/rootnode.hpp" 36 37 void PointsToNode::add_edge(uint targIdx, PointsToNode::EdgeType et) { 38 uint v = (targIdx << EdgeShift) + ((uint) et); 39 if (_edges == NULL) { 40 Arena *a = Compile::current()->comp_arena(); 41 _edges = new(a) GrowableArray<uint>(a, INITIAL_EDGE_COUNT, 0, 0); 42 } 43 _edges->append_if_missing(v); 44 } 45 46 void PointsToNode::remove_edge(uint targIdx, PointsToNode::EdgeType et) { 47 uint v = (targIdx << EdgeShift) + ((uint) et); 48 49 _edges->remove(v); 50 } 51 52 #ifndef PRODUCT 53 static const char *node_type_names[] = { 54 "UnknownType", 55 "JavaObject", 56 "LocalVar", 57 "Field" 58 }; 59 60 static const char *esc_names[] = { 61 "UnknownEscape", 62 "NoEscape", 63 "ArgEscape", 64 "GlobalEscape" 65 }; 66 67 static const char *edge_type_suffix[] = { 68 "?", // UnknownEdge 69 "P", // PointsToEdge 70 "D", // DeferredEdge 71 "F" // FieldEdge 72 }; 73 74 void PointsToNode::dump(bool print_state) const { 75 NodeType nt = node_type(); 76 tty->print("%s ", node_type_names[(int) nt]); 77 if (print_state) { 78 EscapeState es = escape_state(); 79 tty->print("%s %s ", esc_names[(int) es], _scalar_replaceable ? "":"NSR"); 80 } 81 tty->print("[["); 82 for (uint i = 0; i < edge_count(); i++) { 83 tty->print(" %d%s", edge_target(i), edge_type_suffix[(int) edge_type(i)]); 84 } 85 tty->print("]] "); 86 if (_node == NULL) 87 tty->print_cr("<null>"); 88 else 89 _node->dump(); 90 } 91 #endif 92 93 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) : 94 _nodes(C->comp_arena(), C->unique(), C->unique(), PointsToNode()), 95 _processed(C->comp_arena()), 96 pt_ptset(C->comp_arena()), 97 pt_visited(C->comp_arena()), 98 pt_worklist(C->comp_arena(), 4, 0, 0), 99 _collecting(true), 100 _progress(false), 101 _compile(C), 102 _igvn(igvn), 103 _node_map(C->comp_arena()) { 104 105 _phantom_object = C->top()->_idx, 106 add_node(C->top(), PointsToNode::JavaObject, PointsToNode::GlobalEscape,true); 107 108 // Add ConP(#NULL) and ConN(#NULL) nodes. 109 Node* oop_null = igvn->zerocon(T_OBJECT); 110 _oop_null = oop_null->_idx; 111 assert(_oop_null < nodes_size(), "should be created already"); 112 add_node(oop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true); 113 114 if (UseCompressedOops) { 115 Node* noop_null = igvn->zerocon(T_NARROWOOP); 116 _noop_null = noop_null->_idx; 117 assert(_noop_null < nodes_size(), "should be created already"); 118 add_node(noop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true); 119 } else { 120 _noop_null = _oop_null; // Should be initialized 121 } 122 _pcmp_neq = NULL; // Should be initialized 123 _pcmp_eq = NULL; 124 } 125 126 void ConnectionGraph::add_pointsto_edge(uint from_i, uint to_i) { 127 PointsToNode *f = ptnode_adr(from_i); 128 PointsToNode *t = ptnode_adr(to_i); 129 130 assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set"); 131 assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of PointsTo edge"); 132 assert(t->node_type() == PointsToNode::JavaObject, "invalid destination of PointsTo edge"); 133 add_edge(f, to_i, PointsToNode::PointsToEdge); 134 } 135 136 void ConnectionGraph::add_deferred_edge(uint from_i, uint to_i) { 137 PointsToNode *f = ptnode_adr(from_i); 138 PointsToNode *t = ptnode_adr(to_i); 139 140 assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set"); 141 assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of Deferred edge"); 142 assert(t->node_type() == PointsToNode::LocalVar || t->node_type() == PointsToNode::Field, "invalid destination of Deferred edge"); 143 // don't add a self-referential edge, this can occur during removal of 144 // deferred edges 145 if (from_i != to_i) 146 add_edge(f, to_i, PointsToNode::DeferredEdge); 147 } 148 149 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) { 150 const Type *adr_type = phase->type(adr); 151 if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && 152 adr->in(AddPNode::Address)->is_Proj() && 153 adr->in(AddPNode::Address)->in(0)->is_Allocate()) { 154 // We are computing a raw address for a store captured by an Initialize 155 // compute an appropriate address type. AddP cases #3 and #5 (see below). 156 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 157 assert(offs != Type::OffsetBot || 158 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), 159 "offset must be a constant or it is initialization of array"); 160 return offs; 161 } 162 const TypePtr *t_ptr = adr_type->isa_ptr(); 163 assert(t_ptr != NULL, "must be a pointer type"); 164 return t_ptr->offset(); 165 } 166 167 void ConnectionGraph::add_field_edge(uint from_i, uint to_i, int offset) { 168 PointsToNode *f = ptnode_adr(from_i); 169 PointsToNode *t = ptnode_adr(to_i); 170 171 assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set"); 172 assert(f->node_type() == PointsToNode::JavaObject, "invalid destination of Field edge"); 173 assert(t->node_type() == PointsToNode::Field, "invalid destination of Field edge"); 174 assert (t->offset() == -1 || t->offset() == offset, "conflicting field offsets"); 175 t->set_offset(offset); 176 177 add_edge(f, to_i, PointsToNode::FieldEdge); 178 } 179 180 void ConnectionGraph::set_escape_state(uint ni, PointsToNode::EscapeState es) { 181 // Don't change non-escaping state of NULL pointer. 182 if (ni == _noop_null || ni == _oop_null) 183 return; 184 PointsToNode *npt = ptnode_adr(ni); 185 PointsToNode::EscapeState old_es = npt->escape_state(); 186 if (es > old_es) 187 npt->set_escape_state(es); 188 } 189 190 void ConnectionGraph::add_node(Node *n, PointsToNode::NodeType nt, 191 PointsToNode::EscapeState es, bool done) { 192 PointsToNode* ptadr = ptnode_adr(n->_idx); 193 ptadr->_node = n; 194 ptadr->set_node_type(nt); 195 196 // inline set_escape_state(idx, es); 197 PointsToNode::EscapeState old_es = ptadr->escape_state(); 198 if (es > old_es) 199 ptadr->set_escape_state(es); 200 201 if (done) 202 _processed.set(n->_idx); 203 } 204 205 PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n) { 206 uint idx = n->_idx; 207 PointsToNode::EscapeState es; 208 209 // If we are still collecting or there were no non-escaping allocations 210 // we don't know the answer yet 211 if (_collecting) 212 return PointsToNode::UnknownEscape; 213 214 // if the node was created after the escape computation, return 215 // UnknownEscape 216 if (idx >= nodes_size()) 217 return PointsToNode::UnknownEscape; 218 219 es = ptnode_adr(idx)->escape_state(); 220 221 // if we have already computed a value, return it 222 if (es != PointsToNode::UnknownEscape && 223 ptnode_adr(idx)->node_type() == PointsToNode::JavaObject) 224 return es; 225 226 // PointsTo() calls n->uncast() which can return a new ideal node. 227 if (n->uncast()->_idx >= nodes_size()) 228 return PointsToNode::UnknownEscape; 229 230 PointsToNode::EscapeState orig_es = es; 231 232 // compute max escape state of anything this node could point to 233 for(VectorSetI i(PointsTo(n)); i.test() && es != PointsToNode::GlobalEscape; ++i) { 234 uint pt = i.elem; 235 PointsToNode::EscapeState pes = ptnode_adr(pt)->escape_state(); 236 if (pes > es) 237 es = pes; 238 } 239 if (orig_es != es) { 240 // cache the computed escape state 241 assert(es > orig_es, "should have computed an escape state"); 242 set_escape_state(idx, es); 243 } // orig_es could be PointsToNode::UnknownEscape 244 return es; 245 } 246 247 VectorSet* ConnectionGraph::PointsTo(Node * n) { 248 pt_ptset.Reset(); 249 pt_visited.Reset(); 250 pt_worklist.clear(); 251 252 #ifdef ASSERT 253 Node *orig_n = n; 254 #endif 255 256 n = n->uncast(); 257 PointsToNode* npt = ptnode_adr(n->_idx); 258 259 // If we have a JavaObject, return just that object 260 if (npt->node_type() == PointsToNode::JavaObject) { 261 pt_ptset.set(n->_idx); 262 return &pt_ptset; 263 } 264 #ifdef ASSERT 265 if (npt->_node == NULL) { 266 if (orig_n != n) 267 orig_n->dump(); 268 n->dump(); 269 assert(npt->_node != NULL, "unregistered node"); 270 } 271 #endif 272 pt_worklist.push(n->_idx); 273 while(pt_worklist.length() > 0) { 274 int ni = pt_worklist.pop(); 275 if (pt_visited.test_set(ni)) 276 continue; 277 278 PointsToNode* pn = ptnode_adr(ni); 279 // ensure that all inputs of a Phi have been processed 280 assert(!_collecting || !pn->_node->is_Phi() || _processed.test(ni),""); 281 282 int edges_processed = 0; 283 uint e_cnt = pn->edge_count(); 284 for (uint e = 0; e < e_cnt; e++) { 285 uint etgt = pn->edge_target(e); 286 PointsToNode::EdgeType et = pn->edge_type(e); 287 if (et == PointsToNode::PointsToEdge) { 288 pt_ptset.set(etgt); 289 edges_processed++; 290 } else if (et == PointsToNode::DeferredEdge) { 291 pt_worklist.push(etgt); 292 edges_processed++; 293 } else { 294 assert(false,"neither PointsToEdge or DeferredEdge"); 295 } 296 } 297 if (edges_processed == 0) { 298 // no deferred or pointsto edges found. Assume the value was set 299 // outside this method. Add the phantom object to the pointsto set. 300 pt_ptset.set(_phantom_object); 301 } 302 } 303 return &pt_ptset; 304 } 305 306 void ConnectionGraph::remove_deferred(uint ni, GrowableArray<uint>* deferred_edges, VectorSet* visited) { 307 // This method is most expensive during ConnectionGraph construction. 308 // Reuse vectorSet and an additional growable array for deferred edges. 309 deferred_edges->clear(); 310 visited->Reset(); 311 312 visited->set(ni); 313 PointsToNode *ptn = ptnode_adr(ni); 314 if (ptn->edge_count() == 0) { 315 // No deferred or pointsto edges found. Assume the value was set 316 // outside this method. Add edge to phantom object. 317 add_pointsto_edge(ni, _phantom_object); 318 } 319 320 // Mark current edges as visited and move deferred edges to separate array. 321 for (uint i = 0; i < ptn->edge_count(); ) { 322 uint t = ptn->edge_target(i); 323 #ifdef ASSERT 324 assert(!visited->test_set(t), "expecting no duplications"); 325 #else 326 visited->set(t); 327 #endif 328 if (ptn->edge_type(i) == PointsToNode::DeferredEdge) { 329 ptn->remove_edge(t, PointsToNode::DeferredEdge); 330 deferred_edges->append(t); 331 } else { 332 i++; 333 } 334 } 335 for (int next = 0; next < deferred_edges->length(); ++next) { 336 uint t = deferred_edges->at(next); 337 PointsToNode *ptt = ptnode_adr(t); 338 uint e_cnt = ptt->edge_count(); 339 if (e_cnt == 0) { 340 // No deferred or pointsto edges found. Assume the value was set 341 // outside this method. Add edge to phantom object. 342 add_pointsto_edge(t, _phantom_object); 343 add_pointsto_edge(ni, _phantom_object); 344 } 345 for (uint e = 0; e < e_cnt; e++) { 346 uint etgt = ptt->edge_target(e); 347 if (visited->test_set(etgt)) 348 continue; 349 350 PointsToNode::EdgeType et = ptt->edge_type(e); 351 if (et == PointsToNode::PointsToEdge) { 352 add_pointsto_edge(ni, etgt); 353 if(etgt == _phantom_object) { 354 // Special case - field set outside (globally escaping). 355 set_escape_state(ni, PointsToNode::GlobalEscape); 356 } 357 } else if (et == PointsToNode::DeferredEdge) { 358 deferred_edges->append(etgt); 359 } else { 360 assert(false,"invalid connection graph"); 361 } 362 } 363 } 364 } 365 366 367 // Add an edge to node given by "to_i" from any field of adr_i whose offset 368 // matches "offset" A deferred edge is added if to_i is a LocalVar, and 369 // a pointsto edge is added if it is a JavaObject 370 371 void ConnectionGraph::add_edge_from_fields(uint adr_i, uint to_i, int offs) { 372 PointsToNode* an = ptnode_adr(adr_i); 373 PointsToNode* to = ptnode_adr(to_i); 374 bool deferred = (to->node_type() == PointsToNode::LocalVar); 375 376 for (uint fe = 0; fe < an->edge_count(); fe++) { 377 assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge"); 378 int fi = an->edge_target(fe); 379 PointsToNode* pf = ptnode_adr(fi); 380 int po = pf->offset(); 381 if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) { 382 if (deferred) 383 add_deferred_edge(fi, to_i); 384 else 385 add_pointsto_edge(fi, to_i); 386 } 387 } 388 } 389 390 // Add a deferred edge from node given by "from_i" to any field of adr_i 391 // whose offset matches "offset". 392 void ConnectionGraph::add_deferred_edge_to_fields(uint from_i, uint adr_i, int offs) { 393 PointsToNode* an = ptnode_adr(adr_i); 394 bool is_alloc = an->_node->is_Allocate(); 395 for (uint fe = 0; fe < an->edge_count(); fe++) { 396 assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge"); 397 int fi = an->edge_target(fe); 398 PointsToNode* pf = ptnode_adr(fi); 399 int offset = pf->offset(); 400 if (!is_alloc) { 401 // Assume the field was set outside this method if it is not Allocation 402 add_pointsto_edge(fi, _phantom_object); 403 } 404 if (offset == offs || offset == Type::OffsetBot || offs == Type::OffsetBot) { 405 add_deferred_edge(from_i, fi); 406 } 407 } 408 // Some fields references (AddP) may still be missing 409 // until Connection Graph construction is complete. 410 // For example, loads from RAW pointers with offset 0 411 // which don't have AddP. 412 // A reference to phantom_object will be added if 413 // a field reference is still missing after completing 414 // Connection Graph (see remove_deferred()). 415 } 416 417 // Helper functions 418 419 static Node* get_addp_base(Node *addp) { 420 assert(addp->is_AddP(), "must be AddP"); 421 // 422 // AddP cases for Base and Address inputs: 423 // case #1. Direct object's field reference: 424 // Allocate 425 // | 426 // Proj #5 ( oop result ) 427 // | 428 // CheckCastPP (cast to instance type) 429 // | | 430 // AddP ( base == address ) 431 // 432 // case #2. Indirect object's field reference: 433 // Phi 434 // | 435 // CastPP (cast to instance type) 436 // | | 437 // AddP ( base == address ) 438 // 439 // case #3. Raw object's field reference for Initialize node: 440 // Allocate 441 // | 442 // Proj #5 ( oop result ) 443 // top | 444 // \ | 445 // AddP ( base == top ) 446 // 447 // case #4. Array's element reference: 448 // {CheckCastPP | CastPP} 449 // | | | 450 // | AddP ( array's element offset ) 451 // | | 452 // AddP ( array's offset ) 453 // 454 // case #5. Raw object's field reference for arraycopy stub call: 455 // The inline_native_clone() case when the arraycopy stub is called 456 // after the allocation before Initialize and CheckCastPP nodes. 457 // Allocate 458 // | 459 // Proj #5 ( oop result ) 460 // | | 461 // AddP ( base == address ) 462 // 463 // case #6. Constant Pool, ThreadLocal, CastX2P or 464 // Raw object's field reference: 465 // {ConP, ThreadLocal, CastX2P, raw Load} 466 // top | 467 // \ | 468 // AddP ( base == top ) 469 // 470 // case #7. Klass's field reference. 471 // LoadKlass 472 // | | 473 // AddP ( base == address ) 474 // 475 // case #8. narrow Klass's field reference. 476 // LoadNKlass 477 // | 478 // DecodeN 479 // | | 480 // AddP ( base == address ) 481 // 482 Node *base = addp->in(AddPNode::Base)->uncast(); 483 if (base->is_top()) { // The AddP case #3 and #6. 484 base = addp->in(AddPNode::Address)->uncast(); 485 while (base->is_AddP()) { 486 // Case #6 (unsafe access) may have several chained AddP nodes. 487 assert(base->in(AddPNode::Base)->is_top(), "expected unsafe access address only"); 488 base = base->in(AddPNode::Address)->uncast(); 489 } 490 assert(base->Opcode() == Op_ConP || base->Opcode() == Op_ThreadLocal || 491 base->Opcode() == Op_CastX2P || base->is_DecodeN() || 492 (base->is_Mem() && base->bottom_type() == TypeRawPtr::NOTNULL) || 493 (base->is_Proj() && base->in(0)->is_Allocate()), "sanity"); 494 } 495 return base; 496 } 497 498 static Node* find_second_addp(Node* addp, Node* n) { 499 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes"); 500 501 Node* addp2 = addp->raw_out(0); 502 if (addp->outcnt() == 1 && addp2->is_AddP() && 503 addp2->in(AddPNode::Base) == n && 504 addp2->in(AddPNode::Address) == addp) { 505 506 assert(addp->in(AddPNode::Base) == n, "expecting the same base"); 507 // 508 // Find array's offset to push it on worklist first and 509 // as result process an array's element offset first (pushed second) 510 // to avoid CastPP for the array's offset. 511 // Otherwise the inserted CastPP (LocalVar) will point to what 512 // the AddP (Field) points to. Which would be wrong since 513 // the algorithm expects the CastPP has the same point as 514 // as AddP's base CheckCastPP (LocalVar). 515 // 516 // ArrayAllocation 517 // | 518 // CheckCastPP 519 // | 520 // memProj (from ArrayAllocation CheckCastPP) 521 // | || 522 // | || Int (element index) 523 // | || | ConI (log(element size)) 524 // | || | / 525 // | || LShift 526 // | || / 527 // | AddP (array's element offset) 528 // | | 529 // | | ConI (array's offset: #12(32-bits) or #24(64-bits)) 530 // | / / 531 // AddP (array's offset) 532 // | 533 // Load/Store (memory operation on array's element) 534 // 535 return addp2; 536 } 537 return NULL; 538 } 539 540 // 541 // Adjust the type and inputs of an AddP which computes the 542 // address of a field of an instance 543 // 544 bool ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) { 545 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr(); 546 assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr"); 547 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr(); 548 if (t == NULL) { 549 // We are computing a raw address for a store captured by an Initialize 550 // compute an appropriate address type (cases #3 and #5). 551 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer"); 552 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation"); 553 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot); 554 assert(offs != Type::OffsetBot, "offset must be a constant"); 555 t = base_t->add_offset(offs)->is_oopptr(); 556 } 557 int inst_id = base_t->instance_id(); 558 assert(!t->is_known_instance() || t->instance_id() == inst_id, 559 "old type must be non-instance or match new type"); 560 561 // The type 't' could be subclass of 'base_t'. 562 // As result t->offset() could be large then base_t's size and it will 563 // cause the failure in add_offset() with narrow oops since TypeOopPtr() 564 // constructor verifies correctness of the offset. 565 // 566 // It could happened on subclass's branch (from the type profiling 567 // inlining) which was not eliminated during parsing since the exactness 568 // of the allocation type was not propagated to the subclass type check. 569 // 570 // Or the type 't' could be not related to 'base_t' at all. 571 // It could happened when CHA type is different from MDO type on a dead path 572 // (for example, from instanceof check) which is not collapsed during parsing. 573 // 574 // Do nothing for such AddP node and don't process its users since 575 // this code branch will go away. 576 // 577 if (!t->is_known_instance() && 578 !base_t->klass()->is_subtype_of(t->klass())) { 579 return false; // bail out 580 } 581 582 const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr(); 583 // Do NOT remove the next line: ensure a new alias index is allocated 584 // for the instance type. Note: C++ will not remove it since the call 585 // has side effect. 586 int alias_idx = _compile->get_alias_index(tinst); 587 igvn->set_type(addp, tinst); 588 // record the allocation in the node map 589 assert(ptnode_adr(addp->_idx)->_node != NULL, "should be registered"); 590 set_map(addp->_idx, get_map(base->_idx)); 591 592 // Set addp's Base and Address to 'base'. 593 Node *abase = addp->in(AddPNode::Base); 594 Node *adr = addp->in(AddPNode::Address); 595 if (adr->is_Proj() && adr->in(0)->is_Allocate() && 596 adr->in(0)->_idx == (uint)inst_id) { 597 // Skip AddP cases #3 and #5. 598 } else { 599 assert(!abase->is_top(), "sanity"); // AddP case #3 600 if (abase != base) { 601 igvn->hash_delete(addp); 602 addp->set_req(AddPNode::Base, base); 603 if (abase == adr) { 604 addp->set_req(AddPNode::Address, base); 605 } else { 606 // AddP case #4 (adr is array's element offset AddP node) 607 #ifdef ASSERT 608 const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr(); 609 assert(adr->is_AddP() && atype != NULL && 610 atype->instance_id() == inst_id, "array's element offset should be processed first"); 611 #endif 612 } 613 igvn->hash_insert(addp); 614 } 615 } 616 // Put on IGVN worklist since at least addp's type was changed above. 617 record_for_optimizer(addp); 618 return true; 619 } 620 621 // 622 // Create a new version of orig_phi if necessary. Returns either the newly 623 // created phi or an existing phi. Sets create_new to indicate whether a new 624 // phi was created. Cache the last newly created phi in the node map. 625 // 626 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn, bool &new_created) { 627 Compile *C = _compile; 628 new_created = false; 629 int phi_alias_idx = C->get_alias_index(orig_phi->adr_type()); 630 // nothing to do if orig_phi is bottom memory or matches alias_idx 631 if (phi_alias_idx == alias_idx) { 632 return orig_phi; 633 } 634 // Have we recently created a Phi for this alias index? 635 PhiNode *result = get_map_phi(orig_phi->_idx); 636 if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) { 637 return result; 638 } 639 // Previous check may fail when the same wide memory Phi was split into Phis 640 // for different memory slices. Search all Phis for this region. 641 if (result != NULL) { 642 Node* region = orig_phi->in(0); 643 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { 644 Node* phi = region->fast_out(i); 645 if (phi->is_Phi() && 646 C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) { 647 assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice"); 648 return phi->as_Phi(); 649 } 650 } 651 } 652 if ((int)C->unique() + 2*NodeLimitFudgeFactor > MaxNodeLimit) { 653 if (C->do_escape_analysis() == true && !C->failing()) { 654 // Retry compilation without escape analysis. 655 // If this is the first failure, the sentinel string will "stick" 656 // to the Compile object, and the C2Compiler will see it and retry. 657 C->record_failure(C2Compiler::retry_no_escape_analysis()); 658 } 659 return NULL; 660 } 661 orig_phi_worklist.append_if_missing(orig_phi); 662 const TypePtr *atype = C->get_adr_type(alias_idx); 663 result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype); 664 C->copy_node_notes_to(result, orig_phi); 665 igvn->set_type(result, result->bottom_type()); 666 record_for_optimizer(result); 667 668 debug_only(Node* pn = ptnode_adr(orig_phi->_idx)->_node;) 669 assert(pn == NULL || pn == orig_phi, "wrong node"); 670 set_map(orig_phi->_idx, result); 671 ptnode_adr(orig_phi->_idx)->_node = orig_phi; 672 673 new_created = true; 674 return result; 675 } 676 677 // 678 // Return a new version of Memory Phi "orig_phi" with the inputs having the 679 // specified alias index. 680 // 681 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn) { 682 683 assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory"); 684 Compile *C = _compile; 685 bool new_phi_created; 686 PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, igvn, new_phi_created); 687 if (!new_phi_created) { 688 return result; 689 } 690 691 GrowableArray<PhiNode *> phi_list; 692 GrowableArray<uint> cur_input; 693 694 PhiNode *phi = orig_phi; 695 uint idx = 1; 696 bool finished = false; 697 while(!finished) { 698 while (idx < phi->req()) { 699 Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, igvn); 700 if (mem != NULL && mem->is_Phi()) { 701 PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, igvn, new_phi_created); 702 if (new_phi_created) { 703 // found an phi for which we created a new split, push current one on worklist and begin 704 // processing new one 705 phi_list.push(phi); 706 cur_input.push(idx); 707 phi = mem->as_Phi(); 708 result = newphi; 709 idx = 1; 710 continue; 711 } else { 712 mem = newphi; 713 } 714 } 715 if (C->failing()) { 716 return NULL; 717 } 718 result->set_req(idx++, mem); 719 } 720 #ifdef ASSERT 721 // verify that the new Phi has an input for each input of the original 722 assert( phi->req() == result->req(), "must have same number of inputs."); 723 assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match"); 724 #endif 725 // Check if all new phi's inputs have specified alias index. 726 // Otherwise use old phi. 727 for (uint i = 1; i < phi->req(); i++) { 728 Node* in = result->in(i); 729 assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond."); 730 } 731 // we have finished processing a Phi, see if there are any more to do 732 finished = (phi_list.length() == 0 ); 733 if (!finished) { 734 phi = phi_list.pop(); 735 idx = cur_input.pop(); 736 PhiNode *prev_result = get_map_phi(phi->_idx); 737 prev_result->set_req(idx++, result); 738 result = prev_result; 739 } 740 } 741 return result; 742 } 743 744 745 // 746 // The next methods are derived from methods in MemNode. 747 // 748 static Node *step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) { 749 Node *mem = mmem; 750 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally 751 // means an array I have not precisely typed yet. Do not do any 752 // alias stuff with it any time soon. 753 if( toop->base() != Type::AnyPtr && 754 !(toop->klass() != NULL && 755 toop->klass()->is_java_lang_Object() && 756 toop->offset() == Type::OffsetBot) ) { 757 mem = mmem->memory_at(alias_idx); 758 // Update input if it is progress over what we have now 759 } 760 return mem; 761 } 762 763 // 764 // Move memory users to their memory slices. 765 // 766 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis, PhaseGVN *igvn) { 767 Compile* C = _compile; 768 769 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 770 assert(tp != NULL, "ptr type"); 771 int alias_idx = C->get_alias_index(tp); 772 int general_idx = C->get_general_index(alias_idx); 773 774 // Move users first 775 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 776 Node* use = n->fast_out(i); 777 if (use->is_MergeMem()) { 778 MergeMemNode* mmem = use->as_MergeMem(); 779 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); 780 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 781 continue; // Nothing to do 782 } 783 // Replace previous general reference to mem node. 784 uint orig_uniq = C->unique(); 785 Node* m = find_inst_mem(n, general_idx, orig_phis, igvn); 786 assert(orig_uniq == C->unique(), "no new nodes"); 787 mmem->set_memory_at(general_idx, m); 788 --imax; 789 --i; 790 } else if (use->is_MemBar()) { 791 assert(!use->is_Initialize(), "initializing stores should not be moved"); 792 if (use->req() > MemBarNode::Precedent && 793 use->in(MemBarNode::Precedent) == n) { 794 // Don't move related membars. 795 record_for_optimizer(use); 796 continue; 797 } 798 tp = use->as_MemBar()->adr_type()->isa_ptr(); 799 if (tp != NULL && C->get_alias_index(tp) == alias_idx || 800 alias_idx == general_idx) { 801 continue; // Nothing to do 802 } 803 // Move to general memory slice. 804 uint orig_uniq = C->unique(); 805 Node* m = find_inst_mem(n, general_idx, orig_phis, igvn); 806 assert(orig_uniq == C->unique(), "no new nodes"); 807 igvn->hash_delete(use); 808 imax -= use->replace_edge(n, m); 809 igvn->hash_insert(use); 810 record_for_optimizer(use); 811 --i; 812 #ifdef ASSERT 813 } else if (use->is_Mem()) { 814 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) { 815 // Don't move related cardmark. 816 continue; 817 } 818 // Memory nodes should have new memory input. 819 tp = igvn->type(use->in(MemNode::Address))->isa_ptr(); 820 assert(tp != NULL, "ptr type"); 821 int idx = C->get_alias_index(tp); 822 assert(get_map(use->_idx) != NULL || idx == alias_idx, 823 "Following memory nodes should have new memory input or be on the same memory slice"); 824 } else if (use->is_Phi()) { 825 // Phi nodes should be split and moved already. 826 tp = use->as_Phi()->adr_type()->isa_ptr(); 827 assert(tp != NULL, "ptr type"); 828 int idx = C->get_alias_index(tp); 829 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice"); 830 } else { 831 use->dump(); 832 assert(false, "should not be here"); 833 #endif 834 } 835 } 836 } 837 838 // 839 // Search memory chain of "mem" to find a MemNode whose address 840 // is the specified alias index. 841 // 842 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis, PhaseGVN *phase) { 843 if (orig_mem == NULL) 844 return orig_mem; 845 Compile* C = phase->C; 846 const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr(); 847 bool is_instance = (toop != NULL) && toop->is_known_instance(); 848 Node *start_mem = C->start()->proj_out(TypeFunc::Memory); 849 Node *prev = NULL; 850 Node *result = orig_mem; 851 while (prev != result) { 852 prev = result; 853 if (result == start_mem) 854 break; // hit one of our sentinels 855 if (result->is_Mem()) { 856 const Type *at = phase->type(result->in(MemNode::Address)); 857 if (at == Type::TOP) 858 break; // Dead 859 assert (at->isa_ptr() != NULL, "pointer type required."); 860 int idx = C->get_alias_index(at->is_ptr()); 861 if (idx == alias_idx) 862 break; // Found 863 if (!is_instance && (at->isa_oopptr() == NULL || 864 !at->is_oopptr()->is_known_instance())) { 865 break; // Do not skip store to general memory slice. 866 } 867 result = result->in(MemNode::Memory); 868 } 869 if (!is_instance) 870 continue; // don't search further for non-instance types 871 // skip over a call which does not affect this memory slice 872 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) { 873 Node *proj_in = result->in(0); 874 if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) { 875 break; // hit one of our sentinels 876 } else if (proj_in->is_Call()) { 877 CallNode *call = proj_in->as_Call(); 878 if (!call->may_modify(toop, phase)) { 879 result = call->in(TypeFunc::Memory); 880 } 881 } else if (proj_in->is_Initialize()) { 882 AllocateNode* alloc = proj_in->as_Initialize()->allocation(); 883 // Stop if this is the initialization for the object instance which 884 // which contains this memory slice, otherwise skip over it. 885 if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) { 886 result = proj_in->in(TypeFunc::Memory); 887 } 888 } else if (proj_in->is_MemBar()) { 889 result = proj_in->in(TypeFunc::Memory); 890 } 891 } else if (result->is_MergeMem()) { 892 MergeMemNode *mmem = result->as_MergeMem(); 893 result = step_through_mergemem(mmem, alias_idx, toop); 894 if (result == mmem->base_memory()) { 895 // Didn't find instance memory, search through general slice recursively. 896 result = mmem->memory_at(C->get_general_index(alias_idx)); 897 result = find_inst_mem(result, alias_idx, orig_phis, phase); 898 if (C->failing()) { 899 return NULL; 900 } 901 mmem->set_memory_at(alias_idx, result); 902 } 903 } else if (result->is_Phi() && 904 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { 905 Node *un = result->as_Phi()->unique_input(phase); 906 if (un != NULL) { 907 orig_phis.append_if_missing(result->as_Phi()); 908 result = un; 909 } else { 910 break; 911 } 912 } else if (result->is_ClearArray()) { 913 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), phase)) { 914 // Can not bypass initialization of the instance 915 // we are looking for. 916 break; 917 } 918 // Otherwise skip it (the call updated 'result' value). 919 } else if (result->Opcode() == Op_SCMemProj) { 920 assert(result->in(0)->is_LoadStore(), "sanity"); 921 const Type *at = phase->type(result->in(0)->in(MemNode::Address)); 922 if (at != Type::TOP) { 923 assert (at->isa_ptr() != NULL, "pointer type required."); 924 int idx = C->get_alias_index(at->is_ptr()); 925 assert(idx != alias_idx, "Object is not scalar replaceable if a LoadStore node access its field"); 926 break; 927 } 928 result = result->in(0)->in(MemNode::Memory); 929 } 930 } 931 if (result->is_Phi()) { 932 PhiNode *mphi = result->as_Phi(); 933 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); 934 const TypePtr *t = mphi->adr_type(); 935 if (!is_instance) { 936 // Push all non-instance Phis on the orig_phis worklist to update inputs 937 // during Phase 4 if needed. 938 orig_phis.append_if_missing(mphi); 939 } else if (C->get_alias_index(t) != alias_idx) { 940 // Create a new Phi with the specified alias index type. 941 result = split_memory_phi(mphi, alias_idx, orig_phis, phase); 942 } 943 } 944 // the result is either MemNode, PhiNode, InitializeNode. 945 return result; 946 } 947 948 // 949 // Convert the types of unescaped object to instance types where possible, 950 // propagate the new type information through the graph, and update memory 951 // edges and MergeMem inputs to reflect the new type. 952 // 953 // We start with allocations (and calls which may be allocations) on alloc_worklist. 954 // The processing is done in 4 phases: 955 // 956 // Phase 1: Process possible allocations from alloc_worklist. Create instance 957 // types for the CheckCastPP for allocations where possible. 958 // Propagate the the new types through users as follows: 959 // casts and Phi: push users on alloc_worklist 960 // AddP: cast Base and Address inputs to the instance type 961 // push any AddP users on alloc_worklist and push any memnode 962 // users onto memnode_worklist. 963 // Phase 2: Process MemNode's from memnode_worklist. compute new address type and 964 // search the Memory chain for a store with the appropriate type 965 // address type. If a Phi is found, create a new version with 966 // the appropriate memory slices from each of the Phi inputs. 967 // For stores, process the users as follows: 968 // MemNode: push on memnode_worklist 969 // MergeMem: push on mergemem_worklist 970 // Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice 971 // moving the first node encountered of each instance type to the 972 // the input corresponding to its alias index. 973 // appropriate memory slice. 974 // Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes. 975 // 976 // In the following example, the CheckCastPP nodes are the cast of allocation 977 // results and the allocation of node 29 is unescaped and eligible to be an 978 // instance type. 979 // 980 // We start with: 981 // 982 // 7 Parm #memory 983 // 10 ConI "12" 984 // 19 CheckCastPP "Foo" 985 // 20 AddP _ 19 19 10 Foo+12 alias_index=4 986 // 29 CheckCastPP "Foo" 987 // 30 AddP _ 29 29 10 Foo+12 alias_index=4 988 // 989 // 40 StoreP 25 7 20 ... alias_index=4 990 // 50 StoreP 35 40 30 ... alias_index=4 991 // 60 StoreP 45 50 20 ... alias_index=4 992 // 70 LoadP _ 60 30 ... alias_index=4 993 // 80 Phi 75 50 60 Memory alias_index=4 994 // 90 LoadP _ 80 30 ... alias_index=4 995 // 100 LoadP _ 80 20 ... alias_index=4 996 // 997 // 998 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24 999 // and creating a new alias index for node 30. This gives: 1000 // 1001 // 7 Parm #memory 1002 // 10 ConI "12" 1003 // 19 CheckCastPP "Foo" 1004 // 20 AddP _ 19 19 10 Foo+12 alias_index=4 1005 // 29 CheckCastPP "Foo" iid=24 1006 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24 1007 // 1008 // 40 StoreP 25 7 20 ... alias_index=4 1009 // 50 StoreP 35 40 30 ... alias_index=6 1010 // 60 StoreP 45 50 20 ... alias_index=4 1011 // 70 LoadP _ 60 30 ... alias_index=6 1012 // 80 Phi 75 50 60 Memory alias_index=4 1013 // 90 LoadP _ 80 30 ... alias_index=6 1014 // 100 LoadP _ 80 20 ... alias_index=4 1015 // 1016 // In phase 2, new memory inputs are computed for the loads and stores, 1017 // And a new version of the phi is created. In phase 4, the inputs to 1018 // node 80 are updated and then the memory nodes are updated with the 1019 // values computed in phase 2. This results in: 1020 // 1021 // 7 Parm #memory 1022 // 10 ConI "12" 1023 // 19 CheckCastPP "Foo" 1024 // 20 AddP _ 19 19 10 Foo+12 alias_index=4 1025 // 29 CheckCastPP "Foo" iid=24 1026 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24 1027 // 1028 // 40 StoreP 25 7 20 ... alias_index=4 1029 // 50 StoreP 35 7 30 ... alias_index=6 1030 // 60 StoreP 45 40 20 ... alias_index=4 1031 // 70 LoadP _ 50 30 ... alias_index=6 1032 // 80 Phi 75 40 60 Memory alias_index=4 1033 // 120 Phi 75 50 50 Memory alias_index=6 1034 // 90 LoadP _ 120 30 ... alias_index=6 1035 // 100 LoadP _ 80 20 ... alias_index=4 1036 // 1037 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) { 1038 GrowableArray<Node *> memnode_worklist; 1039 GrowableArray<PhiNode *> orig_phis; 1040 1041 PhaseIterGVN *igvn = _igvn; 1042 uint new_index_start = (uint) _compile->num_alias_types(); 1043 Arena* arena = Thread::current()->resource_area(); 1044 VectorSet visited(arena); 1045 1046 1047 // Phase 1: Process possible allocations from alloc_worklist. 1048 // Create instance types for the CheckCastPP for allocations where possible. 1049 // 1050 // (Note: don't forget to change the order of the second AddP node on 1051 // the alloc_worklist if the order of the worklist processing is changed, 1052 // see the comment in find_second_addp().) 1053 // 1054 while (alloc_worklist.length() != 0) { 1055 Node *n = alloc_worklist.pop(); 1056 uint ni = n->_idx; 1057 const TypeOopPtr* tinst = NULL; 1058 if (n->is_Call()) { 1059 CallNode *alloc = n->as_Call(); 1060 // copy escape information to call node 1061 PointsToNode* ptn = ptnode_adr(alloc->_idx); 1062 PointsToNode::EscapeState es = escape_state(alloc); 1063 // We have an allocation or call which returns a Java object, 1064 // see if it is unescaped. 1065 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) 1066 continue; 1067 1068 // Find CheckCastPP for the allocate or for the return value of a call 1069 n = alloc->result_cast(); 1070 if (n == NULL) { // No uses except Initialize node 1071 if (alloc->is_Allocate()) { 1072 // Set the scalar_replaceable flag for allocation 1073 // so it could be eliminated if it has no uses. 1074 alloc->as_Allocate()->_is_scalar_replaceable = true; 1075 } 1076 continue; 1077 } 1078 if (!n->is_CheckCastPP()) { // not unique CheckCastPP. 1079 assert(!alloc->is_Allocate(), "allocation should have unique type"); 1080 continue; 1081 } 1082 1083 // The inline code for Object.clone() casts the allocation result to 1084 // java.lang.Object and then to the actual type of the allocated 1085 // object. Detect this case and use the second cast. 1086 // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when 1087 // the allocation result is cast to java.lang.Object and then 1088 // to the actual Array type. 1089 if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL 1090 && (alloc->is_AllocateArray() || 1091 igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) { 1092 Node *cast2 = NULL; 1093 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1094 Node *use = n->fast_out(i); 1095 if (use->is_CheckCastPP()) { 1096 cast2 = use; 1097 break; 1098 } 1099 } 1100 if (cast2 != NULL) { 1101 n = cast2; 1102 } else { 1103 // Non-scalar replaceable if the allocation type is unknown statically 1104 // (reflection allocation), the object can't be restored during 1105 // deoptimization without precise type. 1106 continue; 1107 } 1108 } 1109 if (alloc->is_Allocate()) { 1110 // Set the scalar_replaceable flag for allocation 1111 // so it could be eliminated. 1112 alloc->as_Allocate()->_is_scalar_replaceable = true; 1113 } 1114 set_escape_state(n->_idx, es); // CheckCastPP escape state 1115 // in order for an object to be scalar-replaceable, it must be: 1116 // - a direct allocation (not a call returning an object) 1117 // - non-escaping 1118 // - eligible to be a unique type 1119 // - not determined to be ineligible by escape analysis 1120 assert(ptnode_adr(alloc->_idx)->_node != NULL && 1121 ptnode_adr(n->_idx)->_node != NULL, "should be registered"); 1122 set_map(alloc->_idx, n); 1123 set_map(n->_idx, alloc); 1124 const TypeOopPtr *t = igvn->type(n)->isa_oopptr(); 1125 if (t == NULL) 1126 continue; // not a TypeOopPtr 1127 tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni); 1128 igvn->hash_delete(n); 1129 igvn->set_type(n, tinst); 1130 n->raise_bottom_type(tinst); 1131 igvn->hash_insert(n); 1132 record_for_optimizer(n); 1133 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) { 1134 1135 // First, put on the worklist all Field edges from Connection Graph 1136 // which is more accurate then putting immediate users from Ideal Graph. 1137 for (uint e = 0; e < ptn->edge_count(); e++) { 1138 Node *use = ptnode_adr(ptn->edge_target(e))->_node; 1139 assert(ptn->edge_type(e) == PointsToNode::FieldEdge && use->is_AddP(), 1140 "only AddP nodes are Field edges in CG"); 1141 if (use->outcnt() > 0) { // Don't process dead nodes 1142 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base)); 1143 if (addp2 != NULL) { 1144 assert(alloc->is_AllocateArray(),"array allocation was expected"); 1145 alloc_worklist.append_if_missing(addp2); 1146 } 1147 alloc_worklist.append_if_missing(use); 1148 } 1149 } 1150 1151 // An allocation may have an Initialize which has raw stores. Scan 1152 // the users of the raw allocation result and push AddP users 1153 // on alloc_worklist. 1154 Node *raw_result = alloc->proj_out(TypeFunc::Parms); 1155 assert (raw_result != NULL, "must have an allocation result"); 1156 for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) { 1157 Node *use = raw_result->fast_out(i); 1158 if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes 1159 Node* addp2 = find_second_addp(use, raw_result); 1160 if (addp2 != NULL) { 1161 assert(alloc->is_AllocateArray(),"array allocation was expected"); 1162 alloc_worklist.append_if_missing(addp2); 1163 } 1164 alloc_worklist.append_if_missing(use); 1165 } else if (use->is_MemBar()) { 1166 memnode_worklist.append_if_missing(use); 1167 } 1168 } 1169 } 1170 } else if (n->is_AddP()) { 1171 VectorSet* ptset = PointsTo(get_addp_base(n)); 1172 assert(ptset->Size() == 1, "AddP address is unique"); 1173 uint elem = ptset->getelem(); // Allocation node's index 1174 if (elem == _phantom_object) { 1175 assert(false, "escaped allocation"); 1176 continue; // Assume the value was set outside this method. 1177 } 1178 Node *base = get_map(elem); // CheckCastPP node 1179 if (!split_AddP(n, base, igvn)) continue; // wrong type from dead path 1180 tinst = igvn->type(base)->isa_oopptr(); 1181 } else if (n->is_Phi() || 1182 n->is_CheckCastPP() || 1183 n->is_EncodeP() || 1184 n->is_DecodeN() || 1185 (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) { 1186 if (visited.test_set(n->_idx)) { 1187 assert(n->is_Phi(), "loops only through Phi's"); 1188 continue; // already processed 1189 } 1190 VectorSet* ptset = PointsTo(n); 1191 if (ptset->Size() == 1) { 1192 uint elem = ptset->getelem(); // Allocation node's index 1193 if (elem == _phantom_object) { 1194 assert(false, "escaped allocation"); 1195 continue; // Assume the value was set outside this method. 1196 } 1197 Node *val = get_map(elem); // CheckCastPP node 1198 TypeNode *tn = n->as_Type(); 1199 tinst = igvn->type(val)->isa_oopptr(); 1200 assert(tinst != NULL && tinst->is_known_instance() && 1201 (uint)tinst->instance_id() == elem , "instance type expected."); 1202 1203 const Type *tn_type = igvn->type(tn); 1204 const TypeOopPtr *tn_t; 1205 if (tn_type->isa_narrowoop()) { 1206 tn_t = tn_type->make_ptr()->isa_oopptr(); 1207 } else { 1208 tn_t = tn_type->isa_oopptr(); 1209 } 1210 1211 if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) { 1212 if (tn_type->isa_narrowoop()) { 1213 tn_type = tinst->make_narrowoop(); 1214 } else { 1215 tn_type = tinst; 1216 } 1217 igvn->hash_delete(tn); 1218 igvn->set_type(tn, tn_type); 1219 tn->set_type(tn_type); 1220 igvn->hash_insert(tn); 1221 record_for_optimizer(n); 1222 } else { 1223 assert(tn_type == TypePtr::NULL_PTR || 1224 tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()), 1225 "unexpected type"); 1226 continue; // Skip dead path with different type 1227 } 1228 } 1229 } else { 1230 debug_only(n->dump();) 1231 assert(false, "EA: unexpected node"); 1232 continue; 1233 } 1234 // push allocation's users on appropriate worklist 1235 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1236 Node *use = n->fast_out(i); 1237 if(use->is_Mem() && use->in(MemNode::Address) == n) { 1238 // Load/store to instance's field 1239 memnode_worklist.append_if_missing(use); 1240 } else if (use->is_MemBar()) { 1241 memnode_worklist.append_if_missing(use); 1242 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes 1243 Node* addp2 = find_second_addp(use, n); 1244 if (addp2 != NULL) { 1245 alloc_worklist.append_if_missing(addp2); 1246 } 1247 alloc_worklist.append_if_missing(use); 1248 } else if (use->is_Phi() || 1249 use->is_CheckCastPP() || 1250 use->is_EncodeP() || 1251 use->is_DecodeN() || 1252 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) { 1253 alloc_worklist.append_if_missing(use); 1254 #ifdef ASSERT 1255 } else if (use->is_Mem()) { 1256 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path"); 1257 } else if (use->is_MergeMem()) { 1258 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 1259 } else if (use->is_SafePoint()) { 1260 // Look for MergeMem nodes for calls which reference unique allocation 1261 // (through CheckCastPP nodes) even for debug info. 1262 Node* m = use->in(TypeFunc::Memory); 1263 if (m->is_MergeMem()) { 1264 assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 1265 } 1266 } else { 1267 uint op = use->Opcode(); 1268 if (!(op == Op_CmpP || op == Op_Conv2B || 1269 op == Op_CastP2X || op == Op_StoreCM || 1270 op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || 1271 op == Op_StrEquals || op == Op_StrIndexOf)) { 1272 n->dump(); 1273 use->dump(); 1274 assert(false, "EA: missing allocation reference path"); 1275 } 1276 #endif 1277 } 1278 } 1279 1280 } 1281 // New alias types were created in split_AddP(). 1282 uint new_index_end = (uint) _compile->num_alias_types(); 1283 1284 // Phase 2: Process MemNode's from memnode_worklist. compute new address type and 1285 // compute new values for Memory inputs (the Memory inputs are not 1286 // actually updated until phase 4.) 1287 if (memnode_worklist.length() == 0) 1288 return; // nothing to do 1289 1290 while (memnode_worklist.length() != 0) { 1291 Node *n = memnode_worklist.pop(); 1292 if (visited.test_set(n->_idx)) 1293 continue; 1294 if (n->is_Phi() || n->is_ClearArray()) { 1295 // we don't need to do anything, but the users must be pushed 1296 } else if (n->is_MemBar()) { // Initialize, MemBar nodes 1297 // we don't need to do anything, but the users must be pushed 1298 n = n->as_MemBar()->proj_out(TypeFunc::Memory); 1299 if (n == NULL) 1300 continue; 1301 } else { 1302 assert(n->is_Mem(), "memory node required."); 1303 Node *addr = n->in(MemNode::Address); 1304 const Type *addr_t = igvn->type(addr); 1305 if (addr_t == Type::TOP) 1306 continue; 1307 assert (addr_t->isa_ptr() != NULL, "pointer type required."); 1308 int alias_idx = _compile->get_alias_index(addr_t->is_ptr()); 1309 assert ((uint)alias_idx < new_index_end, "wrong alias index"); 1310 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis, igvn); 1311 if (_compile->failing()) { 1312 return; 1313 } 1314 if (mem != n->in(MemNode::Memory)) { 1315 // We delay the memory edge update since we need old one in 1316 // MergeMem code below when instances memory slices are separated. 1317 debug_only(Node* pn = ptnode_adr(n->_idx)->_node;) 1318 assert(pn == NULL || pn == n, "wrong node"); 1319 set_map(n->_idx, mem); 1320 ptnode_adr(n->_idx)->_node = n; 1321 } 1322 if (n->is_Load()) { 1323 continue; // don't push users 1324 } else if (n->is_LoadStore()) { 1325 // get the memory projection 1326 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1327 Node *use = n->fast_out(i); 1328 if (use->Opcode() == Op_SCMemProj) { 1329 n = use; 1330 break; 1331 } 1332 } 1333 assert(n->Opcode() == Op_SCMemProj, "memory projection required"); 1334 } 1335 } 1336 // push user on appropriate worklist 1337 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1338 Node *use = n->fast_out(i); 1339 if (use->is_Phi() || use->is_ClearArray()) { 1340 memnode_worklist.append_if_missing(use); 1341 } else if(use->is_Mem() && use->in(MemNode::Memory) == n) { 1342 if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores 1343 continue; 1344 memnode_worklist.append_if_missing(use); 1345 } else if (use->is_MemBar()) { 1346 memnode_worklist.append_if_missing(use); 1347 #ifdef ASSERT 1348 } else if(use->is_Mem()) { 1349 assert(use->in(MemNode::Memory) != n, "EA: missing memory path"); 1350 } else if (use->is_MergeMem()) { 1351 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 1352 } else { 1353 uint op = use->Opcode(); 1354 if (!(op == Op_StoreCM || 1355 (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL && 1356 strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) || 1357 op == Op_AryEq || op == Op_StrComp || 1358 op == Op_StrEquals || op == Op_StrIndexOf)) { 1359 n->dump(); 1360 use->dump(); 1361 assert(false, "EA: missing memory path"); 1362 } 1363 #endif 1364 } 1365 } 1366 } 1367 1368 // Phase 3: Process MergeMem nodes from mergemem_worklist. 1369 // Walk each memory slice moving the first node encountered of each 1370 // instance type to the the input corresponding to its alias index. 1371 uint length = _mergemem_worklist.length(); 1372 for( uint next = 0; next < length; ++next ) { 1373 MergeMemNode* nmm = _mergemem_worklist.at(next); 1374 assert(!visited.test_set(nmm->_idx), "should not be visited before"); 1375 // Note: we don't want to use MergeMemStream here because we only want to 1376 // scan inputs which exist at the start, not ones we add during processing. 1377 // Note 2: MergeMem may already contains instance memory slices added 1378 // during find_inst_mem() call when memory nodes were processed above. 1379 igvn->hash_delete(nmm); 1380 uint nslices = nmm->req(); 1381 for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) { 1382 Node* mem = nmm->in(i); 1383 Node* cur = NULL; 1384 if (mem == NULL || mem->is_top()) 1385 continue; 1386 // First, update mergemem by moving memory nodes to corresponding slices 1387 // if their type became more precise since this mergemem was created. 1388 while (mem->is_Mem()) { 1389 const Type *at = igvn->type(mem->in(MemNode::Address)); 1390 if (at != Type::TOP) { 1391 assert (at->isa_ptr() != NULL, "pointer type required."); 1392 uint idx = (uint)_compile->get_alias_index(at->is_ptr()); 1393 if (idx == i) { 1394 if (cur == NULL) 1395 cur = mem; 1396 } else { 1397 if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) { 1398 nmm->set_memory_at(idx, mem); 1399 } 1400 } 1401 } 1402 mem = mem->in(MemNode::Memory); 1403 } 1404 nmm->set_memory_at(i, (cur != NULL) ? cur : mem); 1405 // Find any instance of the current type if we haven't encountered 1406 // already a memory slice of the instance along the memory chain. 1407 for (uint ni = new_index_start; ni < new_index_end; ni++) { 1408 if((uint)_compile->get_general_index(ni) == i) { 1409 Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni); 1410 if (nmm->is_empty_memory(m)) { 1411 Node* result = find_inst_mem(mem, ni, orig_phis, igvn); 1412 if (_compile->failing()) { 1413 return; 1414 } 1415 nmm->set_memory_at(ni, result); 1416 } 1417 } 1418 } 1419 } 1420 // Find the rest of instances values 1421 for (uint ni = new_index_start; ni < new_index_end; ni++) { 1422 const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr(); 1423 Node* result = step_through_mergemem(nmm, ni, tinst); 1424 if (result == nmm->base_memory()) { 1425 // Didn't find instance memory, search through general slice recursively. 1426 result = nmm->memory_at(_compile->get_general_index(ni)); 1427 result = find_inst_mem(result, ni, orig_phis, igvn); 1428 if (_compile->failing()) { 1429 return; 1430 } 1431 nmm->set_memory_at(ni, result); 1432 } 1433 } 1434 igvn->hash_insert(nmm); 1435 record_for_optimizer(nmm); 1436 } 1437 1438 // Phase 4: Update the inputs of non-instance memory Phis and 1439 // the Memory input of memnodes 1440 // First update the inputs of any non-instance Phi's from 1441 // which we split out an instance Phi. Note we don't have 1442 // to recursively process Phi's encounted on the input memory 1443 // chains as is done in split_memory_phi() since they will 1444 // also be processed here. 1445 for (int j = 0; j < orig_phis.length(); j++) { 1446 PhiNode *phi = orig_phis.at(j); 1447 int alias_idx = _compile->get_alias_index(phi->adr_type()); 1448 igvn->hash_delete(phi); 1449 for (uint i = 1; i < phi->req(); i++) { 1450 Node *mem = phi->in(i); 1451 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis, igvn); 1452 if (_compile->failing()) { 1453 return; 1454 } 1455 if (mem != new_mem) { 1456 phi->set_req(i, new_mem); 1457 } 1458 } 1459 igvn->hash_insert(phi); 1460 record_for_optimizer(phi); 1461 } 1462 1463 // Update the memory inputs of MemNodes with the value we computed 1464 // in Phase 2 and move stores memory users to corresponding memory slices. 1465 1466 // Disable memory split verification code until the fix for 6984348. 1467 // Currently it produces false negative results since it does not cover all cases. 1468 #if 0 // ifdef ASSERT 1469 visited.Reset(); 1470 Node_Stack old_mems(arena, _compile->unique() >> 2); 1471 #endif 1472 for (uint i = 0; i < nodes_size(); i++) { 1473 Node *nmem = get_map(i); 1474 if (nmem != NULL) { 1475 Node *n = ptnode_adr(i)->_node; 1476 assert(n != NULL, "sanity"); 1477 if (n->is_Mem()) { 1478 #if 0 // ifdef ASSERT 1479 Node* old_mem = n->in(MemNode::Memory); 1480 if (!visited.test_set(old_mem->_idx)) { 1481 old_mems.push(old_mem, old_mem->outcnt()); 1482 } 1483 #endif 1484 assert(n->in(MemNode::Memory) != nmem, "sanity"); 1485 if (!n->is_Load()) { 1486 // Move memory users of a store first. 1487 move_inst_mem(n, orig_phis, igvn); 1488 } 1489 // Now update memory input 1490 igvn->hash_delete(n); 1491 n->set_req(MemNode::Memory, nmem); 1492 igvn->hash_insert(n); 1493 record_for_optimizer(n); 1494 } else { 1495 assert(n->is_Allocate() || n->is_CheckCastPP() || 1496 n->is_AddP() || n->is_Phi(), "unknown node used for set_map()"); 1497 } 1498 } 1499 } 1500 #if 0 // ifdef ASSERT 1501 // Verify that memory was split correctly 1502 while (old_mems.is_nonempty()) { 1503 Node* old_mem = old_mems.node(); 1504 uint old_cnt = old_mems.index(); 1505 old_mems.pop(); 1506 assert(old_cnt == old_mem->outcnt(), "old mem could be lost"); 1507 } 1508 #endif 1509 } 1510 1511 bool ConnectionGraph::has_candidates(Compile *C) { 1512 // EA brings benefits only when the code has allocations and/or locks which 1513 // are represented by ideal Macro nodes. 1514 int cnt = C->macro_count(); 1515 for( int i=0; i < cnt; i++ ) { 1516 Node *n = C->macro_node(i); 1517 if ( n->is_Allocate() ) 1518 return true; 1519 if( n->is_Lock() ) { 1520 Node* obj = n->as_Lock()->obj_node()->uncast(); 1521 if( !(obj->is_Parm() || obj->is_Con()) ) 1522 return true; 1523 } 1524 } 1525 return false; 1526 } 1527 1528 void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) { 1529 // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction 1530 // to create space for them in ConnectionGraph::_nodes[]. 1531 Node* oop_null = igvn->zerocon(T_OBJECT); 1532 Node* noop_null = igvn->zerocon(T_NARROWOOP); 1533 1534 ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn); 1535 // Perform escape analysis 1536 if (congraph->compute_escape()) { 1537 // There are non escaping objects. 1538 C->set_congraph(congraph); 1539 } 1540 1541 // Cleanup. 1542 if (oop_null->outcnt() == 0) 1543 igvn->hash_delete(oop_null); 1544 if (noop_null->outcnt() == 0) 1545 igvn->hash_delete(noop_null); 1546 } 1547 1548 bool ConnectionGraph::compute_escape() { 1549 Compile* C = _compile; 1550 1551 // 1. Populate Connection Graph (CG) with Ideal nodes. 1552 1553 Unique_Node_List worklist_init; 1554 worklist_init.map(C->unique(), NULL); // preallocate space 1555 1556 // Initialize worklist 1557 if (C->root() != NULL) { 1558 worklist_init.push(C->root()); 1559 } 1560 1561 GrowableArray<Node*> alloc_worklist; 1562 GrowableArray<Node*> addp_worklist; 1563 GrowableArray<Node*> ptr_cmp_worklist; 1564 PhaseGVN* igvn = _igvn; 1565 bool has_allocations = false; 1566 1567 // Push all useful nodes onto CG list and set their type. 1568 for( uint next = 0; next < worklist_init.size(); ++next ) { 1569 Node* n = worklist_init.at(next); 1570 record_for_escape_analysis(n, igvn); 1571 // Only allocations and java static calls results are checked 1572 // for an escape status. See process_call_result() below. 1573 if (n->is_Allocate() || n->is_CallStaticJava() && 1574 ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject) { 1575 has_allocations = true; 1576 if (n->is_Allocate()) 1577 alloc_worklist.append(n); 1578 } else if(n->is_AddP()) { 1579 // Collect address nodes. Use them during stage 3 below 1580 // to build initial connection graph field edges. 1581 addp_worklist.append(n); 1582 } else if (n->is_MergeMem()) { 1583 // Collect all MergeMem nodes to add memory slices for 1584 // scalar replaceable objects in split_unique_types(). 1585 _mergemem_worklist.append(n->as_MergeMem()); 1586 } else if (OptimizePtrCompare && n->is_Cmp() && 1587 (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) { 1588 // Compare pointers nodes 1589 ptr_cmp_worklist.append(n); 1590 } 1591 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1592 Node* m = n->fast_out(i); // Get user 1593 worklist_init.push(m); 1594 } 1595 } 1596 1597 if (!has_allocations) { 1598 _collecting = false; 1599 return false; // Nothing to do. 1600 } 1601 1602 // 2. First pass to create simple CG edges (doesn't require to walk CG). 1603 uint delayed_size = _delayed_worklist.size(); 1604 for( uint next = 0; next < delayed_size; ++next ) { 1605 Node* n = _delayed_worklist.at(next); 1606 build_connection_graph(n, igvn); 1607 } 1608 1609 // 3. Pass to create initial fields edges (JavaObject -F-> AddP) 1610 // to reduce number of iterations during stage 4 below. 1611 uint addp_length = addp_worklist.length(); 1612 for( uint next = 0; next < addp_length; ++next ) { 1613 Node* n = addp_worklist.at(next); 1614 Node* base = get_addp_base(n); 1615 if (base->is_Proj() && base->in(0)->is_Call()) 1616 base = base->in(0); 1617 PointsToNode::NodeType nt = ptnode_adr(base->_idx)->node_type(); 1618 if (nt == PointsToNode::JavaObject) { 1619 build_connection_graph(n, igvn); 1620 } 1621 } 1622 1623 GrowableArray<int> cg_worklist; 1624 cg_worklist.append(_phantom_object); 1625 GrowableArray<uint> worklist; 1626 1627 // 4. Build Connection Graph which need 1628 // to walk the connection graph. 1629 _progress = false; 1630 for (uint ni = 0; ni < nodes_size(); ni++) { 1631 PointsToNode* ptn = ptnode_adr(ni); 1632 Node *n = ptn->_node; 1633 if (n != NULL) { // Call, AddP, LoadP, StoreP 1634 build_connection_graph(n, igvn); 1635 if (ptn->node_type() != PointsToNode::UnknownType) 1636 cg_worklist.append(n->_idx); // Collect CG nodes 1637 if (!_processed.test(n->_idx)) 1638 worklist.append(n->_idx); // Collect C/A/L/S nodes 1639 } 1640 } 1641 1642 // After IGVN user nodes may have smaller _idx than 1643 // their inputs so they will be processed first in 1644 // previous loop. Because of that not all Graph 1645 // edges will be created. Walk over interesting 1646 // nodes again until no new edges are created. 1647 // 1648 // Normally only 1-3 passes needed to build 1649 // Connection Graph depending on graph complexity. 1650 // Observed 8 passes in jvm2008 compiler.compiler. 1651 // Set limit to 20 to catch situation when something 1652 // did go wrong and recompile the method without EA. 1653 1654 #define CG_BUILD_ITER_LIMIT 20 1655 1656 uint length = worklist.length(); 1657 int iterations = 0; 1658 while(_progress && (iterations++ < CG_BUILD_ITER_LIMIT)) { 1659 _progress = false; 1660 for( uint next = 0; next < length; ++next ) { 1661 int ni = worklist.at(next); 1662 PointsToNode* ptn = ptnode_adr(ni); 1663 Node* n = ptn->_node; 1664 assert(n != NULL, "should be known node"); 1665 build_connection_graph(n, igvn); 1666 } 1667 } 1668 if (iterations >= CG_BUILD_ITER_LIMIT) { 1669 assert(iterations < CG_BUILD_ITER_LIMIT, 1670 err_msg("infinite EA connection graph build with %d nodes and worklist size %d", 1671 nodes_size(), length)); 1672 // Possible infinite build_connection_graph loop, 1673 // retry compilation without escape analysis. 1674 C->record_failure(C2Compiler::retry_no_escape_analysis()); 1675 _collecting = false; 1676 return false; 1677 } 1678 #undef CG_BUILD_ITER_LIMIT 1679 1680 Arena* arena = Thread::current()->resource_area(); 1681 VectorSet visited(arena); 1682 1683 // 5. Find fields initializing values for not escaped allocations 1684 uint alloc_length = alloc_worklist.length(); 1685 for (uint next = 0; next < alloc_length; ++next) { 1686 Node* n = alloc_worklist.at(next); 1687 if (ptnode_adr(n->_idx)->escape_state() == PointsToNode::NoEscape) { 1688 find_init_values(n, &visited, igvn); 1689 } 1690 } 1691 1692 worklist.clear(); 1693 1694 // 6. Remove deferred edges from the graph. 1695 uint cg_length = cg_worklist.length(); 1696 for (uint next = 0; next < cg_length; ++next) { 1697 int ni = cg_worklist.at(next); 1698 PointsToNode* ptn = ptnode_adr(ni); 1699 PointsToNode::NodeType nt = ptn->node_type(); 1700 if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) { 1701 remove_deferred(ni, &worklist, &visited); 1702 } 1703 } 1704 1705 // 7. Adjust escape state of nonescaping objects. 1706 for (uint next = 0; next < addp_length; ++next) { 1707 Node* n = addp_worklist.at(next); 1708 adjust_escape_state(n); 1709 } 1710 1711 // 8. Propagate escape states. 1712 worklist.clear(); 1713 1714 // mark all nodes reachable from GlobalEscape nodes 1715 (void)propagate_escape_state(&cg_worklist, &worklist, PointsToNode::GlobalEscape); 1716 1717 // mark all nodes reachable from ArgEscape nodes 1718 bool has_non_escaping_obj = propagate_escape_state(&cg_worklist, &worklist, PointsToNode::ArgEscape); 1719 1720 // push all NoEscape nodes on the worklist 1721 for( uint next = 0; next < cg_length; ++next ) { 1722 int nk = cg_worklist.at(next); 1723 if (ptnode_adr(nk)->escape_state() == PointsToNode::NoEscape) 1724 worklist.push(nk); 1725 } 1726 alloc_worklist.clear(); 1727 // mark all nodes reachable from NoEscape nodes 1728 while(worklist.length() > 0) { 1729 uint nk = worklist.pop(); 1730 PointsToNode* ptn = ptnode_adr(nk); 1731 if (ptn->node_type() == PointsToNode::JavaObject && 1732 !(nk == _noop_null || nk == _oop_null)) 1733 has_non_escaping_obj = true; // Non Escape 1734 Node* n = ptn->_node; 1735 bool scalar_replaceable = ptn->scalar_replaceable(); 1736 if (n->is_Allocate() && scalar_replaceable) { 1737 // Push scalar replaceable allocations on alloc_worklist 1738 // for processing in split_unique_types(). Note, 1739 // following code may change scalar_replaceable value. 1740 alloc_worklist.append(n); 1741 } 1742 uint e_cnt = ptn->edge_count(); 1743 for (uint ei = 0; ei < e_cnt; ei++) { 1744 uint npi = ptn->edge_target(ei); 1745 PointsToNode *np = ptnode_adr(npi); 1746 if (np->escape_state() < PointsToNode::NoEscape) { 1747 set_escape_state(npi, PointsToNode::NoEscape); 1748 if (!scalar_replaceable) { 1749 np->set_scalar_replaceable(false); 1750 } 1751 worklist.push(npi); 1752 } else if (np->scalar_replaceable() && !scalar_replaceable) { 1753 // Propagate scalar_replaceable value. 1754 np->set_scalar_replaceable(false); 1755 worklist.push(npi); 1756 } 1757 } 1758 } 1759 1760 _collecting = false; 1761 assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build"); 1762 1763 assert(ptnode_adr(_oop_null)->escape_state() == PointsToNode::NoEscape, "sanity"); 1764 if (UseCompressedOops) { 1765 assert(ptnode_adr(_noop_null)->escape_state() == PointsToNode::NoEscape, "sanity"); 1766 } 1767 1768 if (EliminateLocks && has_non_escaping_obj) { 1769 // Mark locks before changing ideal graph. 1770 int cnt = C->macro_count(); 1771 for( int i=0; i < cnt; i++ ) { 1772 Node *n = C->macro_node(i); 1773 if (n->is_AbstractLock()) { // Lock and Unlock nodes 1774 AbstractLockNode* alock = n->as_AbstractLock(); 1775 if (!alock->is_eliminated() || alock->is_coarsened()) { 1776 PointsToNode::EscapeState es = escape_state(alock->obj_node()); 1777 assert(es != PointsToNode::UnknownEscape, "should know"); 1778 if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) { 1779 if (!alock->is_eliminated()) { 1780 // Mark it eliminated to update any counters 1781 alock->set_eliminated(); 1782 } else { 1783 // The lock could be marked eliminated by lock coarsening 1784 // code during first IGVN before EA. Clear coarsened flag 1785 // to eliminate all associated locks/unlocks and relock 1786 // during deoptimization. 1787 alock->clear_coarsened(); 1788 } 1789 } 1790 } 1791 } 1792 } 1793 } 1794 1795 if (OptimizePtrCompare && has_non_escaping_obj) { 1796 // Add ConI(#CC_GT) and ConI(#CC_EQ). 1797 _pcmp_neq = igvn->makecon(TypeInt::CC_GT); 1798 _pcmp_eq = igvn->makecon(TypeInt::CC_EQ); 1799 // Optimize objects compare. 1800 while (ptr_cmp_worklist.length() != 0) { 1801 Node *n = ptr_cmp_worklist.pop(); 1802 Node *res = optimize_ptr_compare(n); 1803 if (res != NULL) { 1804 #ifndef PRODUCT 1805 if (PrintOptimizePtrCompare) { 1806 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ")); 1807 if (Verbose) { 1808 n->dump(1); 1809 } 1810 } 1811 #endif 1812 _igvn->replace_node(n, res); 1813 } 1814 } 1815 // cleanup 1816 if (_pcmp_neq->outcnt() == 0) 1817 igvn->hash_delete(_pcmp_neq); 1818 if (_pcmp_eq->outcnt() == 0) 1819 igvn->hash_delete(_pcmp_eq); 1820 } 1821 1822 #ifndef PRODUCT 1823 if (PrintEscapeAnalysis) { 1824 dump(); // Dump ConnectionGraph 1825 } 1826 #endif 1827 1828 bool has_scalar_replaceable_candidates = false; 1829 alloc_length = alloc_worklist.length(); 1830 for (uint next = 0; next < alloc_length; ++next) { 1831 Node* n = alloc_worklist.at(next); 1832 PointsToNode* ptn = ptnode_adr(n->_idx); 1833 assert(ptn->escape_state() == PointsToNode::NoEscape, "sanity"); 1834 if (ptn->scalar_replaceable()) { 1835 has_scalar_replaceable_candidates = true; 1836 break; 1837 } 1838 } 1839 1840 if ( has_scalar_replaceable_candidates && 1841 C->AliasLevel() >= 3 && EliminateAllocations ) { 1842 1843 // Now use the escape information to create unique types for 1844 // scalar replaceable objects. 1845 split_unique_types(alloc_worklist); 1846 1847 if (C->failing()) return false; 1848 1849 C->print_method("After Escape Analysis", 2); 1850 1851 #ifdef ASSERT 1852 } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) { 1853 tty->print("=== No allocations eliminated for "); 1854 C->method()->print_short_name(); 1855 if(!EliminateAllocations) { 1856 tty->print(" since EliminateAllocations is off ==="); 1857 } else if(!has_scalar_replaceable_candidates) { 1858 tty->print(" since there are no scalar replaceable candidates ==="); 1859 } else if(C->AliasLevel() < 3) { 1860 tty->print(" since AliasLevel < 3 ==="); 1861 } 1862 tty->cr(); 1863 #endif 1864 } 1865 return has_non_escaping_obj; 1866 } 1867 1868 // Find fields initializing values for allocations. 1869 void ConnectionGraph::find_init_values(Node* alloc, VectorSet* visited, PhaseTransform* phase) { 1870 assert(alloc->is_Allocate(), "Should be called for Allocate nodes only"); 1871 PointsToNode* pta = ptnode_adr(alloc->_idx); 1872 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only"); 1873 InitializeNode* ini = alloc->as_Allocate()->initialization(); 1874 1875 Compile* C = _compile; 1876 visited->Reset(); 1877 // Check if a oop field's initializing value is recorded and add 1878 // a corresponding NULL field's value if it is not recorded. 1879 // Connection Graph does not record a default initialization by NULL 1880 // captured by Initialize node. 1881 // 1882 uint ae_cnt = pta->edge_count(); 1883 for (uint ei = 0; ei < ae_cnt; ei++) { 1884 uint nidx = pta->edge_target(ei); // Field (AddP) 1885 PointsToNode* ptn = ptnode_adr(nidx); 1886 assert(ptn->_node->is_AddP(), "Should be AddP nodes only"); 1887 int offset = ptn->offset(); 1888 if (offset != Type::OffsetBot && 1889 offset != oopDesc::klass_offset_in_bytes() && 1890 !visited->test_set(offset)) { 1891 1892 // Check only oop fields. 1893 const Type* adr_type = ptn->_node->as_AddP()->bottom_type(); 1894 BasicType basic_field_type = T_INT; 1895 if (adr_type->isa_instptr()) { 1896 ciField* field = C->alias_type(adr_type->isa_instptr())->field(); 1897 if (field != NULL) { 1898 basic_field_type = field->layout_type(); 1899 } else { 1900 // Ignore non field load (for example, klass load) 1901 } 1902 } else if (adr_type->isa_aryptr()) { 1903 if (offset != arrayOopDesc::length_offset_in_bytes()) { 1904 const Type* elemtype = adr_type->isa_aryptr()->elem(); 1905 basic_field_type = elemtype->array_element_basic_type(); 1906 } else { 1907 // Ignore array length load 1908 } 1909 #ifdef ASSERT 1910 } else { 1911 // Raw pointers are used for initializing stores so skip it 1912 // since it should be recorded already 1913 Node* base = get_addp_base(ptn->_node); 1914 assert(adr_type->isa_rawptr() && base->is_Proj() && 1915 (base->in(0) == alloc),"unexpected pointer type"); 1916 #endif 1917 } 1918 if (basic_field_type == T_OBJECT || 1919 basic_field_type == T_NARROWOOP || 1920 basic_field_type == T_ARRAY) { 1921 Node* value = NULL; 1922 if (ini != NULL) { 1923 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT; 1924 Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase); 1925 if (store != NULL && store->is_Store()) { 1926 value = store->in(MemNode::ValueIn); 1927 } else if (ptn->edge_count() > 0) { // Are there oop stores? 1928 // Check for a store which follows allocation without branches. 1929 // For example, a volatile field store is not collected 1930 // by Initialize node. TODO: it would be nice to use idom() here. 1931 // 1932 // Search all references to the same field which use different 1933 // AddP nodes, for example, in the next case: 1934 // 1935 // Point p[] = new Point[1]; 1936 // if ( x ) { p[0] = new Point(); p[0].x = x; } 1937 // if ( p[0] != null ) { y = p[0].x; } // has CastPP 1938 // 1939 for (uint next = ei; (next < ae_cnt) && (value == NULL); next++) { 1940 uint fpi = pta->edge_target(next); // Field (AddP) 1941 PointsToNode *ptf = ptnode_adr(fpi); 1942 if (ptf->offset() == offset) { 1943 Node* nf = ptf->_node; 1944 for (DUIterator_Fast imax, i = nf->fast_outs(imax); i < imax; i++) { 1945 store = nf->fast_out(i); 1946 if (store->is_Store() && store->in(0) != NULL) { 1947 Node* ctrl = store->in(0); 1948 while(!(ctrl == ini || ctrl == alloc || ctrl == NULL || 1949 ctrl == C->root() || ctrl == C->top() || ctrl->is_Region() || 1950 ctrl->is_IfTrue() || ctrl->is_IfFalse())) { 1951 ctrl = ctrl->in(0); 1952 } 1953 if (ctrl == ini || ctrl == alloc) { 1954 value = store->in(MemNode::ValueIn); 1955 break; 1956 } 1957 } 1958 } 1959 } 1960 } 1961 } 1962 } 1963 if (value == NULL || value != ptnode_adr(value->_idx)->_node) { 1964 // A field's initializing value was not recorded. Add NULL. 1965 uint null_idx = UseCompressedOops ? _noop_null : _oop_null; 1966 add_edge_from_fields(alloc->_idx, null_idx, offset); 1967 } 1968 } 1969 } 1970 } 1971 } 1972 1973 // Adjust escape state after Connection Graph is built. 1974 void ConnectionGraph::adjust_escape_state(Node* n) { 1975 PointsToNode* ptn = ptnode_adr(n->_idx); 1976 assert(n->is_AddP(), "Should be called for AddP nodes only"); 1977 // Search for objects which are not scalar replaceable 1978 // and mark them to propagate the state to referenced objects. 1979 // 1980 1981 int offset = ptn->offset(); 1982 Node* base = get_addp_base(n); 1983 VectorSet* ptset = PointsTo(base); 1984 int ptset_size = ptset->Size(); 1985 1986 // An object is not scalar replaceable if the field which may point 1987 // to it has unknown offset (unknown element of an array of objects). 1988 // 1989 1990 if (offset == Type::OffsetBot) { 1991 uint e_cnt = ptn->edge_count(); 1992 for (uint ei = 0; ei < e_cnt; ei++) { 1993 uint npi = ptn->edge_target(ei); 1994 ptnode_adr(npi)->set_scalar_replaceable(false); 1995 } 1996 } 1997 1998 // Currently an object is not scalar replaceable if a LoadStore node 1999 // access its field since the field value is unknown after it. 2000 // 2001 bool has_LoadStore = false; 2002 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2003 Node *use = n->fast_out(i); 2004 if (use->is_LoadStore()) { 2005 has_LoadStore = true; 2006 break; 2007 } 2008 } 2009 // An object is not scalar replaceable if the address points 2010 // to unknown field (unknown element for arrays, offset is OffsetBot). 2011 // 2012 // Or the address may point to more then one object. This may produce 2013 // the false positive result (set not scalar replaceable) 2014 // since the flow-insensitive escape analysis can't separate 2015 // the case when stores overwrite the field's value from the case 2016 // when stores happened on different control branches. 2017 // 2018 // Note: it will disable scalar replacement in some cases: 2019 // 2020 // Point p[] = new Point[1]; 2021 // p[0] = new Point(); // Will be not scalar replaced 2022 // 2023 // but it will save us from incorrect optimizations in next cases: 2024 // 2025 // Point p[] = new Point[1]; 2026 // if ( x ) p[0] = new Point(); // Will be not scalar replaced 2027 // 2028 if (ptset_size > 1 || ptset_size != 0 && 2029 (has_LoadStore || offset == Type::OffsetBot)) { 2030 for( VectorSetI j(ptset); j.test(); ++j ) { 2031 ptnode_adr(j.elem)->set_scalar_replaceable(false); 2032 } 2033 } 2034 } 2035 2036 // Propagate escape states to referenced nodes. 2037 bool ConnectionGraph::propagate_escape_state(GrowableArray<int>* cg_worklist, 2038 GrowableArray<uint>* worklist, 2039 PointsToNode::EscapeState esc_state) { 2040 bool has_java_obj = false; 2041 2042 // push all nodes with the same escape state on the worklist 2043 uint cg_length = cg_worklist->length(); 2044 for (uint next = 0; next < cg_length; ++next) { 2045 int nk = cg_worklist->at(next); 2046 if (ptnode_adr(nk)->escape_state() == esc_state) 2047 worklist->push(nk); 2048 } 2049 // mark all reachable nodes 2050 while (worklist->length() > 0) { 2051 PointsToNode* ptn = ptnode_adr(worklist->pop()); 2052 if (ptn->node_type() == PointsToNode::JavaObject) { 2053 has_java_obj = true; 2054 } 2055 uint e_cnt = ptn->edge_count(); 2056 for (uint ei = 0; ei < e_cnt; ei++) { 2057 uint npi = ptn->edge_target(ei); 2058 PointsToNode *np = ptnode_adr(npi); 2059 if (np->escape_state() < esc_state) { 2060 set_escape_state(npi, esc_state); 2061 worklist->push(npi); 2062 } 2063 } 2064 } 2065 // Has not escaping java objects 2066 return has_java_obj && (esc_state < PointsToNode::GlobalEscape); 2067 } 2068 2069 // Optimize objects compare. 2070 Node* ConnectionGraph::optimize_ptr_compare(Node* n) { 2071 assert(OptimizePtrCompare, "sanity"); 2072 // Clone returned Set since PointsTo() returns pointer 2073 // to the same structure ConnectionGraph.pt_ptset. 2074 VectorSet ptset1 = *PointsTo(n->in(1)); 2075 VectorSet ptset2 = *PointsTo(n->in(2)); 2076 2077 // Check simple cases first. 2078 if (ptset1.Size() == 1) { 2079 uint pt1 = ptset1.getelem(); 2080 PointsToNode* ptn1 = ptnode_adr(pt1); 2081 if (ptn1->escape_state() == PointsToNode::NoEscape) { 2082 if (ptset2.Size() == 1 && ptset2.getelem() == pt1) { 2083 // Comparing the same not escaping object. 2084 return _pcmp_eq; 2085 } 2086 Node* obj = ptn1->_node; 2087 // Comparing not escaping allocation. 2088 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 2089 !ptset2.test(pt1)) { 2090 return _pcmp_neq; // This includes nullness check. 2091 } 2092 } 2093 } else if (ptset2.Size() == 1) { 2094 uint pt2 = ptset2.getelem(); 2095 PointsToNode* ptn2 = ptnode_adr(pt2); 2096 if (ptn2->escape_state() == PointsToNode::NoEscape) { 2097 Node* obj = ptn2->_node; 2098 // Comparing not escaping allocation. 2099 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 2100 !ptset1.test(pt2)) { 2101 return _pcmp_neq; // This includes nullness check. 2102 } 2103 } 2104 } 2105 2106 if (!ptset1.disjoint(ptset2)) { 2107 return NULL; // Sets are not disjoint 2108 } 2109 2110 // Sets are disjoint. 2111 bool set1_has_unknown_ptr = ptset1.test(_phantom_object) != 0; 2112 bool set2_has_unknown_ptr = ptset2.test(_phantom_object) != 0; 2113 bool set1_has_null_ptr = (ptset1.test(_oop_null) | ptset1.test(_noop_null)) != 0; 2114 bool set2_has_null_ptr = (ptset2.test(_oop_null) | ptset2.test(_noop_null)) != 0; 2115 2116 if (set1_has_unknown_ptr && set2_has_null_ptr || 2117 set2_has_unknown_ptr && set1_has_null_ptr) { 2118 // Check nullness of unknown object. 2119 return NULL; 2120 } 2121 2122 // Disjointness by itself is not sufficient since 2123 // alias analysis is not complete for escaped objects. 2124 // Disjoint sets are definitely unrelated only when 2125 // at least one set has only not escaping objects. 2126 if (!set1_has_unknown_ptr && !set1_has_null_ptr) { 2127 bool has_only_non_escaping_alloc = true; 2128 for (VectorSetI i(&ptset1); i.test(); ++i) { 2129 uint pt = i.elem; 2130 PointsToNode* ptn = ptnode_adr(pt); 2131 Node* obj = ptn->_node; 2132 if (ptn->escape_state() != PointsToNode::NoEscape || 2133 !(obj->is_Allocate() || obj->is_CallStaticJava())) { 2134 has_only_non_escaping_alloc = false; 2135 break; 2136 } 2137 } 2138 if (has_only_non_escaping_alloc) { 2139 return _pcmp_neq; 2140 } 2141 } 2142 if (!set2_has_unknown_ptr && !set2_has_null_ptr) { 2143 bool has_only_non_escaping_alloc = true; 2144 for (VectorSetI i(&ptset2); i.test(); ++i) { 2145 uint pt = i.elem; 2146 PointsToNode* ptn = ptnode_adr(pt); 2147 Node* obj = ptn->_node; 2148 if (ptn->escape_state() != PointsToNode::NoEscape || 2149 !(obj->is_Allocate() || obj->is_CallStaticJava())) { 2150 has_only_non_escaping_alloc = false; 2151 break; 2152 } 2153 } 2154 if (has_only_non_escaping_alloc) { 2155 return _pcmp_neq; 2156 } 2157 } 2158 return NULL; 2159 } 2160 2161 void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) { 2162 2163 switch (call->Opcode()) { 2164 #ifdef ASSERT 2165 case Op_Allocate: 2166 case Op_AllocateArray: 2167 case Op_Lock: 2168 case Op_Unlock: 2169 assert(false, "should be done already"); 2170 break; 2171 #endif 2172 case Op_CallLeaf: 2173 case Op_CallLeafNoFP: 2174 { 2175 // Stub calls, objects do not escape but they are not scale replaceable. 2176 // Adjust escape state for outgoing arguments. 2177 const TypeTuple * d = call->tf()->domain(); 2178 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 2179 const Type* at = d->field_at(i); 2180 Node *arg = call->in(i)->uncast(); 2181 const Type *aat = phase->type(arg); 2182 if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr() && 2183 ptnode_adr(arg->_idx)->escape_state() < PointsToNode::ArgEscape) { 2184 2185 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR || 2186 aat->isa_ptr() != NULL, "expecting an Ptr"); 2187 #ifdef ASSERT 2188 if (!(call->Opcode() == Op_CallLeafNoFP && 2189 call->as_CallLeaf()->_name != NULL && 2190 (strstr(call->as_CallLeaf()->_name, "arraycopy") != 0) || 2191 call->as_CallLeaf()->_name != NULL && 2192 (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 || 2193 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 )) 2194 ) { 2195 call->dump(); 2196 assert(false, "EA: unexpected CallLeaf"); 2197 } 2198 #endif 2199 set_escape_state(arg->_idx, PointsToNode::ArgEscape); 2200 if (arg->is_AddP()) { 2201 // 2202 // The inline_native_clone() case when the arraycopy stub is called 2203 // after the allocation before Initialize and CheckCastPP nodes. 2204 // 2205 // Set AddP's base (Allocate) as not scalar replaceable since 2206 // pointer to the base (with offset) is passed as argument. 2207 // 2208 arg = get_addp_base(arg); 2209 } 2210 for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) { 2211 uint pt = j.elem; 2212 set_escape_state(pt, PointsToNode::ArgEscape); 2213 } 2214 } 2215 } 2216 break; 2217 } 2218 2219 case Op_CallStaticJava: 2220 // For a static call, we know exactly what method is being called. 2221 // Use bytecode estimator to record the call's escape affects 2222 { 2223 ciMethod *meth = call->as_CallJava()->method(); 2224 BCEscapeAnalyzer *call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL; 2225 // fall-through if not a Java method or no analyzer information 2226 if (call_analyzer != NULL) { 2227 const TypeTuple * d = call->tf()->domain(); 2228 bool copy_dependencies = false; 2229 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 2230 const Type* at = d->field_at(i); 2231 int k = i - TypeFunc::Parms; 2232 Node *arg = call->in(i)->uncast(); 2233 2234 if (at->isa_oopptr() != NULL && 2235 ptnode_adr(arg->_idx)->escape_state() < PointsToNode::GlobalEscape) { 2236 2237 bool global_escapes = false; 2238 bool fields_escapes = false; 2239 if (!call_analyzer->is_arg_stack(k)) { 2240 // The argument global escapes, mark everything it could point to 2241 set_escape_state(arg->_idx, PointsToNode::GlobalEscape); 2242 global_escapes = true; 2243 } else { 2244 if (!call_analyzer->is_arg_local(k)) { 2245 // The argument itself doesn't escape, but any fields might 2246 fields_escapes = true; 2247 } 2248 set_escape_state(arg->_idx, PointsToNode::ArgEscape); 2249 copy_dependencies = true; 2250 } 2251 2252 for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) { 2253 uint pt = j.elem; 2254 if (global_escapes) { 2255 //The argument global escapes, mark everything it could point to 2256 set_escape_state(pt, PointsToNode::GlobalEscape); 2257 } else { 2258 if (fields_escapes) { 2259 // The argument itself doesn't escape, but any fields might 2260 add_edge_from_fields(pt, _phantom_object, Type::OffsetBot); 2261 } 2262 set_escape_state(pt, PointsToNode::ArgEscape); 2263 } 2264 } 2265 } 2266 } 2267 if (copy_dependencies) 2268 call_analyzer->copy_dependencies(_compile->dependencies()); 2269 break; 2270 } 2271 } 2272 2273 default: 2274 // Fall-through here if not a Java method or no analyzer information 2275 // or some other type of call, assume the worst case: all arguments 2276 // globally escape. 2277 { 2278 // adjust escape state for outgoing arguments 2279 const TypeTuple * d = call->tf()->domain(); 2280 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 2281 const Type* at = d->field_at(i); 2282 if (at->isa_oopptr() != NULL) { 2283 Node *arg = call->in(i)->uncast(); 2284 set_escape_state(arg->_idx, PointsToNode::GlobalEscape); 2285 for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) { 2286 uint pt = j.elem; 2287 set_escape_state(pt, PointsToNode::GlobalEscape); 2288 } 2289 } 2290 } 2291 } 2292 } 2293 } 2294 void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *phase) { 2295 CallNode *call = resproj->in(0)->as_Call(); 2296 uint call_idx = call->_idx; 2297 uint resproj_idx = resproj->_idx; 2298 2299 switch (call->Opcode()) { 2300 case Op_Allocate: 2301 { 2302 Node *k = call->in(AllocateNode::KlassNode); 2303 const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr(); 2304 assert(kt != NULL, "TypeKlassPtr required."); 2305 ciKlass* cik = kt->klass(); 2306 2307 PointsToNode::EscapeState es; 2308 uint edge_to; 2309 if (cik->is_subclass_of(_compile->env()->Thread_klass()) || 2310 !cik->is_instance_klass() || // StressReflectiveCode 2311 cik->as_instance_klass()->has_finalizer()) { 2312 es = PointsToNode::GlobalEscape; 2313 edge_to = _phantom_object; // Could not be worse 2314 } else { 2315 es = PointsToNode::NoEscape; 2316 edge_to = call_idx; 2317 assert(ptnode_adr(call_idx)->scalar_replaceable(), "sanity"); 2318 } 2319 set_escape_state(call_idx, es); 2320 add_pointsto_edge(resproj_idx, edge_to); 2321 _processed.set(resproj_idx); 2322 break; 2323 } 2324 2325 case Op_AllocateArray: 2326 { 2327 2328 Node *k = call->in(AllocateNode::KlassNode); 2329 const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr(); 2330 assert(kt != NULL, "TypeKlassPtr required."); 2331 ciKlass* cik = kt->klass(); 2332 2333 PointsToNode::EscapeState es; 2334 uint edge_to; 2335 if (!cik->is_array_klass()) { // StressReflectiveCode 2336 es = PointsToNode::GlobalEscape; 2337 edge_to = _phantom_object; 2338 } else { 2339 es = PointsToNode::NoEscape; 2340 edge_to = call_idx; 2341 assert(ptnode_adr(call_idx)->scalar_replaceable(), "sanity"); 2342 int length = call->in(AllocateNode::ALength)->find_int_con(-1); 2343 if (length < 0 || length > EliminateAllocationArraySizeLimit) { 2344 // Not scalar replaceable if the length is not constant or too big. 2345 ptnode_adr(call_idx)->set_scalar_replaceable(false); 2346 } 2347 } 2348 set_escape_state(call_idx, es); 2349 add_pointsto_edge(resproj_idx, edge_to); 2350 _processed.set(resproj_idx); 2351 break; 2352 } 2353 2354 case Op_CallStaticJava: 2355 // For a static call, we know exactly what method is being called. 2356 // Use bytecode estimator to record whether the call's return value escapes 2357 { 2358 bool done = true; 2359 const TypeTuple *r = call->tf()->range(); 2360 const Type* ret_type = NULL; 2361 2362 if (r->cnt() > TypeFunc::Parms) 2363 ret_type = r->field_at(TypeFunc::Parms); 2364 2365 // Note: we use isa_ptr() instead of isa_oopptr() here because the 2366 // _multianewarray functions return a TypeRawPtr. 2367 if (ret_type == NULL || ret_type->isa_ptr() == NULL) { 2368 _processed.set(resproj_idx); 2369 break; // doesn't return a pointer type 2370 } 2371 ciMethod *meth = call->as_CallJava()->method(); 2372 const TypeTuple * d = call->tf()->domain(); 2373 if (meth == NULL) { 2374 // not a Java method, assume global escape 2375 set_escape_state(call_idx, PointsToNode::GlobalEscape); 2376 add_pointsto_edge(resproj_idx, _phantom_object); 2377 } else { 2378 BCEscapeAnalyzer *call_analyzer = meth->get_bcea(); 2379 bool copy_dependencies = false; 2380 2381 if (call_analyzer->is_return_allocated()) { 2382 // Returns a newly allocated unescaped object, simply 2383 // update dependency information. 2384 // Mark it as NoEscape so that objects referenced by 2385 // it's fields will be marked as NoEscape at least. 2386 set_escape_state(call_idx, PointsToNode::NoEscape); 2387 ptnode_adr(call_idx)->set_scalar_replaceable(false); 2388 add_pointsto_edge(resproj_idx, call_idx); 2389 copy_dependencies = true; 2390 } else if (call_analyzer->is_return_local()) { 2391 // determine whether any arguments are returned 2392 set_escape_state(call_idx, PointsToNode::ArgEscape); 2393 bool ret_arg = false; 2394 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 2395 const Type* at = d->field_at(i); 2396 2397 if (at->isa_oopptr() != NULL) { 2398 Node *arg = call->in(i)->uncast(); 2399 2400 if (call_analyzer->is_arg_returned(i - TypeFunc::Parms)) { 2401 ret_arg = true; 2402 PointsToNode *arg_esp = ptnode_adr(arg->_idx); 2403 if (arg_esp->node_type() == PointsToNode::UnknownType) 2404 done = false; 2405 else if (arg_esp->node_type() == PointsToNode::JavaObject) 2406 add_pointsto_edge(resproj_idx, arg->_idx); 2407 else 2408 add_deferred_edge(resproj_idx, arg->_idx); 2409 } 2410 } 2411 } 2412 if (done && !ret_arg) { 2413 // Returns unknown object. 2414 set_escape_state(call_idx, PointsToNode::GlobalEscape); 2415 add_pointsto_edge(resproj_idx, _phantom_object); 2416 } 2417 if (done) { 2418 copy_dependencies = true; 2419 } 2420 } else { 2421 set_escape_state(call_idx, PointsToNode::GlobalEscape); 2422 add_pointsto_edge(resproj_idx, _phantom_object); 2423 } 2424 if (copy_dependencies) 2425 call_analyzer->copy_dependencies(_compile->dependencies()); 2426 } 2427 if (done) 2428 _processed.set(resproj_idx); 2429 break; 2430 } 2431 2432 default: 2433 // Some other type of call, assume the worst case that the 2434 // returned value, if any, globally escapes. 2435 { 2436 const TypeTuple *r = call->tf()->range(); 2437 if (r->cnt() > TypeFunc::Parms) { 2438 const Type* ret_type = r->field_at(TypeFunc::Parms); 2439 2440 // Note: we use isa_ptr() instead of isa_oopptr() here because the 2441 // _multianewarray functions return a TypeRawPtr. 2442 if (ret_type->isa_ptr() != NULL) { 2443 set_escape_state(call_idx, PointsToNode::GlobalEscape); 2444 add_pointsto_edge(resproj_idx, _phantom_object); 2445 } 2446 } 2447 _processed.set(resproj_idx); 2448 } 2449 } 2450 } 2451 2452 // Populate Connection Graph with Ideal nodes and create simple 2453 // connection graph edges (do not need to check the node_type of inputs 2454 // or to call PointsTo() to walk the connection graph). 2455 void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase) { 2456 if (_processed.test(n->_idx)) 2457 return; // No need to redefine node's state. 2458 2459 if (n->is_Call()) { 2460 // Arguments to allocation and locking don't escape. 2461 if (n->is_Allocate()) { 2462 add_node(n, PointsToNode::JavaObject, PointsToNode::UnknownEscape, true); 2463 record_for_optimizer(n); 2464 } else if (n->is_Lock() || n->is_Unlock()) { 2465 // Put Lock and Unlock nodes on IGVN worklist to process them during 2466 // the first IGVN optimization when escape information is still available. 2467 record_for_optimizer(n); 2468 _processed.set(n->_idx); 2469 } else { 2470 // Don't mark as processed since call's arguments have to be processed. 2471 PointsToNode::NodeType nt = PointsToNode::UnknownType; 2472 PointsToNode::EscapeState es = PointsToNode::UnknownEscape; 2473 2474 // Check if a call returns an object. 2475 const TypeTuple *r = n->as_Call()->tf()->range(); 2476 if (r->cnt() > TypeFunc::Parms && 2477 r->field_at(TypeFunc::Parms)->isa_ptr() && 2478 n->as_Call()->proj_out(TypeFunc::Parms) != NULL) { 2479 nt = PointsToNode::JavaObject; 2480 if (!n->is_CallStaticJava()) { 2481 // Since the called mathod is statically unknown assume 2482 // the worst case that the returned value globally escapes. 2483 es = PointsToNode::GlobalEscape; 2484 } 2485 } 2486 add_node(n, nt, es, false); 2487 } 2488 return; 2489 } 2490 2491 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 2492 // ThreadLocal has RawPrt type. 2493 switch (n->Opcode()) { 2494 case Op_AddP: 2495 { 2496 add_node(n, PointsToNode::Field, PointsToNode::UnknownEscape, false); 2497 break; 2498 } 2499 case Op_CastX2P: 2500 { // "Unsafe" memory access. 2501 add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true); 2502 break; 2503 } 2504 case Op_CastPP: 2505 case Op_CheckCastPP: 2506 case Op_EncodeP: 2507 case Op_DecodeN: 2508 { 2509 add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false); 2510 int ti = n->in(1)->_idx; 2511 PointsToNode::NodeType nt = ptnode_adr(ti)->node_type(); 2512 if (nt == PointsToNode::UnknownType) { 2513 _delayed_worklist.push(n); // Process it later. 2514 break; 2515 } else if (nt == PointsToNode::JavaObject) { 2516 add_pointsto_edge(n->_idx, ti); 2517 } else { 2518 add_deferred_edge(n->_idx, ti); 2519 } 2520 _processed.set(n->_idx); 2521 break; 2522 } 2523 case Op_ConP: 2524 { 2525 // assume all pointer constants globally escape except for null 2526 PointsToNode::EscapeState es; 2527 if (phase->type(n) == TypePtr::NULL_PTR) 2528 es = PointsToNode::NoEscape; 2529 else 2530 es = PointsToNode::GlobalEscape; 2531 2532 add_node(n, PointsToNode::JavaObject, es, true); 2533 break; 2534 } 2535 case Op_ConN: 2536 { 2537 // assume all narrow oop constants globally escape except for null 2538 PointsToNode::EscapeState es; 2539 if (phase->type(n) == TypeNarrowOop::NULL_PTR) 2540 es = PointsToNode::NoEscape; 2541 else 2542 es = PointsToNode::GlobalEscape; 2543 2544 add_node(n, PointsToNode::JavaObject, es, true); 2545 break; 2546 } 2547 case Op_CreateEx: 2548 { 2549 // assume that all exception objects globally escape 2550 add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true); 2551 break; 2552 } 2553 case Op_LoadKlass: 2554 case Op_LoadNKlass: 2555 { 2556 add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true); 2557 break; 2558 } 2559 case Op_LoadP: 2560 case Op_LoadN: 2561 { 2562 const Type *t = phase->type(n); 2563 if (t->make_ptr() == NULL) { 2564 _processed.set(n->_idx); 2565 return; 2566 } 2567 add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false); 2568 break; 2569 } 2570 case Op_Parm: 2571 { 2572 _processed.set(n->_idx); // No need to redefine it state. 2573 uint con = n->as_Proj()->_con; 2574 if (con < TypeFunc::Parms) 2575 return; 2576 const Type *t = n->in(0)->as_Start()->_domain->field_at(con); 2577 if (t->isa_ptr() == NULL) 2578 return; 2579 // We have to assume all input parameters globally escape 2580 // (Note: passing 'false' since _processed is already set). 2581 add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, false); 2582 break; 2583 } 2584 case Op_PartialSubtypeCheck: 2585 { // Produces Null or notNull and is used in CmpP. 2586 add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true); 2587 break; 2588 } 2589 case Op_Phi: 2590 { 2591 const Type *t = n->as_Phi()->type(); 2592 if (t->make_ptr() == NULL) { 2593 // nothing to do if not an oop or narrow oop 2594 _processed.set(n->_idx); 2595 return; 2596 } 2597 add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false); 2598 uint i; 2599 for (i = 1; i < n->req() ; i++) { 2600 Node* in = n->in(i); 2601 if (in == NULL) 2602 continue; // ignore NULL 2603 in = in->uncast(); 2604 if (in->is_top() || in == n) 2605 continue; // ignore top or inputs which go back this node 2606 int ti = in->_idx; 2607 PointsToNode::NodeType nt = ptnode_adr(ti)->node_type(); 2608 if (nt == PointsToNode::UnknownType) { 2609 break; 2610 } else if (nt == PointsToNode::JavaObject) { 2611 add_pointsto_edge(n->_idx, ti); 2612 } else { 2613 add_deferred_edge(n->_idx, ti); 2614 } 2615 } 2616 if (i >= n->req()) 2617 _processed.set(n->_idx); 2618 else 2619 _delayed_worklist.push(n); 2620 break; 2621 } 2622 case Op_Proj: 2623 { 2624 // we are only interested in the oop result projection from a call 2625 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) { 2626 const TypeTuple *r = n->in(0)->as_Call()->tf()->range(); 2627 assert(r->cnt() > TypeFunc::Parms, "sanity"); 2628 if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) { 2629 add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false); 2630 int ti = n->in(0)->_idx; 2631 // The call may not be registered yet (since not all its inputs are registered) 2632 // if this is the projection from backbranch edge of Phi. 2633 if (ptnode_adr(ti)->node_type() != PointsToNode::UnknownType) { 2634 process_call_result(n->as_Proj(), phase); 2635 } 2636 if (!_processed.test(n->_idx)) { 2637 // The call's result may need to be processed later if the call 2638 // returns it's argument and the argument is not processed yet. 2639 _delayed_worklist.push(n); 2640 } 2641 break; 2642 } 2643 } 2644 _processed.set(n->_idx); 2645 break; 2646 } 2647 case Op_Return: 2648 { 2649 if( n->req() > TypeFunc::Parms && 2650 phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) { 2651 // Treat Return value as LocalVar with GlobalEscape escape state. 2652 add_node(n, PointsToNode::LocalVar, PointsToNode::GlobalEscape, false); 2653 int ti = n->in(TypeFunc::Parms)->_idx; 2654 PointsToNode::NodeType nt = ptnode_adr(ti)->node_type(); 2655 if (nt == PointsToNode::UnknownType) { 2656 _delayed_worklist.push(n); // Process it later. 2657 break; 2658 } else if (nt == PointsToNode::JavaObject) { 2659 add_pointsto_edge(n->_idx, ti); 2660 } else { 2661 add_deferred_edge(n->_idx, ti); 2662 } 2663 } 2664 _processed.set(n->_idx); 2665 break; 2666 } 2667 case Op_StoreP: 2668 case Op_StoreN: 2669 { 2670 const Type *adr_type = phase->type(n->in(MemNode::Address)); 2671 adr_type = adr_type->make_ptr(); 2672 if (adr_type->isa_oopptr()) { 2673 add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false); 2674 } else { 2675 Node* adr = n->in(MemNode::Address); 2676 if (adr->is_AddP() && phase->type(adr) == TypeRawPtr::NOTNULL && 2677 adr->in(AddPNode::Address)->is_Proj() && 2678 adr->in(AddPNode::Address)->in(0)->is_Allocate()) { 2679 add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false); 2680 // We are computing a raw address for a store captured 2681 // by an Initialize compute an appropriate address type. 2682 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 2683 assert(offs != Type::OffsetBot, "offset must be a constant"); 2684 } else { 2685 _processed.set(n->_idx); 2686 return; 2687 } 2688 } 2689 break; 2690 } 2691 case Op_StorePConditional: 2692 case Op_CompareAndSwapP: 2693 case Op_CompareAndSwapN: 2694 { 2695 const Type *adr_type = phase->type(n->in(MemNode::Address)); 2696 adr_type = adr_type->make_ptr(); 2697 if (adr_type->isa_oopptr()) { 2698 add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false); 2699 } else { 2700 _processed.set(n->_idx); 2701 return; 2702 } 2703 break; 2704 } 2705 case Op_AryEq: 2706 case Op_StrComp: 2707 case Op_StrEquals: 2708 case Op_StrIndexOf: 2709 { 2710 // char[] arrays passed to string intrinsics are not scalar replaceable. 2711 add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false); 2712 break; 2713 } 2714 case Op_ThreadLocal: 2715 { 2716 add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true); 2717 break; 2718 } 2719 default: 2720 ; 2721 // nothing to do 2722 } 2723 return; 2724 } 2725 2726 void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) { 2727 uint n_idx = n->_idx; 2728 assert(ptnode_adr(n_idx)->_node != NULL, "node should be registered"); 2729 2730 // Don't set processed bit for AddP, LoadP, StoreP since 2731 // they may need more then one pass to process. 2732 // Also don't mark as processed Call nodes since their 2733 // arguments may need more then one pass to process. 2734 if (_processed.test(n_idx)) 2735 return; // No need to redefine node's state. 2736 2737 if (n->is_Call()) { 2738 CallNode *call = n->as_Call(); 2739 process_call_arguments(call, phase); 2740 return; 2741 } 2742 2743 switch (n->Opcode()) { 2744 case Op_AddP: 2745 { 2746 Node *base = get_addp_base(n); 2747 int offset = address_offset(n, phase); 2748 // Create a field edge to this node from everything base could point to. 2749 for( VectorSetI i(PointsTo(base)); i.test(); ++i ) { 2750 uint pt = i.elem; 2751 add_field_edge(pt, n_idx, offset); 2752 } 2753 break; 2754 } 2755 case Op_CastX2P: 2756 { 2757 assert(false, "Op_CastX2P"); 2758 break; 2759 } 2760 case Op_CastPP: 2761 case Op_CheckCastPP: 2762 case Op_EncodeP: 2763 case Op_DecodeN: 2764 { 2765 int ti = n->in(1)->_idx; 2766 assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "all nodes should be registered"); 2767 if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) { 2768 add_pointsto_edge(n_idx, ti); 2769 } else { 2770 add_deferred_edge(n_idx, ti); 2771 } 2772 _processed.set(n_idx); 2773 break; 2774 } 2775 case Op_ConP: 2776 { 2777 assert(false, "Op_ConP"); 2778 break; 2779 } 2780 case Op_ConN: 2781 { 2782 assert(false, "Op_ConN"); 2783 break; 2784 } 2785 case Op_CreateEx: 2786 { 2787 assert(false, "Op_CreateEx"); 2788 break; 2789 } 2790 case Op_LoadKlass: 2791 case Op_LoadNKlass: 2792 { 2793 assert(false, "Op_LoadKlass"); 2794 break; 2795 } 2796 case Op_LoadP: 2797 case Op_LoadN: 2798 { 2799 const Type *t = phase->type(n); 2800 #ifdef ASSERT 2801 if (t->make_ptr() == NULL) 2802 assert(false, "Op_LoadP"); 2803 #endif 2804 2805 Node* adr = n->in(MemNode::Address)->uncast(); 2806 Node* adr_base; 2807 if (adr->is_AddP()) { 2808 adr_base = get_addp_base(adr); 2809 } else { 2810 adr_base = adr; 2811 } 2812 2813 // For everything "adr_base" could point to, create a deferred edge from 2814 // this node to each field with the same offset. 2815 int offset = address_offset(adr, phase); 2816 for( VectorSetI i(PointsTo(adr_base)); i.test(); ++i ) { 2817 uint pt = i.elem; 2818 if (adr->is_AddP()) { 2819 // Add field edge if it is missing. 2820 add_field_edge(pt, adr->_idx, offset); 2821 } 2822 add_deferred_edge_to_fields(n_idx, pt, offset); 2823 } 2824 break; 2825 } 2826 case Op_Parm: 2827 { 2828 assert(false, "Op_Parm"); 2829 break; 2830 } 2831 case Op_PartialSubtypeCheck: 2832 { 2833 assert(false, "Op_PartialSubtypeCheck"); 2834 break; 2835 } 2836 case Op_Phi: 2837 { 2838 #ifdef ASSERT 2839 const Type *t = n->as_Phi()->type(); 2840 if (t->make_ptr() == NULL) 2841 assert(false, "Op_Phi"); 2842 #endif 2843 for (uint i = 1; i < n->req() ; i++) { 2844 Node* in = n->in(i); 2845 if (in == NULL) 2846 continue; // ignore NULL 2847 in = in->uncast(); 2848 if (in->is_top() || in == n) 2849 continue; // ignore top or inputs which go back this node 2850 int ti = in->_idx; 2851 PointsToNode::NodeType nt = ptnode_adr(ti)->node_type(); 2852 assert(nt != PointsToNode::UnknownType, "all nodes should be known"); 2853 if (nt == PointsToNode::JavaObject) { 2854 add_pointsto_edge(n_idx, ti); 2855 } else { 2856 add_deferred_edge(n_idx, ti); 2857 } 2858 } 2859 _processed.set(n_idx); 2860 break; 2861 } 2862 case Op_Proj: 2863 { 2864 // we are only interested in the oop result projection from a call 2865 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) { 2866 assert(ptnode_adr(n->in(0)->_idx)->node_type() != PointsToNode::UnknownType, 2867 "all nodes should be registered"); 2868 const TypeTuple *r = n->in(0)->as_Call()->tf()->range(); 2869 assert(r->cnt() > TypeFunc::Parms, "sanity"); 2870 if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) { 2871 process_call_result(n->as_Proj(), phase); 2872 assert(_processed.test(n_idx), "all call results should be processed"); 2873 break; 2874 } 2875 } 2876 assert(false, "Op_Proj"); 2877 break; 2878 } 2879 case Op_Return: 2880 { 2881 #ifdef ASSERT 2882 if( n->req() <= TypeFunc::Parms || 2883 !phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) { 2884 assert(false, "Op_Return"); 2885 } 2886 #endif 2887 int ti = n->in(TypeFunc::Parms)->_idx; 2888 assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "node should be registered"); 2889 if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) { 2890 add_pointsto_edge(n_idx, ti); 2891 } else { 2892 add_deferred_edge(n_idx, ti); 2893 } 2894 _processed.set(n_idx); 2895 break; 2896 } 2897 case Op_StoreP: 2898 case Op_StoreN: 2899 case Op_StorePConditional: 2900 case Op_CompareAndSwapP: 2901 case Op_CompareAndSwapN: 2902 { 2903 Node *adr = n->in(MemNode::Address); 2904 const Type *adr_type = phase->type(adr)->make_ptr(); 2905 #ifdef ASSERT 2906 if (!adr_type->isa_oopptr()) 2907 assert(phase->type(adr) == TypeRawPtr::NOTNULL, "Op_StoreP"); 2908 #endif 2909 2910 assert(adr->is_AddP(), "expecting an AddP"); 2911 Node *adr_base = get_addp_base(adr); 2912 Node *val = n->in(MemNode::ValueIn)->uncast(); 2913 int offset = address_offset(adr, phase); 2914 // For everything "adr_base" could point to, create a deferred edge 2915 // to "val" from each field with the same offset. 2916 for( VectorSetI i(PointsTo(adr_base)); i.test(); ++i ) { 2917 uint pt = i.elem; 2918 // Add field edge if it is missing. 2919 add_field_edge(pt, adr->_idx, offset); 2920 add_edge_from_fields(pt, val->_idx, offset); 2921 } 2922 break; 2923 } 2924 case Op_AryEq: 2925 case Op_StrComp: 2926 case Op_StrEquals: 2927 case Op_StrIndexOf: 2928 { 2929 // char[] arrays passed to string intrinsic do not escape but 2930 // they are not scalar replaceable. Adjust escape state for them. 2931 // Start from in(2) edge since in(1) is memory edge. 2932 for (uint i = 2; i < n->req(); i++) { 2933 Node* adr = n->in(i)->uncast(); 2934 const Type *at = phase->type(adr); 2935 if (!adr->is_top() && at->isa_ptr()) { 2936 assert(at == Type::TOP || at == TypePtr::NULL_PTR || 2937 at->isa_ptr() != NULL, "expecting an Ptr"); 2938 if (adr->is_AddP()) { 2939 adr = get_addp_base(adr); 2940 } 2941 // Mark as ArgEscape everything "adr" could point to. 2942 set_escape_state(adr->_idx, PointsToNode::ArgEscape); 2943 } 2944 } 2945 _processed.set(n_idx); 2946 break; 2947 } 2948 case Op_ThreadLocal: 2949 { 2950 assert(false, "Op_ThreadLocal"); 2951 break; 2952 } 2953 default: 2954 // This method should be called only for EA specific nodes. 2955 ShouldNotReachHere(); 2956 } 2957 } 2958 2959 #ifndef PRODUCT 2960 void ConnectionGraph::dump() { 2961 bool first = true; 2962 2963 uint size = nodes_size(); 2964 for (uint ni = 0; ni < size; ni++) { 2965 PointsToNode *ptn = ptnode_adr(ni); 2966 PointsToNode::NodeType ptn_type = ptn->node_type(); 2967 2968 if (ptn_type != PointsToNode::JavaObject || ptn->_node == NULL) 2969 continue; 2970 PointsToNode::EscapeState es = escape_state(ptn->_node); 2971 if (ptn->_node->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) { 2972 if (first) { 2973 tty->cr(); 2974 tty->print("======== Connection graph for "); 2975 _compile->method()->print_short_name(); 2976 tty->cr(); 2977 first = false; 2978 } 2979 tty->print("%6d ", ni); 2980 ptn->dump(); 2981 // Print all locals which reference this allocation 2982 for (uint li = ni; li < size; li++) { 2983 PointsToNode *ptn_loc = ptnode_adr(li); 2984 PointsToNode::NodeType ptn_loc_type = ptn_loc->node_type(); 2985 if ( ptn_loc_type == PointsToNode::LocalVar && ptn_loc->_node != NULL && 2986 ptn_loc->edge_count() == 1 && ptn_loc->edge_target(0) == ni ) { 2987 ptnode_adr(li)->dump(false); 2988 } 2989 } 2990 if (Verbose) { 2991 // Print all fields which reference this allocation 2992 for (uint i = 0; i < ptn->edge_count(); i++) { 2993 uint ei = ptn->edge_target(i); 2994 ptnode_adr(ei)->dump(false); 2995 } 2996 } 2997 tty->cr(); 2998 } 2999 } 3000 } 3001 #endif