/*
 * Copyright 2005-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_escape.cpp.incl"

void PointsToNode::add_edge(uint targIdx, PointsToNode::EdgeType et) {
  uint v = (targIdx << EdgeShift) + ((uint) et);
  if (_edges == NULL) {
    Arena *a = Compile::current()->comp_arena();
    _edges = new(a) GrowableArray<uint>(a, INITIAL_EDGE_COUNT, 0, 0);
  }
  _edges->append_if_missing(v);
}

void PointsToNode::remove_edge(uint targIdx, PointsToNode::EdgeType et) {
  uint v = (targIdx << EdgeShift) + ((uint) et);

  _edges->remove(v);
}

#ifndef PRODUCT
static const char *node_type_names[] = {
  "UnknownType",
  "JavaObject",
  "LocalVar",
  "Field"
};

static const char *esc_names[] = {
  "UnknownEscape",
  "NoEscape",
  "ArgEscape",
  "GlobalEscape"
};

static const char *edge_type_suffix[] = {
  "?", // UnknownEdge
  "P", // PointsToEdge
  "D", // DeferredEdge
  "F"  // FieldEdge
};

void PointsToNode::dump(bool print_state) const {
  NodeType nt = node_type();
  tty->print("%s ", node_type_names[(int) nt]);
  if (print_state) {
    EscapeState es = escape_state();
    tty->print("%s %s ", esc_names[(int) es], _scalar_replaceable ? "":"NSR");
  }
  tty->print("[[");
  for (uint i = 0; i < edge_count(); i++) {
    tty->print(" %d%s", edge_target(i), edge_type_suffix[(int) edge_type(i)]);
  }
  tty->print("]]  ");
  if (_node == NULL)
    tty->print_cr("<null>");
  else
    _node->dump();
}
#endif

ConnectionGraph::ConnectionGraph(Compile * C) :
  _nodes(C->comp_arena(), C->unique(), C->unique(), PointsToNode()),
  _processed(C->comp_arena()),
  _collecting(true),
  _compile(C),
  _node_map(C->comp_arena()) {

  _phantom_object = C->top()->_idx;
  add_node(C->top(), PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);

  // Add ConP(#NULL) and ConN(#NULL) nodes.
  PhaseGVN* igvn = C->initial_gvn();
  Node* oop_null = igvn->zerocon(T_OBJECT);
  _oop_null = oop_null->_idx;
  assert(_oop_null < C->unique(), "should be created already");
  add_node(oop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);

  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    _noop_null = noop_null->_idx;
    assert(_noop_null < C->unique(), "should be created already");
    add_node(noop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);
  }
}

void ConnectionGraph::add_pointsto_edge(uint from_i, uint to_i) {
  PointsToNode *f = ptnode_adr(from_i);
  PointsToNode *t = ptnode_adr(to_i);

  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
  assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of PointsTo edge");
  assert(t->node_type() == PointsToNode::JavaObject, "invalid destination of PointsTo edge");
  f->add_edge(to_i, PointsToNode::PointsToEdge);
}

void ConnectionGraph::add_deferred_edge(uint from_i, uint to_i) {
  PointsToNode *f = ptnode_adr(from_i);
  PointsToNode *t = ptnode_adr(to_i);

  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
  assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of Deferred edge");
  assert(t->node_type() == PointsToNode::LocalVar || t->node_type() == PointsToNode::Field, "invalid destination of Deferred edge");
  // Don't add a self-referential edge; this can occur during removal of
  // deferred edges.
  if (from_i != to_i)
    f->add_edge(to_i, PointsToNode::DeferredEdge);
}

int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
  const Type *adr_type = phase->type(adr);
  if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
      adr->in(AddPNode::Address)->is_Proj() &&
      adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
    // We are computing a raw address for a store captured by an Initialize
    // node; compute an appropriate address type. AddP cases #3 and #5 (see below).
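    // For example (illustrative, not from the original comments), a captured
    // store 'obj.f = v' typically has the shape
    //   AddP(top, Proj#5(Allocate), ConX(#field_offset))
    // so the field offset can be read directly from the constant Offset input.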
    int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
    assert(offs != Type::OffsetBot ||
           adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
           "offset must be a constant or it is initialization of array");
    return offs;
  }
  const TypePtr *t_ptr = adr_type->isa_ptr();
  assert(t_ptr != NULL, "must be a pointer type");
  return t_ptr->offset();
}

void ConnectionGraph::add_field_edge(uint from_i, uint to_i, int offset) {
  PointsToNode *f = ptnode_adr(from_i);
  PointsToNode *t = ptnode_adr(to_i);

  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
  assert(f->node_type() == PointsToNode::JavaObject, "invalid source of Field edge");
  assert(t->node_type() == PointsToNode::Field, "invalid destination of Field edge");
  assert(t->offset() == -1 || t->offset() == offset, "conflicting field offsets");
  t->set_offset(offset);

  f->add_edge(to_i, PointsToNode::FieldEdge);
}

void ConnectionGraph::set_escape_state(uint ni, PointsToNode::EscapeState es) {
  PointsToNode *npt = ptnode_adr(ni);
  PointsToNode::EscapeState old_es = npt->escape_state();
  if (es > old_es)
    npt->set_escape_state(es);
}

void ConnectionGraph::add_node(Node *n, PointsToNode::NodeType nt,
                               PointsToNode::EscapeState es, bool done) {
  PointsToNode* ptadr = ptnode_adr(n->_idx);
  ptadr->_node = n;
  ptadr->set_node_type(nt);

  // inline set_escape_state(idx, es);
  PointsToNode::EscapeState old_es = ptadr->escape_state();
  if (es > old_es)
    ptadr->set_escape_state(es);

  if (done)
    _processed.set(n->_idx);
}

PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n, PhaseTransform *phase) {
  uint idx = n->_idx;
  PointsToNode::EscapeState es;

  // If we are still collecting, we don't know the answer yet.
  if (_collecting)
    return PointsToNode::UnknownEscape;

  // If the node was created after the escape computation, return
  // UnknownEscape.
  if (idx >= nodes_size())
    return PointsToNode::UnknownEscape;

  es = ptnode_adr(idx)->escape_state();

  // If we have already computed a value, return it.
  if (es != PointsToNode::UnknownEscape &&
      ptnode_adr(idx)->node_type() == PointsToNode::JavaObject)
    return es;

  // PointsTo() calls n->uncast() which can return a new ideal node.
  if (n->uncast()->_idx >= nodes_size())
    return PointsToNode::UnknownEscape;

  // Compute the max escape state of anything this node could point to.
  VectorSet ptset(Thread::current()->resource_area());
  PointsTo(ptset, n, phase);
  for(VectorSetI i(&ptset); i.test() && es != PointsToNode::GlobalEscape; ++i) {
    uint pt = i.elem;
    PointsToNode::EscapeState pes = ptnode_adr(pt)->escape_state();
    if (pes > es)
      es = pes;
  }
  // cache the computed escape state
  assert(es != PointsToNode::UnknownEscape, "should have computed an escape state");
  ptnode_adr(idx)->set_escape_state(es);
  return es;
}

void ConnectionGraph::PointsTo(VectorSet &ptset, Node * n, PhaseTransform *phase) {
  VectorSet visited(Thread::current()->resource_area());
  GrowableArray<uint>  worklist;

#ifdef ASSERT
  Node *orig_n = n;
#endif

  n = n->uncast();
  PointsToNode* npt = ptnode_adr(n->_idx);

  // If we have a JavaObject, return just that object.
  if (npt->node_type() == PointsToNode::JavaObject) {
    ptset.set(n->_idx);
    return;
  }
#ifdef ASSERT
  if (npt->_node == NULL) {
    if (orig_n != n)
      orig_n->dump();
    n->dump();
    assert(npt->_node != NULL, "unregistered node");
  }
#endif
  worklist.push(n->_idx);
  while(worklist.length() > 0) {
    int ni = worklist.pop();
    if (visited.test_set(ni))
      continue;

    PointsToNode* pn = ptnode_adr(ni);
    // ensure that all inputs of a Phi have been processed
    assert(!_collecting || !pn->_node->is_Phi() || _processed.test(ni), "");

    int edges_processed = 0;
    uint e_cnt = pn->edge_count();
    for (uint e = 0; e < e_cnt; e++) {
      uint etgt = pn->edge_target(e);
      PointsToNode::EdgeType et = pn->edge_type(e);
      if (et == PointsToNode::PointsToEdge) {
        ptset.set(etgt);
        edges_processed++;
      } else if (et == PointsToNode::DeferredEdge) {
        worklist.push(etgt);
        edges_processed++;
      } else {
        assert(false, "neither PointsToEdge nor DeferredEdge");
      }
    }
    if (edges_processed == 0) {
      // No deferred or pointsto edges found.  Assume the value was set
      // outside this method.  Add the phantom object to the pointsto set.
      ptset.set(_phantom_object);
    }
  }
}

void ConnectionGraph::remove_deferred(uint ni, GrowableArray<uint>* deferred_edges, VectorSet* visited) {
  // This method is most expensive during ConnectionGraph construction.
  // Reuse the vectorSet and an additional growable array for deferred edges.
  deferred_edges->clear();
  visited->Clear();

  visited->set(ni);
  PointsToNode *ptn = ptnode_adr(ni);

  // Mark current edges as visited and move deferred edges to a separate array.
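  // For illustration (not in the original comments): if this node ni has
  //   ni -D-> a,  a -P-> o1,  a -D-> b,  b -P-> o2
  // the two loops below replace the deferred edges with direct edges
  //   ni -P-> o1,  ni -P-> o2
  // so later PointsTo() queries need not chase deferred chains again.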
  for (uint i = 0; i < ptn->edge_count(); ) {
    uint t = ptn->edge_target(i);
#ifdef ASSERT
    assert(!visited->test_set(t), "expecting no duplications");
#else
    visited->set(t);
#endif
    if (ptn->edge_type(i) == PointsToNode::DeferredEdge) {
      ptn->remove_edge(t, PointsToNode::DeferredEdge);
      deferred_edges->append(t);
    } else {
      i++;
    }
  }
  for (int next = 0; next < deferred_edges->length(); ++next) {
    uint t = deferred_edges->at(next);
    PointsToNode *ptt = ptnode_adr(t);
    uint e_cnt = ptt->edge_count();
    for (uint e = 0; e < e_cnt; e++) {
      uint etgt = ptt->edge_target(e);
      if (visited->test_set(etgt))
        continue;

      PointsToNode::EdgeType et = ptt->edge_type(e);
      if (et == PointsToNode::PointsToEdge) {
        add_pointsto_edge(ni, etgt);
        if(etgt == _phantom_object) {
          // Special case - field set outside (globally escaping).
          ptn->set_escape_state(PointsToNode::GlobalEscape);
        }
      } else if (et == PointsToNode::DeferredEdge) {
        deferred_edges->append(etgt);
      } else {
        assert(false, "invalid connection graph");
      }
    }
  }
}


// Add an edge to node given by "to_i" from any field of adr_i whose offset
// matches "offset".  A deferred edge is added if to_i is a LocalVar, and
// a pointsto edge is added if it is a JavaObject.

void ConnectionGraph::add_edge_from_fields(uint adr_i, uint to_i, int offs) {
  PointsToNode* an = ptnode_adr(adr_i);
  PointsToNode* to = ptnode_adr(to_i);
  bool deferred = (to->node_type() == PointsToNode::LocalVar);

  for (uint fe = 0; fe < an->edge_count(); fe++) {
    assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
    int fi = an->edge_target(fe);
    PointsToNode* pf = ptnode_adr(fi);
    int po = pf->offset();
    if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) {
      if (deferred)
        add_deferred_edge(fi, to_i);
      else
        add_pointsto_edge(fi, to_i);
    }
  }
}

// Add a deferred edge from node given by "from_i" to any field of adr_i
// whose offset matches "offset".
void ConnectionGraph::add_deferred_edge_to_fields(uint from_i, uint adr_i, int offs) {
  PointsToNode* an = ptnode_adr(adr_i);
  for (uint fe = 0; fe < an->edge_count(); fe++) {
    assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
    int fi = an->edge_target(fe);
    PointsToNode* pf = ptnode_adr(fi);
    int po = pf->offset();
    if (pf->edge_count() == 0) {
      // We have not seen any stores to this field; assume it was set outside this method.
      add_pointsto_edge(fi, _phantom_object);
    }
    if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) {
      add_deferred_edge(from_i, fi);
    }
  }
}

// Helper functions

static Node* get_addp_base(Node *addp) {
  assert(addp->is_AddP(), "must be AddP");
  //
  // AddP cases for Base and Address inputs:
  // case #1. Direct object's field reference:
  //     Allocate
  //       |
  //     Proj #5 ( oop result )
  //       |
  //     CheckCastPP (cast to instance type)
  //      | |
  //     AddP  ( base == address )
  //
  // case #2. Indirect object's field reference:
  //      Phi
  //       |
  //     CastPP (cast to instance type)
  //      | |
  //     AddP  ( base == address )
  //
  // case #3. Raw object's field reference for Initialize node:
  //      Allocate
  //        |
  //      Proj #5 ( oop result )
  //  top   |
  //     \  |
  //     AddP  ( base == top )
  //
  // case #4. Array's element reference:
  //      {CheckCastPP | CastPP}
  //         |  | |
  //         |  AddP ( array's element offset )
  //         |  |
  //         AddP ( array's offset )
  //
  // case #5. Raw object's field reference for arraycopy stub call:
  //          The inline_native_clone() case when the arraycopy stub is called
  //          after the allocation before Initialize and CheckCastPP nodes.
  //      Allocate
  //        |
  //      Proj #5 ( oop result )
  //       | |
  //       AddP  ( base == address )
  //
  // case #6. Constant Pool, ThreadLocal, CastX2P or
  //          Raw object's field reference:
  //      {ConP, ThreadLocal, CastX2P, raw Load}
  //  top   |
  //     \  |
  //     AddP  ( base == top )
  //
  // case #7. Klass's field reference.
  //      LoadKlass
  //       | |
  //       AddP  ( base == address )
  //
  // case #8. narrow Klass's field reference.
  //      LoadNKlass
  //       |
  //      DecodeN
  //       | |
  //       AddP  ( base == address )
  //
  Node *base = addp->in(AddPNode::Base)->uncast();
  if (base->is_top()) { // The AddP cases #3 and #6.
    base = addp->in(AddPNode::Address)->uncast();
    while (base->is_AddP()) {
      // Case #6 (unsafe access) may have several chained AddP nodes.
      assert(base->in(AddPNode::Base)->is_top(), "expected unsafe access address only");
      base = base->in(AddPNode::Address)->uncast();
    }
    assert(base->Opcode() == Op_ConP || base->Opcode() == Op_ThreadLocal ||
           base->Opcode() == Op_CastX2P || base->is_DecodeN() ||
           (base->is_Mem() && base->bottom_type() == TypeRawPtr::NOTNULL) ||
           (base->is_Proj() && base->in(0)->is_Allocate()), "sanity");
  }
  return base;
}

static Node* find_second_addp(Node* addp, Node* n) {
  assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");

  Node* addp2 = addp->raw_out(0);
  if (addp->outcnt() == 1 && addp2->is_AddP() &&
      addp2->in(AddPNode::Base) == n &&
      addp2->in(AddPNode::Address) == addp) {

    assert(addp->in(AddPNode::Base) == n, "expecting the same base");
    //
    // Find the array's offset to push it on the worklist first and
    // as a result process the array's element offset first (pushed second)
    // to avoid a CastPP for the array's offset.
    // Otherwise the inserted CastPP (LocalVar) will point to what
    // the AddP (Field) points to.  Which would be wrong since
    // the algorithm expects the CastPP to have the same points-to info
    // as the AddP's base CheckCastPP (LocalVar).
    //
    //    ArrayAllocation
    //     |
    //    CheckCastPP
    //     |
    //    memProj (from ArrayAllocation CheckCastPP)
    //     |  ||
    //     |  ||   Int (element index)
    //     |  ||    |   ConI (log(element size))
    //     |  ||    |   /
    //     |  ||   LShift
    //     |  ||  /
    //     |  AddP (array's element offset)
    //     |  |
    //     |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
    //     | / /
    //     AddP (array's offset)
    //      |
    //     Load/Store (memory operation on array's element)
    //
    return addp2;
  }
  return NULL;
}

//
// Adjust the type and inputs of an AddP which computes the
// address of a field of an instance.
//
bool ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
  const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
  assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
  const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
  if (t == NULL) {
    // We are computing a raw address for a store captured by an Initialize
    // node; compute an appropriate address type (cases #3 and #5).
    assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
    assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
    intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
    assert(offs != Type::OffsetBot, "offset must be a constant");
    t = base_t->add_offset(offs)->is_oopptr();
  }
  int inst_id = base_t->instance_id();
  assert(!t->is_known_instance() || t->instance_id() == inst_id,
         "old type must be non-instance or match new type");

  // The type 't' could be a subclass of 'base_t'.
  // As a result t->offset() could be larger than base_t's size and it will
  // cause the failure in add_offset() with narrow oops since the TypeOopPtr()
  // constructor verifies the correctness of the offset.
  //
  // It could happen on a subclass's branch (from the type profiling
  // inlining) which was not eliminated during parsing since the exactness
  // of the allocation type was not propagated to the subclass type check.
  //
  // Or the type 't' could be not related to 'base_t' at all.
  // It could happen when the CHA type differs from the MDO type on a dead path
  // (for example, from an instanceof check) which is not collapsed during parsing.
  //
  // Do nothing for such an AddP node and don't process its users since
  // this code branch will go away.
  //
  if (!t->is_known_instance() &&
      !base_t->klass()->is_subtype_of(t->klass())) {
    return false; // bail out
  }

  const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
  // Do NOT remove the next line: ensure a new alias index is allocated
  // for the instance type.  Note: C++ will not remove it since the call
  // has a side effect.
  int alias_idx = _compile->get_alias_index(tinst);
  igvn->set_type(addp, tinst);
  // record the allocation in the node map
  set_map(addp->_idx, get_map(base->_idx));

  // Set addp's Base and Address to 'base'.
  Node *abase = addp->in(AddPNode::Base);
  Node *adr   = addp->in(AddPNode::Address);
  if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
      adr->in(0)->_idx == (uint)inst_id) {
    // Skip AddP cases #3 and #5.
  } else {
    assert(!abase->is_top(), "sanity"); // AddP case #3
    if (abase != base) {
      igvn->hash_delete(addp);
      addp->set_req(AddPNode::Base, base);
      if (abase == adr) {
        addp->set_req(AddPNode::Address, base);
      } else {
        // AddP case #4 (adr is array's element offset AddP node)
#ifdef ASSERT
        const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
        assert(adr->is_AddP() && atype != NULL &&
               atype->instance_id() == inst_id, "array's element offset should be processed first");
#endif
      }
      igvn->hash_insert(addp);
    }
  }
  // Put on the IGVN worklist since at least addp's type was changed above.
  record_for_optimizer(addp);
  return true;
}

//
// Create a new version of orig_phi if necessary. Returns either the newly
// created phi or an existing phi.  Sets new_created to indicate whether a new
// phi was created.  Caches the last newly created phi in the node map.
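// Returns NULL (after recording a compile failure) when creating another Phi
// would push the node count over MaxNodeLimit; C2 then retries the whole
// compilation with escape analysis disabled.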
//
PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN *igvn, bool &new_created) {
  Compile *C = _compile;
  new_created = false;
  int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
  // nothing to do if orig_phi is bottom memory or matches alias_idx
  if (phi_alias_idx == alias_idx) {
    return orig_phi;
  }
  // Have we recently created a Phi for this alias index?
  PhiNode *result = get_map_phi(orig_phi->_idx);
  if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
    return result;
  }
  // Previous check may fail when the same wide memory Phi was split into Phis
  // for different memory slices.  Search all Phis for this region.
  if (result != NULL) {
    Node* region = orig_phi->in(0);
    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
      Node* phi = region->fast_out(i);
      if (phi->is_Phi() &&
          C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
        assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
        return phi->as_Phi();
      }
    }
  }
  if ((int)C->unique() + 2*NodeLimitFudgeFactor > MaxNodeLimit) {
    if (C->do_escape_analysis() == true && !C->failing()) {
      // Retry compilation without escape analysis.
      // If this is the first failure, the sentinel string will "stick"
      // to the Compile object, and the C2Compiler will see it and retry.
      C->record_failure(C2Compiler::retry_no_escape_analysis());
    }
    return NULL;
  }
  orig_phi_worklist.append_if_missing(orig_phi);
  const TypePtr *atype = C->get_adr_type(alias_idx);
  result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
  C->copy_node_notes_to(result, orig_phi);
  set_map_phi(orig_phi->_idx, result);
  igvn->set_type(result, result->bottom_type());
  record_for_optimizer(result);
  new_created = true;
  return result;
}

//
// Return a new version of Memory Phi "orig_phi" with the inputs having the
// specified alias index.
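// The traversal is iterative: when an input of the current Phi is itself a
// memory Phi that gets split, the current Phi and input index are saved on
// explicit stacks (phi_list/cur_input) rather than recursing, so long Phi
// chains cannot overflow the C stack.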
//
PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN *igvn) {

  assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
  Compile *C = _compile;
  bool new_phi_created;
  PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, igvn, new_phi_created);
  if (!new_phi_created) {
    return result;
  }

  GrowableArray<PhiNode *>  phi_list;
  GrowableArray<uint>  cur_input;

  PhiNode *phi = orig_phi;
  uint idx = 1;
  bool finished = false;
  while(!finished) {
    while (idx < phi->req()) {
      Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, igvn);
      if (mem != NULL && mem->is_Phi()) {
        PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, igvn, new_phi_created);
        if (new_phi_created) {
          // Found a Phi for which we created a new split; push the current one
          // on the worklist and begin processing the new one.
          phi_list.push(phi);
          cur_input.push(idx);
          phi = mem->as_Phi();
          result = newphi;
          idx = 1;
          continue;
        } else {
          mem = newphi;
        }
      }
      if (C->failing()) {
        return NULL;
      }
      result->set_req(idx++, mem);
    }
#ifdef ASSERT
    // verify that the new Phi has an input for each input of the original
    assert( phi->req() == result->req(), "must have same number of inputs.");
    assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
#endif
    // Check if all new phi's inputs have the specified alias index.
    // Otherwise use the old phi.
    for (uint i = 1; i < phi->req(); i++) {
      Node* in = result->in(i);
      assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");
    }
    // we have finished processing a Phi, see if there are any more to do
    finished = (phi_list.length() == 0 );
    if (!finished) {
      phi = phi_list.pop();
      idx = cur_input.pop();
      PhiNode *prev_result = get_map_phi(phi->_idx);
      prev_result->set_req(idx++, result);
      result = prev_result;
    }
  }
  return result;
}


//
// The next methods are derived from methods in MemNode.
//
static Node *step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *tinst) {
  Node *mem = mmem;
  // TypeInstPtr::NOTNULL+any is an OOP with unknown offset - generally
  // means an array I have not precisely typed yet.  Do not do any
  // alias stuff with it any time soon.
  if( tinst->base() != Type::AnyPtr &&
      !(tinst->klass()->is_java_lang_Object() &&
        tinst->offset() == Type::OffsetBot) ) {
    mem = mmem->memory_at(alias_idx);
    // Update input if it is progress over what we have now
  }
  return mem;
}

//
// Search memory chain of "mem" to find a MemNode whose address
// matches the specified alias index.
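// The walk steps past stores, calls, MemBars and MergeMems which cannot
// affect the given alias index; for example (illustrative), a call which
// can't modify this instance is simply skipped via its Memory input.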
//
Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *phase) {
  if (orig_mem == NULL)
    return orig_mem;
  Compile* C = phase->C;
  const TypeOopPtr *tinst = C->get_adr_type(alias_idx)->isa_oopptr();
  bool is_instance = (tinst != NULL) && tinst->is_known_instance();
  Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
  Node *prev = NULL;
  Node *result = orig_mem;
  while (prev != result) {
    prev = result;
    if (result == start_mem)
      break;  // hit one of our sentinels
    if (result->is_Mem()) {
      const Type *at = phase->type(result->in(MemNode::Address));
      if (at != Type::TOP) {
        assert (at->isa_ptr() != NULL, "pointer type required.");
        int idx = C->get_alias_index(at->is_ptr());
        if (idx == alias_idx)
          break;
      }
      result = result->in(MemNode::Memory);
    }
    if (!is_instance)
      continue;  // don't search further for non-instance types
    // skip over a call which does not affect this memory slice
    if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
      Node *proj_in = result->in(0);
      if (proj_in->is_Allocate() && proj_in->_idx == (uint)tinst->instance_id()) {
        break;  // hit one of our sentinels
      } else if (proj_in->is_Call()) {
        CallNode *call = proj_in->as_Call();
        if (!call->may_modify(tinst, phase)) {
          result = call->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_Initialize()) {
        AllocateNode* alloc = proj_in->as_Initialize()->allocation();
        // Stop if this is the initialization for the object instance which
        // contains this memory slice, otherwise skip over it.
        if (alloc == NULL || alloc->_idx != (uint)tinst->instance_id()) {
          result = proj_in->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_MemBar()) {
        result = proj_in->in(TypeFunc::Memory);
      }
    } else if (result->is_MergeMem()) {
      MergeMemNode *mmem = result->as_MergeMem();
      result = step_through_mergemem(mmem, alias_idx, tinst);
      if (result == mmem->base_memory()) {
        // Didn't find instance memory, search through general slice recursively.
        result = mmem->memory_at(C->get_general_index(alias_idx));
        result = find_inst_mem(result, alias_idx, orig_phis, phase);
        if (C->failing()) {
          return NULL;
        }
        mmem->set_memory_at(alias_idx, result);
      }
    } else if (result->is_Phi() &&
               C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
      Node *un = result->as_Phi()->unique_input(phase);
      if (un != NULL) {
        result = un;
      } else {
        break;
      }
    } else if (result->Opcode() == Op_ClearArray) {
      // This node initializes new object storage to zero.
      intptr_t offset;
      AllocateNode* alloc = AllocateNode::Ideal_allocation(result->in(3), phase, offset);
      // Cannot bypass initialization of the instance
      // we are looking for, or when something is wrong.
      if (alloc == NULL || alloc->_idx == (uint)tinst->instance_id())
        break;
      // Otherwise skip it.
      InitializeNode* init = alloc->initialization();
      if (init != NULL)
        result = init->in(TypeFunc::Memory);
      else
        result = alloc->in(TypeFunc::Memory);
    } else if (result->Opcode() == Op_SCMemProj) {
      assert(result->in(0)->is_LoadStore(), "sanity");
      const Type *at = phase->type(result->in(0)->in(MemNode::Address));
      if (at != Type::TOP) {
        assert (at->isa_ptr() != NULL, "pointer type required.");
        int idx = C->get_alias_index(at->is_ptr());
        assert(idx != alias_idx, "Object is not scalar replaceable if a LoadStore node accesses its field");
        break;
      }
      result = result->in(0)->in(MemNode::Memory);
    }
  }
  if (result->is_Phi()) {
    PhiNode *mphi = result->as_Phi();
    assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
    const TypePtr *t = mphi->adr_type();
    if (C->get_alias_index(t) != alias_idx) {
      // Create a new Phi with the specified alias index type.
      result = split_memory_phi(mphi, alias_idx, orig_phis, phase);
    } else if (!is_instance) {
      // Push all non-instance Phis on the orig_phis worklist to update inputs
      // during Phase 4 if needed.
      orig_phis.append_if_missing(mphi);
    }
  }
  // The result is either a MemNode, a PhiNode, or an InitializeNode.
  return result;
}

//
//  Convert the types of unescaped objects to instance types where possible,
//  propagate the new type information through the graph, and update memory
//  edges and MergeMem inputs to reflect the new type.
//
//  We start with allocations (and calls which may be allocations) on alloc_worklist.
//  The processing is done in 4 phases:
//
//  Phase 1:  Process possible allocations from alloc_worklist.  Create instance
//            types for the CheckCastPP for allocations where possible.
//            Propagate the new types through users as follows:
//               casts and Phi:  push users on alloc_worklist
//               AddP:  cast Base and Address inputs to the instance type
//                      push any AddP users on alloc_worklist and push any memnode
//                      users onto memnode_worklist.
//  Phase 2:  Process MemNode's from memnode_worklist.  Compute a new address type and
//            search the Memory chain for a store with the appropriate address type.
//            If a Phi is found, create a new version with the appropriate memory
//            slices from each of the Phi inputs.
//            For stores, process the users as follows:
//               MemNode:  push on memnode_worklist
//               MergeMem: push on mergemem_worklist
//  Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory slice
//            moving the first node encountered of each instance type to the
//            input corresponding to its alias index.
//  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
//
//  In the following example, the CheckCastPP nodes are the cast of allocation
//  results and the allocation of node 29 is unescaped and eligible to be an
//  instance type.
//
//  We start with:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"
//    30  AddP  _ 29 29 10  Foo+12  alias_index=4
//
//    40  StoreP  25   7  20   ... alias_index=4
//    50  StoreP  35  40  30   ... alias_index=4
//    60  StoreP  45  50  20   ... alias_index=4
//    70  LoadP    _  60  30   ... alias_index=4
//    80  Phi     75  50  60   Memory alias_index=4
//    90  LoadP    _  80  30   ... alias_index=4
//   100  LoadP    _  80  20   ... alias_index=4
//
//
//  Phase 1 creates an instance type for node 29 assigning it an instance id of 24
//  and creating a new alias index for node 30.  This gives:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"  iid=24
//    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
//
//    40  StoreP  25   7  20   ... alias_index=4
//    50  StoreP  35  40  30   ... alias_index=6
//    60  StoreP  45  50  20   ... alias_index=4
//    70  LoadP    _  60  30   ... alias_index=6
//    80  Phi     75  50  60   Memory alias_index=4
//    90  LoadP    _  80  30   ... alias_index=6
//   100  LoadP    _  80  20   ... alias_index=4
//
//  In phase 2, new memory inputs are computed for the loads and stores,
//  and a new version of the phi is created.  In phase 4, the inputs to
//  node 80 are updated and then the memory nodes are updated with the
//  values computed in phase 2.  This results in:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"  iid=24
//    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
//
//    40  StoreP  25   7  20   ... alias_index=4
//    50  StoreP  35   7  30   ... alias_index=6
//    60  StoreP  45  40  20   ... alias_index=4
//    70  LoadP    _  50  30   ... alias_index=6
//    80  Phi     75  40  60   Memory alias_index=4
//   120  Phi     75  50  50   Memory alias_index=6
//    90  LoadP    _ 120  30   ... alias_index=6
//   100  LoadP    _  80  20   ... alias_index=4
//
void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist) {
  GrowableArray<Node *>  memnode_worklist;
  GrowableArray<PhiNode *>  orig_phis;
  PhaseGVN  *igvn = _compile->initial_gvn();
  uint new_index_start = (uint) _compile->num_alias_types();
  VectorSet visited(Thread::current()->resource_area());
  VectorSet ptset(Thread::current()->resource_area());


  //  Phase 1:  Process possible allocations from alloc_worklist.
  //  Create instance types for the CheckCastPP for allocations where possible.
  //
  // (Note: don't forget to change the order of the second AddP node on
  //  the alloc_worklist if the order of the worklist processing is changed,
  //  see the comment in find_second_addp().)
  //
  while (alloc_worklist.length() != 0) {
    Node *n = alloc_worklist.pop();
    uint ni = n->_idx;
    const TypeOopPtr* tinst = NULL;
    if (n->is_Call()) {
      CallNode *alloc = n->as_Call();
      // copy escape information to call node
      PointsToNode* ptn = ptnode_adr(alloc->_idx);
      PointsToNode::EscapeState es = escape_state(alloc, igvn);
      // We have an allocation or call which returns a Java object;
      // see if it is unescaped.
      if (es != PointsToNode::NoEscape || !ptn->_scalar_replaceable)
        continue;

      // Find the CheckCastPP for the allocate or for the return value of a call
      n = alloc->result_cast();
      if (n == NULL) {            // No uses except Initialize node
        if (alloc->is_Allocate()) {
          // Set the scalar_replaceable flag for allocation
          // so it could be eliminated if it has no uses.
          alloc->as_Allocate()->_is_scalar_replaceable = true;
        }
        continue;
      }
      if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
        assert(!alloc->is_Allocate(), "allocation should have unique type");
        continue;
      }

      // The inline code for Object.clone() casts the allocation result to
      // java.lang.Object and then to the actual type of the allocated
      // object.  Detect this case and use the second cast.
      // Also detect the j.l.reflect.Array.newInstance(jobject, jint) case when
      // the allocation result is cast to java.lang.Object and then
      // to the actual Array type.
      if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
          && (alloc->is_AllocateArray() ||
              igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {
        Node *cast2 = NULL;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node *use = n->fast_out(i);
          if (use->is_CheckCastPP()) {
            cast2 = use;
            break;
          }
        }
        if (cast2 != NULL) {
          n = cast2;
        } else {
          // Non-scalar replaceable if the allocation type is unknown statically
          // (reflection allocation); the object can't be restored during
          // deoptimization without a precise type.
          continue;
        }
      }
      if (alloc->is_Allocate()) {
        // Set the scalar_replaceable flag for allocation
        // so it could be eliminated.
        alloc->as_Allocate()->_is_scalar_replaceable = true;
      }
      set_escape_state(n->_idx, es);
      // In order for an object to be scalar-replaceable, it must be:
      //   - a direct allocation (not a call returning an object)
      //   - non-escaping
      //   - eligible to be a unique type
      //   - not determined to be ineligible by escape analysis
      set_map(alloc->_idx, n);
      set_map(n->_idx, alloc);
      const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
      if (t == NULL)
        continue;  // not a TypeInstPtr
      tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni);
      igvn->hash_delete(n);
      igvn->set_type(n, tinst);
      n->raise_bottom_type(tinst);
      igvn->hash_insert(n);
      record_for_optimizer(n);
      if (alloc->is_Allocate() && ptn->_scalar_replaceable &&
          (t->isa_instptr() || t->isa_aryptr())) {

        // First, put on the worklist all Field edges from the Connection Graph,
        // which is more accurate than putting the immediate users from the Ideal Graph.
        for (uint e = 0; e < ptn->edge_count(); e++) {
          Node *use = ptnode_adr(ptn->edge_target(e))->_node;
          assert(ptn->edge_type(e) == PointsToNode::FieldEdge && use->is_AddP(),
                 "only AddP nodes are Field edges in CG");
          if (use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(), "array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          }
        }

        // An allocation may have an Initialize which has raw stores.  Scan
        // the users of the raw allocation result and push AddP users
        // on alloc_worklist.
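        // (These raw-address AddPs are cases #3 and #5 described in
        // get_addp_base(): their Base is top (case #3) or the allocation's
        // raw result (case #5).)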
        Node *raw_result = alloc->proj_out(TypeFunc::Parms);
        assert (raw_result != NULL, "must have an allocation result");
        for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
          Node *use = raw_result->fast_out(i);
          if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, raw_result);
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(), "array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          } else if (use->is_MemBar()) {
            memnode_worklist.append_if_missing(use);
          }
        }
      }
    } else if (n->is_AddP()) {
      ptset.Clear();
      PointsTo(ptset, get_addp_base(n), igvn);
      assert(ptset.Size() == 1, "AddP address is unique");
      uint elem = ptset.getelem(); // Allocation node's index
      if (elem == _phantom_object)
        continue; // Assume the value was set outside this method.
      Node *base = get_map(elem);  // CheckCastPP node
      if (!split_AddP(n, base, igvn)) continue; // wrong type
      tinst = igvn->type(base)->isa_oopptr();
    } else if (n->is_Phi() ||
               n->is_CheckCastPP() ||
               n->is_EncodeP() ||
               n->is_DecodeN() ||
               (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
      if (visited.test_set(n->_idx)) {
        assert(n->is_Phi(), "loops only through Phi's");
        continue;  // already processed
      }
      ptset.Clear();
      PointsTo(ptset, n, igvn);
      if (ptset.Size() == 1) {
        uint elem = ptset.getelem(); // Allocation node's index
        if (elem == _phantom_object)
          continue; // Assume the value was set outside this method.
        Node *val = get_map(elem);   // CheckCastPP node
        TypeNode *tn = n->as_Type();
        tinst = igvn->type(val)->isa_oopptr();
        assert(tinst != NULL && tinst->is_known_instance() &&
               (uint)tinst->instance_id() == elem, "instance type expected.");

        const Type *tn_type = igvn->type(tn);
        const TypeOopPtr *tn_t;
        if (tn_type->isa_narrowoop()) {
          tn_t = tn_type->make_ptr()->isa_oopptr();
        } else {
          tn_t = tn_type->isa_oopptr();
        }

        if (tn_t != NULL &&
            tinst->cast_to_instance_id(TypeOopPtr::InstanceBot)->higher_equal(tn_t)) {
          if (tn_type->isa_narrowoop()) {
            tn_type = tinst->make_narrowoop();
          } else {
            tn_type = tinst;
          }
          igvn->hash_delete(tn);
          igvn->set_type(tn, tn_type);
          tn->set_type(tn_type);
          igvn->hash_insert(tn);
          record_for_optimizer(n);
        } else {
          continue; // wrong type
        }
      }
    } else {
      continue;
    }
    // push allocation's users on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if(use->is_Mem() && use->in(MemNode::Address) == n) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        memnode_worklist.append_if_missing(use);
#ifdef ASSERT
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "missing MergeMem node in the worklist");
#endif
      } else if (use->is_SafePoint() && tinst != NULL) {
        // Look for MergeMem nodes for calls which reference unique allocation
        // (through CheckCastPP nodes) even for debug info.
        Node* m = use->in(TypeFunc::Memory);
        uint iid = tinst->instance_id();
        while (m->is_Proj() && m->in(0)->is_SafePoint() &&
               m->in(0) != use && m->in(0)->_idx != iid) {
          m = m->in(0)->in(TypeFunc::Memory);
        }
#ifdef ASSERT
        if (m->is_MergeMem()) {
          assert(_mergemem_worklist.contains(m->as_MergeMem()), "missing MergeMem node in the worklist");
        }
#endif
      } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
        Node* addp2 = find_second_addp(use, n);
        if (addp2 != NULL) {
          alloc_worklist.append_if_missing(addp2);
        }
        alloc_worklist.append_if_missing(use);
      } else if (use->is_Phi() ||
                 use->is_CheckCastPP() ||
                 use->is_EncodeP() ||
                 use->is_DecodeN() ||
                 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
        alloc_worklist.append_if_missing(use);
      }
    }

  }
  // New alias types were created in split_AddP().
  uint new_index_end = (uint) _compile->num_alias_types();

  //  Phase 2:  Process MemNode's from memnode_worklist.  Compute new address types and
  //            new values for Memory inputs (the Memory inputs are not
  //            actually updated until phase 4).
  if (memnode_worklist.length() == 0)
    return;  // nothing to do

  while (memnode_worklist.length() != 0) {
    Node *n = memnode_worklist.pop();
    if (visited.test_set(n->_idx))
      continue;
    if (n->is_Phi() || n->Opcode() == Op_ClearArray) {
      // we don't need to do anything, but the users must be pushed
      // if we haven't processed this node yet
    } else if (n->is_MemBar()) { // Initialize, MemBar nodes
      // we don't need to do anything, but the users of the memory projection
      // must be pushed, especially MergeMem nodes, to populate the instance
      // memory slices
      n = n->as_MemBar()->proj_out(TypeFunc::Memory);
      if (n == NULL)
        continue;
    } else {
      assert(n->is_Mem(), "memory node required.");
      Node *addr = n->in(MemNode::Address);
      assert(addr->is_AddP(), "AddP required");
      const Type *addr_t = igvn->type(addr);
      if (addr_t == Type::TOP)
        continue;
      assert (addr_t->isa_ptr() != NULL, "pointer type required.");
      int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
      assert ((uint)alias_idx < new_index_end, "wrong alias index");
      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis, igvn);
      if (_compile->failing()) {
        return;
      }
      if (mem != n->in(MemNode::Memory)) {
        set_map(n->_idx, mem);
        ptnode_adr(n->_idx)->_node = n;
      }
      if (n->is_Load()) {
        continue;  // don't push users
      } else if (n->is_LoadStore()) {
        // get the memory projection
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node *use = n->fast_out(i);
          if (use->Opcode() == Op_SCMemProj) {
            n = use;
            break;
          }
        }
        assert(n->Opcode() == Op_SCMemProj, "memory projection required");
      }
    }
    // push user on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Phi() || use->Opcode() == Op_ClearArray) {
        memnode_worklist.append_if_missing(use);
      } else if(use->is_Mem() && use->in(MemNode::Memory) == n) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        memnode_worklist.append_if_missing(use);
#ifdef ASSERT
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "missing MergeMem node in the worklist");
"missing MergeMem node in the worklist"); 1209 } else { 1210 uint op = use->Opcode(); 1211 if (!(op = Op_StoreCM || op == Op_AryEq || op == Op_StrComp || 1212 op == Op_StrEquals || op == Op_StrIndexOf)) { 1213 n->dump(); 1214 use->dump(); 1215 assert(false, "EA missing memory path"); 1216 } 1217 #endif 1218 } 1219 } 1220 } 1221 1222 // Phase 3: Process MergeMem nodes from mergemem_worklist. 1223 // Walk each memory slice moving the first node encountered of each 1224 // instance type to the the input corresponding to its alias index. 1225 uint length = _mergemem_worklist.length(); 1226 for( uint next = 0; next < length; ++next ) { 1227 MergeMemNode* nmm = _mergemem_worklist.at(next); 1228 assert(!visited.test_set(nmm->_idx), "should not be visited before"); 1229 // Note: we don't want to use MergeMemStream here because we only want to 1230 // scan inputs which exist at the start, not ones we add during processing. 1231 // Note 2: MergeMem may already contains instance memory slices added 1232 // during find_inst_mem() call when memory nodes were processed above. 1233 igvn->hash_delete(nmm); 1234 uint nslices = nmm->req(); 1235 for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) { 1236 Node* mem = nmm->in(i); 1237 Node* cur = NULL; 1238 if (mem == NULL || mem->is_top()) 1239 continue; 1240 while (mem->is_Mem()) { 1241 const Type *at = igvn->type(mem->in(MemNode::Address)); 1242 if (at != Type::TOP) { 1243 assert (at->isa_ptr() != NULL, "pointer type required."); 1244 uint idx = (uint)_compile->get_alias_index(at->is_ptr()); 1245 if (idx == i) { 1246 if (cur == NULL) 1247 cur = mem; 1248 } else { 1249 if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) { 1250 nmm->set_memory_at(idx, mem); 1251 } 1252 } 1253 } 1254 mem = mem->in(MemNode::Memory); 1255 } 1256 nmm->set_memory_at(i, (cur != NULL) ? cur : mem); 1257 // Find any instance of the current type if we haven't encountered 1258 // a value of the instance along the chain. 1259 for (uint ni = new_index_start; ni < new_index_end; ni++) { 1260 if((uint)_compile->get_general_index(ni) == i) { 1261 Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni); 1262 if (nmm->is_empty_memory(m)) { 1263 Node* result = find_inst_mem(mem, ni, orig_phis, igvn); 1264 if (_compile->failing()) { 1265 return; 1266 } 1267 nmm->set_memory_at(ni, result); 1268 } 1269 } 1270 } 1271 } 1272 // Find the rest of instances values 1273 for (uint ni = new_index_start; ni < new_index_end; ni++) { 1274 const TypeOopPtr *tinst = igvn->C->get_adr_type(ni)->isa_oopptr(); 1275 Node* result = step_through_mergemem(nmm, ni, tinst); 1276 if (result == nmm->base_memory()) { 1277 // Didn't find instance memory, search through general slice recursively. 1278 result = nmm->memory_at(igvn->C->get_general_index(ni)); 1279 result = find_inst_mem(result, ni, orig_phis, igvn); 1280 if (_compile->failing()) { 1281 return; 1282 } 1283 nmm->set_memory_at(ni, result); 1284 } 1285 } 1286 igvn->hash_insert(nmm); 1287 record_for_optimizer(nmm); 1288 } 1289 1290 // Phase 4: Update the inputs of non-instance memory Phis and 1291 // the Memory input of memnodes 1292 // First update the inputs of any non-instance Phi's from 1293 // which we split out an instance Phi. Note we don't have 1294 // to recursively process Phi's encounted on the input memory 1295 // chains as is done in split_memory_phi() since they will 1296 // also be processed here. 
  for (int j = 0; j < orig_phis.length(); j++) {
    PhiNode *phi = orig_phis.at(j);
    int alias_idx = _compile->get_alias_index(phi->adr_type());
    igvn->hash_delete(phi);
    for (uint i = 1; i < phi->req(); i++) {
      Node *mem = phi->in(i);
      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis, igvn);
      if (_compile->failing()) {
        return;
      }
      if (mem != new_mem) {
        phi->set_req(i, new_mem);
      }
    }
    igvn->hash_insert(phi);
    record_for_optimizer(phi);
  }

  // Update the memory inputs of MemNodes with the value we computed
  // in Phase 2.
  for (uint i = 0; i < nodes_size(); i++) {
    Node *nmem = get_map(i);
    if (nmem != NULL) {
      Node *n = ptnode_adr(i)->_node;
      if (n != NULL && n->is_Mem()) {
        igvn->hash_delete(n);
        n->set_req(MemNode::Memory, nmem);
        igvn->hash_insert(n);
        record_for_optimizer(n);
      }
    }
  }
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
  int cnt = C->macro_count();
  for( int i=0; i < cnt; i++ ) {
    Node *n = C->macro_node(i);
    if ( n->is_Allocate() )
      return true;
    if( n->is_Lock() ) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if( !(obj->is_Parm() || obj->is_Con()) )
        return true;
    }
  }
  return false;
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;

  // 1. Populate Connection Graph (CG) with Ideal nodes.

  Unique_Node_List worklist_init;
  worklist_init.map(C->unique(), NULL);  // preallocate space

  // Initialize worklist
  if (C->root() != NULL) {
    worklist_init.push(C->root());
  }

  GrowableArray<int> cg_worklist;
  PhaseGVN* igvn = C->initial_gvn();
  bool has_allocations = false;

  // Push all useful nodes onto the CG list and set their type.
  for( uint next = 0; next < worklist_init.size(); ++next ) {
    Node* n = worklist_init.at(next);
    record_for_escape_analysis(n, igvn);
    // Only allocations and java static call results are checked
    // for an escape status.  See process_call_result() below.
    if (n->is_Allocate() || (n->is_CallStaticJava() &&
        ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject)) {
      has_allocations = true;
    }
    if(n->is_AddP()) {
      // Collect address nodes which directly reference an allocation.
      // Use them during stage 3 below to build initial connection graph
      // field edges.  Other field edges could be added after StoreP/LoadP
      // nodes are processed during stage 4 below.
      Node* base = get_addp_base(n);
      if(base->is_Proj() && base->in(0)->is_Allocate()) {
        cg_worklist.append(n->_idx);
      }
    } else if (n->is_MergeMem()) {
      // Collect all MergeMem nodes to add memory slices for
      // scalar replaceable objects in split_unique_types().
      _mergemem_worklist.append(n->as_MergeMem());
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      worklist_init.push(m);
    }
  }

  if (!has_allocations) {
    _collecting = false;
    return false; // Nothing to do.
  }

  // 2. First pass to create simple CG edges (doesn't require walking the CG).
  uint delayed_size = _delayed_worklist.size();
  for( uint next = 0; next < delayed_size; ++next ) {
    Node* n = _delayed_worklist.at(next);
    build_connection_graph(n, igvn);
  }

  // 3. Pass to create field edges (Allocate -F-> AddP).
  uint cg_length = cg_worklist.length();
  for( uint next = 0; next < cg_length; ++next ) {
    int ni = cg_worklist.at(next);
    build_connection_graph(ptnode_adr(ni)->_node, igvn);
  }

  cg_worklist.clear();
  cg_worklist.append(_phantom_object);

  // 4. Pass to build the Connection Graph edges which require
  //    walking the connection graph.
  for (uint ni = 0; ni < nodes_size(); ni++) {
    PointsToNode* ptn = ptnode_adr(ni);
    Node *n = ptn->_node;
    if (n != NULL) { // Call, AddP, LoadP, StoreP
      build_connection_graph(n, igvn);
      if (ptn->node_type() != PointsToNode::UnknownType)
        cg_worklist.append(n->_idx); // Collect CG nodes
    }
  }

  Arena* arena = Thread::current()->resource_area();
  VectorSet ptset(arena);
  GrowableArray<uint>  deferred_edges;
  VectorSet visited(arena);

  // 5. Remove deferred edges from the graph and adjust
  //    the escape state of nonescaping objects.
  cg_length = cg_worklist.length();
  for( uint next = 0; next < cg_length; ++next ) {
    int ni = cg_worklist.at(next);
    PointsToNode* ptn = ptnode_adr(ni);
    PointsToNode::NodeType nt = ptn->node_type();
    if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
      remove_deferred(ni, &deferred_edges, &visited);
      Node *n = ptn->_node;
      if (n->is_AddP()) {
        // Search for objects which are not scalar replaceable
        // and adjust their escape state.
        verify_escape_state(ni, ptset, igvn);
      }
    }
  }

  // 6. Propagate escape states.
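  // The three passes below compute transitive closures in decreasing order of
  // severity: GlobalEscape, then ArgEscape, then NoEscape.  Each pass only
  // raises a node's state (note the '<' guards), so, for example, a NoEscape
  // object reachable from a GlobalEscape node ends up GlobalEscape.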
  GrowableArray<int>  worklist;
  bool has_non_escaping_obj = false;

  // push all GlobalEscape nodes on the worklist
  for( uint next = 0; next < cg_length; ++next ) {
    int nk = cg_worklist.at(next);
    if (ptnode_adr(nk)->escape_state() == PointsToNode::GlobalEscape)
      worklist.push(nk);
  }
  // mark all nodes reachable from GlobalEscape nodes
  while(worklist.length() > 0) {
    PointsToNode* ptn = ptnode_adr(worklist.pop());
    uint e_cnt = ptn->edge_count();
    for (uint ei = 0; ei < e_cnt; ei++) {
      uint npi = ptn->edge_target(ei);
      PointsToNode *np = ptnode_adr(npi);
      if (np->escape_state() < PointsToNode::GlobalEscape) {
        np->set_escape_state(PointsToNode::GlobalEscape);
        worklist.push(npi);
      }
    }
  }

  // push all ArgEscape nodes on the worklist
  for( uint next = 0; next < cg_length; ++next ) {
    int nk = cg_worklist.at(next);
    if (ptnode_adr(nk)->escape_state() == PointsToNode::ArgEscape)
      worklist.push(nk);
  }
  // mark all nodes reachable from ArgEscape nodes
  while(worklist.length() > 0) {
    PointsToNode* ptn = ptnode_adr(worklist.pop());
    if (ptn->node_type() == PointsToNode::JavaObject)
      has_non_escaping_obj = true; // Non GlobalEscape
    uint e_cnt = ptn->edge_count();
    for (uint ei = 0; ei < e_cnt; ei++) {
      uint npi = ptn->edge_target(ei);
      PointsToNode *np = ptnode_adr(npi);
      if (np->escape_state() < PointsToNode::ArgEscape) {
        np->set_escape_state(PointsToNode::ArgEscape);
        worklist.push(npi);
      }
    }
  }

  GrowableArray<Node*> alloc_worklist;

  // push all NoEscape nodes on the worklist
  for( uint next = 0; next < cg_length; ++next ) {
    int nk = cg_worklist.at(next);
    if (ptnode_adr(nk)->escape_state() == PointsToNode::NoEscape)
      worklist.push(nk);
  }
  // mark all nodes reachable from NoEscape nodes
  while(worklist.length() > 0) {
    PointsToNode* ptn = ptnode_adr(worklist.pop());
    if (ptn->node_type() == PointsToNode::JavaObject)
      has_non_escaping_obj = true; // Non GlobalEscape
    Node* n = ptn->_node;
    if (n->is_Allocate() && ptn->_scalar_replaceable ) {
      // Push scalar replaceable allocations on alloc_worklist
      // for processing in split_unique_types().
      alloc_worklist.append(n);
    }
    uint e_cnt = ptn->edge_count();
    for (uint ei = 0; ei < e_cnt; ei++) {
      uint npi = ptn->edge_target(ei);
      PointsToNode *np = ptnode_adr(npi);
      if (np->escape_state() < PointsToNode::NoEscape) {
        np->set_escape_state(PointsToNode::NoEscape);
        worklist.push(npi);
      }
    }
  }

  _collecting = false;
  assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build");

  bool has_scalar_replaceable_candidates = alloc_worklist.length() > 0;
  if ( has_scalar_replaceable_candidates &&
       C->AliasLevel() >= 3 && EliminateAllocations ) {

    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist);

    if (C->failing())  return false;

    // Clean up after split unique types.
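    // (PhaseRemoveUseless strips nodes made unreachable by the retyping in
    // split_unique_types() before the method is optionally printed.)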
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist);

    if (C->failing()) return false;

    // Clean up after split unique types.
    ResourceMark rm;
    PhaseRemoveUseless pru(C->initial_gvn(), C->for_igvn());

    C->print_method("After Escape Analysis", 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    } else if (C->AliasLevel() < 3) {
      tty->print(" since AliasLevel < 3 ===");
    }
    tty->cr();
#endif
  }
  return has_non_escaping_obj;
}

// Search for objects which are not scalar replaceable.
void ConnectionGraph::verify_escape_state(int nidx, VectorSet& ptset, PhaseTransform* phase) {
  PointsToNode* ptn = ptnode_adr(nidx);
  Node* n = ptn->_node;
  assert(n->is_AddP(), "Should be called for AddP nodes only");
  // Search for objects which are not scalar replaceable
  // and mark their escape state as ArgEscape to propagate
  // the state to referenced objects.
  // Note: currently there is no difference in compiler optimizations
  // between ArgEscape objects and NoEscape objects which are not
  // scalar replaceable.

  Compile* C = _compile;

  int offset = ptn->offset();
  Node* base = get_addp_base(n);
  ptset.Clear();
  PointsTo(ptset, base, phase);
  int ptset_size = ptset.Size();

  // Check if an oop field's initializing value is recorded and add
  // a corresponding NULL value for the field if it is not recorded.
  // The Connection Graph does not record a default initialization by NULL
  // captured by an Initialize node.
  //
  // Note: this disables scalar replacement in some cases:
  //
  //    Point p[] = new Point[1];
  //    p[0] = new Point(); // Will not be scalar replaced
  //
  // but it saves us from incorrect optimizations in cases such as:
  //
  //    Point p[] = new Point[1];
  //    if (x) p[0] = new Point(); // Will not be scalar replaced
  //
  // Do a simple control flow analysis to distinguish the above cases.
  //
  if (offset != Type::OffsetBot && ptset_size == 1) {
    uint elem = ptset.getelem(); // Allocation node's index
    // It does not matter if it is not an Allocation node since
    // only non-escaping allocations are scalar replaced.
    if (ptnode_adr(elem)->_node->is_Allocate() &&
        ptnode_adr(elem)->escape_state() == PointsToNode::NoEscape) {
      AllocateNode* alloc = ptnode_adr(elem)->_node->as_Allocate();
      InitializeNode* ini = alloc->initialization();

      // Check only oop fields.
      const Type* adr_type = n->as_AddP()->bottom_type();
      BasicType basic_field_type = T_INT;
      if (adr_type->isa_instptr()) {
        ciField* field = C->alias_type(adr_type->isa_instptr())->field();
        if (field != NULL) {
          basic_field_type = field->layout_type();
        } else {
          // Ignore non-field loads (for example, a klass load).
        }
      } else if (adr_type->isa_aryptr()) {
        const Type* elemtype = adr_type->isa_aryptr()->elem();
        basic_field_type = elemtype->array_element_basic_type();
      } else {
        // Raw pointers are used for initializing stores, so skip them.
        assert(adr_type->isa_rawptr() && base->is_Proj() &&
               (base->in(0) == alloc), "unexpected pointer type");
      }
      if (basic_field_type == T_OBJECT ||
          basic_field_type == T_NARROWOOP ||
          basic_field_type == T_ARRAY) {
        Node* value = NULL;
        if (ini != NULL) {
          BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
          Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
          if (store != NULL && store->is_Store()) {
            value = store->in(MemNode::ValueIn);
          } else if (ptn->edge_count() > 0) { // Are there oop stores?
            // Check for a store which follows the allocation without branches.
            // For example, a volatile field store is not collected
            // by the Initialize node. TODO: it would be nice to use idom() here.
            for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
              store = n->fast_out(i);
              if (store->is_Store() && store->in(0) != NULL) {
                Node* ctrl = store->in(0);
                while (!(ctrl == ini || ctrl == alloc || ctrl == NULL ||
                         ctrl == C->root() || ctrl == C->top() || ctrl->is_Region() ||
                         ctrl->is_IfTrue() || ctrl->is_IfFalse())) {
                  ctrl = ctrl->in(0);
                }
                if (ctrl == ini || ctrl == alloc) {
                  value = store->in(MemNode::ValueIn);
                  break;
                }
              }
            }
          }
        }
        if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
          // A field's initializing value was not recorded. Add NULL.
          uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
          add_pointsto_edge(nidx, null_idx);
        }
      }
    }
  }

  // An object is not scalar replaceable if the field which may point
  // to it has an unknown offset (unknown element of an array of objects).
  //
  if (offset == Type::OffsetBot) {
    uint e_cnt = ptn->edge_count();
    for (uint ei = 0; ei < e_cnt; ei++) {
      uint npi = ptn->edge_target(ei);
      set_escape_state(npi, PointsToNode::ArgEscape);
      ptnode_adr(npi)->_scalar_replaceable = false;
    }
  }

  // Currently an object is not scalar replaceable if a LoadStore node
  // accesses its field, since the field's value is unknown after it.
  //
  bool has_LoadStore = false;
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node *use = n->fast_out(i);
    if (use->is_LoadStore()) {
      has_LoadStore = true;
      break;
    }
  }
  // An object is not scalar replaceable if the address points
  // to an unknown field (unknown element for arrays, offset is OffsetBot).
  //
  // Or the address may point to more than one object. This may produce
  // a false positive result (setting _scalar_replaceable to false)
  // since the flow-insensitive escape analysis can't separate
  // the case when stores overwrite the field's value from the case
  // when stores happen on different control branches.
  //
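  // A hypothetical Java illustration of the two shapes the flow-insensitive
  // analysis cannot tell apart (both make the field point to two objects):
  //
  //   p.f = new Point(); p.f = new Point();  // overwrite: first object dead
  //   if (x) { p.f = a; } else { p.f = b; }  // branches: both may be live
  //
  // Either shape conservatively marks everything in ptset as ArgEscape and
  // not scalar replaceable below.
  //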
  if (ptset_size > 1 || (ptset_size != 0 &&
      (has_LoadStore || offset == Type::OffsetBot))) {
    for (VectorSetI j(&ptset); j.test(); ++j) {
      set_escape_state(j.elem, PointsToNode::ArgEscape);
      ptnode_adr(j.elem)->_scalar_replaceable = false;
    }
  }
}

void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {

  switch (call->Opcode()) {
#ifdef ASSERT
  case Op_Allocate:
  case Op_AllocateArray:
  case Op_Lock:
  case Op_Unlock:
    assert(false, "should be done already");
    break;
#endif
  case Op_CallLeafNoFP:
  {
    // Stub calls: objects do not escape but they are not scalar replaceable.
    // Adjust the escape state of the outgoing arguments.
    const TypeTuple * d = call->tf()->domain();
    VectorSet ptset(Thread::current()->resource_area());
    for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
      const Type* at = d->field_at(i);
      Node *arg = call->in(i)->uncast();
      const Type *aat = phase->type(arg);
      if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr()) {
        assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
               aat->isa_ptr() != NULL, "expecting a Ptr");
        set_escape_state(arg->_idx, PointsToNode::ArgEscape);
        if (arg->is_AddP()) {
          //
          // The inline_native_clone() case, when the arraycopy stub is called
          // after the allocation but before the Initialize and CheckCastPP nodes.
          //
          // Set the AddP's base (the Allocate) as not scalar replaceable since
          // a pointer to the base (with an offset) is passed as an argument.
          //
          arg = get_addp_base(arg);
        }
        ptset.Clear();
        PointsTo(ptset, arg, phase);
        for (VectorSetI j(&ptset); j.test(); ++j) {
          uint pt = j.elem;
          set_escape_state(pt, PointsToNode::ArgEscape);
        }
      }
    }
    break;
  }
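  // A hypothetical Java shape producing the stub-call pattern above:
  //
  //   int[] copy(int[] src) { return src.clone(); }
  //
  // The intrinsified clone() calls the arraycopy stub with a raw pointer
  // into the new array, before its Initialize and CheckCastPP appear, so
  // the allocation stays ArgEscape and is not scalar replaceable.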
  case Op_CallStaticJava:
  // For a static call, we know exactly what method is being called.
  // Use the bytecode estimator to record the call's escape effects.
  {
    ciMethod *meth = call->as_CallJava()->method();
    BCEscapeAnalyzer *call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
    // fall-through if not a Java method or no analyzer information
    if (call_analyzer != NULL) {
      const TypeTuple * d = call->tf()->domain();
      VectorSet ptset(Thread::current()->resource_area());
      bool copy_dependencies = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        int k = i - TypeFunc::Parms;

        if (at->isa_oopptr() != NULL) {
          Node *arg = call->in(i)->uncast();

          bool global_escapes = false;
          bool fields_escapes = false;
          if (!call_analyzer->is_arg_stack(k)) {
            // The argument globally escapes, mark everything it could point to.
            set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
            global_escapes = true;
          } else {
            if (!call_analyzer->is_arg_local(k)) {
              // The argument itself doesn't escape, but any fields might.
              fields_escapes = true;
            }
            set_escape_state(arg->_idx, PointsToNode::ArgEscape);
            copy_dependencies = true;
          }

          ptset.Clear();
          PointsTo(ptset, arg, phase);
          for (VectorSetI j(&ptset); j.test(); ++j) {
            uint pt = j.elem;
            if (global_escapes) {
              // The argument globally escapes, mark everything it could point to.
              set_escape_state(pt, PointsToNode::GlobalEscape);
            } else {
              if (fields_escapes) {
                // The argument itself doesn't escape, but any fields might.
                add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
              }
              set_escape_state(pt, PointsToNode::ArgEscape);
            }
          }
        }
      }
      if (copy_dependencies)
        call_analyzer->copy_dependencies(_compile->dependencies());
      break;
    }
  }

  default:
    // Fall-through here if not a Java method, no analyzer information,
    // or some other type of call; assume the worst case: all arguments
    // globally escape.
    {
      // Adjust the escape state of the outgoing arguments.
      const TypeTuple * d = call->tf()->domain();
      VectorSet ptset(Thread::current()->resource_area());
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != NULL) {
          Node *arg = call->in(i)->uncast();
          set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
          ptset.Clear();
          PointsTo(ptset, arg, phase);
          for (VectorSetI j(&ptset); j.test(); ++j) {
            uint pt = j.elem;
            set_escape_state(pt, PointsToNode::GlobalEscape);
          }
        }
      }
    }
  }
}
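// A hypothetical Java view of the argument classification done above via
// BCEscapeAnalyzer (sink, keep() and test() are illustrative names):
//
//   static Object sink;
//   static void keep(Object o) { sink = o; }                // o not stack-bound:
//                                                           //   GlobalEscape
//   static int test(Object o)  { return o == null ? 0 : 1; } // o stays local:
//                                                           //   ArgEscape
//
// Calling keep(x) marks everything x may point to as GlobalEscape; calling
// test(x) only marks it ArgEscape and records a dependency on the bytecode
// analysis of test().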
void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *phase) {
  CallNode *call = resproj->in(0)->as_Call();
  uint call_idx = call->_idx;
  uint resproj_idx = resproj->_idx;

  switch (call->Opcode()) {
  case Op_Allocate:
  {
    Node *k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr *kt;
    if (k->Opcode() == Op_LoadKlass) {
      kt = k->as_Load()->type()->isa_klassptr();
    } else {
      // Also works for DecodeN(LoadNKlass).
      kt = k->as_Type()->type()->isa_klassptr();
    }
    assert(kt != NULL, "TypeKlassPtr required.");
    ciKlass* cik = kt->klass();
    ciInstanceKlass* ciik = cik->as_instance_klass();

    PointsToNode::EscapeState es;
    uint edge_to;
    if (cik->is_subclass_of(_compile->env()->Thread_klass()) || ciik->has_finalizer()) {
      es = PointsToNode::GlobalEscape;
      edge_to = _phantom_object; // Could not be worse
    } else {
      es = PointsToNode::NoEscape;
      edge_to = call_idx;
    }
    set_escape_state(call_idx, es);
    add_pointsto_edge(resproj_idx, edge_to);
    _processed.set(resproj_idx);
    break;
  }

  case Op_AllocateArray:
  {
    int length = call->in(AllocateNode::ALength)->find_int_con(-1);
    if (length < 0 || length > EliminateAllocationArraySizeLimit) {
      // Not scalar replaceable if the length is not constant or too big.
      ptnode_adr(call_idx)->_scalar_replaceable = false;
    }
    set_escape_state(call_idx, PointsToNode::NoEscape);
    add_pointsto_edge(resproj_idx, call_idx);
    _processed.set(resproj_idx);
    break;
  }
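  // Illustrative (hypothetical) Java shapes for the allocation cases above:
  //
  //   new Finalizable();  // has_finalizer(): GlobalEscape, since the
  //                       // finalizer keeps the object reachable;
  //                       // Thread subclasses are treated the same way
  //   new int[8];         // small constant length: still a candidate
  //   new int[n];         // unknown length: never scalar replaced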
  case Op_CallStaticJava:
  // For a static call, we know exactly what method is being called.
  // Use the bytecode estimator to record whether the call's return value escapes.
  {
    bool done = true;
    const TypeTuple *r = call->tf()->range();
    const Type* ret_type = NULL;

    if (r->cnt() > TypeFunc::Parms)
      ret_type = r->field_at(TypeFunc::Parms);

    // Note: we use isa_ptr() instead of isa_oopptr() here because the
    // _multianewarray functions return a TypeRawPtr.
    if (ret_type == NULL || ret_type->isa_ptr() == NULL) {
      _processed.set(resproj_idx);
      break; // doesn't return a pointer type
    }
    ciMethod *meth = call->as_CallJava()->method();
    const TypeTuple * d = call->tf()->domain();
    if (meth == NULL) {
      // Not a Java method, assume global escape.
      set_escape_state(call_idx, PointsToNode::GlobalEscape);
      add_pointsto_edge(resproj_idx, _phantom_object);
    } else {
      BCEscapeAnalyzer *call_analyzer = meth->get_bcea();
      bool copy_dependencies = false;

      if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated, unescaped object; simply
        // update the dependency information.
        // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
        set_escape_state(call_idx, PointsToNode::NoEscape);
        add_pointsto_edge(resproj_idx, call_idx);
        copy_dependencies = true;
      } else if (call_analyzer->is_return_local()) {
        // Determine whether any arguments are returned.
        set_escape_state(call_idx, PointsToNode::NoEscape);
        bool ret_arg = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);

          if (at->isa_oopptr() != NULL) {
            Node *arg = call->in(i)->uncast();

            if (call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
              ret_arg = true;
              PointsToNode *arg_esp = ptnode_adr(arg->_idx);
              if (arg_esp->node_type() == PointsToNode::UnknownType)
                done = false;
              else if (arg_esp->node_type() == PointsToNode::JavaObject)
                add_pointsto_edge(resproj_idx, arg->_idx);
              else
                add_deferred_edge(resproj_idx, arg->_idx);
              arg_esp->_hidden_alias = true;
            }
          }
        }
        if (done && !ret_arg) {
          // Returns an unknown object.
          set_escape_state(call_idx, PointsToNode::GlobalEscape);
          add_pointsto_edge(resproj_idx, _phantom_object);
        }
        copy_dependencies = true;
      } else {
        set_escape_state(call_idx, PointsToNode::GlobalEscape);
        add_pointsto_edge(resproj_idx, _phantom_object);
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          if (at->isa_oopptr() != NULL) {
            Node *arg = call->in(i)->uncast();
            PointsToNode *arg_esp = ptnode_adr(arg->_idx);
            arg_esp->_hidden_alias = true;
          }
        }
      }
      if (copy_dependencies)
        call_analyzer->copy_dependencies(_compile->dependencies());
    }
    if (done)
      _processed.set(resproj_idx);
    break;
  }

  default:
    // Some other type of call; assume the worst case: the
    // returned value, if any, globally escapes.
    {
      const TypeTuple *r = call->tf()->range();
      if (r->cnt() > TypeFunc::Parms) {
        const Type* ret_type = r->field_at(TypeFunc::Parms);

        // Note: we use isa_ptr() instead of isa_oopptr() here because the
        // _multianewarray functions return a TypeRawPtr.
        if (ret_type->isa_ptr() != NULL) {
          set_escape_state(call_idx, PointsToNode::GlobalEscape);
          add_pointsto_edge(resproj_idx, _phantom_object);
        }
      }
      _processed.set(resproj_idx);
    }
  }
}

// Populate the Connection Graph with Ideal nodes and create simple
// connection graph edges (no need to check the node_type of inputs
// or to call PointsTo() to walk the connection graph).
void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase) {
  if (_processed.test(n->_idx))
    return; // No need to redefine the node's state.

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_Allocate()) {
      add_node(n, PointsToNode::JavaObject, PointsToNode::UnknownEscape, true);
      record_for_optimizer(n);
    } else if (n->is_Lock() || n->is_Unlock()) {
      // Put Lock and Unlock nodes on the IGVN worklist to process them during
      // the first IGVN optimization, when escape information is still available.
      record_for_optimizer(n);
      _processed.set(n->_idx);
    } else {
      // Don't mark as processed since the call's arguments have to be processed.
      PointsToNode::NodeType nt = PointsToNode::UnknownType;
      PointsToNode::EscapeState es = PointsToNode::UnknownEscape;

      // Check if the call returns an object.
      const TypeTuple *r = n->as_Call()->tf()->range();
      if (r->cnt() > TypeFunc::Parms &&
          r->field_at(TypeFunc::Parms)->isa_ptr() &&
          n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
        nt = PointsToNode::JavaObject;
        if (!n->is_CallStaticJava()) {
          // Since the called method is statically unknown, assume
          // the worst case: the returned value globally escapes.
          es = PointsToNode::GlobalEscape;
        }
      }
      add_node(n, nt, es, false);
    }
    return;
  }

  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  switch (n->Opcode()) {
  case Op_AddP:
  {
    add_node(n, PointsToNode::Field, PointsToNode::UnknownEscape, false);
    break;
  }
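  // An AddP computes base + offset, i.e. the address of one field or one
  // array element, so it is recorded as a Field node. A rough, illustrative
  // mapping from hypothetical Java to the Ideal shapes involved:
  //
  //   p.f  = q;   // StoreP(AddP(p, #f_offset), q)
  //   a[i] = q;   // StoreP(AddP(a, #header + i*scale), q)
  //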
  case Op_CastX2P:
  { // "Unsafe" memory access.
    add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
    break;
  }
  case Op_CastPP:
  case Op_CheckCastPP:
  case Op_EncodeP:
  case Op_DecodeN:
  {
    add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
    int ti = n->in(1)->_idx;
    PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
    if (nt == PointsToNode::UnknownType) {
      _delayed_worklist.push(n); // Process it later.
      break;
    } else if (nt == PointsToNode::JavaObject) {
      add_pointsto_edge(n->_idx, ti);
    } else {
      add_deferred_edge(n->_idx, ti);
    }
    _processed.set(n->_idx);
    break;
  }
  case Op_ConP:
  {
    // Assume all pointer constants globally escape except for null.
    PointsToNode::EscapeState es;
    if (phase->type(n) == TypePtr::NULL_PTR)
      es = PointsToNode::NoEscape;
    else
      es = PointsToNode::GlobalEscape;

    add_node(n, PointsToNode::JavaObject, es, true);
    break;
  }
  case Op_ConN:
  {
    // Assume all narrow oop constants globally escape except for null.
    PointsToNode::EscapeState es;
    if (phase->type(n) == TypeNarrowOop::NULL_PTR)
      es = PointsToNode::NoEscape;
    else
      es = PointsToNode::GlobalEscape;

    add_node(n, PointsToNode::JavaObject, es, true);
    break;
  }
  case Op_CreateEx:
  {
    // Assume that all exception objects globally escape.
    add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
    break;
  }
  case Op_LoadKlass:
  case Op_LoadNKlass:
  {
    add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
    break;
  }
  case Op_LoadP:
  case Op_LoadN:
  {
    const Type *t = phase->type(n);
    if (t->make_ptr() == NULL) {
      _processed.set(n->_idx);
      return;
    }
    add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
    break;
  }
  case Op_Parm:
  {
    _processed.set(n->_idx); // No need to redefine its state.
    uint con = n->as_Proj()->_con;
    if (con < TypeFunc::Parms)
      return;
    const Type *t = n->in(0)->as_Start()->_domain->field_at(con);
    if (t->isa_ptr() == NULL)
      return;
    // We have to assume all input parameters globally escape.
    // (Note: passing 'false' since _processed is already set.)
    add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, false);
    break;
  }
  case Op_Phi:
  {
    const Type *t = n->as_Phi()->type();
    if (t->make_ptr() == NULL) {
      // Nothing to do if not an oop or narrow oop.
      _processed.set(n->_idx);
      return;
    }
    add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
    uint i;
    for (i = 1; i < n->req(); i++) {
      Node* in = n->in(i);
      if (in == NULL)
        continue; // ignore NULL
      in = in->uncast();
      if (in->is_top() || in == n)
        continue; // ignore top or inputs which go back to this node
      int ti = in->_idx;
      PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
      if (nt == PointsToNode::UnknownType) {
        break;
      } else if (nt == PointsToNode::JavaObject) {
        add_pointsto_edge(n->_idx, ti);
      } else {
        add_deferred_edge(n->_idx, ti);
      }
    }
    if (i >= n->req())
      _processed.set(n->_idx);
    else
      _delayed_worklist.push(n);
    break;
  }
  case Op_Proj:
  {
    // We are only interested in the oop result projection from a call.
    if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call()) {
      const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
      assert(r->cnt() > TypeFunc::Parms, "sanity");
      if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
        add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
        int ti = n->in(0)->_idx;
        // The call may not be registered yet (since not all its inputs are
        // registered) if this is the projection from the backbranch edge of a Phi.
        if (ptnode_adr(ti)->node_type() != PointsToNode::UnknownType) {
          process_call_result(n->as_Proj(), phase);
        }
        if (!_processed.test(n->_idx)) {
          // The call's result may need to be processed later if the call
          // returns its argument and the argument is not processed yet.
          _delayed_worklist.push(n);
        }
        break;
      }
    }
    _processed.set(n->_idx);
    break;
  }
  case Op_Return:
  {
    if (n->req() > TypeFunc::Parms &&
        phase->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
      // Treat the Return value as a LocalVar with GlobalEscape escape state.
      add_node(n, PointsToNode::LocalVar, PointsToNode::GlobalEscape, false);
      int ti = n->in(TypeFunc::Parms)->_idx;
      PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
      if (nt == PointsToNode::UnknownType) {
        _delayed_worklist.push(n); // Process it later.
        break;
      } else if (nt == PointsToNode::JavaObject) {
        add_pointsto_edge(n->_idx, ti);
      } else {
        add_deferred_edge(n->_idx, ti);
      }
    }
    _processed.set(n->_idx);
    break;
  }
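  // A deferred edge says "this LocalVar may point to whatever its input
  // points to"; remove_deferred() later collapses such edges into direct
  // points-to edges. A hypothetical Java shape producing them via a Phi:
  //
  //   Point p = x ? a : b;  // Phi(p) -D-> a, Phi(p) -D-> b; after
  //                         // collapsing, p points to both allocations
  //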
  case Op_StoreP:
  case Op_StoreN:
  {
    const Type *adr_type = phase->type(n->in(MemNode::Address));
    adr_type = adr_type->make_ptr();
    if (adr_type->isa_oopptr()) {
      add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
    } else {
      Node* adr = n->in(MemNode::Address);
      if (adr->is_AddP() && phase->type(adr) == TypeRawPtr::NOTNULL &&
          adr->in(AddPNode::Address)->is_Proj() &&
          adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
        add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
        // We are computing a raw address for a store captured
        // by an Initialize node; compute an appropriate address type.
        int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
        assert(offs != Type::OffsetBot, "offset must be a constant");
      } else {
        _processed.set(n->_idx);
        return;
      }
    }
    break;
  }
  case Op_StorePConditional:
  case Op_CompareAndSwapP:
  case Op_CompareAndSwapN:
  {
    const Type *adr_type = phase->type(n->in(MemNode::Address));
    adr_type = adr_type->make_ptr();
    if (adr_type->isa_oopptr()) {
      add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
    } else {
      _processed.set(n->_idx);
      return;
    }
    break;
  }
  case Op_AryEq:
  case Op_StrComp:
  case Op_StrEquals:
  case Op_StrIndexOf:
  {
    // char[] arrays passed to string intrinsics are not scalar replaceable.
    add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
    break;
  }
  case Op_ThreadLocal:
  {
    add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
    break;
  }
  default:
    ;
    // nothing to do
  }
  return;
}

void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
  uint n_idx = n->_idx;
  assert(ptnode_adr(n_idx)->_node != NULL, "node should be registered");

  // Don't set the processed bit for AddP, LoadP and StoreP since
  // they may need more than one pass to process.
  if (_processed.test(n_idx))
    return; // No need to redefine the node's state.

  if (n->is_Call()) {
    CallNode *call = n->as_Call();
    process_call_arguments(call, phase);
    _processed.set(n_idx);
    return;
  }

  switch (n->Opcode()) {
  case Op_AddP:
  {
    Node *base = get_addp_base(n);
    // Create a field edge to this node from everything base could point to.
    VectorSet ptset(Thread::current()->resource_area());
    PointsTo(ptset, base, phase);
    for (VectorSetI i(&ptset); i.test(); ++i) {
      uint pt = i.elem;
      add_field_edge(pt, n_idx, address_offset(n, phase));
    }
    break;
  }
  case Op_CastX2P:
  {
    assert(false, "Op_CastX2P");
    break;
  }
  case Op_CastPP:
  case Op_CheckCastPP:
  case Op_EncodeP:
  case Op_DecodeN:
  {
    int ti = n->in(1)->_idx;
    assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "all nodes should be registered");
    if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
      add_pointsto_edge(n_idx, ti);
    } else {
      add_deferred_edge(n_idx, ti);
    }
    _processed.set(n_idx);
    break;
  }
  case Op_ConP:
  {
    assert(false, "Op_ConP");
    break;
  }
  case Op_ConN:
  {
    assert(false, "Op_ConN");
    break;
  }
  case Op_CreateEx:
  {
    assert(false, "Op_CreateEx");
    break;
  }
  case Op_LoadKlass:
  case Op_LoadNKlass:
  {
    assert(false, "Op_LoadKlass");
    break;
  }
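  // The assert(false) arms above (and below) are intentional: those node
  // kinds were fully handled and marked processed during the first pass in
  // record_for_escape_analysis(), so the _processed check at the top of
  // this method should have filtered them out; reaching one here means the
  // two-pass protocol was violated.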
  case Op_LoadP:
  case Op_LoadN:
  {
    const Type *t = phase->type(n);
#ifdef ASSERT
    if (t->make_ptr() == NULL)
      assert(false, "Op_LoadP");
#endif

    Node* adr = n->in(MemNode::Address)->uncast();
    Node* adr_base;
    if (adr->is_AddP()) {
      adr_base = get_addp_base(adr);
    } else {
      adr_base = adr;
    }

    // For everything "adr_base" could point to, create a deferred edge from
    // this node to each field with the same offset.
    VectorSet ptset(Thread::current()->resource_area());
    PointsTo(ptset, adr_base, phase);
    int offset = address_offset(adr, phase);
    for (VectorSetI i(&ptset); i.test(); ++i) {
      uint pt = i.elem;
      add_deferred_edge_to_fields(n_idx, pt, offset);
    }
    break;
  }
  case Op_Parm:
  {
    assert(false, "Op_Parm");
    break;
  }
  case Op_Phi:
  {
#ifdef ASSERT
    const Type *t = n->as_Phi()->type();
    if (t->make_ptr() == NULL)
      assert(false, "Op_Phi");
#endif
    for (uint i = 1; i < n->req(); i++) {
      Node* in = n->in(i);
      if (in == NULL)
        continue; // ignore NULL
      in = in->uncast();
      if (in->is_top() || in == n)
        continue; // ignore top or inputs which go back to this node
      int ti = in->_idx;
      PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
      assert(nt != PointsToNode::UnknownType, "all nodes should be known");
      if (nt == PointsToNode::JavaObject) {
        add_pointsto_edge(n_idx, ti);
      } else {
        add_deferred_edge(n_idx, ti);
      }
    }
    _processed.set(n_idx);
    break;
  }
  case Op_Proj:
  {
    // We are only interested in the oop result projection from a call.
    if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call()) {
      assert(ptnode_adr(n->in(0)->_idx)->node_type() != PointsToNode::UnknownType,
             "all nodes should be registered");
      const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
      assert(r->cnt() > TypeFunc::Parms, "sanity");
      if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
        process_call_result(n->as_Proj(), phase);
        assert(_processed.test(n_idx), "all call results should be processed");
        break;
      }
    }
    assert(false, "Op_Proj");
    break;
  }
  case Op_Return:
  {
#ifdef ASSERT
    if (n->req() <= TypeFunc::Parms ||
        !phase->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
      assert(false, "Op_Return");
    }
#endif
    int ti = n->in(TypeFunc::Parms)->_idx;
    assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "node should be registered");
    if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
      add_pointsto_edge(n_idx, ti);
    } else {
      add_deferred_edge(n_idx, ti);
    }
    _processed.set(n_idx);
    break;
  }
  case Op_StoreP:
  case Op_StoreN:
  case Op_StorePConditional:
  case Op_CompareAndSwapP:
  case Op_CompareAndSwapN:
  {
    Node *adr = n->in(MemNode::Address);
    const Type *adr_type = phase->type(adr)->make_ptr();
#ifdef ASSERT
    if (!adr_type->isa_oopptr())
      assert(phase->type(adr) == TypeRawPtr::NOTNULL, "Op_StoreP");
#endif

    assert(adr->is_AddP(), "expecting an AddP");
    Node *adr_base = get_addp_base(adr);
    Node *val = n->in(MemNode::ValueIn)->uncast();
    // For everything "adr_base" could point to, create a deferred edge
    // to "val" from each field with the same offset.
    VectorSet ptset(Thread::current()->resource_area());
    PointsTo(ptset, adr_base, phase);
    for (VectorSetI i(&ptset); i.test(); ++i) {
      uint pt = i.elem;
      add_edge_from_fields(pt, val->_idx, address_offset(adr, phase));
    }
    break;
  }
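  // A hypothetical Java store and the edges built above, assuming the
  // analysis found that p may point to allocations A1 and A2:
  //
  //   p.f = q;   // the Field node at offset f of both A1 and A2 gets an
  //              // edge to q's node (deferred if q is a LocalVar)
  //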
  case Op_AryEq:
  case Op_StrComp:
  case Op_StrEquals:
  case Op_StrIndexOf:
  {
    // char[] arrays passed to string intrinsics do not escape, but
    // they are not scalar replaceable. Adjust their escape state.
    // Start from the in(2) edge since in(1) is the memory edge.
    for (uint i = 2; i < n->req(); i++) {
      Node* adr = n->in(i)->uncast();
      const Type *at = phase->type(adr);
      if (!adr->is_top() && at->isa_ptr()) {
        assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
               at->isa_ptr() != NULL, "expecting a Ptr");
        if (adr->is_AddP()) {
          adr = get_addp_base(adr);
        }
        // Mark as ArgEscape everything "adr" could point to.
        set_escape_state(adr->_idx, PointsToNode::ArgEscape);
      }
    }
    _processed.set(n_idx);
    break;
  }
  case Op_ThreadLocal:
  {
    assert(false, "Op_ThreadLocal");
    break;
  }
  default:
    // This method should be called only for EA specific nodes.
    ShouldNotReachHere();
  }
}

#ifndef PRODUCT
void ConnectionGraph::dump() {
  PhaseGVN *igvn = _compile->initial_gvn();
  bool first = true;

  uint size = nodes_size();
  for (uint ni = 0; ni < size; ni++) {
    PointsToNode *ptn = ptnode_adr(ni);
    PointsToNode::NodeType ptn_type = ptn->node_type();

    if (ptn_type != PointsToNode::JavaObject || ptn->_node == NULL)
      continue;
    PointsToNode::EscapeState es = escape_state(ptn->_node, igvn);
    if (ptn->_node->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
      if (first) {
        tty->cr();
        tty->print("======== Connection graph for ");
        _compile->method()->print_short_name();
        tty->cr();
        first = false;
      }
      tty->print("%6d ", ni);
      ptn->dump();
      // Print all locals which reference this allocation.
      for (uint li = ni; li < size; li++) {
        PointsToNode *ptn_loc = ptnode_adr(li);
        PointsToNode::NodeType ptn_loc_type = ptn_loc->node_type();
        if (ptn_loc_type == PointsToNode::LocalVar && ptn_loc->_node != NULL &&
            ptn_loc->edge_count() == 1 && ptn_loc->edge_target(0) == ni) {
          ptnode_adr(li)->dump(false);
        }
      }
      if (Verbose) {
        // Print all fields which reference this allocation.
        for (uint i = 0; i < ptn->edge_count(); i++) {
          uint ei = ptn->edge_target(i);
          ptnode_adr(ei)->dump(false);
        }
      }
      tty->cr();
    }
  }
}
#endif