1 /* 2 * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "ci/bcEscapeAnalyzer.hpp" 27 #include "compiler/compileLog.hpp" 28 #include "libadt/vectset.hpp" 29 #include "memory/allocation.hpp" 30 #include "opto/c2compiler.hpp" 31 #include "opto/callnode.hpp" 32 #include "opto/cfgnode.hpp" 33 #include "opto/compile.hpp" 34 #include "opto/escape.hpp" 35 #include "opto/phaseX.hpp" 36 #include "opto/movenode.hpp" 37 #include "opto/rootnode.hpp" 38 39 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) : 40 _nodes(C->comp_arena(), C->unique(), C->unique(), NULL), 41 _in_worklist(C->comp_arena()), 42 _next_pidx(0), 43 _collecting(true), 44 _verify(false), 45 _compile(C), 46 _igvn(igvn), 47 _node_map(C->comp_arena()) { 48 // Add unknown java object. 49 add_java_object(C->top(), PointsToNode::GlobalEscape); 50 phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject(); 51 // Add ConP(#NULL) and ConN(#NULL) nodes. 52 Node* oop_null = igvn->zerocon(T_OBJECT); 53 assert(oop_null->_idx < nodes_size(), "should be created already"); 54 add_java_object(oop_null, PointsToNode::NoEscape); 55 null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject(); 56 if (UseCompressedOops) { 57 Node* noop_null = igvn->zerocon(T_NARROWOOP); 58 assert(noop_null->_idx < nodes_size(), "should be created already"); 59 map_ideal_node(noop_null, null_obj); 60 } 61 _pcmp_neq = NULL; // Should be initialized 62 _pcmp_eq = NULL; 63 } 64 65 bool ConnectionGraph::has_candidates(Compile *C) { 66 // EA brings benefits only when the code has allocations and/or locks which 67 // are represented by ideal Macro nodes. 68 int cnt = C->macro_count(); 69 for (int i = 0; i < cnt; i++) { 70 Node *n = C->macro_node(i); 71 if (n->is_Allocate()) 72 return true; 73 if (n->is_Lock()) { 74 Node* obj = n->as_Lock()->obj_node()->uncast(); 75 if (!(obj->is_Parm() || obj->is_Con())) 76 return true; 77 } 78 if (n->is_CallStaticJava() && 79 n->as_CallStaticJava()->is_boxing_method()) { 80 return true; 81 } 82 } 83 return false; 84 } 85 86 void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) { 87 Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]); 88 ResourceMark rm; 89 90 // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction 91 // to create space for them in ConnectionGraph::_nodes[]. 92 Node* oop_null = igvn->zerocon(T_OBJECT); 93 Node* noop_null = igvn->zerocon(T_NARROWOOP); 94 ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn); 95 // Perform escape analysis 96 if (congraph->compute_escape()) { 97 // There are non escaping objects. 98 C->set_congraph(congraph); 99 } 100 // Cleanup. 101 if (oop_null->outcnt() == 0) 102 igvn->hash_delete(oop_null); 103 if (noop_null->outcnt() == 0) 104 igvn->hash_delete(noop_null); 105 } 106 107 bool ConnectionGraph::compute_escape() { 108 Compile* C = _compile; 109 PhaseGVN* igvn = _igvn; 110 111 // Worklists used by EA. 112 Unique_Node_List delayed_worklist; 113 GrowableArray<Node*> alloc_worklist; 114 GrowableArray<Node*> ptr_cmp_worklist; 115 GrowableArray<Node*> storestore_worklist; 116 GrowableArray<PointsToNode*> ptnodes_worklist; 117 GrowableArray<JavaObjectNode*> java_objects_worklist; 118 GrowableArray<JavaObjectNode*> non_escaped_worklist; 119 GrowableArray<FieldNode*> oop_fields_worklist; 120 DEBUG_ONLY( GrowableArray<Node*> addp_worklist; ) 121 122 { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]); 123 124 // 1. Populate Connection Graph (CG) with PointsTo nodes. 125 ideal_nodes.map(C->live_nodes(), NULL); // preallocate space 126 // Initialize worklist 127 if (C->root() != NULL) { 128 ideal_nodes.push(C->root()); 129 } 130 ptnodes_worklist.append(phantom_obj); 131 java_objects_worklist.append(phantom_obj); 132 for( uint next = 0; next < ideal_nodes.size(); ++next ) { 133 Node* n = ideal_nodes.at(next); 134 // Create PointsTo nodes and add them to Connection Graph. Called 135 // only once per ideal node since ideal_nodes is Unique_Node list. 136 add_node_to_connection_graph(n, &delayed_worklist); 137 PointsToNode* ptn = ptnode_adr(n->_idx); 138 if (ptn != NULL && ptn != phantom_obj) { 139 ptnodes_worklist.append(ptn); 140 if (ptn->is_JavaObject()) { 141 java_objects_worklist.append(ptn->as_JavaObject()); 142 if ((n->is_Allocate() || n->is_CallStaticJava()) && 143 (ptn->escape_state() < PointsToNode::GlobalEscape)) { 144 // Only allocations and java static calls results are interesting. 145 non_escaped_worklist.append(ptn->as_JavaObject()); 146 } 147 } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) { 148 oop_fields_worklist.append(ptn->as_Field()); 149 } 150 } 151 if (n->is_MergeMem()) { 152 // Collect all MergeMem nodes to add memory slices for 153 // scalar replaceable objects in split_unique_types(). 154 _mergemem_worklist.append(n->as_MergeMem()); 155 } else if (OptimizePtrCompare && n->is_Cmp() && 156 (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) { 157 // Collect compare pointers nodes. 158 ptr_cmp_worklist.append(n); 159 } else if (n->is_MemBarStoreStore()) { 160 // Collect all MemBarStoreStore nodes so that depending on the 161 // escape status of the associated Allocate node some of them 162 // may be eliminated. 163 storestore_worklist.append(n); 164 } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) && 165 (n->req() > MemBarNode::Precedent)) { 166 record_for_optimizer(n); 167 #ifdef ASSERT 168 } else if (n->is_AddP()) { 169 // Collect address nodes for graph verification. 170 addp_worklist.append(n); 171 #endif 172 } 173 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 174 Node* m = n->fast_out(i); // Get user 175 ideal_nodes.push(m); 176 } 177 } 178 if (non_escaped_worklist.length() == 0) { 179 _collecting = false; 180 return false; // Nothing to do. 181 } 182 // Add final simple edges to graph. 183 while(delayed_worklist.size() > 0) { 184 Node* n = delayed_worklist.pop(); 185 add_final_edges(n); 186 } 187 int ptnodes_length = ptnodes_worklist.length(); 188 189 #ifdef ASSERT 190 if (VerifyConnectionGraph) { 191 // Verify that no new simple edges could be created and all 192 // local vars has edges. 193 _verify = true; 194 for (int next = 0; next < ptnodes_length; ++next) { 195 PointsToNode* ptn = ptnodes_worklist.at(next); 196 add_final_edges(ptn->ideal_node()); 197 if (ptn->is_LocalVar() && ptn->edge_count() == 0) { 198 ptn->dump(); 199 assert(ptn->as_LocalVar()->edge_count() > 0, "sanity"); 200 } 201 } 202 _verify = false; 203 } 204 #endif 205 206 // 2. Finish Graph construction by propagating references to all 207 // java objects through graph. 208 if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist, 209 java_objects_worklist, oop_fields_worklist)) { 210 // All objects escaped or hit time or iterations limits. 211 _collecting = false; 212 return false; 213 } 214 215 // 3. Adjust scalar_replaceable state of nonescaping objects and push 216 // scalar replaceable allocations on alloc_worklist for processing 217 // in split_unique_types(). 218 int non_escaped_length = non_escaped_worklist.length(); 219 for (int next = 0; next < non_escaped_length; next++) { 220 JavaObjectNode* ptn = non_escaped_worklist.at(next); 221 bool noescape = (ptn->escape_state() == PointsToNode::NoEscape); 222 Node* n = ptn->ideal_node(); 223 if (n->is_Allocate()) { 224 n->as_Allocate()->_is_non_escaping = noescape; 225 } 226 if (n->is_CallStaticJava()) { 227 n->as_CallStaticJava()->_is_non_escaping = noescape; 228 } 229 if (noescape && ptn->scalar_replaceable()) { 230 adjust_scalar_replaceable_state(ptn); 231 if (ptn->scalar_replaceable()) { 232 alloc_worklist.append(ptn->ideal_node()); 233 } 234 } 235 } 236 237 #ifdef ASSERT 238 if (VerifyConnectionGraph) { 239 // Verify that graph is complete - no new edges could be added or needed. 240 verify_connection_graph(ptnodes_worklist, non_escaped_worklist, 241 java_objects_worklist, addp_worklist); 242 } 243 assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build"); 244 assert(null_obj->escape_state() == PointsToNode::NoEscape && 245 null_obj->edge_count() == 0 && 246 !null_obj->arraycopy_src() && 247 !null_obj->arraycopy_dst(), "sanity"); 248 #endif 249 250 _collecting = false; 251 252 } // TracePhase t3("connectionGraph") 253 254 // 4. Optimize ideal graph based on EA information. 255 bool has_non_escaping_obj = (non_escaped_worklist.length() > 0); 256 if (has_non_escaping_obj) { 257 optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist); 258 } 259 260 #ifndef PRODUCT 261 if (PrintEscapeAnalysis) { 262 dump(ptnodes_worklist); // Dump ConnectionGraph 263 } 264 #endif 265 266 bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0); 267 #ifdef ASSERT 268 if (VerifyConnectionGraph) { 269 int alloc_length = alloc_worklist.length(); 270 for (int next = 0; next < alloc_length; ++next) { 271 Node* n = alloc_worklist.at(next); 272 PointsToNode* ptn = ptnode_adr(n->_idx); 273 assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity"); 274 } 275 } 276 #endif 277 278 // 5. Separate memory graph for scalar replaceable allcations. 279 if (has_scalar_replaceable_candidates && 280 C->AliasLevel() >= 3 && EliminateAllocations) { 281 // Now use the escape information to create unique types for 282 // scalar replaceable objects. 283 split_unique_types(alloc_worklist); 284 if (C->failing()) return false; 285 C->print_method(PHASE_AFTER_EA, 2); 286 287 #ifdef ASSERT 288 } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) { 289 tty->print("=== No allocations eliminated for "); 290 C->method()->print_short_name(); 291 if(!EliminateAllocations) { 292 tty->print(" since EliminateAllocations is off ==="); 293 } else if(!has_scalar_replaceable_candidates) { 294 tty->print(" since there are no scalar replaceable candidates ==="); 295 } else if(C->AliasLevel() < 3) { 296 tty->print(" since AliasLevel < 3 ==="); 297 } 298 tty->cr(); 299 #endif 300 } 301 return has_non_escaping_obj; 302 } 303 304 // Utility function for nodes that load an object 305 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) { 306 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 307 // ThreadLocal has RawPtr type. 308 const Type* t = _igvn->type(n); 309 if (t->make_ptr() != NULL) { 310 Node* adr = n->in(MemNode::Address); 311 #ifdef ASSERT 312 if (!adr->is_AddP()) { 313 assert(_igvn->type(adr)->isa_rawptr(), "sanity"); 314 } else { 315 assert((ptnode_adr(adr->_idx) == NULL || 316 ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity"); 317 } 318 #endif 319 add_local_var_and_edge(n, PointsToNode::NoEscape, 320 adr, delayed_worklist); 321 } 322 } 323 324 // Populate Connection Graph with PointsTo nodes and create simple 325 // connection graph edges. 326 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) { 327 assert(!_verify, "this method sould not be called for verification"); 328 PhaseGVN* igvn = _igvn; 329 uint n_idx = n->_idx; 330 PointsToNode* n_ptn = ptnode_adr(n_idx); 331 if (n_ptn != NULL) 332 return; // No need to redefine PointsTo node during first iteration. 333 334 if (n->is_Call()) { 335 // Arguments to allocation and locking don't escape. 336 if (n->is_AbstractLock()) { 337 // Put Lock and Unlock nodes on IGVN worklist to process them during 338 // first IGVN optimization when escape information is still available. 339 record_for_optimizer(n); 340 } else if (n->is_Allocate()) { 341 add_call_node(n->as_Call()); 342 record_for_optimizer(n); 343 } else { 344 if (n->is_CallStaticJava()) { 345 const char* name = n->as_CallStaticJava()->_name; 346 if (name != NULL && strcmp(name, "uncommon_trap") == 0) 347 return; // Skip uncommon traps 348 } 349 // Don't mark as processed since call's arguments have to be processed. 350 delayed_worklist->push(n); 351 // Check if a call returns an object. 352 if ((n->as_Call()->returns_pointer() && 353 n->as_Call()->proj_out(TypeFunc::Parms) != NULL) || 354 (n->is_CallStaticJava() && 355 n->as_CallStaticJava()->is_boxing_method())) { 356 add_call_node(n->as_Call()); 357 } 358 } 359 return; 360 } 361 // Put this check here to process call arguments since some call nodes 362 // point to phantom_obj. 363 if (n_ptn == phantom_obj || n_ptn == null_obj) 364 return; // Skip predefined nodes. 365 366 int opcode = n->Opcode(); 367 switch (opcode) { 368 case Op_AddP: { 369 Node* base = get_addp_base(n); 370 PointsToNode* ptn_base = ptnode_adr(base->_idx); 371 // Field nodes are created for all field types. They are used in 372 // adjust_scalar_replaceable_state() and split_unique_types(). 373 // Note, non-oop fields will have only base edges in Connection 374 // Graph because such fields are not used for oop loads and stores. 375 int offset = address_offset(n, igvn); 376 add_field(n, PointsToNode::NoEscape, offset); 377 if (ptn_base == NULL) { 378 delayed_worklist->push(n); // Process it later. 379 } else { 380 n_ptn = ptnode_adr(n_idx); 381 add_base(n_ptn->as_Field(), ptn_base); 382 } 383 break; 384 } 385 case Op_CastX2P: { 386 map_ideal_node(n, phantom_obj); 387 break; 388 } 389 case Op_CastPP: 390 case Op_CheckCastPP: 391 case Op_EncodeP: 392 case Op_DecodeN: 393 case Op_EncodePKlass: 394 case Op_DecodeNKlass: { 395 add_local_var_and_edge(n, PointsToNode::NoEscape, 396 n->in(1), delayed_worklist); 397 break; 398 } 399 case Op_CMoveP: { 400 add_local_var(n, PointsToNode::NoEscape); 401 // Do not add edges during first iteration because some could be 402 // not defined yet. 403 delayed_worklist->push(n); 404 break; 405 } 406 case Op_ConP: 407 case Op_ConN: 408 case Op_ConNKlass: { 409 // assume all oop constants globally escape except for null 410 PointsToNode::EscapeState es; 411 const Type* t = igvn->type(n); 412 if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) { 413 es = PointsToNode::NoEscape; 414 } else { 415 es = PointsToNode::GlobalEscape; 416 } 417 add_java_object(n, es); 418 break; 419 } 420 case Op_CreateEx: { 421 // assume that all exception objects globally escape 422 map_ideal_node(n, phantom_obj); 423 break; 424 } 425 case Op_LoadKlass: 426 case Op_LoadNKlass: { 427 // Unknown class is loaded 428 map_ideal_node(n, phantom_obj); 429 break; 430 } 431 case Op_LoadP: 432 case Op_LoadN: 433 case Op_LoadPLocked: { 434 add_objload_to_connection_graph(n, delayed_worklist); 435 break; 436 } 437 case Op_Parm: { 438 map_ideal_node(n, phantom_obj); 439 break; 440 } 441 case Op_PartialSubtypeCheck: { 442 // Produces Null or notNull and is used in only in CmpP so 443 // phantom_obj could be used. 444 map_ideal_node(n, phantom_obj); // Result is unknown 445 break; 446 } 447 case Op_Phi: { 448 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 449 // ThreadLocal has RawPtr type. 450 const Type* t = n->as_Phi()->type(); 451 if (t->make_ptr() != NULL) { 452 add_local_var(n, PointsToNode::NoEscape); 453 // Do not add edges during first iteration because some could be 454 // not defined yet. 455 delayed_worklist->push(n); 456 } 457 break; 458 } 459 case Op_Proj: { 460 // we are only interested in the oop result projection from a call 461 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() && 462 n->in(0)->as_Call()->returns_pointer()) { 463 add_local_var_and_edge(n, PointsToNode::NoEscape, 464 n->in(0), delayed_worklist); 465 } 466 break; 467 } 468 case Op_Rethrow: // Exception object escapes 469 case Op_Return: { 470 if (n->req() > TypeFunc::Parms && 471 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) { 472 // Treat Return value as LocalVar with GlobalEscape escape state. 473 add_local_var_and_edge(n, PointsToNode::GlobalEscape, 474 n->in(TypeFunc::Parms), delayed_worklist); 475 } 476 break; 477 } 478 case Op_GetAndSetP: 479 case Op_GetAndSetN: { 480 add_objload_to_connection_graph(n, delayed_worklist); 481 // fallthrough 482 } 483 case Op_StoreP: 484 case Op_StoreN: 485 case Op_StoreNKlass: 486 case Op_StorePConditional: 487 case Op_CompareAndSwapP: 488 case Op_CompareAndSwapN: { 489 Node* adr = n->in(MemNode::Address); 490 const Type *adr_type = igvn->type(adr); 491 adr_type = adr_type->make_ptr(); 492 if (adr_type == NULL) { 493 break; // skip dead nodes 494 } 495 if (adr_type->isa_oopptr() || 496 (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) && 497 (adr_type == TypeRawPtr::NOTNULL && 498 adr->in(AddPNode::Address)->is_Proj() && 499 adr->in(AddPNode::Address)->in(0)->is_Allocate())) { 500 delayed_worklist->push(n); // Process it later. 501 #ifdef ASSERT 502 assert(adr->is_AddP(), "expecting an AddP"); 503 if (adr_type == TypeRawPtr::NOTNULL) { 504 // Verify a raw address for a store captured by Initialize node. 505 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 506 assert(offs != Type::OffsetBot, "offset must be a constant"); 507 } 508 #endif 509 } else { 510 // Ignore copy the displaced header to the BoxNode (OSR compilation). 511 if (adr->is_BoxLock()) 512 break; 513 // Stored value escapes in unsafe access. 514 if ((opcode == Op_StoreP) && (adr_type == TypeRawPtr::BOTTOM)) { 515 // Pointer stores in G1 barriers looks like unsafe access. 516 // Ignore such stores to be able scalar replace non-escaping 517 // allocations. 518 if (UseG1GC && adr->is_AddP()) { 519 Node* base = get_addp_base(adr); 520 if (base->Opcode() == Op_LoadP && 521 base->in(MemNode::Address)->is_AddP()) { 522 adr = base->in(MemNode::Address); 523 Node* tls = get_addp_base(adr); 524 if (tls->Opcode() == Op_ThreadLocal) { 525 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 526 if (offs == in_bytes(JavaThread::satb_mark_queue_offset() + 527 PtrQueue::byte_offset_of_buf())) { 528 break; // G1 pre barier previous oop value store. 529 } 530 if (offs == in_bytes(JavaThread::dirty_card_queue_offset() + 531 PtrQueue::byte_offset_of_buf())) { 532 break; // G1 post barier card address store. 533 } 534 } 535 } 536 } 537 delayed_worklist->push(n); // Process unsafe access later. 538 break; 539 } 540 #ifdef ASSERT 541 n->dump(1); 542 assert(false, "not unsafe or G1 barrier raw StoreP"); 543 #endif 544 } 545 break; 546 } 547 case Op_AryEq: 548 case Op_StrComp: 549 case Op_StrEquals: 550 case Op_StrIndexOf: 551 case Op_EncodeISOArray: { 552 add_local_var(n, PointsToNode::ArgEscape); 553 delayed_worklist->push(n); // Process it later. 554 break; 555 } 556 case Op_ThreadLocal: { 557 add_java_object(n, PointsToNode::ArgEscape); 558 break; 559 } 560 default: 561 ; // Do nothing for nodes not related to EA. 562 } 563 return; 564 } 565 566 #ifdef ASSERT 567 #define ELSE_FAIL(name) \ 568 /* Should not be called for not pointer type. */ \ 569 n->dump(1); \ 570 assert(false, name); \ 571 break; 572 #else 573 #define ELSE_FAIL(name) \ 574 break; 575 #endif 576 577 // Add final simple edges to graph. 578 void ConnectionGraph::add_final_edges(Node *n) { 579 PointsToNode* n_ptn = ptnode_adr(n->_idx); 580 #ifdef ASSERT 581 if (_verify && n_ptn->is_JavaObject()) 582 return; // This method does not change graph for JavaObject. 583 #endif 584 585 if (n->is_Call()) { 586 process_call_arguments(n->as_Call()); 587 return; 588 } 589 assert(n->is_Store() || n->is_LoadStore() || 590 (n_ptn != NULL) && (n_ptn->ideal_node() != NULL), 591 "node should be registered already"); 592 int opcode = n->Opcode(); 593 switch (opcode) { 594 case Op_AddP: { 595 Node* base = get_addp_base(n); 596 PointsToNode* ptn_base = ptnode_adr(base->_idx); 597 assert(ptn_base != NULL, "field's base should be registered"); 598 add_base(n_ptn->as_Field(), ptn_base); 599 break; 600 } 601 case Op_CastPP: 602 case Op_CheckCastPP: 603 case Op_EncodeP: 604 case Op_DecodeN: 605 case Op_EncodePKlass: 606 case Op_DecodeNKlass: { 607 add_local_var_and_edge(n, PointsToNode::NoEscape, 608 n->in(1), NULL); 609 break; 610 } 611 case Op_CMoveP: { 612 for (uint i = CMoveNode::IfFalse; i < n->req(); i++) { 613 Node* in = n->in(i); 614 if (in == NULL) 615 continue; // ignore NULL 616 Node* uncast_in = in->uncast(); 617 if (uncast_in->is_top() || uncast_in == n) 618 continue; // ignore top or inputs which go back this node 619 PointsToNode* ptn = ptnode_adr(in->_idx); 620 assert(ptn != NULL, "node should be registered"); 621 add_edge(n_ptn, ptn); 622 } 623 break; 624 } 625 case Op_LoadP: 626 case Op_LoadN: 627 case Op_LoadPLocked: { 628 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 629 // ThreadLocal has RawPtr type. 630 const Type* t = _igvn->type(n); 631 if (t->make_ptr() != NULL) { 632 Node* adr = n->in(MemNode::Address); 633 add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL); 634 break; 635 } 636 ELSE_FAIL("Op_LoadP"); 637 } 638 case Op_Phi: { 639 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 640 // ThreadLocal has RawPtr type. 641 const Type* t = n->as_Phi()->type(); 642 if (t->make_ptr() != NULL) { 643 for (uint i = 1; i < n->req(); i++) { 644 Node* in = n->in(i); 645 if (in == NULL) 646 continue; // ignore NULL 647 Node* uncast_in = in->uncast(); 648 if (uncast_in->is_top() || uncast_in == n) 649 continue; // ignore top or inputs which go back this node 650 PointsToNode* ptn = ptnode_adr(in->_idx); 651 assert(ptn != NULL, "node should be registered"); 652 add_edge(n_ptn, ptn); 653 } 654 break; 655 } 656 ELSE_FAIL("Op_Phi"); 657 } 658 case Op_Proj: { 659 // we are only interested in the oop result projection from a call 660 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() && 661 n->in(0)->as_Call()->returns_pointer()) { 662 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL); 663 break; 664 } 665 ELSE_FAIL("Op_Proj"); 666 } 667 case Op_Rethrow: // Exception object escapes 668 case Op_Return: { 669 if (n->req() > TypeFunc::Parms && 670 _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) { 671 // Treat Return value as LocalVar with GlobalEscape escape state. 672 add_local_var_and_edge(n, PointsToNode::GlobalEscape, 673 n->in(TypeFunc::Parms), NULL); 674 break; 675 } 676 ELSE_FAIL("Op_Return"); 677 } 678 case Op_StoreP: 679 case Op_StoreN: 680 case Op_StoreNKlass: 681 case Op_StorePConditional: 682 case Op_CompareAndSwapP: 683 case Op_CompareAndSwapN: 684 case Op_GetAndSetP: 685 case Op_GetAndSetN: { 686 Node* adr = n->in(MemNode::Address); 687 const Type *adr_type = _igvn->type(adr); 688 adr_type = adr_type->make_ptr(); 689 #ifdef ASSERT 690 if (adr_type == NULL) { 691 n->dump(1); 692 assert(adr_type != NULL, "dead node should not be on list"); 693 break; 694 } 695 #endif 696 if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN) { 697 add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL); 698 } 699 if (adr_type->isa_oopptr() || 700 (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) && 701 (adr_type == TypeRawPtr::NOTNULL && 702 adr->in(AddPNode::Address)->is_Proj() && 703 adr->in(AddPNode::Address)->in(0)->is_Allocate())) { 704 // Point Address to Value 705 PointsToNode* adr_ptn = ptnode_adr(adr->_idx); 706 assert(adr_ptn != NULL && 707 adr_ptn->as_Field()->is_oop(), "node should be registered"); 708 Node *val = n->in(MemNode::ValueIn); 709 PointsToNode* ptn = ptnode_adr(val->_idx); 710 assert(ptn != NULL, "node should be registered"); 711 add_edge(adr_ptn, ptn); 712 break; 713 } else if ((opcode == Op_StoreP) && (adr_type == TypeRawPtr::BOTTOM)) { 714 // Stored value escapes in unsafe access. 715 Node *val = n->in(MemNode::ValueIn); 716 PointsToNode* ptn = ptnode_adr(val->_idx); 717 assert(ptn != NULL, "node should be registered"); 718 set_escape_state(ptn, PointsToNode::GlobalEscape); 719 // Add edge to object for unsafe access with offset. 720 PointsToNode* adr_ptn = ptnode_adr(adr->_idx); 721 assert(adr_ptn != NULL, "node should be registered"); 722 if (adr_ptn->is_Field()) { 723 assert(adr_ptn->as_Field()->is_oop(), "should be oop field"); 724 add_edge(adr_ptn, ptn); 725 } 726 break; 727 } 728 ELSE_FAIL("Op_StoreP"); 729 } 730 case Op_AryEq: 731 case Op_StrComp: 732 case Op_StrEquals: 733 case Op_StrIndexOf: 734 case Op_EncodeISOArray: { 735 // char[] arrays passed to string intrinsic do not escape but 736 // they are not scalar replaceable. Adjust escape state for them. 737 // Start from in(2) edge since in(1) is memory edge. 738 for (uint i = 2; i < n->req(); i++) { 739 Node* adr = n->in(i); 740 const Type* at = _igvn->type(adr); 741 if (!adr->is_top() && at->isa_ptr()) { 742 assert(at == Type::TOP || at == TypePtr::NULL_PTR || 743 at->isa_ptr() != NULL, "expecting a pointer"); 744 if (adr->is_AddP()) { 745 adr = get_addp_base(adr); 746 } 747 PointsToNode* ptn = ptnode_adr(adr->_idx); 748 assert(ptn != NULL, "node should be registered"); 749 add_edge(n_ptn, ptn); 750 } 751 } 752 break; 753 } 754 default: { 755 // This method should be called only for EA specific nodes which may 756 // miss some edges when they were created. 757 #ifdef ASSERT 758 n->dump(1); 759 #endif 760 guarantee(false, "unknown node"); 761 } 762 } 763 return; 764 } 765 766 void ConnectionGraph::add_call_node(CallNode* call) { 767 assert(call->returns_pointer(), "only for call which returns pointer"); 768 uint call_idx = call->_idx; 769 if (call->is_Allocate()) { 770 Node* k = call->in(AllocateNode::KlassNode); 771 const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr(); 772 assert(kt != NULL, "TypeKlassPtr required."); 773 ciKlass* cik = kt->klass(); 774 PointsToNode::EscapeState es = PointsToNode::NoEscape; 775 bool scalar_replaceable = true; 776 if (call->is_AllocateArray()) { 777 if (!cik->is_array_klass()) { // StressReflectiveCode 778 es = PointsToNode::GlobalEscape; 779 } else { 780 int length = call->in(AllocateNode::ALength)->find_int_con(-1); 781 if (length < 0 || length > EliminateAllocationArraySizeLimit) { 782 // Not scalar replaceable if the length is not constant or too big. 783 scalar_replaceable = false; 784 } 785 } 786 } else { // Allocate instance 787 if (cik->is_subclass_of(_compile->env()->Thread_klass()) || 788 cik->is_subclass_of(_compile->env()->Reference_klass()) || 789 !cik->is_instance_klass() || // StressReflectiveCode 790 cik->as_instance_klass()->has_finalizer()) { 791 es = PointsToNode::GlobalEscape; 792 } 793 } 794 add_java_object(call, es); 795 PointsToNode* ptn = ptnode_adr(call_idx); 796 if (!scalar_replaceable && ptn->scalar_replaceable()) { 797 ptn->set_scalar_replaceable(false); 798 } 799 } else if (call->is_CallStaticJava()) { 800 // Call nodes could be different types: 801 // 802 // 1. CallDynamicJavaNode (what happened during call is unknown): 803 // 804 // - mapped to GlobalEscape JavaObject node if oop is returned; 805 // 806 // - all oop arguments are escaping globally; 807 // 808 // 2. CallStaticJavaNode (execute bytecode analysis if possible): 809 // 810 // - the same as CallDynamicJavaNode if can't do bytecode analysis; 811 // 812 // - mapped to GlobalEscape JavaObject node if unknown oop is returned; 813 // - mapped to NoEscape JavaObject node if non-escaping object allocated 814 // during call is returned; 815 // - mapped to ArgEscape LocalVar node pointed to object arguments 816 // which are returned and does not escape during call; 817 // 818 // - oop arguments escaping status is defined by bytecode analysis; 819 // 820 // For a static call, we know exactly what method is being called. 821 // Use bytecode estimator to record whether the call's return value escapes. 822 ciMethod* meth = call->as_CallJava()->method(); 823 if (meth == NULL) { 824 const char* name = call->as_CallStaticJava()->_name; 825 assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check"); 826 // Returns a newly allocated unescaped object. 827 add_java_object(call, PointsToNode::NoEscape); 828 ptnode_adr(call_idx)->set_scalar_replaceable(false); 829 } else if (meth->is_boxing_method()) { 830 // Returns boxing object 831 PointsToNode::EscapeState es; 832 vmIntrinsics::ID intr = meth->intrinsic_id(); 833 if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) { 834 // It does not escape if object is always allocated. 835 es = PointsToNode::NoEscape; 836 } else { 837 // It escapes globally if object could be loaded from cache. 838 es = PointsToNode::GlobalEscape; 839 } 840 add_java_object(call, es); 841 } else { 842 BCEscapeAnalyzer* call_analyzer = meth->get_bcea(); 843 call_analyzer->copy_dependencies(_compile->dependencies()); 844 if (call_analyzer->is_return_allocated()) { 845 // Returns a newly allocated unescaped object, simply 846 // update dependency information. 847 // Mark it as NoEscape so that objects referenced by 848 // it's fields will be marked as NoEscape at least. 849 add_java_object(call, PointsToNode::NoEscape); 850 ptnode_adr(call_idx)->set_scalar_replaceable(false); 851 } else { 852 // Determine whether any arguments are returned. 853 const TypeTuple* d = call->tf()->domain(); 854 bool ret_arg = false; 855 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 856 if (d->field_at(i)->isa_ptr() != NULL && 857 call_analyzer->is_arg_returned(i - TypeFunc::Parms)) { 858 ret_arg = true; 859 break; 860 } 861 } 862 if (ret_arg) { 863 add_local_var(call, PointsToNode::ArgEscape); 864 } else { 865 // Returns unknown object. 866 map_ideal_node(call, phantom_obj); 867 } 868 } 869 } 870 } else { 871 // An other type of call, assume the worst case: 872 // returned value is unknown and globally escapes. 873 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check"); 874 map_ideal_node(call, phantom_obj); 875 } 876 } 877 878 void ConnectionGraph::process_call_arguments(CallNode *call) { 879 bool is_arraycopy = false; 880 switch (call->Opcode()) { 881 #ifdef ASSERT 882 case Op_Allocate: 883 case Op_AllocateArray: 884 case Op_Lock: 885 case Op_Unlock: 886 assert(false, "should be done already"); 887 break; 888 #endif 889 case Op_ArrayCopy: 890 case Op_CallLeafNoFP: 891 // Most array copies are ArrayCopy nodes at this point but there 892 // are still a few direct calls to the copy subroutines (See 893 // PhaseStringOpts::copy_string()) 894 is_arraycopy = (call->Opcode() == Op_ArrayCopy) || 895 (call->as_CallLeaf()->_name != NULL && 896 strstr(call->as_CallLeaf()->_name, "arraycopy") != 0); 897 // fall through 898 case Op_CallLeaf: { 899 // Stub calls, objects do not escape but they are not scale replaceable. 900 // Adjust escape state for outgoing arguments. 901 const TypeTuple * d = call->tf()->domain(); 902 bool src_has_oops = false; 903 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 904 const Type* at = d->field_at(i); 905 Node *arg = call->in(i); 906 if (arg == NULL) { 907 continue; 908 } 909 const Type *aat = _igvn->type(arg); 910 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) 911 continue; 912 if (arg->is_AddP()) { 913 // 914 // The inline_native_clone() case when the arraycopy stub is called 915 // after the allocation before Initialize and CheckCastPP nodes. 916 // Or normal arraycopy for object arrays case. 917 // 918 // Set AddP's base (Allocate) as not scalar replaceable since 919 // pointer to the base (with offset) is passed as argument. 920 // 921 arg = get_addp_base(arg); 922 } 923 PointsToNode* arg_ptn = ptnode_adr(arg->_idx); 924 assert(arg_ptn != NULL, "should be registered"); 925 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state(); 926 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) { 927 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR || 928 aat->isa_ptr() != NULL, "expecting an Ptr"); 929 bool arg_has_oops = aat->isa_oopptr() && 930 (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() || 931 (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass())); 932 if (i == TypeFunc::Parms) { 933 src_has_oops = arg_has_oops; 934 } 935 // 936 // src or dst could be j.l.Object when other is basic type array: 937 // 938 // arraycopy(char[],0,Object*,0,size); 939 // arraycopy(Object*,0,char[],0,size); 940 // 941 // Don't add edges in such cases. 942 // 943 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy && 944 arg_has_oops && (i > TypeFunc::Parms); 945 #ifdef ASSERT 946 if (!(is_arraycopy || 947 (call->as_CallLeaf()->_name != NULL && 948 (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 || 949 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 || 950 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 || 951 strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 || 952 strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 || 953 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 || 954 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 || 955 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 || 956 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 || 957 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 || 958 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 || 959 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 || 960 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 || 961 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0) 962 ))) { 963 call->dump(); 964 fatal(err_msg_res("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name)); 965 } 966 #endif 967 // Always process arraycopy's destination object since 968 // we need to add all possible edges to references in 969 // source object. 970 if (arg_esc >= PointsToNode::ArgEscape && 971 !arg_is_arraycopy_dest) { 972 continue; 973 } 974 set_escape_state(arg_ptn, PointsToNode::ArgEscape); 975 if (arg_is_arraycopy_dest) { 976 Node* src = call->in(TypeFunc::Parms); 977 if (src->is_AddP()) { 978 src = get_addp_base(src); 979 } 980 PointsToNode* src_ptn = ptnode_adr(src->_idx); 981 assert(src_ptn != NULL, "should be registered"); 982 if (arg_ptn != src_ptn) { 983 // Special arraycopy edge: 984 // A destination object's field can't have the source object 985 // as base since objects escape states are not related. 986 // Only escape state of destination object's fields affects 987 // escape state of fields in source object. 988 add_arraycopy(call, PointsToNode::ArgEscape, src_ptn, arg_ptn); 989 } 990 } 991 } 992 } 993 break; 994 } 995 case Op_CallStaticJava: { 996 // For a static call, we know exactly what method is being called. 997 // Use bytecode estimator to record the call's escape affects 998 #ifdef ASSERT 999 const char* name = call->as_CallStaticJava()->_name; 1000 assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only"); 1001 #endif 1002 ciMethod* meth = call->as_CallJava()->method(); 1003 if ((meth != NULL) && meth->is_boxing_method()) { 1004 break; // Boxing methods do not modify any oops. 1005 } 1006 BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL; 1007 // fall-through if not a Java method or no analyzer information 1008 if (call_analyzer != NULL) { 1009 PointsToNode* call_ptn = ptnode_adr(call->_idx); 1010 const TypeTuple* d = call->tf()->domain(); 1011 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 1012 const Type* at = d->field_at(i); 1013 int k = i - TypeFunc::Parms; 1014 Node* arg = call->in(i); 1015 PointsToNode* arg_ptn = ptnode_adr(arg->_idx); 1016 if (at->isa_ptr() != NULL && 1017 call_analyzer->is_arg_returned(k)) { 1018 // The call returns arguments. 1019 if (call_ptn != NULL) { // Is call's result used? 1020 assert(call_ptn->is_LocalVar(), "node should be registered"); 1021 assert(arg_ptn != NULL, "node should be registered"); 1022 add_edge(call_ptn, arg_ptn); 1023 } 1024 } 1025 if (at->isa_oopptr() != NULL && 1026 arg_ptn->escape_state() < PointsToNode::GlobalEscape) { 1027 if (!call_analyzer->is_arg_stack(k)) { 1028 // The argument global escapes 1029 set_escape_state(arg_ptn, PointsToNode::GlobalEscape); 1030 } else { 1031 set_escape_state(arg_ptn, PointsToNode::ArgEscape); 1032 if (!call_analyzer->is_arg_local(k)) { 1033 // The argument itself doesn't escape, but any fields might 1034 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape); 1035 } 1036 } 1037 } 1038 } 1039 if (call_ptn != NULL && call_ptn->is_LocalVar()) { 1040 // The call returns arguments. 1041 assert(call_ptn->edge_count() > 0, "sanity"); 1042 if (!call_analyzer->is_return_local()) { 1043 // Returns also unknown object. 1044 add_edge(call_ptn, phantom_obj); 1045 } 1046 } 1047 break; 1048 } 1049 } 1050 default: { 1051 // Fall-through here if not a Java method or no analyzer information 1052 // or some other type of call, assume the worst case: all arguments 1053 // globally escape. 1054 const TypeTuple* d = call->tf()->domain(); 1055 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 1056 const Type* at = d->field_at(i); 1057 if (at->isa_oopptr() != NULL) { 1058 Node* arg = call->in(i); 1059 if (arg->is_AddP()) { 1060 arg = get_addp_base(arg); 1061 } 1062 assert(ptnode_adr(arg->_idx) != NULL, "should be defined already"); 1063 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape); 1064 } 1065 } 1066 } 1067 } 1068 } 1069 1070 1071 // Finish Graph construction. 1072 bool ConnectionGraph::complete_connection_graph( 1073 GrowableArray<PointsToNode*>& ptnodes_worklist, 1074 GrowableArray<JavaObjectNode*>& non_escaped_worklist, 1075 GrowableArray<JavaObjectNode*>& java_objects_worklist, 1076 GrowableArray<FieldNode*>& oop_fields_worklist) { 1077 // Normally only 1-3 passes needed to build Connection Graph depending 1078 // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler. 1079 // Set limit to 20 to catch situation when something did go wrong and 1080 // bailout Escape Analysis. 1081 // Also limit build time to 30 sec (60 in debug VM). 1082 #define CG_BUILD_ITER_LIMIT 20 1083 #ifdef ASSERT 1084 #define CG_BUILD_TIME_LIMIT 60.0 1085 #else 1086 #define CG_BUILD_TIME_LIMIT 30.0 1087 #endif 1088 1089 // Propagate GlobalEscape and ArgEscape escape states and check that 1090 // we still have non-escaping objects. The method pushs on _worklist 1091 // Field nodes which reference phantom_object. 1092 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) { 1093 return false; // Nothing to do. 1094 } 1095 // Now propagate references to all JavaObject nodes. 1096 int java_objects_length = java_objects_worklist.length(); 1097 elapsedTimer time; 1098 bool timeout = false; 1099 int new_edges = 1; 1100 int iterations = 0; 1101 do { 1102 while ((new_edges > 0) && 1103 (iterations++ < CG_BUILD_ITER_LIMIT)) { 1104 double start_time = time.seconds(); 1105 time.start(); 1106 new_edges = 0; 1107 // Propagate references to phantom_object for nodes pushed on _worklist 1108 // by find_non_escaped_objects() and find_field_value(). 1109 new_edges += add_java_object_edges(phantom_obj, false); 1110 for (int next = 0; next < java_objects_length; ++next) { 1111 JavaObjectNode* ptn = java_objects_worklist.at(next); 1112 new_edges += add_java_object_edges(ptn, true); 1113 if ((next & 3) == 0) { 1114 // Each 4 iterations calculate how much time it will take 1115 // to complete graph construction. 1116 time.stop(); 1117 double stop_time = time.seconds(); 1118 double time_per_iter = (stop_time - start_time) * 0.25; 1119 double time_until_end = time_per_iter * (double)(java_objects_length - next); 1120 if ((start_time + time_until_end) >= CG_BUILD_TIME_LIMIT) { 1121 timeout = true; 1122 break; // Timeout 1123 } 1124 start_time = stop_time; 1125 time.start(); 1126 } 1127 } 1128 if (timeout) break; 1129 if (new_edges > 0) { 1130 // Update escape states on each iteration if graph was updated. 1131 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) { 1132 return false; // Nothing to do. 1133 } 1134 } 1135 time.stop(); 1136 if (time.seconds() >= CG_BUILD_TIME_LIMIT) { 1137 timeout = true; 1138 break; 1139 } 1140 } 1141 if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) { 1142 time.start(); 1143 // Find fields which have unknown value. 1144 int fields_length = oop_fields_worklist.length(); 1145 for (int next = 0; next < fields_length; next++) { 1146 FieldNode* field = oop_fields_worklist.at(next); 1147 if (field->edge_count() == 0) { 1148 new_edges += find_field_value(field); 1149 // This code may added new edges to phantom_object. 1150 // Need an other cycle to propagate references to phantom_object. 1151 } 1152 } 1153 time.stop(); 1154 if (time.seconds() >= CG_BUILD_TIME_LIMIT) { 1155 timeout = true; 1156 break; 1157 } 1158 } else { 1159 new_edges = 0; // Bailout 1160 } 1161 } while (new_edges > 0); 1162 1163 // Bailout if passed limits. 1164 if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) { 1165 Compile* C = _compile; 1166 if (C->log() != NULL) { 1167 C->log()->begin_elem("connectionGraph_bailout reason='reached "); 1168 C->log()->text("%s", timeout ? "time" : "iterations"); 1169 C->log()->end_elem(" limit'"); 1170 } 1171 assert(ExitEscapeAnalysisOnTimeout, err_msg_res("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d", 1172 time.seconds(), iterations, nodes_size(), ptnodes_worklist.length())); 1173 // Possible infinite build_connection_graph loop, 1174 // bailout (no changes to ideal graph were made). 1175 return false; 1176 } 1177 #ifdef ASSERT 1178 if (Verbose && PrintEscapeAnalysis) { 1179 tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d", 1180 iterations, nodes_size(), ptnodes_worklist.length()); 1181 } 1182 #endif 1183 1184 #undef CG_BUILD_ITER_LIMIT 1185 #undef CG_BUILD_TIME_LIMIT 1186 1187 // Find fields initialized by NULL for non-escaping Allocations. 1188 int non_escaped_length = non_escaped_worklist.length(); 1189 for (int next = 0; next < non_escaped_length; next++) { 1190 JavaObjectNode* ptn = non_escaped_worklist.at(next); 1191 PointsToNode::EscapeState es = ptn->escape_state(); 1192 assert(es <= PointsToNode::ArgEscape, "sanity"); 1193 if (es == PointsToNode::NoEscape) { 1194 if (find_init_values(ptn, null_obj, _igvn) > 0) { 1195 // Adding references to NULL object does not change escape states 1196 // since it does not escape. Also no fields are added to NULL object. 1197 add_java_object_edges(null_obj, false); 1198 } 1199 } 1200 Node* n = ptn->ideal_node(); 1201 if (n->is_Allocate()) { 1202 // The object allocated by this Allocate node will never be 1203 // seen by an other thread. Mark it so that when it is 1204 // expanded no MemBarStoreStore is added. 1205 InitializeNode* ini = n->as_Allocate()->initialization(); 1206 if (ini != NULL) 1207 ini->set_does_not_escape(); 1208 } 1209 } 1210 return true; // Finished graph construction. 1211 } 1212 1213 // Propagate GlobalEscape and ArgEscape escape states to all nodes 1214 // and check that we still have non-escaping java objects. 1215 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist, 1216 GrowableArray<JavaObjectNode*>& non_escaped_worklist) { 1217 GrowableArray<PointsToNode*> escape_worklist; 1218 // First, put all nodes with GlobalEscape and ArgEscape states on worklist. 1219 int ptnodes_length = ptnodes_worklist.length(); 1220 for (int next = 0; next < ptnodes_length; ++next) { 1221 PointsToNode* ptn = ptnodes_worklist.at(next); 1222 if (ptn->escape_state() >= PointsToNode::ArgEscape || 1223 ptn->fields_escape_state() >= PointsToNode::ArgEscape) { 1224 escape_worklist.push(ptn); 1225 } 1226 } 1227 // Set escape states to referenced nodes (edges list). 1228 while (escape_worklist.length() > 0) { 1229 PointsToNode* ptn = escape_worklist.pop(); 1230 PointsToNode::EscapeState es = ptn->escape_state(); 1231 PointsToNode::EscapeState field_es = ptn->fields_escape_state(); 1232 if (ptn->is_Field() && ptn->as_Field()->is_oop() && 1233 es >= PointsToNode::ArgEscape) { 1234 // GlobalEscape or ArgEscape state of field means it has unknown value. 1235 if (add_edge(ptn, phantom_obj)) { 1236 // New edge was added 1237 add_field_uses_to_worklist(ptn->as_Field()); 1238 } 1239 } 1240 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 1241 PointsToNode* e = i.get(); 1242 if (e->is_Arraycopy()) { 1243 assert(ptn->arraycopy_dst(), "sanity"); 1244 // Propagate only fields escape state through arraycopy edge. 1245 if (e->fields_escape_state() < field_es) { 1246 set_fields_escape_state(e, field_es); 1247 escape_worklist.push(e); 1248 } 1249 } else if (es >= field_es) { 1250 // fields_escape_state is also set to 'es' if it is less than 'es'. 1251 if (e->escape_state() < es) { 1252 set_escape_state(e, es); 1253 escape_worklist.push(e); 1254 } 1255 } else { 1256 // Propagate field escape state. 1257 bool es_changed = false; 1258 if (e->fields_escape_state() < field_es) { 1259 set_fields_escape_state(e, field_es); 1260 es_changed = true; 1261 } 1262 if ((e->escape_state() < field_es) && 1263 e->is_Field() && ptn->is_JavaObject() && 1264 e->as_Field()->is_oop()) { 1265 // Change escape state of referenced fileds. 1266 set_escape_state(e, field_es); 1267 es_changed = true;; 1268 } else if (e->escape_state() < es) { 1269 set_escape_state(e, es); 1270 es_changed = true;; 1271 } 1272 if (es_changed) { 1273 escape_worklist.push(e); 1274 } 1275 } 1276 } 1277 } 1278 // Remove escaped objects from non_escaped list. 1279 for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) { 1280 JavaObjectNode* ptn = non_escaped_worklist.at(next); 1281 if (ptn->escape_state() >= PointsToNode::GlobalEscape) { 1282 non_escaped_worklist.delete_at(next); 1283 } 1284 if (ptn->escape_state() == PointsToNode::NoEscape) { 1285 // Find fields in non-escaped allocations which have unknown value. 1286 find_init_values(ptn, phantom_obj, NULL); 1287 } 1288 } 1289 return (non_escaped_worklist.length() > 0); 1290 } 1291 1292 // Add all references to JavaObject node by walking over all uses. 1293 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) { 1294 int new_edges = 0; 1295 if (populate_worklist) { 1296 // Populate _worklist by uses of jobj's uses. 1297 for (UseIterator i(jobj); i.has_next(); i.next()) { 1298 PointsToNode* use = i.get(); 1299 if (use->is_Arraycopy()) 1300 continue; 1301 add_uses_to_worklist(use); 1302 if (use->is_Field() && use->as_Field()->is_oop()) { 1303 // Put on worklist all field's uses (loads) and 1304 // related field nodes (same base and offset). 1305 add_field_uses_to_worklist(use->as_Field()); 1306 } 1307 } 1308 } 1309 for (int l = 0; l < _worklist.length(); l++) { 1310 PointsToNode* use = _worklist.at(l); 1311 if (PointsToNode::is_base_use(use)) { 1312 // Add reference from jobj to field and from field to jobj (field's base). 1313 use = PointsToNode::get_use_node(use)->as_Field(); 1314 if (add_base(use->as_Field(), jobj)) { 1315 new_edges++; 1316 } 1317 continue; 1318 } 1319 assert(!use->is_JavaObject(), "sanity"); 1320 if (use->is_Arraycopy()) { 1321 if (jobj == null_obj) // NULL object does not have field edges 1322 continue; 1323 // Added edge from Arraycopy node to arraycopy's source java object 1324 if (add_edge(use, jobj)) { 1325 jobj->set_arraycopy_src(); 1326 new_edges++; 1327 } 1328 // and stop here. 1329 continue; 1330 } 1331 if (!add_edge(use, jobj)) 1332 continue; // No new edge added, there was such edge already. 1333 new_edges++; 1334 if (use->is_LocalVar()) { 1335 add_uses_to_worklist(use); 1336 if (use->arraycopy_dst()) { 1337 for (EdgeIterator i(use); i.has_next(); i.next()) { 1338 PointsToNode* e = i.get(); 1339 if (e->is_Arraycopy()) { 1340 if (jobj == null_obj) // NULL object does not have field edges 1341 continue; 1342 // Add edge from arraycopy's destination java object to Arraycopy node. 1343 if (add_edge(jobj, e)) { 1344 new_edges++; 1345 jobj->set_arraycopy_dst(); 1346 } 1347 } 1348 } 1349 } 1350 } else { 1351 // Added new edge to stored in field values. 1352 // Put on worklist all field's uses (loads) and 1353 // related field nodes (same base and offset). 1354 add_field_uses_to_worklist(use->as_Field()); 1355 } 1356 } 1357 _worklist.clear(); 1358 _in_worklist.Reset(); 1359 return new_edges; 1360 } 1361 1362 // Put on worklist all related field nodes. 1363 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) { 1364 assert(field->is_oop(), "sanity"); 1365 int offset = field->offset(); 1366 add_uses_to_worklist(field); 1367 // Loop over all bases of this field and push on worklist Field nodes 1368 // with the same offset and base (since they may reference the same field). 1369 for (BaseIterator i(field); i.has_next(); i.next()) { 1370 PointsToNode* base = i.get(); 1371 add_fields_to_worklist(field, base); 1372 // Check if the base was source object of arraycopy and go over arraycopy's 1373 // destination objects since values stored to a field of source object are 1374 // accessable by uses (loads) of fields of destination objects. 1375 if (base->arraycopy_src()) { 1376 for (UseIterator j(base); j.has_next(); j.next()) { 1377 PointsToNode* arycp = j.get(); 1378 if (arycp->is_Arraycopy()) { 1379 for (UseIterator k(arycp); k.has_next(); k.next()) { 1380 PointsToNode* abase = k.get(); 1381 if (abase->arraycopy_dst() && abase != base) { 1382 // Look for the same arracopy reference. 1383 add_fields_to_worklist(field, abase); 1384 } 1385 } 1386 } 1387 } 1388 } 1389 } 1390 } 1391 1392 // Put on worklist all related field nodes. 1393 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) { 1394 int offset = field->offset(); 1395 if (base->is_LocalVar()) { 1396 for (UseIterator j(base); j.has_next(); j.next()) { 1397 PointsToNode* f = j.get(); 1398 if (PointsToNode::is_base_use(f)) { // Field 1399 f = PointsToNode::get_use_node(f); 1400 if (f == field || !f->as_Field()->is_oop()) 1401 continue; 1402 int offs = f->as_Field()->offset(); 1403 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) { 1404 add_to_worklist(f); 1405 } 1406 } 1407 } 1408 } else { 1409 assert(base->is_JavaObject(), "sanity"); 1410 if (// Skip phantom_object since it is only used to indicate that 1411 // this field's content globally escapes. 1412 (base != phantom_obj) && 1413 // NULL object node does not have fields. 1414 (base != null_obj)) { 1415 for (EdgeIterator i(base); i.has_next(); i.next()) { 1416 PointsToNode* f = i.get(); 1417 // Skip arraycopy edge since store to destination object field 1418 // does not update value in source object field. 1419 if (f->is_Arraycopy()) { 1420 assert(base->arraycopy_dst(), "sanity"); 1421 continue; 1422 } 1423 if (f == field || !f->as_Field()->is_oop()) 1424 continue; 1425 int offs = f->as_Field()->offset(); 1426 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) { 1427 add_to_worklist(f); 1428 } 1429 } 1430 } 1431 } 1432 } 1433 1434 // Find fields which have unknown value. 1435 int ConnectionGraph::find_field_value(FieldNode* field) { 1436 // Escaped fields should have init value already. 1437 assert(field->escape_state() == PointsToNode::NoEscape, "sanity"); 1438 int new_edges = 0; 1439 for (BaseIterator i(field); i.has_next(); i.next()) { 1440 PointsToNode* base = i.get(); 1441 if (base->is_JavaObject()) { 1442 // Skip Allocate's fields which will be processed later. 1443 if (base->ideal_node()->is_Allocate()) 1444 return 0; 1445 assert(base == null_obj, "only NULL ptr base expected here"); 1446 } 1447 } 1448 if (add_edge(field, phantom_obj)) { 1449 // New edge was added 1450 new_edges++; 1451 add_field_uses_to_worklist(field); 1452 } 1453 return new_edges; 1454 } 1455 1456 // Find fields initializing values for allocations. 1457 int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) { 1458 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only"); 1459 int new_edges = 0; 1460 Node* alloc = pta->ideal_node(); 1461 if (init_val == phantom_obj) { 1462 // Do nothing for Allocate nodes since its fields values are "known". 1463 if (alloc->is_Allocate()) 1464 return 0; 1465 assert(alloc->as_CallStaticJava(), "sanity"); 1466 #ifdef ASSERT 1467 if (alloc->as_CallStaticJava()->method() == NULL) { 1468 const char* name = alloc->as_CallStaticJava()->_name; 1469 assert(strncmp(name, "_multianewarray", 15) == 0, "sanity"); 1470 } 1471 #endif 1472 // Non-escaped allocation returned from Java or runtime call have 1473 // unknown values in fields. 1474 for (EdgeIterator i(pta); i.has_next(); i.next()) { 1475 PointsToNode* field = i.get(); 1476 if (field->is_Field() && field->as_Field()->is_oop()) { 1477 if (add_edge(field, phantom_obj)) { 1478 // New edge was added 1479 new_edges++; 1480 add_field_uses_to_worklist(field->as_Field()); 1481 } 1482 } 1483 } 1484 return new_edges; 1485 } 1486 assert(init_val == null_obj, "sanity"); 1487 // Do nothing for Call nodes since its fields values are unknown. 1488 if (!alloc->is_Allocate()) 1489 return 0; 1490 1491 InitializeNode* ini = alloc->as_Allocate()->initialization(); 1492 bool visited_bottom_offset = false; 1493 GrowableArray<int> offsets_worklist; 1494 1495 // Check if an oop field's initializing value is recorded and add 1496 // a corresponding NULL if field's value if it is not recorded. 1497 // Connection Graph does not record a default initialization by NULL 1498 // captured by Initialize node. 1499 // 1500 for (EdgeIterator i(pta); i.has_next(); i.next()) { 1501 PointsToNode* field = i.get(); // Field (AddP) 1502 if (!field->is_Field() || !field->as_Field()->is_oop()) 1503 continue; // Not oop field 1504 int offset = field->as_Field()->offset(); 1505 if (offset == Type::OffsetBot) { 1506 if (!visited_bottom_offset) { 1507 // OffsetBot is used to reference array's element, 1508 // always add reference to NULL to all Field nodes since we don't 1509 // known which element is referenced. 1510 if (add_edge(field, null_obj)) { 1511 // New edge was added 1512 new_edges++; 1513 add_field_uses_to_worklist(field->as_Field()); 1514 visited_bottom_offset = true; 1515 } 1516 } 1517 } else { 1518 // Check only oop fields. 1519 const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type(); 1520 if (adr_type->isa_rawptr()) { 1521 #ifdef ASSERT 1522 // Raw pointers are used for initializing stores so skip it 1523 // since it should be recorded already 1524 Node* base = get_addp_base(field->ideal_node()); 1525 assert(adr_type->isa_rawptr() && base->is_Proj() && 1526 (base->in(0) == alloc),"unexpected pointer type"); 1527 #endif 1528 continue; 1529 } 1530 if (!offsets_worklist.contains(offset)) { 1531 offsets_worklist.append(offset); 1532 Node* value = NULL; 1533 if (ini != NULL) { 1534 // StoreP::memory_type() == T_ADDRESS 1535 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS; 1536 Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase); 1537 // Make sure initializing store has the same type as this AddP. 1538 // This AddP may reference non existing field because it is on a 1539 // dead branch of bimorphic call which is not eliminated yet. 1540 if (store != NULL && store->is_Store() && 1541 store->as_Store()->memory_type() == ft) { 1542 value = store->in(MemNode::ValueIn); 1543 #ifdef ASSERT 1544 if (VerifyConnectionGraph) { 1545 // Verify that AddP already points to all objects the value points to. 1546 PointsToNode* val = ptnode_adr(value->_idx); 1547 assert((val != NULL), "should be processed already"); 1548 PointsToNode* missed_obj = NULL; 1549 if (val->is_JavaObject()) { 1550 if (!field->points_to(val->as_JavaObject())) { 1551 missed_obj = val; 1552 } 1553 } else { 1554 if (!val->is_LocalVar() || (val->edge_count() == 0)) { 1555 tty->print_cr("----------init store has invalid value -----"); 1556 store->dump(); 1557 val->dump(); 1558 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already"); 1559 } 1560 for (EdgeIterator j(val); j.has_next(); j.next()) { 1561 PointsToNode* obj = j.get(); 1562 if (obj->is_JavaObject()) { 1563 if (!field->points_to(obj->as_JavaObject())) { 1564 missed_obj = obj; 1565 break; 1566 } 1567 } 1568 } 1569 } 1570 if (missed_obj != NULL) { 1571 tty->print_cr("----------field---------------------------------"); 1572 field->dump(); 1573 tty->print_cr("----------missed referernce to object-----------"); 1574 missed_obj->dump(); 1575 tty->print_cr("----------object referernced by init store -----"); 1576 store->dump(); 1577 val->dump(); 1578 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference"); 1579 } 1580 } 1581 #endif 1582 } else { 1583 // There could be initializing stores which follow allocation. 1584 // For example, a volatile field store is not collected 1585 // by Initialize node. 1586 // 1587 // Need to check for dependent loads to separate such stores from 1588 // stores which follow loads. For now, add initial value NULL so 1589 // that compare pointers optimization works correctly. 1590 } 1591 } 1592 if (value == NULL) { 1593 // A field's initializing value was not recorded. Add NULL. 1594 if (add_edge(field, null_obj)) { 1595 // New edge was added 1596 new_edges++; 1597 add_field_uses_to_worklist(field->as_Field()); 1598 } 1599 } 1600 } 1601 } 1602 } 1603 return new_edges; 1604 } 1605 1606 // Adjust scalar_replaceable state after Connection Graph is built. 1607 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) { 1608 // Search for non-escaping objects which are not scalar replaceable 1609 // and mark them to propagate the state to referenced objects. 1610 1611 // 1. An object is not scalar replaceable if the field into which it is 1612 // stored has unknown offset (stored into unknown element of an array). 1613 // 1614 for (UseIterator i(jobj); i.has_next(); i.next()) { 1615 PointsToNode* use = i.get(); 1616 assert(!use->is_Arraycopy(), "sanity"); 1617 if (use->is_Field()) { 1618 FieldNode* field = use->as_Field(); 1619 assert(field->is_oop() && field->scalar_replaceable() && 1620 field->fields_escape_state() == PointsToNode::NoEscape, "sanity"); 1621 if (field->offset() == Type::OffsetBot) { 1622 jobj->set_scalar_replaceable(false); 1623 return; 1624 } 1625 // 2. An object is not scalar replaceable if the field into which it is 1626 // stored has multiple bases one of which is null. 1627 if (field->base_count() > 1) { 1628 for (BaseIterator i(field); i.has_next(); i.next()) { 1629 PointsToNode* base = i.get(); 1630 if (base == null_obj) { 1631 jobj->set_scalar_replaceable(false); 1632 return; 1633 } 1634 } 1635 } 1636 } 1637 assert(use->is_Field() || use->is_LocalVar(), "sanity"); 1638 // 3. An object is not scalar replaceable if it is merged with other objects. 1639 for (EdgeIterator j(use); j.has_next(); j.next()) { 1640 PointsToNode* ptn = j.get(); 1641 if (ptn->is_JavaObject() && ptn != jobj) { 1642 // Mark all objects. 1643 jobj->set_scalar_replaceable(false); 1644 ptn->set_scalar_replaceable(false); 1645 } 1646 } 1647 if (!jobj->scalar_replaceable()) { 1648 return; 1649 } 1650 } 1651 1652 for (EdgeIterator j(jobj); j.has_next(); j.next()) { 1653 // Non-escaping object node should point only to field nodes. 1654 FieldNode* field = j.get()->as_Field(); 1655 int offset = field->as_Field()->offset(); 1656 1657 // 4. An object is not scalar replaceable if it has a field with unknown 1658 // offset (array's element is accessed in loop). 1659 if (offset == Type::OffsetBot) { 1660 jobj->set_scalar_replaceable(false); 1661 return; 1662 } 1663 // 5. Currently an object is not scalar replaceable if a LoadStore node 1664 // access its field since the field value is unknown after it. 1665 // 1666 Node* n = field->ideal_node(); 1667 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1668 if (n->fast_out(i)->is_LoadStore()) { 1669 jobj->set_scalar_replaceable(false); 1670 return; 1671 } 1672 } 1673 1674 // 6. Or the address may point to more then one object. This may produce 1675 // the false positive result (set not scalar replaceable) 1676 // since the flow-insensitive escape analysis can't separate 1677 // the case when stores overwrite the field's value from the case 1678 // when stores happened on different control branches. 1679 // 1680 // Note: it will disable scalar replacement in some cases: 1681 // 1682 // Point p[] = new Point[1]; 1683 // p[0] = new Point(); // Will be not scalar replaced 1684 // 1685 // but it will save us from incorrect optimizations in next cases: 1686 // 1687 // Point p[] = new Point[1]; 1688 // if ( x ) p[0] = new Point(); // Will be not scalar replaced 1689 // 1690 if (field->base_count() > 1) { 1691 for (BaseIterator i(field); i.has_next(); i.next()) { 1692 PointsToNode* base = i.get(); 1693 // Don't take into account LocalVar nodes which 1694 // may point to only one object which should be also 1695 // this field's base by now. 1696 if (base->is_JavaObject() && base != jobj) { 1697 // Mark all bases. 1698 jobj->set_scalar_replaceable(false); 1699 base->set_scalar_replaceable(false); 1700 } 1701 } 1702 } 1703 } 1704 } 1705 1706 #ifdef ASSERT 1707 void ConnectionGraph::verify_connection_graph( 1708 GrowableArray<PointsToNode*>& ptnodes_worklist, 1709 GrowableArray<JavaObjectNode*>& non_escaped_worklist, 1710 GrowableArray<JavaObjectNode*>& java_objects_worklist, 1711 GrowableArray<Node*>& addp_worklist) { 1712 // Verify that graph is complete - no new edges could be added. 1713 int java_objects_length = java_objects_worklist.length(); 1714 int non_escaped_length = non_escaped_worklist.length(); 1715 int new_edges = 0; 1716 for (int next = 0; next < java_objects_length; ++next) { 1717 JavaObjectNode* ptn = java_objects_worklist.at(next); 1718 new_edges += add_java_object_edges(ptn, true); 1719 } 1720 assert(new_edges == 0, "graph was not complete"); 1721 // Verify that escape state is final. 1722 int length = non_escaped_worklist.length(); 1723 find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist); 1724 assert((non_escaped_length == non_escaped_worklist.length()) && 1725 (non_escaped_length == length) && 1726 (_worklist.length() == 0), "escape state was not final"); 1727 1728 // Verify fields information. 1729 int addp_length = addp_worklist.length(); 1730 for (int next = 0; next < addp_length; ++next ) { 1731 Node* n = addp_worklist.at(next); 1732 FieldNode* field = ptnode_adr(n->_idx)->as_Field(); 1733 if (field->is_oop()) { 1734 // Verify that field has all bases 1735 Node* base = get_addp_base(n); 1736 PointsToNode* ptn = ptnode_adr(base->_idx); 1737 if (ptn->is_JavaObject()) { 1738 assert(field->has_base(ptn->as_JavaObject()), "sanity"); 1739 } else { 1740 assert(ptn->is_LocalVar(), "sanity"); 1741 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 1742 PointsToNode* e = i.get(); 1743 if (e->is_JavaObject()) { 1744 assert(field->has_base(e->as_JavaObject()), "sanity"); 1745 } 1746 } 1747 } 1748 // Verify that all fields have initializing values. 1749 if (field->edge_count() == 0) { 1750 tty->print_cr("----------field does not have references----------"); 1751 field->dump(); 1752 for (BaseIterator i(field); i.has_next(); i.next()) { 1753 PointsToNode* base = i.get(); 1754 tty->print_cr("----------field has next base---------------------"); 1755 base->dump(); 1756 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) { 1757 tty->print_cr("----------base has fields-------------------------"); 1758 for (EdgeIterator j(base); j.has_next(); j.next()) { 1759 j.get()->dump(); 1760 } 1761 tty->print_cr("----------base has references---------------------"); 1762 for (UseIterator j(base); j.has_next(); j.next()) { 1763 j.get()->dump(); 1764 } 1765 } 1766 } 1767 for (UseIterator i(field); i.has_next(); i.next()) { 1768 i.get()->dump(); 1769 } 1770 assert(field->edge_count() > 0, "sanity"); 1771 } 1772 } 1773 } 1774 } 1775 #endif 1776 1777 // Optimize ideal graph. 1778 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist, 1779 GrowableArray<Node*>& storestore_worklist) { 1780 Compile* C = _compile; 1781 PhaseIterGVN* igvn = _igvn; 1782 if (EliminateLocks) { 1783 // Mark locks before changing ideal graph. 1784 int cnt = C->macro_count(); 1785 for( int i=0; i < cnt; i++ ) { 1786 Node *n = C->macro_node(i); 1787 if (n->is_AbstractLock()) { // Lock and Unlock nodes 1788 AbstractLockNode* alock = n->as_AbstractLock(); 1789 if (!alock->is_non_esc_obj()) { 1790 if (not_global_escape(alock->obj_node())) { 1791 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity"); 1792 // The lock could be marked eliminated by lock coarsening 1793 // code during first IGVN before EA. Replace coarsened flag 1794 // to eliminate all associated locks/unlocks. 1795 alock->set_non_esc_obj(); 1796 } 1797 } 1798 } 1799 } 1800 } 1801 1802 if (OptimizePtrCompare) { 1803 // Add ConI(#CC_GT) and ConI(#CC_EQ). 1804 _pcmp_neq = igvn->makecon(TypeInt::CC_GT); 1805 _pcmp_eq = igvn->makecon(TypeInt::CC_EQ); 1806 // Optimize objects compare. 1807 while (ptr_cmp_worklist.length() != 0) { 1808 Node *n = ptr_cmp_worklist.pop(); 1809 Node *res = optimize_ptr_compare(n); 1810 if (res != NULL) { 1811 #ifndef PRODUCT 1812 if (PrintOptimizePtrCompare) { 1813 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ")); 1814 if (Verbose) { 1815 n->dump(1); 1816 } 1817 } 1818 #endif 1819 igvn->replace_node(n, res); 1820 } 1821 } 1822 // cleanup 1823 if (_pcmp_neq->outcnt() == 0) 1824 igvn->hash_delete(_pcmp_neq); 1825 if (_pcmp_eq->outcnt() == 0) 1826 igvn->hash_delete(_pcmp_eq); 1827 } 1828 1829 // For MemBarStoreStore nodes added in library_call.cpp, check 1830 // escape status of associated AllocateNode and optimize out 1831 // MemBarStoreStore node if the allocated object never escapes. 1832 while (storestore_worklist.length() != 0) { 1833 Node *n = storestore_worklist.pop(); 1834 MemBarStoreStoreNode *storestore = n ->as_MemBarStoreStore(); 1835 Node *alloc = storestore->in(MemBarNode::Precedent)->in(0); 1836 assert (alloc->is_Allocate(), "storestore should point to AllocateNode"); 1837 if (not_global_escape(alloc)) { 1838 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot); 1839 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory)); 1840 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control)); 1841 igvn->register_new_node_with_optimizer(mb); 1842 igvn->replace_node(storestore, mb); 1843 } 1844 } 1845 } 1846 1847 // Optimize objects compare. 1848 Node* ConnectionGraph::optimize_ptr_compare(Node* n) { 1849 assert(OptimizePtrCompare, "sanity"); 1850 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx); 1851 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx); 1852 JavaObjectNode* jobj1 = unique_java_object(n->in(1)); 1853 JavaObjectNode* jobj2 = unique_java_object(n->in(2)); 1854 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity"); 1855 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity"); 1856 1857 // Check simple cases first. 1858 if (jobj1 != NULL) { 1859 if (jobj1->escape_state() == PointsToNode::NoEscape) { 1860 if (jobj1 == jobj2) { 1861 // Comparing the same not escaping object. 1862 return _pcmp_eq; 1863 } 1864 Node* obj = jobj1->ideal_node(); 1865 // Comparing not escaping allocation. 1866 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 1867 !ptn2->points_to(jobj1)) { 1868 return _pcmp_neq; // This includes nullness check. 1869 } 1870 } 1871 } 1872 if (jobj2 != NULL) { 1873 if (jobj2->escape_state() == PointsToNode::NoEscape) { 1874 Node* obj = jobj2->ideal_node(); 1875 // Comparing not escaping allocation. 1876 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 1877 !ptn1->points_to(jobj2)) { 1878 return _pcmp_neq; // This includes nullness check. 1879 } 1880 } 1881 } 1882 if (jobj1 != NULL && jobj1 != phantom_obj && 1883 jobj2 != NULL && jobj2 != phantom_obj && 1884 jobj1->ideal_node()->is_Con() && 1885 jobj2->ideal_node()->is_Con()) { 1886 // Klass or String constants compare. Need to be careful with 1887 // compressed pointers - compare types of ConN and ConP instead of nodes. 1888 const Type* t1 = jobj1->ideal_node()->get_ptr_type(); 1889 const Type* t2 = jobj2->ideal_node()->get_ptr_type(); 1890 if (t1->make_ptr() == t2->make_ptr()) { 1891 return _pcmp_eq; 1892 } else { 1893 return _pcmp_neq; 1894 } 1895 } 1896 if (ptn1->meet(ptn2)) { 1897 return NULL; // Sets are not disjoint 1898 } 1899 1900 // Sets are disjoint. 1901 bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj); 1902 bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj); 1903 bool set1_has_null_ptr = ptn1->points_to(null_obj); 1904 bool set2_has_null_ptr = ptn2->points_to(null_obj); 1905 if (set1_has_unknown_ptr && set2_has_null_ptr || 1906 set2_has_unknown_ptr && set1_has_null_ptr) { 1907 // Check nullness of unknown object. 1908 return NULL; 1909 } 1910 1911 // Disjointness by itself is not sufficient since 1912 // alias analysis is not complete for escaped objects. 1913 // Disjoint sets are definitely unrelated only when 1914 // at least one set has only not escaping allocations. 1915 if (!set1_has_unknown_ptr && !set1_has_null_ptr) { 1916 if (ptn1->non_escaping_allocation()) { 1917 return _pcmp_neq; 1918 } 1919 } 1920 if (!set2_has_unknown_ptr && !set2_has_null_ptr) { 1921 if (ptn2->non_escaping_allocation()) { 1922 return _pcmp_neq; 1923 } 1924 } 1925 return NULL; 1926 } 1927 1928 // Connection Graph constuction functions. 1929 1930 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) { 1931 PointsToNode* ptadr = _nodes.at(n->_idx); 1932 if (ptadr != NULL) { 1933 assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity"); 1934 return; 1935 } 1936 Compile* C = _compile; 1937 ptadr = new (C->comp_arena()) LocalVarNode(this, n, es); 1938 _nodes.at_put(n->_idx, ptadr); 1939 } 1940 1941 void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) { 1942 PointsToNode* ptadr = _nodes.at(n->_idx); 1943 if (ptadr != NULL) { 1944 assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity"); 1945 return; 1946 } 1947 Compile* C = _compile; 1948 ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es); 1949 _nodes.at_put(n->_idx, ptadr); 1950 } 1951 1952 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) { 1953 PointsToNode* ptadr = _nodes.at(n->_idx); 1954 if (ptadr != NULL) { 1955 assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity"); 1956 return; 1957 } 1958 bool unsafe = false; 1959 bool is_oop = is_oop_field(n, offset, &unsafe); 1960 if (unsafe) { 1961 es = PointsToNode::GlobalEscape; 1962 } 1963 Compile* C = _compile; 1964 FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop); 1965 _nodes.at_put(n->_idx, field); 1966 } 1967 1968 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es, 1969 PointsToNode* src, PointsToNode* dst) { 1970 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar"); 1971 assert((src != null_obj) && (dst != null_obj), "not for ConP NULL"); 1972 PointsToNode* ptadr = _nodes.at(n->_idx); 1973 if (ptadr != NULL) { 1974 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity"); 1975 return; 1976 } 1977 Compile* C = _compile; 1978 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es); 1979 _nodes.at_put(n->_idx, ptadr); 1980 // Add edge from arraycopy node to source object. 1981 (void)add_edge(ptadr, src); 1982 src->set_arraycopy_src(); 1983 // Add edge from destination object to arraycopy node. 1984 (void)add_edge(dst, ptadr); 1985 dst->set_arraycopy_dst(); 1986 } 1987 1988 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) { 1989 const Type* adr_type = n->as_AddP()->bottom_type(); 1990 BasicType bt = T_INT; 1991 if (offset == Type::OffsetBot) { 1992 // Check only oop fields. 1993 if (!adr_type->isa_aryptr() || 1994 (adr_type->isa_aryptr()->klass() == NULL) || 1995 adr_type->isa_aryptr()->klass()->is_obj_array_klass()) { 1996 // OffsetBot is used to reference array's element. Ignore first AddP. 1997 if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) { 1998 bt = T_OBJECT; 1999 } 2000 } 2001 } else if (offset != oopDesc::klass_offset_in_bytes()) { 2002 if (adr_type->isa_instptr()) { 2003 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field(); 2004 if (field != NULL) { 2005 bt = field->layout_type(); 2006 } else { 2007 // Check for unsafe oop field access 2008 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2009 int opcode = n->fast_out(i)->Opcode(); 2010 if (opcode == Op_StoreP || opcode == Op_LoadP || 2011 opcode == Op_StoreN || opcode == Op_LoadN) { 2012 bt = T_OBJECT; 2013 (*unsafe) = true; 2014 break; 2015 } 2016 } 2017 } 2018 } else if (adr_type->isa_aryptr()) { 2019 if (offset == arrayOopDesc::length_offset_in_bytes()) { 2020 // Ignore array length load. 2021 } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) { 2022 // Ignore first AddP. 2023 } else { 2024 const Type* elemtype = adr_type->isa_aryptr()->elem(); 2025 bt = elemtype->array_element_basic_type(); 2026 } 2027 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) { 2028 // Allocation initialization, ThreadLocal field access, unsafe access 2029 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2030 int opcode = n->fast_out(i)->Opcode(); 2031 if (opcode == Op_StoreP || opcode == Op_LoadP || 2032 opcode == Op_StoreN || opcode == Op_LoadN) { 2033 bt = T_OBJECT; 2034 break; 2035 } 2036 } 2037 } 2038 } 2039 return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY); 2040 } 2041 2042 // Returns unique pointed java object or NULL. 2043 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) { 2044 assert(!_collecting, "should not call when contructed graph"); 2045 // If the node was created after the escape computation we can't answer. 2046 uint idx = n->_idx; 2047 if (idx >= nodes_size()) { 2048 return NULL; 2049 } 2050 PointsToNode* ptn = ptnode_adr(idx); 2051 if (ptn->is_JavaObject()) { 2052 return ptn->as_JavaObject(); 2053 } 2054 assert(ptn->is_LocalVar(), "sanity"); 2055 // Check all java objects it points to. 2056 JavaObjectNode* jobj = NULL; 2057 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2058 PointsToNode* e = i.get(); 2059 if (e->is_JavaObject()) { 2060 if (jobj == NULL) { 2061 jobj = e->as_JavaObject(); 2062 } else if (jobj != e) { 2063 return NULL; 2064 } 2065 } 2066 } 2067 return jobj; 2068 } 2069 2070 // Return true if this node points only to non-escaping allocations. 2071 bool PointsToNode::non_escaping_allocation() { 2072 if (is_JavaObject()) { 2073 Node* n = ideal_node(); 2074 if (n->is_Allocate() || n->is_CallStaticJava()) { 2075 return (escape_state() == PointsToNode::NoEscape); 2076 } else { 2077 return false; 2078 } 2079 } 2080 assert(is_LocalVar(), "sanity"); 2081 // Check all java objects it points to. 2082 for (EdgeIterator i(this); i.has_next(); i.next()) { 2083 PointsToNode* e = i.get(); 2084 if (e->is_JavaObject()) { 2085 Node* n = e->ideal_node(); 2086 if ((e->escape_state() != PointsToNode::NoEscape) || 2087 !(n->is_Allocate() || n->is_CallStaticJava())) { 2088 return false; 2089 } 2090 } 2091 } 2092 return true; 2093 } 2094 2095 // Return true if we know the node does not escape globally. 2096 bool ConnectionGraph::not_global_escape(Node *n) { 2097 assert(!_collecting, "should not call during graph construction"); 2098 // If the node was created after the escape computation we can't answer. 2099 uint idx = n->_idx; 2100 if (idx >= nodes_size()) { 2101 return false; 2102 } 2103 PointsToNode* ptn = ptnode_adr(idx); 2104 PointsToNode::EscapeState es = ptn->escape_state(); 2105 // If we have already computed a value, return it. 2106 if (es >= PointsToNode::GlobalEscape) 2107 return false; 2108 if (ptn->is_JavaObject()) { 2109 return true; // (es < PointsToNode::GlobalEscape); 2110 } 2111 assert(ptn->is_LocalVar(), "sanity"); 2112 // Check all java objects it points to. 2113 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2114 if (i.get()->escape_state() >= PointsToNode::GlobalEscape) 2115 return false; 2116 } 2117 return true; 2118 } 2119 2120 2121 // Helper functions 2122 2123 // Return true if this node points to specified node or nodes it points to. 2124 bool PointsToNode::points_to(JavaObjectNode* ptn) const { 2125 if (is_JavaObject()) { 2126 return (this == ptn); 2127 } 2128 assert(is_LocalVar() || is_Field(), "sanity"); 2129 for (EdgeIterator i(this); i.has_next(); i.next()) { 2130 if (i.get() == ptn) 2131 return true; 2132 } 2133 return false; 2134 } 2135 2136 // Return true if one node points to an other. 2137 bool PointsToNode::meet(PointsToNode* ptn) { 2138 if (this == ptn) { 2139 return true; 2140 } else if (ptn->is_JavaObject()) { 2141 return this->points_to(ptn->as_JavaObject()); 2142 } else if (this->is_JavaObject()) { 2143 return ptn->points_to(this->as_JavaObject()); 2144 } 2145 assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity"); 2146 int ptn_count = ptn->edge_count(); 2147 for (EdgeIterator i(this); i.has_next(); i.next()) { 2148 PointsToNode* this_e = i.get(); 2149 for (int j = 0; j < ptn_count; j++) { 2150 if (this_e == ptn->edge(j)) 2151 return true; 2152 } 2153 } 2154 return false; 2155 } 2156 2157 #ifdef ASSERT 2158 // Return true if bases point to this java object. 2159 bool FieldNode::has_base(JavaObjectNode* jobj) const { 2160 for (BaseIterator i(this); i.has_next(); i.next()) { 2161 if (i.get() == jobj) 2162 return true; 2163 } 2164 return false; 2165 } 2166 #endif 2167 2168 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) { 2169 const Type *adr_type = phase->type(adr); 2170 if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && 2171 adr->in(AddPNode::Address)->is_Proj() && 2172 adr->in(AddPNode::Address)->in(0)->is_Allocate()) { 2173 // We are computing a raw address for a store captured by an Initialize 2174 // compute an appropriate address type. AddP cases #3 and #5 (see below). 2175 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 2176 assert(offs != Type::OffsetBot || 2177 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), 2178 "offset must be a constant or it is initialization of array"); 2179 return offs; 2180 } 2181 const TypePtr *t_ptr = adr_type->isa_ptr(); 2182 assert(t_ptr != NULL, "must be a pointer type"); 2183 return t_ptr->offset(); 2184 } 2185 2186 Node* ConnectionGraph::get_addp_base(Node *addp) { 2187 assert(addp->is_AddP(), "must be AddP"); 2188 // 2189 // AddP cases for Base and Address inputs: 2190 // case #1. Direct object's field reference: 2191 // Allocate 2192 // | 2193 // Proj #5 ( oop result ) 2194 // | 2195 // CheckCastPP (cast to instance type) 2196 // | | 2197 // AddP ( base == address ) 2198 // 2199 // case #2. Indirect object's field reference: 2200 // Phi 2201 // | 2202 // CastPP (cast to instance type) 2203 // | | 2204 // AddP ( base == address ) 2205 // 2206 // case #3. Raw object's field reference for Initialize node: 2207 // Allocate 2208 // | 2209 // Proj #5 ( oop result ) 2210 // top | 2211 // \ | 2212 // AddP ( base == top ) 2213 // 2214 // case #4. Array's element reference: 2215 // {CheckCastPP | CastPP} 2216 // | | | 2217 // | AddP ( array's element offset ) 2218 // | | 2219 // AddP ( array's offset ) 2220 // 2221 // case #5. Raw object's field reference for arraycopy stub call: 2222 // The inline_native_clone() case when the arraycopy stub is called 2223 // after the allocation before Initialize and CheckCastPP nodes. 2224 // Allocate 2225 // | 2226 // Proj #5 ( oop result ) 2227 // | | 2228 // AddP ( base == address ) 2229 // 2230 // case #6. Constant Pool, ThreadLocal, CastX2P or 2231 // Raw object's field reference: 2232 // {ConP, ThreadLocal, CastX2P, raw Load} 2233 // top | 2234 // \ | 2235 // AddP ( base == top ) 2236 // 2237 // case #7. Klass's field reference. 2238 // LoadKlass 2239 // | | 2240 // AddP ( base == address ) 2241 // 2242 // case #8. narrow Klass's field reference. 2243 // LoadNKlass 2244 // | 2245 // DecodeN 2246 // | | 2247 // AddP ( base == address ) 2248 // 2249 Node *base = addp->in(AddPNode::Base); 2250 if (base->uncast()->is_top()) { // The AddP case #3 and #6. 2251 base = addp->in(AddPNode::Address); 2252 while (base->is_AddP()) { 2253 // Case #6 (unsafe access) may have several chained AddP nodes. 2254 assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only"); 2255 base = base->in(AddPNode::Address); 2256 } 2257 Node* uncast_base = base->uncast(); 2258 int opcode = uncast_base->Opcode(); 2259 assert(opcode == Op_ConP || opcode == Op_ThreadLocal || 2260 opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() || 2261 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) || 2262 (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity"); 2263 } 2264 return base; 2265 } 2266 2267 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) { 2268 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes"); 2269 Node* addp2 = addp->raw_out(0); 2270 if (addp->outcnt() == 1 && addp2->is_AddP() && 2271 addp2->in(AddPNode::Base) == n && 2272 addp2->in(AddPNode::Address) == addp) { 2273 assert(addp->in(AddPNode::Base) == n, "expecting the same base"); 2274 // 2275 // Find array's offset to push it on worklist first and 2276 // as result process an array's element offset first (pushed second) 2277 // to avoid CastPP for the array's offset. 2278 // Otherwise the inserted CastPP (LocalVar) will point to what 2279 // the AddP (Field) points to. Which would be wrong since 2280 // the algorithm expects the CastPP has the same point as 2281 // as AddP's base CheckCastPP (LocalVar). 2282 // 2283 // ArrayAllocation 2284 // | 2285 // CheckCastPP 2286 // | 2287 // memProj (from ArrayAllocation CheckCastPP) 2288 // | || 2289 // | || Int (element index) 2290 // | || | ConI (log(element size)) 2291 // | || | / 2292 // | || LShift 2293 // | || / 2294 // | AddP (array's element offset) 2295 // | | 2296 // | | ConI (array's offset: #12(32-bits) or #24(64-bits)) 2297 // | / / 2298 // AddP (array's offset) 2299 // | 2300 // Load/Store (memory operation on array's element) 2301 // 2302 return addp2; 2303 } 2304 return NULL; 2305 } 2306 2307 // 2308 // Adjust the type and inputs of an AddP which computes the 2309 // address of a field of an instance 2310 // 2311 bool ConnectionGraph::split_AddP(Node *addp, Node *base) { 2312 PhaseGVN* igvn = _igvn; 2313 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr(); 2314 assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr"); 2315 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr(); 2316 if (t == NULL) { 2317 // We are computing a raw address for a store captured by an Initialize 2318 // compute an appropriate address type (cases #3 and #5). 2319 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer"); 2320 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation"); 2321 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot); 2322 assert(offs != Type::OffsetBot, "offset must be a constant"); 2323 t = base_t->add_offset(offs)->is_oopptr(); 2324 } 2325 int inst_id = base_t->instance_id(); 2326 assert(!t->is_known_instance() || t->instance_id() == inst_id, 2327 "old type must be non-instance or match new type"); 2328 2329 // The type 't' could be subclass of 'base_t'. 2330 // As result t->offset() could be large then base_t's size and it will 2331 // cause the failure in add_offset() with narrow oops since TypeOopPtr() 2332 // constructor verifies correctness of the offset. 2333 // 2334 // It could happened on subclass's branch (from the type profiling 2335 // inlining) which was not eliminated during parsing since the exactness 2336 // of the allocation type was not propagated to the subclass type check. 2337 // 2338 // Or the type 't' could be not related to 'base_t' at all. 2339 // It could happened when CHA type is different from MDO type on a dead path 2340 // (for example, from instanceof check) which is not collapsed during parsing. 2341 // 2342 // Do nothing for such AddP node and don't process its users since 2343 // this code branch will go away. 2344 // 2345 if (!t->is_known_instance() && 2346 !base_t->klass()->is_subtype_of(t->klass())) { 2347 return false; // bail out 2348 } 2349 const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr(); 2350 // Do NOT remove the next line: ensure a new alias index is allocated 2351 // for the instance type. Note: C++ will not remove it since the call 2352 // has side effect. 2353 int alias_idx = _compile->get_alias_index(tinst); 2354 igvn->set_type(addp, tinst); 2355 // record the allocation in the node map 2356 set_map(addp, get_map(base->_idx)); 2357 // Set addp's Base and Address to 'base'. 2358 Node *abase = addp->in(AddPNode::Base); 2359 Node *adr = addp->in(AddPNode::Address); 2360 if (adr->is_Proj() && adr->in(0)->is_Allocate() && 2361 adr->in(0)->_idx == (uint)inst_id) { 2362 // Skip AddP cases #3 and #5. 2363 } else { 2364 assert(!abase->is_top(), "sanity"); // AddP case #3 2365 if (abase != base) { 2366 igvn->hash_delete(addp); 2367 addp->set_req(AddPNode::Base, base); 2368 if (abase == adr) { 2369 addp->set_req(AddPNode::Address, base); 2370 } else { 2371 // AddP case #4 (adr is array's element offset AddP node) 2372 #ifdef ASSERT 2373 const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr(); 2374 assert(adr->is_AddP() && atype != NULL && 2375 atype->instance_id() == inst_id, "array's element offset should be processed first"); 2376 #endif 2377 } 2378 igvn->hash_insert(addp); 2379 } 2380 } 2381 // Put on IGVN worklist since at least addp's type was changed above. 2382 record_for_optimizer(addp); 2383 return true; 2384 } 2385 2386 // 2387 // Create a new version of orig_phi if necessary. Returns either the newly 2388 // created phi or an existing phi. Sets create_new to indicate whether a new 2389 // phi was created. Cache the last newly created phi in the node map. 2390 // 2391 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) { 2392 Compile *C = _compile; 2393 PhaseGVN* igvn = _igvn; 2394 new_created = false; 2395 int phi_alias_idx = C->get_alias_index(orig_phi->adr_type()); 2396 // nothing to do if orig_phi is bottom memory or matches alias_idx 2397 if (phi_alias_idx == alias_idx) { 2398 return orig_phi; 2399 } 2400 // Have we recently created a Phi for this alias index? 2401 PhiNode *result = get_map_phi(orig_phi->_idx); 2402 if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) { 2403 return result; 2404 } 2405 // Previous check may fail when the same wide memory Phi was split into Phis 2406 // for different memory slices. Search all Phis for this region. 2407 if (result != NULL) { 2408 Node* region = orig_phi->in(0); 2409 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { 2410 Node* phi = region->fast_out(i); 2411 if (phi->is_Phi() && 2412 C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) { 2413 assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice"); 2414 return phi->as_Phi(); 2415 } 2416 } 2417 } 2418 if ((int) (C->live_nodes() + 2*NodeLimitFudgeFactor) > MaxNodeLimit) { 2419 if (C->do_escape_analysis() == true && !C->failing()) { 2420 // Retry compilation without escape analysis. 2421 // If this is the first failure, the sentinel string will "stick" 2422 // to the Compile object, and the C2Compiler will see it and retry. 2423 C->record_failure(C2Compiler::retry_no_escape_analysis()); 2424 } 2425 return NULL; 2426 } 2427 orig_phi_worklist.append_if_missing(orig_phi); 2428 const TypePtr *atype = C->get_adr_type(alias_idx); 2429 result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype); 2430 C->copy_node_notes_to(result, orig_phi); 2431 igvn->set_type(result, result->bottom_type()); 2432 record_for_optimizer(result); 2433 set_map(orig_phi, result); 2434 new_created = true; 2435 return result; 2436 } 2437 2438 // 2439 // Return a new version of Memory Phi "orig_phi" with the inputs having the 2440 // specified alias index. 2441 // 2442 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) { 2443 assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory"); 2444 Compile *C = _compile; 2445 PhaseGVN* igvn = _igvn; 2446 bool new_phi_created; 2447 PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created); 2448 if (!new_phi_created) { 2449 return result; 2450 } 2451 GrowableArray<PhiNode *> phi_list; 2452 GrowableArray<uint> cur_input; 2453 PhiNode *phi = orig_phi; 2454 uint idx = 1; 2455 bool finished = false; 2456 while(!finished) { 2457 while (idx < phi->req()) { 2458 Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist); 2459 if (mem != NULL && mem->is_Phi()) { 2460 PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created); 2461 if (new_phi_created) { 2462 // found an phi for which we created a new split, push current one on worklist and begin 2463 // processing new one 2464 phi_list.push(phi); 2465 cur_input.push(idx); 2466 phi = mem->as_Phi(); 2467 result = newphi; 2468 idx = 1; 2469 continue; 2470 } else { 2471 mem = newphi; 2472 } 2473 } 2474 if (C->failing()) { 2475 return NULL; 2476 } 2477 result->set_req(idx++, mem); 2478 } 2479 #ifdef ASSERT 2480 // verify that the new Phi has an input for each input of the original 2481 assert( phi->req() == result->req(), "must have same number of inputs."); 2482 assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match"); 2483 #endif 2484 // Check if all new phi's inputs have specified alias index. 2485 // Otherwise use old phi. 2486 for (uint i = 1; i < phi->req(); i++) { 2487 Node* in = result->in(i); 2488 assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond."); 2489 } 2490 // we have finished processing a Phi, see if there are any more to do 2491 finished = (phi_list.length() == 0 ); 2492 if (!finished) { 2493 phi = phi_list.pop(); 2494 idx = cur_input.pop(); 2495 PhiNode *prev_result = get_map_phi(phi->_idx); 2496 prev_result->set_req(idx++, result); 2497 result = prev_result; 2498 } 2499 } 2500 return result; 2501 } 2502 2503 // 2504 // The next methods are derived from methods in MemNode. 2505 // 2506 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) { 2507 Node *mem = mmem; 2508 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally 2509 // means an array I have not precisely typed yet. Do not do any 2510 // alias stuff with it any time soon. 2511 if (toop->base() != Type::AnyPtr && 2512 !(toop->klass() != NULL && 2513 toop->klass()->is_java_lang_Object() && 2514 toop->offset() == Type::OffsetBot)) { 2515 mem = mmem->memory_at(alias_idx); 2516 // Update input if it is progress over what we have now 2517 } 2518 return mem; 2519 } 2520 2521 // 2522 // Move memory users to their memory slices. 2523 // 2524 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) { 2525 Compile* C = _compile; 2526 PhaseGVN* igvn = _igvn; 2527 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 2528 assert(tp != NULL, "ptr type"); 2529 int alias_idx = C->get_alias_index(tp); 2530 int general_idx = C->get_general_index(alias_idx); 2531 2532 // Move users first 2533 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2534 Node* use = n->fast_out(i); 2535 if (use->is_MergeMem()) { 2536 MergeMemNode* mmem = use->as_MergeMem(); 2537 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); 2538 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 2539 continue; // Nothing to do 2540 } 2541 // Replace previous general reference to mem node. 2542 uint orig_uniq = C->unique(); 2543 Node* m = find_inst_mem(n, general_idx, orig_phis); 2544 assert(orig_uniq == C->unique(), "no new nodes"); 2545 mmem->set_memory_at(general_idx, m); 2546 --imax; 2547 --i; 2548 } else if (use->is_MemBar()) { 2549 assert(!use->is_Initialize(), "initializing stores should not be moved"); 2550 if (use->req() > MemBarNode::Precedent && 2551 use->in(MemBarNode::Precedent) == n) { 2552 // Don't move related membars. 2553 record_for_optimizer(use); 2554 continue; 2555 } 2556 tp = use->as_MemBar()->adr_type()->isa_ptr(); 2557 if (tp != NULL && C->get_alias_index(tp) == alias_idx || 2558 alias_idx == general_idx) { 2559 continue; // Nothing to do 2560 } 2561 // Move to general memory slice. 2562 uint orig_uniq = C->unique(); 2563 Node* m = find_inst_mem(n, general_idx, orig_phis); 2564 assert(orig_uniq == C->unique(), "no new nodes"); 2565 igvn->hash_delete(use); 2566 imax -= use->replace_edge(n, m); 2567 igvn->hash_insert(use); 2568 record_for_optimizer(use); 2569 --i; 2570 #ifdef ASSERT 2571 } else if (use->is_Mem()) { 2572 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) { 2573 // Don't move related cardmark. 2574 continue; 2575 } 2576 // Memory nodes should have new memory input. 2577 tp = igvn->type(use->in(MemNode::Address))->isa_ptr(); 2578 assert(tp != NULL, "ptr type"); 2579 int idx = C->get_alias_index(tp); 2580 assert(get_map(use->_idx) != NULL || idx == alias_idx, 2581 "Following memory nodes should have new memory input or be on the same memory slice"); 2582 } else if (use->is_Phi()) { 2583 // Phi nodes should be split and moved already. 2584 tp = use->as_Phi()->adr_type()->isa_ptr(); 2585 assert(tp != NULL, "ptr type"); 2586 int idx = C->get_alias_index(tp); 2587 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice"); 2588 } else { 2589 use->dump(); 2590 assert(false, "should not be here"); 2591 #endif 2592 } 2593 } 2594 } 2595 2596 // 2597 // Search memory chain of "mem" to find a MemNode whose address 2598 // is the specified alias index. 2599 // 2600 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) { 2601 if (orig_mem == NULL) 2602 return orig_mem; 2603 Compile* C = _compile; 2604 PhaseGVN* igvn = _igvn; 2605 const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr(); 2606 bool is_instance = (toop != NULL) && toop->is_known_instance(); 2607 Node *start_mem = C->start()->proj_out(TypeFunc::Memory); 2608 Node *prev = NULL; 2609 Node *result = orig_mem; 2610 while (prev != result) { 2611 prev = result; 2612 if (result == start_mem) 2613 break; // hit one of our sentinels 2614 if (result->is_Mem()) { 2615 const Type *at = igvn->type(result->in(MemNode::Address)); 2616 if (at == Type::TOP) 2617 break; // Dead 2618 assert (at->isa_ptr() != NULL, "pointer type required."); 2619 int idx = C->get_alias_index(at->is_ptr()); 2620 if (idx == alias_idx) 2621 break; // Found 2622 if (!is_instance && (at->isa_oopptr() == NULL || 2623 !at->is_oopptr()->is_known_instance())) { 2624 break; // Do not skip store to general memory slice. 2625 } 2626 result = result->in(MemNode::Memory); 2627 } 2628 if (!is_instance) 2629 continue; // don't search further for non-instance types 2630 // skip over a call which does not affect this memory slice 2631 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) { 2632 Node *proj_in = result->in(0); 2633 if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) { 2634 break; // hit one of our sentinels 2635 } else if (proj_in->is_Call()) { 2636 CallNode *call = proj_in->as_Call(); 2637 if (!call->may_modify(toop, igvn)) { 2638 result = call->in(TypeFunc::Memory); 2639 } 2640 } else if (proj_in->is_Initialize()) { 2641 AllocateNode* alloc = proj_in->as_Initialize()->allocation(); 2642 // Stop if this is the initialization for the object instance which 2643 // which contains this memory slice, otherwise skip over it. 2644 if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) { 2645 result = proj_in->in(TypeFunc::Memory); 2646 } 2647 } else if (proj_in->is_MemBar()) { 2648 result = proj_in->in(TypeFunc::Memory); 2649 } 2650 } else if (result->is_MergeMem()) { 2651 MergeMemNode *mmem = result->as_MergeMem(); 2652 result = step_through_mergemem(mmem, alias_idx, toop); 2653 if (result == mmem->base_memory()) { 2654 // Didn't find instance memory, search through general slice recursively. 2655 result = mmem->memory_at(C->get_general_index(alias_idx)); 2656 result = find_inst_mem(result, alias_idx, orig_phis); 2657 if (C->failing()) { 2658 return NULL; 2659 } 2660 mmem->set_memory_at(alias_idx, result); 2661 } 2662 } else if (result->is_Phi() && 2663 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { 2664 Node *un = result->as_Phi()->unique_input(igvn); 2665 if (un != NULL) { 2666 orig_phis.append_if_missing(result->as_Phi()); 2667 result = un; 2668 } else { 2669 break; 2670 } 2671 } else if (result->is_ClearArray()) { 2672 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) { 2673 // Can not bypass initialization of the instance 2674 // we are looking for. 2675 break; 2676 } 2677 // Otherwise skip it (the call updated 'result' value). 2678 } else if (result->Opcode() == Op_SCMemProj) { 2679 Node* mem = result->in(0); 2680 Node* adr = NULL; 2681 if (mem->is_LoadStore()) { 2682 adr = mem->in(MemNode::Address); 2683 } else { 2684 assert(mem->Opcode() == Op_EncodeISOArray, "sanity"); 2685 adr = mem->in(3); // Memory edge corresponds to destination array 2686 } 2687 const Type *at = igvn->type(adr); 2688 if (at != Type::TOP) { 2689 assert (at->isa_ptr() != NULL, "pointer type required."); 2690 int idx = C->get_alias_index(at->is_ptr()); 2691 assert(idx != alias_idx, "Object is not scalar replaceable if a LoadStore node access its field"); 2692 break; 2693 } 2694 result = mem->in(MemNode::Memory); 2695 } 2696 } 2697 if (result->is_Phi()) { 2698 PhiNode *mphi = result->as_Phi(); 2699 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); 2700 const TypePtr *t = mphi->adr_type(); 2701 if (!is_instance) { 2702 // Push all non-instance Phis on the orig_phis worklist to update inputs 2703 // during Phase 4 if needed. 2704 orig_phis.append_if_missing(mphi); 2705 } else if (C->get_alias_index(t) != alias_idx) { 2706 // Create a new Phi with the specified alias index type. 2707 result = split_memory_phi(mphi, alias_idx, orig_phis); 2708 } 2709 } 2710 // the result is either MemNode, PhiNode, InitializeNode. 2711 return result; 2712 } 2713 2714 // 2715 // Convert the types of unescaped object to instance types where possible, 2716 // propagate the new type information through the graph, and update memory 2717 // edges and MergeMem inputs to reflect the new type. 2718 // 2719 // We start with allocations (and calls which may be allocations) on alloc_worklist. 2720 // The processing is done in 4 phases: 2721 // 2722 // Phase 1: Process possible allocations from alloc_worklist. Create instance 2723 // types for the CheckCastPP for allocations where possible. 2724 // Propagate the the new types through users as follows: 2725 // casts and Phi: push users on alloc_worklist 2726 // AddP: cast Base and Address inputs to the instance type 2727 // push any AddP users on alloc_worklist and push any memnode 2728 // users onto memnode_worklist. 2729 // Phase 2: Process MemNode's from memnode_worklist. compute new address type and 2730 // search the Memory chain for a store with the appropriate type 2731 // address type. If a Phi is found, create a new version with 2732 // the appropriate memory slices from each of the Phi inputs. 2733 // For stores, process the users as follows: 2734 // MemNode: push on memnode_worklist 2735 // MergeMem: push on mergemem_worklist 2736 // Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice 2737 // moving the first node encountered of each instance type to the 2738 // the input corresponding to its alias index. 2739 // appropriate memory slice. 2740 // Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes. 2741 // 2742 // In the following example, the CheckCastPP nodes are the cast of allocation 2743 // results and the allocation of node 29 is unescaped and eligible to be an 2744 // instance type. 2745 // 2746 // We start with: 2747 // 2748 // 7 Parm #memory 2749 // 10 ConI "12" 2750 // 19 CheckCastPP "Foo" 2751 // 20 AddP _ 19 19 10 Foo+12 alias_index=4 2752 // 29 CheckCastPP "Foo" 2753 // 30 AddP _ 29 29 10 Foo+12 alias_index=4 2754 // 2755 // 40 StoreP 25 7 20 ... alias_index=4 2756 // 50 StoreP 35 40 30 ... alias_index=4 2757 // 60 StoreP 45 50 20 ... alias_index=4 2758 // 70 LoadP _ 60 30 ... alias_index=4 2759 // 80 Phi 75 50 60 Memory alias_index=4 2760 // 90 LoadP _ 80 30 ... alias_index=4 2761 // 100 LoadP _ 80 20 ... alias_index=4 2762 // 2763 // 2764 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24 2765 // and creating a new alias index for node 30. This gives: 2766 // 2767 // 7 Parm #memory 2768 // 10 ConI "12" 2769 // 19 CheckCastPP "Foo" 2770 // 20 AddP _ 19 19 10 Foo+12 alias_index=4 2771 // 29 CheckCastPP "Foo" iid=24 2772 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24 2773 // 2774 // 40 StoreP 25 7 20 ... alias_index=4 2775 // 50 StoreP 35 40 30 ... alias_index=6 2776 // 60 StoreP 45 50 20 ... alias_index=4 2777 // 70 LoadP _ 60 30 ... alias_index=6 2778 // 80 Phi 75 50 60 Memory alias_index=4 2779 // 90 LoadP _ 80 30 ... alias_index=6 2780 // 100 LoadP _ 80 20 ... alias_index=4 2781 // 2782 // In phase 2, new memory inputs are computed for the loads and stores, 2783 // And a new version of the phi is created. In phase 4, the inputs to 2784 // node 80 are updated and then the memory nodes are updated with the 2785 // values computed in phase 2. This results in: 2786 // 2787 // 7 Parm #memory 2788 // 10 ConI "12" 2789 // 19 CheckCastPP "Foo" 2790 // 20 AddP _ 19 19 10 Foo+12 alias_index=4 2791 // 29 CheckCastPP "Foo" iid=24 2792 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24 2793 // 2794 // 40 StoreP 25 7 20 ... alias_index=4 2795 // 50 StoreP 35 7 30 ... alias_index=6 2796 // 60 StoreP 45 40 20 ... alias_index=4 2797 // 70 LoadP _ 50 30 ... alias_index=6 2798 // 80 Phi 75 40 60 Memory alias_index=4 2799 // 120 Phi 75 50 50 Memory alias_index=6 2800 // 90 LoadP _ 120 30 ... alias_index=6 2801 // 100 LoadP _ 80 20 ... alias_index=4 2802 // 2803 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) { 2804 GrowableArray<Node *> memnode_worklist; 2805 GrowableArray<PhiNode *> orig_phis; 2806 PhaseIterGVN *igvn = _igvn; 2807 uint new_index_start = (uint) _compile->num_alias_types(); 2808 Arena* arena = Thread::current()->resource_area(); 2809 VectorSet visited(arena); 2810 ideal_nodes.clear(); // Reset for use with set_map/get_map. 2811 uint unique_old = _compile->unique(); 2812 2813 // Phase 1: Process possible allocations from alloc_worklist. 2814 // Create instance types for the CheckCastPP for allocations where possible. 2815 // 2816 // (Note: don't forget to change the order of the second AddP node on 2817 // the alloc_worklist if the order of the worklist processing is changed, 2818 // see the comment in find_second_addp().) 2819 // 2820 while (alloc_worklist.length() != 0) { 2821 Node *n = alloc_worklist.pop(); 2822 uint ni = n->_idx; 2823 if (n->is_Call()) { 2824 CallNode *alloc = n->as_Call(); 2825 // copy escape information to call node 2826 PointsToNode* ptn = ptnode_adr(alloc->_idx); 2827 PointsToNode::EscapeState es = ptn->escape_state(); 2828 // We have an allocation or call which returns a Java object, 2829 // see if it is unescaped. 2830 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) 2831 continue; 2832 // Find CheckCastPP for the allocate or for the return value of a call 2833 n = alloc->result_cast(); 2834 if (n == NULL) { // No uses except Initialize node 2835 if (alloc->is_Allocate()) { 2836 // Set the scalar_replaceable flag for allocation 2837 // so it could be eliminated if it has no uses. 2838 alloc->as_Allocate()->_is_scalar_replaceable = true; 2839 } 2840 if (alloc->is_CallStaticJava()) { 2841 // Set the scalar_replaceable flag for boxing method 2842 // so it could be eliminated if it has no uses. 2843 alloc->as_CallStaticJava()->_is_scalar_replaceable = true; 2844 } 2845 continue; 2846 } 2847 if (!n->is_CheckCastPP()) { // not unique CheckCastPP. 2848 assert(!alloc->is_Allocate(), "allocation should have unique type"); 2849 continue; 2850 } 2851 2852 // The inline code for Object.clone() casts the allocation result to 2853 // java.lang.Object and then to the actual type of the allocated 2854 // object. Detect this case and use the second cast. 2855 // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when 2856 // the allocation result is cast to java.lang.Object and then 2857 // to the actual Array type. 2858 if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL 2859 && (alloc->is_AllocateArray() || 2860 igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) { 2861 Node *cast2 = NULL; 2862 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2863 Node *use = n->fast_out(i); 2864 if (use->is_CheckCastPP()) { 2865 cast2 = use; 2866 break; 2867 } 2868 } 2869 if (cast2 != NULL) { 2870 n = cast2; 2871 } else { 2872 // Non-scalar replaceable if the allocation type is unknown statically 2873 // (reflection allocation), the object can't be restored during 2874 // deoptimization without precise type. 2875 continue; 2876 } 2877 } 2878 2879 const TypeOopPtr *t = igvn->type(n)->isa_oopptr(); 2880 if (t == NULL) 2881 continue; // not a TypeOopPtr 2882 if (!t->klass_is_exact()) 2883 continue; // not an unique type 2884 2885 if (alloc->is_Allocate()) { 2886 // Set the scalar_replaceable flag for allocation 2887 // so it could be eliminated. 2888 alloc->as_Allocate()->_is_scalar_replaceable = true; 2889 } 2890 if (alloc->is_CallStaticJava()) { 2891 // Set the scalar_replaceable flag for boxing method 2892 // so it could be eliminated. 2893 alloc->as_CallStaticJava()->_is_scalar_replaceable = true; 2894 } 2895 set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state 2896 // in order for an object to be scalar-replaceable, it must be: 2897 // - a direct allocation (not a call returning an object) 2898 // - non-escaping 2899 // - eligible to be a unique type 2900 // - not determined to be ineligible by escape analysis 2901 set_map(alloc, n); 2902 set_map(n, alloc); 2903 const TypeOopPtr* tinst = t->cast_to_instance_id(ni); 2904 igvn->hash_delete(n); 2905 igvn->set_type(n, tinst); 2906 n->raise_bottom_type(tinst); 2907 igvn->hash_insert(n); 2908 record_for_optimizer(n); 2909 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) { 2910 2911 // First, put on the worklist all Field edges from Connection Graph 2912 // which is more accurate then putting immediate users from Ideal Graph. 2913 for (EdgeIterator e(ptn); e.has_next(); e.next()) { 2914 PointsToNode* tgt = e.get(); 2915 Node* use = tgt->ideal_node(); 2916 assert(tgt->is_Field() && use->is_AddP(), 2917 "only AddP nodes are Field edges in CG"); 2918 if (use->outcnt() > 0) { // Don't process dead nodes 2919 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base)); 2920 if (addp2 != NULL) { 2921 assert(alloc->is_AllocateArray(),"array allocation was expected"); 2922 alloc_worklist.append_if_missing(addp2); 2923 } 2924 alloc_worklist.append_if_missing(use); 2925 } 2926 } 2927 2928 // An allocation may have an Initialize which has raw stores. Scan 2929 // the users of the raw allocation result and push AddP users 2930 // on alloc_worklist. 2931 Node *raw_result = alloc->proj_out(TypeFunc::Parms); 2932 assert (raw_result != NULL, "must have an allocation result"); 2933 for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) { 2934 Node *use = raw_result->fast_out(i); 2935 if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes 2936 Node* addp2 = find_second_addp(use, raw_result); 2937 if (addp2 != NULL) { 2938 assert(alloc->is_AllocateArray(),"array allocation was expected"); 2939 alloc_worklist.append_if_missing(addp2); 2940 } 2941 alloc_worklist.append_if_missing(use); 2942 } else if (use->is_MemBar()) { 2943 memnode_worklist.append_if_missing(use); 2944 } 2945 } 2946 } 2947 } else if (n->is_AddP()) { 2948 JavaObjectNode* jobj = unique_java_object(get_addp_base(n)); 2949 if (jobj == NULL || jobj == phantom_obj) { 2950 #ifdef ASSERT 2951 ptnode_adr(get_addp_base(n)->_idx)->dump(); 2952 ptnode_adr(n->_idx)->dump(); 2953 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation"); 2954 #endif 2955 _compile->record_failure(C2Compiler::retry_no_escape_analysis()); 2956 return; 2957 } 2958 Node *base = get_map(jobj->idx()); // CheckCastPP node 2959 if (!split_AddP(n, base)) continue; // wrong type from dead path 2960 } else if (n->is_Phi() || 2961 n->is_CheckCastPP() || 2962 n->is_EncodeP() || 2963 n->is_DecodeN() || 2964 (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) { 2965 if (visited.test_set(n->_idx)) { 2966 assert(n->is_Phi(), "loops only through Phi's"); 2967 continue; // already processed 2968 } 2969 JavaObjectNode* jobj = unique_java_object(n); 2970 if (jobj == NULL || jobj == phantom_obj) { 2971 #ifdef ASSERT 2972 ptnode_adr(n->_idx)->dump(); 2973 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation"); 2974 #endif 2975 _compile->record_failure(C2Compiler::retry_no_escape_analysis()); 2976 return; 2977 } else { 2978 Node *val = get_map(jobj->idx()); // CheckCastPP node 2979 TypeNode *tn = n->as_Type(); 2980 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr(); 2981 assert(tinst != NULL && tinst->is_known_instance() && 2982 tinst->instance_id() == jobj->idx() , "instance type expected."); 2983 2984 const Type *tn_type = igvn->type(tn); 2985 const TypeOopPtr *tn_t; 2986 if (tn_type->isa_narrowoop()) { 2987 tn_t = tn_type->make_ptr()->isa_oopptr(); 2988 } else { 2989 tn_t = tn_type->isa_oopptr(); 2990 } 2991 if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) { 2992 if (tn_type->isa_narrowoop()) { 2993 tn_type = tinst->make_narrowoop(); 2994 } else { 2995 tn_type = tinst; 2996 } 2997 igvn->hash_delete(tn); 2998 igvn->set_type(tn, tn_type); 2999 tn->set_type(tn_type); 3000 igvn->hash_insert(tn); 3001 record_for_optimizer(n); 3002 } else { 3003 assert(tn_type == TypePtr::NULL_PTR || 3004 tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()), 3005 "unexpected type"); 3006 continue; // Skip dead path with different type 3007 } 3008 } 3009 } else { 3010 debug_only(n->dump();) 3011 assert(false, "EA: unexpected node"); 3012 continue; 3013 } 3014 // push allocation's users on appropriate worklist 3015 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3016 Node *use = n->fast_out(i); 3017 if(use->is_Mem() && use->in(MemNode::Address) == n) { 3018 // Load/store to instance's field 3019 memnode_worklist.append_if_missing(use); 3020 } else if (use->is_MemBar()) { 3021 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge 3022 memnode_worklist.append_if_missing(use); 3023 } 3024 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes 3025 Node* addp2 = find_second_addp(use, n); 3026 if (addp2 != NULL) { 3027 alloc_worklist.append_if_missing(addp2); 3028 } 3029 alloc_worklist.append_if_missing(use); 3030 } else if (use->is_Phi() || 3031 use->is_CheckCastPP() || 3032 use->is_EncodeNarrowPtr() || 3033 use->is_DecodeNarrowPtr() || 3034 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) { 3035 alloc_worklist.append_if_missing(use); 3036 #ifdef ASSERT 3037 } else if (use->is_Mem()) { 3038 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path"); 3039 } else if (use->is_MergeMem()) { 3040 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 3041 } else if (use->is_SafePoint()) { 3042 // Look for MergeMem nodes for calls which reference unique allocation 3043 // (through CheckCastPP nodes) even for debug info. 3044 Node* m = use->in(TypeFunc::Memory); 3045 if (m->is_MergeMem()) { 3046 assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 3047 } 3048 } else if (use->Opcode() == Op_EncodeISOArray) { 3049 if (use->in(MemNode::Memory) == n || use->in(3) == n) { 3050 // EncodeISOArray overwrites destination array 3051 memnode_worklist.append_if_missing(use); 3052 } 3053 } else { 3054 uint op = use->Opcode(); 3055 if (!(op == Op_CmpP || op == Op_Conv2B || 3056 op == Op_CastP2X || op == Op_StoreCM || 3057 op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || 3058 op == Op_StrEquals || op == Op_StrIndexOf)) { 3059 n->dump(); 3060 use->dump(); 3061 assert(false, "EA: missing allocation reference path"); 3062 } 3063 #endif 3064 } 3065 } 3066 3067 } 3068 // New alias types were created in split_AddP(). 3069 uint new_index_end = (uint) _compile->num_alias_types(); 3070 assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1"); 3071 3072 // Phase 2: Process MemNode's from memnode_worklist. compute new address type and 3073 // compute new values for Memory inputs (the Memory inputs are not 3074 // actually updated until phase 4.) 3075 if (memnode_worklist.length() == 0) 3076 return; // nothing to do 3077 while (memnode_worklist.length() != 0) { 3078 Node *n = memnode_worklist.pop(); 3079 if (visited.test_set(n->_idx)) 3080 continue; 3081 if (n->is_Phi() || n->is_ClearArray()) { 3082 // we don't need to do anything, but the users must be pushed 3083 } else if (n->is_MemBar()) { // Initialize, MemBar nodes 3084 // we don't need to do anything, but the users must be pushed 3085 n = n->as_MemBar()->proj_out(TypeFunc::Memory); 3086 if (n == NULL) 3087 continue; 3088 } else if (n->Opcode() == Op_EncodeISOArray) { 3089 // get the memory projection 3090 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3091 Node *use = n->fast_out(i); 3092 if (use->Opcode() == Op_SCMemProj) { 3093 n = use; 3094 break; 3095 } 3096 } 3097 assert(n->Opcode() == Op_SCMemProj, "memory projection required"); 3098 } else { 3099 assert(n->is_Mem(), "memory node required."); 3100 Node *addr = n->in(MemNode::Address); 3101 const Type *addr_t = igvn->type(addr); 3102 if (addr_t == Type::TOP) 3103 continue; 3104 assert (addr_t->isa_ptr() != NULL, "pointer type required."); 3105 int alias_idx = _compile->get_alias_index(addr_t->is_ptr()); 3106 assert ((uint)alias_idx < new_index_end, "wrong alias index"); 3107 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis); 3108 if (_compile->failing()) { 3109 return; 3110 } 3111 if (mem != n->in(MemNode::Memory)) { 3112 // We delay the memory edge update since we need old one in 3113 // MergeMem code below when instances memory slices are separated. 3114 set_map(n, mem); 3115 } 3116 if (n->is_Load()) { 3117 continue; // don't push users 3118 } else if (n->is_LoadStore()) { 3119 // get the memory projection 3120 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3121 Node *use = n->fast_out(i); 3122 if (use->Opcode() == Op_SCMemProj) { 3123 n = use; 3124 break; 3125 } 3126 } 3127 assert(n->Opcode() == Op_SCMemProj, "memory projection required"); 3128 } 3129 } 3130 // push user on appropriate worklist 3131 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3132 Node *use = n->fast_out(i); 3133 if (use->is_Phi() || use->is_ClearArray()) { 3134 memnode_worklist.append_if_missing(use); 3135 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) { 3136 if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores 3137 continue; 3138 memnode_worklist.append_if_missing(use); 3139 } else if (use->is_MemBar()) { 3140 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge 3141 memnode_worklist.append_if_missing(use); 3142 } 3143 #ifdef ASSERT 3144 } else if(use->is_Mem()) { 3145 assert(use->in(MemNode::Memory) != n, "EA: missing memory path"); 3146 } else if (use->is_MergeMem()) { 3147 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 3148 } else if (use->Opcode() == Op_EncodeISOArray) { 3149 if (use->in(MemNode::Memory) == n || use->in(3) == n) { 3150 // EncodeISOArray overwrites destination array 3151 memnode_worklist.append_if_missing(use); 3152 } 3153 } else { 3154 uint op = use->Opcode(); 3155 if (!(op == Op_StoreCM || 3156 (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL && 3157 strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) || 3158 op == Op_AryEq || op == Op_StrComp || 3159 op == Op_StrEquals || op == Op_StrIndexOf)) { 3160 n->dump(); 3161 use->dump(); 3162 assert(false, "EA: missing memory path"); 3163 } 3164 #endif 3165 } 3166 } 3167 } 3168 3169 // Phase 3: Process MergeMem nodes from mergemem_worklist. 3170 // Walk each memory slice moving the first node encountered of each 3171 // instance type to the the input corresponding to its alias index. 3172 uint length = _mergemem_worklist.length(); 3173 for( uint next = 0; next < length; ++next ) { 3174 MergeMemNode* nmm = _mergemem_worklist.at(next); 3175 assert(!visited.test_set(nmm->_idx), "should not be visited before"); 3176 // Note: we don't want to use MergeMemStream here because we only want to 3177 // scan inputs which exist at the start, not ones we add during processing. 3178 // Note 2: MergeMem may already contains instance memory slices added 3179 // during find_inst_mem() call when memory nodes were processed above. 3180 igvn->hash_delete(nmm); 3181 uint nslices = nmm->req(); 3182 for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) { 3183 Node* mem = nmm->in(i); 3184 Node* cur = NULL; 3185 if (mem == NULL || mem->is_top()) 3186 continue; 3187 // First, update mergemem by moving memory nodes to corresponding slices 3188 // if their type became more precise since this mergemem was created. 3189 while (mem->is_Mem()) { 3190 const Type *at = igvn->type(mem->in(MemNode::Address)); 3191 if (at != Type::TOP) { 3192 assert (at->isa_ptr() != NULL, "pointer type required."); 3193 uint idx = (uint)_compile->get_alias_index(at->is_ptr()); 3194 if (idx == i) { 3195 if (cur == NULL) 3196 cur = mem; 3197 } else { 3198 if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) { 3199 nmm->set_memory_at(idx, mem); 3200 } 3201 } 3202 } 3203 mem = mem->in(MemNode::Memory); 3204 } 3205 nmm->set_memory_at(i, (cur != NULL) ? cur : mem); 3206 // Find any instance of the current type if we haven't encountered 3207 // already a memory slice of the instance along the memory chain. 3208 for (uint ni = new_index_start; ni < new_index_end; ni++) { 3209 if((uint)_compile->get_general_index(ni) == i) { 3210 Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni); 3211 if (nmm->is_empty_memory(m)) { 3212 Node* result = find_inst_mem(mem, ni, orig_phis); 3213 if (_compile->failing()) { 3214 return; 3215 } 3216 nmm->set_memory_at(ni, result); 3217 } 3218 } 3219 } 3220 } 3221 // Find the rest of instances values 3222 for (uint ni = new_index_start; ni < new_index_end; ni++) { 3223 const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr(); 3224 Node* result = step_through_mergemem(nmm, ni, tinst); 3225 if (result == nmm->base_memory()) { 3226 // Didn't find instance memory, search through general slice recursively. 3227 result = nmm->memory_at(_compile->get_general_index(ni)); 3228 result = find_inst_mem(result, ni, orig_phis); 3229 if (_compile->failing()) { 3230 return; 3231 } 3232 nmm->set_memory_at(ni, result); 3233 } 3234 } 3235 igvn->hash_insert(nmm); 3236 record_for_optimizer(nmm); 3237 } 3238 3239 // Phase 4: Update the inputs of non-instance memory Phis and 3240 // the Memory input of memnodes 3241 // First update the inputs of any non-instance Phi's from 3242 // which we split out an instance Phi. Note we don't have 3243 // to recursively process Phi's encounted on the input memory 3244 // chains as is done in split_memory_phi() since they will 3245 // also be processed here. 3246 for (int j = 0; j < orig_phis.length(); j++) { 3247 PhiNode *phi = orig_phis.at(j); 3248 int alias_idx = _compile->get_alias_index(phi->adr_type()); 3249 igvn->hash_delete(phi); 3250 for (uint i = 1; i < phi->req(); i++) { 3251 Node *mem = phi->in(i); 3252 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis); 3253 if (_compile->failing()) { 3254 return; 3255 } 3256 if (mem != new_mem) { 3257 phi->set_req(i, new_mem); 3258 } 3259 } 3260 igvn->hash_insert(phi); 3261 record_for_optimizer(phi); 3262 } 3263 3264 // Update the memory inputs of MemNodes with the value we computed 3265 // in Phase 2 and move stores memory users to corresponding memory slices. 3266 // Disable memory split verification code until the fix for 6984348. 3267 // Currently it produces false negative results since it does not cover all cases. 3268 #if 0 // ifdef ASSERT 3269 visited.Reset(); 3270 Node_Stack old_mems(arena, _compile->unique() >> 2); 3271 #endif 3272 for (uint i = 0; i < ideal_nodes.size(); i++) { 3273 Node* n = ideal_nodes.at(i); 3274 Node* nmem = get_map(n->_idx); 3275 assert(nmem != NULL, "sanity"); 3276 if (n->is_Mem()) { 3277 #if 0 // ifdef ASSERT 3278 Node* old_mem = n->in(MemNode::Memory); 3279 if (!visited.test_set(old_mem->_idx)) { 3280 old_mems.push(old_mem, old_mem->outcnt()); 3281 } 3282 #endif 3283 assert(n->in(MemNode::Memory) != nmem, "sanity"); 3284 if (!n->is_Load()) { 3285 // Move memory users of a store first. 3286 move_inst_mem(n, orig_phis); 3287 } 3288 // Now update memory input 3289 igvn->hash_delete(n); 3290 n->set_req(MemNode::Memory, nmem); 3291 igvn->hash_insert(n); 3292 record_for_optimizer(n); 3293 } else { 3294 assert(n->is_Allocate() || n->is_CheckCastPP() || 3295 n->is_AddP() || n->is_Phi(), "unknown node used for set_map()"); 3296 } 3297 } 3298 #if 0 // ifdef ASSERT 3299 // Verify that memory was split correctly 3300 while (old_mems.is_nonempty()) { 3301 Node* old_mem = old_mems.node(); 3302 uint old_cnt = old_mems.index(); 3303 old_mems.pop(); 3304 assert(old_cnt == old_mem->outcnt(), "old mem could be lost"); 3305 } 3306 #endif 3307 } 3308 3309 #ifndef PRODUCT 3310 static const char *node_type_names[] = { 3311 "UnknownType", 3312 "JavaObject", 3313 "LocalVar", 3314 "Field", 3315 "Arraycopy" 3316 }; 3317 3318 static const char *esc_names[] = { 3319 "UnknownEscape", 3320 "NoEscape", 3321 "ArgEscape", 3322 "GlobalEscape" 3323 }; 3324 3325 void PointsToNode::dump(bool print_state) const { 3326 NodeType nt = node_type(); 3327 tty->print("%s ", node_type_names[(int) nt]); 3328 if (print_state) { 3329 EscapeState es = escape_state(); 3330 EscapeState fields_es = fields_escape_state(); 3331 tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]); 3332 if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) 3333 tty->print("NSR "); 3334 } 3335 if (is_Field()) { 3336 FieldNode* f = (FieldNode*)this; 3337 if (f->is_oop()) 3338 tty->print("oop "); 3339 if (f->offset() > 0) 3340 tty->print("+%d ", f->offset()); 3341 tty->print("("); 3342 for (BaseIterator i(f); i.has_next(); i.next()) { 3343 PointsToNode* b = i.get(); 3344 tty->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : "")); 3345 } 3346 tty->print(" )"); 3347 } 3348 tty->print("["); 3349 for (EdgeIterator i(this); i.has_next(); i.next()) { 3350 PointsToNode* e = i.get(); 3351 tty->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : ""); 3352 } 3353 tty->print(" ["); 3354 for (UseIterator i(this); i.has_next(); i.next()) { 3355 PointsToNode* u = i.get(); 3356 bool is_base = false; 3357 if (PointsToNode::is_base_use(u)) { 3358 is_base = true; 3359 u = PointsToNode::get_use_node(u)->as_Field(); 3360 } 3361 tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : ""); 3362 } 3363 tty->print(" ]] "); 3364 if (_node == NULL) 3365 tty->print_cr("<null>"); 3366 else 3367 _node->dump(); 3368 } 3369 3370 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { 3371 bool first = true; 3372 int ptnodes_length = ptnodes_worklist.length(); 3373 for (int i = 0; i < ptnodes_length; i++) { 3374 PointsToNode *ptn = ptnodes_worklist.at(i); 3375 if (ptn == NULL || !ptn->is_JavaObject()) 3376 continue; 3377 PointsToNode::EscapeState es = ptn->escape_state(); 3378 if ((es != PointsToNode::NoEscape) && !Verbose) { 3379 continue; 3380 } 3381 Node* n = ptn->ideal_node(); 3382 if (n->is_Allocate() || (n->is_CallStaticJava() && 3383 n->as_CallStaticJava()->is_boxing_method())) { 3384 if (first) { 3385 tty->cr(); 3386 tty->print("======== Connection graph for "); 3387 _compile->method()->print_short_name(); 3388 tty->cr(); 3389 first = false; 3390 } 3391 ptn->dump(); 3392 // Print all locals and fields which reference this allocation 3393 for (UseIterator j(ptn); j.has_next(); j.next()) { 3394 PointsToNode* use = j.get(); 3395 if (use->is_LocalVar()) { 3396 use->dump(Verbose); 3397 } else if (Verbose) { 3398 use->dump(); 3399 } 3400 } 3401 tty->cr(); 3402 } 3403 } 3404 } 3405 #endif