/*
 * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  // Add ConP(#NULL) and ConN(#NULL) nodes.
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
  _pcmp_neq = NULL; // Should be initialized
  _pcmp_eq  = NULL;
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
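  //
  // For example, a method-local allocation such as
  //
  //   Point p = new Point(x, y); return p.x + p.y;
  //
  // is represented by an Allocate macro node and becomes a candidate for
  // scalar replacement if the analysis below proves 'p' never escapes.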
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate())
      return true;
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con()))
        return true;
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  ResourceMark rm;

  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non-escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0)
    igvn->hash_delete(oop_null);
  if (noop_null->outcnt() == 0)
    igvn->hash_delete(noop_null);
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<Node*> storestore_worklist;
  GrowableArray<ArrayCopyNode*>  arraycopy_worklist;
  GrowableArray<PointsToNode*>   ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_worklist;
  GrowableArray<FieldNode*>      oop_fields_worklist;
  GrowableArray<SafePointNode*>  sfn_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), NULL);  // preallocate space
  // Initialize worklist
  if (C->root() != NULL) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on the ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);
    // Create PointsTo nodes and add them to the Connection Graph. Called
    // only once per ideal node since ideal_nodes is a Unique_Node_List.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != NULL && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only the results of allocations and java static calls are interesting.
          non_escaped_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    if (n->is_MergeMem()) {
      // Collect all MergeMem nodes to add memory slices for
      // scalar replaceable objects in split_unique_types().
      _mergemem_worklist.append(n->as_MergeMem());
    } else if (OptimizePtrCompare && n->is_Cmp() &&
               (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
      // Collect pointer compare nodes.
      ptr_cmp_worklist.append(n);
    } else if (n->is_MemBarStoreStore()) {
      // Collect all MemBarStoreStore nodes so that depending on the
      // escape status of the associated Allocate node some of them
      // may be eliminated.
      storestore_worklist.append(n);
    } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
               (n->req() > MemBarNode::Precedent)) {
      record_for_optimizer(n);
#ifdef ASSERT
    } else if (n->is_AddP()) {
      // Collect address nodes for graph verification.
      addp_worklist.append(n);
#endif
    } else if (n->is_ArrayCopy()) {
      // Keep a list of ArrayCopy nodes so that, if one of their inputs is
      // non-escaping, we can record a unique type.
      arraycopy_worklist.append(n->as_ArrayCopy());
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      ideal_nodes.push(m);
    }
    if (n->is_SafePoint()) {
      sfn_worklist.append(n->as_SafePoint());
    }
  }
  if (non_escaped_worklist.length() == 0) {
    _collecting = false;
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while(delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }
  int ptnodes_length = ptnodes_worklist.length();

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and that all
    // local vars have edges.
    _verify = true;
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // The bytecode analyzer BCEscapeAnalyzer, used for Call nodes
  // processing, calls CI to resolve symbols (types, fields, methods)
  // referenced in bytecode. During symbol resolution the VM may throw
  // an exception which CI cleans up and converts to a compilation failure.
  if (C->failing())  return false;

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or hit time or iterations limits.
    _collecting = false;
    return false;
  }

  // 3. Adjust scalar_replaceable state of nonescaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
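  //    An object is disqualified, for example, when it is stored into an
  //    unknown element of an array; the full set of cases is enumerated
  //    in adjust_scalar_replaceable_state().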
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (n->is_CallStaticJava()) {
      n->as_CallStaticJava()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn);
      if (ptn->scalar_replaceable()) {
        alloc_worklist.append(ptn->ideal_node());
      }
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  if (has_scalar_replaceable_candidates &&
      C->AliasLevel() >= 3 && EliminateAllocations) {
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist);
    if (C->failing())  return false;
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    } else if (C->AliasLevel() < 3) {
      tty->print(" since AliasLevel < 3 ===");
    }
    tty->cr();
#endif
  }

  // Annotate at safepoints if they have <= ArgEscape objects in their scope and at
  // java calls if they pass ArgEscape objects as parameters.
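  // These flags tell later phases that, although the referenced objects
  // do not globally escape, JVMTI agents could still observe them, so
  // enough information must be kept to describe them at runtime.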
  if (has_non_escaping_obj &&
      (C->env()->should_retain_local_variables() ||
       C->env()->jvmti_can_get_owned_monitor_info() ||
       C->env()->jvmti_can_walk_any_space() ||
       DeoptimizeObjectsALot)) {
    int sfn_length = sfn_worklist.length();
    for (int next = 0; next < sfn_length; next++) {
      SafePointNode* sfn = sfn_worklist.at(next);
      bool found_not_global_escape = false;
      for (JVMState* jvms = sfn->jvms(); jvms && !found_not_global_escape; jvms = jvms->caller()) {
        if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() ||
            DeoptimizeObjectsALot) {
          // Jvmti agents can access locals. Must provide info about local objects at runtime.
          int num_locs = jvms->loc_size();
          for (int idx = 0; idx < num_locs && !found_not_global_escape; idx++) {
            Node* l = sfn->local(jvms, idx);
            found_not_global_escape = not_global_escape(l);
          }
        }
        if (C->env()->jvmti_can_get_owned_monitor_info() ||
            C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) {
          // Jvmti agents can read monitors. Must provide info about locked objects at runtime.
          int num_mon = jvms->nof_monitors();
          for (int idx = 0; idx < num_mon && !found_not_global_escape; idx++) {
            Node* m = sfn->monitor_obj(jvms, idx);
            found_not_global_escape = m != NULL && not_global_escape(m);
          }
        }
      }
      sfn->set_not_global_escape_in_scope(found_not_global_escape);

      if (sfn->is_CallJava()) {
        CallJavaNode* call = sfn->as_CallJava();
        bool found_arg_escape_in_args = false;
        if (call->method() != NULL) {
          uint max_idx = TypeFunc::Parms + call->method()->arg_size();
          for (uint idx = TypeFunc::Parms; idx < max_idx && !found_arg_escape_in_args; idx++) {
            Node* p = call->in(idx);
            found_arg_escape_in_args = not_global_escape(p);
          }
        } else {
          const char* name = call->as_CallStaticJava()->_name;
          assert(name != NULL, "no name");
          // no arg escapes through uncommon traps
          if (strcmp(name, "uncommon_trap") != 0) {
            // process_call_arguments() assumes that all arguments escape globally
            const TypeTuple* d = call->tf()->domain();
            for (uint i = TypeFunc::Parms; i < d->cnt() && !found_arg_escape_in_args; i++) {
              const Type* at = d->field_at(i);
              if (at->isa_oopptr() != NULL) {
                found_arg_escape_in_args = true;
              }
            }
          }
        }
        call->set_arg_escape(found_arg_escape_in_args);
      }
    }
  }

  return has_non_escaping_obj;
}

// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != NULL) {
    Node* adr = n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == NULL ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape,
                           adr, delayed_worklist);
  }
}

// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
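// In outline: object sources (allocations, oop constants, ThreadLocal)
// become JavaObject nodes, AddP nodes become Field nodes, and
// pointer-copying nodes (CastPP, Phi, Proj, CMoveP and similar) become
// LocalVar nodes. Edges whose inputs may not be registered yet are
// pushed on delayed_worklist and added later by add_final_edges().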
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method should not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != NULL)
    return; // No need to redefine PointsTo node during first iteration.

  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on IGVN worklist to process them during
      // first IGVN optimization when escape information is still available.
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != NULL && strcmp(name, "uncommon_trap") == 0)
          return; // Skip uncommon traps
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
      if ((n->as_Call()->returns_pointer() &&
           n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
          (n->is_CallStaticJava() &&
           n->as_CallStaticJava()->is_boxing_method())) {
        add_call_node(n->as_Call());
      }
    }
    return;
  }
  // Put this check here to process call arguments since some call nodes
  // point to phantom_obj.
  if (n_ptn == phantom_obj || n_ptn == null_obj)
    return; // Skip predefined nodes.

  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      // Field nodes are created for all field types. They are used in
      // adjust_scalar_replaceable_state() and split_unique_types().
      // Note, non-oop fields will have only base edges in Connection
      // Graph because such fields are not used for oop loads and stores.
      int offset = address_offset(n, igvn);
      add_field(n, PointsToNode::NoEscape, offset);
      if (ptn_base == NULL) {
        delayed_worklist->push(n); // Process it later.
      } else {
        n_ptn = ptnode_adr(n_idx);
        add_base(n_ptn->as_Field(), ptn_base);
      }
      break;
    }
    case Op_CastX2P: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), delayed_worklist);
      break;
    }
    case Op_CMoveP: {
      add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during first iteration because some could be
      // not defined yet.
      delayed_worklist->push(n);
      break;
    }
    case Op_ConP:
    case Op_ConN:
    case Op_ConNKlass: {
      // assume all oop constants globally escape except for null
      PointsToNode::EscapeState es;
      const Type* t = igvn->type(n);
      if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
        es = PointsToNode::NoEscape;
      } else {
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(n, es);
      break;
    }
    case Op_CreateEx: {
      // assume that all exception objects globally escape
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadKlass:
    case Op_LoadNKlass: {
      // Unknown class is loaded
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadPLocked: {
      add_objload_to_connection_graph(n, delayed_worklist);
      break;
    }
    case Op_Parm: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_PartialSubtypeCheck: {
      // Produces Null or notNull and is used only in CmpP so
      // phantom_obj could be used.
      map_ideal_node(n, phantom_obj); // Result is unknown
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during first iteration because some could be
        // not defined yet.
        delayed_worklist->push(n);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape,
                               n->in(0), delayed_worklist);
      }
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), delayed_worklist);
      }
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      // fallthrough
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_StorePConditional:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN: {
      add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
      break;
    }
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      add_local_var(n, PointsToNode::ArgEscape);
      delayed_worklist->push(n); // Process it later.
      break;
    }
    case Op_ThreadLocal: {
      add_java_object(n, PointsToNode::ArgEscape);
      break;
    }
    default:
      ; // Do nothing for nodes not related to EA.
  }
  return;
}

#ifdef ASSERT
#define ELSE_FAIL(name)                                \
      /* Should not be called for non-pointer type. */ \
      n->dump(1);                                      \
      assert(false, name);                             \
      break;
#else
#define ELSE_FAIL(name) \
      break;
#endif

// Add final simple edges to graph.
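// At this point all nodes reachable from root have been registered by
// add_node_to_connection_graph(), so both endpoints of every edge added
// here are expected to exist already (see the "node should be registered"
// asserts below).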
void ConnectionGraph::add_final_edges(Node *n) {
  PointsToNode* n_ptn = ptnode_adr(n->_idx);
#ifdef ASSERT
  if (_verify && n_ptn->is_JavaObject())
    return; // This method does not change graph for JavaObject.
#endif

  if (n->is_Call()) {
    process_call_arguments(n->as_Call());
    return;
  }
  assert(n->is_Store() || n->is_LoadStore() ||
         (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
         "node should be registered already");
  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      assert(ptn_base != NULL, "field's base should be registered");
      add_base(n_ptn->as_Field(), ptn_base);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), NULL);
      break;
    }
    case Op_CMoveP: {
      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == NULL)
          continue;  // ignore NULL
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n)
          continue;  // ignore top or inputs which go back to this node
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadPLocked: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = _igvn->type(n);
      if (t->make_ptr() != NULL) {
        Node* adr = n->in(MemNode::Address);
        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
        break;
      }
      ELSE_FAIL("Op_LoadP");
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        for (uint i = 1; i < n->req(); i++) {
          Node* in = n->in(i);
          if (in == NULL)
            continue;  // ignore NULL
          Node* uncast_in = in->uncast();
          if (uncast_in->is_top() || uncast_in == n)
            continue;  // ignore top or inputs which go back to this node
          PointsToNode* ptn = ptnode_adr(in->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
        break;
      }
      ELSE_FAIL("Op_Phi");
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
        break;
      }
      ELSE_FAIL("Op_Proj");
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), NULL);
        break;
      }
      ELSE_FAIL("Op_Return");
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_StorePConditional:
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      if (add_final_edges_unsafe_access(n, opcode)) {
        break;
      }
      ELSE_FAIL("Op_StoreP");
    }
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      // char[]/byte[] arrays passed to string intrinsics do not escape but
      // they are not scalar replaceable. Adjust escape state for them.
      // Start from the in(2) edge since in(1) is the memory edge.
      for (uint i = 2; i < n->req(); i++) {
        Node* adr = n->in(i);
        const Type* at = _igvn->type(adr);
        if (!adr->is_top() && at->isa_ptr()) {
          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
                 at->isa_ptr() != NULL, "expecting a pointer");
          if (adr->is_AddP()) {
            adr = get_addp_base(adr);
          }
          PointsToNode* ptn = ptnode_adr(adr->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    default: {
      // This method should be called only for EA specific nodes which may
      // miss some edges when they were created.
#ifdef ASSERT
      n->dump(1);
#endif
      guarantee(false, "unknown node");
    }
  }
  return;
}

void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
  Node* adr = n->in(MemNode::Address);
  const Type* adr_type = _igvn->type(adr);
  adr_type = adr_type->make_ptr();
  if (adr_type == NULL) {
    return; // skip dead nodes
  }
  if (adr_type->isa_oopptr()
      || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
          && adr_type == TypeRawPtr::NOTNULL
          && adr->in(AddPNode::Address)->is_Proj()
          && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
    delayed_worklist->push(n); // Process it later.
#ifdef ASSERT
    assert(adr->is_AddP(), "expecting an AddP");
    if (adr_type == TypeRawPtr::NOTNULL) {
      // Verify a raw address for a store captured by Initialize node.
      int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
      assert(offs != Type::OffsetBot, "offset must be a constant");
    }
#endif
  } else {
    // Ignore copying the displaced header to the BoxNode (OSR compilation).
    if (adr->is_BoxLock()) {
      return;
    }
    // Stored value escapes in unsafe access.
    if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
      delayed_worklist->push(n); // Process unsafe access later.
      return;
    }
#ifdef ASSERT
    n->dump(1);
    assert(false, "not unsafe");
#endif
  }
}

bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {
  Node* adr = n->in(MemNode::Address);
  const Type *adr_type = _igvn->type(adr);
  adr_type = adr_type->make_ptr();
#ifdef ASSERT
  if (adr_type == NULL) {
    n->dump(1);
    assert(adr_type != NULL, "dead node should not be on list");
    return true;
  }
#endif

  if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
      opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
    add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
  }

  if (adr_type->isa_oopptr()
      || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
          && adr_type == TypeRawPtr::NOTNULL
          && adr->in(AddPNode::Address)->is_Proj()
          && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
    // Point Address to Value
    PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
    assert(adr_ptn != NULL &&
           adr_ptn->as_Field()->is_oop(), "node should be registered");
    Node* val = n->in(MemNode::ValueIn);
    PointsToNode* ptn = ptnode_adr(val->_idx);
    assert(ptn != NULL, "node should be registered");
    add_edge(adr_ptn, ptn);
    return true;
  } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
    // Stored value escapes in unsafe access.
    Node* val = n->in(MemNode::ValueIn);
    PointsToNode* ptn = ptnode_adr(val->_idx);
    assert(ptn != NULL, "node should be registered");
    set_escape_state(ptn, PointsToNode::GlobalEscape);
    // Add edge to object for unsafe access with offset.
    PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
    assert(adr_ptn != NULL, "node should be registered");
    if (adr_ptn->is_Field()) {
      assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
      add_edge(adr_ptn, ptn);
    }
    return true;
  }
  return false;
}

void ConnectionGraph::add_call_node(CallNode* call) {
  assert(call->returns_pointer(), "only for call which returns pointer");
  uint call_idx = call->_idx;
  if (call->is_Allocate()) {
    Node* k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != NULL, "TypeKlassPtr required.");
    ciKlass* cik = kt->klass();
    PointsToNode::EscapeState es = PointsToNode::NoEscape;
    bool scalar_replaceable = true;
    if (call->is_AllocateArray()) {
      if (!cik->is_array_klass()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is not constant or too big.
          scalar_replaceable = false;
        }
      }
    } else {  // Allocate instance
      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
          cik->is_subclass_of(_compile->env()->Reference_klass()) ||
          !cik->is_instance_klass() || // StressReflectiveCode
          !cik->as_instance_klass()->can_be_instantiated() ||
          cik->as_instance_klass()->has_finalizer()) {
        es = PointsToNode::GlobalEscape;
      }
    }
    add_java_object(call, es);
    PointsToNode* ptn = ptnode_adr(call_idx);
    if (!scalar_replaceable && ptn->scalar_replaceable()) {
      ptn->set_scalar_replaceable(false);
    }
  } else if (call->is_CallStaticJava()) {
    // Call nodes could be different types:
    //
    // 1. CallDynamicJavaNode (what happened during call is unknown):
    //
    //    - mapped to GlobalEscape JavaObject node if oop is returned;
    //
    //    - all oop arguments are escaping globally;
    //
    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
    //
    //    - the same as CallDynamicJavaNode if we can't do bytecode analysis;
    //
    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
    //    - mapped to NoEscape JavaObject node if a non-escaping object
    //      allocated during the call is returned;
    //    - mapped to ArgEscape LocalVar node pointing to object arguments
    //      which are returned and do not escape during the call;
    //
    //    - oop arguments' escaping status is defined by bytecode analysis;
    //
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record whether the call's return value escapes.
    ciMethod* meth = call->as_CallJava()->method();
    if (meth == NULL) {
      const char* name = call->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
      // Returns a newly allocated unescaped object.
      add_java_object(call, PointsToNode::NoEscape);
      ptnode_adr(call_idx)->set_scalar_replaceable(false);
    } else if (meth->is_boxing_method()) {
      // Returns boxing object
      PointsToNode::EscapeState es;
      vmIntrinsics::ID intr = meth->intrinsic_id();
      if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
        // It does not escape if object is always allocated.
        es = PointsToNode::NoEscape;
      } else {
        // It escapes globally if object could be loaded from cache.
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(call, es);
    } else {
      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
      call_analyzer->copy_dependencies(_compile->dependencies());
      if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated unescaped object, simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
        add_java_object(call, PointsToNode::NoEscape);
        ptnode_adr(call_idx)->set_scalar_replaceable(false);
      } else {
        // Determine whether any arguments are returned.
        const TypeTuple* d = call->tf()->domain();
        bool ret_arg = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          if (d->field_at(i)->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
            ret_arg = true;
            break;
          }
        }
        if (ret_arg) {
          add_local_var(call, PointsToNode::ArgEscape);
        } else {
          // Returns unknown object.
          map_ideal_node(call, phantom_obj);
        }
      }
    }
  } else {
    // Another type of call, assume the worst case:
    // returned value is unknown and globally escapes.
    assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
    map_ideal_node(call, phantom_obj);
  }
}

void ConnectionGraph::process_call_arguments(CallNode *call) {
  bool is_arraycopy = false;
  switch (call->Opcode()) {
#ifdef ASSERT
    case Op_Allocate:
    case Op_AllocateArray:
    case Op_Lock:
    case Op_Unlock:
      assert(false, "should be done already");
      break;
#endif
    case Op_ArrayCopy:
    case Op_CallLeafNoFP:
      // Most array copies are ArrayCopy nodes at this point but there
      // are still a few direct calls to the copy subroutines (See
      // PhaseStringOpts::copy_string())
      is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
        call->as_CallLeaf()->is_call_to_arraycopystub();
      // fall through
    case Op_CallLeaf: {
      // Stub calls: objects do not escape but they are not scalar replaceable.
      // Adjust escape state for outgoing arguments.
      const TypeTuple* d = call->tf()->domain();
      bool src_has_oops = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        Node *arg = call->in(i);
        if (arg == NULL) {
          continue;
        }
        const Type *aat = _igvn->type(arg);
        if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
          continue;
        if (arg->is_AddP()) {
          //
          // The inline_native_clone() case when the arraycopy stub is called
          // after the allocation before Initialize and CheckCastPP nodes.
          // Or normal arraycopy for object arrays case.
          //
          // Set AddP's base (Allocate) as not scalar replaceable since
          // pointer to the base (with offset) is passed as argument.
          //
          arg = get_addp_base(arg);
        }
        PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
        assert(arg_ptn != NULL, "should be registered");
        PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
        if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != NULL, "expecting a Ptr");
          bool arg_has_oops = aat->isa_oopptr() &&
                              (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
                               (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
          if (i == TypeFunc::Parms) {
            src_has_oops = arg_has_oops;
          }
          //
          // src or dst could be j.l.Object when the other is a basic type array:
          //
          //   arraycopy(char[],0,Object*,0,size);
          //   arraycopy(Object*,0,char[],0,size);
          //
          // Don't add edges in such cases.
          //
          bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
                                       arg_has_oops && (i > TypeFunc::Parms);
#ifdef ASSERT
          if (!(is_arraycopy ||
                BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
                (call->as_CallLeaf()->_name != NULL &&
                 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0)
                ))) {
            call->dump();
            fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
          }
#endif
          // Always process arraycopy's destination object since
          // we need to add all possible edges to references in
          // source object.
          if (arg_esc >= PointsToNode::ArgEscape &&
              !arg_is_arraycopy_dest) {
            continue;
          }
          PointsToNode::EscapeState es = PointsToNode::ArgEscape;
          if (call->is_ArrayCopy()) {
            ArrayCopyNode* ac = call->as_ArrayCopy();
            if (ac->is_clonebasic() ||
                ac->is_arraycopy_validated() ||
                ac->is_copyof_validated() ||
                ac->is_copyofrange_validated()) {
              es = PointsToNode::NoEscape;
            }
          }
          set_escape_state(arg_ptn, es);
          if (arg_is_arraycopy_dest) {
            Node* src = call->in(TypeFunc::Parms);
            if (src->is_AddP()) {
              src = get_addp_base(src);
            }
            PointsToNode* src_ptn = ptnode_adr(src->_idx);
            assert(src_ptn != NULL, "should be registered");
            if (arg_ptn != src_ptn) {
              // Special arraycopy edge:
              // A destination object's field can't have the source object
              // as base since the objects' escape states are not related.
              // Only escape state of destination object's fields affects
              // escape state of fields in source object.
              add_arraycopy(call, es, src_ptn, arg_ptn);
            }
          }
        }
      }
      break;
    }
    case Op_CallStaticJava: {
      // For a static call, we know exactly what method is being called.
      // Use bytecode estimator to record the call's escape effects.
#ifdef ASSERT
      const char* name = call->as_CallStaticJava()->_name;
      assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
#endif
      ciMethod* meth = call->as_CallJava()->method();
      if ((meth != NULL) && meth->is_boxing_method()) {
        break; // Boxing methods do not modify any oops.
      }
      BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
      // fall-through if not a Java method or no analyzer information
      if (call_analyzer != NULL) {
        PointsToNode* call_ptn = ptnode_adr(call->_idx);
        const TypeTuple* d = call->tf()->domain();
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          int k = i - TypeFunc::Parms;
          Node* arg = call->in(i);
          PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
          if (at->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(k)) {
            // The call returns arguments.
            if (call_ptn != NULL) { // Is call's result used?
              assert(call_ptn->is_LocalVar(), "node should be registered");
              assert(arg_ptn != NULL, "node should be registered");
              add_edge(call_ptn, arg_ptn);
            }
          }
          if (at->isa_oopptr() != NULL &&
              arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
            if (!call_analyzer->is_arg_stack(k)) {
              // The argument globally escapes.
              set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
            } else {
              set_escape_state(arg_ptn, PointsToNode::ArgEscape);
              if (!call_analyzer->is_arg_local(k)) {
                // The argument itself doesn't escape, but any fields might.
                set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
              }
            }
          }
        }
        if (call_ptn != NULL && call_ptn->is_LocalVar()) {
          // The call returns arguments.
          assert(call_ptn->edge_count() > 0, "sanity");
          if (!call_analyzer->is_return_local()) {
            // Also returns unknown object.
            add_edge(call_ptn, phantom_obj);
          }
        }
        break;
      }
    }
    default: {
      // Fall through here if not a Java method or there is no analyzer
      // information, or for some other type of call; assume the worst case:
      // all arguments globally escape.
      const TypeTuple* d = call->tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != NULL) {
          Node* arg = call->in(i);
          if (arg->is_AddP()) {
            arg = get_addp_base(arg);
          }
          assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
          set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
        }
      }
    }
  }
}


// Finish Graph construction.
bool ConnectionGraph::complete_connection_graph(
                         GrowableArray<PointsToNode*>&   ptnodes_worklist,
                         GrowableArray<JavaObjectNode*>& non_escaped_worklist,
                         GrowableArray<JavaObjectNode*>& java_objects_worklist,
                         GrowableArray<FieldNode*>&      oop_fields_worklist) {
  // Normally only 1-3 passes are needed to build the Connection Graph,
  // depending on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
  // Set the limit to 20 to catch situations when something went wrong and
  // bail out of Escape Analysis.
  // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.
#define CG_BUILD_ITER_LIMIT 20

  // Propagate GlobalEscape and ArgEscape escape states and check that
  // we still have non-escaping objects. The method pushes on _worklist
  // Field nodes which reference phantom_object.
  if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
    return false; // Nothing to do.
  }
  // Now propagate references to all JavaObject nodes.
  int java_objects_length = java_objects_worklist.length();
  elapsedTimer time;
  bool timeout = false;
  int new_edges = 1;
  int iterations = 0;
  do {
    while ((new_edges > 0) &&
           (iterations++ < CG_BUILD_ITER_LIMIT)) {
      double start_time = time.seconds();
      time.start();
      new_edges = 0;
      // Propagate references to phantom_object for nodes pushed on _worklist
      // by find_non_escaped_objects() and find_field_value().
      new_edges += add_java_object_edges(phantom_obj, false);
      for (int next = 0; next < java_objects_length; ++next) {
        JavaObjectNode* ptn = java_objects_worklist.at(next);
        new_edges += add_java_object_edges(ptn, true);

#define SAMPLE_SIZE 4
        if ((next % SAMPLE_SIZE) == 0) {
          // Every 4 iterations, estimate how much time it will take
          // to complete graph construction.
          time.stop();
          // Poll for requests from the shutdown mechanism to quiesce the
          // compiler because Connection Graph construction may take a long time.
          CompileBroker::maybe_block();
          double stop_time = time.seconds();
          double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
          double time_until_end = time_per_iter * (double)(java_objects_length - next);
          if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
            timeout = true;
            break; // Timeout
          }
          start_time = stop_time;
          time.start();
        }
#undef SAMPLE_SIZE

      }
      if (timeout) break;
      if (new_edges > 0) {
        // Update escape states on each iteration if graph was updated.
        if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
          return false; // Nothing to do.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    }
    if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) {
      time.start();
      // Find fields which have unknown value.
      int fields_length = oop_fields_worklist.length();
      for (int next = 0; next < fields_length; next++) {
        FieldNode* field = oop_fields_worklist.at(next);
        if (field->edge_count() == 0) {
          new_edges += find_field_value(field);
          // This code may have added new edges to phantom_object.
          // Need another cycle to propagate references to phantom_object.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    } else {
      new_edges = 0; // Bailout
    }
  } while (new_edges > 0);

  // Bailout if passed limits.
  if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) {
    Compile* C = _compile;
    if (C->log() != NULL) {
      C->log()->begin_elem("connectionGraph_bailout reason='reached ");
      C->log()->text("%s", timeout ? "time" : "iterations");
      C->log()->end_elem(" limit'");
    }
    assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
           time.seconds(), iterations, nodes_size(), ptnodes_worklist.length());
    // Possible infinite build_connection_graph loop,
    // bailout (no changes to ideal graph were made).
    return false;
  }
#ifdef ASSERT
  if (Verbose && PrintEscapeAnalysis) {
    tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d",
                  iterations, nodes_size(), ptnodes_worklist.length());
  }
#endif

#undef CG_BUILD_ITER_LIMIT

  // Find fields initialized by NULL for non-escaping Allocations.
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    PointsToNode::EscapeState es = ptn->escape_state();
    assert(es <= PointsToNode::ArgEscape, "sanity");
    if (es == PointsToNode::NoEscape) {
      if (find_init_values(ptn, null_obj, _igvn) > 0) {
        // Adding references to NULL object does not change escape states
        // since it does not escape. Also no fields are added to NULL object.
        add_java_object_edges(null_obj, false);
      }
    }
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      // The object allocated by this Allocate node will never be
      // seen by another thread. Mark it so that when it is
      // expanded no MemBarStoreStore is added.
      InitializeNode* ini = n->as_Allocate()->initialization();
      if (ini != NULL)
        ini->set_does_not_escape();
    }
  }
  return true; // Finished graph construction.
}

// Propagate GlobalEscape and ArgEscape escape states to all nodes
// and check that we still have non-escaping java objects.
bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
                                               GrowableArray<JavaObjectNode*>& non_escaped_worklist) {
  GrowableArray<PointsToNode*> escape_worklist;
  // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
  int ptnodes_length = ptnodes_worklist.length();
  for (int next = 0; next < ptnodes_length; ++next) {
    PointsToNode* ptn = ptnodes_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::ArgEscape ||
        ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
      escape_worklist.push(ptn);
    }
  }
  // Set escape states to referenced nodes (edges list).
  while (escape_worklist.length() > 0) {
    PointsToNode* ptn = escape_worklist.pop();
    PointsToNode::EscapeState es = ptn->escape_state();
    PointsToNode::EscapeState field_es = ptn->fields_escape_state();
    if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
        es >= PointsToNode::ArgEscape) {
      // GlobalEscape or ArgEscape state of field means it has unknown value.
      if (add_edge(ptn, phantom_obj)) {
        // New edge was added
        add_field_uses_to_worklist(ptn->as_Field());
      }
    }
    for (EdgeIterator i(ptn); i.has_next(); i.next()) {
      PointsToNode* e = i.get();
      if (e->is_Arraycopy()) {
        assert(ptn->arraycopy_dst(), "sanity");
        // Propagate only fields escape state through arraycopy edge.
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es);
          escape_worklist.push(e);
        }
      } else if (es >= field_es) {
        // fields_escape_state is also set to 'es' if it is less than 'es'.
        if (e->escape_state() < es) {
          set_escape_state(e, es);
          escape_worklist.push(e);
        }
      } else {
        // Propagate field escape state.
        bool es_changed = false;
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es);
          es_changed = true;
        }
        if ((e->escape_state() < field_es) &&
            e->is_Field() && ptn->is_JavaObject() &&
            e->as_Field()->is_oop()) {
          // Change escape state of referenced fields.
          set_escape_state(e, field_es);
          es_changed = true;
        } else if (e->escape_state() < es) {
          set_escape_state(e, es);
          es_changed = true;
        }
        if (es_changed) {
          escape_worklist.push(e);
        }
      }
    }
  }
  // Remove escaped objects from non_escaped list.
  for (int next = non_escaped_worklist.length()-1; next >= 0; --next) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
      non_escaped_worklist.delete_at(next);
    }
    if (ptn->escape_state() == PointsToNode::NoEscape) {
      // Find fields in non-escaped allocations which have unknown value.
      find_init_values(ptn, phantom_obj, NULL);
    }
  }
  return (non_escaped_worklist.length() > 0);
}

// Add all references to JavaObject node by walking over all uses.
int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
  int new_edges = 0;
  if (populate_worklist) {
    // Populate _worklist by uses of jobj's uses.
    for (UseIterator i(jobj); i.has_next(); i.next()) {
      PointsToNode* use = i.get();
      if (use->is_Arraycopy())
        continue;
      add_uses_to_worklist(use);
      if (use->is_Field() && use->as_Field()->is_oop()) {
        // Put on worklist all field's uses (loads) and
        // related field nodes (same base and offset).
        add_field_uses_to_worklist(use->as_Field());
      }
    }
  }
  for (int l = 0; l < _worklist.length(); l++) {
    PointsToNode* use = _worklist.at(l);
    if (PointsToNode::is_base_use(use)) {
      // Add reference from jobj to field and from field to jobj (field's base).
      use = PointsToNode::get_use_node(use)->as_Field();
      if (add_base(use->as_Field(), jobj)) {
        new_edges++;
      }
      continue;
    }
    assert(!use->is_JavaObject(), "sanity");
    if (use->is_Arraycopy()) {
      if (jobj == null_obj) // NULL object does not have field edges
        continue;
      // Added edge from Arraycopy node to arraycopy's source java object
      if (add_edge(use, jobj)) {
        jobj->set_arraycopy_src();
        new_edges++;
      }
      // and stop here.
      continue;
    }
    if (!add_edge(use, jobj))
      continue; // No new edge added, there was such edge already.
    new_edges++;
    if (use->is_LocalVar()) {
      add_uses_to_worklist(use);
      if (use->arraycopy_dst()) {
        for (EdgeIterator i(use); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_Arraycopy()) {
            if (jobj == null_obj) // NULL object does not have field edges
              continue;
            // Add edge from arraycopy's destination java object to Arraycopy node.
            if (add_edge(jobj, e)) {
              new_edges++;
              jobj->set_arraycopy_dst();
            }
          }
        }
      }
    } else {
      // Added a new edge to the values stored in the field.
      // Put on worklist all field's uses (loads) and
      // related field nodes (same base and offset).
      add_field_uses_to_worklist(use->as_Field());
    }
  }
  _worklist.clear();
  _in_worklist.Reset();
  return new_edges;
}

// Put on worklist all related field nodes.
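// That is, all uses (loads) of 'field' plus Field nodes with the same
// offset reachable from the same bases, including fields of arraycopy
// destination objects, since a value stored through one may be observed
// through the others.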
void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
  assert(field->is_oop(), "sanity");
  int offset = field->offset();
  add_uses_to_worklist(field);
  // Loop over all bases of this field and push on worklist Field nodes
  // with the same offset and base (since they may reference the same field).
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    add_fields_to_worklist(field, base);
    // Check if the base was source object of arraycopy and go over arraycopy's
    // destination objects since values stored to a field of source object are
    // accessible by uses (loads) of fields of destination objects.
    if (base->arraycopy_src()) {
      for (UseIterator j(base); j.has_next(); j.next()) {
        PointsToNode* arycp = j.get();
        if (arycp->is_Arraycopy()) {
          for (UseIterator k(arycp); k.has_next(); k.next()) {
            PointsToNode* abase = k.get();
            if (abase->arraycopy_dst() && abase != base) {
              // Look for the same arraycopy reference.
              add_fields_to_worklist(field, abase);
            }
          }
        }
      }
    }
  }
}

// Put on worklist all related field nodes.
void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
  int offset = field->offset();
  if (base->is_LocalVar()) {
    for (UseIterator j(base); j.has_next(); j.next()) {
      PointsToNode* f = j.get();
      if (PointsToNode::is_base_use(f)) { // Field
        f = PointsToNode::get_use_node(f);
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  } else {
    assert(base->is_JavaObject(), "sanity");
    if (// Skip phantom_object since it is only used to indicate that
        // this field's content globally escapes.
        (base != phantom_obj) &&
        // NULL object node does not have fields.
        (base != null_obj)) {
      for (EdgeIterator i(base); i.has_next(); i.next()) {
        PointsToNode* f = i.get();
        // Skip arraycopy edge since store to destination object field
        // does not update value in source object field.
        if (f->is_Arraycopy()) {
          assert(base->arraycopy_dst(), "sanity");
          continue;
        }
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  }
}

// Find fields which have unknown value.
int ConnectionGraph::find_field_value(FieldNode* field) {
  // Escaped fields should have init value already.
  assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
  int new_edges = 0;
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    if (base->is_JavaObject()) {
      // Skip Allocate's fields which will be processed later.
      if (base->ideal_node()->is_Allocate())
        return 0;
      assert(base == null_obj, "only NULL ptr base expected here");
    }
  }
  if (add_edge(field, phantom_obj)) {
    // New edge was added
    new_edges++;
    add_field_uses_to_worklist(field);
  }
  return new_edges;
}

// Find fields initializing values for allocations.
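// Called in two modes: with init_val == phantom_obj to mark fields whose
// values are unknown (e.g. fields of objects returned from calls or
// written by arraycopy/clone), and with init_val == null_obj to add the
// default NULL value for oop fields whose initializing store was not
// captured by the allocation's Initialize node.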
// Put on worklist all related field nodes.
void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
  int offset = field->offset();
  if (base->is_LocalVar()) {
    for (UseIterator j(base); j.has_next(); j.next()) {
      PointsToNode* f = j.get();
      if (PointsToNode::is_base_use(f)) { // Field
        f = PointsToNode::get_use_node(f);
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  } else {
    assert(base->is_JavaObject(), "sanity");
    if (// Skip phantom_object since it is only used to indicate that
        // this field's content globally escapes.
        (base != phantom_obj) &&
        // NULL object node does not have fields.
        (base != null_obj)) {
      for (EdgeIterator i(base); i.has_next(); i.next()) {
        PointsToNode* f = i.get();
        // Skip arraycopy edge since a store to the destination object's field
        // does not update the value in the source object's field.
        if (f->is_Arraycopy()) {
          assert(base->arraycopy_dst(), "sanity");
          continue;
        }
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  }
}

// Find fields which have unknown value.
int ConnectionGraph::find_field_value(FieldNode* field) {
  // Escaped fields should have init value already.
  assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
  int new_edges = 0;
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    if (base->is_JavaObject()) {
      // Skip Allocate's fields which will be processed later.
      if (base->ideal_node()->is_Allocate())
        return 0;
      assert(base == null_obj, "only NULL ptr base expected here");
    }
  }
  if (add_edge(field, phantom_obj)) {
    // New edge was added
    new_edges++;
    add_field_uses_to_worklist(field);
  }
  return new_edges;
}

// Find fields initializing values for allocations.
int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) {
  assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
  int new_edges = 0;
  Node* alloc = pta->ideal_node();
  if (init_val == phantom_obj) {
    // Do nothing for Allocate nodes since their field values are
    // "known" unless they are initialized by arraycopy/clone.
    if (alloc->is_Allocate() && !pta->arraycopy_dst())
      return 0;
    assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
#ifdef ASSERT
    if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) {
      const char* name = alloc->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
    }
#endif
    // Non-escaped allocations returned from Java or runtime calls have
    // unknown values in fields.
    for (EdgeIterator i(pta); i.has_next(); i.next()) {
      PointsToNode* field = i.get();
      if (field->is_Field() && field->as_Field()->is_oop()) {
        if (add_edge(field, phantom_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(field->as_Field());
        }
      }
    }
    return new_edges;
  }
  assert(init_val == null_obj, "sanity");
  // Do nothing for Call nodes since their field values are unknown.
  if (!alloc->is_Allocate())
    return 0;

  InitializeNode* ini = alloc->as_Allocate()->initialization();
  bool visited_bottom_offset = false;
  GrowableArray<int> offsets_worklist;

  // Check if an oop field's initializing value is recorded and add
  // a corresponding NULL edge if the field's value is not recorded.
  // The Connection Graph does not record a default initialization by NULL
  // captured by an Initialize node.
  //
  for (EdgeIterator i(pta); i.has_next(); i.next()) {
    PointsToNode* field = i.get(); // Field (AddP)
    if (!field->is_Field() || !field->as_Field()->is_oop())
      continue; // Not oop field
    int offset = field->as_Field()->offset();
    if (offset == Type::OffsetBot) {
      if (!visited_bottom_offset) {
        // OffsetBot is used to reference an array's element:
        // always add a reference to NULL to all Field nodes since we don't
        // know which element is referenced.
        if (add_edge(field, null_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(field->as_Field());
          visited_bottom_offset = true;
        }
      }
    } else {
      // Check only oop fields.
      const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
      if (adr_type->isa_rawptr()) {
#ifdef ASSERT
        // Raw pointers are used for initializing stores, so skip this field
        // since its store should be recorded already.
        Node* base = get_addp_base(field->ideal_node());
        assert(adr_type->isa_rawptr() && base->is_Proj() &&
               (base->in(0) == alloc),"unexpected pointer type");
#endif
        continue;
      }
      if (!offsets_worklist.contains(offset)) {
        offsets_worklist.append(offset);
        Node* value = NULL;
        if (ini != NULL) {
          // StoreP::memory_type() == T_ADDRESS
          BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
          Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
          // Make sure the initializing store has the same type as this AddP.
          // This AddP may reference a non-existing field because it is on a
          // dead branch of a bimorphic call which is not eliminated yet.
          if (store != NULL && store->is_Store() &&
              store->as_Store()->memory_type() == ft) {
            value = store->in(MemNode::ValueIn);
#ifdef ASSERT
            if (VerifyConnectionGraph) {
              // Verify that AddP already points to all objects the value points to.
              PointsToNode* val = ptnode_adr(value->_idx);
              assert((val != NULL), "should be processed already");
              PointsToNode* missed_obj = NULL;
              if (val->is_JavaObject()) {
                if (!field->points_to(val->as_JavaObject())) {
                  missed_obj = val;
                }
              } else {
                if (!val->is_LocalVar() || (val->edge_count() == 0)) {
                  tty->print_cr("----------init store has invalid value -----");
                  store->dump();
                  val->dump();
                  assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
                }
                for (EdgeIterator j(val); j.has_next(); j.next()) {
                  PointsToNode* obj = j.get();
                  if (obj->is_JavaObject()) {
                    if (!field->points_to(obj->as_JavaObject())) {
                      missed_obj = obj;
                      break;
                    }
                  }
                }
              }
              if (missed_obj != NULL) {
                tty->print_cr("----------field---------------------------------");
                field->dump();
                tty->print_cr("----------missed reference to object------------");
                missed_obj->dump();
                tty->print_cr("----------object referenced by init store ------");
                store->dump();
                val->dump();
                assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
              }
            }
#endif
          } else {
            // There could be initializing stores which follow the allocation.
            // For example, a volatile field store is not collected
            // by the Initialize node.
            //
            // Need to check for dependent loads to separate such stores from
            // stores which follow loads. For now, add the initial value NULL so
            // that the compare pointers optimization works correctly.
          }
        }
        if (value == NULL) {
          // A field's initializing value was not recorded. Add NULL.
          if (add_edge(field, null_obj)) {
            // New edge was added
            new_edges++;
            add_field_uses_to_worklist(field->as_Field());
          }
        }
      }
    }
  }
  return new_edges;
}
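// Example of an initializing store the Initialize node does not capture
// (hypothetical Java source): the volatile store below stays a separate
// StoreP after the allocation instead of being recorded by Initialize,
// so the field conservatively gets a NULL edge here:
//
//   class H { volatile Object f; }
//   H h = new H();   // allocation with Initialize
//   h.f = x;         // volatile store, not captured by Initialize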

// Adjust scalar_replaceable state after Connection Graph is built.
void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {
  // Search for non-escaping objects which are not scalar replaceable
  // and mark them to propagate the state to referenced objects.

  // 1. An object is not scalar replaceable if the field into which it is
  //    stored has unknown offset (stored into unknown element of an array).
  //
  for (UseIterator i(jobj); i.has_next(); i.next()) {
    PointsToNode* use = i.get();
    if (use->is_Arraycopy()) {
      continue;
    }
    if (use->is_Field()) {
      FieldNode* field = use->as_Field();
      assert(field->is_oop() && field->scalar_replaceable(), "sanity");
      if (field->offset() == Type::OffsetBot) {
        jobj->set_scalar_replaceable(false);
        return;
      }
      // 2. An object is not scalar replaceable if the field into which it is
      //    stored has multiple bases, one of which is null.
      if (field->base_count() > 1) {
        for (BaseIterator i(field); i.has_next(); i.next()) {
          PointsToNode* base = i.get();
          if (base == null_obj) {
            jobj->set_scalar_replaceable(false);
            return;
          }
        }
      }
    }
    assert(use->is_Field() || use->is_LocalVar(), "sanity");
    // 3. An object is not scalar replaceable if it is merged with other objects.
    for (EdgeIterator j(use); j.has_next(); j.next()) {
      PointsToNode* ptn = j.get();
      if (ptn->is_JavaObject() && ptn != jobj) {
        // Mark all objects.
        jobj->set_scalar_replaceable(false);
        ptn->set_scalar_replaceable(false);
      }
    }
    if (!jobj->scalar_replaceable()) {
      return;
    }
  }

  for (EdgeIterator j(jobj); j.has_next(); j.next()) {
    if (j.get()->is_Arraycopy()) {
      continue;
    }

    // Non-escaping object node should point only to field nodes.
    FieldNode* field = j.get()->as_Field();
    int offset = field->as_Field()->offset();

    // 4. An object is not scalar replaceable if it has a field with unknown
    //    offset (array's element is accessed in loop).
    if (offset == Type::OffsetBot) {
      jobj->set_scalar_replaceable(false);
      return;
    }
    // 5. Currently an object is not scalar replaceable if a LoadStore node
    //    accesses its field since the field value is unknown after it.
    //
    Node* n = field->ideal_node();

    // Test for an unsafe access that was parsed as maybe off heap
    // (with a CheckCastPP to raw memory).
    assert(n->is_AddP(), "expect an address computation");
    if (n->in(AddPNode::Base)->is_top() &&
        n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
      assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
      assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
      jobj->set_scalar_replaceable(false);
      return;
    }

    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* u = n->fast_out(i);
      if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
        jobj->set_scalar_replaceable(false);
        return;
      }
    }

    // 6. Or the address may point to more than one object. This may produce
    //    a false positive result (set not scalar replaceable)
    //    since the flow-insensitive escape analysis can't separate
    //    the case when stores overwrite the field's value from the case
    //    when stores happened on different control branches.
    //
    //    Note: it will disable scalar replacement in some cases:
    //
    //    Point p[] = new Point[1];
    //    p[0] = new Point(); // Will not be scalar replaced
    //
    //    but it will save us from incorrect optimizations in cases such as:
    //
    //    Point p[] = new Point[1];
    //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
    //
    if (field->base_count() > 1) {
      for (BaseIterator i(field); i.has_next(); i.next()) {
        PointsToNode* base = i.get();
        // Don't take into account LocalVar nodes which
        // may point to only one object which should also be
        // this field's base by now.
        if (base->is_JavaObject() && base != jobj) {
          // Mark all bases.
          jobj->set_scalar_replaceable(false);
          base->set_scalar_replaceable(false);
        }
      }
    }
  }
}
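// Condition 3 above in practice (hypothetical Java source): both
// allocations flow into the same variable, so they are merged in the
// Connection Graph and neither can be scalar replaced:
//
//   Point p = flag ? new Point() : new Point();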

#ifdef ASSERT
void ConnectionGraph::verify_connection_graph(
                 GrowableArray<PointsToNode*>&   ptnodes_worklist,
                 GrowableArray<JavaObjectNode*>& non_escaped_worklist,
                 GrowableArray<JavaObjectNode*>& java_objects_worklist,
                 GrowableArray<Node*>& addp_worklist) {
  // Verify that graph is complete - no new edges could be added.
  int java_objects_length = java_objects_worklist.length();
  int non_escaped_length  = non_escaped_worklist.length();
  int new_edges = 0;
  for (int next = 0; next < java_objects_length; ++next) {
    JavaObjectNode* ptn = java_objects_worklist.at(next);
    new_edges += add_java_object_edges(ptn, true);
  }
  assert(new_edges == 0, "graph was not complete");
  // Verify that escape state is final.
  int length = non_escaped_worklist.length();
  find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist);
  assert((non_escaped_length == non_escaped_worklist.length()) &&
         (non_escaped_length == length) &&
         (_worklist.length() == 0), "escape state was not final");

  // Verify fields information.
  int addp_length = addp_worklist.length();
  for (int next = 0; next < addp_length; ++next ) {
    Node* n = addp_worklist.at(next);
    FieldNode* field = ptnode_adr(n->_idx)->as_Field();
    if (field->is_oop()) {
      // Verify that field has all bases
      Node* base = get_addp_base(n);
      PointsToNode* ptn = ptnode_adr(base->_idx);
      if (ptn->is_JavaObject()) {
        assert(field->has_base(ptn->as_JavaObject()), "sanity");
      } else {
        assert(ptn->is_LocalVar(), "sanity");
        for (EdgeIterator i(ptn); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_JavaObject()) {
            assert(field->has_base(e->as_JavaObject()), "sanity");
          }
        }
      }
      // Verify that all fields have initializing values.
      if (field->edge_count() == 0) {
        tty->print_cr("----------field does not have references----------");
        field->dump();
        for (BaseIterator i(field); i.has_next(); i.next()) {
          PointsToNode* base = i.get();
          tty->print_cr("----------field has next base---------------------");
          base->dump();
          if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) {
            tty->print_cr("----------base has fields-------------------------");
            for (EdgeIterator j(base); j.has_next(); j.next()) {
              j.get()->dump();
            }
            tty->print_cr("----------base has references---------------------");
            for (UseIterator j(base); j.has_next(); j.next()) {
              j.get()->dump();
            }
          }
        }
        for (UseIterator i(field); i.has_next(); i.next()) {
          i.get()->dump();
        }
        assert(field->edge_count() > 0, "sanity");
      }
    }
  }
}
#endif

// Optimize ideal graph.
void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
                                           GrowableArray<Node*>& storestore_worklist) {
  Compile* C = _compile;
  PhaseIterGVN* igvn = _igvn;
  if (EliminateLocks) {
    // Mark locks before changing ideal graph.
    int cnt = C->macro_count();
    for (int i = 0; i < cnt; i++) {
      Node *n = C->macro_node(i);
      if (n->is_AbstractLock()) { // Lock and Unlock nodes
        AbstractLockNode* alock = n->as_AbstractLock();
        if (!alock->is_non_esc_obj()) {
          if (not_global_escape(alock->obj_node())) {
            assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
            // The lock could be marked eliminated by the lock coarsening
            // code during the first IGVN before EA. Replace the coarsened
            // flag to eliminate all associated locks/unlocks.
#ifdef ASSERT
            alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
#endif
            alock->set_non_esc_obj();
          }
        }
      }
    }
  }
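  // Example of a lock the loop above marks (hypothetical Java source):
  // the receiver never escapes, so the Lock/Unlock pair is flagged
  // non_esc_obj and eliminated later:
  //
  //   Object local = new Object();
  //   synchronized (local) { counter++; }  // lock on a non-escaping object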

  if (OptimizePtrCompare) {
    // Add ConI(#CC_GT) and ConI(#CC_EQ).
    _pcmp_neq = igvn->makecon(TypeInt::CC_GT);
    _pcmp_eq  = igvn->makecon(TypeInt::CC_EQ);
    // Optimize objects compare.
    while (ptr_cmp_worklist.length() != 0) {
      Node *n = ptr_cmp_worklist.pop();
      Node *res = optimize_ptr_compare(n);
      if (res != NULL) {
#ifndef PRODUCT
        if (PrintOptimizePtrCompare) {
          tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
          if (Verbose) {
            n->dump(1);
          }
        }
#endif
        igvn->replace_node(n, res);
      }
    }
    // cleanup
    if (_pcmp_neq->outcnt() == 0)
      igvn->hash_delete(_pcmp_neq);
    if (_pcmp_eq->outcnt()  == 0)
      igvn->hash_delete(_pcmp_eq);
  }

  // For MemBarStoreStore nodes added in library_call.cpp, check the
  // escape status of the associated AllocateNode and optimize out the
  // MemBarStoreStore node if the allocated object never escapes.
  while (storestore_worklist.length() != 0) {
    Node *n = storestore_worklist.pop();
    MemBarStoreStoreNode *storestore = n->as_MemBarStoreStore();
    Node *alloc = storestore->in(MemBarNode::Precedent)->in(0);
    assert(alloc->is_Allocate(), "storestore should point to AllocateNode");
    if (not_global_escape(alloc)) {
      MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
      mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
      mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
      igvn->register_new_node_with_optimizer(mb);
      igvn->replace_node(storestore, mb);
    }
  }
}

// Optimize objects compare.
Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
  assert(OptimizePtrCompare, "sanity");
  PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
  PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
  JavaObjectNode* jobj1 = unique_java_object(n->in(1));
  JavaObjectNode* jobj2 = unique_java_object(n->in(2));
  assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
  assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");

  // Check simple cases first.
  if (jobj1 != NULL) {
    if (jobj1->escape_state() == PointsToNode::NoEscape) {
      if (jobj1 == jobj2) {
        // Comparing the same non-escaping object.
        return _pcmp_eq;
      }
      Node* obj = jobj1->ideal_node();
      // Comparing a non-escaping allocation.
      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
          !ptn2->points_to(jobj1)) {
        return _pcmp_neq; // This includes nullness check.
      }
    }
  }
  if (jobj2 != NULL) {
    if (jobj2->escape_state() == PointsToNode::NoEscape) {
      Node* obj = jobj2->ideal_node();
      // Comparing a non-escaping allocation.
      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
          !ptn1->points_to(jobj2)) {
        return _pcmp_neq; // This includes nullness check.
      }
    }
  }
  if (jobj1 != NULL && jobj1 != phantom_obj &&
      jobj2 != NULL && jobj2 != phantom_obj &&
      jobj1->ideal_node()->is_Con() &&
      jobj2->ideal_node()->is_Con()) {
    // Klass or String constants compare. Need to be careful with
    // compressed pointers - compare types of ConN and ConP instead of nodes.
    const Type* t1 = jobj1->ideal_node()->get_ptr_type();
    const Type* t2 = jobj2->ideal_node()->get_ptr_type();
    if (t1->make_ptr() == t2->make_ptr()) {
      return _pcmp_eq;
    } else {
      return _pcmp_neq;
    }
  }
  if (ptn1->meet(ptn2)) {
    return NULL; // Sets are not disjoint
  }

  // Sets are disjoint.
  bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
  bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
  bool set1_has_null_ptr    = ptn1->points_to(null_obj);
  bool set2_has_null_ptr    = ptn2->points_to(null_obj);
  if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
      (set2_has_unknown_ptr && set1_has_null_ptr)) {
    // Check nullness of unknown object.
    return NULL;
  }

  // Disjointness by itself is not sufficient since
  // alias analysis is not complete for escaped objects.
  // Disjoint sets are definitely unrelated only when
  // at least one set has only non-escaping allocations.
  if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
    if (ptn1->non_escaping_allocation()) {
      return _pcmp_neq;
    }
  }
  if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
    if (ptn2->non_escaping_allocation()) {
      return _pcmp_neq;
    }
  }
  return NULL;
}
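// Example of a compare this method folds (hypothetical Java source):
// 'a' and 'b' are distinct non-escaping allocations, so (a == b) is
// replaced by the constant "not equal" result:
//
//   Object a = new Object();
//   Object b = new Object();
//   if (a == b) { ... }   // statically false, branch removed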

// Connection Graph construction functions.

void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
  PointsToNode* ptadr = _nodes.at(n->_idx);
  if (ptadr != NULL) {
    assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
    return;
  }
  Compile* C = _compile;
  ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
  _nodes.at_put(n->_idx, ptadr);
}

void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
  PointsToNode* ptadr = _nodes.at(n->_idx);
  if (ptadr != NULL) {
    assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
    return;
  }
  Compile* C = _compile;
  ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
  _nodes.at_put(n->_idx, ptadr);
}

void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
  PointsToNode* ptadr = _nodes.at(n->_idx);
  if (ptadr != NULL) {
    assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
    return;
  }
  bool unsafe = false;
  bool is_oop = is_oop_field(n, offset, &unsafe);
  if (unsafe) {
    es = PointsToNode::GlobalEscape;
  }
  Compile* C = _compile;
  FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
  _nodes.at_put(n->_idx, field);
}

void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
                                    PointsToNode* src, PointsToNode* dst) {
  assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
  assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
  PointsToNode* ptadr = _nodes.at(n->_idx);
  if (ptadr != NULL) {
    assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
    return;
  }
  Compile* C = _compile;
  ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
  _nodes.at_put(n->_idx, ptadr);
  // Add edge from arraycopy node to source object.
  (void)add_edge(ptadr, src);
  src->set_arraycopy_src();
  // Add edge from destination object to arraycopy node.
  (void)add_edge(dst, ptadr);
  dst->set_arraycopy_dst();
}

bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
  const Type* adr_type = n->as_AddP()->bottom_type();
  BasicType bt = T_INT;
  if (offset == Type::OffsetBot) {
    // Check only oop fields.
    if (!adr_type->isa_aryptr() ||
        (adr_type->isa_aryptr()->klass() == NULL) ||
         adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
      // OffsetBot is used to reference array's element. Ignore first AddP.
      if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
        bt = T_OBJECT;
      }
    }
  } else if (offset != oopDesc::klass_offset_in_bytes()) {
    if (adr_type->isa_instptr()) {
      ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
      if (field != NULL) {
        bt = field->layout_type();
      } else {
        // Check for unsafe oop field access
        if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
            n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
            n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
            BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
          bt = T_OBJECT;
          (*unsafe) = true;
        }
      }
    } else if (adr_type->isa_aryptr()) {
      if (offset == arrayOopDesc::length_offset_in_bytes()) {
        // Ignore array length load.
      } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
        // Ignore first AddP.
      } else {
        const Type* elemtype = adr_type->isa_aryptr()->elem();
        bt = elemtype->array_element_basic_type();
      }
    } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
      // Allocation initialization, ThreadLocal field access, unsafe access
      if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
          n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
          n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
          BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
        bt = T_OBJECT;
      }
    }
  }
  return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
}

// Returns the unique pointed-to java object or NULL.
JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
  assert(!_collecting, "should not call during graph construction");
  // If the node was created after the escape computation we can't answer.
  uint idx = n->_idx;
  if (idx >= nodes_size()) {
    return NULL;
  }
  PointsToNode* ptn = ptnode_adr(idx);
  if (ptn->is_JavaObject()) {
    return ptn->as_JavaObject();
  }
  assert(ptn->is_LocalVar(), "sanity");
  // Check all java objects it points to.
  JavaObjectNode* jobj = NULL;
  for (EdgeIterator i(ptn); i.has_next(); i.next()) {
    PointsToNode* e = i.get();
    if (e->is_JavaObject()) {
      if (jobj == NULL) {
        jobj = e->as_JavaObject();
      } else if (jobj != e) {
        return NULL;
      }
    }
  }
  return jobj;
}
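// unique_java_object() in terms of source shapes (hypothetical Java):
//
//   Object o = new A();             // unique: returns JavaObject(new A)
//   Object p = cond ? o : new B();  // not unique: returns NULL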

// Return true if this node points only to non-escaping allocations.
bool PointsToNode::non_escaping_allocation() {
  if (is_JavaObject()) {
    Node* n = ideal_node();
    if (n->is_Allocate() || n->is_CallStaticJava()) {
      return (escape_state() == PointsToNode::NoEscape);
    } else {
      return false;
    }
  }
  assert(is_LocalVar(), "sanity");
  // Check all java objects it points to.
  for (EdgeIterator i(this); i.has_next(); i.next()) {
    PointsToNode* e = i.get();
    if (e->is_JavaObject()) {
      Node* n = e->ideal_node();
      if ((e->escape_state() != PointsToNode::NoEscape) ||
          !(n->is_Allocate() || n->is_CallStaticJava())) {
        return false;
      }
    }
  }
  return true;
}

// Return true if we know the node does not escape globally.
bool ConnectionGraph::not_global_escape(Node *n) {
  assert(!_collecting, "should not call during graph construction");
  // If the node was created after the escape computation we can't answer.
  uint idx = n->_idx;
  if (idx >= nodes_size()) {
    return false;
  }
  PointsToNode* ptn = ptnode_adr(idx);
  if (!ptn) {
    return false; // not in congraph (e.g. ConI)
  }
  PointsToNode::EscapeState es = ptn->escape_state();
  // If we have already computed a value, return it.
  if (es >= PointsToNode::GlobalEscape)
    return false;
  if (ptn->is_JavaObject()) {
    return true; // (es < PointsToNode::GlobalEscape);
  }
  assert(ptn->is_LocalVar(), "sanity");
  // Check all java objects it points to.
  for (EdgeIterator i(ptn); i.has_next(); i.next()) {
    if (i.get()->escape_state() >= PointsToNode::GlobalEscape)
      return false;
  }
  return true;
}


// Helper functions

// Return true if this node points to the specified node or to nodes it points to.
bool PointsToNode::points_to(JavaObjectNode* ptn) const {
  if (is_JavaObject()) {
    return (this == ptn);
  }
  assert(is_LocalVar() || is_Field(), "sanity");
  for (EdgeIterator i(this); i.has_next(); i.next()) {
    if (i.get() == ptn)
      return true;
  }
  return false;
}

// Return true if one node points to another (the points-to sets intersect).
bool PointsToNode::meet(PointsToNode* ptn) {
  if (this == ptn) {
    return true;
  } else if (ptn->is_JavaObject()) {
    return this->points_to(ptn->as_JavaObject());
  } else if (this->is_JavaObject()) {
    return ptn->points_to(this->as_JavaObject());
  }
  assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
  int ptn_count = ptn->edge_count();
  for (EdgeIterator i(this); i.has_next(); i.next()) {
    PointsToNode* this_e = i.get();
    for (int j = 0; j < ptn_count; j++) {
      if (this_e == ptn->edge(j))
        return true;
    }
  }
  return false;
}

#ifdef ASSERT
// Return true if bases point to this java object.
bool FieldNode::has_base(JavaObjectNode* jobj) const {
  for (BaseIterator i(this); i.has_next(); i.next()) {
    if (i.get() == jobj)
      return true;
  }
  return false;
}
#endif
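// meet() illustration (hypothetical Java source): the two locals below
// share o2 in their points-to sets, so meet() returns true and a compare
// of x and y cannot be folded by optimize_ptr_compare():
//
//   Object x = cond1 ? o1 : o2;   // LocalVar(x) points to {o1, o2}
//   Object y = cond2 ? o2 : o3;   // LocalVar(y) points to {o2, o3}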

int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
  const Type *adr_type = phase->type(adr);
  if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
      adr->in(AddPNode::Address)->is_Proj() &&
      adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
    // We are computing a raw address for a store captured by an Initialize;
    // compute an appropriate address type. AddP cases #3 and #5 (see below).
    int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
    assert(offs != Type::OffsetBot ||
           adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
           "offset must be a constant or it is initialization of array");
    return offs;
  }
  const TypePtr *t_ptr = adr_type->isa_ptr();
  assert(t_ptr != NULL, "must be a pointer type");
  return t_ptr->offset();
}

Node* ConnectionGraph::get_addp_base(Node *addp) {
  assert(addp->is_AddP(), "must be AddP");
  //
  // AddP cases for Base and Address inputs:
  // case #1. Direct object's field reference:
  //     Allocate
  //       |
  //     Proj #5 ( oop result )
  //       |
  //     CheckCastPP (cast to instance type)
  //      | |
  //     AddP  ( base == address )
  //
  // case #2. Indirect object's field reference:
  //      Phi
  //       |
  //     CastPP (cast to instance type)
  //      | |
  //     AddP  ( base == address )
  //
  // case #3. Raw object's field reference for Initialize node:
  //     Allocate
  //       |
  //     Proj #5 ( oop result )
  //  top   |
  //     \  |
  //     AddP  ( base == top )
  //
  // case #4. Array's element reference:
  //   {CheckCastPP | CastPP}
  //     |  | |
  //     |  AddP ( array's element offset )
  //     |  |
  //     AddP ( array's offset )
  //
  // case #5. Raw object's field reference for arraycopy stub call:
  //          The inline_native_clone() case when the arraycopy stub is called
  //          after the allocation before Initialize and CheckCastPP nodes.
  //     Allocate
  //       |
  //     Proj #5 ( oop result )
  //       | |
  //     AddP  ( base == address )
  //
  // case #6. Constant Pool, ThreadLocal, CastX2P or
  //          Raw object's field reference:
  //   {ConP, ThreadLocal, CastX2P, raw Load}
  //  top   |
  //     \  |
  //     AddP  ( base == top )
  //
  // case #7. Klass's field reference.
  //     LoadKlass
  //       | |
  //     AddP  ( base == address )
  //
  // case #8. narrow Klass's field reference.
  //     LoadNKlass
  //       |
  //     DecodeN
  //       | |
  //     AddP  ( base == address )
  //
  // case #9. Mixed unsafe access
  //    {instance}
  //        |
  //    CheckCastPP (raw)
  //  top   |
  //     \  |
  //     AddP  ( base == top )
  //
  Node *base = addp->in(AddPNode::Base);
  if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.
    base = addp->in(AddPNode::Address);
    while (base->is_AddP()) {
      // Case #6 (unsafe access) may have several chained AddP nodes.
      assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
      base = base->in(AddPNode::Address);
    }
    if (base->Opcode() == Op_CheckCastPP &&
        base->bottom_type()->isa_rawptr() &&
        _igvn->type(base->in(1))->isa_oopptr()) {
      base = base->in(1); // Case #9
    } else {
      Node* uncast_base = base->uncast();
      int opcode = uncast_base->Opcode();
      assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
             opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
             (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
             (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
    }
  }
  return base;
}

Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
  assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
  Node* addp2 = addp->raw_out(0);
  if (addp->outcnt() == 1 && addp2->is_AddP() &&
      addp2->in(AddPNode::Base) == n &&
      addp2->in(AddPNode::Address) == addp) {
    assert(addp->in(AddPNode::Base) == n, "expecting the same base");
    //
    // Find the array's offset to push it on the worklist first and
    // as a result process the array's element offset first (pushed second)
    // to avoid a CastPP for the array's offset.
    // Otherwise the inserted CastPP (LocalVar) will point to what
    // the AddP (Field) points to. Which would be wrong since
    // the algorithm expects the CastPP to point to the same nodes as
    // the AddP's base CheckCastPP (LocalVar).
    //
    //    ArrayAllocation
    //     |
    //    CheckCastPP
    //     |
    //    memProj (from ArrayAllocation CheckCastPP)
    //     |  ||
    //     |  ||   Int (element index)
    //     |  ||    |   ConI (log(element size))
    //     |  ||    |   /
    //     |  ||   LShift
    //     |  ||  /
    //     |  AddP (array's element offset)
    //     |  |
    //     |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
    //     | / /
    //     AddP (array's offset)
    //      |
    //     Load/Store (memory operation on array's element)
    //
    return addp2;
  }
  return NULL;
}
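// The two-AddP shape handled above comes from ordinary array element
// addressing (hypothetical Java source):
//
//   Object v = a[i];   // AddP(array's offset) feeds AddP(element offset)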

//
// Adjust the type and inputs of an AddP which computes the
// address of a field of an instance
//
bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
  PhaseGVN* igvn = _igvn;
  const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
  assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
  const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
  if (t == NULL) {
    // We are computing a raw address for a store captured by an Initialize;
    // compute an appropriate address type (cases #3 and #5).
    assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
    assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
    intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
    assert(offs != Type::OffsetBot, "offset must be a constant");
    t = base_t->add_offset(offs)->is_oopptr();
  }
  int inst_id = base_t->instance_id();
  assert(!t->is_known_instance() || t->instance_id() == inst_id,
         "old type must be non-instance or match new type");

  // The type 't' could be a subclass of 'base_t'.
  // As a result t->offset() could be larger than base_t's size, which would
  // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
  // constructor verifies the correctness of the offset.
  //
  // It can happen on a subclass's branch (from type profiling
  // inlining) which was not eliminated during parsing since the exactness
  // of the allocation type was not propagated to the subclass type check.
  //
  // Or the type 't' could be unrelated to 'base_t' altogether.
  // It can happen when the CHA type differs from the MDO type on a dead path
  // (for example, from an instanceof check) which is not collapsed during parsing.
  //
  // Do nothing for such an AddP node and don't process its users since
  // this code branch will go away.
  //
  if (!t->is_known_instance() &&
      !base_t->klass()->is_subtype_of(t->klass())) {
    return false; // bail out
  }
  const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
  // Do NOT remove the next line: ensure a new alias index is allocated
  // for the instance type. Note: C++ will not remove it since the call
  // has side effect.
  int alias_idx = _compile->get_alias_index(tinst);
  igvn->set_type(addp, tinst);
  // record the allocation in the node map
  set_map(addp, get_map(base->_idx));
  // Set addp's Base and Address to 'base'.
  Node *abase = addp->in(AddPNode::Base);
  Node *adr   = addp->in(AddPNode::Address);
  if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
      adr->in(0)->_idx == (uint)inst_id) {
    // Skip AddP cases #3 and #5.
  } else {
    assert(!abase->is_top(), "sanity"); // AddP case #3
    if (abase != base) {
      igvn->hash_delete(addp);
      addp->set_req(AddPNode::Base, base);
      if (abase == adr) {
        addp->set_req(AddPNode::Address, base);
      } else {
        // AddP case #4 (adr is array's element offset AddP node)
#ifdef ASSERT
        const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
        assert(adr->is_AddP() && atype != NULL &&
               atype->instance_id() == inst_id, "array's element offset should be processed first");
#endif
      }
      igvn->hash_insert(addp);
    }
  }
  // Put on IGVN worklist since at least addp's type was changed above.
  record_for_optimizer(addp);
  return true;
}

//
// Create a new version of orig_phi if necessary. Returns either the newly
// created phi or an existing phi.  Sets create_new to indicate whether a new
// phi was created.  Cache the last newly created phi in the node map.
//
PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {
  Compile *C = _compile;
  PhaseGVN* igvn = _igvn;
  new_created = false;
  int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
  // nothing to do if orig_phi is bottom memory or matches alias_idx
  if (phi_alias_idx == alias_idx) {
    return orig_phi;
  }
  // Have we recently created a Phi for this alias index?
  PhiNode *result = get_map_phi(orig_phi->_idx);
  if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
    return result;
  }
  // The previous check may fail when the same wide memory Phi was split into
  // Phis for different memory slices. Search all Phis for this region.
  if (result != NULL) {
    Node* region = orig_phi->in(0);
    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
      Node* phi = region->fast_out(i);
      if (phi->is_Phi() &&
          C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
        assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
        return phi->as_Phi();
      }
    }
  }
  if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
    if (C->do_escape_analysis() == true && !C->failing()) {
      // Retry compilation without escape analysis.
      // If this is the first failure, the sentinel string will "stick"
      // to the Compile object, and the C2Compiler will see it and retry.
      C->record_failure(C2Compiler::retry_no_escape_analysis());
    }
    return NULL;
  }
  orig_phi_worklist.append_if_missing(orig_phi);
  const TypePtr *atype = C->get_adr_type(alias_idx);
  result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
  C->copy_node_notes_to(result, orig_phi);
  igvn->set_type(result, result->bottom_type());
  record_for_optimizer(result);
  set_map(orig_phi, result);
  new_created = true;
  return result;
}

//
// Return a new version of Memory Phi "orig_phi" with the inputs having the
// specified alias index.
//
PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {
  assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
  Compile *C = _compile;
  PhaseGVN* igvn = _igvn;
  bool new_phi_created;
  PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
  if (!new_phi_created) {
    return result;
  }
  GrowableArray<PhiNode *> phi_list;
  GrowableArray<uint> cur_input;
  PhiNode *phi = orig_phi;
  uint idx = 1;
  bool finished = false;
  while (!finished) {
    while (idx < phi->req()) {
      Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
      if (mem != NULL && mem->is_Phi()) {
        PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
        if (new_phi_created) {
          // found a phi for which we created a new split, push the current
          // one on the worklist and begin processing the new one
          phi_list.push(phi);
          cur_input.push(idx);
          phi = mem->as_Phi();
          result = newphi;
          idx = 1;
          continue;
        } else {
          mem = newphi;
        }
      }
      if (C->failing()) {
        return NULL;
      }
      result->set_req(idx++, mem);
    }
#ifdef ASSERT
    // verify that the new Phi has an input for each input of the original
    assert(phi->req() == result->req(), "must have same number of inputs.");
    assert(result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
#endif
    // Check if all new phi's inputs have the specified alias index.
    // Otherwise use the old phi.
    for (uint i = 1; i < phi->req(); i++) {
      Node* in = result->in(i);
      assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");
    }
    // we have finished processing a Phi, see if there are any more to do
    finished = (phi_list.length() == 0);
    if (!finished) {
      phi = phi_list.pop();
      idx = cur_input.pop();
      PhiNode *prev_result = get_map_phi(phi->_idx);
      prev_result->set_req(idx++, result);
      result = prev_result;
    }
  }
  return result;
}

//
// The next methods are derived from methods in MemNode.
//
Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
  Node *mem = mmem;
  // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
  // means an array I have not precisely typed yet.  Do not do any
  // alias stuff with it any time soon.
  if (toop->base() != Type::AnyPtr &&
      !(toop->klass() != NULL &&
        toop->klass()->is_java_lang_Object() &&
        toop->offset() == Type::OffsetBot)) {
    mem = mmem->memory_at(alias_idx);
    // Update input if it is progress over what we have now
  }
  return mem;
}

//
// Move memory users to their memory slices.
//
void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;
  const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
  assert(tp != NULL, "ptr type");
  int alias_idx = C->get_alias_index(tp);
  int general_idx = C->get_general_index(alias_idx);

  // Move users first
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);
    if (use->is_MergeMem()) {
      MergeMemNode* mmem = use->as_MergeMem();
      assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
      if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
        continue; // Nothing to do
      }
      // Replace previous general reference to mem node.
      uint orig_uniq = C->unique();
      Node* m = find_inst_mem(n, general_idx, orig_phis);
      assert(orig_uniq == C->unique(), "no new nodes");
      mmem->set_memory_at(general_idx, m);
      --imax;
      --i;
    } else if (use->is_MemBar()) {
      assert(!use->is_Initialize(), "initializing stores should not be moved");
      if (use->req() > MemBarNode::Precedent &&
          use->in(MemBarNode::Precedent) == n) {
        // Don't move related membars.
        record_for_optimizer(use);
        continue;
      }
      tp = use->as_MemBar()->adr_type()->isa_ptr();
      if ((tp != NULL && C->get_alias_index(tp) == alias_idx) ||
          alias_idx == general_idx) {
        continue; // Nothing to do
      }
      // Move to general memory slice.
      uint orig_uniq = C->unique();
      Node* m = find_inst_mem(n, general_idx, orig_phis);
      assert(orig_uniq == C->unique(), "no new nodes");
      igvn->hash_delete(use);
      imax -= use->replace_edge(n, m);
      igvn->hash_insert(use);
      record_for_optimizer(use);
      --i;
#ifdef ASSERT
    } else if (use->is_Mem()) {
      if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
        // Don't move related cardmark.
        continue;
      }
      // Memory nodes should have new memory input.
      tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
      assert(tp != NULL, "ptr type");
      int idx = C->get_alias_index(tp);
      assert(get_map(use->_idx) != NULL || idx == alias_idx,
             "Following memory nodes should have new memory input or be on the same memory slice");
    } else if (use->is_Phi()) {
      // Phi nodes should be split and moved already.
      tp = use->as_Phi()->adr_type()->isa_ptr();
      assert(tp != NULL, "ptr type");
      int idx = C->get_alias_index(tp);
      assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
    } else {
      use->dump();
      assert(false, "should not be here");
#endif
    }
  }
}

//
// Search memory chain of "mem" to find a MemNode whose address
// is the specified alias index.
//
Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {
  if (orig_mem == NULL)
    return orig_mem;
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;
  const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
  bool is_instance = (toop != NULL) && toop->is_known_instance();
  Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
  Node *prev = NULL;
  Node *result = orig_mem;
  while (prev != result) {
    prev = result;
    if (result == start_mem)
      break;  // hit one of our sentinels
    if (result->is_Mem()) {
      const Type *at = igvn->type(result->in(MemNode::Address));
      if (at == Type::TOP)
        break; // Dead
      assert (at->isa_ptr() != NULL, "pointer type required.");
      int idx = C->get_alias_index(at->is_ptr());
      if (idx == alias_idx)
        break; // Found
      if (!is_instance && (at->isa_oopptr() == NULL ||
                           !at->is_oopptr()->is_known_instance())) {
        break; // Do not skip store to general memory slice.
      }
      result = result->in(MemNode::Memory);
    }
    if (!is_instance)
      continue;  // don't search further for non-instance types
    // skip over a call which does not affect this memory slice
    if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
      Node *proj_in = result->in(0);
      if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
        break;  // hit one of our sentinels
      } else if (proj_in->is_Call()) {
        // ArrayCopy node processed here as well
        CallNode *call = proj_in->as_Call();
        if (!call->may_modify(toop, igvn)) {
          result = call->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_Initialize()) {
        AllocateNode* alloc = proj_in->as_Initialize()->allocation();
        // Stop if this is the initialization for the object instance which
        // contains this memory slice, otherwise skip over it.
        if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) {
          result = proj_in->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_MemBar()) {
        if (proj_in->in(TypeFunc::Memory)->is_MergeMem() &&
            proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->is_Proj() &&
            proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->in(0)->is_ArrayCopy()) {
          // clone
          ArrayCopyNode* ac = proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->in(0)->as_ArrayCopy();
          if (ac->may_modify(toop, igvn)) {
            break;
          }
        }
        result = proj_in->in(TypeFunc::Memory);
      }
    } else if (result->is_MergeMem()) {
      MergeMemNode *mmem = result->as_MergeMem();
      result = step_through_mergemem(mmem, alias_idx, toop);
      if (result == mmem->base_memory()) {
        // Didn't find instance memory, search through general slice recursively.
        result = mmem->memory_at(C->get_general_index(alias_idx));
        result = find_inst_mem(result, alias_idx, orig_phis);
        if (C->failing()) {
          return NULL;
        }
        mmem->set_memory_at(alias_idx, result);
      }
    } else if (result->is_Phi() &&
               C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
      Node *un = result->as_Phi()->unique_input(igvn);
      if (un != NULL) {
        orig_phis.append_if_missing(result->as_Phi());
        result = un;
      } else {
        break;
      }
    } else if (result->is_ClearArray()) {
      if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
        // Can not bypass initialization of the instance
        // we are looking for.
        break;
      }
      // Otherwise skip it (the call updated 'result' value).
    } else if (result->Opcode() == Op_SCMemProj) {
      Node* mem = result->in(0);
      Node* adr = NULL;
      if (mem->is_LoadStore()) {
        adr = mem->in(MemNode::Address);
      } else {
        assert(mem->Opcode() == Op_EncodeISOArray ||
               mem->Opcode() == Op_StrCompressedCopy, "sanity");
        adr = mem->in(3); // Memory edge corresponds to destination array
      }
      const Type *at = igvn->type(adr);
      if (at != Type::TOP) {
        assert(at->isa_ptr() != NULL, "pointer type required.");
        int idx = C->get_alias_index(at->is_ptr());
        if (idx == alias_idx) {
          // Assert in debug mode
          assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
          break; // In product mode return SCMemProj node
        }
      }
      result = mem->in(MemNode::Memory);
    } else if (result->Opcode() == Op_StrInflatedCopy) {
      Node* adr = result->in(3); // Memory edge corresponds to destination array
      const Type *at = igvn->type(adr);
      if (at != Type::TOP) {
        assert(at->isa_ptr() != NULL, "pointer type required.");
        int idx = C->get_alias_index(at->is_ptr());
        if (idx == alias_idx) {
          // Assert in debug mode
          assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
          break; // In product mode return SCMemProj node
        }
      }
      result = result->in(MemNode::Memory);
    }
  }
  if (result->is_Phi()) {
    PhiNode *mphi = result->as_Phi();
    assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
    const TypePtr *t = mphi->adr_type();
    if (!is_instance) {
      // Push all non-instance Phis on the orig_phis worklist to update inputs
      // during Phase 4 if needed.
      orig_phis.append_if_missing(mphi);
    } else if (C->get_alias_index(t) != alias_idx) {
      // Create a new Phi with the specified alias index type.
      result = split_memory_phi(mphi, alias_idx, orig_phis);
    }
  }
  // the result is either MemNode, PhiNode, InitializeNode.
  return result;
}

//
//  Convert the types of non-escaped objects to instance types where possible,
//  propagate the new type information through the graph, and update memory
//  edges and MergeMem inputs to reflect the new type.
//
//  We start with allocations (and calls which may be allocations) on alloc_worklist.
//  The processing is done in 4 phases:
//
//  Phase 1:  Process possible allocations from alloc_worklist.  Create instance
//            types for the CheckCastPP for allocations where possible.
//            Propagate the new types through users as follows:
//               casts and Phi:  push users on alloc_worklist
//               AddP:  cast Base and Address inputs to the instance type
//                      push any AddP users on alloc_worklist and push any memnode
//                      users onto memnode_worklist.
//  Phase 2:  Process MemNode's from memnode_worklist. Compute the new address
//            type and search the memory chain for a store with the appropriate
//            address type.  If a Phi is found, create a new version with the
//            appropriate memory slices from each of the Phi inputs.
//            For stores, process the users as follows:
//               MemNode:  push on memnode_worklist
//               MergeMem: push on mergemem_worklist
//  Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory
//            slice, moving the first node encountered of each instance type to
//            the input corresponding to its alias index (the appropriate memory
//            slice).
//  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input
//            of memnodes.
//
//  In the following example, the CheckCastPP nodes are the cast of allocation
//  results and the allocation of node 29 is non-escaping and eligible to be an
//  instance type.
//
//  We start with:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"
//    30  AddP  _ 29 29 10  Foo+12  alias_index=4
//
//    40  StoreP  25   7  20   ... alias_index=4
//    50  StoreP  35  40  30   ... alias_index=4
//    60  StoreP  45  50  20   ... alias_index=4
//    70  LoadP    _  60  30   ... alias_index=4
//    80  Phi     75  50  60   Memory alias_index=4
//    90  LoadP    _  80  30   ... alias_index=4
//   100  LoadP    _  80  20   ... alias_index=4
//
//
//  Phase 1 creates an instance type for node 29 assigning it an instance id of 24
//  and creating a new alias index for node 30.  This gives:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"  iid=24
//    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
//
//    40  StoreP  25   7  20   ... alias_index=4
//    50  StoreP  35  40  30   ... alias_index=6
//    60  StoreP  45  50  20   ... alias_index=4
//    70  LoadP    _  60  30   ... alias_index=6
//    80  Phi     75  50  60   Memory alias_index=4
//    90  LoadP    _  80  30   ... alias_index=6
//   100  LoadP    _  80  20   ... alias_index=4
//
//  In phase 2, new memory inputs are computed for the loads and stores,
//  and a new version of the phi is created.  In phase 4, the inputs to
//  node 80 are updated and then the memory nodes are updated with the
//  values computed in phase 2.  This results in:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"  iid=24
//    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
//
//    40  StoreP  25   7  20   ... alias_index=4
//    50  StoreP  35   7  30   ... alias_index=6
//    60  StoreP  45  40  20   ... alias_index=4
//    70  LoadP    _  50  30   ... alias_index=6
//    80  Phi     75  40  60   Memory alias_index=4
//   120  Phi     75  50  50   Memory alias_index=6
//    90  LoadP    _ 120  30   ... alias_index=6
//   100  LoadP    _  80  20   ... alias_index=4
//
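// Roughly the Java shape behind the example above (hypothetical and
// simplified; the offsets are illustrative):
//
//   class Foo { Object f; }        // 'f' is the Foo+12 field
//   Foo escaping = new Foo();      // node 19's allocation
//   Foo local    = new Foo();      // node 29's allocation, non-escaping
//   ... interleaved stores/loads of escaping.f and local.f ...
//
// After splitting, memory operations on 'local.f' (alias_index=6) no
// longer interfere with those on 'escaping.f' (alias_index=4).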
void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, GrowableArray<ArrayCopyNode*> &arraycopy_worklist) {
  GrowableArray<Node *> memnode_worklist;
  GrowableArray<PhiNode *> orig_phis;
  PhaseIterGVN *igvn = _igvn;
  uint new_index_start = (uint) _compile->num_alias_types();
  Arena* arena = Thread::current()->resource_area();
  VectorSet visited(arena);
  ideal_nodes.clear(); // Reset for use with set_map/get_map.
  uint unique_old = _compile->unique();

  //  Phase 1:  Process possible allocations from alloc_worklist.
  //  Create instance types for the CheckCastPP for allocations where possible.
  //
  // (Note: don't forget to change the order of the second AddP node on
  //  the alloc_worklist if the order of the worklist processing is changed,
  //  see the comment in find_second_addp().)
  //
  while (alloc_worklist.length() != 0) {
    Node *n = alloc_worklist.pop();
    uint ni = n->_idx;
    if (n->is_Call()) {
      CallNode *alloc = n->as_Call();
      // copy escape information to call node
      PointsToNode* ptn = ptnode_adr(alloc->_idx);
      PointsToNode::EscapeState es = ptn->escape_state();
      // We have an allocation or call which returns a Java object,
      // see if it is non-escaping.
      if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable())
        continue;
      // Find CheckCastPP for the allocate or for the return value of a call
      n = alloc->result_cast();
      if (n == NULL) {            // No uses except Initialize node
        if (alloc->is_Allocate()) {
          // Set the scalar_replaceable flag for allocation
          // so it could be eliminated if it has no uses.
          alloc->as_Allocate()->_is_scalar_replaceable = true;
        }
        if (alloc->is_CallStaticJava()) {
          // Set the scalar_replaceable flag for boxing method
          // so it could be eliminated if it has no uses.
          alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
        }
        continue;
      }
      if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
        assert(!alloc->is_Allocate(), "allocation should have unique type");
        continue;
      }

      // The inline code for Object.clone() casts the allocation result to
      // java.lang.Object and then to the actual type of the allocated
      // object. Detect this case and use the second cast.
      // Also detect the j.l.reflect.Array.newInstance(jobject, jint) case
      // when the allocation result is cast to java.lang.Object and then
      // to the actual Array type.
      if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
          && (alloc->is_AllocateArray() ||
              igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {
        Node *cast2 = NULL;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node *use = n->fast_out(i);
          if (use->is_CheckCastPP()) {
            cast2 = use;
            break;
          }
        }
        if (cast2 != NULL) {
          n = cast2;
        } else {
          // Not scalar replaceable if the allocation type is unknown statically
          // (reflection allocation): the object can't be restored during
          // deoptimization without a precise type.
          continue;
        }
      }

      const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
      if (t == NULL)
        continue;  // not a TypeOopPtr
      if (!t->klass_is_exact())
        continue; // not a unique type

      if (alloc->is_Allocate()) {
        // Set the scalar_replaceable flag for allocation
        // so it could be eliminated.
        alloc->as_Allocate()->_is_scalar_replaceable = true;
      }
      if (alloc->is_CallStaticJava()) {
        // Set the scalar_replaceable flag for boxing method
        // so it could be eliminated.
        alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
      }
      set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state
      // in order for an object to be scalar-replaceable, it must be:
      //   - a direct allocation (not a call returning an object)
      //   - non-escaping
      //   - eligible to be a unique type
      //   - not determined to be ineligible by escape analysis
      set_map(alloc, n);
      set_map(n, alloc);
      const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
      igvn->hash_delete(n);
      igvn->set_type(n, tinst);
      n->raise_bottom_type(tinst);
      igvn->hash_insert(n);
      record_for_optimizer(n);
      // Allocate an alias index for the header fields. Accesses to
      // the header emitted during macro expansion wouldn't have
      // correct memory state otherwise.
      _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
      _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
      if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {

        // First, put on the worklist all Field edges from the Connection Graph
        // which is more accurate than putting immediate users from the Ideal Graph.
        for (EdgeIterator e(ptn); e.has_next(); e.next()) {
          PointsToNode* tgt = e.get();
          if (tgt->is_Arraycopy()) {
            continue;
          }
          Node* use = tgt->ideal_node();
          assert(tgt->is_Field() && use->is_AddP(),
                 "only AddP nodes are Field edges in CG");
          if (use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(),"array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          }
        }

        // An allocation may have an Initialize which has raw stores. Scan
        // the users of the raw allocation result and push AddP users
        // on alloc_worklist.
        Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
        assert(raw_result != NULL, "must have an allocation result");
        for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
          Node *use = raw_result->fast_out(i);
          if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, raw_result);
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(), "array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          } else if (use->is_MemBar()) {
            memnode_worklist.append_if_missing(use);
          }
        }
      }
    } else if (n->is_AddP()) {
      JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
      if (jobj == NULL || jobj == phantom_obj) {
#ifdef ASSERT
        ptnode_adr(get_addp_base(n)->_idx)->dump();
        ptnode_adr(n->_idx)->dump();
        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
#endif
        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
        return;
      }
      Node *base = get_map(jobj->idx());  // CheckCastPP node
      if (!split_AddP(n, base)) continue; // wrong type from dead path
    } else if (n->is_Phi() ||
               n->is_CheckCastPP() ||
               n->is_EncodeP() ||
               n->is_DecodeN() ||
               (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
      if (visited.test_set(n->_idx)) {
        assert(n->is_Phi(), "loops only through Phi's");
        continue;  // already processed
      }
      JavaObjectNode* jobj = unique_java_object(n);
      if (jobj == NULL || jobj == phantom_obj) {
#ifdef ASSERT
        ptnode_adr(n->_idx)->dump();
        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
#endif
        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
        return;
      } else {
        Node *val = get_map(jobj->idx());   // CheckCastPP node
        TypeNode *tn = n->as_Type();
        const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
        assert(tinst != NULL && tinst->is_known_instance() &&
               tinst->instance_id() == jobj->idx(), "instance type expected.");

        const Type *tn_type = igvn->type(tn);
        const TypeOopPtr *tn_t;
        if (tn_type->isa_narrowoop()) {
          tn_t = tn_type->make_ptr()->isa_oopptr();
        } else {
          tn_t = tn_type->isa_oopptr();
        }
        if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
          if (tn_type->isa_narrowoop()) {
            tn_type = tinst->make_narrowoop();
          } else {
            tn_type = tinst;
          }
          igvn->hash_delete(tn);
          igvn->set_type(tn, tn_type);
          tn->set_type(tn_type);
          igvn->hash_insert(tn);
          record_for_optimizer(n);
        } else {
          assert(tn_type == TypePtr::NULL_PTR ||
                 tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()),
                 "unexpected type");
          continue; // Skip dead path with different type
        }
      }
    } else {
      debug_only(n->dump();)
      assert(false, "EA: unexpected node");
      continue;
    }
    // push allocation's users on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Mem() && use->in(MemNode::Address) == n) {
        // Load/store to instance's field
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
      } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
        Node* addp2 = find_second_addp(use, n);
        if (addp2 != NULL) {
          alloc_worklist.append_if_missing(addp2);
        }
        alloc_worklist.append_if_missing(use);
      } else if (use->is_Phi() ||
                 use->is_CheckCastPP() ||
                 use->is_EncodeNarrowPtr() ||
                 use->is_DecodeNarrowPtr() ||
                 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
        alloc_worklist.append_if_missing(use);
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->is_SafePoint()) {
        // Look for MergeMem nodes for calls which reference unique allocation
        // (through CheckCastPP nodes) even for debug info.
        Node* m = use->in(TypeFunc::Memory);
        if (m->is_MergeMem()) {
          assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
        }
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites the destination array
          memnode_worklist.append_if_missing(use);
        }
      } else {
        uint op = use->Opcode();
        if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
            (use->in(MemNode::Memory) == n)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(op == Op_CmpP || op == Op_Conv2B ||
                     op == Op_CastP2X || op == Op_StoreCM ||
                     op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
                     BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
          n->dump();
          use->dump();
          assert(false, "EA: missing allocation reference path");
        }
#endif
      }
    }
  }
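
  // At this point every scalar-replaceable allocation found above has a
  // CheckCastPP with a known-instance type (iid set) and its AddP users
  // have per-instance alias indexes; in the example at the head of this
  // method these are nodes 29/30 with iid=24 and alias_index=6.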

  // Go over all ArrayCopy nodes and if one of the inputs has a unique
  // type, record it in the ArrayCopy node so we know what memory this
  // node uses/modifies.
  for (int next = 0; next < arraycopy_worklist.length(); next++) {
    ArrayCopyNode* ac = arraycopy_worklist.at(next);
    Node* dest = ac->in(ArrayCopyNode::Dest);
    if (dest->is_AddP()) {
      dest = get_addp_base(dest);
    }
    JavaObjectNode* jobj = unique_java_object(dest);
    if (jobj != NULL) {
      Node *base = get_map(jobj->idx());
      if (base != NULL) {
        const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
        ac->_dest_type = base_t;
      }
    }
    Node* src = ac->in(ArrayCopyNode::Src);
    if (src->is_AddP()) {
      src = get_addp_base(src);
    }
    jobj = unique_java_object(src);
    if (jobj != NULL) {
      Node* base = get_map(jobj->idx());
      if (base != NULL) {
        const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
        ac->_src_type = base_t;
      }
    }
  }
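
  // A hypothetical Java-level example (shown only to motivate the
  // bookkeeping above):
  //
  //    int[] tmp = new int[n];              // non-escaping, got a unique type
  //    System.arraycopy(src, 0, tmp, 0, n); // becomes an ArrayCopy node
  //
  // Recording tmp's instance type in ac->_dest_type tells later passes
  // exactly which memory slice this ArrayCopy modifies.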

  // New alias types were created in split_AddP().
  uint new_index_end = (uint) _compile->num_alias_types();
  assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");

  //  Phase 2:  Process MemNodes from memnode_worklist: compute new address
  //            types and new values for their Memory inputs (the Memory
  //            inputs are not actually updated until Phase 4).
  if (memnode_worklist.length() == 0)
    return;  // nothing to do
  while (memnode_worklist.length() != 0) {
    Node *n = memnode_worklist.pop();
    if (visited.test_set(n->_idx))
      continue;
    if (n->is_Phi() || n->is_ClearArray()) {
      // we don't need to do anything, but the users must be pushed
    } else if (n->is_MemBar()) { // Initialize, MemBar nodes
      // we don't need to do anything, but the users must be pushed
      n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
      if (n == NULL)
        continue;
    } else if (n->Opcode() == Op_StrCompressedCopy ||
               n->Opcode() == Op_EncodeISOArray) {
      // get the memory projection
      n = n->find_out_with(Op_SCMemProj);
      assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
    } else {
      assert(n->is_Mem(), "memory node required.");
      Node *addr = n->in(MemNode::Address);
      const Type *addr_t = igvn->type(addr);
      if (addr_t == Type::TOP)
        continue;
      assert(addr_t->isa_ptr() != NULL, "pointer type required.");
      int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
      assert((uint)alias_idx < new_index_end, "wrong alias index");
      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != n->in(MemNode::Memory)) {
        // We delay the memory edge update since we need the old one in
        // the MergeMem code below when instance memory slices are separated.
        set_map(n, mem);
      }
      if (n->is_Load()) {
        continue;  // don't push users
      } else if (n->is_LoadStore()) {
        // get the memory projection
        n = n->find_out_with(Op_SCMemProj);
        assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
      }
    }
    // push user on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Phi() || use->is_ClearArray()) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
        if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
          continue;
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites the destination array
          memnode_worklist.append_if_missing(use);
        }
      } else {
        uint op = use->Opcode();
        if ((use->in(MemNode::Memory) == n) &&
            (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
                     op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing memory path");
        }
#endif
      }
    }
  }
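
  // In the example at the head of this method, this phase is where it is
  // computed that node 70 (LoadP, alias_index=6) should read from
  // StoreP 50; the memory edge itself is only rewired in Phase 4, so the
  // new input is parked in the node map via set_map() above.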

  //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
  //  Walk each memory slice moving the first node encountered of each
  //  instance type to the input corresponding to its alias index.
  uint length = _mergemem_worklist.length();
  for (uint next = 0; next < length; ++next) {
    MergeMemNode* nmm = _mergemem_worklist.at(next);
    assert(!visited.test_set(nmm->_idx), "should not be visited before");
    // Note: we don't want to use MergeMemStream here because we only want to
    // scan inputs which exist at the start, not ones we add during processing.
    // Note 2: MergeMem may already contain instance memory slices added
    // during the find_inst_mem() call when memory nodes were processed above.
    igvn->hash_delete(nmm);
    uint nslices = MIN2(nmm->req(), new_index_start);
    for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
      Node* mem = nmm->in(i);
      Node* cur = NULL;
      if (mem == NULL || mem->is_top())
        continue;
      // First, update mergemem by moving memory nodes to corresponding slices
      // if their type became more precise since this mergemem was created.
      while (mem->is_Mem()) {
        const Type *at = igvn->type(mem->in(MemNode::Address));
        if (at != Type::TOP) {
          assert(at->isa_ptr() != NULL, "pointer type required.");
          uint idx = (uint)_compile->get_alias_index(at->is_ptr());
          if (idx == i) {
            if (cur == NULL)
              cur = mem;
          } else {
            if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
              nmm->set_memory_at(idx, mem);
            }
          }
        }
        mem = mem->in(MemNode::Memory);
      }
      nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
      // Find any instance of the current type if we haven't already
      // encountered a memory slice of the instance along the memory chain.
      for (uint ni = new_index_start; ni < new_index_end; ni++) {
        if ((uint)_compile->get_general_index(ni) == i) {
          Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
          if (nmm->is_empty_memory(m)) {
            Node* result = find_inst_mem(mem, ni, orig_phis);
            if (_compile->failing()) {
              return;
            }
            nmm->set_memory_at(ni, result);
          }
        }
      }
    }
    // Find the memory values for the rest of the instance types.
    for (uint ni = new_index_start; ni < new_index_end; ni++) {
      const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
      Node* result = step_through_mergemem(nmm, ni, tinst);
      if (result == nmm->base_memory()) {
        // Didn't find instance memory, search through the general slice recursively.
        result = nmm->memory_at(_compile->get_general_index(ni));
        result = find_inst_mem(result, ni, orig_phis);
        if (_compile->failing()) {
          return;
        }
        nmm->set_memory_at(ni, result);
      }
    }
    igvn->hash_insert(nmm);
    record_for_optimizer(nmm);
  }
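
  // Sketch of the net effect (the example graph at the head of this method
  // does not show a MergeMem, so the node choices here are illustrative
  // only): a MergeMem that before had just the general "Foo+12" slice at
  // alias_index=4 now also carries the per-instance slice, e.g.
  //
  //    MergeMem  ...  in(4)=StoreP 40  ...  in(6)=StoreP 50  ...
  //
  // so calls and safepoints below it see the split memory state.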

  //  Phase 4:  Update the inputs of non-instance memory Phis and
  //            the Memory input of memnodes.
  // First update the inputs of any non-instance Phi's from
  // which we split out an instance Phi.  Note we don't have
  // to recursively process Phi's encountered on the input memory
  // chains as is done in split_memory_phi() since they will
  // also be processed here.
  for (int j = 0; j < orig_phis.length(); j++) {
    PhiNode *phi = orig_phis.at(j);
    int alias_idx = _compile->get_alias_index(phi->adr_type());
    igvn->hash_delete(phi);
    for (uint i = 1; i < phi->req(); i++) {
      Node *mem = phi->in(i);
      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != new_mem) {
        phi->set_req(i, new_mem);
      }
    }
    igvn->hash_insert(phi);
    record_for_optimizer(phi);
  }

  // Update the memory inputs of MemNodes with the value we computed
  // in Phase 2 and move stores' memory users to the corresponding memory
  // slices.
  // Disable memory split verification code until the fix for 6984348.
  // Currently it produces false negative results since it does not cover all cases.
#if 0 // ifdef ASSERT
  visited.Reset();
  Node_Stack old_mems(arena, _compile->unique() >> 2);
#endif
  for (uint i = 0; i < ideal_nodes.size(); i++) {
    Node*    n = ideal_nodes.at(i);
    Node* nmem = get_map(n->_idx);
    assert(nmem != NULL, "sanity");
    if (n->is_Mem()) {
#if 0 // ifdef ASSERT
      Node* old_mem = n->in(MemNode::Memory);
      if (!visited.test_set(old_mem->_idx)) {
        old_mems.push(old_mem, old_mem->outcnt());
      }
#endif
      assert(n->in(MemNode::Memory) != nmem, "sanity");
      if (!n->is_Load()) {
        // Move memory users of a store first.
        move_inst_mem(n, orig_phis);
      }
      // Now update the memory input
      igvn->hash_delete(n);
      n->set_req(MemNode::Memory, nmem);
      igvn->hash_insert(n);
      record_for_optimizer(n);
    } else {
      assert(n->is_Allocate() || n->is_CheckCastPP() ||
             n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
    }
  }
#if 0 // ifdef ASSERT
  // Verify that memory was split correctly
  while (old_mems.is_nonempty()) {
    Node* old_mem = old_mems.node();
    uint  old_cnt = old_mems.index();
    old_mems.pop();
    assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
  }
#endif
}

#ifndef PRODUCT
static const char *node_type_names[] = {
  "UnknownType",
  "JavaObject",
  "LocalVar",
  "Field",
  "Arraycopy"
};

static const char *esc_names[] = {
  "UnknownEscape",
  "NoEscape",
  "ArgEscape",
  "GlobalEscape"
};
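
// The output of PointsToNode::dump() below looks roughly like this
// (the node ids are made up for this sketch):
//
//   JavaObject NoEscape(NoEscape) [ 8F 12F [ 5 9b ]]  27  CheckCastPP  ...
//
// i.e. the node type, the escape state (with the fields' escape state in
// parentheses), the edge list ("P" = JavaObject, "F" = Field,
// "cp" = Arraycopy), the use list ("b" marks a base use), and finally
// the ideal node itself.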
"cp" : ""); 3578 } 3579 tty->print(" ["); 3580 for (UseIterator i(this); i.has_next(); i.next()) { 3581 PointsToNode* u = i.get(); 3582 bool is_base = false; 3583 if (PointsToNode::is_base_use(u)) { 3584 is_base = true; 3585 u = PointsToNode::get_use_node(u)->as_Field(); 3586 } 3587 tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : ""); 3588 } 3589 tty->print(" ]] "); 3590 if (_node == NULL) 3591 tty->print_cr("<null>"); 3592 else 3593 _node->dump(); 3594 } 3595 3596 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { 3597 bool first = true; 3598 int ptnodes_length = ptnodes_worklist.length(); 3599 for (int i = 0; i < ptnodes_length; i++) { 3600 PointsToNode *ptn = ptnodes_worklist.at(i); 3601 if (ptn == NULL || !ptn->is_JavaObject()) 3602 continue; 3603 PointsToNode::EscapeState es = ptn->escape_state(); 3604 if ((es != PointsToNode::NoEscape) && !Verbose) { 3605 continue; 3606 } 3607 Node* n = ptn->ideal_node(); 3608 if (n->is_Allocate() || (n->is_CallStaticJava() && 3609 n->as_CallStaticJava()->is_boxing_method())) { 3610 if (first) { 3611 tty->cr(); 3612 tty->print("======== Connection graph for "); 3613 _compile->method()->print_short_name(); 3614 tty->cr(); 3615 first = false; 3616 } 3617 ptn->dump(); 3618 // Print all locals and fields which reference this allocation 3619 for (UseIterator j(ptn); j.has_next(); j.next()) { 3620 PointsToNode* use = j.get(); 3621 if (use->is_LocalVar()) { 3622 use->dump(Verbose); 3623 } else if (Verbose) { 3624 use->dump(); 3625 } 3626 } 3627 tty->cr(); 3628 } 3629 } 3630 } 3631 #endif 3632 3633 void ConnectionGraph::record_for_optimizer(Node *n) { 3634 _igvn->_worklist.push(n); 3635 _igvn->add_users_to_worklist(n); 3636 }