1 /* 2 * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "ci/bcEscapeAnalyzer.hpp" 27 #include "compiler/compileLog.hpp" 28 #include "gc/shared/barrierSet.hpp" 29 #include "gc/shared/c2/barrierSetC2.hpp" 30 #include "libadt/vectset.hpp" 31 #include "memory/allocation.hpp" 32 #include "memory/resourceArea.hpp" 33 #include "opto/c2compiler.hpp" 34 #include "opto/arraycopynode.hpp" 35 #include "opto/callnode.hpp" 36 #include "opto/cfgnode.hpp" 37 #include "opto/compile.hpp" 38 #include "opto/escape.hpp" 39 #include "opto/phaseX.hpp" 40 #include "opto/movenode.hpp" 41 #include "opto/rootnode.hpp" 42 #include "utilities/macros.hpp" 43 44 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) : 45 _nodes(C->comp_arena(), C->unique(), C->unique(), NULL), 46 _in_worklist(C->comp_arena()), 47 _next_pidx(0), 48 _collecting(true), 49 _verify(false), 50 _compile(C), 51 _igvn(igvn), 52 _node_map(C->comp_arena()) { 53 // Add unknown java object. 54 add_java_object(C->top(), PointsToNode::GlobalEscape); 55 phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject(); 56 // Add ConP(#NULL) and ConN(#NULL) nodes. 57 Node* oop_null = igvn->zerocon(T_OBJECT); 58 assert(oop_null->_idx < nodes_size(), "should be created already"); 59 add_java_object(oop_null, PointsToNode::NoEscape); 60 null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject(); 61 if (UseCompressedOops) { 62 Node* noop_null = igvn->zerocon(T_NARROWOOP); 63 assert(noop_null->_idx < nodes_size(), "should be created already"); 64 map_ideal_node(noop_null, null_obj); 65 } 66 _pcmp_neq = NULL; // Should be initialized 67 _pcmp_eq = NULL; 68 } 69 70 bool ConnectionGraph::has_candidates(Compile *C) { 71 // EA brings benefits only when the code has allocations and/or locks which 72 // are represented by ideal Macro nodes. 73 int cnt = C->macro_count(); 74 for (int i = 0; i < cnt; i++) { 75 Node *n = C->macro_node(i); 76 if (n->is_Allocate()) 77 return true; 78 if (n->is_Lock()) { 79 Node* obj = n->as_Lock()->obj_node()->uncast(); 80 if (!(obj->is_Parm() || obj->is_Con())) 81 return true; 82 } 83 if (n->is_CallStaticJava() && 84 n->as_CallStaticJava()->is_boxing_method()) { 85 return true; 86 } 87 } 88 return false; 89 } 90 91 void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) { 92 Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]); 93 ResourceMark rm; 94 95 // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction 96 // to create space for them in ConnectionGraph::_nodes[]. 97 Node* oop_null = igvn->zerocon(T_OBJECT); 98 Node* noop_null = igvn->zerocon(T_NARROWOOP); 99 ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn); 100 // Perform escape analysis 101 if (congraph->compute_escape()) { 102 // There are non escaping objects. 103 C->set_congraph(congraph); 104 } 105 // Cleanup. 106 if (oop_null->outcnt() == 0) 107 igvn->hash_delete(oop_null); 108 if (noop_null->outcnt() == 0) 109 igvn->hash_delete(noop_null); 110 } 111 112 bool ConnectionGraph::compute_escape() { 113 Compile* C = _compile; 114 PhaseGVN* igvn = _igvn; 115 116 // Worklists used by EA. 117 Unique_Node_List delayed_worklist; 118 GrowableArray<Node*> alloc_worklist; 119 GrowableArray<Node*> ptr_cmp_worklist; 120 GrowableArray<Node*> storestore_worklist; 121 GrowableArray<ArrayCopyNode*> arraycopy_worklist; 122 GrowableArray<PointsToNode*> ptnodes_worklist; 123 GrowableArray<JavaObjectNode*> java_objects_worklist; 124 GrowableArray<JavaObjectNode*> non_escaped_worklist; 125 GrowableArray<FieldNode*> oop_fields_worklist; 126 GrowableArray<SafePointNode*> sfn_worklist; 127 DEBUG_ONLY( GrowableArray<Node*> addp_worklist; ) 128 129 { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]); 130 131 // 1. Populate Connection Graph (CG) with PointsTo nodes. 132 ideal_nodes.map(C->live_nodes(), NULL); // preallocate space 133 // Initialize worklist 134 if (C->root() != NULL) { 135 ideal_nodes.push(C->root()); 136 } 137 // Processed ideal nodes are unique on ideal_nodes list 138 // but several ideal nodes are mapped to the phantom_obj. 139 // To avoid duplicated entries on the following worklists 140 // add the phantom_obj only once to them. 141 ptnodes_worklist.append(phantom_obj); 142 java_objects_worklist.append(phantom_obj); 143 for( uint next = 0; next < ideal_nodes.size(); ++next ) { 144 Node* n = ideal_nodes.at(next); 145 // Create PointsTo nodes and add them to Connection Graph. Called 146 // only once per ideal node since ideal_nodes is Unique_Node list. 147 add_node_to_connection_graph(n, &delayed_worklist); 148 PointsToNode* ptn = ptnode_adr(n->_idx); 149 if (ptn != NULL && ptn != phantom_obj) { 150 ptnodes_worklist.append(ptn); 151 if (ptn->is_JavaObject()) { 152 java_objects_worklist.append(ptn->as_JavaObject()); 153 if ((n->is_Allocate() || n->is_CallStaticJava()) && 154 (ptn->escape_state() < PointsToNode::GlobalEscape)) { 155 // Only allocations and java static calls results are interesting. 156 non_escaped_worklist.append(ptn->as_JavaObject()); 157 } 158 } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) { 159 oop_fields_worklist.append(ptn->as_Field()); 160 } 161 } 162 if (n->is_MergeMem()) { 163 // Collect all MergeMem nodes to add memory slices for 164 // scalar replaceable objects in split_unique_types(). 165 _mergemem_worklist.append(n->as_MergeMem()); 166 } else if (OptimizePtrCompare && n->is_Cmp() && 167 (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) { 168 // Collect compare pointers nodes. 169 ptr_cmp_worklist.append(n); 170 } else if (n->is_MemBarStoreStore()) { 171 // Collect all MemBarStoreStore nodes so that depending on the 172 // escape status of the associated Allocate node some of them 173 // may be eliminated. 174 storestore_worklist.append(n); 175 } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) && 176 (n->req() > MemBarNode::Precedent)) { 177 record_for_optimizer(n); 178 #ifdef ASSERT 179 } else if (n->is_AddP()) { 180 // Collect address nodes for graph verification. 181 addp_worklist.append(n); 182 #endif 183 } else if (n->is_ArrayCopy()) { 184 // Keep a list of ArrayCopy nodes so if one of its input is non 185 // escaping, we can record a unique type 186 arraycopy_worklist.append(n->as_ArrayCopy()); 187 } 188 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 189 Node* m = n->fast_out(i); // Get user 190 ideal_nodes.push(m); 191 } 192 if (n-> is_SafePoint()) { 193 sfn_worklist.append(n->as_SafePoint()); 194 } 195 } 196 if (non_escaped_worklist.length() == 0) { 197 _collecting = false; 198 return false; // Nothing to do. 199 } 200 // Add final simple edges to graph. 201 while(delayed_worklist.size() > 0) { 202 Node* n = delayed_worklist.pop(); 203 add_final_edges(n); 204 } 205 int ptnodes_length = ptnodes_worklist.length(); 206 207 #ifdef ASSERT 208 if (VerifyConnectionGraph) { 209 // Verify that no new simple edges could be created and all 210 // local vars has edges. 211 _verify = true; 212 for (int next = 0; next < ptnodes_length; ++next) { 213 PointsToNode* ptn = ptnodes_worklist.at(next); 214 add_final_edges(ptn->ideal_node()); 215 if (ptn->is_LocalVar() && ptn->edge_count() == 0) { 216 ptn->dump(); 217 assert(ptn->as_LocalVar()->edge_count() > 0, "sanity"); 218 } 219 } 220 _verify = false; 221 } 222 #endif 223 // Bytecode analyzer BCEscapeAnalyzer, used for Call nodes 224 // processing, calls to CI to resolve symbols (types, fields, methods) 225 // referenced in bytecode. During symbol resolution VM may throw 226 // an exception which CI cleans and converts to compilation failure. 227 if (C->failing()) return false; 228 229 // 2. Finish Graph construction by propagating references to all 230 // java objects through graph. 231 if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist, 232 java_objects_worklist, oop_fields_worklist)) { 233 // All objects escaped or hit time or iterations limits. 234 _collecting = false; 235 return false; 236 } 237 238 // 3. Adjust scalar_replaceable state of nonescaping objects and push 239 // scalar replaceable allocations on alloc_worklist for processing 240 // in split_unique_types(). 241 int non_escaped_length = non_escaped_worklist.length(); 242 for (int next = 0; next < non_escaped_length; next++) { 243 JavaObjectNode* ptn = non_escaped_worklist.at(next); 244 bool noescape = (ptn->escape_state() == PointsToNode::NoEscape); 245 Node* n = ptn->ideal_node(); 246 if (n->is_Allocate()) { 247 n->as_Allocate()->_is_non_escaping = noescape; 248 } 249 if (n->is_CallStaticJava()) { 250 n->as_CallStaticJava()->_is_non_escaping = noescape; 251 } 252 if (noescape && ptn->scalar_replaceable()) { 253 adjust_scalar_replaceable_state(ptn); 254 if (ptn->scalar_replaceable()) { 255 alloc_worklist.append(ptn->ideal_node()); 256 } 257 } 258 } 259 260 #ifdef ASSERT 261 if (VerifyConnectionGraph) { 262 // Verify that graph is complete - no new edges could be added or needed. 263 verify_connection_graph(ptnodes_worklist, non_escaped_worklist, 264 java_objects_worklist, addp_worklist); 265 } 266 assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build"); 267 assert(null_obj->escape_state() == PointsToNode::NoEscape && 268 null_obj->edge_count() == 0 && 269 !null_obj->arraycopy_src() && 270 !null_obj->arraycopy_dst(), "sanity"); 271 #endif 272 273 _collecting = false; 274 275 } // TracePhase t3("connectionGraph") 276 277 // 4. Optimize ideal graph based on EA information. 278 bool has_non_escaping_obj = (non_escaped_worklist.length() > 0); 279 if (has_non_escaping_obj) { 280 optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist); 281 } 282 283 #ifndef PRODUCT 284 if (PrintEscapeAnalysis) { 285 dump(ptnodes_worklist); // Dump ConnectionGraph 286 } 287 #endif 288 289 bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0); 290 #ifdef ASSERT 291 if (VerifyConnectionGraph) { 292 int alloc_length = alloc_worklist.length(); 293 for (int next = 0; next < alloc_length; ++next) { 294 Node* n = alloc_worklist.at(next); 295 PointsToNode* ptn = ptnode_adr(n->_idx); 296 assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity"); 297 } 298 } 299 #endif 300 301 // 5. Separate memory graph for scalar replaceable allcations. 302 if (has_scalar_replaceable_candidates && 303 C->AliasLevel() >= 3 && EliminateAllocations) { 304 // Now use the escape information to create unique types for 305 // scalar replaceable objects. 306 split_unique_types(alloc_worklist, arraycopy_worklist); 307 if (C->failing()) return false; 308 C->print_method(PHASE_AFTER_EA, 2); 309 310 #ifdef ASSERT 311 } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) { 312 tty->print("=== No allocations eliminated for "); 313 C->method()->print_short_name(); 314 if(!EliminateAllocations) { 315 tty->print(" since EliminateAllocations is off ==="); 316 } else if(!has_scalar_replaceable_candidates) { 317 tty->print(" since there are no scalar replaceable candidates ==="); 318 } else if(C->AliasLevel() < 3) { 319 tty->print(" since AliasLevel < 3 ==="); 320 } 321 tty->cr(); 322 #endif 323 } 324 325 // Annotate at safepoints if they have <= ArgEscape objects in their scope and at 326 // java calls if they pass ArgEscape objects as parameters. 327 if (has_non_escaping_obj && 328 (C->env()->should_retain_local_variables() || 329 C->env()->jvmti_can_get_owned_monitor_info() || 330 C->env()->jvmti_can_walk_any_space() || 331 DeoptimizeObjectsALot)) { 332 int sfn_length = sfn_worklist.length(); 333 for (int next = 0; next < sfn_length; next++) { 334 SafePointNode* sfn = sfn_worklist.at(next); 335 sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn)); 336 if (sfn->is_CallJava()) { 337 CallJavaNode* call = sfn->as_CallJava(); 338 call->set_arg_escape(has_arg_escape(call)); 339 } 340 } 341 } 342 343 return has_non_escaping_obj; 344 } 345 346 // Returns true if there is an object in the scope of sfn that does not escape globally. 347 bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) { 348 Compile* C = _compile; 349 for (JVMState* jvms = sfn->jvms(); jvms != NULL; jvms = jvms->caller()) { 350 if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() || 351 DeoptimizeObjectsALot) { 352 // Jvmti agents can access locals. Must provide info about local objects at runtime. 353 int num_locs = jvms->loc_size(); 354 for (int idx = 0; idx < num_locs; idx++) { 355 Node* l = sfn->local(jvms, idx); 356 if (not_global_escape(l)) { 357 return true; 358 } 359 } 360 } 361 if (C->env()->jvmti_can_get_owned_monitor_info() || 362 C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) { 363 // Jvmti agents can read monitors. Must provide info about locked objects at runtime. 364 int num_mon = jvms->nof_monitors(); 365 for (int idx = 0; idx < num_mon; idx++) { 366 Node* m = sfn->monitor_obj(jvms, idx); 367 if (m != NULL && not_global_escape(m)) { 368 return true; 369 } 370 } 371 } 372 } 373 return false; 374 } 375 376 // Returns true if at least one of the arguments to the call is an object 377 // that does not escape globally. 378 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) { 379 if (call->method() != NULL) { 380 uint max_idx = TypeFunc::Parms + call->method()->arg_size(); 381 for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) { 382 Node* p = call->in(idx); 383 if (not_global_escape(p)) { 384 return true; 385 } 386 } 387 } else { 388 const char* name = call->as_CallStaticJava()->_name; 389 assert(name != NULL, "no name"); 390 // no arg escapes through uncommon traps 391 if (strcmp(name, "uncommon_trap") != 0) { 392 // process_call_arguments() assumes that all arguments escape globally 393 const TypeTuple* d = call->tf()->domain(); 394 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 395 const Type* at = d->field_at(i); 396 if (at->isa_oopptr() != NULL) { 397 return true; 398 } 399 } 400 } 401 } 402 return false; 403 } 404 405 406 407 // Utility function for nodes that load an object 408 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) { 409 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 410 // ThreadLocal has RawPtr type. 411 const Type* t = _igvn->type(n); 412 if (t->make_ptr() != NULL) { 413 Node* adr = n->in(MemNode::Address); 414 #ifdef ASSERT 415 if (!adr->is_AddP()) { 416 assert(_igvn->type(adr)->isa_rawptr(), "sanity"); 417 } else { 418 assert((ptnode_adr(adr->_idx) == NULL || 419 ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity"); 420 } 421 #endif 422 add_local_var_and_edge(n, PointsToNode::NoEscape, 423 adr, delayed_worklist); 424 } 425 } 426 427 // Populate Connection Graph with PointsTo nodes and create simple 428 // connection graph edges. 429 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) { 430 assert(!_verify, "this method should not be called for verification"); 431 PhaseGVN* igvn = _igvn; 432 uint n_idx = n->_idx; 433 PointsToNode* n_ptn = ptnode_adr(n_idx); 434 if (n_ptn != NULL) 435 return; // No need to redefine PointsTo node during first iteration. 436 437 int opcode = n->Opcode(); 438 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode); 439 if (gc_handled) { 440 return; // Ignore node if already handled by GC. 441 } 442 443 if (n->is_Call()) { 444 // Arguments to allocation and locking don't escape. 445 if (n->is_AbstractLock()) { 446 // Put Lock and Unlock nodes on IGVN worklist to process them during 447 // first IGVN optimization when escape information is still available. 448 record_for_optimizer(n); 449 } else if (n->is_Allocate()) { 450 add_call_node(n->as_Call()); 451 record_for_optimizer(n); 452 } else { 453 if (n->is_CallStaticJava()) { 454 const char* name = n->as_CallStaticJava()->_name; 455 if (name != NULL && strcmp(name, "uncommon_trap") == 0) 456 return; // Skip uncommon traps 457 } 458 // Don't mark as processed since call's arguments have to be processed. 459 delayed_worklist->push(n); 460 // Check if a call returns an object. 461 if ((n->as_Call()->returns_pointer() && 462 n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) || 463 (n->is_CallStaticJava() && 464 n->as_CallStaticJava()->is_boxing_method())) { 465 add_call_node(n->as_Call()); 466 } 467 } 468 return; 469 } 470 // Put this check here to process call arguments since some call nodes 471 // point to phantom_obj. 472 if (n_ptn == phantom_obj || n_ptn == null_obj) 473 return; // Skip predefined nodes. 474 475 switch (opcode) { 476 case Op_AddP: { 477 Node* base = get_addp_base(n); 478 PointsToNode* ptn_base = ptnode_adr(base->_idx); 479 // Field nodes are created for all field types. They are used in 480 // adjust_scalar_replaceable_state() and split_unique_types(). 481 // Note, non-oop fields will have only base edges in Connection 482 // Graph because such fields are not used for oop loads and stores. 483 int offset = address_offset(n, igvn); 484 add_field(n, PointsToNode::NoEscape, offset); 485 if (ptn_base == NULL) { 486 delayed_worklist->push(n); // Process it later. 487 } else { 488 n_ptn = ptnode_adr(n_idx); 489 add_base(n_ptn->as_Field(), ptn_base); 490 } 491 break; 492 } 493 case Op_CastX2P: { 494 map_ideal_node(n, phantom_obj); 495 break; 496 } 497 case Op_CastPP: 498 case Op_CheckCastPP: 499 case Op_EncodeP: 500 case Op_DecodeN: 501 case Op_EncodePKlass: 502 case Op_DecodeNKlass: { 503 add_local_var_and_edge(n, PointsToNode::NoEscape, 504 n->in(1), delayed_worklist); 505 break; 506 } 507 case Op_CMoveP: { 508 add_local_var(n, PointsToNode::NoEscape); 509 // Do not add edges during first iteration because some could be 510 // not defined yet. 511 delayed_worklist->push(n); 512 break; 513 } 514 case Op_ConP: 515 case Op_ConN: 516 case Op_ConNKlass: { 517 // assume all oop constants globally escape except for null 518 PointsToNode::EscapeState es; 519 const Type* t = igvn->type(n); 520 if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) { 521 es = PointsToNode::NoEscape; 522 } else { 523 es = PointsToNode::GlobalEscape; 524 } 525 add_java_object(n, es); 526 break; 527 } 528 case Op_CreateEx: { 529 // assume that all exception objects globally escape 530 map_ideal_node(n, phantom_obj); 531 break; 532 } 533 case Op_LoadKlass: 534 case Op_LoadNKlass: { 535 // Unknown class is loaded 536 map_ideal_node(n, phantom_obj); 537 break; 538 } 539 case Op_LoadP: 540 case Op_LoadN: 541 case Op_LoadPLocked: { 542 add_objload_to_connection_graph(n, delayed_worklist); 543 break; 544 } 545 case Op_Parm: { 546 map_ideal_node(n, phantom_obj); 547 break; 548 } 549 case Op_PartialSubtypeCheck: { 550 // Produces Null or notNull and is used in only in CmpP so 551 // phantom_obj could be used. 552 map_ideal_node(n, phantom_obj); // Result is unknown 553 break; 554 } 555 case Op_Phi: { 556 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 557 // ThreadLocal has RawPtr type. 558 const Type* t = n->as_Phi()->type(); 559 if (t->make_ptr() != NULL) { 560 add_local_var(n, PointsToNode::NoEscape); 561 // Do not add edges during first iteration because some could be 562 // not defined yet. 563 delayed_worklist->push(n); 564 } 565 break; 566 } 567 case Op_Proj: { 568 // we are only interested in the oop result projection from a call 569 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() && 570 n->in(0)->as_Call()->returns_pointer()) { 571 add_local_var_and_edge(n, PointsToNode::NoEscape, 572 n->in(0), delayed_worklist); 573 } 574 break; 575 } 576 case Op_Rethrow: // Exception object escapes 577 case Op_Return: { 578 if (n->req() > TypeFunc::Parms && 579 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) { 580 // Treat Return value as LocalVar with GlobalEscape escape state. 581 add_local_var_and_edge(n, PointsToNode::GlobalEscape, 582 n->in(TypeFunc::Parms), delayed_worklist); 583 } 584 break; 585 } 586 case Op_CompareAndExchangeP: 587 case Op_CompareAndExchangeN: 588 case Op_GetAndSetP: 589 case Op_GetAndSetN: { 590 add_objload_to_connection_graph(n, delayed_worklist); 591 // fallthrough 592 } 593 case Op_StoreP: 594 case Op_StoreN: 595 case Op_StoreNKlass: 596 case Op_StorePConditional: 597 case Op_WeakCompareAndSwapP: 598 case Op_WeakCompareAndSwapN: 599 case Op_CompareAndSwapP: 600 case Op_CompareAndSwapN: { 601 add_to_congraph_unsafe_access(n, opcode, delayed_worklist); 602 break; 603 } 604 case Op_AryEq: 605 case Op_HasNegatives: 606 case Op_StrComp: 607 case Op_StrEquals: 608 case Op_StrIndexOf: 609 case Op_StrIndexOfChar: 610 case Op_StrInflatedCopy: 611 case Op_StrCompressedCopy: 612 case Op_EncodeISOArray: { 613 add_local_var(n, PointsToNode::ArgEscape); 614 delayed_worklist->push(n); // Process it later. 615 break; 616 } 617 case Op_ThreadLocal: { 618 add_java_object(n, PointsToNode::ArgEscape); 619 break; 620 } 621 default: 622 ; // Do nothing for nodes not related to EA. 623 } 624 return; 625 } 626 627 #ifdef ASSERT 628 #define ELSE_FAIL(name) \ 629 /* Should not be called for not pointer type. */ \ 630 n->dump(1); \ 631 assert(false, name); \ 632 break; 633 #else 634 #define ELSE_FAIL(name) \ 635 break; 636 #endif 637 638 // Add final simple edges to graph. 639 void ConnectionGraph::add_final_edges(Node *n) { 640 PointsToNode* n_ptn = ptnode_adr(n->_idx); 641 #ifdef ASSERT 642 if (_verify && n_ptn->is_JavaObject()) 643 return; // This method does not change graph for JavaObject. 644 #endif 645 646 if (n->is_Call()) { 647 process_call_arguments(n->as_Call()); 648 return; 649 } 650 assert(n->is_Store() || n->is_LoadStore() || 651 (n_ptn != NULL) && (n_ptn->ideal_node() != NULL), 652 "node should be registered already"); 653 int opcode = n->Opcode(); 654 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode); 655 if (gc_handled) { 656 return; // Ignore node if already handled by GC. 657 } 658 switch (opcode) { 659 case Op_AddP: { 660 Node* base = get_addp_base(n); 661 PointsToNode* ptn_base = ptnode_adr(base->_idx); 662 assert(ptn_base != NULL, "field's base should be registered"); 663 add_base(n_ptn->as_Field(), ptn_base); 664 break; 665 } 666 case Op_CastPP: 667 case Op_CheckCastPP: 668 case Op_EncodeP: 669 case Op_DecodeN: 670 case Op_EncodePKlass: 671 case Op_DecodeNKlass: { 672 add_local_var_and_edge(n, PointsToNode::NoEscape, 673 n->in(1), NULL); 674 break; 675 } 676 case Op_CMoveP: { 677 for (uint i = CMoveNode::IfFalse; i < n->req(); i++) { 678 Node* in = n->in(i); 679 if (in == NULL) 680 continue; // ignore NULL 681 Node* uncast_in = in->uncast(); 682 if (uncast_in->is_top() || uncast_in == n) 683 continue; // ignore top or inputs which go back this node 684 PointsToNode* ptn = ptnode_adr(in->_idx); 685 assert(ptn != NULL, "node should be registered"); 686 add_edge(n_ptn, ptn); 687 } 688 break; 689 } 690 case Op_LoadP: 691 case Op_LoadN: 692 case Op_LoadPLocked: { 693 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 694 // ThreadLocal has RawPtr type. 695 const Type* t = _igvn->type(n); 696 if (t->make_ptr() != NULL) { 697 Node* adr = n->in(MemNode::Address); 698 add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL); 699 break; 700 } 701 ELSE_FAIL("Op_LoadP"); 702 } 703 case Op_Phi: { 704 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 705 // ThreadLocal has RawPtr type. 706 const Type* t = n->as_Phi()->type(); 707 if (t->make_ptr() != NULL) { 708 for (uint i = 1; i < n->req(); i++) { 709 Node* in = n->in(i); 710 if (in == NULL) 711 continue; // ignore NULL 712 Node* uncast_in = in->uncast(); 713 if (uncast_in->is_top() || uncast_in == n) 714 continue; // ignore top or inputs which go back this node 715 PointsToNode* ptn = ptnode_adr(in->_idx); 716 assert(ptn != NULL, "node should be registered"); 717 add_edge(n_ptn, ptn); 718 } 719 break; 720 } 721 ELSE_FAIL("Op_Phi"); 722 } 723 case Op_Proj: { 724 // we are only interested in the oop result projection from a call 725 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() && 726 n->in(0)->as_Call()->returns_pointer()) { 727 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL); 728 break; 729 } 730 ELSE_FAIL("Op_Proj"); 731 } 732 case Op_Rethrow: // Exception object escapes 733 case Op_Return: { 734 if (n->req() > TypeFunc::Parms && 735 _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) { 736 // Treat Return value as LocalVar with GlobalEscape escape state. 737 add_local_var_and_edge(n, PointsToNode::GlobalEscape, 738 n->in(TypeFunc::Parms), NULL); 739 break; 740 } 741 ELSE_FAIL("Op_Return"); 742 } 743 case Op_StoreP: 744 case Op_StoreN: 745 case Op_StoreNKlass: 746 case Op_StorePConditional: 747 case Op_CompareAndExchangeP: 748 case Op_CompareAndExchangeN: 749 case Op_CompareAndSwapP: 750 case Op_CompareAndSwapN: 751 case Op_WeakCompareAndSwapP: 752 case Op_WeakCompareAndSwapN: 753 case Op_GetAndSetP: 754 case Op_GetAndSetN: { 755 if (add_final_edges_unsafe_access(n, opcode)) { 756 break; 757 } 758 ELSE_FAIL("Op_StoreP"); 759 } 760 case Op_AryEq: 761 case Op_HasNegatives: 762 case Op_StrComp: 763 case Op_StrEquals: 764 case Op_StrIndexOf: 765 case Op_StrIndexOfChar: 766 case Op_StrInflatedCopy: 767 case Op_StrCompressedCopy: 768 case Op_EncodeISOArray: { 769 // char[]/byte[] arrays passed to string intrinsic do not escape but 770 // they are not scalar replaceable. Adjust escape state for them. 771 // Start from in(2) edge since in(1) is memory edge. 772 for (uint i = 2; i < n->req(); i++) { 773 Node* adr = n->in(i); 774 const Type* at = _igvn->type(adr); 775 if (!adr->is_top() && at->isa_ptr()) { 776 assert(at == Type::TOP || at == TypePtr::NULL_PTR || 777 at->isa_ptr() != NULL, "expecting a pointer"); 778 if (adr->is_AddP()) { 779 adr = get_addp_base(adr); 780 } 781 PointsToNode* ptn = ptnode_adr(adr->_idx); 782 assert(ptn != NULL, "node should be registered"); 783 add_edge(n_ptn, ptn); 784 } 785 } 786 break; 787 } 788 default: { 789 // This method should be called only for EA specific nodes which may 790 // miss some edges when they were created. 791 #ifdef ASSERT 792 n->dump(1); 793 #endif 794 guarantee(false, "unknown node"); 795 } 796 } 797 return; 798 } 799 800 void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) { 801 Node* adr = n->in(MemNode::Address); 802 const Type* adr_type = _igvn->type(adr); 803 adr_type = adr_type->make_ptr(); 804 if (adr_type == NULL) { 805 return; // skip dead nodes 806 } 807 if (adr_type->isa_oopptr() 808 || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) 809 && adr_type == TypeRawPtr::NOTNULL 810 && is_captured_store_address(adr))) { 811 delayed_worklist->push(n); // Process it later. 812 #ifdef ASSERT 813 assert (adr->is_AddP(), "expecting an AddP"); 814 if (adr_type == TypeRawPtr::NOTNULL) { 815 // Verify a raw address for a store captured by Initialize node. 816 int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 817 assert(offs != Type::OffsetBot, "offset must be a constant"); 818 } 819 #endif 820 } else { 821 // Ignore copy the displaced header to the BoxNode (OSR compilation). 822 if (adr->is_BoxLock()) { 823 return; 824 } 825 // Stored value escapes in unsafe access. 826 if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) { 827 delayed_worklist->push(n); // Process unsafe access later. 828 return; 829 } 830 #ifdef ASSERT 831 n->dump(1); 832 assert(false, "not unsafe"); 833 #endif 834 } 835 } 836 837 bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) { 838 Node* adr = n->in(MemNode::Address); 839 const Type *adr_type = _igvn->type(adr); 840 adr_type = adr_type->make_ptr(); 841 #ifdef ASSERT 842 if (adr_type == NULL) { 843 n->dump(1); 844 assert(adr_type != NULL, "dead node should not be on list"); 845 return true; 846 } 847 #endif 848 849 if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN || 850 opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) { 851 add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL); 852 } 853 854 if (adr_type->isa_oopptr() 855 || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) 856 && adr_type == TypeRawPtr::NOTNULL 857 && is_captured_store_address(adr))) { 858 // Point Address to Value 859 PointsToNode* adr_ptn = ptnode_adr(adr->_idx); 860 assert(adr_ptn != NULL && 861 adr_ptn->as_Field()->is_oop(), "node should be registered"); 862 Node* val = n->in(MemNode::ValueIn); 863 PointsToNode* ptn = ptnode_adr(val->_idx); 864 assert(ptn != NULL, "node should be registered"); 865 add_edge(adr_ptn, ptn); 866 return true; 867 } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) { 868 // Stored value escapes in unsafe access. 869 Node* val = n->in(MemNode::ValueIn); 870 PointsToNode* ptn = ptnode_adr(val->_idx); 871 assert(ptn != NULL, "node should be registered"); 872 set_escape_state(ptn, PointsToNode::GlobalEscape); 873 // Add edge to object for unsafe access with offset. 874 PointsToNode* adr_ptn = ptnode_adr(adr->_idx); 875 assert(adr_ptn != NULL, "node should be registered"); 876 if (adr_ptn->is_Field()) { 877 assert(adr_ptn->as_Field()->is_oop(), "should be oop field"); 878 add_edge(adr_ptn, ptn); 879 } 880 return true; 881 } 882 return false; 883 } 884 885 void ConnectionGraph::add_call_node(CallNode* call) { 886 assert(call->returns_pointer(), "only for call which returns pointer"); 887 uint call_idx = call->_idx; 888 if (call->is_Allocate()) { 889 Node* k = call->in(AllocateNode::KlassNode); 890 const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr(); 891 assert(kt != NULL, "TypeKlassPtr required."); 892 ciKlass* cik = kt->klass(); 893 PointsToNode::EscapeState es = PointsToNode::NoEscape; 894 bool scalar_replaceable = true; 895 if (call->is_AllocateArray()) { 896 if (!cik->is_array_klass()) { // StressReflectiveCode 897 es = PointsToNode::GlobalEscape; 898 } else { 899 int length = call->in(AllocateNode::ALength)->find_int_con(-1); 900 if (length < 0 || length > EliminateAllocationArraySizeLimit) { 901 // Not scalar replaceable if the length is not constant or too big. 902 scalar_replaceable = false; 903 } 904 } 905 } else { // Allocate instance 906 if (cik->is_subclass_of(_compile->env()->Thread_klass()) || 907 cik->is_subclass_of(_compile->env()->Reference_klass()) || 908 !cik->is_instance_klass() || // StressReflectiveCode 909 !cik->as_instance_klass()->can_be_instantiated() || 910 cik->as_instance_klass()->has_finalizer()) { 911 es = PointsToNode::GlobalEscape; 912 } 913 } 914 add_java_object(call, es); 915 PointsToNode* ptn = ptnode_adr(call_idx); 916 if (!scalar_replaceable && ptn->scalar_replaceable()) { 917 ptn->set_scalar_replaceable(false); 918 } 919 } else if (call->is_CallStaticJava()) { 920 // Call nodes could be different types: 921 // 922 // 1. CallDynamicJavaNode (what happened during call is unknown): 923 // 924 // - mapped to GlobalEscape JavaObject node if oop is returned; 925 // 926 // - all oop arguments are escaping globally; 927 // 928 // 2. CallStaticJavaNode (execute bytecode analysis if possible): 929 // 930 // - the same as CallDynamicJavaNode if can't do bytecode analysis; 931 // 932 // - mapped to GlobalEscape JavaObject node if unknown oop is returned; 933 // - mapped to NoEscape JavaObject node if non-escaping object allocated 934 // during call is returned; 935 // - mapped to ArgEscape LocalVar node pointed to object arguments 936 // which are returned and does not escape during call; 937 // 938 // - oop arguments escaping status is defined by bytecode analysis; 939 // 940 // For a static call, we know exactly what method is being called. 941 // Use bytecode estimator to record whether the call's return value escapes. 942 ciMethod* meth = call->as_CallJava()->method(); 943 if (meth == NULL) { 944 const char* name = call->as_CallStaticJava()->_name; 945 assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check"); 946 // Returns a newly allocated unescaped object. 947 add_java_object(call, PointsToNode::NoEscape); 948 ptnode_adr(call_idx)->set_scalar_replaceable(false); 949 } else if (meth->is_boxing_method()) { 950 // Returns boxing object 951 PointsToNode::EscapeState es; 952 vmIntrinsics::ID intr = meth->intrinsic_id(); 953 if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) { 954 // It does not escape if object is always allocated. 955 es = PointsToNode::NoEscape; 956 } else { 957 // It escapes globally if object could be loaded from cache. 958 es = PointsToNode::GlobalEscape; 959 } 960 add_java_object(call, es); 961 } else { 962 BCEscapeAnalyzer* call_analyzer = meth->get_bcea(); 963 call_analyzer->copy_dependencies(_compile->dependencies()); 964 if (call_analyzer->is_return_allocated()) { 965 // Returns a newly allocated unescaped object, simply 966 // update dependency information. 967 // Mark it as NoEscape so that objects referenced by 968 // it's fields will be marked as NoEscape at least. 969 add_java_object(call, PointsToNode::NoEscape); 970 ptnode_adr(call_idx)->set_scalar_replaceable(false); 971 } else { 972 // Determine whether any arguments are returned. 973 const TypeTuple* d = call->tf()->domain(); 974 bool ret_arg = false; 975 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 976 if (d->field_at(i)->isa_ptr() != NULL && 977 call_analyzer->is_arg_returned(i - TypeFunc::Parms)) { 978 ret_arg = true; 979 break; 980 } 981 } 982 if (ret_arg) { 983 add_local_var(call, PointsToNode::ArgEscape); 984 } else { 985 // Returns unknown object. 986 map_ideal_node(call, phantom_obj); 987 } 988 } 989 } 990 } else { 991 // An other type of call, assume the worst case: 992 // returned value is unknown and globally escapes. 993 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check"); 994 map_ideal_node(call, phantom_obj); 995 } 996 } 997 998 void ConnectionGraph::process_call_arguments(CallNode *call) { 999 bool is_arraycopy = false; 1000 switch (call->Opcode()) { 1001 #ifdef ASSERT 1002 case Op_Allocate: 1003 case Op_AllocateArray: 1004 case Op_Lock: 1005 case Op_Unlock: 1006 assert(false, "should be done already"); 1007 break; 1008 #endif 1009 case Op_ArrayCopy: 1010 case Op_CallLeafNoFP: 1011 // Most array copies are ArrayCopy nodes at this point but there 1012 // are still a few direct calls to the copy subroutines (See 1013 // PhaseStringOpts::copy_string()) 1014 is_arraycopy = (call->Opcode() == Op_ArrayCopy) || 1015 call->as_CallLeaf()->is_call_to_arraycopystub(); 1016 // fall through 1017 case Op_CallLeaf: { 1018 // Stub calls, objects do not escape but they are not scale replaceable. 1019 // Adjust escape state for outgoing arguments. 1020 const TypeTuple * d = call->tf()->domain(); 1021 bool src_has_oops = false; 1022 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 1023 const Type* at = d->field_at(i); 1024 Node *arg = call->in(i); 1025 if (arg == NULL) { 1026 continue; 1027 } 1028 const Type *aat = _igvn->type(arg); 1029 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) 1030 continue; 1031 if (arg->is_AddP()) { 1032 // 1033 // The inline_native_clone() case when the arraycopy stub is called 1034 // after the allocation before Initialize and CheckCastPP nodes. 1035 // Or normal arraycopy for object arrays case. 1036 // 1037 // Set AddP's base (Allocate) as not scalar replaceable since 1038 // pointer to the base (with offset) is passed as argument. 1039 // 1040 arg = get_addp_base(arg); 1041 } 1042 PointsToNode* arg_ptn = ptnode_adr(arg->_idx); 1043 assert(arg_ptn != NULL, "should be registered"); 1044 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state(); 1045 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) { 1046 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR || 1047 aat->isa_ptr() != NULL, "expecting an Ptr"); 1048 bool arg_has_oops = aat->isa_oopptr() && 1049 (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() || 1050 (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass())); 1051 if (i == TypeFunc::Parms) { 1052 src_has_oops = arg_has_oops; 1053 } 1054 // 1055 // src or dst could be j.l.Object when other is basic type array: 1056 // 1057 // arraycopy(char[],0,Object*,0,size); 1058 // arraycopy(Object*,0,char[],0,size); 1059 // 1060 // Don't add edges in such cases. 1061 // 1062 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy && 1063 arg_has_oops && (i > TypeFunc::Parms); 1064 #ifdef ASSERT 1065 if (!(is_arraycopy || 1066 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) || 1067 (call->as_CallLeaf()->_name != NULL && 1068 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 || 1069 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 || 1070 strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 || 1071 strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 || 1072 strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 || 1073 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 || 1074 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 || 1075 strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 || 1076 strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 || 1077 strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 || 1078 strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 || 1079 strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 || 1080 strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 || 1081 strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 || 1082 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 || 1083 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 || 1084 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 || 1085 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 || 1086 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 || 1087 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 || 1088 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 || 1089 strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 || 1090 strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 || 1091 strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 || 1092 strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 || 1093 strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 || 1094 strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 || 1095 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0) 1096 ))) { 1097 call->dump(); 1098 fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name); 1099 } 1100 #endif 1101 // Always process arraycopy's destination object since 1102 // we need to add all possible edges to references in 1103 // source object. 1104 if (arg_esc >= PointsToNode::ArgEscape && 1105 !arg_is_arraycopy_dest) { 1106 continue; 1107 } 1108 PointsToNode::EscapeState es = PointsToNode::ArgEscape; 1109 if (call->is_ArrayCopy()) { 1110 ArrayCopyNode* ac = call->as_ArrayCopy(); 1111 if (ac->is_clonebasic() || 1112 ac->is_arraycopy_validated() || 1113 ac->is_copyof_validated() || 1114 ac->is_copyofrange_validated()) { 1115 es = PointsToNode::NoEscape; 1116 } 1117 } 1118 set_escape_state(arg_ptn, es); 1119 if (arg_is_arraycopy_dest) { 1120 Node* src = call->in(TypeFunc::Parms); 1121 if (src->is_AddP()) { 1122 src = get_addp_base(src); 1123 } 1124 PointsToNode* src_ptn = ptnode_adr(src->_idx); 1125 assert(src_ptn != NULL, "should be registered"); 1126 if (arg_ptn != src_ptn) { 1127 // Special arraycopy edge: 1128 // A destination object's field can't have the source object 1129 // as base since objects escape states are not related. 1130 // Only escape state of destination object's fields affects 1131 // escape state of fields in source object. 1132 add_arraycopy(call, es, src_ptn, arg_ptn); 1133 } 1134 } 1135 } 1136 } 1137 break; 1138 } 1139 case Op_CallStaticJava: { 1140 // For a static call, we know exactly what method is being called. 1141 // Use bytecode estimator to record the call's escape affects 1142 #ifdef ASSERT 1143 const char* name = call->as_CallStaticJava()->_name; 1144 assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only"); 1145 #endif 1146 ciMethod* meth = call->as_CallJava()->method(); 1147 if ((meth != NULL) && meth->is_boxing_method()) { 1148 break; // Boxing methods do not modify any oops. 1149 } 1150 BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL; 1151 // fall-through if not a Java method or no analyzer information 1152 if (call_analyzer != NULL) { 1153 PointsToNode* call_ptn = ptnode_adr(call->_idx); 1154 const TypeTuple* d = call->tf()->domain(); 1155 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 1156 const Type* at = d->field_at(i); 1157 int k = i - TypeFunc::Parms; 1158 Node* arg = call->in(i); 1159 PointsToNode* arg_ptn = ptnode_adr(arg->_idx); 1160 if (at->isa_ptr() != NULL && 1161 call_analyzer->is_arg_returned(k)) { 1162 // The call returns arguments. 1163 if (call_ptn != NULL) { // Is call's result used? 1164 assert(call_ptn->is_LocalVar(), "node should be registered"); 1165 assert(arg_ptn != NULL, "node should be registered"); 1166 add_edge(call_ptn, arg_ptn); 1167 } 1168 } 1169 if (at->isa_oopptr() != NULL && 1170 arg_ptn->escape_state() < PointsToNode::GlobalEscape) { 1171 if (!call_analyzer->is_arg_stack(k)) { 1172 // The argument global escapes 1173 set_escape_state(arg_ptn, PointsToNode::GlobalEscape); 1174 } else { 1175 set_escape_state(arg_ptn, PointsToNode::ArgEscape); 1176 if (!call_analyzer->is_arg_local(k)) { 1177 // The argument itself doesn't escape, but any fields might 1178 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape); 1179 } 1180 } 1181 } 1182 } 1183 if (call_ptn != NULL && call_ptn->is_LocalVar()) { 1184 // The call returns arguments. 1185 assert(call_ptn->edge_count() > 0, "sanity"); 1186 if (!call_analyzer->is_return_local()) { 1187 // Returns also unknown object. 1188 add_edge(call_ptn, phantom_obj); 1189 } 1190 } 1191 break; 1192 } 1193 } 1194 default: { 1195 // Fall-through here if not a Java method or no analyzer information 1196 // or some other type of call, assume the worst case: all arguments 1197 // globally escape. 1198 const TypeTuple* d = call->tf()->domain(); 1199 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 1200 const Type* at = d->field_at(i); 1201 if (at->isa_oopptr() != NULL) { 1202 Node* arg = call->in(i); 1203 if (arg->is_AddP()) { 1204 arg = get_addp_base(arg); 1205 } 1206 assert(ptnode_adr(arg->_idx) != NULL, "should be defined already"); 1207 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape); 1208 } 1209 } 1210 } 1211 } 1212 } 1213 1214 1215 // Finish Graph construction. 1216 bool ConnectionGraph::complete_connection_graph( 1217 GrowableArray<PointsToNode*>& ptnodes_worklist, 1218 GrowableArray<JavaObjectNode*>& non_escaped_worklist, 1219 GrowableArray<JavaObjectNode*>& java_objects_worklist, 1220 GrowableArray<FieldNode*>& oop_fields_worklist) { 1221 // Normally only 1-3 passes needed to build Connection Graph depending 1222 // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler. 1223 // Set limit to 20 to catch situation when something did go wrong and 1224 // bailout Escape Analysis. 1225 // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag. 1226 #define CG_BUILD_ITER_LIMIT 20 1227 1228 // Propagate GlobalEscape and ArgEscape escape states and check that 1229 // we still have non-escaping objects. The method pushs on _worklist 1230 // Field nodes which reference phantom_object. 1231 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) { 1232 return false; // Nothing to do. 1233 } 1234 // Now propagate references to all JavaObject nodes. 1235 int java_objects_length = java_objects_worklist.length(); 1236 elapsedTimer time; 1237 bool timeout = false; 1238 int new_edges = 1; 1239 int iterations = 0; 1240 do { 1241 while ((new_edges > 0) && 1242 (iterations++ < CG_BUILD_ITER_LIMIT)) { 1243 double start_time = time.seconds(); 1244 time.start(); 1245 new_edges = 0; 1246 // Propagate references to phantom_object for nodes pushed on _worklist 1247 // by find_non_escaped_objects() and find_field_value(). 1248 new_edges += add_java_object_edges(phantom_obj, false); 1249 for (int next = 0; next < java_objects_length; ++next) { 1250 JavaObjectNode* ptn = java_objects_worklist.at(next); 1251 new_edges += add_java_object_edges(ptn, true); 1252 1253 #define SAMPLE_SIZE 4 1254 if ((next % SAMPLE_SIZE) == 0) { 1255 // Each 4 iterations calculate how much time it will take 1256 // to complete graph construction. 1257 time.stop(); 1258 // Poll for requests from shutdown mechanism to quiesce compiler 1259 // because Connection graph construction may take long time. 1260 CompileBroker::maybe_block(); 1261 double stop_time = time.seconds(); 1262 double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE; 1263 double time_until_end = time_per_iter * (double)(java_objects_length - next); 1264 if ((start_time + time_until_end) >= EscapeAnalysisTimeout) { 1265 timeout = true; 1266 break; // Timeout 1267 } 1268 start_time = stop_time; 1269 time.start(); 1270 } 1271 #undef SAMPLE_SIZE 1272 1273 } 1274 if (timeout) break; 1275 if (new_edges > 0) { 1276 // Update escape states on each iteration if graph was updated. 1277 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) { 1278 return false; // Nothing to do. 1279 } 1280 } 1281 time.stop(); 1282 if (time.seconds() >= EscapeAnalysisTimeout) { 1283 timeout = true; 1284 break; 1285 } 1286 } 1287 if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) { 1288 time.start(); 1289 // Find fields which have unknown value. 1290 int fields_length = oop_fields_worklist.length(); 1291 for (int next = 0; next < fields_length; next++) { 1292 FieldNode* field = oop_fields_worklist.at(next); 1293 if (field->edge_count() == 0) { 1294 new_edges += find_field_value(field); 1295 // This code may added new edges to phantom_object. 1296 // Need an other cycle to propagate references to phantom_object. 1297 } 1298 } 1299 time.stop(); 1300 if (time.seconds() >= EscapeAnalysisTimeout) { 1301 timeout = true; 1302 break; 1303 } 1304 } else { 1305 new_edges = 0; // Bailout 1306 } 1307 } while (new_edges > 0); 1308 1309 // Bailout if passed limits. 1310 if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) { 1311 Compile* C = _compile; 1312 if (C->log() != NULL) { 1313 C->log()->begin_elem("connectionGraph_bailout reason='reached "); 1314 C->log()->text("%s", timeout ? "time" : "iterations"); 1315 C->log()->end_elem(" limit'"); 1316 } 1317 assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d", 1318 time.seconds(), iterations, nodes_size(), ptnodes_worklist.length()); 1319 // Possible infinite build_connection_graph loop, 1320 // bailout (no changes to ideal graph were made). 1321 return false; 1322 } 1323 #ifdef ASSERT 1324 if (Verbose && PrintEscapeAnalysis) { 1325 tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d", 1326 iterations, nodes_size(), ptnodes_worklist.length()); 1327 } 1328 #endif 1329 1330 #undef CG_BUILD_ITER_LIMIT 1331 1332 // Find fields initialized by NULL for non-escaping Allocations. 1333 int non_escaped_length = non_escaped_worklist.length(); 1334 for (int next = 0; next < non_escaped_length; next++) { 1335 JavaObjectNode* ptn = non_escaped_worklist.at(next); 1336 PointsToNode::EscapeState es = ptn->escape_state(); 1337 assert(es <= PointsToNode::ArgEscape, "sanity"); 1338 if (es == PointsToNode::NoEscape) { 1339 if (find_init_values(ptn, null_obj, _igvn) > 0) { 1340 // Adding references to NULL object does not change escape states 1341 // since it does not escape. Also no fields are added to NULL object. 1342 add_java_object_edges(null_obj, false); 1343 } 1344 } 1345 Node* n = ptn->ideal_node(); 1346 if (n->is_Allocate()) { 1347 // The object allocated by this Allocate node will never be 1348 // seen by an other thread. Mark it so that when it is 1349 // expanded no MemBarStoreStore is added. 1350 InitializeNode* ini = n->as_Allocate()->initialization(); 1351 if (ini != NULL) 1352 ini->set_does_not_escape(); 1353 } 1354 } 1355 return true; // Finished graph construction. 1356 } 1357 1358 // Propagate GlobalEscape and ArgEscape escape states to all nodes 1359 // and check that we still have non-escaping java objects. 1360 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist, 1361 GrowableArray<JavaObjectNode*>& non_escaped_worklist) { 1362 GrowableArray<PointsToNode*> escape_worklist; 1363 // First, put all nodes with GlobalEscape and ArgEscape states on worklist. 1364 int ptnodes_length = ptnodes_worklist.length(); 1365 for (int next = 0; next < ptnodes_length; ++next) { 1366 PointsToNode* ptn = ptnodes_worklist.at(next); 1367 if (ptn->escape_state() >= PointsToNode::ArgEscape || 1368 ptn->fields_escape_state() >= PointsToNode::ArgEscape) { 1369 escape_worklist.push(ptn); 1370 } 1371 } 1372 // Set escape states to referenced nodes (edges list). 1373 while (escape_worklist.length() > 0) { 1374 PointsToNode* ptn = escape_worklist.pop(); 1375 PointsToNode::EscapeState es = ptn->escape_state(); 1376 PointsToNode::EscapeState field_es = ptn->fields_escape_state(); 1377 if (ptn->is_Field() && ptn->as_Field()->is_oop() && 1378 es >= PointsToNode::ArgEscape) { 1379 // GlobalEscape or ArgEscape state of field means it has unknown value. 1380 if (add_edge(ptn, phantom_obj)) { 1381 // New edge was added 1382 add_field_uses_to_worklist(ptn->as_Field()); 1383 } 1384 } 1385 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 1386 PointsToNode* e = i.get(); 1387 if (e->is_Arraycopy()) { 1388 assert(ptn->arraycopy_dst(), "sanity"); 1389 // Propagate only fields escape state through arraycopy edge. 1390 if (e->fields_escape_state() < field_es) { 1391 set_fields_escape_state(e, field_es); 1392 escape_worklist.push(e); 1393 } 1394 } else if (es >= field_es) { 1395 // fields_escape_state is also set to 'es' if it is less than 'es'. 1396 if (e->escape_state() < es) { 1397 set_escape_state(e, es); 1398 escape_worklist.push(e); 1399 } 1400 } else { 1401 // Propagate field escape state. 1402 bool es_changed = false; 1403 if (e->fields_escape_state() < field_es) { 1404 set_fields_escape_state(e, field_es); 1405 es_changed = true; 1406 } 1407 if ((e->escape_state() < field_es) && 1408 e->is_Field() && ptn->is_JavaObject() && 1409 e->as_Field()->is_oop()) { 1410 // Change escape state of referenced fields. 1411 set_escape_state(e, field_es); 1412 es_changed = true; 1413 } else if (e->escape_state() < es) { 1414 set_escape_state(e, es); 1415 es_changed = true; 1416 } 1417 if (es_changed) { 1418 escape_worklist.push(e); 1419 } 1420 } 1421 } 1422 } 1423 // Remove escaped objects from non_escaped list. 1424 for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) { 1425 JavaObjectNode* ptn = non_escaped_worklist.at(next); 1426 if (ptn->escape_state() >= PointsToNode::GlobalEscape) { 1427 non_escaped_worklist.delete_at(next); 1428 } 1429 if (ptn->escape_state() == PointsToNode::NoEscape) { 1430 // Find fields in non-escaped allocations which have unknown value. 1431 find_init_values(ptn, phantom_obj, NULL); 1432 } 1433 } 1434 return (non_escaped_worklist.length() > 0); 1435 } 1436 1437 // Add all references to JavaObject node by walking over all uses. 1438 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) { 1439 int new_edges = 0; 1440 if (populate_worklist) { 1441 // Populate _worklist by uses of jobj's uses. 1442 for (UseIterator i(jobj); i.has_next(); i.next()) { 1443 PointsToNode* use = i.get(); 1444 if (use->is_Arraycopy()) 1445 continue; 1446 add_uses_to_worklist(use); 1447 if (use->is_Field() && use->as_Field()->is_oop()) { 1448 // Put on worklist all field's uses (loads) and 1449 // related field nodes (same base and offset). 1450 add_field_uses_to_worklist(use->as_Field()); 1451 } 1452 } 1453 } 1454 for (int l = 0; l < _worklist.length(); l++) { 1455 PointsToNode* use = _worklist.at(l); 1456 if (PointsToNode::is_base_use(use)) { 1457 // Add reference from jobj to field and from field to jobj (field's base). 1458 use = PointsToNode::get_use_node(use)->as_Field(); 1459 if (add_base(use->as_Field(), jobj)) { 1460 new_edges++; 1461 } 1462 continue; 1463 } 1464 assert(!use->is_JavaObject(), "sanity"); 1465 if (use->is_Arraycopy()) { 1466 if (jobj == null_obj) // NULL object does not have field edges 1467 continue; 1468 // Added edge from Arraycopy node to arraycopy's source java object 1469 if (add_edge(use, jobj)) { 1470 jobj->set_arraycopy_src(); 1471 new_edges++; 1472 } 1473 // and stop here. 1474 continue; 1475 } 1476 if (!add_edge(use, jobj)) 1477 continue; // No new edge added, there was such edge already. 1478 new_edges++; 1479 if (use->is_LocalVar()) { 1480 add_uses_to_worklist(use); 1481 if (use->arraycopy_dst()) { 1482 for (EdgeIterator i(use); i.has_next(); i.next()) { 1483 PointsToNode* e = i.get(); 1484 if (e->is_Arraycopy()) { 1485 if (jobj == null_obj) // NULL object does not have field edges 1486 continue; 1487 // Add edge from arraycopy's destination java object to Arraycopy node. 1488 if (add_edge(jobj, e)) { 1489 new_edges++; 1490 jobj->set_arraycopy_dst(); 1491 } 1492 } 1493 } 1494 } 1495 } else { 1496 // Added new edge to stored in field values. 1497 // Put on worklist all field's uses (loads) and 1498 // related field nodes (same base and offset). 1499 add_field_uses_to_worklist(use->as_Field()); 1500 } 1501 } 1502 _worklist.clear(); 1503 _in_worklist.reset(); 1504 return new_edges; 1505 } 1506 1507 // Put on worklist all related field nodes. 1508 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) { 1509 assert(field->is_oop(), "sanity"); 1510 int offset = field->offset(); 1511 add_uses_to_worklist(field); 1512 // Loop over all bases of this field and push on worklist Field nodes 1513 // with the same offset and base (since they may reference the same field). 1514 for (BaseIterator i(field); i.has_next(); i.next()) { 1515 PointsToNode* base = i.get(); 1516 add_fields_to_worklist(field, base); 1517 // Check if the base was source object of arraycopy and go over arraycopy's 1518 // destination objects since values stored to a field of source object are 1519 // accessable by uses (loads) of fields of destination objects. 1520 if (base->arraycopy_src()) { 1521 for (UseIterator j(base); j.has_next(); j.next()) { 1522 PointsToNode* arycp = j.get(); 1523 if (arycp->is_Arraycopy()) { 1524 for (UseIterator k(arycp); k.has_next(); k.next()) { 1525 PointsToNode* abase = k.get(); 1526 if (abase->arraycopy_dst() && abase != base) { 1527 // Look for the same arraycopy reference. 1528 add_fields_to_worklist(field, abase); 1529 } 1530 } 1531 } 1532 } 1533 } 1534 } 1535 } 1536 1537 // Put on worklist all related field nodes. 1538 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) { 1539 int offset = field->offset(); 1540 if (base->is_LocalVar()) { 1541 for (UseIterator j(base); j.has_next(); j.next()) { 1542 PointsToNode* f = j.get(); 1543 if (PointsToNode::is_base_use(f)) { // Field 1544 f = PointsToNode::get_use_node(f); 1545 if (f == field || !f->as_Field()->is_oop()) 1546 continue; 1547 int offs = f->as_Field()->offset(); 1548 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) { 1549 add_to_worklist(f); 1550 } 1551 } 1552 } 1553 } else { 1554 assert(base->is_JavaObject(), "sanity"); 1555 if (// Skip phantom_object since it is only used to indicate that 1556 // this field's content globally escapes. 1557 (base != phantom_obj) && 1558 // NULL object node does not have fields. 1559 (base != null_obj)) { 1560 for (EdgeIterator i(base); i.has_next(); i.next()) { 1561 PointsToNode* f = i.get(); 1562 // Skip arraycopy edge since store to destination object field 1563 // does not update value in source object field. 1564 if (f->is_Arraycopy()) { 1565 assert(base->arraycopy_dst(), "sanity"); 1566 continue; 1567 } 1568 if (f == field || !f->as_Field()->is_oop()) 1569 continue; 1570 int offs = f->as_Field()->offset(); 1571 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) { 1572 add_to_worklist(f); 1573 } 1574 } 1575 } 1576 } 1577 } 1578 1579 // Find fields which have unknown value. 1580 int ConnectionGraph::find_field_value(FieldNode* field) { 1581 // Escaped fields should have init value already. 1582 assert(field->escape_state() == PointsToNode::NoEscape, "sanity"); 1583 int new_edges = 0; 1584 for (BaseIterator i(field); i.has_next(); i.next()) { 1585 PointsToNode* base = i.get(); 1586 if (base->is_JavaObject()) { 1587 // Skip Allocate's fields which will be processed later. 1588 if (base->ideal_node()->is_Allocate()) 1589 return 0; 1590 assert(base == null_obj, "only NULL ptr base expected here"); 1591 } 1592 } 1593 if (add_edge(field, phantom_obj)) { 1594 // New edge was added 1595 new_edges++; 1596 add_field_uses_to_worklist(field); 1597 } 1598 return new_edges; 1599 } 1600 1601 // Find fields initializing values for allocations. 1602 int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) { 1603 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only"); 1604 int new_edges = 0; 1605 Node* alloc = pta->ideal_node(); 1606 if (init_val == phantom_obj) { 1607 // Do nothing for Allocate nodes since its fields values are 1608 // "known" unless they are initialized by arraycopy/clone. 1609 if (alloc->is_Allocate() && !pta->arraycopy_dst()) 1610 return 0; 1611 assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity"); 1612 #ifdef ASSERT 1613 if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) { 1614 const char* name = alloc->as_CallStaticJava()->_name; 1615 assert(strncmp(name, "_multianewarray", 15) == 0, "sanity"); 1616 } 1617 #endif 1618 // Non-escaped allocation returned from Java or runtime call have 1619 // unknown values in fields. 1620 for (EdgeIterator i(pta); i.has_next(); i.next()) { 1621 PointsToNode* field = i.get(); 1622 if (field->is_Field() && field->as_Field()->is_oop()) { 1623 if (add_edge(field, phantom_obj)) { 1624 // New edge was added 1625 new_edges++; 1626 add_field_uses_to_worklist(field->as_Field()); 1627 } 1628 } 1629 } 1630 return new_edges; 1631 } 1632 assert(init_val == null_obj, "sanity"); 1633 // Do nothing for Call nodes since its fields values are unknown. 1634 if (!alloc->is_Allocate()) 1635 return 0; 1636 1637 InitializeNode* ini = alloc->as_Allocate()->initialization(); 1638 bool visited_bottom_offset = false; 1639 GrowableArray<int> offsets_worklist; 1640 1641 // Check if an oop field's initializing value is recorded and add 1642 // a corresponding NULL if field's value if it is not recorded. 1643 // Connection Graph does not record a default initialization by NULL 1644 // captured by Initialize node. 1645 // 1646 for (EdgeIterator i(pta); i.has_next(); i.next()) { 1647 PointsToNode* field = i.get(); // Field (AddP) 1648 if (!field->is_Field() || !field->as_Field()->is_oop()) 1649 continue; // Not oop field 1650 int offset = field->as_Field()->offset(); 1651 if (offset == Type::OffsetBot) { 1652 if (!visited_bottom_offset) { 1653 // OffsetBot is used to reference array's element, 1654 // always add reference to NULL to all Field nodes since we don't 1655 // known which element is referenced. 1656 if (add_edge(field, null_obj)) { 1657 // New edge was added 1658 new_edges++; 1659 add_field_uses_to_worklist(field->as_Field()); 1660 visited_bottom_offset = true; 1661 } 1662 } 1663 } else { 1664 // Check only oop fields. 1665 const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type(); 1666 if (adr_type->isa_rawptr()) { 1667 #ifdef ASSERT 1668 // Raw pointers are used for initializing stores so skip it 1669 // since it should be recorded already 1670 Node* base = get_addp_base(field->ideal_node()); 1671 assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type"); 1672 #endif 1673 continue; 1674 } 1675 if (!offsets_worklist.contains(offset)) { 1676 offsets_worklist.append(offset); 1677 Node* value = NULL; 1678 if (ini != NULL) { 1679 // StoreP::memory_type() == T_ADDRESS 1680 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS; 1681 Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase); 1682 // Make sure initializing store has the same type as this AddP. 1683 // This AddP may reference non existing field because it is on a 1684 // dead branch of bimorphic call which is not eliminated yet. 1685 if (store != NULL && store->is_Store() && 1686 store->as_Store()->memory_type() == ft) { 1687 value = store->in(MemNode::ValueIn); 1688 #ifdef ASSERT 1689 if (VerifyConnectionGraph) { 1690 // Verify that AddP already points to all objects the value points to. 1691 PointsToNode* val = ptnode_adr(value->_idx); 1692 assert((val != NULL), "should be processed already"); 1693 PointsToNode* missed_obj = NULL; 1694 if (val->is_JavaObject()) { 1695 if (!field->points_to(val->as_JavaObject())) { 1696 missed_obj = val; 1697 } 1698 } else { 1699 if (!val->is_LocalVar() || (val->edge_count() == 0)) { 1700 tty->print_cr("----------init store has invalid value -----"); 1701 store->dump(); 1702 val->dump(); 1703 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already"); 1704 } 1705 for (EdgeIterator j(val); j.has_next(); j.next()) { 1706 PointsToNode* obj = j.get(); 1707 if (obj->is_JavaObject()) { 1708 if (!field->points_to(obj->as_JavaObject())) { 1709 missed_obj = obj; 1710 break; 1711 } 1712 } 1713 } 1714 } 1715 if (missed_obj != NULL) { 1716 tty->print_cr("----------field---------------------------------"); 1717 field->dump(); 1718 tty->print_cr("----------missed referernce to object-----------"); 1719 missed_obj->dump(); 1720 tty->print_cr("----------object referernced by init store -----"); 1721 store->dump(); 1722 val->dump(); 1723 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference"); 1724 } 1725 } 1726 #endif 1727 } else { 1728 // There could be initializing stores which follow allocation. 1729 // For example, a volatile field store is not collected 1730 // by Initialize node. 1731 // 1732 // Need to check for dependent loads to separate such stores from 1733 // stores which follow loads. For now, add initial value NULL so 1734 // that compare pointers optimization works correctly. 1735 } 1736 } 1737 if (value == NULL) { 1738 // A field's initializing value was not recorded. Add NULL. 1739 if (add_edge(field, null_obj)) { 1740 // New edge was added 1741 new_edges++; 1742 add_field_uses_to_worklist(field->as_Field()); 1743 } 1744 } 1745 } 1746 } 1747 } 1748 return new_edges; 1749 } 1750 1751 // Adjust scalar_replaceable state after Connection Graph is built. 1752 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) { 1753 // Search for non-escaping objects which are not scalar replaceable 1754 // and mark them to propagate the state to referenced objects. 1755 1756 // 1. An object is not scalar replaceable if the field into which it is 1757 // stored has unknown offset (stored into unknown element of an array). 1758 // 1759 for (UseIterator i(jobj); i.has_next(); i.next()) { 1760 PointsToNode* use = i.get(); 1761 if (use->is_Arraycopy()) { 1762 continue; 1763 } 1764 if (use->is_Field()) { 1765 FieldNode* field = use->as_Field(); 1766 assert(field->is_oop() && field->scalar_replaceable(), "sanity"); 1767 if (field->offset() == Type::OffsetBot) { 1768 jobj->set_scalar_replaceable(false); 1769 return; 1770 } 1771 // 2. An object is not scalar replaceable if the field into which it is 1772 // stored has multiple bases one of which is null. 1773 if (field->base_count() > 1) { 1774 for (BaseIterator i(field); i.has_next(); i.next()) { 1775 PointsToNode* base = i.get(); 1776 if (base == null_obj) { 1777 jobj->set_scalar_replaceable(false); 1778 return; 1779 } 1780 } 1781 } 1782 } 1783 assert(use->is_Field() || use->is_LocalVar(), "sanity"); 1784 // 3. An object is not scalar replaceable if it is merged with other objects. 1785 for (EdgeIterator j(use); j.has_next(); j.next()) { 1786 PointsToNode* ptn = j.get(); 1787 if (ptn->is_JavaObject() && ptn != jobj) { 1788 // Mark all objects. 1789 jobj->set_scalar_replaceable(false); 1790 ptn->set_scalar_replaceable(false); 1791 } 1792 } 1793 if (!jobj->scalar_replaceable()) { 1794 return; 1795 } 1796 } 1797 1798 for (EdgeIterator j(jobj); j.has_next(); j.next()) { 1799 if (j.get()->is_Arraycopy()) { 1800 continue; 1801 } 1802 1803 // Non-escaping object node should point only to field nodes. 1804 FieldNode* field = j.get()->as_Field(); 1805 int offset = field->as_Field()->offset(); 1806 1807 // 4. An object is not scalar replaceable if it has a field with unknown 1808 // offset (array's element is accessed in loop). 1809 if (offset == Type::OffsetBot) { 1810 jobj->set_scalar_replaceable(false); 1811 return; 1812 } 1813 // 5. Currently an object is not scalar replaceable if a LoadStore node 1814 // access its field since the field value is unknown after it. 1815 // 1816 Node* n = field->ideal_node(); 1817 1818 // Test for an unsafe access that was parsed as maybe off heap 1819 // (with a CheckCastPP to raw memory). 1820 assert(n->is_AddP(), "expect an address computation"); 1821 if (n->in(AddPNode::Base)->is_top() && 1822 n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) { 1823 assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected"); 1824 assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected"); 1825 jobj->set_scalar_replaceable(false); 1826 return; 1827 } 1828 1829 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1830 Node* u = n->fast_out(i); 1831 if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) { 1832 jobj->set_scalar_replaceable(false); 1833 return; 1834 } 1835 } 1836 1837 // 6. Or the address may point to more then one object. This may produce 1838 // the false positive result (set not scalar replaceable) 1839 // since the flow-insensitive escape analysis can't separate 1840 // the case when stores overwrite the field's value from the case 1841 // when stores happened on different control branches. 1842 // 1843 // Note: it will disable scalar replacement in some cases: 1844 // 1845 // Point p[] = new Point[1]; 1846 // p[0] = new Point(); // Will be not scalar replaced 1847 // 1848 // but it will save us from incorrect optimizations in next cases: 1849 // 1850 // Point p[] = new Point[1]; 1851 // if ( x ) p[0] = new Point(); // Will be not scalar replaced 1852 // 1853 if (field->base_count() > 1) { 1854 for (BaseIterator i(field); i.has_next(); i.next()) { 1855 PointsToNode* base = i.get(); 1856 // Don't take into account LocalVar nodes which 1857 // may point to only one object which should be also 1858 // this field's base by now. 1859 if (base->is_JavaObject() && base != jobj) { 1860 // Mark all bases. 1861 jobj->set_scalar_replaceable(false); 1862 base->set_scalar_replaceable(false); 1863 } 1864 } 1865 } 1866 } 1867 } 1868 1869 #ifdef ASSERT 1870 void ConnectionGraph::verify_connection_graph( 1871 GrowableArray<PointsToNode*>& ptnodes_worklist, 1872 GrowableArray<JavaObjectNode*>& non_escaped_worklist, 1873 GrowableArray<JavaObjectNode*>& java_objects_worklist, 1874 GrowableArray<Node*>& addp_worklist) { 1875 // Verify that graph is complete - no new edges could be added. 1876 int java_objects_length = java_objects_worklist.length(); 1877 int non_escaped_length = non_escaped_worklist.length(); 1878 int new_edges = 0; 1879 for (int next = 0; next < java_objects_length; ++next) { 1880 JavaObjectNode* ptn = java_objects_worklist.at(next); 1881 new_edges += add_java_object_edges(ptn, true); 1882 } 1883 assert(new_edges == 0, "graph was not complete"); 1884 // Verify that escape state is final. 1885 int length = non_escaped_worklist.length(); 1886 find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist); 1887 assert((non_escaped_length == non_escaped_worklist.length()) && 1888 (non_escaped_length == length) && 1889 (_worklist.length() == 0), "escape state was not final"); 1890 1891 // Verify fields information. 1892 int addp_length = addp_worklist.length(); 1893 for (int next = 0; next < addp_length; ++next ) { 1894 Node* n = addp_worklist.at(next); 1895 FieldNode* field = ptnode_adr(n->_idx)->as_Field(); 1896 if (field->is_oop()) { 1897 // Verify that field has all bases 1898 Node* base = get_addp_base(n); 1899 PointsToNode* ptn = ptnode_adr(base->_idx); 1900 if (ptn->is_JavaObject()) { 1901 assert(field->has_base(ptn->as_JavaObject()), "sanity"); 1902 } else { 1903 assert(ptn->is_LocalVar(), "sanity"); 1904 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 1905 PointsToNode* e = i.get(); 1906 if (e->is_JavaObject()) { 1907 assert(field->has_base(e->as_JavaObject()), "sanity"); 1908 } 1909 } 1910 } 1911 // Verify that all fields have initializing values. 1912 if (field->edge_count() == 0) { 1913 tty->print_cr("----------field does not have references----------"); 1914 field->dump(); 1915 for (BaseIterator i(field); i.has_next(); i.next()) { 1916 PointsToNode* base = i.get(); 1917 tty->print_cr("----------field has next base---------------------"); 1918 base->dump(); 1919 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) { 1920 tty->print_cr("----------base has fields-------------------------"); 1921 for (EdgeIterator j(base); j.has_next(); j.next()) { 1922 j.get()->dump(); 1923 } 1924 tty->print_cr("----------base has references---------------------"); 1925 for (UseIterator j(base); j.has_next(); j.next()) { 1926 j.get()->dump(); 1927 } 1928 } 1929 } 1930 for (UseIterator i(field); i.has_next(); i.next()) { 1931 i.get()->dump(); 1932 } 1933 assert(field->edge_count() > 0, "sanity"); 1934 } 1935 } 1936 } 1937 } 1938 #endif 1939 1940 // Optimize ideal graph. 1941 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist, 1942 GrowableArray<Node*>& storestore_worklist) { 1943 Compile* C = _compile; 1944 PhaseIterGVN* igvn = _igvn; 1945 if (EliminateLocks) { 1946 // Mark locks before changing ideal graph. 1947 int cnt = C->macro_count(); 1948 for( int i=0; i < cnt; i++ ) { 1949 Node *n = C->macro_node(i); 1950 if (n->is_AbstractLock()) { // Lock and Unlock nodes 1951 AbstractLockNode* alock = n->as_AbstractLock(); 1952 if (!alock->is_non_esc_obj()) { 1953 if (not_global_escape(alock->obj_node())) { 1954 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity"); 1955 // The lock could be marked eliminated by lock coarsening 1956 // code during first IGVN before EA. Replace coarsened flag 1957 // to eliminate all associated locks/unlocks. 1958 #ifdef ASSERT 1959 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3"); 1960 #endif 1961 alock->set_non_esc_obj(); 1962 } 1963 } 1964 } 1965 } 1966 } 1967 1968 if (OptimizePtrCompare) { 1969 // Add ConI(#CC_GT) and ConI(#CC_EQ). 1970 _pcmp_neq = igvn->makecon(TypeInt::CC_GT); 1971 _pcmp_eq = igvn->makecon(TypeInt::CC_EQ); 1972 // Optimize objects compare. 1973 while (ptr_cmp_worklist.length() != 0) { 1974 Node *n = ptr_cmp_worklist.pop(); 1975 Node *res = optimize_ptr_compare(n); 1976 if (res != NULL) { 1977 #ifndef PRODUCT 1978 if (PrintOptimizePtrCompare) { 1979 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ")); 1980 if (Verbose) { 1981 n->dump(1); 1982 } 1983 } 1984 #endif 1985 igvn->replace_node(n, res); 1986 } 1987 } 1988 // cleanup 1989 if (_pcmp_neq->outcnt() == 0) 1990 igvn->hash_delete(_pcmp_neq); 1991 if (_pcmp_eq->outcnt() == 0) 1992 igvn->hash_delete(_pcmp_eq); 1993 } 1994 1995 // For MemBarStoreStore nodes added in library_call.cpp, check 1996 // escape status of associated AllocateNode and optimize out 1997 // MemBarStoreStore node if the allocated object never escapes. 1998 while (storestore_worklist.length() != 0) { 1999 Node *n = storestore_worklist.pop(); 2000 MemBarStoreStoreNode *storestore = n ->as_MemBarStoreStore(); 2001 Node *alloc = storestore->in(MemBarNode::Precedent)->in(0); 2002 if (alloc->is_Allocate() && not_global_escape(alloc)) { 2003 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot); 2004 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory)); 2005 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control)); 2006 igvn->register_new_node_with_optimizer(mb); 2007 igvn->replace_node(storestore, mb); 2008 } 2009 } 2010 } 2011 2012 // Optimize objects compare. 2013 Node* ConnectionGraph::optimize_ptr_compare(Node* n) { 2014 assert(OptimizePtrCompare, "sanity"); 2015 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx); 2016 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx); 2017 JavaObjectNode* jobj1 = unique_java_object(n->in(1)); 2018 JavaObjectNode* jobj2 = unique_java_object(n->in(2)); 2019 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity"); 2020 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity"); 2021 2022 // Check simple cases first. 2023 if (jobj1 != NULL) { 2024 if (jobj1->escape_state() == PointsToNode::NoEscape) { 2025 if (jobj1 == jobj2) { 2026 // Comparing the same not escaping object. 2027 return _pcmp_eq; 2028 } 2029 Node* obj = jobj1->ideal_node(); 2030 // Comparing not escaping allocation. 2031 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 2032 !ptn2->points_to(jobj1)) { 2033 return _pcmp_neq; // This includes nullness check. 2034 } 2035 } 2036 } 2037 if (jobj2 != NULL) { 2038 if (jobj2->escape_state() == PointsToNode::NoEscape) { 2039 Node* obj = jobj2->ideal_node(); 2040 // Comparing not escaping allocation. 2041 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 2042 !ptn1->points_to(jobj2)) { 2043 return _pcmp_neq; // This includes nullness check. 2044 } 2045 } 2046 } 2047 if (jobj1 != NULL && jobj1 != phantom_obj && 2048 jobj2 != NULL && jobj2 != phantom_obj && 2049 jobj1->ideal_node()->is_Con() && 2050 jobj2->ideal_node()->is_Con()) { 2051 // Klass or String constants compare. Need to be careful with 2052 // compressed pointers - compare types of ConN and ConP instead of nodes. 2053 const Type* t1 = jobj1->ideal_node()->get_ptr_type(); 2054 const Type* t2 = jobj2->ideal_node()->get_ptr_type(); 2055 if (t1->make_ptr() == t2->make_ptr()) { 2056 return _pcmp_eq; 2057 } else { 2058 return _pcmp_neq; 2059 } 2060 } 2061 if (ptn1->meet(ptn2)) { 2062 return NULL; // Sets are not disjoint 2063 } 2064 2065 // Sets are disjoint. 2066 bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj); 2067 bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj); 2068 bool set1_has_null_ptr = ptn1->points_to(null_obj); 2069 bool set2_has_null_ptr = ptn2->points_to(null_obj); 2070 if ((set1_has_unknown_ptr && set2_has_null_ptr) || 2071 (set2_has_unknown_ptr && set1_has_null_ptr)) { 2072 // Check nullness of unknown object. 2073 return NULL; 2074 } 2075 2076 // Disjointness by itself is not sufficient since 2077 // alias analysis is not complete for escaped objects. 2078 // Disjoint sets are definitely unrelated only when 2079 // at least one set has only not escaping allocations. 2080 if (!set1_has_unknown_ptr && !set1_has_null_ptr) { 2081 if (ptn1->non_escaping_allocation()) { 2082 return _pcmp_neq; 2083 } 2084 } 2085 if (!set2_has_unknown_ptr && !set2_has_null_ptr) { 2086 if (ptn2->non_escaping_allocation()) { 2087 return _pcmp_neq; 2088 } 2089 } 2090 return NULL; 2091 } 2092 2093 // Connection Graph constuction functions. 2094 2095 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) { 2096 PointsToNode* ptadr = _nodes.at(n->_idx); 2097 if (ptadr != NULL) { 2098 assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity"); 2099 return; 2100 } 2101 Compile* C = _compile; 2102 ptadr = new (C->comp_arena()) LocalVarNode(this, n, es); 2103 _nodes.at_put(n->_idx, ptadr); 2104 } 2105 2106 void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) { 2107 PointsToNode* ptadr = _nodes.at(n->_idx); 2108 if (ptadr != NULL) { 2109 assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity"); 2110 return; 2111 } 2112 Compile* C = _compile; 2113 ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es); 2114 _nodes.at_put(n->_idx, ptadr); 2115 } 2116 2117 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) { 2118 PointsToNode* ptadr = _nodes.at(n->_idx); 2119 if (ptadr != NULL) { 2120 assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity"); 2121 return; 2122 } 2123 bool unsafe = false; 2124 bool is_oop = is_oop_field(n, offset, &unsafe); 2125 if (unsafe) { 2126 es = PointsToNode::GlobalEscape; 2127 } 2128 Compile* C = _compile; 2129 FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop); 2130 _nodes.at_put(n->_idx, field); 2131 } 2132 2133 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es, 2134 PointsToNode* src, PointsToNode* dst) { 2135 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar"); 2136 assert((src != null_obj) && (dst != null_obj), "not for ConP NULL"); 2137 PointsToNode* ptadr = _nodes.at(n->_idx); 2138 if (ptadr != NULL) { 2139 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity"); 2140 return; 2141 } 2142 Compile* C = _compile; 2143 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es); 2144 _nodes.at_put(n->_idx, ptadr); 2145 // Add edge from arraycopy node to source object. 2146 (void)add_edge(ptadr, src); 2147 src->set_arraycopy_src(); 2148 // Add edge from destination object to arraycopy node. 2149 (void)add_edge(dst, ptadr); 2150 dst->set_arraycopy_dst(); 2151 } 2152 2153 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) { 2154 const Type* adr_type = n->as_AddP()->bottom_type(); 2155 BasicType bt = T_INT; 2156 if (offset == Type::OffsetBot) { 2157 // Check only oop fields. 2158 if (!adr_type->isa_aryptr() || 2159 (adr_type->isa_aryptr()->klass() == NULL) || 2160 adr_type->isa_aryptr()->klass()->is_obj_array_klass()) { 2161 // OffsetBot is used to reference array's element. Ignore first AddP. 2162 if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) { 2163 bt = T_OBJECT; 2164 } 2165 } 2166 } else if (offset != oopDesc::klass_offset_in_bytes()) { 2167 if (adr_type->isa_instptr()) { 2168 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field(); 2169 if (field != NULL) { 2170 bt = field->layout_type(); 2171 } else { 2172 // Check for unsafe oop field access 2173 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || 2174 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || 2175 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || 2176 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) { 2177 bt = T_OBJECT; 2178 (*unsafe) = true; 2179 } 2180 } 2181 } else if (adr_type->isa_aryptr()) { 2182 if (offset == arrayOopDesc::length_offset_in_bytes()) { 2183 // Ignore array length load. 2184 } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) { 2185 // Ignore first AddP. 2186 } else { 2187 const Type* elemtype = adr_type->isa_aryptr()->elem(); 2188 bt = elemtype->array_element_basic_type(); 2189 } 2190 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) { 2191 // Allocation initialization, ThreadLocal field access, unsafe access 2192 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || 2193 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || 2194 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || 2195 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) { 2196 bt = T_OBJECT; 2197 } 2198 } 2199 } 2200 // Note: T_NARROWOOP is not classed as a real reference type 2201 return (is_reference_type(bt) || bt == T_NARROWOOP); 2202 } 2203 2204 // Returns unique pointed java object or NULL. 2205 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) { 2206 assert(!_collecting, "should not call when contructed graph"); 2207 // If the node was created after the escape computation we can't answer. 2208 uint idx = n->_idx; 2209 if (idx >= nodes_size()) { 2210 return NULL; 2211 } 2212 PointsToNode* ptn = ptnode_adr(idx); 2213 if (ptn == NULL) { 2214 return NULL; 2215 } 2216 if (ptn->is_JavaObject()) { 2217 return ptn->as_JavaObject(); 2218 } 2219 assert(ptn->is_LocalVar(), "sanity"); 2220 // Check all java objects it points to. 2221 JavaObjectNode* jobj = NULL; 2222 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2223 PointsToNode* e = i.get(); 2224 if (e->is_JavaObject()) { 2225 if (jobj == NULL) { 2226 jobj = e->as_JavaObject(); 2227 } else if (jobj != e) { 2228 return NULL; 2229 } 2230 } 2231 } 2232 return jobj; 2233 } 2234 2235 // Return true if this node points only to non-escaping allocations. 2236 bool PointsToNode::non_escaping_allocation() { 2237 if (is_JavaObject()) { 2238 Node* n = ideal_node(); 2239 if (n->is_Allocate() || n->is_CallStaticJava()) { 2240 return (escape_state() == PointsToNode::NoEscape); 2241 } else { 2242 return false; 2243 } 2244 } 2245 assert(is_LocalVar(), "sanity"); 2246 // Check all java objects it points to. 2247 for (EdgeIterator i(this); i.has_next(); i.next()) { 2248 PointsToNode* e = i.get(); 2249 if (e->is_JavaObject()) { 2250 Node* n = e->ideal_node(); 2251 if ((e->escape_state() != PointsToNode::NoEscape) || 2252 !(n->is_Allocate() || n->is_CallStaticJava())) { 2253 return false; 2254 } 2255 } 2256 } 2257 return true; 2258 } 2259 2260 // Return true if we know the node does not escape globally. 2261 bool ConnectionGraph::not_global_escape(Node *n) { 2262 assert(!_collecting, "should not call during graph construction"); 2263 // If the node was created after the escape computation we can't answer. 2264 uint idx = n->_idx; 2265 if (idx >= nodes_size()) { 2266 return false; 2267 } 2268 PointsToNode* ptn = ptnode_adr(idx); 2269 if (ptn == NULL) { 2270 return false; // not in congraph (e.g. ConI) 2271 } 2272 PointsToNode::EscapeState es = ptn->escape_state(); 2273 // If we have already computed a value, return it. 2274 if (es >= PointsToNode::GlobalEscape) 2275 return false; 2276 if (ptn->is_JavaObject()) { 2277 return true; // (es < PointsToNode::GlobalEscape); 2278 } 2279 assert(ptn->is_LocalVar(), "sanity"); 2280 // Check all java objects it points to. 2281 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2282 if (i.get()->escape_state() >= PointsToNode::GlobalEscape) 2283 return false; 2284 } 2285 return true; 2286 } 2287 2288 2289 // Helper functions 2290 2291 // Return true if this node points to specified node or nodes it points to. 2292 bool PointsToNode::points_to(JavaObjectNode* ptn) const { 2293 if (is_JavaObject()) { 2294 return (this == ptn); 2295 } 2296 assert(is_LocalVar() || is_Field(), "sanity"); 2297 for (EdgeIterator i(this); i.has_next(); i.next()) { 2298 if (i.get() == ptn) 2299 return true; 2300 } 2301 return false; 2302 } 2303 2304 // Return true if one node points to an other. 2305 bool PointsToNode::meet(PointsToNode* ptn) { 2306 if (this == ptn) { 2307 return true; 2308 } else if (ptn->is_JavaObject()) { 2309 return this->points_to(ptn->as_JavaObject()); 2310 } else if (this->is_JavaObject()) { 2311 return ptn->points_to(this->as_JavaObject()); 2312 } 2313 assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity"); 2314 int ptn_count = ptn->edge_count(); 2315 for (EdgeIterator i(this); i.has_next(); i.next()) { 2316 PointsToNode* this_e = i.get(); 2317 for (int j = 0; j < ptn_count; j++) { 2318 if (this_e == ptn->edge(j)) 2319 return true; 2320 } 2321 } 2322 return false; 2323 } 2324 2325 #ifdef ASSERT 2326 // Return true if bases point to this java object. 2327 bool FieldNode::has_base(JavaObjectNode* jobj) const { 2328 for (BaseIterator i(this); i.has_next(); i.next()) { 2329 if (i.get() == jobj) 2330 return true; 2331 } 2332 return false; 2333 } 2334 #endif 2335 2336 bool ConnectionGraph::is_captured_store_address(Node* addp) { 2337 // Handle simple case first. 2338 assert(_igvn->type(addp)->isa_oopptr() == NULL, "should be raw access"); 2339 if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) { 2340 return true; 2341 } else if (addp->in(AddPNode::Address)->is_Phi()) { 2342 for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) { 2343 Node* addp_use = addp->fast_out(i); 2344 if (addp_use->is_Store()) { 2345 for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) { 2346 if (addp_use->fast_out(j)->is_Initialize()) { 2347 return true; 2348 } 2349 } 2350 } 2351 } 2352 } 2353 return false; 2354 } 2355 2356 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) { 2357 const Type *adr_type = phase->type(adr); 2358 if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && is_captured_store_address(adr)) { 2359 // We are computing a raw address for a store captured by an Initialize 2360 // compute an appropriate address type. AddP cases #3 and #5 (see below). 2361 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 2362 assert(offs != Type::OffsetBot || 2363 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), 2364 "offset must be a constant or it is initialization of array"); 2365 return offs; 2366 } 2367 const TypePtr *t_ptr = adr_type->isa_ptr(); 2368 assert(t_ptr != NULL, "must be a pointer type"); 2369 return t_ptr->offset(); 2370 } 2371 2372 Node* ConnectionGraph::get_addp_base(Node *addp) { 2373 assert(addp->is_AddP(), "must be AddP"); 2374 // 2375 // AddP cases for Base and Address inputs: 2376 // case #1. Direct object's field reference: 2377 // Allocate 2378 // | 2379 // Proj #5 ( oop result ) 2380 // | 2381 // CheckCastPP (cast to instance type) 2382 // | | 2383 // AddP ( base == address ) 2384 // 2385 // case #2. Indirect object's field reference: 2386 // Phi 2387 // | 2388 // CastPP (cast to instance type) 2389 // | | 2390 // AddP ( base == address ) 2391 // 2392 // case #3. Raw object's field reference for Initialize node: 2393 // Allocate 2394 // | 2395 // Proj #5 ( oop result ) 2396 // top | 2397 // \ | 2398 // AddP ( base == top ) 2399 // 2400 // case #4. Array's element reference: 2401 // {CheckCastPP | CastPP} 2402 // | | | 2403 // | AddP ( array's element offset ) 2404 // | | 2405 // AddP ( array's offset ) 2406 // 2407 // case #5. Raw object's field reference for arraycopy stub call: 2408 // The inline_native_clone() case when the arraycopy stub is called 2409 // after the allocation before Initialize and CheckCastPP nodes. 2410 // Allocate 2411 // | 2412 // Proj #5 ( oop result ) 2413 // | | 2414 // AddP ( base == address ) 2415 // 2416 // case #6. Constant Pool, ThreadLocal, CastX2P or 2417 // Raw object's field reference: 2418 // {ConP, ThreadLocal, CastX2P, raw Load} 2419 // top | 2420 // \ | 2421 // AddP ( base == top ) 2422 // 2423 // case #7. Klass's field reference. 2424 // LoadKlass 2425 // | | 2426 // AddP ( base == address ) 2427 // 2428 // case #8. narrow Klass's field reference. 2429 // LoadNKlass 2430 // | 2431 // DecodeN 2432 // | | 2433 // AddP ( base == address ) 2434 // 2435 // case #9. Mixed unsafe access 2436 // {instance} 2437 // | 2438 // CheckCastPP (raw) 2439 // top | 2440 // \ | 2441 // AddP ( base == top ) 2442 // 2443 Node *base = addp->in(AddPNode::Base); 2444 if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9. 2445 base = addp->in(AddPNode::Address); 2446 while (base->is_AddP()) { 2447 // Case #6 (unsafe access) may have several chained AddP nodes. 2448 assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only"); 2449 base = base->in(AddPNode::Address); 2450 } 2451 if (base->Opcode() == Op_CheckCastPP && 2452 base->bottom_type()->isa_rawptr() && 2453 _igvn->type(base->in(1))->isa_oopptr()) { 2454 base = base->in(1); // Case #9 2455 } else { 2456 Node* uncast_base = base->uncast(); 2457 int opcode = uncast_base->Opcode(); 2458 assert(opcode == Op_ConP || opcode == Op_ThreadLocal || 2459 opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() || 2460 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) || 2461 is_captured_store_address(addp), "sanity"); 2462 } 2463 } 2464 return base; 2465 } 2466 2467 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) { 2468 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes"); 2469 Node* addp2 = addp->raw_out(0); 2470 if (addp->outcnt() == 1 && addp2->is_AddP() && 2471 addp2->in(AddPNode::Base) == n && 2472 addp2->in(AddPNode::Address) == addp) { 2473 assert(addp->in(AddPNode::Base) == n, "expecting the same base"); 2474 // 2475 // Find array's offset to push it on worklist first and 2476 // as result process an array's element offset first (pushed second) 2477 // to avoid CastPP for the array's offset. 2478 // Otherwise the inserted CastPP (LocalVar) will point to what 2479 // the AddP (Field) points to. Which would be wrong since 2480 // the algorithm expects the CastPP has the same point as 2481 // as AddP's base CheckCastPP (LocalVar). 2482 // 2483 // ArrayAllocation 2484 // | 2485 // CheckCastPP 2486 // | 2487 // memProj (from ArrayAllocation CheckCastPP) 2488 // | || 2489 // | || Int (element index) 2490 // | || | ConI (log(element size)) 2491 // | || | / 2492 // | || LShift 2493 // | || / 2494 // | AddP (array's element offset) 2495 // | | 2496 // | | ConI (array's offset: #12(32-bits) or #24(64-bits)) 2497 // | / / 2498 // AddP (array's offset) 2499 // | 2500 // Load/Store (memory operation on array's element) 2501 // 2502 return addp2; 2503 } 2504 return NULL; 2505 } 2506 2507 // 2508 // Adjust the type and inputs of an AddP which computes the 2509 // address of a field of an instance 2510 // 2511 bool ConnectionGraph::split_AddP(Node *addp, Node *base) { 2512 PhaseGVN* igvn = _igvn; 2513 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr(); 2514 assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr"); 2515 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr(); 2516 if (t == NULL) { 2517 // We are computing a raw address for a store captured by an Initialize 2518 // compute an appropriate address type (cases #3 and #5). 2519 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer"); 2520 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation"); 2521 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot); 2522 assert(offs != Type::OffsetBot, "offset must be a constant"); 2523 t = base_t->add_offset(offs)->is_oopptr(); 2524 } 2525 int inst_id = base_t->instance_id(); 2526 assert(!t->is_known_instance() || t->instance_id() == inst_id, 2527 "old type must be non-instance or match new type"); 2528 2529 // The type 't' could be subclass of 'base_t'. 2530 // As result t->offset() could be large then base_t's size and it will 2531 // cause the failure in add_offset() with narrow oops since TypeOopPtr() 2532 // constructor verifies correctness of the offset. 2533 // 2534 // It could happened on subclass's branch (from the type profiling 2535 // inlining) which was not eliminated during parsing since the exactness 2536 // of the allocation type was not propagated to the subclass type check. 2537 // 2538 // Or the type 't' could be not related to 'base_t' at all. 2539 // It could happened when CHA type is different from MDO type on a dead path 2540 // (for example, from instanceof check) which is not collapsed during parsing. 2541 // 2542 // Do nothing for such AddP node and don't process its users since 2543 // this code branch will go away. 2544 // 2545 if (!t->is_known_instance() && 2546 !base_t->klass()->is_subtype_of(t->klass())) { 2547 return false; // bail out 2548 } 2549 const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr(); 2550 // Do NOT remove the next line: ensure a new alias index is allocated 2551 // for the instance type. Note: C++ will not remove it since the call 2552 // has side effect. 2553 int alias_idx = _compile->get_alias_index(tinst); 2554 igvn->set_type(addp, tinst); 2555 // record the allocation in the node map 2556 set_map(addp, get_map(base->_idx)); 2557 // Set addp's Base and Address to 'base'. 2558 Node *abase = addp->in(AddPNode::Base); 2559 Node *adr = addp->in(AddPNode::Address); 2560 if (adr->is_Proj() && adr->in(0)->is_Allocate() && 2561 adr->in(0)->_idx == (uint)inst_id) { 2562 // Skip AddP cases #3 and #5. 2563 } else { 2564 assert(!abase->is_top(), "sanity"); // AddP case #3 2565 if (abase != base) { 2566 igvn->hash_delete(addp); 2567 addp->set_req(AddPNode::Base, base); 2568 if (abase == adr) { 2569 addp->set_req(AddPNode::Address, base); 2570 } else { 2571 // AddP case #4 (adr is array's element offset AddP node) 2572 #ifdef ASSERT 2573 const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr(); 2574 assert(adr->is_AddP() && atype != NULL && 2575 atype->instance_id() == inst_id, "array's element offset should be processed first"); 2576 #endif 2577 } 2578 igvn->hash_insert(addp); 2579 } 2580 } 2581 // Put on IGVN worklist since at least addp's type was changed above. 2582 record_for_optimizer(addp); 2583 return true; 2584 } 2585 2586 // 2587 // Create a new version of orig_phi if necessary. Returns either the newly 2588 // created phi or an existing phi. Sets create_new to indicate whether a new 2589 // phi was created. Cache the last newly created phi in the node map. 2590 // 2591 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) { 2592 Compile *C = _compile; 2593 PhaseGVN* igvn = _igvn; 2594 new_created = false; 2595 int phi_alias_idx = C->get_alias_index(orig_phi->adr_type()); 2596 // nothing to do if orig_phi is bottom memory or matches alias_idx 2597 if (phi_alias_idx == alias_idx) { 2598 return orig_phi; 2599 } 2600 // Have we recently created a Phi for this alias index? 2601 PhiNode *result = get_map_phi(orig_phi->_idx); 2602 if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) { 2603 return result; 2604 } 2605 // Previous check may fail when the same wide memory Phi was split into Phis 2606 // for different memory slices. Search all Phis for this region. 2607 if (result != NULL) { 2608 Node* region = orig_phi->in(0); 2609 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { 2610 Node* phi = region->fast_out(i); 2611 if (phi->is_Phi() && 2612 C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) { 2613 assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice"); 2614 return phi->as_Phi(); 2615 } 2616 } 2617 } 2618 if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) { 2619 if (C->do_escape_analysis() == true && !C->failing()) { 2620 // Retry compilation without escape analysis. 2621 // If this is the first failure, the sentinel string will "stick" 2622 // to the Compile object, and the C2Compiler will see it and retry. 2623 C->record_failure(C2Compiler::retry_no_escape_analysis()); 2624 } 2625 return NULL; 2626 } 2627 orig_phi_worklist.append_if_missing(orig_phi); 2628 const TypePtr *atype = C->get_adr_type(alias_idx); 2629 result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype); 2630 C->copy_node_notes_to(result, orig_phi); 2631 igvn->set_type(result, result->bottom_type()); 2632 record_for_optimizer(result); 2633 set_map(orig_phi, result); 2634 new_created = true; 2635 return result; 2636 } 2637 2638 // 2639 // Return a new version of Memory Phi "orig_phi" with the inputs having the 2640 // specified alias index. 2641 // 2642 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) { 2643 assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory"); 2644 Compile *C = _compile; 2645 PhaseGVN* igvn = _igvn; 2646 bool new_phi_created; 2647 PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created); 2648 if (!new_phi_created) { 2649 return result; 2650 } 2651 GrowableArray<PhiNode *> phi_list; 2652 GrowableArray<uint> cur_input; 2653 PhiNode *phi = orig_phi; 2654 uint idx = 1; 2655 bool finished = false; 2656 while(!finished) { 2657 while (idx < phi->req()) { 2658 Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist); 2659 if (mem != NULL && mem->is_Phi()) { 2660 PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created); 2661 if (new_phi_created) { 2662 // found an phi for which we created a new split, push current one on worklist and begin 2663 // processing new one 2664 phi_list.push(phi); 2665 cur_input.push(idx); 2666 phi = mem->as_Phi(); 2667 result = newphi; 2668 idx = 1; 2669 continue; 2670 } else { 2671 mem = newphi; 2672 } 2673 } 2674 if (C->failing()) { 2675 return NULL; 2676 } 2677 result->set_req(idx++, mem); 2678 } 2679 #ifdef ASSERT 2680 // verify that the new Phi has an input for each input of the original 2681 assert( phi->req() == result->req(), "must have same number of inputs."); 2682 assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match"); 2683 #endif 2684 // Check if all new phi's inputs have specified alias index. 2685 // Otherwise use old phi. 2686 for (uint i = 1; i < phi->req(); i++) { 2687 Node* in = result->in(i); 2688 assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond."); 2689 } 2690 // we have finished processing a Phi, see if there are any more to do 2691 finished = (phi_list.length() == 0 ); 2692 if (!finished) { 2693 phi = phi_list.pop(); 2694 idx = cur_input.pop(); 2695 PhiNode *prev_result = get_map_phi(phi->_idx); 2696 prev_result->set_req(idx++, result); 2697 result = prev_result; 2698 } 2699 } 2700 return result; 2701 } 2702 2703 // 2704 // The next methods are derived from methods in MemNode. 2705 // 2706 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) { 2707 Node *mem = mmem; 2708 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally 2709 // means an array I have not precisely typed yet. Do not do any 2710 // alias stuff with it any time soon. 2711 if (toop->base() != Type::AnyPtr && 2712 !(toop->klass() != NULL && 2713 toop->klass()->is_java_lang_Object() && 2714 toop->offset() == Type::OffsetBot)) { 2715 mem = mmem->memory_at(alias_idx); 2716 // Update input if it is progress over what we have now 2717 } 2718 return mem; 2719 } 2720 2721 // 2722 // Move memory users to their memory slices. 2723 // 2724 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) { 2725 Compile* C = _compile; 2726 PhaseGVN* igvn = _igvn; 2727 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 2728 assert(tp != NULL, "ptr type"); 2729 int alias_idx = C->get_alias_index(tp); 2730 int general_idx = C->get_general_index(alias_idx); 2731 2732 // Move users first 2733 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2734 Node* use = n->fast_out(i); 2735 if (use->is_MergeMem()) { 2736 MergeMemNode* mmem = use->as_MergeMem(); 2737 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); 2738 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 2739 continue; // Nothing to do 2740 } 2741 // Replace previous general reference to mem node. 2742 uint orig_uniq = C->unique(); 2743 Node* m = find_inst_mem(n, general_idx, orig_phis); 2744 assert(orig_uniq == C->unique(), "no new nodes"); 2745 mmem->set_memory_at(general_idx, m); 2746 --imax; 2747 --i; 2748 } else if (use->is_MemBar()) { 2749 assert(!use->is_Initialize(), "initializing stores should not be moved"); 2750 if (use->req() > MemBarNode::Precedent && 2751 use->in(MemBarNode::Precedent) == n) { 2752 // Don't move related membars. 2753 record_for_optimizer(use); 2754 continue; 2755 } 2756 tp = use->as_MemBar()->adr_type()->isa_ptr(); 2757 if ((tp != NULL && C->get_alias_index(tp) == alias_idx) || 2758 alias_idx == general_idx) { 2759 continue; // Nothing to do 2760 } 2761 // Move to general memory slice. 2762 uint orig_uniq = C->unique(); 2763 Node* m = find_inst_mem(n, general_idx, orig_phis); 2764 assert(orig_uniq == C->unique(), "no new nodes"); 2765 igvn->hash_delete(use); 2766 imax -= use->replace_edge(n, m); 2767 igvn->hash_insert(use); 2768 record_for_optimizer(use); 2769 --i; 2770 #ifdef ASSERT 2771 } else if (use->is_Mem()) { 2772 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) { 2773 // Don't move related cardmark. 2774 continue; 2775 } 2776 // Memory nodes should have new memory input. 2777 tp = igvn->type(use->in(MemNode::Address))->isa_ptr(); 2778 assert(tp != NULL, "ptr type"); 2779 int idx = C->get_alias_index(tp); 2780 assert(get_map(use->_idx) != NULL || idx == alias_idx, 2781 "Following memory nodes should have new memory input or be on the same memory slice"); 2782 } else if (use->is_Phi()) { 2783 // Phi nodes should be split and moved already. 2784 tp = use->as_Phi()->adr_type()->isa_ptr(); 2785 assert(tp != NULL, "ptr type"); 2786 int idx = C->get_alias_index(tp); 2787 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice"); 2788 } else { 2789 use->dump(); 2790 assert(false, "should not be here"); 2791 #endif 2792 } 2793 } 2794 } 2795 2796 // 2797 // Search memory chain of "mem" to find a MemNode whose address 2798 // is the specified alias index. 2799 // 2800 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) { 2801 if (orig_mem == NULL) 2802 return orig_mem; 2803 Compile* C = _compile; 2804 PhaseGVN* igvn = _igvn; 2805 const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr(); 2806 bool is_instance = (toop != NULL) && toop->is_known_instance(); 2807 Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory); 2808 Node *prev = NULL; 2809 Node *result = orig_mem; 2810 while (prev != result) { 2811 prev = result; 2812 if (result == start_mem) 2813 break; // hit one of our sentinels 2814 if (result->is_Mem()) { 2815 const Type *at = igvn->type(result->in(MemNode::Address)); 2816 if (at == Type::TOP) 2817 break; // Dead 2818 assert (at->isa_ptr() != NULL, "pointer type required."); 2819 int idx = C->get_alias_index(at->is_ptr()); 2820 if (idx == alias_idx) 2821 break; // Found 2822 if (!is_instance && (at->isa_oopptr() == NULL || 2823 !at->is_oopptr()->is_known_instance())) { 2824 break; // Do not skip store to general memory slice. 2825 } 2826 result = result->in(MemNode::Memory); 2827 } 2828 if (!is_instance) 2829 continue; // don't search further for non-instance types 2830 // skip over a call which does not affect this memory slice 2831 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) { 2832 Node *proj_in = result->in(0); 2833 if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) { 2834 break; // hit one of our sentinels 2835 } else if (proj_in->is_Call()) { 2836 // ArrayCopy node processed here as well 2837 CallNode *call = proj_in->as_Call(); 2838 if (!call->may_modify(toop, igvn)) { 2839 result = call->in(TypeFunc::Memory); 2840 } 2841 } else if (proj_in->is_Initialize()) { 2842 AllocateNode* alloc = proj_in->as_Initialize()->allocation(); 2843 // Stop if this is the initialization for the object instance which 2844 // which contains this memory slice, otherwise skip over it. 2845 if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) { 2846 result = proj_in->in(TypeFunc::Memory); 2847 } 2848 } else if (proj_in->is_MemBar()) { 2849 // Check if there is an array copy for a clone 2850 // Step over GC barrier when ReduceInitialCardMarks is disabled 2851 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); 2852 Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0)); 2853 2854 if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) { 2855 // Stop if it is a clone 2856 ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy(); 2857 if (ac->may_modify(toop, igvn)) { 2858 break; 2859 } 2860 } 2861 result = proj_in->in(TypeFunc::Memory); 2862 } 2863 } else if (result->is_MergeMem()) { 2864 MergeMemNode *mmem = result->as_MergeMem(); 2865 result = step_through_mergemem(mmem, alias_idx, toop); 2866 if (result == mmem->base_memory()) { 2867 // Didn't find instance memory, search through general slice recursively. 2868 result = mmem->memory_at(C->get_general_index(alias_idx)); 2869 result = find_inst_mem(result, alias_idx, orig_phis); 2870 if (C->failing()) { 2871 return NULL; 2872 } 2873 mmem->set_memory_at(alias_idx, result); 2874 } 2875 } else if (result->is_Phi() && 2876 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { 2877 Node *un = result->as_Phi()->unique_input(igvn); 2878 if (un != NULL) { 2879 orig_phis.append_if_missing(result->as_Phi()); 2880 result = un; 2881 } else { 2882 break; 2883 } 2884 } else if (result->is_ClearArray()) { 2885 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) { 2886 // Can not bypass initialization of the instance 2887 // we are looking for. 2888 break; 2889 } 2890 // Otherwise skip it (the call updated 'result' value). 2891 } else if (result->Opcode() == Op_SCMemProj) { 2892 Node* mem = result->in(0); 2893 Node* adr = NULL; 2894 if (mem->is_LoadStore()) { 2895 adr = mem->in(MemNode::Address); 2896 } else { 2897 assert(mem->Opcode() == Op_EncodeISOArray || 2898 mem->Opcode() == Op_StrCompressedCopy, "sanity"); 2899 adr = mem->in(3); // Memory edge corresponds to destination array 2900 } 2901 const Type *at = igvn->type(adr); 2902 if (at != Type::TOP) { 2903 assert(at->isa_ptr() != NULL, "pointer type required."); 2904 int idx = C->get_alias_index(at->is_ptr()); 2905 if (idx == alias_idx) { 2906 // Assert in debug mode 2907 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field"); 2908 break; // In product mode return SCMemProj node 2909 } 2910 } 2911 result = mem->in(MemNode::Memory); 2912 } else if (result->Opcode() == Op_StrInflatedCopy) { 2913 Node* adr = result->in(3); // Memory edge corresponds to destination array 2914 const Type *at = igvn->type(adr); 2915 if (at != Type::TOP) { 2916 assert(at->isa_ptr() != NULL, "pointer type required."); 2917 int idx = C->get_alias_index(at->is_ptr()); 2918 if (idx == alias_idx) { 2919 // Assert in debug mode 2920 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field"); 2921 break; // In product mode return SCMemProj node 2922 } 2923 } 2924 result = result->in(MemNode::Memory); 2925 } 2926 } 2927 if (result->is_Phi()) { 2928 PhiNode *mphi = result->as_Phi(); 2929 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); 2930 const TypePtr *t = mphi->adr_type(); 2931 if (!is_instance) { 2932 // Push all non-instance Phis on the orig_phis worklist to update inputs 2933 // during Phase 4 if needed. 2934 orig_phis.append_if_missing(mphi); 2935 } else if (C->get_alias_index(t) != alias_idx) { 2936 // Create a new Phi with the specified alias index type. 2937 result = split_memory_phi(mphi, alias_idx, orig_phis); 2938 } 2939 } 2940 // the result is either MemNode, PhiNode, InitializeNode. 2941 return result; 2942 } 2943 2944 // 2945 // Convert the types of unescaped object to instance types where possible, 2946 // propagate the new type information through the graph, and update memory 2947 // edges and MergeMem inputs to reflect the new type. 2948 // 2949 // We start with allocations (and calls which may be allocations) on alloc_worklist. 2950 // The processing is done in 4 phases: 2951 // 2952 // Phase 1: Process possible allocations from alloc_worklist. Create instance 2953 // types for the CheckCastPP for allocations where possible. 2954 // Propagate the new types through users as follows: 2955 // casts and Phi: push users on alloc_worklist 2956 // AddP: cast Base and Address inputs to the instance type 2957 // push any AddP users on alloc_worklist and push any memnode 2958 // users onto memnode_worklist. 2959 // Phase 2: Process MemNode's from memnode_worklist. compute new address type and 2960 // search the Memory chain for a store with the appropriate type 2961 // address type. If a Phi is found, create a new version with 2962 // the appropriate memory slices from each of the Phi inputs. 2963 // For stores, process the users as follows: 2964 // MemNode: push on memnode_worklist 2965 // MergeMem: push on mergemem_worklist 2966 // Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice 2967 // moving the first node encountered of each instance type to the 2968 // the input corresponding to its alias index. 2969 // appropriate memory slice. 2970 // Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes. 2971 // 2972 // In the following example, the CheckCastPP nodes are the cast of allocation 2973 // results and the allocation of node 29 is unescaped and eligible to be an 2974 // instance type. 2975 // 2976 // We start with: 2977 // 2978 // 7 Parm #memory 2979 // 10 ConI "12" 2980 // 19 CheckCastPP "Foo" 2981 // 20 AddP _ 19 19 10 Foo+12 alias_index=4 2982 // 29 CheckCastPP "Foo" 2983 // 30 AddP _ 29 29 10 Foo+12 alias_index=4 2984 // 2985 // 40 StoreP 25 7 20 ... alias_index=4 2986 // 50 StoreP 35 40 30 ... alias_index=4 2987 // 60 StoreP 45 50 20 ... alias_index=4 2988 // 70 LoadP _ 60 30 ... alias_index=4 2989 // 80 Phi 75 50 60 Memory alias_index=4 2990 // 90 LoadP _ 80 30 ... alias_index=4 2991 // 100 LoadP _ 80 20 ... alias_index=4 2992 // 2993 // 2994 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24 2995 // and creating a new alias index for node 30. This gives: 2996 // 2997 // 7 Parm #memory 2998 // 10 ConI "12" 2999 // 19 CheckCastPP "Foo" 3000 // 20 AddP _ 19 19 10 Foo+12 alias_index=4 3001 // 29 CheckCastPP "Foo" iid=24 3002 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24 3003 // 3004 // 40 StoreP 25 7 20 ... alias_index=4 3005 // 50 StoreP 35 40 30 ... alias_index=6 3006 // 60 StoreP 45 50 20 ... alias_index=4 3007 // 70 LoadP _ 60 30 ... alias_index=6 3008 // 80 Phi 75 50 60 Memory alias_index=4 3009 // 90 LoadP _ 80 30 ... alias_index=6 3010 // 100 LoadP _ 80 20 ... alias_index=4 3011 // 3012 // In phase 2, new memory inputs are computed for the loads and stores, 3013 // And a new version of the phi is created. In phase 4, the inputs to 3014 // node 80 are updated and then the memory nodes are updated with the 3015 // values computed in phase 2. This results in: 3016 // 3017 // 7 Parm #memory 3018 // 10 ConI "12" 3019 // 19 CheckCastPP "Foo" 3020 // 20 AddP _ 19 19 10 Foo+12 alias_index=4 3021 // 29 CheckCastPP "Foo" iid=24 3022 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24 3023 // 3024 // 40 StoreP 25 7 20 ... alias_index=4 3025 // 50 StoreP 35 7 30 ... alias_index=6 3026 // 60 StoreP 45 40 20 ... alias_index=4 3027 // 70 LoadP _ 50 30 ... alias_index=6 3028 // 80 Phi 75 40 60 Memory alias_index=4 3029 // 120 Phi 75 50 50 Memory alias_index=6 3030 // 90 LoadP _ 120 30 ... alias_index=6 3031 // 100 LoadP _ 80 20 ... alias_index=4 3032 // 3033 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, GrowableArray<ArrayCopyNode*> &arraycopy_worklist) { 3034 GrowableArray<Node *> memnode_worklist; 3035 GrowableArray<PhiNode *> orig_phis; 3036 PhaseIterGVN *igvn = _igvn; 3037 uint new_index_start = (uint) _compile->num_alias_types(); 3038 VectorSet visited; 3039 ideal_nodes.clear(); // Reset for use with set_map/get_map. 3040 uint unique_old = _compile->unique(); 3041 3042 // Phase 1: Process possible allocations from alloc_worklist. 3043 // Create instance types for the CheckCastPP for allocations where possible. 3044 // 3045 // (Note: don't forget to change the order of the second AddP node on 3046 // the alloc_worklist if the order of the worklist processing is changed, 3047 // see the comment in find_second_addp().) 3048 // 3049 while (alloc_worklist.length() != 0) { 3050 Node *n = alloc_worklist.pop(); 3051 uint ni = n->_idx; 3052 if (n->is_Call()) { 3053 CallNode *alloc = n->as_Call(); 3054 // copy escape information to call node 3055 PointsToNode* ptn = ptnode_adr(alloc->_idx); 3056 PointsToNode::EscapeState es = ptn->escape_state(); 3057 // We have an allocation or call which returns a Java object, 3058 // see if it is unescaped. 3059 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) 3060 continue; 3061 // Find CheckCastPP for the allocate or for the return value of a call 3062 n = alloc->result_cast(); 3063 if (n == NULL) { // No uses except Initialize node 3064 if (alloc->is_Allocate()) { 3065 // Set the scalar_replaceable flag for allocation 3066 // so it could be eliminated if it has no uses. 3067 alloc->as_Allocate()->_is_scalar_replaceable = true; 3068 } 3069 if (alloc->is_CallStaticJava()) { 3070 // Set the scalar_replaceable flag for boxing method 3071 // so it could be eliminated if it has no uses. 3072 alloc->as_CallStaticJava()->_is_scalar_replaceable = true; 3073 } 3074 continue; 3075 } 3076 if (!n->is_CheckCastPP()) { // not unique CheckCastPP. 3077 // we could reach here for allocate case if one init is associated with many allocs. 3078 if (alloc->is_Allocate()) { 3079 alloc->as_Allocate()->_is_scalar_replaceable = false; 3080 } 3081 continue; 3082 } 3083 3084 // The inline code for Object.clone() casts the allocation result to 3085 // java.lang.Object and then to the actual type of the allocated 3086 // object. Detect this case and use the second cast. 3087 // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when 3088 // the allocation result is cast to java.lang.Object and then 3089 // to the actual Array type. 3090 if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL 3091 && (alloc->is_AllocateArray() || 3092 igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) { 3093 Node *cast2 = NULL; 3094 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3095 Node *use = n->fast_out(i); 3096 if (use->is_CheckCastPP()) { 3097 cast2 = use; 3098 break; 3099 } 3100 } 3101 if (cast2 != NULL) { 3102 n = cast2; 3103 } else { 3104 // Non-scalar replaceable if the allocation type is unknown statically 3105 // (reflection allocation), the object can't be restored during 3106 // deoptimization without precise type. 3107 continue; 3108 } 3109 } 3110 3111 const TypeOopPtr *t = igvn->type(n)->isa_oopptr(); 3112 if (t == NULL) 3113 continue; // not a TypeOopPtr 3114 if (!t->klass_is_exact()) 3115 continue; // not an unique type 3116 3117 if (alloc->is_Allocate()) { 3118 // Set the scalar_replaceable flag for allocation 3119 // so it could be eliminated. 3120 alloc->as_Allocate()->_is_scalar_replaceable = true; 3121 } 3122 if (alloc->is_CallStaticJava()) { 3123 // Set the scalar_replaceable flag for boxing method 3124 // so it could be eliminated. 3125 alloc->as_CallStaticJava()->_is_scalar_replaceable = true; 3126 } 3127 set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state 3128 // in order for an object to be scalar-replaceable, it must be: 3129 // - a direct allocation (not a call returning an object) 3130 // - non-escaping 3131 // - eligible to be a unique type 3132 // - not determined to be ineligible by escape analysis 3133 set_map(alloc, n); 3134 set_map(n, alloc); 3135 const TypeOopPtr* tinst = t->cast_to_instance_id(ni); 3136 igvn->hash_delete(n); 3137 igvn->set_type(n, tinst); 3138 n->raise_bottom_type(tinst); 3139 igvn->hash_insert(n); 3140 record_for_optimizer(n); 3141 // Allocate an alias index for the header fields. Accesses to 3142 // the header emitted during macro expansion wouldn't have 3143 // correct memory state otherwise. 3144 _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes())); 3145 _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes())); 3146 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) { 3147 3148 // First, put on the worklist all Field edges from Connection Graph 3149 // which is more accurate than putting immediate users from Ideal Graph. 3150 for (EdgeIterator e(ptn); e.has_next(); e.next()) { 3151 PointsToNode* tgt = e.get(); 3152 if (tgt->is_Arraycopy()) { 3153 continue; 3154 } 3155 Node* use = tgt->ideal_node(); 3156 assert(tgt->is_Field() && use->is_AddP(), 3157 "only AddP nodes are Field edges in CG"); 3158 if (use->outcnt() > 0) { // Don't process dead nodes 3159 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base)); 3160 if (addp2 != NULL) { 3161 assert(alloc->is_AllocateArray(),"array allocation was expected"); 3162 alloc_worklist.append_if_missing(addp2); 3163 } 3164 alloc_worklist.append_if_missing(use); 3165 } 3166 } 3167 3168 // An allocation may have an Initialize which has raw stores. Scan 3169 // the users of the raw allocation result and push AddP users 3170 // on alloc_worklist. 3171 Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms); 3172 assert (raw_result != NULL, "must have an allocation result"); 3173 for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) { 3174 Node *use = raw_result->fast_out(i); 3175 if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes 3176 Node* addp2 = find_second_addp(use, raw_result); 3177 if (addp2 != NULL) { 3178 assert(alloc->is_AllocateArray(),"array allocation was expected"); 3179 alloc_worklist.append_if_missing(addp2); 3180 } 3181 alloc_worklist.append_if_missing(use); 3182 } else if (use->is_MemBar()) { 3183 memnode_worklist.append_if_missing(use); 3184 } 3185 } 3186 } 3187 } else if (n->is_AddP()) { 3188 JavaObjectNode* jobj = unique_java_object(get_addp_base(n)); 3189 if (jobj == NULL || jobj == phantom_obj) { 3190 #ifdef ASSERT 3191 ptnode_adr(get_addp_base(n)->_idx)->dump(); 3192 ptnode_adr(n->_idx)->dump(); 3193 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation"); 3194 #endif 3195 _compile->record_failure(C2Compiler::retry_no_escape_analysis()); 3196 return; 3197 } 3198 Node *base = get_map(jobj->idx()); // CheckCastPP node 3199 if (!split_AddP(n, base)) continue; // wrong type from dead path 3200 } else if (n->is_Phi() || 3201 n->is_CheckCastPP() || 3202 n->is_EncodeP() || 3203 n->is_DecodeN() || 3204 (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) { 3205 if (visited.test_set(n->_idx)) { 3206 assert(n->is_Phi(), "loops only through Phi's"); 3207 continue; // already processed 3208 } 3209 JavaObjectNode* jobj = unique_java_object(n); 3210 if (jobj == NULL || jobj == phantom_obj) { 3211 #ifdef ASSERT 3212 ptnode_adr(n->_idx)->dump(); 3213 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation"); 3214 #endif 3215 _compile->record_failure(C2Compiler::retry_no_escape_analysis()); 3216 return; 3217 } else { 3218 Node *val = get_map(jobj->idx()); // CheckCastPP node 3219 TypeNode *tn = n->as_Type(); 3220 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr(); 3221 assert(tinst != NULL && tinst->is_known_instance() && 3222 tinst->instance_id() == jobj->idx() , "instance type expected."); 3223 3224 const Type *tn_type = igvn->type(tn); 3225 const TypeOopPtr *tn_t; 3226 if (tn_type->isa_narrowoop()) { 3227 tn_t = tn_type->make_ptr()->isa_oopptr(); 3228 } else { 3229 tn_t = tn_type->isa_oopptr(); 3230 } 3231 if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) { 3232 if (tn_type->isa_narrowoop()) { 3233 tn_type = tinst->make_narrowoop(); 3234 } else { 3235 tn_type = tinst; 3236 } 3237 igvn->hash_delete(tn); 3238 igvn->set_type(tn, tn_type); 3239 tn->set_type(tn_type); 3240 igvn->hash_insert(tn); 3241 record_for_optimizer(n); 3242 } else { 3243 assert(tn_type == TypePtr::NULL_PTR || 3244 tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()), 3245 "unexpected type"); 3246 continue; // Skip dead path with different type 3247 } 3248 } 3249 } else { 3250 debug_only(n->dump();) 3251 assert(false, "EA: unexpected node"); 3252 continue; 3253 } 3254 // push allocation's users on appropriate worklist 3255 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3256 Node *use = n->fast_out(i); 3257 if(use->is_Mem() && use->in(MemNode::Address) == n) { 3258 // Load/store to instance's field 3259 memnode_worklist.append_if_missing(use); 3260 } else if (use->is_MemBar()) { 3261 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge 3262 memnode_worklist.append_if_missing(use); 3263 } 3264 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes 3265 Node* addp2 = find_second_addp(use, n); 3266 if (addp2 != NULL) { 3267 alloc_worklist.append_if_missing(addp2); 3268 } 3269 alloc_worklist.append_if_missing(use); 3270 } else if (use->is_Phi() || 3271 use->is_CheckCastPP() || 3272 use->is_EncodeNarrowPtr() || 3273 use->is_DecodeNarrowPtr() || 3274 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) { 3275 alloc_worklist.append_if_missing(use); 3276 #ifdef ASSERT 3277 } else if (use->is_Mem()) { 3278 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path"); 3279 } else if (use->is_MergeMem()) { 3280 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 3281 } else if (use->is_SafePoint()) { 3282 // Look for MergeMem nodes for calls which reference unique allocation 3283 // (through CheckCastPP nodes) even for debug info. 3284 Node* m = use->in(TypeFunc::Memory); 3285 if (m->is_MergeMem()) { 3286 assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 3287 } 3288 } else if (use->Opcode() == Op_EncodeISOArray) { 3289 if (use->in(MemNode::Memory) == n || use->in(3) == n) { 3290 // EncodeISOArray overwrites destination array 3291 memnode_worklist.append_if_missing(use); 3292 } 3293 } else { 3294 uint op = use->Opcode(); 3295 if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) && 3296 (use->in(MemNode::Memory) == n)) { 3297 // They overwrite memory edge corresponding to destination array, 3298 memnode_worklist.append_if_missing(use); 3299 } else if (!(op == Op_CmpP || op == Op_Conv2B || 3300 op == Op_CastP2X || op == Op_StoreCM || 3301 op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives || 3302 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || 3303 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || 3304 op == Op_SubTypeCheck || 3305 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) { 3306 n->dump(); 3307 use->dump(); 3308 assert(false, "EA: missing allocation reference path"); 3309 } 3310 #endif 3311 } 3312 } 3313 3314 } 3315 3316 // Go over all ArrayCopy nodes and if one of the inputs has a unique 3317 // type, record it in the ArrayCopy node so we know what memory this 3318 // node uses/modified. 3319 for (int next = 0; next < arraycopy_worklist.length(); next++) { 3320 ArrayCopyNode* ac = arraycopy_worklist.at(next); 3321 Node* dest = ac->in(ArrayCopyNode::Dest); 3322 if (dest->is_AddP()) { 3323 dest = get_addp_base(dest); 3324 } 3325 JavaObjectNode* jobj = unique_java_object(dest); 3326 if (jobj != NULL) { 3327 Node *base = get_map(jobj->idx()); 3328 if (base != NULL) { 3329 const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr(); 3330 ac->_dest_type = base_t; 3331 } 3332 } 3333 Node* src = ac->in(ArrayCopyNode::Src); 3334 if (src->is_AddP()) { 3335 src = get_addp_base(src); 3336 } 3337 jobj = unique_java_object(src); 3338 if (jobj != NULL) { 3339 Node* base = get_map(jobj->idx()); 3340 if (base != NULL) { 3341 const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr(); 3342 ac->_src_type = base_t; 3343 } 3344 } 3345 } 3346 3347 // New alias types were created in split_AddP(). 3348 uint new_index_end = (uint) _compile->num_alias_types(); 3349 assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1"); 3350 3351 // Phase 2: Process MemNode's from memnode_worklist. compute new address type and 3352 // compute new values for Memory inputs (the Memory inputs are not 3353 // actually updated until phase 4.) 3354 if (memnode_worklist.length() == 0) 3355 return; // nothing to do 3356 while (memnode_worklist.length() != 0) { 3357 Node *n = memnode_worklist.pop(); 3358 if (visited.test_set(n->_idx)) 3359 continue; 3360 if (n->is_Phi() || n->is_ClearArray()) { 3361 // we don't need to do anything, but the users must be pushed 3362 } else if (n->is_MemBar()) { // Initialize, MemBar nodes 3363 // we don't need to do anything, but the users must be pushed 3364 n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory); 3365 if (n == NULL) 3366 continue; 3367 } else if (n->Opcode() == Op_StrCompressedCopy || 3368 n->Opcode() == Op_EncodeISOArray) { 3369 // get the memory projection 3370 n = n->find_out_with(Op_SCMemProj); 3371 assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required"); 3372 } else { 3373 assert(n->is_Mem(), "memory node required."); 3374 Node *addr = n->in(MemNode::Address); 3375 const Type *addr_t = igvn->type(addr); 3376 if (addr_t == Type::TOP) 3377 continue; 3378 assert (addr_t->isa_ptr() != NULL, "pointer type required."); 3379 int alias_idx = _compile->get_alias_index(addr_t->is_ptr()); 3380 assert ((uint)alias_idx < new_index_end, "wrong alias index"); 3381 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis); 3382 if (_compile->failing()) { 3383 return; 3384 } 3385 if (mem != n->in(MemNode::Memory)) { 3386 // We delay the memory edge update since we need old one in 3387 // MergeMem code below when instances memory slices are separated. 3388 set_map(n, mem); 3389 } 3390 if (n->is_Load()) { 3391 continue; // don't push users 3392 } else if (n->is_LoadStore()) { 3393 // get the memory projection 3394 n = n->find_out_with(Op_SCMemProj); 3395 assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required"); 3396 } 3397 } 3398 // push user on appropriate worklist 3399 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3400 Node *use = n->fast_out(i); 3401 if (use->is_Phi() || use->is_ClearArray()) { 3402 memnode_worklist.append_if_missing(use); 3403 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) { 3404 if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores 3405 continue; 3406 memnode_worklist.append_if_missing(use); 3407 } else if (use->is_MemBar()) { 3408 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge 3409 memnode_worklist.append_if_missing(use); 3410 } 3411 #ifdef ASSERT 3412 } else if(use->is_Mem()) { 3413 assert(use->in(MemNode::Memory) != n, "EA: missing memory path"); 3414 } else if (use->is_MergeMem()) { 3415 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 3416 } else if (use->Opcode() == Op_EncodeISOArray) { 3417 if (use->in(MemNode::Memory) == n || use->in(3) == n) { 3418 // EncodeISOArray overwrites destination array 3419 memnode_worklist.append_if_missing(use); 3420 } 3421 } else { 3422 uint op = use->Opcode(); 3423 if ((use->in(MemNode::Memory) == n) && 3424 (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) { 3425 // They overwrite memory edge corresponding to destination array, 3426 memnode_worklist.append_if_missing(use); 3427 } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) || 3428 op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives || 3429 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || 3430 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) { 3431 n->dump(); 3432 use->dump(); 3433 assert(false, "EA: missing memory path"); 3434 } 3435 #endif 3436 } 3437 } 3438 } 3439 3440 // Phase 3: Process MergeMem nodes from mergemem_worklist. 3441 // Walk each memory slice moving the first node encountered of each 3442 // instance type to the the input corresponding to its alias index. 3443 uint length = _mergemem_worklist.length(); 3444 for( uint next = 0; next < length; ++next ) { 3445 MergeMemNode* nmm = _mergemem_worklist.at(next); 3446 assert(!visited.test_set(nmm->_idx), "should not be visited before"); 3447 // Note: we don't want to use MergeMemStream here because we only want to 3448 // scan inputs which exist at the start, not ones we add during processing. 3449 // Note 2: MergeMem may already contains instance memory slices added 3450 // during find_inst_mem() call when memory nodes were processed above. 3451 igvn->hash_delete(nmm); 3452 uint nslices = MIN2(nmm->req(), new_index_start); 3453 for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) { 3454 Node* mem = nmm->in(i); 3455 Node* cur = NULL; 3456 if (mem == NULL || mem->is_top()) 3457 continue; 3458 // First, update mergemem by moving memory nodes to corresponding slices 3459 // if their type became more precise since this mergemem was created. 3460 while (mem->is_Mem()) { 3461 const Type *at = igvn->type(mem->in(MemNode::Address)); 3462 if (at != Type::TOP) { 3463 assert (at->isa_ptr() != NULL, "pointer type required."); 3464 uint idx = (uint)_compile->get_alias_index(at->is_ptr()); 3465 if (idx == i) { 3466 if (cur == NULL) 3467 cur = mem; 3468 } else { 3469 if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) { 3470 nmm->set_memory_at(idx, mem); 3471 } 3472 } 3473 } 3474 mem = mem->in(MemNode::Memory); 3475 } 3476 nmm->set_memory_at(i, (cur != NULL) ? cur : mem); 3477 // Find any instance of the current type if we haven't encountered 3478 // already a memory slice of the instance along the memory chain. 3479 for (uint ni = new_index_start; ni < new_index_end; ni++) { 3480 if((uint)_compile->get_general_index(ni) == i) { 3481 Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni); 3482 if (nmm->is_empty_memory(m)) { 3483 Node* result = find_inst_mem(mem, ni, orig_phis); 3484 if (_compile->failing()) { 3485 return; 3486 } 3487 nmm->set_memory_at(ni, result); 3488 } 3489 } 3490 } 3491 } 3492 // Find the rest of instances values 3493 for (uint ni = new_index_start; ni < new_index_end; ni++) { 3494 const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr(); 3495 Node* result = step_through_mergemem(nmm, ni, tinst); 3496 if (result == nmm->base_memory()) { 3497 // Didn't find instance memory, search through general slice recursively. 3498 result = nmm->memory_at(_compile->get_general_index(ni)); 3499 result = find_inst_mem(result, ni, orig_phis); 3500 if (_compile->failing()) { 3501 return; 3502 } 3503 nmm->set_memory_at(ni, result); 3504 } 3505 } 3506 igvn->hash_insert(nmm); 3507 record_for_optimizer(nmm); 3508 } 3509 3510 // Phase 4: Update the inputs of non-instance memory Phis and 3511 // the Memory input of memnodes 3512 // First update the inputs of any non-instance Phi's from 3513 // which we split out an instance Phi. Note we don't have 3514 // to recursively process Phi's encounted on the input memory 3515 // chains as is done in split_memory_phi() since they will 3516 // also be processed here. 3517 for (int j = 0; j < orig_phis.length(); j++) { 3518 PhiNode *phi = orig_phis.at(j); 3519 int alias_idx = _compile->get_alias_index(phi->adr_type()); 3520 igvn->hash_delete(phi); 3521 for (uint i = 1; i < phi->req(); i++) { 3522 Node *mem = phi->in(i); 3523 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis); 3524 if (_compile->failing()) { 3525 return; 3526 } 3527 if (mem != new_mem) { 3528 phi->set_req(i, new_mem); 3529 } 3530 } 3531 igvn->hash_insert(phi); 3532 record_for_optimizer(phi); 3533 } 3534 3535 // Update the memory inputs of MemNodes with the value we computed 3536 // in Phase 2 and move stores memory users to corresponding memory slices. 3537 // Disable memory split verification code until the fix for 6984348. 3538 // Currently it produces false negative results since it does not cover all cases. 3539 #if 0 // ifdef ASSERT 3540 visited.Reset(); 3541 Node_Stack old_mems(arena, _compile->unique() >> 2); 3542 #endif 3543 for (uint i = 0; i < ideal_nodes.size(); i++) { 3544 Node* n = ideal_nodes.at(i); 3545 Node* nmem = get_map(n->_idx); 3546 assert(nmem != NULL, "sanity"); 3547 if (n->is_Mem()) { 3548 #if 0 // ifdef ASSERT 3549 Node* old_mem = n->in(MemNode::Memory); 3550 if (!visited.test_set(old_mem->_idx)) { 3551 old_mems.push(old_mem, old_mem->outcnt()); 3552 } 3553 #endif 3554 assert(n->in(MemNode::Memory) != nmem, "sanity"); 3555 if (!n->is_Load()) { 3556 // Move memory users of a store first. 3557 move_inst_mem(n, orig_phis); 3558 } 3559 // Now update memory input 3560 igvn->hash_delete(n); 3561 n->set_req(MemNode::Memory, nmem); 3562 igvn->hash_insert(n); 3563 record_for_optimizer(n); 3564 } else { 3565 assert(n->is_Allocate() || n->is_CheckCastPP() || 3566 n->is_AddP() || n->is_Phi(), "unknown node used for set_map()"); 3567 } 3568 } 3569 #if 0 // ifdef ASSERT 3570 // Verify that memory was split correctly 3571 while (old_mems.is_nonempty()) { 3572 Node* old_mem = old_mems.node(); 3573 uint old_cnt = old_mems.index(); 3574 old_mems.pop(); 3575 assert(old_cnt == old_mem->outcnt(), "old mem could be lost"); 3576 } 3577 #endif 3578 } 3579 3580 #ifndef PRODUCT 3581 static const char *node_type_names[] = { 3582 "UnknownType", 3583 "JavaObject", 3584 "LocalVar", 3585 "Field", 3586 "Arraycopy" 3587 }; 3588 3589 static const char *esc_names[] = { 3590 "UnknownEscape", 3591 "NoEscape", 3592 "ArgEscape", 3593 "GlobalEscape" 3594 }; 3595 3596 void PointsToNode::dump(bool print_state) const { 3597 NodeType nt = node_type(); 3598 tty->print("%s ", node_type_names[(int) nt]); 3599 if (print_state) { 3600 EscapeState es = escape_state(); 3601 EscapeState fields_es = fields_escape_state(); 3602 tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]); 3603 if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) 3604 tty->print("NSR "); 3605 } 3606 if (is_Field()) { 3607 FieldNode* f = (FieldNode*)this; 3608 if (f->is_oop()) 3609 tty->print("oop "); 3610 if (f->offset() > 0) 3611 tty->print("+%d ", f->offset()); 3612 tty->print("("); 3613 for (BaseIterator i(f); i.has_next(); i.next()) { 3614 PointsToNode* b = i.get(); 3615 tty->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : "")); 3616 } 3617 tty->print(" )"); 3618 } 3619 tty->print("["); 3620 for (EdgeIterator i(this); i.has_next(); i.next()) { 3621 PointsToNode* e = i.get(); 3622 tty->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : ""); 3623 } 3624 tty->print(" ["); 3625 for (UseIterator i(this); i.has_next(); i.next()) { 3626 PointsToNode* u = i.get(); 3627 bool is_base = false; 3628 if (PointsToNode::is_base_use(u)) { 3629 is_base = true; 3630 u = PointsToNode::get_use_node(u)->as_Field(); 3631 } 3632 tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : ""); 3633 } 3634 tty->print(" ]] "); 3635 if (_node == NULL) 3636 tty->print_cr("<null>"); 3637 else 3638 _node->dump(); 3639 } 3640 3641 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { 3642 bool first = true; 3643 int ptnodes_length = ptnodes_worklist.length(); 3644 for (int i = 0; i < ptnodes_length; i++) { 3645 PointsToNode *ptn = ptnodes_worklist.at(i); 3646 if (ptn == NULL || !ptn->is_JavaObject()) 3647 continue; 3648 PointsToNode::EscapeState es = ptn->escape_state(); 3649 if ((es != PointsToNode::NoEscape) && !Verbose) { 3650 continue; 3651 } 3652 Node* n = ptn->ideal_node(); 3653 if (n->is_Allocate() || (n->is_CallStaticJava() && 3654 n->as_CallStaticJava()->is_boxing_method())) { 3655 if (first) { 3656 tty->cr(); 3657 tty->print("======== Connection graph for "); 3658 _compile->method()->print_short_name(); 3659 tty->cr(); 3660 first = false; 3661 } 3662 ptn->dump(); 3663 // Print all locals and fields which reference this allocation 3664 for (UseIterator j(ptn); j.has_next(); j.next()) { 3665 PointsToNode* use = j.get(); 3666 if (use->is_LocalVar()) { 3667 use->dump(Verbose); 3668 } else if (Verbose) { 3669 use->dump(); 3670 } 3671 } 3672 tty->cr(); 3673 } 3674 } 3675 } 3676 #endif 3677 3678 void ConnectionGraph::record_for_optimizer(Node *n) { 3679 _igvn->_worklist.push(n); 3680 _igvn->add_users_to_worklist(n); 3681 }