1 /* 2 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/valuetypenode.hpp"
#include "utilities/vmError.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

//=============================================================================
//------------------------------Value------------------------------------------
// Compute the type of the RegionNode.
// A Region is live (Type::CONTROL) if at least one input path carries live
// control; it is Type::TOP only when every input is missing or non-control.
const Type* RegionNode::Value(PhaseGVN* phase) const {
  for( uint i=1; i<req(); ++i ) {       // For all paths in
    Node *n = in(i);            // Get Control source
    if( !n ) continue;          // Missing inputs are TOP
    if( phase->type(n) == Type::CONTROL )
      return Type::CONTROL;     // One live path is enough
  }
  return Type::TOP;             // All paths dead?  Then so are we
}

//------------------------------Identity---------------------------------------
// Check for Region being Identity.
Node* RegionNode::Identity(PhaseGVN* phase) {
  // Cannot have Region be an identity, even if it has only 1 input.
  // Phi users cannot have their Region input folded away for them,
  // since they need to select the proper data input
  return this;
}

//------------------------------merge_region-----------------------------------
// If a Region flows into a Region, merge into one big happy merge.  This is
// hard to do if there is stuff that has to happen
//
// Collapses a feeding Region 'r' (whose only users are itself and 'region')
// into 'region' by appending r's inputs onto region's input list and
// replacing the edge to 'r' with top.  Only done when 'region' has no Phi
// users, since Phis would need their inputs re-shuffled to match.
// Returns 'region' if any merge happened, NULL otherwise.
static Node *merge_region(RegionNode *region, PhaseGVN *phase) {
  if( region->Opcode() != Op_Region ) // Do not do to LoopNodes
    return NULL;
  Node *progress = NULL;        // Progress flag
  PhaseIterGVN *igvn = phase->is_IterGVN();

  uint rreq = region->req();
  for( uint i = 1; i < rreq; i++ ) {
    Node *r = region->in(i);
    if( r && r->Opcode() == Op_Region && // Found a region?
        r->in(0) == r &&        // Not already collapsed?
        r != region &&          // Avoid stupid situations
        r->outcnt() == 2 ) {    // Self user and 'region' user only?
      assert(!r->as_Region()->has_phi(), "no phi users");
      if( !progress ) {         // No progress
        if (region->has_phi()) {
          return NULL;        // Only flatten if no Phi users
          // igvn->hash_delete( phi );
        }
        // First merge: pull 'region' out of the hash table before we
        // start rewriting its edges.
        igvn->hash_delete( region );
        progress = region;      // Making progress
      }
      igvn->hash_delete( r );

      // Append inputs to 'r' onto 'region'
      for( uint j = 1; j < r->req(); j++ ) {
        // Move an input from 'r' to 'region'
        region->add_req(r->in(j));
        r->set_req(j, phase->C->top());
        // Update phis of 'region'
        //for( uint k = 0; k < max; k++ ) {
        //  Node *phi = region->out(k);
        //  if( phi->is_Phi() ) {
        //    phi->add_req(phi->in(i));
        //  }
        //}

        rreq++;                 // One more input to Region
      } // Found a region to merge into Region
      igvn->_worklist.push(r);
      // Clobber pointer to the now dead 'r'
      region->set_req(i, phase->C->top());
    }
  }

  return progress;
}



//--------------------------------has_phi--------------------------------------
// Helper function: Return any PhiNode that uses this region or NULL
PhiNode* RegionNode::has_phi() const {
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* phi = fast_out(i);
    if (phi->is_Phi()) {   // Check for Phi users
      assert(phi->in(0) == (Node*)this, "phi uses region only via in(0)");
      return phi->as_Phi();  // this one is good enough
    }
  }

  return NULL;
}


//-----------------------------has_unique_phi----------------------------------
// Helper function: Return the only PhiNode that uses this region or NULL
// (returns NULL both when there are no Phi users and when there are several).
PhiNode* RegionNode::has_unique_phi() const {
  // Check that only one use is a Phi
  PhiNode* only_phi = NULL;
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* phi = fast_out(i);
    if (phi->is_Phi()) {   // Check for Phi users
      assert(phi->in(0) == (Node*)this, "phi uses region only via in(0)");
      if (only_phi == NULL) {
        only_phi = phi->as_Phi();
      } else {
        return NULL;  // multiple phis
      }
    }
  }

  return only_phi;
}


//------------------------------check_phi_clipping-----------------------------
// Helper function for RegionNode's identification of FP clipping
// Check inputs to the Phi
//
// Succeeds only for a 3-input Phi whose inputs are exactly two ConI constants
// (returned ordered as min <= max, with min <= 0 <= max) plus one non-constant
// value node.  On failure the out-parameters are left NULL/0.
static bool check_phi_clipping( PhiNode *phi, ConNode * &min, uint &min_idx, ConNode * &max, uint &max_idx, Node * &val, uint &val_idx ) {
  min     = NULL;
  max     = NULL;
  val     = NULL;
  min_idx = 0;
  max_idx = 0;
  val_idx = 0;
  uint  phi_max = phi->req();
  if( phi_max == 4 ) {
    for( uint j = 1; j < phi_max; ++j ) {
      Node *n = phi->in(j);
      int opcode = n->Opcode();
      switch( opcode ) {
      case Op_ConI:
        {
          if( min == NULL ) {
            min     = n->Opcode() == Op_ConI ? (ConNode*)n : NULL;
            min_idx = j;
          } else {
            max     = n->Opcode() == Op_ConI ? (ConNode*)n : NULL;
            max_idx = j;
            if( min->get_int() > max->get_int() ) {
              // Swap min and max
              ConNode *temp;
              uint     temp_idx;
              temp     = min;     min     = max;     max     = temp;
              temp_idx = min_idx; min_idx = max_idx; max_idx = temp_idx;
            }
          }
        }
        break;
      default:
        {
          // Any non-constant input is the candidate clipped value;
          // a second one overwrites the first, failing the phi_max==4
          // shape check implicitly via min/max remaining incomplete.
          val = n;
          val_idx = j;
        }
        break;
      }
    }
  }
  return ( min && max && val && (min->get_int() <= 0) && (max->get_int() >=0) );
}


//------------------------------check_if_clipping------------------------------
// Helper function for RegionNode's identification of FP clipping
// Check that inputs to Region come from two IfNodes,
//
//            If
//      False    True
//       If        |
//  False  True    |
//    |      |     |
//  RegionNode_inputs
//
// On success sets top_if (outer If) and bot_if (inner If) and returns true.
static bool check_if_clipping( const RegionNode *region, IfNode * &bot_if, IfNode * &top_if ) {
  top_if = NULL;
  bot_if = NULL;

  // Check control structure above RegionNode for (if ( if ) )
  Node *in1 = region->in(1);
  Node *in2 = region->in(2);
  Node *in3 = region->in(3);
  // Check that all inputs are projections
  if( in1->is_Proj() && in2->is_Proj() && in3->is_Proj() ) {
    Node *in10 = in1->in(0);
    Node *in20 = in2->in(0);
    Node *in30 = in3->in(0);
    // Check that #1 and #2 are ifTrue and ifFalse from same If
    // (different projection opcodes off the same If node)
    if( in10 != NULL && in10->is_If() &&
        in20 != NULL && in20->is_If() &&
        in30 != NULL && in30->is_If() && in10 == in20 &&
        (in1->Opcode() != in2->Opcode()) ) {
      Node  *in100 = in10->in(0);
      Node *in1000 = (in100 != NULL && in100->is_Proj()) ? in100->in(0) : NULL;
      // Check that control for in10 comes from other branch of IF from in3
      if( in1000 != NULL && in1000->is_If() &&
          in30 == in1000 && (in3->Opcode() != in100->Opcode()) ) {
        // Control pattern checks
        top_if = (IfNode*)in1000;
        bot_if = (IfNode*)in10;
      }
    }
  }

  return (top_if != NULL);
}


//------------------------------check_convf2i_clipping-------------------------
// Helper function for RegionNode's identification of FP clipping
// Verify that the value input to the phi comes from "ConvF2I; LShift; RShift"
//
// The shift pair only sign-extends the ConvF2I result; it can be dropped
// when the clip limits [min_cutoff, max_cutoff] already fit in the value
// range that survives the shifts.  On success 'convf2i' is set.
static bool check_convf2i_clipping( PhiNode *phi, uint idx, ConvF2INode * &convf2i, Node *min, Node *max) {
  convf2i = NULL;

  // Check for the RShiftNode
  Node *rshift = phi->in(idx);
  assert( rshift, "Previous checks ensure phi input is present");
  if( rshift->Opcode() != Op_RShiftI )  { return false; }

  // Check for the LShiftNode
  Node *lshift = rshift->in(1);
  assert( lshift, "Previous checks ensure phi input is present");
  if( lshift->Opcode() != Op_LShiftI )  { return false; }

  // Check for the ConvF2INode
  Node *conv = lshift->in(1);
  if( conv->Opcode() != Op_ConvF2I ) { return false; }

  // Check that shift amounts are only to get sign bits set after F2I
  jint max_cutoff     = max->get_int();
  jint min_cutoff     = min->get_int();
  jint left_shift     = lshift->in(2)->get_int();
  jint right_shift    = rshift->in(2)->get_int();
  jint max_post_shift = nth_bit(BitsPerJavaInteger - left_shift - 1);
  if( left_shift != right_shift ||
      0 > left_shift || left_shift >= BitsPerJavaInteger ||
      max_post_shift < max_cutoff ||
      max_post_shift < -min_cutoff ) {
    // Shifts are necessary but current transformation eliminates them
    return false;
  }

  // OK to return the result of ConvF2I without shifting
  convf2i = (ConvF2INode*)conv;
  return true;
}

//------------------------------check_compare_clipping-------------------------
// Helper function for RegionNode's identification of FP clipping
//
// Verify one side of the clipping diamond: the If must test the candidate
// value against a float constant whose integer value equals 'limit'
// (le for the lower bound, lt for the upper bound).  On success 'input'
// is set to the float value being compared.
static bool check_compare_clipping( bool less_than, IfNode *iff, ConNode *limit, Node * & input ) {
  Node *i1 = iff->in(1);
  if ( !i1->is_Bool() ) { return false; }
  BoolNode *bool1 = i1->as_Bool();
  if(  less_than && bool1->_test._test != BoolTest::le ) { return false; }
  else if( !less_than && bool1->_test._test != BoolTest::lt ) { return false; }
  const Node *cmpF = bool1->in(1);
  if( cmpF->Opcode() != Op_CmpF )      { return false; }
  // Test that the float value being compared against
  // is equivalent to the int value used as a limit
  Node *nodef = cmpF->in(2);
  if( nodef->Opcode() != Op_ConF ) { return false; }
  jfloat conf = nodef->getf();
  jint   coni = limit->get_int();
  if( ((int)conf) != coni )        { return false; }
  input = cmpF->in(1);
  return true;
}

//------------------------------is_unreachable_region--------------------------
// Find if the Region node is reachable from the root.
// Only called on a 2-input (single-path) Region.  First does a cheap scan of
// the Phi users looking for possible data loops; only when one is found does
// it pay for a full forward CFG walk from the root.
bool RegionNode::is_unreachable_region(PhaseGVN *phase) const {
  assert(req() == 2, "");

  // First, cut the simple case of fallthrough region when NONE of
  // region's phis references itself directly or through a data node.
  uint max = outcnt();
  uint i;
  for (i = 0; i < max; i++) {
    Node* phi = raw_out(i);
    if (phi != NULL && phi->is_Phi()) {
      assert(phase->eqv(phi->in(0), this) && phi->req() == 2, "");
      if (phi->outcnt() == 0)
        continue; // Safe case - no loops
      if (phi->outcnt() == 1) {
        Node* u = phi->raw_out(0);
        // Skip if only one use is another Phi or Call or Uncommon trap.
        // It is safe to consider this case as fallthrough.
        if (u != NULL && (u->is_Phi() || u->is_CFG()))
          continue;
      }
      // Check when phi references itself directly or through another node.
      if (phi->as_Phi()->simple_data_loop_check(phi->in(1)) >= PhiNode::Unsafe)
        break; // Found possible unsafe data loop.
    }
  }
  if (i >= max)
    return false; // An unsafe case was NOT found - don't need graph walk.

  // Unsafe case - check if the Region node is reachable from root.
  ResourceMark rm;

  Arena *a = Thread::current()->resource_area();
  Node_List nstack(a);
  VectorSet visited(a);

  // Mark all control nodes reachable from root outputs
  Node *n = (Node*)phase->C->root();
  nstack.push(n);
  visited.set(n->_idx);
  while (nstack.size() != 0) {
    n = nstack.pop();
    uint max = n->outcnt();
    for (uint i = 0; i < max; i++) {
      Node* m = n->raw_out(i);
      if (m != NULL && m->is_CFG()) {
        if (phase->eqv(m, this)) {
          return false; // We reached the Region node - it is not dead.
        }
        if (!visited.test_set(m->_idx))
          nstack.push(m);
      }
    }
  }

  return true; // The Region node is unreachable - it is dead.
}

// Replace this Region's unique memory Phi by the MergeMem feeding it, when
// the diamond shape allows it, so the Region can later be removed.
// Returns true if the Phi was replaced.
bool RegionNode::try_clean_mem_phi(PhaseGVN *phase) {
  // Incremental inlining + PhaseStringOpts sometimes produce:
  //
  // cmpP with 1 top input
  //           |
  //          If
  //         /  \
  //   IfFalse  IfTrue  /- Some Node
  //         \  /      /    /
  //        Region    / /-MergeMem
  //             \---Phi
  //
  //
  // It's expected by PhaseStringOpts that the Region goes away and is
  // replaced by If's control input but because there's still a Phi,
  // the Region stays in the graph. The top input from the cmpP is
  // propagated forward and a subgraph that is useful goes away. The
  // code below replaces the Phi with the MergeMem so that the Region
  // is simplified.

  PhiNode* phi = has_unique_phi();
  if (phi && phi->type() == Type::MEMORY && req() == 3 && phi->is_diamond_phi(true)) {
    MergeMemNode* m = NULL;
    assert(phi->req() == 3, "same as region");
    for (uint i = 1; i < 3; ++i) {
      Node *mem = phi->in(i);
      if (mem && mem->is_MergeMem() && in(i)->outcnt() == 1) {
        // Nothing is control-dependent on path #i except the region itself.
        m = mem->as_MergeMem();
        uint j = 3 - i;             // the other diamond path
        Node* other = phi->in(j);
        if (other && other == m->base_memory()) {
          // m is a successor memory to other, and is not pinned inside the diamond, so push it out.
          // This will allow the diamond to collapse completely.
          phase->is_IterGVN()->replace_node(phi, m);
          return true;
        }
      }
    }
  }
  return false;
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Must preserve
// the CFG, but we can still strip out dead paths.
//
// Performed transformations, in order:
//  1. Collapse a useless If diamond (both arms feed this Region, no Phis).
//  2. Remove TOP/NULL input paths; with DU info also trim matching Phi edges.
//  3. Detect and dismantle dead loops left with a single (backedge) input.
//  4. Degrade to a copy / remove the node entirely when <= 1 path remains.
//  5. Merge a feeding Region into this one (merge_region).
//  6. Recognize the float-clipping idiom and rewrite it with integer tests.
Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if( !can_reshape && !in(0) ) return NULL;     // Already degraded to a Copy
  assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");

  // Check for RegionNode with no Phi users and both inputs come from either
  // arm of the same IF.  If found, then the control-flow split is useless.
  bool has_phis = false;
  if (can_reshape) {            // Need DU info to check for Phi users
    has_phis = (has_phi() != NULL);       // Cache result
    if (has_phis && try_clean_mem_phi(phase)) {
      has_phis = false;
    }

    if (!has_phis) {            // No Phi users?  Nothing merging?
      for (uint i = 1; i < req()-1; i++) {
        Node *if1 = in(i);
        if( !if1 ) continue;
        Node *iff = if1->in(0);
        if( !iff || !iff->is_If() ) continue;
        for( uint j=i+1; j<req(); j++ ) {
          // Both projections of the same If flow here: the split is useless.
          if( in(j) && in(j)->in(0) == iff &&
              if1->Opcode() != in(j)->Opcode() ) {
            // Add the IF Projections to the worklist. They (and the IF itself)
            // will be eliminated if dead.
            phase->is_IterGVN()->add_users_to_worklist(iff);
            set_req(i, iff->in(0));// Skip around the useless IF diamond
            set_req(j, NULL);
            return this;        // Record progress
          }
        }
      }
    }
  }

  // Remove TOP or NULL input paths. If only 1 input path remains, this Region
  // degrades to a copy.
  bool add_to_worklist = false;
  bool modified = false;
  int cnt = 0;                  // Count of values merging
  DEBUG_ONLY( int cnt_orig = req(); ) // Save original inputs count
  int del_it = 0;               // The last input path we delete
  // For all inputs...
  for( uint i=1; i<req(); ++i ){// For all paths in
    Node *n = in(i);            // Get the input
    if( n != NULL ) {
      // Remove useless control copy inputs
      if( n->is_Region() && n->as_Region()->is_copy() ) {
        set_req(i, n->nonnull_req());
        modified = true;
        i--;                    // Re-examine this slot
        continue;
      }
      if( n->is_Proj() ) {      // Remove useless rethrows
        Node *call = n->in(0);
        if (call->is_Call() && call->as_Call()->entry_point() == OptoRuntime::rethrow_stub()) {
          set_req(i, call->in(0));
          modified = true;
          i--;
          continue;
        }
      }
      if( phase->type(n) == Type::TOP ) {
        set_req(i, NULL);       // Ignore TOP inputs
        modified = true;
        i--;
        continue;
      }
      cnt++;                    // One more value merging

    } else if (can_reshape) {   // Else found dead path with DU info
      PhaseIterGVN *igvn = phase->is_IterGVN();
      del_req(i);               // Yank path from self
      del_it = i;
      uint max = outcnt();
      DUIterator j;
      bool progress = true;
      // Keep deleting the matching edge from Phi users until every user has
      // the same arity as this Region again (user set may change underneath).
      while(progress) {         // Need to establish property over all users
        progress = false;
        for (j = outs(); has_out(j); j++) {
          Node *n = out(j);
          if( n->req() != req() && n->is_Phi() ) {
            assert( n->in(0) == this, "" );
            igvn->hash_delete(n); // Yank from hash before hacking edges
            n->set_req_X(i,NULL,igvn);// Correct DU info
            n->del_req(i);        // Yank path from Phis
            if( max != outcnt() ) {
              progress = true;
              j = refresh_out_pos(j);
              max = outcnt();
            }
          }
        }
      }
      add_to_worklist = true;
      i--;
    }
  }

  if (can_reshape && cnt == 1) {
    // Is it dead loop?
    // If it is LoopNode it had 2 (+1 itself) inputs and
    // one of them was cut. The loop is dead if it was EntryControl.
    // Loop node may have only one input because entry path
    // is removed in PhaseIdealLoop::Dominators().
    assert(!this->is_Loop() || cnt_orig <= 3, "Loop node should have 3 or less inputs");
    if ((this->is_Loop() && (del_it == LoopNode::EntryControl ||
                             (del_it == 0 && is_unreachable_region(phase)))) ||
        (!this->is_Loop() && has_phis && is_unreachable_region(phase))) {
      // Yes,  the region will be removed during the next step below.
      // Cut the backedge input and remove phis since no data paths left.
      // We don't cut outputs to other nodes here since we need to put them
      // on the worklist.
      PhaseIterGVN *igvn = phase->is_IterGVN();
      if (in(1)->outcnt() == 1) {
        igvn->_worklist.push(in(1));
      }
      del_req(1);
      cnt = 0;
      assert( req() == 1, "no more inputs expected" );
      uint max = outcnt();
      bool progress = true;
      Node *top = phase->C->top();
      DUIterator j;
      while(progress) {
        progress = false;
        for (j = outs(); has_out(j); j++) {
          Node *n = out(j);
          if( n->is_Phi() ) {
            assert( igvn->eqv(n->in(0), this), "" );
            assert( n->req() == 2 &&  n->in(1) != NULL, "Only one data input expected" );
            // Break dead loop data path.
            // Eagerly replace phis with top to avoid phis copies generation.
            igvn->replace_node(n, top);
            if( max != outcnt() ) {
              progress = true;
              j = refresh_out_pos(j);
              max = outcnt();
            }
          }
        }
      }
      add_to_worklist = true;
    }
  }
  if (add_to_worklist) {
    phase->is_IterGVN()->add_users_to_worklist(this); // Revisit collapsed Phis
  }

  if( cnt <= 1 ) {              // Only 1 path in?
    set_req(0, NULL);           // Null control input for region copy
    if( cnt == 0 && !can_reshape) { // Parse phase - leave the node as it is.
      // No inputs or all inputs are NULL.
      return NULL;
    } else if (can_reshape) {   // Optimization phase - remove the node
      PhaseIterGVN *igvn = phase->is_IterGVN();
      Node *parent_ctrl;
      if( cnt == 0 ) {
        assert( req() == 1, "no inputs expected" );
        // During IGVN phase such region will be subsumed by TOP node
        // so region's phis will have TOP as control node.
        // Kill phis here to avoid it. PhiNode::is_copy() will be always false.
        // Also set other user's input to top.
        parent_ctrl = phase->C->top();
      } else {
        // The fallthrough case since we already checked dead loops above.
        parent_ctrl = in(1);
        assert(parent_ctrl != NULL, "Region is a copy of some non-null control");
        assert(!igvn->eqv(parent_ctrl, this), "Close dead loop");
      }
      if (!add_to_worklist)
        igvn->add_users_to_worklist(this); // Check for further allowed opts
      // Re-point every user of this Region at parent_ctrl and collapse Phis.
      for (DUIterator_Last imin, i = last_outs(imin); i >= imin; --i) {
        Node* n = last_out(i);
        igvn->hash_delete(n); // Remove from worklist before modifying edges
        if( n->is_Phi() ) {   // Collapse all Phis
          // Eagerly replace phis to avoid copies generation.
          Node* in;
          if( cnt == 0 ) {
            assert( n->req() == 1, "No data inputs expected" );
            in = parent_ctrl; // replaced by top
          } else {
            assert( n->req() == 2 &&  n->in(1) != NULL, "Only one data input expected" );
            in = n->in(1);               // replaced by unique input
            if( n->as_Phi()->is_unsafe_data_reference(in) )
              in = phase->C->top();      // replaced by top
          }
          igvn->replace_node(n, in);
        }
        else if( n->is_Region() ) { // Update all incoming edges
          assert( !igvn->eqv(n, this), "Must be removed from DefUse edges");
          uint uses_found = 0;
          for( uint k=1; k < n->req(); k++ ) {
            if( n->in(k) == this ) {
              n->set_req(k, parent_ctrl);
              uses_found++;
            }
          }
          if( uses_found > 1 ) { // (--i) done at the end of the loop.
            i -= (uses_found - 1);
          }
        }
        else {
          assert( igvn->eqv(n->in(0), this), "Expect RegionNode to be control parent");
          n->set_req(0, parent_ctrl);
        }
#ifdef ASSERT
        for( uint k=0; k < n->req(); k++ ) {
          assert( !igvn->eqv(n->in(k), this), "All uses of RegionNode should be gone");
        }
#endif
      }
      // Remove the RegionNode itself from DefUse info
      igvn->remove_dead_node(this);
      return NULL;
    }
    return this;                // Record progress
  }


  // If a Region flows into a Region, merge into one big happy merge.
  if (can_reshape) {
    Node *m = merge_region(this, phase);
    if (m != NULL)  return m;
  }

  // Check if this region is the root of a clipping idiom on floats
  if( ConvertFloat2IntClipping && can_reshape && req() == 4 ) {
    // Check that only one use is a Phi and that it simplifies to two constants +
    PhiNode* phi = has_unique_phi();
    if (phi != NULL) {          // One Phi user
      // Check inputs to the Phi
      ConNode *min;
      ConNode *max;
      Node    *val;
      uint     min_idx;
      uint     max_idx;
      uint     val_idx;
      if( check_phi_clipping( phi, min, min_idx, max, max_idx, val, val_idx )  ) {
        IfNode *top_if;
        IfNode *bot_if;
        if( check_if_clipping( this, bot_if, top_if ) ) {
          // Control pattern checks, now verify compares
          Node   *top_in = NULL;   // value being compared against
          Node   *bot_in = NULL;
          if( check_compare_clipping( true,  bot_if, min, bot_in ) &&
              check_compare_clipping( false, top_if, max, top_in ) ) {
            if( bot_in == top_in ) {
              PhaseIterGVN *gvn = phase->is_IterGVN();
              assert( gvn != NULL, "Only had DefUse info in IterGVN");
              // Only remaining check is that bot_in == top_in == (Phi's val + mods)

              // Check for the ConvF2INode
              ConvF2INode *convf2i;
              if( check_convf2i_clipping( phi, val_idx, convf2i, min, max ) &&
                convf2i->in(1) == bot_in ) {
                // Matched pattern, including LShiftI; RShiftI, replace with integer compares
                // max test
                Node *cmp   = gvn->register_new_node_with_optimizer(new CmpINode( convf2i, min ));
                Node *boo   = gvn->register_new_node_with_optimizer(new BoolNode( cmp, BoolTest::lt ));
                IfNode *iff = (IfNode*)gvn->register_new_node_with_optimizer(new IfNode( top_if->in(0), boo, PROB_UNLIKELY_MAG(5), top_if->_fcnt ));
                Node *if_min= gvn->register_new_node_with_optimizer(new IfTrueNode (iff));
                Node *ifF   = gvn->register_new_node_with_optimizer(new IfFalseNode(iff));
                // min test
                cmp         = gvn->register_new_node_with_optimizer(new CmpINode( convf2i, max ));
                boo         = gvn->register_new_node_with_optimizer(new BoolNode( cmp, BoolTest::gt ));
                iff         = (IfNode*)gvn->register_new_node_with_optimizer(new IfNode( ifF, boo, PROB_UNLIKELY_MAG(5), bot_if->_fcnt ));
                Node *if_max= gvn->register_new_node_with_optimizer(new IfTrueNode (iff));
                ifF         = gvn->register_new_node_with_optimizer(new IfFalseNode(iff));
                // update input edges to region node
                set_req_X( min_idx, if_min, gvn );
                set_req_X( max_idx, if_max, gvn );
                set_req_X( val_idx, ifF,    gvn );
                // remove unnecessary 'LShiftI; RShiftI' idiom
                gvn->hash_delete(phi);
                phi->set_req_X( val_idx, convf2i, gvn );
                gvn->hash_find_insert(phi);
                // Return transformed region node
                return this;
              }
            }
          }
        }
      }
    }
  }

  return modified ? this : NULL;
}



// Regions produce no values and carry no register mask.
const RegMask &RegionNode::out_RegMask() const {
  return RegMask::Empty;
}

// Find the one non-null required input.  RegionNode only
Node *Node::nonnull_req() const {
  assert( is_Region(), "" );
  for( uint i = 1; i < _cnt; i++ )
    if( in(i) )
      return in(i);
  ShouldNotReachHere();
  return NULL;
}


//=============================================================================
// note that these functions assume that the _adr_type field is flattened
uint PhiNode::hash() const {
  const Type* at = _adr_type;
  return TypeNode::hash() + (at ? at->hash() : 0);
}
uint PhiNode::cmp( const Node &n ) const {
  return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
}
// Normalize an address type to its canonical alias representative so that
// equivalent memory Phis hash/compare equal.
static inline
const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
  if (at == NULL || at == TypePtr::BOTTOM)  return at;
  return Compile::current()->alias_type(at)->adr_type();
}

//----------------------------make---------------------------------------------
// create a new phi with edges matching r and set (initially) to x
PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
  uint preds = r->req();   // Number of predecessor paths
  assert(t != Type::MEMORY || at == flatten_phi_adr_type(at), "flatten at");
  PhiNode* p = new PhiNode(r, t, at);
  for (uint j = 1; j < preds; j++) {
    // Fill in all inputs, except those which the region does not yet have
    if (r->in(j) != NULL)
      p->init_req(j, x);
  }
  return p;
}
// Convenience overload: derive type (and alias type, for memory) from x.
PhiNode* PhiNode::make(Node* r, Node* x) {
  const Type*    t  = x->bottom_type();
  const TypePtr* at = NULL;
  if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
  return make(r, x, t, at);
}
// Like make() but leaves all data inputs NULL for the caller to fill in.
PhiNode* PhiNode::make_blank(Node* r, Node* x) {
  const Type*    t  = x->bottom_type();
  const TypePtr* at = NULL;
  if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
  return new PhiNode(r, t, at);
}


//------------------------slice_memory-----------------------------------------
// create a new phi with narrowed memory type
PhiNode* PhiNode::slice_memory(const TypePtr* adr_type) const {
  PhiNode* mem = (PhiNode*) clone();
  // _adr_type is declared const; cast it away to retarget the clone.
  *(const TypePtr**)&mem->_adr_type = adr_type;
  // convert self-loops, or else we get a bad graph
  for (uint i = 1; i < req(); i++) {
    if ((const Node*)in(i) == this)  mem->set_req(i, mem);
  }
  mem->verify_adr_type();
  return mem;
}

//------------------------split_out_instance-----------------------------------
// Split out an instance type from a bottom phi.
// Build (or find) a parallel phi nest covering only the known-instance
// alias 'at', mirroring this bottom/raw memory phi nest.  Newly created
// phis are registered with the optimizer as they are built.
PhiNode* PhiNode::split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) const {
  const TypeOopPtr *t_oop = at->isa_oopptr();
  assert(t_oop != NULL && t_oop->is_known_instance(), "expecting instance oopptr");
  const TypePtr *t = adr_type();
  // This phi must cover the requested instance slice: either it is
  // bottom/raw memory, or its generic oop slice refines exactly to t_oop.
  assert(type() == Type::MEMORY &&
         (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
          t->isa_oopptr() && !t->is_oopptr()->is_known_instance() &&
          t->is_oopptr()->cast_to_exactness(true)
            ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
            ->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop),
         "bottom or raw memory required");

  // Check if an appropriate node already exists.
  Node *region = in(0);
  for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
    Node* use = region->fast_out(k);
    if( use->is_Phi()) {
      PhiNode *phi2 = use->as_Phi();
      if (phi2->type() == Type::MEMORY && phi2->adr_type() == at) {
        return phi2;
      }
    }
  }
  Compile *C = igvn->C;
  Arena *a = Thread::current()->resource_area();
  Node_Array node_map = new Node_Array(a);    // original phi idx -> new sliced phi
  Node_Stack stack(a, C->live_nodes() >> 4);  // iterative DFS: (phi, input index)
  PhiNode *nphi = slice_memory(at);
  igvn->register_new_node_with_optimizer( nphi );
  node_map.map(_idx, nphi);
  stack.push((Node *)this, 1);
  // Iterative walk of the phi nest.  Each stack entry remembers which input
  // index of which original phi to resume at.
  while(!stack.is_empty()) {
    PhiNode *ophi = stack.node()->as_Phi();
    uint i = stack.index();
    assert(i >= 1, "not control edge");
    stack.pop();
    nphi = node_map[ophi->_idx]->as_Phi();
    for (; i < ophi->req(); i++) {
      Node *in = ophi->in(i);
      if (in == NULL || igvn->type(in) == Type::TOP)
        continue;
      // Bypass memory nodes that cannot affect the t_oop slice.
      Node *opt = MemNode::optimize_simple_memory_chain(in, t_oop, NULL, igvn);
      PhiNode *optphi = opt->is_Phi() ? opt->as_Phi() : NULL;
      if (optphi != NULL && optphi->adr_type() == TypePtr::BOTTOM) {
        opt = node_map[optphi->_idx];
        if (opt == NULL) {
          // First visit of a nested bottom phi: suspend work on ophi at
          // index i and descend into the nested phi instead.
          stack.push(ophi, i);
          nphi = optphi->slice_memory(at);
          igvn->register_new_node_with_optimizer( nphi );
          node_map.map(optphi->_idx, nphi);
          ophi = optphi;
          i = 0; // will get incremented at top of loop
          continue;
        }
      }
      nphi->set_req(i, opt);
    }
  }
  return nphi;
}

//------------------------verify_adr_type--------------------------------------
#ifdef ASSERT
// Recursively check that every phi, and every non-phi memory leaf, in this
// phi nest agrees with the address slice 'at' (or is TypePtr::BOTTOM).
// 'visited' guards against cycles in the nest.
void PhiNode::verify_adr_type(VectorSet& visited, const TypePtr* at) const {
  if (visited.test_set(_idx))  return;  // already visited

  // recheck constructor invariants:
  verify_adr_type(false);

  // recheck local phi/phi consistency:
  assert(_adr_type == at || _adr_type == TypePtr::BOTTOM,
         "adr_type must be consistent across phi nest");

  // walk around
  for (uint i = 1; i < req(); i++) {
    Node* n = in(i);
    if (n == NULL)  continue;
    const Node* np = in(i);
    if (np->is_Phi()) {
      np->as_Phi()->verify_adr_type(visited, at);
    } else if (n->bottom_type() == Type::TOP
               || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
      // ignore top inputs
    } else {
      const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
      // recheck phi/non-phi consistency at leaves:
      assert((nat != NULL) == (at != NULL), "");
      assert(nat == at || nat == TypePtr::BOTTOM,
             "adr_type must be consistent at leaves of phi nest");
    }
  }
}

// Verify a whole nest of phis rooted at this one.
// Check this phi's own invariants; if 'recursive', also walk the whole phi
// nest (thorough nest verification only under -XX:+VerifyAliases).
void PhiNode::verify_adr_type(bool recursive) const {
  if (VMError::is_error_reported())  return;  // muzzle asserts when debugging an error
  if (Node::in_dump())  return;  // muzzle asserts when printing

  // Only memory phis carry an address type.
  assert((_type == Type::MEMORY) == (_adr_type != NULL), "adr_type for memory phis only");

  if (!VerifyAliases)  return;  // verify thoroughly only if requested

  assert(_adr_type == flatten_phi_adr_type(_adr_type),
         "Phi::adr_type must be pre-normalized");

  if (recursive) {
    VectorSet visited(Thread::current()->resource_area());
    verify_adr_type(visited, _adr_type);
  }
}
#endif


//------------------------------Value------------------------------------------
// Compute the type of the PhiNode: the meet (lattice join) of the types of
// all inputs on reachable control paths, filtered by the phi's declared
// worst-case type, with special handling for counted-loop trip phis and
// for interface/class mismatches the type lattice cannot express.
const Type* PhiNode::Value(PhaseGVN* phase) const {
  Node *r = in(0);        // RegionNode
  if( !r )                // Copy or dead
    return in(1) ? phase->type(in(1)) : Type::TOP;

  // Note: During parsing, phis are often transformed before their regions.
  // This means we have to use type_or_null to defend against untyped regions.
  if( phase->type_or_null(r) == Type::TOP )  // Dead code?
    return Type::TOP;

  // Check for trip-counted loop.  If so, be smarter.
  CountedLoopNode* l = r->is_CountedLoop() ? r->as_CountedLoop() : NULL;
  if (l && ((const Node*)l->phi() == this)) { // Trip counted loop!
    // protect against init_trip() or limit() returning NULL
    if (l->can_be_counted_loop(phase)) {
      const Node *init   = l->init_trip();
      const Node *limit  = l->limit();
      const Node* stride = l->stride();
      if (init != NULL && limit != NULL && stride != NULL) {
        const TypeInt* lo = phase->type(init)->isa_int();
        const TypeInt* hi = phase->type(limit)->isa_int();
        const TypeInt* stride_t = phase->type(stride)->isa_int();
        if (lo != NULL && hi != NULL && stride_t != NULL) { // Dying loops might have TOP here
          assert(stride_t->_hi >= stride_t->_lo, "bad stride type");
          if (stride_t->_hi < 0) {          // Down-counter loop
            // Trip counter runs from limit up to init: swap the bounds.
            swap(lo, hi);
            return TypeInt::make(MIN2(lo->_lo, hi->_lo) , hi->_hi, 3);
          } else if (stride_t->_lo >= 0) {
            return TypeInt::make(lo->_lo, MAX2(lo->_hi, hi->_hi), 3);
          }
        }
      }
    } else if (l->in(LoopNode::LoopBackControl) != NULL &&
               in(LoopNode::EntryControl) != NULL &&
               phase->type(l->in(LoopNode::LoopBackControl)) == Type::TOP) {
      // During CCP, if we saturate the type of a counted loop's Phi
      // before the special code for counted loop above has a chance
      // to run (that is as long as the type of the backedge's control
      // is top), we might end up with non monotonic types
      return phase->type(in(LoopNode::EntryControl));
    }
  }

  // Until we have harmony between classes and interfaces in the type
  // lattice, we must tread carefully around phis which implicitly
  // convert the one to the other.
  const TypePtr* ttp = _type->make_ptr();
  const TypeInstPtr* ttip = (ttp != NULL) ? ttp->isa_instptr() : NULL;
  const TypeKlassPtr* ttkp = (ttp != NULL) ? ttp->isa_klassptr() : NULL;
  bool is_intf = false;
  if (ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) {
    is_intf = true;
  } else if (ttkp != NULL && ttkp->is_loaded() && ttkp->klass()->is_interface()) {
    is_intf = true;
  }

  // Default case: merge all inputs
  const Type *t = Type::TOP;        // Merged type starting value
  for (uint i = 1; i < req(); ++i) {// For all paths in
    // Reachable control path?
    if (r->in(i) && phase->type(r->in(i)) == Type::CONTROL) {
      const Type* ti = phase->type(in(i));
      // We assume that each input of an interface-valued Phi is a true
      // subtype of that interface.  This might not be true of the meet
      // of all the input types.  The lattice is not distributive in
      // such cases.  Ward off asserts in type.cpp by refusing to do
      // meets between interfaces and proper classes.
      const TypePtr* tip = ti->make_ptr();
      const TypeInstPtr* tiip = (tip != NULL) ? tip->isa_instptr() : NULL;
      if (tiip) {
        bool ti_is_intf = false;
        ciKlass* k = tiip->klass();
        if (k->is_loaded() && k->is_interface())
          ti_is_intf = true;
        if (is_intf != ti_is_intf)
          // Interface/class mismatch: fall back to the declared type.
          { t = _type; break; }
      }
      t = t->meet_speculative(ti);
    }
  }

  // The worst-case type (from ciTypeFlow) should be consistent with "t".
  // That is, we expect that "t->higher_equal(_type)" holds true.
  // There are various exceptions:
  // - Inputs which are phis might in fact be widened unnecessarily.
  //   For example, an input might be a widened int while the phi is a short.
  // - Inputs might be BotPtrs but this phi is dependent on a null check,
  //   and postCCP has removed the cast which encodes the result of the check.
  // - The type of this phi is an interface, and the inputs are classes.
  // - Value calls on inputs might produce fuzzy results.
  //   (Occurrences of this case suggest improvements to Value methods.)
  //
  // It is not possible to see Type::BOTTOM values as phi inputs,
  // because the ciTypeFlow pre-pass produces verifier-quality types.
  const Type* ft = t->filter_speculative(_type);  // Worst case type

#ifdef ASSERT
  // The following logic has been moved into TypeOopPtr::filter.
  const Type* jt = t->join_speculative(_type);
  if (jt->empty()) {           // Emptied out???

    // Check for evil case of 't' being a class and '_type' expecting an
    // interface.  This can happen because the bytecodes do not contain
    // enough type info to distinguish a Java-level interface variable
    // from a Java-level object variable.  If we meet 2 classes which
    // both implement interface I, but their meet is at 'j/l/O' which
    // doesn't implement I, we have no way to tell if the result should
    // be 'I' or 'j/l/O'.  Thus we'll pick 'j/l/O'.  If this then flows
    // into a Phi which "knows" it's an Interface type we'll have to
    // uplift the type.
    if (!t->empty() && ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) {
      assert(ft == _type, "");   // Uplift to interface
    } else if (!t->empty() && ttkp != NULL && ttkp->is_loaded() && ttkp->klass()->is_interface()) {
      assert(ft == _type, "");   // Uplift to interface
    } else {
      // We also have to handle 'evil cases' of interface- vs. class-arrays
      Type::get_arrays_base_elements(jt, _type, NULL, &ttip);
      if (!t->empty() && ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) {
        assert(ft == _type, "");   // Uplift to array of interface
      } else {
        // Otherwise it's something stupid like non-overlapping int ranges
        // found on dying counted loops.
        assert(ft == Type::TOP, ""); // Canonical empty value
      }
    }
  }

  else {

    // If we have an interface-typed Phi and we narrow to a class type, the join
    // should report back the class.  However, if we have a J/L/Object
    // class-typed Phi and an interface flows in, it's possible that the meet &
    // join report an interface back out.  This isn't possible but happens
    // because the type system doesn't interact well with interfaces.
    const TypePtr *jtp = jt->make_ptr();
    const TypeInstPtr *jtip = (jtp != NULL) ? jtp->isa_instptr() : NULL;
    const TypeKlassPtr *jtkp = (jtp != NULL) ? jtp->isa_klassptr() : NULL;
    if( jtip && ttip ) {
      if( jtip->is_loaded() && jtip->klass()->is_interface() &&
          ttip->is_loaded() && !ttip->klass()->is_interface() ) {
        assert(ft == ttip->cast_to_ptr_type(jtip->ptr()) ||
               ft->isa_narrowoop() && ft->make_ptr() == ttip->cast_to_ptr_type(jtip->ptr()), "");
        jt = ft;
      }
    }
    if( jtkp && ttkp ) {
      if( jtkp->is_loaded() && jtkp->klass()->is_interface() &&
          !jtkp->klass_is_exact() && // Keep exact interface klass (6894807)
          ttkp->is_loaded() && !ttkp->klass()->is_interface() ) {
        assert(ft == ttkp->cast_to_ptr_type(jtkp->ptr()) ||
               ft->isa_narrowklass() && ft->make_ptr() == ttkp->cast_to_ptr_type(jtkp->ptr()), "");
        jt = ft;
      }
    }
    if (jt != ft && jt->base() == ft->base()) {
      if (jt->isa_int() &&
          jt->is_int()->_lo == ft->is_int()->_lo &&
          jt->is_int()->_hi == ft->is_int()->_hi)
        jt = ft;
      if (jt->isa_long() &&
          jt->is_long()->_lo == ft->is_long()->_lo &&
          jt->is_long()->_hi == ft->is_long()->_hi)
        jt = ft;
    }
    if (jt != ft) {
      // Debug dump before the assert below fires.
      tty->print("merge type: "); t->dump(); tty->cr();
      tty->print("kill type: "); _type->dump(); tty->cr();
      tty->print("join type: "); jt->dump(); tty->cr();
      tty->print("filter type: "); ft->dump(); tty->cr();
    }
    assert(jt == ft, "");
  }
#endif //ASSERT

  // Deal with conversion problems found in data loops.
  ft = phase->saturate(ft, phase->type_or_null(this), _type);

  return ft;
}


//------------------------------is_diamond_phi---------------------------------
// Does this Phi represent a simple well-shaped diamond merge?  Return the
// index of the true path or 0 otherwise.
// If check_control_only is true, do not inspect the If node at the
// top, and return -1 (not an edge number) on success.
int PhiNode::is_diamond_phi(bool check_control_only) const {
  // Check for a 2-path merge
  Node *region = in(0);
  if( !region ) return 0;
  if( region->req() != 3 ) return 0;
  if( req() != 3 ) return 0;
  // Check that both paths come from the same If
  Node *ifp1 = region->in(1);
  Node *ifp2 = region->in(2);
  if( !ifp1 || !ifp2 ) return 0;
  Node *iff = ifp1->in(0);
  if( !iff || !iff->is_If() ) return 0;
  if( iff != ifp2->in(0) ) return 0;
  if (check_control_only) return -1;
  // Check for a proper bool/cmp
  const Node *b = iff->in(1);
  if( !b->is_Bool() ) return 0;
  const Node *cmp = b->in(1);
  if( !cmp->is_Cmp() ) return 0;

  // Check for branching opposite expected
  if( ifp2->Opcode() == Op_IfTrue ) {
    // Region input 2 comes from the true projection, so phi input 2 is the
    // true-path value.
    assert( ifp1->Opcode() == Op_IfFalse, "" );
    return 2;
  } else {
    assert( ifp1->Opcode() == Op_IfTrue, "" );
    return 1;
  }
}

//----------------------------check_cmove_id-----------------------------------
// Check for CMove'ing a constant after comparing against the constant.
// Happens all the time now, since if we compare equality vs a constant in
// the parser, we "know" the variable is constant on one path and we force
// it.  Thus code like "if( x==0 ) {/*EMPTY*/}" ends up inserting a
// conditional move: "x = (x==0)?0:x;".  Yucko.  This fix is slightly more
// general in that we don't need constants.
// Since CMove's are only inserted
// in very special circumstances, we do it here on generic Phi's.
Node* PhiNode::is_cmove_id(PhaseTransform* phase, int true_path) {
  assert(true_path !=0, "only diamond shape graph expected");

  // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
  // phi->region->if_proj->ifnode->bool->cmp
  Node* region = in(0);
  Node* iff = region->in(1)->in(0);
  BoolNode* b = iff->in(1)->as_Bool();
  Node* cmp = b->in(1);
  Node* tval = in(true_path);
  Node* fval = in(3-true_path);
  Node* id = CMoveNode::is_cmove_id(phase, cmp, tval, fval, b);
  if (id == NULL)
    return NULL;

  // Either value might be a cast that depends on a branch of 'iff'.
  // Since the 'id' value will float free of the diamond, either
  // decast or return failure.
  Node* ctl = id->in(0);
  if (ctl != NULL && ctl->in(0) == iff) {
    if (id->is_ConstraintCast()) {
      return id->in(1);
    } else {
      // Don't know how to disentangle this value.
      return NULL;
    }
  }

  return id;
}

//------------------------------Identity---------------------------------------
// Check for Region being Identity.
// A phi is an identity when all live inputs reduce to one value (after
// discounting top and self-loops), or when it is a constant-CMove diamond.
Node* PhiNode::Identity(PhaseGVN* phase) {
  // Check for no merging going on
  // (There used to be special-case code here when this->region->is_Loop.
  // It would check for a tributary phi on the backedge that the main phi
  // trivially, perhaps with a single cast.  The unique_input method
  // does all this and more, by reducing such tributaries to 'this'.)
  Node* uin = unique_input(phase, false);
  if (uin != NULL) {
    return uin;
  }

  int true_path = is_diamond_phi();
  if (true_path != 0) {
    Node* id = is_cmove_id(phase, true_path);
    if (id != NULL) return id;
  }

  return this;                     // No identity
}

//-----------------------------unique_input------------------------------------
// Find the unique value, discounting top, self-loops, and casts.
// Return top if there are no inputs, and self if there are multiple.
Node* PhiNode::unique_input(PhaseTransform* phase, bool uncast) {
  //  1) One unique direct input,
  // or if uncast is true:
  //  2) some of the inputs have an intervening ConstraintCast
  //  3) an input is a self loop
  //
  //  1) input   or   2) input   or   3) input __
  //     /   \          /   \            \  /  \
  //     \   /         |    cast          phi  cast
  //      phi           \   /              \   /
  //                     phi                --

  Node* r = in(0);              // RegionNode
  if (r == NULL) return in(1);  // Already degraded to a Copy
  Node* input = NULL; // The unique direct input (maybe uncasted = ConstraintCasts removed)

  for (uint i = 1, cnt = req(); i < cnt; ++i) {
    Node* rc = r->in(i);
    if (rc == NULL || phase->type(rc) == Type::TOP)
      continue;                 // ignore unreachable control path
    Node* n = in(i);
    if (n == NULL)
      continue;
    Node* un = n;
    if (uncast) {
#ifdef ASSERT
      Node* m = un->uncast();
#endif
      // Strip a chain of single-input ConstraintCasts, but refuse to
      // strip through an oop->raw transition.
      while (un != NULL && un->req() == 2 && un->is_ConstraintCast()) {
        Node* next = un->in(1);
        if (phase->type(next)->isa_rawptr() && phase->type(un)->isa_oopptr()) {
          // risk exposing raw ptr at safepoint
          break;
        }
        un = next;
      }
      assert(m == un || un->in(1) == m, "Only expected at CheckCastPP from allocation");
    }
    if (un == NULL || un == this || phase->type(un) == Type::TOP) {
      continue; // ignore if top, or in(i) and "this" are in a data cycle
    }
    // Check for a unique input (maybe uncasted)
    if (input == NULL) {
      input = un;
    } else if (input != un) {
      input = NodeSentinel; // no unique input
    }
  }
  if (input == NULL) {
    return phase->C->top(); // no inputs
  }

  if (input != NodeSentinel) {
    return input;           // one unique direct input
  }

  // Nothing.
  return NULL;
}

//------------------------------is_x2logic-------------------------------------
// Check for simple convert-to-boolean pattern
// If:(C Bool) Region:(IfF IfT) Phi:(Region 0 1)
// Convert Phi to an ConvIB.
static Node *is_x2logic( PhaseGVN *phase, PhiNode *phi, int true_path ) {
  assert(true_path !=0, "only diamond shape graph expected");
  // Convert the true/false index into an expected 0/1 return.
  // Map 2->0 and 1->1.
  int flipped = 2-true_path;

  // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
  // phi->region->if_proj->ifnode->bool->cmp
  Node *region = phi->in(0);
  Node *iff = region->in(1)->in(0);
  BoolNode *b = (BoolNode*)iff->in(1);
  const CmpNode *cmp = (CmpNode*)b->in(1);

  Node *zero = phi->in(1);
  Node *one  = phi->in(2);
  const Type *tzero = phase->type( zero );
  const Type *tone  = phase->type( one );

  // Check for compare vs 0
  const Type *tcmp = phase->type(cmp->in(2));
  if( tcmp != TypeInt::ZERO && tcmp != TypePtr::NULL_PTR ) {
    // Allow cmp-vs-1 if the other input is bounded by 0-1
    if( !(tcmp == TypeInt::ONE && phase->type(cmp->in(1)) == TypeInt::BOOL) )
      return NULL;
    flipped = 1-flipped;      // Test is vs 1 instead of 0!
  }

  // Check for setting zero/one opposite expected
  if( tzero == TypeInt::ZERO ) {
    if( tone == TypeInt::ONE ) {
    } else return NULL;
  } else if( tzero == TypeInt::ONE ) {
    if( tone == TypeInt::ZERO ) {
      flipped = 1-flipped;
    } else return NULL;
  } else return NULL;

  // Check for boolean test backwards
  if( b->_test._test == BoolTest::ne ) {
  } else if( b->_test._test == BoolTest::eq ) {
    flipped = 1-flipped;
  } else return NULL;

  // Build int->bool conversion
  Node *n = new Conv2BNode( cmp->in(1) );
  if( flipped )
    // Invert the 0/1 result with an XOR against constant 1.
    n = new XorINode( phase->transform(n), phase->intcon(1) );

  return n;
}

//------------------------------is_cond_add------------------------------------
// Check for simple conditional add pattern:  "(P < Q) ? X+Y : X;"
// To be profitable the control flow has to disappear; there can be no other
// values merging here.  We replace the test-and-branch with:
// "(sgn(P-Q))&Y) + X".  Basically, convert "(P < Q)" into 0 or -1 by
// moving the carry bit from (P-Q) into a register with 'sbb EAX,EAX'.
// Then convert Y to 0-or-Y and finally add.
// This is a key transform for SpecJava _201_compress.
static Node* is_cond_add(PhaseGVN *phase, PhiNode *phi, int true_path) {
  assert(true_path !=0, "only diamond shape graph expected");

  // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
  // phi->region->if_proj->ifnode->bool->cmp
  RegionNode *region = (RegionNode*)phi->in(0);
  Node *iff = region->in(1)->in(0);
  BoolNode* b = iff->in(1)->as_Bool();
  const CmpNode *cmp = (CmpNode*)b->in(1);

  // Make sure only merging this one phi here
  if (region->has_unique_phi() != phi) return NULL;

  // Make sure each arm of the diamond has exactly one output, which we assume
  // is the region.  Otherwise, the control flow won't disappear.
  if (region->in(1)->outcnt() != 1) return NULL;
  if (region->in(2)->outcnt() != 1) return NULL;

  // Check for "(P < Q)" of type signed int
  if (b->_test._test != BoolTest::lt) return NULL;
  if (cmp->Opcode() != Op_CmpI) return NULL;

  Node *p = cmp->in(1);
  Node *q = cmp->in(2);
  Node *n1 = phi->in( true_path);
  Node *n2 = phi->in(3-true_path);

  int op = n1->Opcode();
  if( op != Op_AddI           // Need zero as additive identity
      /*&&op != Op_SubI &&
      op != Op_AddP &&
      op != Op_XorI &&
      op != Op_OrI*/ )
    return NULL;

  // Identify X (merged unchanged) and Y (the conditional addend).
  Node *x = n2;
  Node *y = NULL;
  if( x == n1->in(1) ) {
    y = n1->in(2);
  } else if( x == n1->in(2) ) {
    y = n1->in(1);
  } else return NULL;

  // Not so profitable if compare and add are constants
  if( q->is_Con() && phase->type(q) != TypeInt::ZERO && y->is_Con() )
    return NULL;

  Node *cmplt = phase->transform( new CmpLTMaskNode(p,q) );
  Node *j_and = phase->transform( new AndINode(cmplt,y) );
  return new AddINode(j_and,x);
}

//------------------------------is_absolute------------------------------------
// Check for absolute value.
static Node* is_absolute( PhaseGVN *phase, PhiNode *phi_root, int true_path) {
  assert(true_path !=0, "only diamond shape graph expected");

  int cmp_zero_idx = 0;        // Index of compare input where to look for zero
  int phi_x_idx = 0;           // Index of phi input where to find naked x

  // ABS ends with the merge of 2 control flow paths.
  // Find the false path from the true path. With only 2 inputs, 3 - x works nicely.
  int false_path = 3 - true_path;

  // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
  // phi->region->if_proj->ifnode->bool->cmp
  BoolNode *bol = phi_root->in(0)->in(1)->in(0)->in(1)->as_Bool();

  // Check bool sense
  switch( bol->_test._test ) {
  case BoolTest::lt: cmp_zero_idx = 1; phi_x_idx = true_path;  break;
  case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = false_path; break;
  case BoolTest::gt: cmp_zero_idx = 2; phi_x_idx = true_path;  break;
  case BoolTest::ge: cmp_zero_idx = 1; phi_x_idx = false_path; break;
  default:           return NULL;                              break;
  }

  // Test is next
  Node *cmp = bol->in(1);
  const Type *tzero = NULL;
  switch( cmp->Opcode() ) {
  case Op_CmpF: tzero = TypeF::ZERO; break; // Float ABS
  case Op_CmpD: tzero = TypeD::ZERO; break; // Double ABS
  default: return NULL;
  }

  // Find zero input of compare; the other input is being abs'd
  Node *x = NULL;
  bool flip = false;
  if( phase->type(cmp->in(cmp_zero_idx)) == tzero ) {
    x = cmp->in(3 - cmp_zero_idx);
  } else if( phase->type(cmp->in(3 - cmp_zero_idx)) == tzero ) {
    // The test is inverted, we should invert the result...
    x = cmp->in(cmp_zero_idx);
    flip = true;
  } else {
    return NULL;
  }

  // Next get the 2 pieces being selected, one is the original value
  // and the other is the negated value.
  if( phi_root->in(phi_x_idx) != x ) return NULL;

  // Check other phi input for subtract node
  Node *sub = phi_root->in(3 - phi_x_idx);

  // Allow only Sub(0,X) and fail out for all others; Neg is not OK
  if( tzero == TypeF::ZERO ) {
    if( sub->Opcode() != Op_SubF ||
        sub->in(2) != x ||
        phase->type(sub->in(1)) != tzero ) return NULL;
    x = new AbsFNode(x);
    if (flip) {
      x = new SubFNode(sub->in(1), phase->transform(x));
    }
  } else {
    if( sub->Opcode() != Op_SubD ||
        sub->in(2) != x ||
        phase->type(sub->in(1)) != tzero ) return NULL;
    x = new AbsDNode(x);
    if (flip) {
      x = new SubDNode(sub->in(1), phase->transform(x));
    }
  }

  return x;
}

//------------------------------split_once-------------------------------------
// Helper for split_flow_path
// Move every edge of n (Region or Phi) whose corresponding phi input is
// 'val' over to the new node newn, deleting it from n.
static void split_once(PhaseIterGVN *igvn, Node *phi, Node *val, Node *n, Node *newn) {
  igvn->hash_delete(n);         // Remove from hash before hacking edges

  uint j = 1;
  // Iterate downward so del_req's compaction does not skip entries.
  for (uint i = phi->req()-1; i > 0; i--) {
    if (phi->in(i) == val) {    // Found a path with val?
      // Add to NEW Region/Phi, no DU info
      newn->set_req( j++, n->in(i) );
      // Remove from OLD Region/Phi
      n->del_req(i);
    }
  }

  // Register the new node but do not transform it.  Cannot transform until the
  // entire Region/Phi conglomerate has been hacked as a single huge transform.
  igvn->register_new_node_with_optimizer( newn );

  // Now I can point to the new node.
  n->add_req(newn);
  igvn->_worklist.push(n);
}

//------------------------------split_flow_path--------------------------------
// Check for merging identical values and split flow paths
static Node* split_flow_path(PhaseGVN *phase, PhiNode *phi) {
  BasicType bt = phi->type()->basic_type();
  if( bt == T_ILLEGAL || type2size[bt] <= 0 )
    return NULL;                // Bail out on funny non-value stuff
  if( phi->req() <= 3 )         // Need at least 2 matched inputs and a
    return NULL;                // third unequal input to be worth doing

  // Scan for a constant
  uint i;
  for( i = 1; i < phi->req()-1; i++ ) {
    Node *n = phi->in(i);
    if( !n ) return NULL;
    if( phase->type(n) == Type::TOP ) return NULL;
    if( n->Opcode() == Op_ConP || n->Opcode() == Op_ConN || n->Opcode() == Op_ConNKlass )
      break;
  }
  if( i >= phi->req() )         // Only split for constants
    return NULL;

  Node *val = phi->in(i);       // Constant to split for
  uint hit = 0;                 // Number of times it occurs
  Node *r = phi->region();

  for( ; i < phi->req(); i++ ){ // Count occurrences of constant
    Node *n = phi->in(i);
    if( !n ) return NULL;
    if( phase->type(n) == Type::TOP ) return NULL;
    if( phi->in(i) == val ) {
      hit++;
      if (PhaseIdealLoop::find_predicate(r->in(i)) != NULL) {
        return NULL;            // don't split loop entry path
      }
    }
  }

  if( hit <= 1 ||               // Make sure we find 2 or more
      hit == phi->req()-1 )     // and not ALL the same value
    return NULL;

  // Now start splitting out the flow paths that merge the same value.
  // Split first the RegionNode.
  PhaseIterGVN *igvn = phase->is_IterGVN();
  RegionNode *newr = new RegionNode(hit+1);
  split_once(igvn, phi, val, r, newr);

  // Now split all other Phis than this one
  for (DUIterator_Fast kmax, k = r->fast_outs(kmax); k < kmax; k++) {
    Node* phi2 = r->fast_out(k);
    if( phi2->is_Phi() && phi2->as_Phi() != phi ) {
      PhiNode *newphi = PhiNode::make_blank(newr, phi2);
      split_once(igvn, phi, val, phi2, newphi);
    }
  }

  // Clean up this guy
  igvn->hash_delete(phi);
  for( i = phi->req()-1; i > 0; i-- ) {
    if( phi->in(i) == val ) {
      phi->del_req(i);
    }
  }
  // Single surviving edge for 'val' now comes from the new merge region.
  phi->add_req(val);

  return phi;
}

//=============================================================================
//------------------------------simple_data_loop_check-------------------------
// Try to determining if the phi node in a simple safe/unsafe data loop.
// Returns:
// enum LoopSafety { Safe = 0, Unsafe, UnsafeLoop };
// Safe       - safe case when the phi and it's inputs reference only safe data
//              nodes;
// Unsafe     - the phi and it's inputs reference unsafe data nodes but there
//              is no reference back to the phi - need a graph walk
//              to determine if it is in a loop;
// UnsafeLoop - unsafe case when the phi references itself directly or through
//              unsafe data node.
// Note: a safe data node is a node which could/never reference itself during
// GVN transformations. For now it is Con, Proj, Phi, CastPP, CheckCastPP.
// I mark Phi nodes as safe node not only because they can reference itself
// but also to prevent mistaking the fallthrough case inside an outer loop
// as dead loop when the phi references itselfs through an other phi.
PhiNode::LoopSafety PhiNode::simple_data_loop_check(Node *in) const {
  // It is unsafe loop if the phi node references itself directly.
  if (in == (Node*)this)
    return UnsafeLoop; // Unsafe loop
  // Unsafe loop if the phi node references itself through an unsafe data node.
  // Exclude cases with null inputs or data nodes which could reference
  // itself (safe for dead loops).
  if (in != NULL && !in->is_dead_loop_safe()) {
    // Check inputs of phi's inputs also.
    // It is much less expensive then full graph walk.
    uint cnt = in->req();
    uint i = (in->is_Proj() && !in->is_CFG()) ? 0 : 1;
    for (; i < cnt; ++i) {
      Node* m = in->in(i);
      if (m == (Node*)this)
        return UnsafeLoop; // Unsafe loop
      if (m != NULL && !m->is_dead_loop_safe()) {
        // Check the most common case (about 30% of all cases):
        // phi->Load/Store->AddP->(ConP ConP Con)/(Parm Parm Con).
        Node *m1 = (m->is_AddP() && m->req() > 3) ? m->in(1) : NULL;
        if (m1 == (Node*)this)
          return UnsafeLoop; // Unsafe loop
        if (m1 != NULL && m1 == m->in(2) &&
            m1->is_dead_loop_safe() && m->in(3)->is_Con()) {
          continue; // Safe case
        }
        // The phi references an unsafe node - need full analysis.
        return Unsafe;
      }
    }
  }
  return Safe; // Safe case - we can optimize the phi node.
}

//------------------------------is_unsafe_data_reference-----------------------
// If phi can be reached through the data input - it is data loop.
bool PhiNode::is_unsafe_data_reference(Node *in) const {
  assert(req() > 1, "");
  // First, check simple cases when phi references itself directly or
  // through an other node.
  LoopSafety safety = simple_data_loop_check(in);
  if (safety == UnsafeLoop)
    return true;  // phi references itself - unsafe loop
  else if (safety == Safe)
    return false; // Safe case - phi could be replaced with the unique input.

  // Unsafe case when we should go through data graph to determine
  // if the phi references itself.

  ResourceMark rm;

  Arena *a = Thread::current()->resource_area();
  Node_List nstack(a);
  VectorSet visited(a);

  // Breadth-less worklist walk over the data inputs, pruning at
  // dead-loop-safe nodes, looking for a path back to this phi.
  nstack.push(in); // Start with unique input.
  visited.set(in->_idx);
  while (nstack.size() != 0) {
    Node* n = nstack.pop();
    uint cnt = n->req();
    uint i = (n->is_Proj() && !n->is_CFG()) ? 0 : 1;
    for (; i < cnt; i++) {
      Node* m = n->in(i);
      if (m == (Node*)this) {
        return true;    // Data loop
      }
      if (m != NULL && !m->is_dead_loop_safe()) { // Only look for unsafe cases.
        if (!visited.test_set(m->_idx))
          nstack.push(m);
      }
    }
  }
  return false; // The phi is not reachable from its inputs
}


//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Must preserve
// the CFG, but we can still strip out dead paths.
Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // The next should never happen after 6297035 fix.
  if( is_copy() )               // Already degraded to a Copy ?
    return NULL;                // No change

  Node *r = in(0);              // RegionNode
  assert(r->in(0) == NULL || !r->in(0)->is_Root(), "not a specially hidden merge");

  // Note: During parsing, phis are often transformed before their regions.
  // This means we have to use type_or_null to defend against untyped regions.
  if( phase->type_or_null(r) == Type::TOP ) // Dead code?
    return NULL;                // No change

  // If all inputs are value types, push the value type node down through the
  // phi because value type nodes should be merged through their input values.
  if (req() > 2 && in(1) != NULL && in(1)->is_ValueTypeBase() && (can_reshape || in(1)->is_ValueType())) {
    int opcode = in(1)->Opcode();
    uint i = 2;
    // Verify that every input is a value type node of the same opcode.
    for (; i < req() && in(i) && in(i)->is_ValueTypeBase(); i++) {
      assert(in(i)->Opcode() == opcode, "mixing pointers and values?");
    }
    if (i == req()) {
      // All inputs are value types: merge them field-wise through per-field phis.
      ValueTypeBaseNode* vt = in(1)->as_ValueTypeBase()->clone_with_phis(phase, in(0));
      for (uint i = 2; i < req(); ++i) {
        vt->merge_with(phase, in(i)->as_ValueTypeBase(), i, i == (req()-1));
      }
      return vt;
    }
  }

  if (type()->isa_valuetypeptr() && can_reshape) {
    // If the Phi merges the result from a mix of constant and non
    // constant method handles, only some of its inputs are
    // ValueTypePtr nodes and we can't push the ValueTypePtr node down
    // to remove the need for allocations. This is fixed by transforming:
    //
    // (Phi ValueTypePtr#1 Node#2) to (Phi ValueTypePtr#1 CheckCastPP#2)
    //
    // Then pushing the CheckCastPP up through Phis until it reaches
    // the non constant method handle call. The type of the return
    // value is then known from the type of the CheckCastPP. A
    // ValueTypePtr can be created by adding projections to the call
    // for all values being returned. See
    // CheckCastPPNode::Ideal(). That ValueTypePtr node can then be
    // pushed down through Phis.
    const TypeValueTypePtr* vtptr = NULL;
    for (uint i = 1; i < req(); i++) {
      if (in(i) != NULL && in(i)->is_ValueTypePtr()) {
        const TypeValueTypePtr* t = phase->type(in(i))->is_valuetypeptr();
        t = t->cast_to_ptr_type(TypePtr::BotPTR)->is_valuetypeptr();
        if (vtptr == NULL) {
          vtptr = t;
        } else {
          assert(vtptr == t, "Phi should merge identical value types");
        }
      } else {
        assert(in(i) == NULL || vtptr == NULL || phase->type(in(i))->higher_equal(vtptr) || phase->type(in(i)) == Type::TOP ||
               phase->type(in(i))->is_valuetypeptr()->is__Value(), "bad type");
      }
    }
    if (vtptr != NULL) {
      // One input is a value type. All inputs must have the same type.
      bool progress = false;
      PhaseIterGVN* igvn = phase->is_IterGVN();
      for (uint i = 1; i < req(); i++) {
        if (in(i) != NULL && !in(i)->is_Con() && !phase->type(in(i))->higher_equal(vtptr)) {
          // Can't transform because CheckCastPPNode::Identity can
          // push the cast up through another Phi and cause this same
          // transformation to run again, indefinitely
          Node* cast = igvn->register_new_node_with_optimizer(new CheckCastPPNode(NULL, in(i), vtptr));
          set_req(i, cast);
          progress = true;
        }
      }
      if (progress) {
        return this;
      }
    }
  }

  Node *top = phase->C->top();
  bool new_phi = (outcnt() == 0); // transforming new Phi
  // No change for igvn if new phi is not hooked
  if (new_phi && can_reshape)
    return NULL;

  // There are 2 situations when only one valid phi's input is left
  // (in addition to Region input).
  // One: region is not loop - replace phi with this input.
  // Two: region is loop - replace phi with top since this data path is dead
  //                       and we need to break the dead data loop.
  Node* progress = NULL;        // Record if any progress made
  for( uint j = 1; j < req(); ++j ){ // For all paths in
    // Check unreachable control paths
    Node* rc = r->in(j);
    Node* n = in(j);            // Get the input
    if (rc == NULL || phase->type(rc) == Type::TOP) {
      if (n != top) {           // Not already top?
        PhaseIterGVN *igvn = phase->is_IterGVN();
        if (can_reshape && igvn != NULL) {
          igvn->_worklist.push(r);
        }
        set_req(j, top);        // Nuke it down
        progress = this;        // Record progress
      }
    }
  }

  if (can_reshape && outcnt() == 0) {
    // set_req() above may kill outputs if Phi is referenced
    // only by itself on the dead (top) control path.
    return top;
  }

  bool uncasted = false;
  Node* uin = unique_input(phase, false);
  if (uin == NULL && can_reshape) {
    // No unique input when casts are considered; retry while looking
    // through casts (uncasted inputs).
    uncasted = true;
    uin = unique_input(phase, true);
  }
  if (uin == top) {             // Simplest case: no alive inputs.
    if (can_reshape)            // IGVN transformation
      return top;
    else
      return NULL;              // Identity will return TOP
  } else if (uin != NULL) {
    // Only one not-NULL unique input path is left.
    // Determine if this input is backedge of a loop.
    // (Skip new phis which have no uses and dead regions).
    if (outcnt() > 0 && r->in(0) != NULL) {
      // First, take the short cut when we know it is a loop and
      // the EntryControl data path is dead.
      // Loop node may have only one input because entry path
      // is removed in PhaseIdealLoop::Dominators().
      assert(!r->is_Loop() || r->req() <= 3, "Loop node should have 3 or less inputs");
      bool is_loop = (r->is_Loop() && r->req() == 3);
      // Then, check if there is a data loop when phi references itself directly
      // or through other data nodes.
      if ((is_loop && !uin->eqv_uncast(in(LoopNode::EntryControl))) ||
          (!is_loop && is_unsafe_data_reference(uin))) {
        // Break this data loop to avoid creation of a dead loop.
        if (can_reshape) {
          return top;
        } else {
          // We can't return top if we are in Parse phase - cut inputs only
          // and let Identity handle the case.
          replace_edge(uin, top);
          return NULL;
        }
      }
    }

    if (uncasted) {
      // Add cast nodes between the phi to be removed and its unique input.
      // Wait until after parsing for the type information to propagate from the casts.
      assert(can_reshape, "Invalid during parsing");
      const Type* phi_type = bottom_type();
      assert(phi_type->isa_int() || phi_type->isa_ptr(), "bad phi type");
      // Add casts to carry the control dependency of the Phi that is
      // going away
      Node* cast = NULL;
      if (phi_type->isa_int()) {
        cast = ConstraintCastNode::make_cast(Op_CastII, r, uin, phi_type, true);
      } else {
        const Type* uin_type = phase->type(uin);
        if (!phi_type->isa_oopptr() && !uin_type->isa_oopptr()) {
          cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, phi_type, true);
        } else {
          // Use a CastPP for a cast to not null and a CheckCastPP for
          // a cast to a new klass (and both if both null-ness and
          // klass change).

          // If the type of phi is not null but the type of uin may be
          // null, uin's type must be casted to not null
          if (phi_type->join(TypePtr::NOTNULL) == phi_type->remove_speculative() &&
              uin_type->join(TypePtr::NOTNULL) != uin_type->remove_speculative()) {
            cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, TypePtr::NOTNULL, true);
          }

          // If the type of phi and uin, both casted to not null,
          // differ the klass of uin must be (check)cast'ed to match
          // that of phi
          if (phi_type->join_speculative(TypePtr::NOTNULL) != uin_type->join_speculative(TypePtr::NOTNULL)) {
            Node* n = uin;
            if (cast != NULL) {
              // Chain the klass cast after the null cast made above.
              cast = phase->transform(cast);
              n = cast;
            }
            cast = ConstraintCastNode::make_cast(Op_CheckCastPP, r, n, phi_type, true);
          }
          if (cast == NULL) {
            cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, phi_type, true);
          }
        }
      }
      assert(cast != NULL, "cast should be set");
      cast = phase->transform(cast);
      // set all inputs to the new cast(s) so the Phi is removed by Identity
      PhaseIterGVN* igvn = phase->is_IterGVN();
      for (uint i = 1; i < req(); i++) {
        set_req_X(i, cast, igvn);
      }
      uin = cast;
    }

    // One unique input.
    debug_only(Node* ident = Identity(phase));
    // The unique input must eventually be detected by the Identity call.
#ifdef ASSERT
    if (ident != uin && !ident->is_top()) {
      // print this output before failing assert
      r->dump(3);
      this->dump(3);
      ident->dump();
      uin->dump();
    }
#endif
    assert(ident == uin || ident->is_top(), "Identity must clean this up");
    return NULL;
  }

  Node* opt = NULL;
  int true_path = is_diamond_phi();
  if( true_path != 0 ) {
    // Check for CMove'ing identity. If it would be unsafe,
    // handle it here. In the safe case, let Identity handle it.
    Node* unsafe_id = is_cmove_id(phase, true_path);
    if( unsafe_id != NULL && is_unsafe_data_reference(unsafe_id) )
      opt = unsafe_id;

    // Check for simple convert-to-boolean pattern
    if( opt == NULL )
      opt = is_x2logic(phase, this, true_path);

    // Check for absolute value
    if( opt == NULL )
      opt = is_absolute(phase, this, true_path);

    // Check for conditional add
    if( opt == NULL && can_reshape )
      opt = is_cond_add(phase, this, true_path);

    // These 4 optimizations could subsume the phi:
    // have to check for a dead data loop creation.
    if( opt != NULL ) {
      if( opt == unsafe_id || is_unsafe_data_reference(opt) ) {
        // Found dead loop.
        if( can_reshape )
          return top;
        // We can't return top if we are in Parse phase - cut inputs only
        // to stop further optimizations for this phi. Identity will return TOP.
        assert(req() == 3, "only diamond merge phi here");
        set_req(1, top);
        set_req(2, top);
        return NULL;
      } else {
        return opt;
      }
    }
  }

  // Check for merging identical values and split flow paths
  if (can_reshape) {
    opt = split_flow_path(phase, this);
    // This optimization only modifies phi - don't need to check for dead loop.
    assert(opt == NULL || phase->eqv(opt, this), "do not elide phi");
    if (opt != NULL)  return opt;
  }

  if (in(1) != NULL && in(1)->Opcode() == Op_AddP && can_reshape) {
    // Try to undo Phi of AddP:
    // (Phi (AddP base base y) (AddP base2 base2 y))
    // becomes:
    // newbase := (Phi base base2)
    // (AddP newbase newbase y)
    //
    // This occurs as a result of unsuccessful split_thru_phi and
    // interferes with taking advantage of addressing modes. See the
    // clone_shift_expressions code in matcher.cpp
    Node* addp = in(1);
    const Type* type = addp->in(AddPNode::Base)->bottom_type();
    Node* y = addp->in(AddPNode::Offset);
    if (y != NULL && addp->in(AddPNode::Base) == addp->in(AddPNode::Address)) {
      // make sure that all the inputs are similar to the first one,
      // i.e. AddP with base == address and same offset as first AddP
      bool doit = true;
      for (uint i = 2; i < req(); i++) {
        if (in(i) == NULL ||
            in(i)->Opcode() != Op_AddP ||
            in(i)->in(AddPNode::Base) != in(i)->in(AddPNode::Address) ||
            in(i)->in(AddPNode::Offset) != y) {
          doit = false;
          break;
        }
        // Accumulate type for resulting Phi
        type = type->meet_speculative(in(i)->in(AddPNode::Base)->bottom_type());
      }
      Node* base = NULL;
      if (doit) {
        // Check for neighboring AddP nodes in a tree.
        // If they have a base, use it.
        for (DUIterator_Fast kmax, k = this->fast_outs(kmax); k < kmax; k++) {
          Node* u = this->fast_out(k);
          if (u->is_AddP()) {
            Node* base2 = u->in(AddPNode::Base);
            if (base2 != NULL && !base2->is_top()) {
              if (base == NULL)
                base = base2;
              else if (base != base2)
                { doit = false; break; } // conflicting bases: give up
            }
          }
        }
      }
      if (doit) {
        if (base == NULL) {
          base = new PhiNode(in(0), type, NULL);
          for (uint i = 1; i < req(); i++) {
            base->init_req(i, in(i)->in(AddPNode::Base));
          }
          phase->is_IterGVN()->register_new_node_with_optimizer(base);
        }
        return new AddPNode(base, base, y);
      }
    }
  }

  // Split phis through memory merges, so that the memory merges will go away.
  // Piggy-back this transformation on the search for a unique input....
  // It will be as if the merged memory is the unique value of the phi.
  // (Do not attempt this optimization unless parsing is complete.
  // It would make the parser's memory-merge logic sick.)
  // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
  if (progress == NULL && can_reshape && type() == Type::MEMORY) {
    // see if this phi should be sliced
    uint merge_width = 0;
    bool saw_self = false;
    for( uint i=1; i<req(); ++i ) {// For all paths in
      Node *ii = in(i);
      // TOP inputs should not be counted as safe inputs because if the
      // Phi references itself through all other inputs then splitting the
      // Phi through memory merges would create dead loop at later stage.
      if (ii == top) {
        return NULL; // Delay optimization until graph is cleaned.
      }
      if (ii->is_MergeMem()) {
        MergeMemNode* n = ii->as_MergeMem();
        merge_width = MAX2(merge_width, n->req());
        saw_self = saw_self || phase->eqv(n->base_memory(), this);
      }
    }

    // This restriction is temporarily necessary to ensure termination:
    if (!saw_self && adr_type() == TypePtr::BOTTOM)  merge_width = 0;

    if (merge_width > Compile::AliasIdxRaw) {
      // found at least one non-empty MergeMem
      const TypePtr* at = adr_type();
      if (at != TypePtr::BOTTOM) {
        // Patch the existing phi to select an input from the merge:
        // Phi:AT1(...MergeMem(m0, m1, m2)...) into
        //     Phi:AT1(...m1...)
        int alias_idx = phase->C->get_alias_index(at);
        for (uint i=1; i<req(); ++i) {
          Node *ii = in(i);
          if (ii->is_MergeMem()) {
            MergeMemNode* n = ii->as_MergeMem();
            // compress paths and change unreachable cycles to TOP
            // If not, we can update the input infinitely along a MergeMem cycle
            // Equivalent code is in MemNode::Ideal_common
            Node *m  = phase->transform(n);
            if (outcnt() == 0) {  // Above transform() may kill us!
              return top;
            }
            // If transformed to a MergeMem, get the desired slice
            // Otherwise the returned node represents memory for every slice
            Node *new_mem = (m->is_MergeMem()) ?
                             m->as_MergeMem()->memory_at(alias_idx) : m;
            // Update input if it is progress over what we have now
            if (new_mem != ii) {
              set_req(i, new_mem);
              progress = this;
            }
          }
        }
      } else {
        // We know that at least one MergeMem->base_memory() == this
        // (saw_self == true). If all other inputs also reference this phi
        // (directly or through data nodes) - it is a dead loop.
        bool saw_safe_input = false;
        for (uint j = 1; j < req(); ++j) {
          Node *n = in(j);
          if (n->is_MergeMem() && n->as_MergeMem()->base_memory() == this)
            continue;              // skip known cases
          if (!is_unsafe_data_reference(n)) {
            saw_safe_input = true; // found safe input
            break;
          }
        }
        if (!saw_safe_input)
          return top; // all inputs reference back to this phi - dead loop

        // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
        //     MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
        PhaseIterGVN *igvn = phase->is_IterGVN();
        // 'hook' keeps the freshly created phis alive (and in the IGVN
        // tables) until the transformation below is complete.
        Node* hook = new Node(1);
        PhiNode* new_base = (PhiNode*) clone();
        // Must eagerly register phis, since they participate in loops.
        if (igvn) {
          igvn->register_new_node_with_optimizer(new_base);
          hook->add_req(new_base);
        }
        MergeMemNode* result = MergeMemNode::make(new_base);
        for (uint i = 1; i < req(); ++i) {
          Node *ii = in(i);
          if (ii->is_MergeMem()) {
            MergeMemNode* n = ii->as_MergeMem();
            for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
              // If we have not seen this slice yet, make a phi for it.
              bool made_new_phi = false;
              if (mms.is_empty()) {
                Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
                made_new_phi = true;
                if (igvn) {
                  igvn->register_new_node_with_optimizer(new_phi);
                  hook->add_req(new_phi);
                }
                mms.set_memory(new_phi);
              }
              Node* phi = mms.memory();
              assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
              phi->set_req(i, mms.memory2());
            }
          }
        }
        // Distribute all self-loops.
        { // (Extra braces to hide mms.)
          for (MergeMemStream mms(result); mms.next_non_empty(); ) {
            Node* phi = mms.memory();
            for (uint i = 1; i < req(); ++i) {
              if (phi->in(i) == this)  phi->set_req(i, phi);
            }
          }
        }
        // now transform the new nodes, and return the mergemem
        for (MergeMemStream mms(result); mms.next_non_empty(); ) {
          Node* phi = mms.memory();
          mms.set_memory(phase->transform(phi));
        }
        if (igvn) { // Unhook.
          igvn->hash_delete(hook);
          for (uint i = 1; i < hook->req(); i++) {
            hook->set_req(i, NULL);
          }
        }
        // Replace self with the result.
        return result;
      }
    }
    //
    // Other optimizations on the memory chain
    //
    const TypePtr* at = adr_type();
    for( uint i=1; i<req(); ++i ) {// For all paths in
      Node *ii = in(i);
      Node *new_in = MemNode::optimize_memory_chain(ii, at, NULL, phase);
      if (ii != new_in ) {
        set_req(i, new_in);
        progress = this;
      }
    }
  }

#ifdef _LP64
  // Push DecodeN/DecodeNKlass down through phi.
  // The rest of phi graph will transform by split EncodeP node through phis up.
  if ((UseCompressedOops || UseCompressedClassPointers) && can_reshape && progress == NULL) {
    bool may_push = true;
    bool has_decodeN = false;
    bool is_decodeN = false;
    for (uint i=1; i<req(); ++i) {// For all paths in
      Node *ii = in(i);
      if (ii->is_DecodeNarrowPtr() && ii->bottom_type() == bottom_type()) {
        // Do optimization if a non dead path exists.
        if (ii->in(1)->bottom_type() != Type::TOP) {
          has_decodeN = true;
          is_decodeN = ii->is_DecodeN();
        }
      } else if (!ii->is_Phi()) {
        may_push = false;
      }
    }

    if (has_decodeN && may_push) {
      PhaseIterGVN *igvn = phase->is_IterGVN();
      // Make narrow type for new phi.
      const Type* narrow_t;
      if (is_decodeN) {
        narrow_t = TypeNarrowOop::make(this->bottom_type()->is_ptr());
      } else {
        narrow_t = TypeNarrowKlass::make(this->bottom_type()->is_ptr());
      }
      PhiNode* new_phi = new PhiNode(r, narrow_t);
      uint orig_cnt = req();
      for (uint i=1; i<req(); ++i) {// For all paths in
        Node *ii = in(i);
        Node* new_ii = NULL;
        if (ii->is_DecodeNarrowPtr()) {
          assert(ii->bottom_type() == bottom_type(), "sanity");
          new_ii = ii->in(1);
        } else {
          assert(ii->is_Phi(), "sanity");
          if (ii->as_Phi() == this) {
            new_ii = new_phi; // self-loop maps onto the new narrow phi
          } else {
            if (is_decodeN) {
              new_ii = new EncodePNode(ii, narrow_t);
            } else {
              new_ii = new EncodePKlassNode(ii, narrow_t);
            }
            igvn->register_new_node_with_optimizer(new_ii);
          }
        }
        new_phi->set_req(i, new_ii);
      }
      igvn->register_new_node_with_optimizer(new_phi, this);
      if (is_decodeN) {
        progress = new DecodeNNode(new_phi, bottom_type());
      } else {
        progress = new DecodeNKlassNode(new_phi, bottom_type());
      }
    }
  }
#endif

  return progress;              // Return any progress
}

//------------------------------is_tripcount-----------------------------------
bool
PhiNode::is_tripcount() const {
  // True iff this phi is the induction-variable phi of its CountedLoop.
  return (in(0) != NULL && in(0)->is_CountedLoop() &&
          in(0)->as_CountedLoop()->phi() == this);
}

//------------------------------in_RegMask--------------------------------------
// Input 0 is the control (Region) input and needs no register; all data
// inputs must land wherever the phi's output lives.
const RegMask &PhiNode::in_RegMask(uint i) const {
  return i ? out_RegMask() : RegMask::Empty;
}

//------------------------------out_RegMask------------------------------------
const RegMask &PhiNode::out_RegMask() const {
  uint ideal_reg = _type->ideal_reg();
  assert( ideal_reg != Node::NotAMachineReg, "invalid type at Phi" );
  if( ideal_reg == 0 ) return RegMask::Empty;
  assert(ideal_reg != Op_RegFlags, "flags register is not spillable");
  // Phis may be spilled, so allow the full spill mask for the ideal register.
  return *(Compile::current()->matcher()->idealreg2spillmask[ideal_reg]);
}

#ifndef PRODUCT
void PhiNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
  // For a PhiNode, the set of related nodes includes all inputs till level 2,
  // and all outputs till level 1. In compact mode, inputs till level 1 are
  // collected.
  this->collect_nodes(in_rel, compact ? 1 : 2, false, false);
  this->collect_nodes(out_rel, -1, false, false);
}

void PhiNode::dump_spec(outputStream *st) const {
  TypeNode::dump_spec(st);
  if (is_tripcount()) {
    st->print(" #tripcount");
  }
}
#endif


//=============================================================================
const Type* GotoNode::Value(PhaseGVN* phase) const {
  // If the input is reachable, then we are executed.
  // If the input is not reachable, then we are not executed.
  return phase->type(in(0));
}

Node* GotoNode::Identity(PhaseGVN* phase) {
  return in(0);                // Simple copy of incoming control
}

const RegMask &GotoNode::out_RegMask() const {
  return RegMask::Empty;
}

#ifndef PRODUCT
//-----------------------------related-----------------------------------------
// The related nodes of a GotoNode are all inputs at level 1, as well as the
// outputs at level 1. This is regardless of compact mode.
void GotoNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
  this->collect_nodes(in_rel, 1, false, false);
  this->collect_nodes(out_rel, -1, false, false);
}
#endif


//=============================================================================
const RegMask &JumpNode::out_RegMask() const {
  return RegMask::Empty;
}

#ifndef PRODUCT
//-----------------------------related-----------------------------------------
// The related nodes of a JumpNode are all inputs at level 1, as well as the
// outputs at level 2 (to include actual jump targets beyond projection nodes).
// This is regardless of compact mode.
void JumpNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
  this->collect_nodes(in_rel, 1, false, false);
  this->collect_nodes(out_rel, -2, false, false);
}
#endif

//=============================================================================
const RegMask &JProjNode::out_RegMask() const {
  return RegMask::Empty;
}

//=============================================================================
const RegMask &CProjNode::out_RegMask() const {
  return RegMask::Empty;
}


//=============================================================================
// The table size participates in hash/equality so GVN never merges
// PCTables with different numbers of targets.
uint PCTableNode::hash() const { return Node::hash() + _size; }
uint PCTableNode::cmp( const Node &n ) const
{ return _size == ((PCTableNode&)n)._size; }

const Type *PCTableNode::bottom_type() const {
  // A tuple with one CONTROL field per jump target.
  const Type** f = TypeTuple::fields(_size);
  for( uint i = 0; i < _size; i++ ) f[i] = Type::CONTROL;
  return TypeTuple::make(_size, f);
}

//------------------------------Value------------------------------------------
// Compute the type of the PCTableNode.  If reachable it is a tuple of
// Control, otherwise the table targets are not reachable
const Type* PCTableNode::Value(PhaseGVN* phase) const {
  if( phase->type(in(0)) == Type::CONTROL )
    return bottom_type();
  return Type::TOP;             // All paths dead?  Then so are we
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Strip out
// control copies
Node *PCTableNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

//=============================================================================
// The destination bci participates in hash/equality so projections to
// different jump targets are never merged by GVN.
uint JumpProjNode::hash() const {
  return Node::hash() + _dest_bci;
}

uint JumpProjNode::cmp( const Node &n ) const {
  return ProjNode::cmp(n) &&
         _dest_bci == ((JumpProjNode&)n)._dest_bci;
}

#ifndef PRODUCT
void JumpProjNode::dump_spec(outputStream *st) const {
  ProjNode::dump_spec(st);
  st->print("@bci %d ",_dest_bci);
}

void JumpProjNode::dump_compact_spec(outputStream *st) const {
  ProjNode::dump_compact_spec(st);
  st->print("(%d)%d@%d", _switch_val, _proj_no, _dest_bci);
}

void JumpProjNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
  // The related nodes of a JumpProjNode are its inputs and outputs at level 1.
  this->collect_nodes(in_rel, 1, false, false);
  this->collect_nodes(out_rel, -1, false, false);
}
#endif

//=============================================================================
//------------------------------Value------------------------------------------
// Check for being unreachable, or for coming from a Rethrow.  Rethrow's cannot
// have the default "fall_through_index" path.
const Type* CatchNode::Value(PhaseGVN* phase) const {
  // Unreachable?  Then so are all paths from here.
  if( phase->type(in(0)) == Type::TOP ) return Type::TOP;
  // First assume all paths are reachable
  const Type** f = TypeTuple::fields(_size);
  for( uint i = 0; i < _size; i++ ) f[i] = Type::CONTROL;
  // Identify cases that will always throw an exception
  // () rethrow call
  // () virtual or interface call with NULL receiver
  // () call is a check cast with incompatible arguments
  if( in(1)->is_Proj() ) {
    Node *i10 = in(1)->in(0);
    if( i10->is_Call() ) {
      CallNode *call = i10->as_Call();
      // Rethrows always throw exceptions, never return
      if (call->entry_point() == OptoRuntime::rethrow_stub()) {
        f[CatchProjNode::fall_through_index] = Type::TOP;
      } else if( call->req() > TypeFunc::Parms ) {
        const Type *arg0 = phase->type( call->in(TypeFunc::Parms) );
        // Check for null receiver to virtual or interface calls
        if( call->is_CallDynamicJava() &&
            arg0->higher_equal(TypePtr::NULL_PTR) ) {
          f[CatchProjNode::fall_through_index] = Type::TOP;
        }
      } // End of if not a runtime stub
    } // End of if have call above me
  } // End of if slot 1 is a projection
  return TypeTuple::make(_size, f);
}

//=============================================================================
// The handler bci participates in hash/equality so catch projections to
// different handlers are never merged by GVN.
uint CatchProjNode::hash() const {
  return Node::hash() + _handler_bci;
}


uint CatchProjNode::cmp( const Node &n ) const {
  return ProjNode::cmp(n) &&
         _handler_bci == ((CatchProjNode&)n)._handler_bci;
}


//------------------------------Identity---------------------------------------
// If only 1 target is possible, choose it if it is the main control
Node* CatchProjNode::Identity(PhaseGVN* phase) {
  // If my value is control and no other value is, then treat as ID
  const TypeTuple *t = phase->type(in(0))->is_tuple();
  if (t->field_at(_con) != Type::CONTROL)  return this;
  // If we remove the last CatchProj and elide the Catch/CatchProj, then we
  // also remove any exception table entry.  Thus we must know the call
  // feeding the Catch will not really throw an exception.  This is ok for
  // the main fall-thru control (happens when we know a call can never throw
  // an exception) or for "rethrow", because a further optimization will
  // yank the rethrow (happens when we inline a function that can throw an
  // exception and the caller has no handler).  Not legal, e.g., for passing
  // a NULL receiver to a v-call, or passing bad types to a slow-check-cast.
  // These cases MUST throw an exception via the runtime system, so the VM
  // will be looking for a table entry.
  Node *proj = in(0)->in(1);    // Expect a proj feeding CatchNode
  CallNode *call;
  if (_con != TypeFunc::Control && // Bail out if not the main control.
      !(proj->is_Proj() &&      // AND NOT a rethrow
        proj->in(0)->is_Call() &&
        (call = proj->in(0)->as_Call()) &&
        call->entry_point() == OptoRuntime::rethrow_stub()))
    return this;

  // Search for any other path being control
  for (uint i = 0; i < t->cnt(); i++) {
    if (i != _con && t->field_at(i) == Type::CONTROL)
      return this;
  }
  // Only my path is possible; I am identity on control to the jump
  return in(0)->in(0);
}


#ifndef PRODUCT
void CatchProjNode::dump_spec(outputStream *st) const {
  ProjNode::dump_spec(st);
  st->print("@bci %d ",_handler_bci);
}
#endif

//=============================================================================
//------------------------------Identity---------------------------------------
// Check for CreateEx being Identity.
Node* CreateExNode::Identity(PhaseGVN* phase) {
  if( phase->type(in(1)) == Type::TOP ) return in(1);
  if( phase->type(in(0)) == Type::TOP ) return in(0);
  // We only come from CatchProj, unless the CatchProj goes away.
  // If the CatchProj is optimized away, then we just carry the
  // exception oop through.

  // CheckCastPPNode::Ideal() for value types reuses the exception
  // paths of a call to perform an allocation: we can see a Phi here.
  if (in(1)->is_Phi()) {
    return this;
  }
  CallNode *call = in(1)->in(0)->as_Call();

  return ( in(0)->is_CatchProj() && in(0)->in(0)->in(1) == in(1) )
    ? this
    : call->in(TypeFunc::Parms);
}

//=============================================================================
//------------------------------Value------------------------------------------
// Check for being unreachable.
const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
  if (!in(0) || in(0)->is_top()) return Type::TOP;
  return bottom_type();
}

//------------------------------Ideal------------------------------------------
// Check for no longer being part of a loop
Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (can_reshape && !in(0)->is_Loop()) {
    // Dead code elimination can sometimes delete this projection so
    // if it's not there, there's nothing to do.
    Node* fallthru = proj_out(0);
    if (fallthru != NULL) {
      phase->is_IterGVN()->replace_node(fallthru, in(0));
    }
    return phase->C->top();
  }
  return NULL;
}

#ifndef PRODUCT
void NeverBranchNode::format( PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("%s", Name());
}
#endif