1 /* 2 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "compiler/compileLog.hpp" 27 #include "memory/allocation.inline.hpp" 28 #include "opto/addnode.hpp" 29 #include "opto/callnode.hpp" 30 #include "opto/castnode.hpp" 31 #include "opto/connode.hpp" 32 #include "opto/convertnode.hpp" 33 #include "opto/divnode.hpp" 34 #include "opto/loopnode.hpp" 35 #include "opto/mulnode.hpp" 36 #include "opto/movenode.hpp" 37 #include "opto/opaquenode.hpp" 38 #include "opto/rootnode.hpp" 39 #include "opto/runtime.hpp" 40 #include "opto/subnode.hpp" 41 #include "opto/superword.hpp" 42 #include "opto/vectornode.hpp" 43 44 //------------------------------is_loop_exit----------------------------------- 45 // Given an IfNode, return the loop-exiting projection or NULL if both 46 // arms remain in the loop. 47 Node *IdealLoopTree::is_loop_exit(Node *iff) const { 48 if( iff->outcnt() != 2 ) return NULL; // Ignore partially dead tests 49 PhaseIdealLoop *phase = _phase; 50 // Test is an IfNode, has 2 projections. If BOTH are in the loop 51 // we need loop unswitching instead of peeling. 52 if( !is_member(phase->get_loop( iff->raw_out(0) )) ) 53 return iff->raw_out(0); 54 if( !is_member(phase->get_loop( iff->raw_out(1) )) ) 55 return iff->raw_out(1); 56 return NULL; 57 } 58 59 60 //============================================================================= 61 62 63 //------------------------------record_for_igvn---------------------------- 64 // Put loop body on igvn work list 65 void IdealLoopTree::record_for_igvn() { 66 for( uint i = 0; i < _body.size(); i++ ) { 67 Node *n = _body.at(i); 68 _phase->_igvn._worklist.push(n); 69 } 70 } 71 72 //------------------------------compute_exact_trip_count----------------------- 73 // Compute loop exact trip count if possible. Do not recalculate trip count for 74 // split loops (pre-main-post) which have their limits and inits behind Opaque node. 75 void IdealLoopTree::compute_exact_trip_count( PhaseIdealLoop *phase ) { 76 if (!_head->as_Loop()->is_valid_counted_loop()) { 77 return; 78 } 79 CountedLoopNode* cl = _head->as_CountedLoop(); 80 // Trip count may become nonexact for iteration split loops since 81 // RCE modifies limits. Note, _trip_count value is not reset since 82 // it is used to limit unrolling of main loop. 83 cl->set_nonexact_trip_count(); 84 85 // Loop's test should be part of loop. 86 if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue)))) 87 return; // Infinite loop 88 89 #ifdef ASSERT 90 BoolTest::mask bt = cl->loopexit()->test_trip(); 91 assert(bt == BoolTest::lt || bt == BoolTest::gt || 92 bt == BoolTest::ne, "canonical test is expected"); 93 #endif 94 95 Node* init_n = cl->init_trip(); 96 Node* limit_n = cl->limit(); 97 if (init_n != NULL && init_n->is_Con() && 98 limit_n != NULL && limit_n->is_Con()) { 99 // Use longs to avoid integer overflow. 100 int stride_con = cl->stride_con(); 101 jlong init_con = cl->init_trip()->get_int(); 102 jlong limit_con = cl->limit()->get_int(); 103 int stride_m = stride_con - (stride_con > 0 ? 1 : -1); 104 jlong trip_count = (limit_con - init_con + stride_m)/stride_con; 105 if (trip_count > 0 && (julong)trip_count < (julong)max_juint) { 106 // Set exact trip count. 107 cl->set_exact_trip_count((uint)trip_count); 108 } 109 } 110 } 111 112 //------------------------------compute_profile_trip_cnt---------------------------- 113 // Compute loop trip count from profile data as 114 // (backedge_count + loop_exit_count) / loop_exit_count 115 void IdealLoopTree::compute_profile_trip_cnt( PhaseIdealLoop *phase ) { 116 if (!_head->is_CountedLoop()) { 117 return; 118 } 119 CountedLoopNode* head = _head->as_CountedLoop(); 120 if (head->profile_trip_cnt() != COUNT_UNKNOWN) { 121 return; // Already computed 122 } 123 float trip_cnt = (float)max_jint; // default is big 124 125 Node* back = head->in(LoopNode::LoopBackControl); 126 while (back != head) { 127 if ((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) && 128 back->in(0) && 129 back->in(0)->is_If() && 130 back->in(0)->as_If()->_fcnt != COUNT_UNKNOWN && 131 back->in(0)->as_If()->_prob != PROB_UNKNOWN) { 132 break; 133 } 134 back = phase->idom(back); 135 } 136 if (back != head) { 137 assert((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) && 138 back->in(0), "if-projection exists"); 139 IfNode* back_if = back->in(0)->as_If(); 140 float loop_back_cnt = back_if->_fcnt * back_if->_prob; 141 142 // Now compute a loop exit count 143 float loop_exit_cnt = 0.0f; 144 for( uint i = 0; i < _body.size(); i++ ) { 145 Node *n = _body[i]; 146 if( n->is_If() ) { 147 IfNode *iff = n->as_If(); 148 if( iff->_fcnt != COUNT_UNKNOWN && iff->_prob != PROB_UNKNOWN ) { 149 Node *exit = is_loop_exit(iff); 150 if( exit ) { 151 float exit_prob = iff->_prob; 152 if (exit->Opcode() == Op_IfFalse) exit_prob = 1.0 - exit_prob; 153 if (exit_prob > PROB_MIN) { 154 float exit_cnt = iff->_fcnt * exit_prob; 155 loop_exit_cnt += exit_cnt; 156 } 157 } 158 } 159 } 160 } 161 if (loop_exit_cnt > 0.0f) { 162 trip_cnt = (loop_back_cnt + loop_exit_cnt) / loop_exit_cnt; 163 } else { 164 // No exit count so use 165 trip_cnt = loop_back_cnt; 166 } 167 } 168 #ifndef PRODUCT 169 if (TraceProfileTripCount) { 170 tty->print_cr("compute_profile_trip_cnt lp: %d cnt: %f\n", head->_idx, trip_cnt); 171 } 172 #endif 173 head->set_profile_trip_cnt(trip_cnt); 174 } 175 176 //---------------------is_invariant_addition----------------------------- 177 // Return nonzero index of invariant operand for an Add or Sub 178 // of (nonconstant) invariant and variant values. Helper for reassociate_invariants. 179 int IdealLoopTree::is_invariant_addition(Node* n, PhaseIdealLoop *phase) { 180 int op = n->Opcode(); 181 if (op == Op_AddI || op == Op_SubI) { 182 bool in1_invar = this->is_invariant(n->in(1)); 183 bool in2_invar = this->is_invariant(n->in(2)); 184 if (in1_invar && !in2_invar) return 1; 185 if (!in1_invar && in2_invar) return 2; 186 } 187 return 0; 188 } 189 190 //---------------------reassociate_add_sub----------------------------- 191 // Reassociate invariant add and subtract expressions: 192 // 193 // inv1 + (x + inv2) => ( inv1 + inv2) + x 194 // (x + inv2) + inv1 => ( inv1 + inv2) + x 195 // inv1 + (x - inv2) => ( inv1 - inv2) + x 196 // inv1 - (inv2 - x) => ( inv1 - inv2) + x 197 // (x + inv2) - inv1 => (-inv1 + inv2) + x 198 // (x - inv2) + inv1 => ( inv1 - inv2) + x 199 // (x - inv2) - inv1 => (-inv1 - inv2) + x 200 // inv1 + (inv2 - x) => ( inv1 + inv2) - x 201 // inv1 - (x - inv2) => ( inv1 + inv2) - x 202 // (inv2 - x) + inv1 => ( inv1 + inv2) - x 203 // (inv2 - x) - inv1 => (-inv1 + inv2) - x 204 // inv1 - (x + inv2) => ( inv1 - inv2) - x 205 // 206 Node* IdealLoopTree::reassociate_add_sub(Node* n1, PhaseIdealLoop *phase) { 207 if (!n1->is_Add() && !n1->is_Sub() || n1->outcnt() == 0) return NULL; 208 if (is_invariant(n1)) return NULL; 209 int inv1_idx = is_invariant_addition(n1, phase); 210 if (!inv1_idx) return NULL; 211 // Don't mess with add of constant (igvn moves them to expression tree root.) 212 if (n1->is_Add() && n1->in(2)->is_Con()) return NULL; 213 Node* inv1 = n1->in(inv1_idx); 214 Node* n2 = n1->in(3 - inv1_idx); 215 int inv2_idx = is_invariant_addition(n2, phase); 216 if (!inv2_idx) return NULL; 217 Node* x = n2->in(3 - inv2_idx); 218 Node* inv2 = n2->in(inv2_idx); 219 220 bool neg_x = n2->is_Sub() && inv2_idx == 1; 221 bool neg_inv2 = n2->is_Sub() && inv2_idx == 2; 222 bool neg_inv1 = n1->is_Sub() && inv1_idx == 2; 223 if (n1->is_Sub() && inv1_idx == 1) { 224 neg_x = !neg_x; 225 neg_inv2 = !neg_inv2; 226 } 227 Node* inv1_c = phase->get_ctrl(inv1); 228 Node* inv2_c = phase->get_ctrl(inv2); 229 Node* n_inv1; 230 if (neg_inv1) { 231 Node *zero = phase->_igvn.intcon(0); 232 phase->set_ctrl(zero, phase->C->root()); 233 n_inv1 = new SubINode(zero, inv1); 234 phase->register_new_node(n_inv1, inv1_c); 235 } else { 236 n_inv1 = inv1; 237 } 238 Node* inv; 239 if (neg_inv2) { 240 inv = new SubINode(n_inv1, inv2); 241 } else { 242 inv = new AddINode(n_inv1, inv2); 243 } 244 phase->register_new_node(inv, phase->get_early_ctrl(inv)); 245 246 Node* addx; 247 if (neg_x) { 248 addx = new SubINode(inv, x); 249 } else { 250 addx = new AddINode(x, inv); 251 } 252 phase->register_new_node(addx, phase->get_ctrl(x)); 253 phase->_igvn.replace_node(n1, addx); 254 assert(phase->get_loop(phase->get_ctrl(n1)) == this, ""); 255 _body.yank(n1); 256 return addx; 257 } 258 259 //---------------------reassociate_invariants----------------------------- 260 // Reassociate invariant expressions: 261 void IdealLoopTree::reassociate_invariants(PhaseIdealLoop *phase) { 262 for (int i = _body.size() - 1; i >= 0; i--) { 263 Node *n = _body.at(i); 264 for (int j = 0; j < 5; j++) { 265 Node* nn = reassociate_add_sub(n, phase); 266 if (nn == NULL) break; 267 n = nn; // again 268 }; 269 } 270 } 271 272 //------------------------------policy_peeling--------------------------------- 273 // Return TRUE or FALSE if the loop should be peeled or not. Peel if we can 274 // make some loop-invariant test (usually a null-check) happen before the loop. 275 bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const { 276 Node *test = ((IdealLoopTree*)this)->tail(); 277 int body_size = ((IdealLoopTree*)this)->_body.size(); 278 // Peeling does loop cloning which can result in O(N^2) node construction 279 if( body_size > 255 /* Prevent overflow for large body_size */ 280 || (body_size * body_size + phase->C->live_nodes()) > phase->C->max_node_limit() ) { 281 return false; // too large to safely clone 282 } 283 284 // check for vectorized loops, any peeling done was already applied 285 if (_head->is_CountedLoop() && _head->as_CountedLoop()->do_unroll_only()) return false; 286 287 while( test != _head ) { // Scan till run off top of loop 288 if( test->is_If() ) { // Test? 289 Node *ctrl = phase->get_ctrl(test->in(1)); 290 if (ctrl->is_top()) 291 return false; // Found dead test on live IF? No peeling! 292 // Standard IF only has one input value to check for loop invariance 293 assert( test->Opcode() == Op_If || test->Opcode() == Op_CountedLoopEnd, "Check this code when new subtype is added"); 294 // Condition is not a member of this loop? 295 if( !is_member(phase->get_loop(ctrl)) && 296 is_loop_exit(test) ) 297 return true; // Found reason to peel! 298 } 299 // Walk up dominators to loop _head looking for test which is 300 // executed on every path thru loop. 301 test = phase->idom(test); 302 } 303 return false; 304 } 305 306 //------------------------------peeled_dom_test_elim--------------------------- 307 // If we got the effect of peeling, either by actually peeling or by making 308 // a pre-loop which must execute at least once, we can remove all 309 // loop-invariant dominated tests in the main body. 310 void PhaseIdealLoop::peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new ) { 311 bool progress = true; 312 while( progress ) { 313 progress = false; // Reset for next iteration 314 Node *prev = loop->_head->in(LoopNode::LoopBackControl);//loop->tail(); 315 Node *test = prev->in(0); 316 while( test != loop->_head ) { // Scan till run off top of loop 317 318 int p_op = prev->Opcode(); 319 if( (p_op == Op_IfFalse || p_op == Op_IfTrue) && 320 test->is_If() && // Test? 321 !test->in(1)->is_Con() && // And not already obvious? 322 // Condition is not a member of this loop? 323 !loop->is_member(get_loop(get_ctrl(test->in(1))))){ 324 // Walk loop body looking for instances of this test 325 for( uint i = 0; i < loop->_body.size(); i++ ) { 326 Node *n = loop->_body.at(i); 327 if( n->is_If() && n->in(1) == test->in(1) /*&& n != loop->tail()->in(0)*/ ) { 328 // IfNode was dominated by version in peeled loop body 329 progress = true; 330 dominated_by( old_new[prev->_idx], n ); 331 } 332 } 333 } 334 prev = test; 335 test = idom(test); 336 } // End of scan tests in loop 337 338 } // End of while( progress ) 339 } 340 341 //------------------------------do_peeling------------------------------------- 342 // Peel the first iteration of the given loop. 343 // Step 1: Clone the loop body. The clone becomes the peeled iteration. 344 // The pre-loop illegally has 2 control users (old & new loops). 345 // Step 2: Make the old-loop fall-in edges point to the peeled iteration. 346 // Do this by making the old-loop fall-in edges act as if they came 347 // around the loopback from the prior iteration (follow the old-loop 348 // backedges) and then map to the new peeled iteration. This leaves 349 // the pre-loop with only 1 user (the new peeled iteration), but the 350 // peeled-loop backedge has 2 users. 351 // Step 3: Cut the backedge on the clone (so its not a loop) and remove the 352 // extra backedge user. 353 // 354 // orig 355 // 356 // stmt1 357 // | 358 // v 359 // loop predicate 360 // | 361 // v 362 // loop<----+ 363 // | | 364 // stmt2 | 365 // | | 366 // v | 367 // if ^ 368 // / \ | 369 // / \ | 370 // v v | 371 // false true | 372 // / \ | 373 // / ----+ 374 // | 375 // v 376 // exit 377 // 378 // 379 // after clone loop 380 // 381 // stmt1 382 // | 383 // v 384 // loop predicate 385 // / \ 386 // clone / \ orig 387 // / \ 388 // / \ 389 // v v 390 // +---->loop clone loop<----+ 391 // | | | | 392 // | stmt2 clone stmt2 | 393 // | | | | 394 // | v v | 395 // ^ if clone If ^ 396 // | / \ / \ | 397 // | / \ / \ | 398 // | v v v v | 399 // | true false false true | 400 // | / \ / \ | 401 // +---- \ / ----+ 402 // \ / 403 // 1v v2 404 // region 405 // | 406 // v 407 // exit 408 // 409 // 410 // after peel and predicate move 411 // 412 // stmt1 413 // / 414 // / 415 // clone / orig 416 // / 417 // / +----------+ 418 // / | | 419 // / loop predicate | 420 // / | | 421 // v v | 422 // TOP-->loop clone loop<----+ | 423 // | | | | 424 // stmt2 clone stmt2 | | 425 // | | | ^ 426 // v v | | 427 // if clone If ^ | 428 // / \ / \ | | 429 // / \ / \ | | 430 // v v v v | | 431 // true false false true | | 432 // | \ / \ | | 433 // | \ / ----+ ^ 434 // | \ / | 435 // | 1v v2 | 436 // v region | 437 // | | | 438 // | v | 439 // | exit | 440 // | | 441 // +--------------->-----------------+ 442 // 443 // 444 // final graph 445 // 446 // stmt1 447 // | 448 // v 449 // stmt2 clone 450 // | 451 // v 452 // if clone 453 // / | 454 // / | 455 // v v 456 // false true 457 // | | 458 // | v 459 // | loop predicate 460 // | | 461 // | v 462 // | loop<----+ 463 // | | | 464 // | stmt2 | 465 // | | | 466 // | v | 467 // v if ^ 468 // | / \ | 469 // | / \ | 470 // | v v | 471 // | false true | 472 // | | \ | 473 // v v --+ 474 // region 475 // | 476 // v 477 // exit 478 // 479 void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) { 480 481 C->set_major_progress(); 482 // Peeling a 'main' loop in a pre/main/post situation obfuscates the 483 // 'pre' loop from the main and the 'pre' can no longer have its 484 // iterations adjusted. Therefore, we need to declare this loop as 485 // no longer a 'main' loop; it will need new pre and post loops before 486 // we can do further RCE. 487 #ifndef PRODUCT 488 if (TraceLoopOpts) { 489 tty->print("Peel "); 490 loop->dump_head(); 491 } 492 #endif 493 Node* head = loop->_head; 494 bool counted_loop = head->is_CountedLoop(); 495 if (counted_loop) { 496 CountedLoopNode *cl = head->as_CountedLoop(); 497 assert(cl->trip_count() > 0, "peeling a fully unrolled loop"); 498 cl->set_trip_count(cl->trip_count() - 1); 499 if (cl->is_main_loop()) { 500 cl->set_normal_loop(); 501 #ifndef PRODUCT 502 if (PrintOpto && VerifyLoopOptimizations) { 503 tty->print("Peeling a 'main' loop; resetting to 'normal' "); 504 loop->dump_head(); 505 } 506 #endif 507 } 508 } 509 Node* entry = head->in(LoopNode::EntryControl); 510 511 // Step 1: Clone the loop body. The clone becomes the peeled iteration. 512 // The pre-loop illegally has 2 control users (old & new loops). 513 clone_loop( loop, old_new, dom_depth(head) ); 514 515 // Step 2: Make the old-loop fall-in edges point to the peeled iteration. 516 // Do this by making the old-loop fall-in edges act as if they came 517 // around the loopback from the prior iteration (follow the old-loop 518 // backedges) and then map to the new peeled iteration. This leaves 519 // the pre-loop with only 1 user (the new peeled iteration), but the 520 // peeled-loop backedge has 2 users. 521 Node* new_entry = old_new[head->in(LoopNode::LoopBackControl)->_idx]; 522 _igvn.hash_delete(head); 523 head->set_req(LoopNode::EntryControl, new_entry); 524 for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) { 525 Node* old = head->fast_out(j); 526 if (old->in(0) == loop->_head && old->req() == 3 && old->is_Phi()) { 527 Node* new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx]; 528 if (!new_exit_value ) // Backedge value is ALSO loop invariant? 529 // Then loop body backedge value remains the same. 530 new_exit_value = old->in(LoopNode::LoopBackControl); 531 _igvn.hash_delete(old); 532 old->set_req(LoopNode::EntryControl, new_exit_value); 533 } 534 } 535 536 537 // Step 3: Cut the backedge on the clone (so its not a loop) and remove the 538 // extra backedge user. 539 Node* new_head = old_new[head->_idx]; 540 _igvn.hash_delete(new_head); 541 new_head->set_req(LoopNode::LoopBackControl, C->top()); 542 for (DUIterator_Fast j2max, j2 = new_head->fast_outs(j2max); j2 < j2max; j2++) { 543 Node* use = new_head->fast_out(j2); 544 if (use->in(0) == new_head && use->req() == 3 && use->is_Phi()) { 545 _igvn.hash_delete(use); 546 use->set_req(LoopNode::LoopBackControl, C->top()); 547 } 548 } 549 550 551 // Step 4: Correct dom-depth info. Set to loop-head depth. 552 int dd = dom_depth(head); 553 set_idom(head, head->in(1), dd); 554 for (uint j3 = 0; j3 < loop->_body.size(); j3++) { 555 Node *old = loop->_body.at(j3); 556 Node *nnn = old_new[old->_idx]; 557 if (!has_ctrl(nnn)) 558 set_idom(nnn, idom(nnn), dd-1); 559 } 560 561 // Now force out all loop-invariant dominating tests. The optimizer 562 // finds some, but we _know_ they are all useless. 563 peeled_dom_test_elim(loop,old_new); 564 565 loop->record_for_igvn(); 566 } 567 568 #define EMPTY_LOOP_SIZE 7 // number of nodes in an empty loop 569 570 //------------------------------policy_maximally_unroll------------------------ 571 // Calculate exact loop trip count and return true if loop can be maximally 572 // unrolled. 573 bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const { 574 CountedLoopNode *cl = _head->as_CountedLoop(); 575 assert(cl->is_normal_loop(), ""); 576 if (!cl->is_valid_counted_loop()) 577 return false; // Malformed counted loop 578 579 if (!cl->has_exact_trip_count()) { 580 // Trip count is not exact. 581 return false; 582 } 583 584 uint trip_count = cl->trip_count(); 585 // Note, max_juint is used to indicate unknown trip count. 586 assert(trip_count > 1, "one iteration loop should be optimized out already"); 587 assert(trip_count < max_juint, "exact trip_count should be less than max_uint."); 588 589 // Real policy: if we maximally unroll, does it get too big? 590 // Allow the unrolled mess to get larger than standard loop 591 // size. After all, it will no longer be a loop. 592 uint body_size = _body.size(); 593 uint unroll_limit = (uint)LoopUnrollLimit * 4; 594 assert( (intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits"); 595 if (trip_count > unroll_limit || body_size > unroll_limit) { 596 return false; 597 } 598 599 // Fully unroll a loop with few iterations regardless next 600 // conditions since following loop optimizations will split 601 // such loop anyway (pre-main-post). 602 if (trip_count <= 3) 603 return true; 604 605 // Take into account that after unroll conjoined heads and tails will fold, 606 // otherwise policy_unroll() may allow more unrolling than max unrolling. 607 uint new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count; 608 uint tst_body_size = (new_body_size - EMPTY_LOOP_SIZE) / trip_count + EMPTY_LOOP_SIZE; 609 if (body_size != tst_body_size) // Check for int overflow 610 return false; 611 if (new_body_size > unroll_limit || 612 // Unrolling can result in a large amount of node construction 613 new_body_size >= phase->C->max_node_limit() - phase->C->live_nodes()) { 614 return false; 615 } 616 617 // Do not unroll a loop with String intrinsics code. 618 // String intrinsics are large and have loops. 619 for (uint k = 0; k < _body.size(); k++) { 620 Node* n = _body.at(k); 621 switch (n->Opcode()) { 622 case Op_StrComp: 623 case Op_StrEquals: 624 case Op_StrIndexOf: 625 case Op_EncodeISOArray: 626 case Op_AryEq: { 627 return false; 628 } 629 #if INCLUDE_RTM_OPT 630 case Op_FastLock: 631 case Op_FastUnlock: { 632 // Don't unroll RTM locking code because it is large. 633 if (UseRTMLocking) { 634 return false; 635 } 636 } 637 #endif 638 } // switch 639 } 640 641 return true; // Do maximally unroll 642 } 643 644 645 //------------------------------policy_unroll---------------------------------- 646 // Return TRUE or FALSE if the loop should be unrolled or not. Unroll if 647 // the loop is a CountedLoop and the body is small enough. 648 bool IdealLoopTree::policy_unroll(PhaseIdealLoop *phase) { 649 650 CountedLoopNode *cl = _head->as_CountedLoop(); 651 assert(cl->is_normal_loop() || cl->is_main_loop(), ""); 652 653 if (!cl->is_valid_counted_loop()) 654 return false; // Malformed counted loop 655 656 // Protect against over-unrolling. 657 // After split at least one iteration will be executed in pre-loop. 658 if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false; 659 660 _local_loop_unroll_limit = LoopUnrollLimit; 661 _local_loop_unroll_factor = 4; 662 int future_unroll_ct = cl->unrolled_count() * 2; 663 if (!cl->do_unroll_only()) { 664 if (future_unroll_ct > LoopMaxUnroll) return false; 665 } else { 666 // obey user constraints on vector mapped loops with additional unrolling applied 667 if ((future_unroll_ct / cl->slp_max_unroll()) > LoopMaxUnroll) return false; 668 } 669 670 // Check for initial stride being a small enough constant 671 if (abs(cl->stride_con()) > (1<<2)*future_unroll_ct) return false; 672 673 // Don't unroll if the next round of unrolling would push us 674 // over the expected trip count of the loop. One is subtracted 675 // from the expected trip count because the pre-loop normally 676 // executes 1 iteration. 677 if (UnrollLimitForProfileCheck > 0 && 678 cl->profile_trip_cnt() != COUNT_UNKNOWN && 679 future_unroll_ct > UnrollLimitForProfileCheck && 680 (float)future_unroll_ct > cl->profile_trip_cnt() - 1.0) { 681 return false; 682 } 683 684 // When unroll count is greater than LoopUnrollMin, don't unroll if: 685 // the residual iterations are more than 10% of the trip count 686 // and rounds of "unroll,optimize" are not making significant progress 687 // Progress defined as current size less than 20% larger than previous size. 688 if (UseSuperWord && cl->node_count_before_unroll() > 0 && 689 future_unroll_ct > LoopUnrollMin && 690 (future_unroll_ct - 1) * 10.0 > cl->profile_trip_cnt() && 691 1.2 * cl->node_count_before_unroll() < (double)_body.size()) { 692 return false; 693 } 694 695 Node *init_n = cl->init_trip(); 696 Node *limit_n = cl->limit(); 697 int stride_con = cl->stride_con(); 698 // Non-constant bounds. 699 // Protect against over-unrolling when init or/and limit are not constant 700 // (so that trip_count's init value is maxint) but iv range is known. 701 if (init_n == NULL || !init_n->is_Con() || 702 limit_n == NULL || !limit_n->is_Con()) { 703 Node* phi = cl->phi(); 704 if (phi != NULL) { 705 assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi."); 706 const TypeInt* iv_type = phase->_igvn.type(phi)->is_int(); 707 int next_stride = stride_con * 2; // stride after this unroll 708 if (next_stride > 0) { 709 if (iv_type->_lo + next_stride <= iv_type->_lo || // overflow 710 iv_type->_lo + next_stride > iv_type->_hi) { 711 return false; // over-unrolling 712 } 713 } else if (next_stride < 0) { 714 if (iv_type->_hi + next_stride >= iv_type->_hi || // overflow 715 iv_type->_hi + next_stride < iv_type->_lo) { 716 return false; // over-unrolling 717 } 718 } 719 } 720 } 721 722 // After unroll limit will be adjusted: new_limit = limit-stride. 723 // Bailout if adjustment overflow. 724 const TypeInt* limit_type = phase->_igvn.type(limit_n)->is_int(); 725 if (stride_con > 0 && ((limit_type->_hi - stride_con) >= limit_type->_hi) || 726 stride_con < 0 && ((limit_type->_lo - stride_con) <= limit_type->_lo)) 727 return false; // overflow 728 729 // Adjust body_size to determine if we unroll or not 730 uint body_size = _body.size(); 731 // Key test to unroll loop in CRC32 java code 732 int xors_in_loop = 0; 733 // Also count ModL, DivL and MulL which expand mightly 734 for (uint k = 0; k < _body.size(); k++) { 735 Node* n = _body.at(k); 736 switch (n->Opcode()) { 737 case Op_XorI: xors_in_loop++; break; // CRC32 java code 738 case Op_ModL: body_size += 30; break; 739 case Op_DivL: body_size += 30; break; 740 case Op_MulL: body_size += 10; break; 741 case Op_StrComp: 742 case Op_StrEquals: 743 case Op_StrIndexOf: 744 case Op_EncodeISOArray: 745 case Op_AryEq: { 746 // Do not unroll a loop with String intrinsics code. 747 // String intrinsics are large and have loops. 748 return false; 749 } 750 #if INCLUDE_RTM_OPT 751 case Op_FastLock: 752 case Op_FastUnlock: { 753 // Don't unroll RTM locking code because it is large. 754 if (UseRTMLocking) { 755 return false; 756 } 757 } 758 #endif 759 } // switch 760 } 761 762 if (UseSuperWord) { 763 if (!cl->is_reduction_loop()) { 764 phase->mark_reductions(this); 765 } 766 767 // Only attempt slp analysis when user controls do not prohibit it 768 if (LoopMaxUnroll > _local_loop_unroll_factor) { 769 // Once policy_slp_analysis succeeds, mark the loop with the 770 // maximal unroll factor so that we minimize analysis passes 771 if (future_unroll_ct >= _local_loop_unroll_factor) { 772 policy_unroll_slp_analysis(cl, phase, future_unroll_ct); 773 } 774 } 775 } 776 777 int slp_max_unroll_factor = cl->slp_max_unroll(); 778 if (cl->has_passed_slp()) { 779 if (slp_max_unroll_factor >= future_unroll_ct) return true; 780 // Normal case: loop too big 781 return false; 782 } 783 784 // Check for being too big 785 if (body_size > (uint)_local_loop_unroll_limit) { 786 if (xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true; 787 // Normal case: loop too big 788 return false; 789 } 790 791 if(cl->do_unroll_only()) { 792 NOT_PRODUCT(if (TraceSuperWordLoopUnrollAnalysis) tty->print_cr("policy_unroll passed vector loop(vlen=%d,factor = %d)\n", slp_max_unroll_factor, future_unroll_ct)); 793 } 794 795 // Unroll once! (Each trip will soon do double iterations) 796 return true; 797 } 798 799 void IdealLoopTree::policy_unroll_slp_analysis(CountedLoopNode *cl, PhaseIdealLoop *phase, int future_unroll_ct) { 800 // Enable this functionality target by target as needed 801 if (SuperWordLoopUnrollAnalysis) { 802 if (!cl->was_slp_analyzed()) { 803 SuperWord sw(phase); 804 sw.transform_loop(this, false); 805 806 // If the loop is slp canonical analyze it 807 if (sw.early_return() == false) { 808 sw.unrolling_analysis(_local_loop_unroll_factor); 809 } 810 } 811 812 if (cl->has_passed_slp()) { 813 int slp_max_unroll_factor = cl->slp_max_unroll(); 814 if (slp_max_unroll_factor >= future_unroll_ct) { 815 int new_limit = cl->node_count_before_unroll() * slp_max_unroll_factor; 816 if (new_limit > LoopUnrollLimit) { 817 NOT_PRODUCT(if (TraceSuperWordLoopUnrollAnalysis) tty->print_cr("slp analysis unroll=%d, default limit=%d\n", new_limit, _local_loop_unroll_limit)); 818 _local_loop_unroll_limit = new_limit; 819 } 820 } 821 } 822 } 823 } 824 825 //------------------------------policy_align----------------------------------- 826 // Return TRUE or FALSE if the loop should be cache-line aligned. Gather the 827 // expression that does the alignment. Note that only one array base can be 828 // aligned in a loop (unless the VM guarantees mutual alignment). Note that 829 // if we vectorize short memory ops into longer memory ops, we may want to 830 // increase alignment. 831 bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const { 832 return false; 833 } 834 835 //------------------------------policy_range_check----------------------------- 836 // Return TRUE or FALSE if the loop should be range-check-eliminated. 837 // Actually we do iteration-splitting, a more powerful form of RCE. 838 bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const { 839 if (!RangeCheckElimination) return false; 840 841 CountedLoopNode *cl = _head->as_CountedLoop(); 842 // If we unrolled with no intention of doing RCE and we later 843 // changed our minds, we got no pre-loop. Either we need to 844 // make a new pre-loop, or we gotta disallow RCE. 845 if (cl->is_main_no_pre_loop()) return false; // Disallowed for now. 846 Node *trip_counter = cl->phi(); 847 848 // check for vectorized loops, some opts are no longer needed 849 if (cl->do_unroll_only()) return false; 850 851 // Check loop body for tests of trip-counter plus loop-invariant vs 852 // loop-invariant. 853 for (uint i = 0; i < _body.size(); i++) { 854 Node *iff = _body[i]; 855 if (iff->Opcode() == Op_If) { // Test? 856 857 // Comparing trip+off vs limit 858 Node *bol = iff->in(1); 859 if (bol->req() != 2) continue; // dead constant test 860 if (!bol->is_Bool()) { 861 assert(UseLoopPredicate && bol->Opcode() == Op_Conv2B, "predicate check only"); 862 continue; 863 } 864 if (bol->as_Bool()->_test._test == BoolTest::ne) 865 continue; // not RC 866 867 Node *cmp = bol->in(1); 868 Node *rc_exp = cmp->in(1); 869 Node *limit = cmp->in(2); 870 871 Node *limit_c = phase->get_ctrl(limit); 872 if( limit_c == phase->C->top() ) 873 return false; // Found dead test on live IF? No RCE! 874 if( is_member(phase->get_loop(limit_c) ) ) { 875 // Compare might have operands swapped; commute them 876 rc_exp = cmp->in(2); 877 limit = cmp->in(1); 878 limit_c = phase->get_ctrl(limit); 879 if( is_member(phase->get_loop(limit_c) ) ) 880 continue; // Both inputs are loop varying; cannot RCE 881 } 882 883 if (!phase->is_scaled_iv_plus_offset(rc_exp, trip_counter, NULL, NULL)) { 884 continue; 885 } 886 // Yeah! Found a test like 'trip+off vs limit' 887 // Test is an IfNode, has 2 projections. If BOTH are in the loop 888 // we need loop unswitching instead of iteration splitting. 889 if( is_loop_exit(iff) ) 890 return true; // Found reason to split iterations 891 } // End of is IF 892 } 893 894 return false; 895 } 896 897 //------------------------------policy_peel_only------------------------------- 898 // Return TRUE or FALSE if the loop should NEVER be RCE'd or aligned. Useful 899 // for unrolling loops with NO array accesses. 900 bool IdealLoopTree::policy_peel_only( PhaseIdealLoop *phase ) const { 901 // check for vectorized loops, any peeling done was already applied 902 if (_head->is_CountedLoop() && _head->as_CountedLoop()->do_unroll_only()) return false; 903 904 for( uint i = 0; i < _body.size(); i++ ) 905 if( _body[i]->is_Mem() ) 906 return false; 907 908 // No memory accesses at all! 909 return true; 910 } 911 912 //------------------------------clone_up_backedge_goo-------------------------- 913 // If Node n lives in the back_ctrl block and cannot float, we clone a private 914 // version of n in preheader_ctrl block and return that, otherwise return n. 915 Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones ) { 916 if( get_ctrl(n) != back_ctrl ) return n; 917 918 // Only visit once 919 if (visited.test_set(n->_idx)) { 920 Node *x = clones.find(n->_idx); 921 if (x != NULL) 922 return x; 923 return n; 924 } 925 926 Node *x = NULL; // If required, a clone of 'n' 927 // Check for 'n' being pinned in the backedge. 928 if( n->in(0) && n->in(0) == back_ctrl ) { 929 assert(clones.find(n->_idx) == NULL, "dead loop"); 930 x = n->clone(); // Clone a copy of 'n' to preheader 931 clones.push(x, n->_idx); 932 x->set_req( 0, preheader_ctrl ); // Fix x's control input to preheader 933 } 934 935 // Recursive fixup any other input edges into x. 936 // If there are no changes we can just return 'n', otherwise 937 // we need to clone a private copy and change it. 938 for( uint i = 1; i < n->req(); i++ ) { 939 Node *g = clone_up_backedge_goo( back_ctrl, preheader_ctrl, n->in(i), visited, clones ); 940 if( g != n->in(i) ) { 941 if( !x ) { 942 assert(clones.find(n->_idx) == NULL, "dead loop"); 943 x = n->clone(); 944 clones.push(x, n->_idx); 945 } 946 x->set_req(i, g); 947 } 948 } 949 if( x ) { // x can legally float to pre-header location 950 register_new_node( x, preheader_ctrl ); 951 return x; 952 } else { // raise n to cover LCA of uses 953 set_ctrl( n, find_non_split_ctrl(back_ctrl->in(0)) ); 954 } 955 return n; 956 } 957 958 bool PhaseIdealLoop::cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop) { 959 Node* castii = new CastIINode(incr, TypeInt::INT, true); 960 castii->set_req(0, ctrl); 961 register_new_node(castii, ctrl); 962 for (DUIterator_Fast imax, i = incr->fast_outs(imax); i < imax; i++) { 963 Node* n = incr->fast_out(i); 964 if (n->is_Phi() && n->in(0) == loop) { 965 int nrep = n->replace_edge(incr, castii); 966 return true; 967 } 968 } 969 return false; 970 } 971 972 //------------------------------insert_pre_post_loops-------------------------- 973 // Insert pre and post loops. If peel_only is set, the pre-loop can not have 974 // more iterations added. It acts as a 'peel' only, no lower-bound RCE, no 975 // alignment. Useful to unroll loops that do no array accesses. 976 void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ) { 977 978 #ifndef PRODUCT 979 if (TraceLoopOpts) { 980 if (peel_only) 981 tty->print("PeelMainPost "); 982 else 983 tty->print("PreMainPost "); 984 loop->dump_head(); 985 } 986 #endif 987 C->set_major_progress(); 988 989 // Find common pieces of the loop being guarded with pre & post loops 990 CountedLoopNode *main_head = loop->_head->as_CountedLoop(); 991 assert( main_head->is_normal_loop(), "" ); 992 CountedLoopEndNode *main_end = main_head->loopexit(); 993 guarantee(main_end != NULL, "no loop exit node"); 994 assert( main_end->outcnt() == 2, "1 true, 1 false path only" ); 995 uint dd_main_head = dom_depth(main_head); 996 uint max = main_head->outcnt(); 997 998 Node *pre_header= main_head->in(LoopNode::EntryControl); 999 Node *init = main_head->init_trip(); 1000 Node *incr = main_end ->incr(); 1001 Node *limit = main_end ->limit(); 1002 Node *stride = main_end ->stride(); 1003 Node *cmp = main_end ->cmp_node(); 1004 BoolTest::mask b_test = main_end->test_trip(); 1005 1006 // Need only 1 user of 'bol' because I will be hacking the loop bounds. 1007 Node *bol = main_end->in(CountedLoopEndNode::TestValue); 1008 if( bol->outcnt() != 1 ) { 1009 bol = bol->clone(); 1010 register_new_node(bol,main_end->in(CountedLoopEndNode::TestControl)); 1011 _igvn.replace_input_of(main_end, CountedLoopEndNode::TestValue, bol); 1012 } 1013 // Need only 1 user of 'cmp' because I will be hacking the loop bounds. 1014 if( cmp->outcnt() != 1 ) { 1015 cmp = cmp->clone(); 1016 register_new_node(cmp,main_end->in(CountedLoopEndNode::TestControl)); 1017 _igvn.replace_input_of(bol, 1, cmp); 1018 } 1019 1020 //------------------------------ 1021 // Step A: Create Post-Loop. 1022 Node* main_exit = main_end->proj_out(false); 1023 assert( main_exit->Opcode() == Op_IfFalse, "" ); 1024 int dd_main_exit = dom_depth(main_exit); 1025 1026 // Step A1: Clone the loop body. The clone becomes the post-loop. The main 1027 // loop pre-header illegally has 2 control users (old & new loops). 1028 clone_loop( loop, old_new, dd_main_exit ); 1029 assert( old_new[main_end ->_idx]->Opcode() == Op_CountedLoopEnd, "" ); 1030 CountedLoopNode *post_head = old_new[main_head->_idx]->as_CountedLoop(); 1031 post_head->set_post_loop(main_head); 1032 1033 // Reduce the post-loop trip count. 1034 CountedLoopEndNode* post_end = old_new[main_end ->_idx]->as_CountedLoopEnd(); 1035 post_end->_prob = PROB_FAIR; 1036 1037 // Build the main-loop normal exit. 1038 IfFalseNode *new_main_exit = new IfFalseNode(main_end); 1039 _igvn.register_new_node_with_optimizer( new_main_exit ); 1040 set_idom(new_main_exit, main_end, dd_main_exit ); 1041 set_loop(new_main_exit, loop->_parent); 1042 1043 // Step A2: Build a zero-trip guard for the post-loop. After leaving the 1044 // main-loop, the post-loop may not execute at all. We 'opaque' the incr 1045 // (the main-loop trip-counter exit value) because we will be changing 1046 // the exit value (via unrolling) so we cannot constant-fold away the zero 1047 // trip guard until all unrolling is done. 1048 Node *zer_opaq = new Opaque1Node(C, incr); 1049 Node *zer_cmp = new CmpINode( zer_opaq, limit ); 1050 Node *zer_bol = new BoolNode( zer_cmp, b_test ); 1051 register_new_node( zer_opaq, new_main_exit ); 1052 register_new_node( zer_cmp , new_main_exit ); 1053 register_new_node( zer_bol , new_main_exit ); 1054 1055 // Build the IfNode 1056 IfNode *zer_iff = new IfNode( new_main_exit, zer_bol, PROB_FAIR, COUNT_UNKNOWN ); 1057 _igvn.register_new_node_with_optimizer( zer_iff ); 1058 set_idom(zer_iff, new_main_exit, dd_main_exit); 1059 set_loop(zer_iff, loop->_parent); 1060 1061 // Plug in the false-path, taken if we need to skip post-loop 1062 _igvn.replace_input_of(main_exit, 0, zer_iff); 1063 set_idom(main_exit, zer_iff, dd_main_exit); 1064 set_idom(main_exit->unique_out(), zer_iff, dd_main_exit); 1065 // Make the true-path, must enter the post loop 1066 Node *zer_taken = new IfTrueNode( zer_iff ); 1067 _igvn.register_new_node_with_optimizer( zer_taken ); 1068 set_idom(zer_taken, zer_iff, dd_main_exit); 1069 set_loop(zer_taken, loop->_parent); 1070 // Plug in the true path 1071 _igvn.hash_delete( post_head ); 1072 post_head->set_req(LoopNode::EntryControl, zer_taken); 1073 set_idom(post_head, zer_taken, dd_main_exit); 1074 1075 Arena *a = Thread::current()->resource_area(); 1076 VectorSet visited(a); 1077 Node_Stack clones(a, main_head->back_control()->outcnt()); 1078 // Step A3: Make the fall-in values to the post-loop come from the 1079 // fall-out values of the main-loop. 1080 for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) { 1081 Node* main_phi = main_head->fast_out(i); 1082 if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() >0 ) { 1083 Node *post_phi = old_new[main_phi->_idx]; 1084 Node *fallmain = clone_up_backedge_goo(main_head->back_control(), 1085 post_head->init_control(), 1086 main_phi->in(LoopNode::LoopBackControl), 1087 visited, clones); 1088 _igvn.hash_delete(post_phi); 1089 post_phi->set_req( LoopNode::EntryControl, fallmain ); 1090 } 1091 } 1092 1093 // Update local caches for next stanza 1094 main_exit = new_main_exit; 1095 1096 1097 //------------------------------ 1098 // Step B: Create Pre-Loop. 1099 1100 // Step B1: Clone the loop body. The clone becomes the pre-loop. The main 1101 // loop pre-header illegally has 2 control users (old & new loops). 1102 clone_loop( loop, old_new, dd_main_head ); 1103 CountedLoopNode* pre_head = old_new[main_head->_idx]->as_CountedLoop(); 1104 CountedLoopEndNode* pre_end = old_new[main_end ->_idx]->as_CountedLoopEnd(); 1105 pre_head->set_pre_loop(main_head); 1106 Node *pre_incr = old_new[incr->_idx]; 1107 1108 // Reduce the pre-loop trip count. 1109 pre_end->_prob = PROB_FAIR; 1110 1111 // Find the pre-loop normal exit. 1112 Node* pre_exit = pre_end->proj_out(false); 1113 assert( pre_exit->Opcode() == Op_IfFalse, "" ); 1114 IfFalseNode *new_pre_exit = new IfFalseNode(pre_end); 1115 _igvn.register_new_node_with_optimizer( new_pre_exit ); 1116 set_idom(new_pre_exit, pre_end, dd_main_head); 1117 set_loop(new_pre_exit, loop->_parent); 1118 1119 // Step B2: Build a zero-trip guard for the main-loop. After leaving the 1120 // pre-loop, the main-loop may not execute at all. Later in life this 1121 // zero-trip guard will become the minimum-trip guard when we unroll 1122 // the main-loop. 1123 Node *min_opaq = new Opaque1Node(C, limit); 1124 Node *min_cmp = new CmpINode( pre_incr, min_opaq ); 1125 Node *min_bol = new BoolNode( min_cmp, b_test ); 1126 register_new_node( min_opaq, new_pre_exit ); 1127 register_new_node( min_cmp , new_pre_exit ); 1128 register_new_node( min_bol , new_pre_exit ); 1129 1130 // Build the IfNode (assume the main-loop is executed always). 1131 IfNode *min_iff = new IfNode( new_pre_exit, min_bol, PROB_ALWAYS, COUNT_UNKNOWN ); 1132 _igvn.register_new_node_with_optimizer( min_iff ); 1133 set_idom(min_iff, new_pre_exit, dd_main_head); 1134 set_loop(min_iff, loop->_parent); 1135 1136 // Plug in the false-path, taken if we need to skip main-loop 1137 _igvn.hash_delete( pre_exit ); 1138 pre_exit->set_req(0, min_iff); 1139 set_idom(pre_exit, min_iff, dd_main_head); 1140 set_idom(pre_exit->unique_out(), min_iff, dd_main_head); 1141 // Make the true-path, must enter the main loop 1142 Node *min_taken = new IfTrueNode( min_iff ); 1143 _igvn.register_new_node_with_optimizer( min_taken ); 1144 set_idom(min_taken, min_iff, dd_main_head); 1145 set_loop(min_taken, loop->_parent); 1146 // Plug in the true path 1147 _igvn.hash_delete( main_head ); 1148 main_head->set_req(LoopNode::EntryControl, min_taken); 1149 set_idom(main_head, min_taken, dd_main_head); 1150 1151 visited.Clear(); 1152 clones.clear(); 1153 // Step B3: Make the fall-in values to the main-loop come from the 1154 // fall-out values of the pre-loop. 1155 for (DUIterator_Fast i2max, i2 = main_head->fast_outs(i2max); i2 < i2max; i2++) { 1156 Node* main_phi = main_head->fast_out(i2); 1157 if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) { 1158 Node *pre_phi = old_new[main_phi->_idx]; 1159 Node *fallpre = clone_up_backedge_goo(pre_head->back_control(), 1160 main_head->init_control(), 1161 pre_phi->in(LoopNode::LoopBackControl), 1162 visited, clones); 1163 _igvn.hash_delete(main_phi); 1164 main_phi->set_req( LoopNode::EntryControl, fallpre ); 1165 } 1166 } 1167 1168 // Nodes inside the loop may be control dependent on a predicate 1169 // that was moved before the preloop. If the back branch of the main 1170 // or post loops becomes dead, those nodes won't be dependent on the 1171 // test that guards that loop nest anymore which could lead to an 1172 // incorrect array access because it executes independently of the 1173 // test that was guarding the loop nest. We add a special CastII on 1174 // the if branch that enters the loop, between the input induction 1175 // variable value and the induction variable Phi to preserve correct 1176 // dependencies. 1177 1178 // CastII for the post loop: 1179 bool inserted = cast_incr_before_loop(zer_opaq->in(1), zer_taken, post_head); 1180 assert(inserted, "no castII inserted"); 1181 1182 // CastII for the main loop: 1183 inserted = cast_incr_before_loop(pre_incr, min_taken, main_head); 1184 assert(inserted, "no castII inserted"); 1185 1186 // Step B4: Shorten the pre-loop to run only 1 iteration (for now). 1187 // RCE and alignment may change this later. 1188 Node *cmp_end = pre_end->cmp_node(); 1189 assert( cmp_end->in(2) == limit, "" ); 1190 Node *pre_limit = new AddINode( init, stride ); 1191 1192 // Save the original loop limit in this Opaque1 node for 1193 // use by range check elimination. 1194 Node *pre_opaq = new Opaque1Node(C, pre_limit, limit); 1195 1196 register_new_node( pre_limit, pre_head->in(0) ); 1197 register_new_node( pre_opaq , pre_head->in(0) ); 1198 1199 // Since no other users of pre-loop compare, I can hack limit directly 1200 assert( cmp_end->outcnt() == 1, "no other users" ); 1201 _igvn.hash_delete(cmp_end); 1202 cmp_end->set_req(2, peel_only ? pre_limit : pre_opaq); 1203 1204 // Special case for not-equal loop bounds: 1205 // Change pre loop test, main loop test, and the 1206 // main loop guard test to use lt or gt depending on stride 1207 // direction: 1208 // positive stride use < 1209 // negative stride use > 1210 // 1211 // not-equal test is kept for post loop to handle case 1212 // when init > limit when stride > 0 (and reverse). 1213 1214 if (pre_end->in(CountedLoopEndNode::TestValue)->as_Bool()->_test._test == BoolTest::ne) { 1215 1216 BoolTest::mask new_test = (main_end->stride_con() > 0) ? BoolTest::lt : BoolTest::gt; 1217 // Modify pre loop end condition 1218 Node* pre_bol = pre_end->in(CountedLoopEndNode::TestValue)->as_Bool(); 1219 BoolNode* new_bol0 = new BoolNode(pre_bol->in(1), new_test); 1220 register_new_node( new_bol0, pre_head->in(0) ); 1221 _igvn.replace_input_of(pre_end, CountedLoopEndNode::TestValue, new_bol0); 1222 // Modify main loop guard condition 1223 assert(min_iff->in(CountedLoopEndNode::TestValue) == min_bol, "guard okay"); 1224 BoolNode* new_bol1 = new BoolNode(min_bol->in(1), new_test); 1225 register_new_node( new_bol1, new_pre_exit ); 1226 _igvn.hash_delete(min_iff); 1227 min_iff->set_req(CountedLoopEndNode::TestValue, new_bol1); 1228 // Modify main loop end condition 1229 BoolNode* main_bol = main_end->in(CountedLoopEndNode::TestValue)->as_Bool(); 1230 BoolNode* new_bol2 = new BoolNode(main_bol->in(1), new_test); 1231 register_new_node( new_bol2, main_end->in(CountedLoopEndNode::TestControl) ); 1232 _igvn.replace_input_of(main_end, CountedLoopEndNode::TestValue, new_bol2); 1233 } 1234 1235 // Flag main loop 1236 main_head->set_main_loop(); 1237 if( peel_only ) main_head->set_main_no_pre_loop(); 1238 1239 // Subtract a trip count for the pre-loop. 1240 main_head->set_trip_count(main_head->trip_count() - 1); 1241 1242 // It's difficult to be precise about the trip-counts 1243 // for the pre/post loops. They are usually very short, 1244 // so guess that 4 trips is a reasonable value. 1245 post_head->set_profile_trip_cnt(4.0); 1246 pre_head->set_profile_trip_cnt(4.0); 1247 1248 // Now force out all loop-invariant dominating tests. The optimizer 1249 // finds some, but we _know_ they are all useless. 1250 peeled_dom_test_elim(loop,old_new); 1251 loop->record_for_igvn(); 1252 } 1253 1254 //------------------------------is_invariant----------------------------- 1255 // Return true if n is invariant 1256 bool IdealLoopTree::is_invariant(Node* n) const { 1257 Node *n_c = _phase->has_ctrl(n) ? _phase->get_ctrl(n) : n; 1258 if (n_c->is_top()) return false; 1259 return !is_member(_phase->get_loop(n_c)); 1260 } 1261 1262 1263 //------------------------------do_unroll-------------------------------------- 1264 // Unroll the loop body one step - make each trip do 2 iterations. 1265 void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip ) { 1266 assert(LoopUnrollLimit, ""); 1267 CountedLoopNode *loop_head = loop->_head->as_CountedLoop(); 1268 CountedLoopEndNode *loop_end = loop_head->loopexit(); 1269 assert(loop_end, ""); 1270 #ifndef PRODUCT 1271 if (PrintOpto && VerifyLoopOptimizations) { 1272 tty->print("Unrolling "); 1273 loop->dump_head(); 1274 } else if (TraceLoopOpts) { 1275 if (loop_head->trip_count() < (uint)LoopUnrollLimit) { 1276 tty->print("Unroll %d(%2d) ", loop_head->unrolled_count()*2, loop_head->trip_count()); 1277 } else { 1278 tty->print("Unroll %d ", loop_head->unrolled_count()*2); 1279 } 1280 loop->dump_head(); 1281 } 1282 1283 if (C->do_vector_loop() && (PrintOpto && VerifyLoopOptimizations || TraceLoopOpts)) { 1284 Arena* arena = Thread::current()->resource_area(); 1285 Node_Stack stack(arena, C->unique() >> 2); 1286 Node_List rpo_list; 1287 VectorSet visited(arena); 1288 visited.set(loop_head->_idx); 1289 rpo( loop_head, stack, visited, rpo_list ); 1290 dump(loop, rpo_list.size(), rpo_list ); 1291 } 1292 #endif 1293 1294 // Remember loop node count before unrolling to detect 1295 // if rounds of unroll,optimize are making progress 1296 loop_head->set_node_count_before_unroll(loop->_body.size()); 1297 1298 Node *ctrl = loop_head->in(LoopNode::EntryControl); 1299 Node *limit = loop_head->limit(); 1300 Node *init = loop_head->init_trip(); 1301 Node *stride = loop_head->stride(); 1302 1303 Node *opaq = NULL; 1304 if (adjust_min_trip) { // If not maximally unrolling, need adjustment 1305 // Search for zero-trip guard. 1306 assert( loop_head->is_main_loop(), "" ); 1307 assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" ); 1308 Node *iff = ctrl->in(0); 1309 assert( iff->Opcode() == Op_If, "" ); 1310 Node *bol = iff->in(1); 1311 assert( bol->Opcode() == Op_Bool, "" ); 1312 Node *cmp = bol->in(1); 1313 assert( cmp->Opcode() == Op_CmpI, "" ); 1314 opaq = cmp->in(2); 1315 // Occasionally it's possible for a zero-trip guard Opaque1 node to be 1316 // optimized away and then another round of loop opts attempted. 1317 // We can not optimize this particular loop in that case. 1318 if (opaq->Opcode() != Op_Opaque1) 1319 return; // Cannot find zero-trip guard! Bail out! 1320 // Zero-trip test uses an 'opaque' node which is not shared. 1321 assert(opaq->outcnt() == 1 && opaq->in(1) == limit, ""); 1322 } 1323 1324 C->set_major_progress(); 1325 1326 Node* new_limit = NULL; 1327 if (UnrollLimitCheck) { 1328 int stride_con = stride->get_int(); 1329 int stride_p = (stride_con > 0) ? stride_con : -stride_con; 1330 uint old_trip_count = loop_head->trip_count(); 1331 // Verify that unroll policy result is still valid. 1332 assert(old_trip_count > 1 && 1333 (!adjust_min_trip || stride_p <= (1<<3)*loop_head->unrolled_count()), "sanity"); 1334 1335 // Adjust loop limit to keep valid iterations number after unroll. 1336 // Use (limit - stride) instead of (((limit - init)/stride) & (-2))*stride 1337 // which may overflow. 1338 if (!adjust_min_trip) { 1339 assert(old_trip_count > 1 && (old_trip_count & 1) == 0, 1340 "odd trip count for maximally unroll"); 1341 // Don't need to adjust limit for maximally unroll since trip count is even. 1342 } else if (loop_head->has_exact_trip_count() && init->is_Con()) { 1343 // Loop's limit is constant. Loop's init could be constant when pre-loop 1344 // become peeled iteration. 1345 jlong init_con = init->get_int(); 1346 // We can keep old loop limit if iterations count stays the same: 1347 // old_trip_count == new_trip_count * 2 1348 // Note: since old_trip_count >= 2 then new_trip_count >= 1 1349 // so we also don't need to adjust zero trip test. 1350 jlong limit_con = limit->get_int(); 1351 // (stride_con*2) not overflow since stride_con <= 8. 1352 int new_stride_con = stride_con * 2; 1353 int stride_m = new_stride_con - (stride_con > 0 ? 1 : -1); 1354 jlong trip_count = (limit_con - init_con + stride_m)/new_stride_con; 1355 // New trip count should satisfy next conditions. 1356 assert(trip_count > 0 && (julong)trip_count < (julong)max_juint/2, "sanity"); 1357 uint new_trip_count = (uint)trip_count; 1358 adjust_min_trip = (old_trip_count != new_trip_count*2); 1359 } 1360 1361 if (adjust_min_trip) { 1362 // Step 2: Adjust the trip limit if it is called for. 1363 // The adjustment amount is -stride. Need to make sure if the 1364 // adjustment underflows or overflows, then the main loop is skipped. 1365 Node* cmp = loop_end->cmp_node(); 1366 assert(cmp->in(2) == limit, "sanity"); 1367 assert(opaq != NULL && opaq->in(1) == limit, "sanity"); 1368 1369 // Verify that policy_unroll result is still valid. 1370 const TypeInt* limit_type = _igvn.type(limit)->is_int(); 1371 assert(stride_con > 0 && ((limit_type->_hi - stride_con) < limit_type->_hi) || 1372 stride_con < 0 && ((limit_type->_lo - stride_con) > limit_type->_lo), "sanity"); 1373 1374 if (limit->is_Con()) { 1375 // The check in policy_unroll and the assert above guarantee 1376 // no underflow if limit is constant. 1377 new_limit = _igvn.intcon(limit->get_int() - stride_con); 1378 set_ctrl(new_limit, C->root()); 1379 } else { 1380 // Limit is not constant. 1381 if (loop_head->unrolled_count() == 1) { // only for first unroll 1382 // Separate limit by Opaque node in case it is an incremented 1383 // variable from previous loop to avoid using pre-incremented 1384 // value which could increase register pressure. 1385 // Otherwise reorg_offsets() optimization will create a separate 1386 // Opaque node for each use of trip-counter and as result 1387 // zero trip guard limit will be different from loop limit. 1388 assert(has_ctrl(opaq), "should have it"); 1389 Node* opaq_ctrl = get_ctrl(opaq); 1390 limit = new Opaque2Node( C, limit ); 1391 register_new_node( limit, opaq_ctrl ); 1392 } 1393 if (stride_con > 0 && ((limit_type->_lo - stride_con) < limit_type->_lo) || 1394 stride_con < 0 && ((limit_type->_hi - stride_con) > limit_type->_hi)) { 1395 // No underflow. 1396 new_limit = new SubINode(limit, stride); 1397 } else { 1398 // (limit - stride) may underflow. 1399 // Clamp the adjustment value with MININT or MAXINT: 1400 // 1401 // new_limit = limit-stride 1402 // if (stride > 0) 1403 // new_limit = (limit < new_limit) ? MININT : new_limit; 1404 // else 1405 // new_limit = (limit > new_limit) ? MAXINT : new_limit; 1406 // 1407 BoolTest::mask bt = loop_end->test_trip(); 1408 assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected"); 1409 Node* adj_max = _igvn.intcon((stride_con > 0) ? min_jint : max_jint); 1410 set_ctrl(adj_max, C->root()); 1411 Node* old_limit = NULL; 1412 Node* adj_limit = NULL; 1413 Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : NULL; 1414 if (loop_head->unrolled_count() > 1 && 1415 limit->is_CMove() && limit->Opcode() == Op_CMoveI && 1416 limit->in(CMoveNode::IfTrue) == adj_max && 1417 bol->as_Bool()->_test._test == bt && 1418 bol->in(1)->Opcode() == Op_CmpI && 1419 bol->in(1)->in(2) == limit->in(CMoveNode::IfFalse)) { 1420 // Loop was unrolled before. 1421 // Optimize the limit to avoid nested CMove: 1422 // use original limit as old limit. 1423 old_limit = bol->in(1)->in(1); 1424 // Adjust previous adjusted limit. 1425 adj_limit = limit->in(CMoveNode::IfFalse); 1426 adj_limit = new SubINode(adj_limit, stride); 1427 } else { 1428 old_limit = limit; 1429 adj_limit = new SubINode(limit, stride); 1430 } 1431 assert(old_limit != NULL && adj_limit != NULL, ""); 1432 register_new_node( adj_limit, ctrl ); // adjust amount 1433 Node* adj_cmp = new CmpINode(old_limit, adj_limit); 1434 register_new_node( adj_cmp, ctrl ); 1435 Node* adj_bool = new BoolNode(adj_cmp, bt); 1436 register_new_node( adj_bool, ctrl ); 1437 new_limit = new CMoveINode(adj_bool, adj_limit, adj_max, TypeInt::INT); 1438 } 1439 register_new_node(new_limit, ctrl); 1440 } 1441 assert(new_limit != NULL, ""); 1442 // Replace in loop test. 1443 assert(loop_end->in(1)->in(1) == cmp, "sanity"); 1444 if (cmp->outcnt() == 1 && loop_end->in(1)->outcnt() == 1) { 1445 // Don't need to create new test since only one user. 1446 _igvn.hash_delete(cmp); 1447 cmp->set_req(2, new_limit); 1448 } else { 1449 // Create new test since it is shared. 1450 Node* ctrl2 = loop_end->in(0); 1451 Node* cmp2 = cmp->clone(); 1452 cmp2->set_req(2, new_limit); 1453 register_new_node(cmp2, ctrl2); 1454 Node* bol2 = loop_end->in(1)->clone(); 1455 bol2->set_req(1, cmp2); 1456 register_new_node(bol2, ctrl2); 1457 _igvn.replace_input_of(loop_end, 1, bol2); 1458 } 1459 // Step 3: Find the min-trip test guaranteed before a 'main' loop. 1460 // Make it a 1-trip test (means at least 2 trips). 1461 1462 // Guard test uses an 'opaque' node which is not shared. Hence I 1463 // can edit it's inputs directly. Hammer in the new limit for the 1464 // minimum-trip guard. 1465 assert(opaq->outcnt() == 1, ""); 1466 _igvn.replace_input_of(opaq, 1, new_limit); 1467 } 1468 1469 // Adjust max trip count. The trip count is intentionally rounded 1470 // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll, 1471 // the main, unrolled, part of the loop will never execute as it is protected 1472 // by the min-trip test. See bug 4834191 for a case where we over-unrolled 1473 // and later determined that part of the unrolled loop was dead. 1474 loop_head->set_trip_count(old_trip_count / 2); 1475 1476 // Double the count of original iterations in the unrolled loop body. 1477 loop_head->double_unrolled_count(); 1478 1479 } else { // LoopLimitCheck 1480 1481 // Adjust max trip count. The trip count is intentionally rounded 1482 // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll, 1483 // the main, unrolled, part of the loop will never execute as it is protected 1484 // by the min-trip test. See bug 4834191 for a case where we over-unrolled 1485 // and later determined that part of the unrolled loop was dead. 1486 loop_head->set_trip_count(loop_head->trip_count() / 2); 1487 1488 // Double the count of original iterations in the unrolled loop body. 1489 loop_head->double_unrolled_count(); 1490 1491 // ----------- 1492 // Step 2: Cut back the trip counter for an unroll amount of 2. 1493 // Loop will normally trip (limit - init)/stride_con. Since it's a 1494 // CountedLoop this is exact (stride divides limit-init exactly). 1495 // We are going to double the loop body, so we want to knock off any 1496 // odd iteration: (trip_cnt & ~1). Then back compute a new limit. 1497 Node *span = new SubINode( limit, init ); 1498 register_new_node( span, ctrl ); 1499 Node *trip = new DivINode( 0, span, stride ); 1500 register_new_node( trip, ctrl ); 1501 Node *mtwo = _igvn.intcon(-2); 1502 set_ctrl(mtwo, C->root()); 1503 Node *rond = new AndINode( trip, mtwo ); 1504 register_new_node( rond, ctrl ); 1505 Node *spn2 = new MulINode( rond, stride ); 1506 register_new_node( spn2, ctrl ); 1507 new_limit = new AddINode( spn2, init ); 1508 register_new_node( new_limit, ctrl ); 1509 1510 // Hammer in the new limit 1511 Node *ctrl2 = loop_end->in(0); 1512 Node *cmp2 = new CmpINode( loop_head->incr(), new_limit ); 1513 register_new_node( cmp2, ctrl2 ); 1514 Node *bol2 = new BoolNode( cmp2, loop_end->test_trip() ); 1515 register_new_node( bol2, ctrl2 ); 1516 _igvn.replace_input_of(loop_end, CountedLoopEndNode::TestValue, bol2); 1517 1518 // Step 3: Find the min-trip test guaranteed before a 'main' loop. 1519 // Make it a 1-trip test (means at least 2 trips). 1520 if( adjust_min_trip ) { 1521 assert( new_limit != NULL, "" ); 1522 // Guard test uses an 'opaque' node which is not shared. Hence I 1523 // can edit it's inputs directly. Hammer in the new limit for the 1524 // minimum-trip guard. 1525 assert( opaq->outcnt() == 1, "" ); 1526 _igvn.hash_delete(opaq); 1527 opaq->set_req(1, new_limit); 1528 } 1529 } // LoopLimitCheck 1530 1531 // --------- 1532 // Step 4: Clone the loop body. Move it inside the loop. This loop body 1533 // represents the odd iterations; since the loop trips an even number of 1534 // times its backedge is never taken. Kill the backedge. 1535 uint dd = dom_depth(loop_head); 1536 clone_loop( loop, old_new, dd ); 1537 1538 // Make backedges of the clone equal to backedges of the original. 1539 // Make the fall-in from the original come from the fall-out of the clone. 1540 for (DUIterator_Fast jmax, j = loop_head->fast_outs(jmax); j < jmax; j++) { 1541 Node* phi = loop_head->fast_out(j); 1542 if( phi->is_Phi() && phi->in(0) == loop_head && phi->outcnt() > 0 ) { 1543 Node *newphi = old_new[phi->_idx]; 1544 _igvn.hash_delete( phi ); 1545 _igvn.hash_delete( newphi ); 1546 1547 phi ->set_req(LoopNode:: EntryControl, newphi->in(LoopNode::LoopBackControl)); 1548 newphi->set_req(LoopNode::LoopBackControl, phi ->in(LoopNode::LoopBackControl)); 1549 phi ->set_req(LoopNode::LoopBackControl, C->top()); 1550 } 1551 } 1552 Node *clone_head = old_new[loop_head->_idx]; 1553 _igvn.hash_delete( clone_head ); 1554 loop_head ->set_req(LoopNode:: EntryControl, clone_head->in(LoopNode::LoopBackControl)); 1555 clone_head->set_req(LoopNode::LoopBackControl, loop_head ->in(LoopNode::LoopBackControl)); 1556 loop_head ->set_req(LoopNode::LoopBackControl, C->top()); 1557 loop->_head = clone_head; // New loop header 1558 1559 set_idom(loop_head, loop_head ->in(LoopNode::EntryControl), dd); 1560 set_idom(clone_head, clone_head->in(LoopNode::EntryControl), dd); 1561 1562 // Kill the clone's backedge 1563 Node *newcle = old_new[loop_end->_idx]; 1564 _igvn.hash_delete( newcle ); 1565 Node *one = _igvn.intcon(1); 1566 set_ctrl(one, C->root()); 1567 newcle->set_req(1, one); 1568 // Force clone into same loop body 1569 uint max = loop->_body.size(); 1570 for( uint k = 0; k < max; k++ ) { 1571 Node *old = loop->_body.at(k); 1572 Node *nnn = old_new[old->_idx]; 1573 loop->_body.push(nnn); 1574 if (!has_ctrl(old)) 1575 set_loop(nnn, loop); 1576 } 1577 1578 loop->record_for_igvn(); 1579 1580 #ifndef PRODUCT 1581 if (C->do_vector_loop() && (PrintOpto && VerifyLoopOptimizations || TraceLoopOpts)) { 1582 tty->print("\nnew loop after unroll\n"); loop->dump_head(); 1583 for (uint i = 0; i < loop->_body.size(); i++) { 1584 loop->_body.at(i)->dump(); 1585 } 1586 if(C->clone_map().is_debug()) { 1587 tty->print("\nCloneMap\n"); 1588 Dict* dict = C->clone_map().dict(); 1589 DictI i(dict); 1590 tty->print_cr("Dict@%p[%d] = ", dict, dict->Size()); 1591 for (int ii = 0; i.test(); ++i, ++ii) { 1592 NodeCloneInfo cl((uint64_t)dict->operator[]((void*)i._key)); 1593 tty->print("%d->%d:%d,", (int)(intptr_t)i._key, cl.idx(), cl.gen()); 1594 if (ii % 10 == 9) { 1595 tty->print_cr(" "); 1596 } 1597 } 1598 tty->print_cr(" "); 1599 } 1600 } 1601 #endif 1602 1603 } 1604 1605 //------------------------------do_maximally_unroll---------------------------- 1606 1607 void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) { 1608 CountedLoopNode *cl = loop->_head->as_CountedLoop(); 1609 assert(cl->has_exact_trip_count(), "trip count is not exact"); 1610 assert(cl->trip_count() > 0, ""); 1611 #ifndef PRODUCT 1612 if (TraceLoopOpts) { 1613 tty->print("MaxUnroll %d ", cl->trip_count()); 1614 loop->dump_head(); 1615 } 1616 #endif 1617 1618 // If loop is tripping an odd number of times, peel odd iteration 1619 if ((cl->trip_count() & 1) == 1) { 1620 do_peeling(loop, old_new); 1621 } 1622 1623 // Now its tripping an even number of times remaining. Double loop body. 1624 // Do not adjust pre-guards; they are not needed and do not exist. 1625 if (cl->trip_count() > 0) { 1626 assert((cl->trip_count() & 1) == 0, "missed peeling"); 1627 do_unroll(loop, old_new, false); 1628 } 1629 } 1630 1631 void PhaseIdealLoop::mark_reductions(IdealLoopTree *loop) { 1632 if (SuperWordReductions == false) return; 1633 1634 CountedLoopNode* loop_head = loop->_head->as_CountedLoop(); 1635 if (loop_head->unrolled_count() > 1) { 1636 return; 1637 } 1638 1639 Node* trip_phi = loop_head->phi(); 1640 for (DUIterator_Fast imax, i = loop_head->fast_outs(imax); i < imax; i++) { 1641 Node* phi = loop_head->fast_out(i); 1642 if (phi->is_Phi() && phi->outcnt() > 0 && phi != trip_phi) { 1643 // For definitions which are loop inclusive and not tripcounts. 1644 Node* def_node = phi->in(LoopNode::LoopBackControl); 1645 1646 if (def_node != NULL) { 1647 Node* n_ctrl = get_ctrl(def_node); 1648 if (n_ctrl != NULL && loop->is_member(get_loop(n_ctrl))) { 1649 // Now test it to see if it fits the standard pattern for a reduction operator. 1650 int opc = def_node->Opcode(); 1651 if (opc != ReductionNode::opcode(opc, def_node->bottom_type()->basic_type())) { 1652 if (!def_node->is_reduction()) { // Not marked yet 1653 // To be a reduction, the arithmetic node must have the phi as input and provide a def to it 1654 bool ok = false; 1655 for (unsigned j = 1; j < def_node->req(); j++) { 1656 Node* in = def_node->in(j); 1657 if (in == phi) { 1658 ok = true; 1659 break; 1660 } 1661 } 1662 1663 // do nothing if we did not match the initial criteria 1664 if (ok == false) { 1665 continue; 1666 } 1667 1668 // The result of the reduction must not be used in the loop 1669 for (DUIterator_Fast imax, i = def_node->fast_outs(imax); i < imax && ok; i++) { 1670 Node* u = def_node->fast_out(i); 1671 if (has_ctrl(u) && !loop->is_member(get_loop(get_ctrl(u)))) { 1672 continue; 1673 } 1674 if (u == phi) { 1675 continue; 1676 } 1677 ok = false; 1678 } 1679 1680 // iff the uses conform 1681 if (ok) { 1682 def_node->add_flag(Node::Flag_is_reduction); 1683 loop_head->mark_has_reductions(); 1684 } 1685 } 1686 } 1687 } 1688 } 1689 } 1690 } 1691 } 1692 1693 //------------------------------dominates_backedge--------------------------------- 1694 // Returns true if ctrl is executed on every complete iteration 1695 bool IdealLoopTree::dominates_backedge(Node* ctrl) { 1696 assert(ctrl->is_CFG(), "must be control"); 1697 Node* backedge = _head->as_Loop()->in(LoopNode::LoopBackControl); 1698 return _phase->dom_lca_internal(ctrl, backedge) == ctrl; 1699 } 1700 1701 //------------------------------adjust_limit----------------------------------- 1702 // Helper function for add_constraint(). 1703 Node* PhaseIdealLoop::adjust_limit(int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl) { 1704 // Compute "I :: (limit-offset)/scale" 1705 Node *con = new SubINode(rc_limit, offset); 1706 register_new_node(con, pre_ctrl); 1707 Node *X = new DivINode(0, con, scale); 1708 register_new_node(X, pre_ctrl); 1709 1710 // Adjust loop limit 1711 loop_limit = (stride_con > 0) 1712 ? (Node*)(new MinINode(loop_limit, X)) 1713 : (Node*)(new MaxINode(loop_limit, X)); 1714 register_new_node(loop_limit, pre_ctrl); 1715 return loop_limit; 1716 } 1717 1718 //------------------------------add_constraint--------------------------------- 1719 // Constrain the main loop iterations so the conditions: 1720 // low_limit <= scale_con * I + offset < upper_limit 1721 // always holds true. That is, either increase the number of iterations in 1722 // the pre-loop or the post-loop until the condition holds true in the main 1723 // loop. Stride, scale, offset and limit are all loop invariant. Further, 1724 // stride and scale are constants (offset and limit often are). 1725 void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) { 1726 // For positive stride, the pre-loop limit always uses a MAX function 1727 // and the main loop a MIN function. For negative stride these are 1728 // reversed. 1729 1730 // Also for positive stride*scale the affine function is increasing, so the 1731 // pre-loop must check for underflow and the post-loop for overflow. 1732 // Negative stride*scale reverses this; pre-loop checks for overflow and 1733 // post-loop for underflow. 1734 1735 Node *scale = _igvn.intcon(scale_con); 1736 set_ctrl(scale, C->root()); 1737 1738 if ((stride_con^scale_con) >= 0) { // Use XOR to avoid overflow 1739 // The overflow limit: scale*I+offset < upper_limit 1740 // For main-loop compute 1741 // ( if (scale > 0) /* and stride > 0 */ 1742 // I < (upper_limit-offset)/scale 1743 // else /* scale < 0 and stride < 0 */ 1744 // I > (upper_limit-offset)/scale 1745 // ) 1746 // 1747 // (upper_limit-offset) may overflow or underflow. 1748 // But it is fine since main loop will either have 1749 // less iterations or will be skipped in such case. 1750 *main_limit = adjust_limit(stride_con, scale, offset, upper_limit, *main_limit, pre_ctrl); 1751 1752 // The underflow limit: low_limit <= scale*I+offset. 1753 // For pre-loop compute 1754 // NOT(scale*I+offset >= low_limit) 1755 // scale*I+offset < low_limit 1756 // ( if (scale > 0) /* and stride > 0 */ 1757 // I < (low_limit-offset)/scale 1758 // else /* scale < 0 and stride < 0 */ 1759 // I > (low_limit-offset)/scale 1760 // ) 1761 1762 if (low_limit->get_int() == -max_jint) { 1763 if (!RangeLimitCheck) return; 1764 // We need this guard when scale*pre_limit+offset >= limit 1765 // due to underflow. So we need execute pre-loop until 1766 // scale*I+offset >= min_int. But (min_int-offset) will 1767 // underflow when offset > 0 and X will be > original_limit 1768 // when stride > 0. To avoid it we replace positive offset with 0. 1769 // 1770 // Also (min_int+1 == -max_int) is used instead of min_int here 1771 // to avoid problem with scale == -1 (min_int/(-1) == min_int). 1772 Node* shift = _igvn.intcon(31); 1773 set_ctrl(shift, C->root()); 1774 Node* sign = new RShiftINode(offset, shift); 1775 register_new_node(sign, pre_ctrl); 1776 offset = new AndINode(offset, sign); 1777 register_new_node(offset, pre_ctrl); 1778 } else { 1779 assert(low_limit->get_int() == 0, "wrong low limit for range check"); 1780 // The only problem we have here when offset == min_int 1781 // since (0-min_int) == min_int. It may be fine for stride > 0 1782 // but for stride < 0 X will be < original_limit. To avoid it 1783 // max(pre_limit, original_limit) is used in do_range_check(). 1784 } 1785 // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond); 1786 *pre_limit = adjust_limit((-stride_con), scale, offset, low_limit, *pre_limit, pre_ctrl); 1787 1788 } else { // stride_con*scale_con < 0 1789 // For negative stride*scale pre-loop checks for overflow and 1790 // post-loop for underflow. 1791 // 1792 // The overflow limit: scale*I+offset < upper_limit 1793 // For pre-loop compute 1794 // NOT(scale*I+offset < upper_limit) 1795 // scale*I+offset >= upper_limit 1796 // scale*I+offset+1 > upper_limit 1797 // ( if (scale < 0) /* and stride > 0 */ 1798 // I < (upper_limit-(offset+1))/scale 1799 // else /* scale > 0 and stride < 0 */ 1800 // I > (upper_limit-(offset+1))/scale 1801 // ) 1802 // 1803 // (upper_limit-offset-1) may underflow or overflow. 1804 // To avoid it min(pre_limit, original_limit) is used 1805 // in do_range_check() for stride > 0 and max() for < 0. 1806 Node *one = _igvn.intcon(1); 1807 set_ctrl(one, C->root()); 1808 1809 Node *plus_one = new AddINode(offset, one); 1810 register_new_node( plus_one, pre_ctrl ); 1811 // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond); 1812 *pre_limit = adjust_limit((-stride_con), scale, plus_one, upper_limit, *pre_limit, pre_ctrl); 1813 1814 if (low_limit->get_int() == -max_jint) { 1815 if (!RangeLimitCheck) return; 1816 // We need this guard when scale*main_limit+offset >= limit 1817 // due to underflow. So we need execute main-loop while 1818 // scale*I+offset+1 > min_int. But (min_int-offset-1) will 1819 // underflow when (offset+1) > 0 and X will be < main_limit 1820 // when scale < 0 (and stride > 0). To avoid it we replace 1821 // positive (offset+1) with 0. 1822 // 1823 // Also (min_int+1 == -max_int) is used instead of min_int here 1824 // to avoid problem with scale == -1 (min_int/(-1) == min_int). 1825 Node* shift = _igvn.intcon(31); 1826 set_ctrl(shift, C->root()); 1827 Node* sign = new RShiftINode(plus_one, shift); 1828 register_new_node(sign, pre_ctrl); 1829 plus_one = new AndINode(plus_one, sign); 1830 register_new_node(plus_one, pre_ctrl); 1831 } else { 1832 assert(low_limit->get_int() == 0, "wrong low limit for range check"); 1833 // The only problem we have here when offset == max_int 1834 // since (max_int+1) == min_int and (0-min_int) == min_int. 1835 // But it is fine since main loop will either have 1836 // less iterations or will be skipped in such case. 1837 } 1838 // The underflow limit: low_limit <= scale*I+offset. 1839 // For main-loop compute 1840 // scale*I+offset+1 > low_limit 1841 // ( if (scale < 0) /* and stride > 0 */ 1842 // I < (low_limit-(offset+1))/scale 1843 // else /* scale > 0 and stride < 0 */ 1844 // I > (low_limit-(offset+1))/scale 1845 // ) 1846 1847 *main_limit = adjust_limit(stride_con, scale, plus_one, low_limit, *main_limit, pre_ctrl); 1848 } 1849 } 1850 1851 1852 //------------------------------is_scaled_iv--------------------------------- 1853 // Return true if exp is a constant times an induction var 1854 bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, int* p_scale) { 1855 if (exp == iv) { 1856 if (p_scale != NULL) { 1857 *p_scale = 1; 1858 } 1859 return true; 1860 } 1861 int opc = exp->Opcode(); 1862 if (opc == Op_MulI) { 1863 if (exp->in(1) == iv && exp->in(2)->is_Con()) { 1864 if (p_scale != NULL) { 1865 *p_scale = exp->in(2)->get_int(); 1866 } 1867 return true; 1868 } 1869 if (exp->in(2) == iv && exp->in(1)->is_Con()) { 1870 if (p_scale != NULL) { 1871 *p_scale = exp->in(1)->get_int(); 1872 } 1873 return true; 1874 } 1875 } else if (opc == Op_LShiftI) { 1876 if (exp->in(1) == iv && exp->in(2)->is_Con()) { 1877 if (p_scale != NULL) { 1878 *p_scale = 1 << exp->in(2)->get_int(); 1879 } 1880 return true; 1881 } 1882 } 1883 return false; 1884 } 1885 1886 //-----------------------------is_scaled_iv_plus_offset------------------------------ 1887 // Return true if exp is a simple induction variable expression: k1*iv + (invar + k2) 1888 bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth) { 1889 if (is_scaled_iv(exp, iv, p_scale)) { 1890 if (p_offset != NULL) { 1891 Node *zero = _igvn.intcon(0); 1892 set_ctrl(zero, C->root()); 1893 *p_offset = zero; 1894 } 1895 return true; 1896 } 1897 int opc = exp->Opcode(); 1898 if (opc == Op_AddI) { 1899 if (is_scaled_iv(exp->in(1), iv, p_scale)) { 1900 if (p_offset != NULL) { 1901 *p_offset = exp->in(2); 1902 } 1903 return true; 1904 } 1905 if (exp->in(2)->is_Con()) { 1906 Node* offset2 = NULL; 1907 if (depth < 2 && 1908 is_scaled_iv_plus_offset(exp->in(1), iv, p_scale, 1909 p_offset != NULL ? &offset2 : NULL, depth+1)) { 1910 if (p_offset != NULL) { 1911 Node *ctrl_off2 = get_ctrl(offset2); 1912 Node* offset = new AddINode(offset2, exp->in(2)); 1913 register_new_node(offset, ctrl_off2); 1914 *p_offset = offset; 1915 } 1916 return true; 1917 } 1918 } 1919 } else if (opc == Op_SubI) { 1920 if (is_scaled_iv(exp->in(1), iv, p_scale)) { 1921 if (p_offset != NULL) { 1922 Node *zero = _igvn.intcon(0); 1923 set_ctrl(zero, C->root()); 1924 Node *ctrl_off = get_ctrl(exp->in(2)); 1925 Node* offset = new SubINode(zero, exp->in(2)); 1926 register_new_node(offset, ctrl_off); 1927 *p_offset = offset; 1928 } 1929 return true; 1930 } 1931 if (is_scaled_iv(exp->in(2), iv, p_scale)) { 1932 if (p_offset != NULL) { 1933 *p_scale *= -1; 1934 *p_offset = exp->in(1); 1935 } 1936 return true; 1937 } 1938 } 1939 return false; 1940 } 1941 1942 //------------------------------do_range_check--------------------------------- 1943 // Eliminate range-checks and other trip-counter vs loop-invariant tests. 1944 void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) { 1945 #ifndef PRODUCT 1946 if (PrintOpto && VerifyLoopOptimizations) { 1947 tty->print("Range Check Elimination "); 1948 loop->dump_head(); 1949 } else if (TraceLoopOpts) { 1950 tty->print("RangeCheck "); 1951 loop->dump_head(); 1952 } 1953 #endif 1954 assert(RangeCheckElimination, ""); 1955 CountedLoopNode *cl = loop->_head->as_CountedLoop(); 1956 assert(cl->is_main_loop(), ""); 1957 1958 // protect against stride not being a constant 1959 if (!cl->stride_is_con()) 1960 return; 1961 1962 // Find the trip counter; we are iteration splitting based on it 1963 Node *trip_counter = cl->phi(); 1964 // Find the main loop limit; we will trim it's iterations 1965 // to not ever trip end tests 1966 Node *main_limit = cl->limit(); 1967 1968 // Need to find the main-loop zero-trip guard 1969 Node *ctrl = cl->in(LoopNode::EntryControl); 1970 assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, ""); 1971 Node *iffm = ctrl->in(0); 1972 assert(iffm->Opcode() == Op_If, ""); 1973 Node *bolzm = iffm->in(1); 1974 assert(bolzm->Opcode() == Op_Bool, ""); 1975 Node *cmpzm = bolzm->in(1); 1976 assert(cmpzm->is_Cmp(), ""); 1977 Node *opqzm = cmpzm->in(2); 1978 // Can not optimize a loop if zero-trip Opaque1 node is optimized 1979 // away and then another round of loop opts attempted. 1980 if (opqzm->Opcode() != Op_Opaque1) 1981 return; 1982 assert(opqzm->in(1) == main_limit, "do not understand situation"); 1983 1984 // Find the pre-loop limit; we will expand its iterations to 1985 // not ever trip low tests. 1986 Node *p_f = iffm->in(0); 1987 // pre loop may have been optimized out 1988 if (p_f->Opcode() != Op_IfFalse) { 1989 return; 1990 } 1991 CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd(); 1992 assert(pre_end->loopnode()->is_pre_loop(), ""); 1993 Node *pre_opaq1 = pre_end->limit(); 1994 // Occasionally it's possible for a pre-loop Opaque1 node to be 1995 // optimized away and then another round of loop opts attempted. 1996 // We can not optimize this particular loop in that case. 1997 if (pre_opaq1->Opcode() != Op_Opaque1) 1998 return; 1999 Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1; 2000 Node *pre_limit = pre_opaq->in(1); 2001 2002 // Where do we put new limit calculations 2003 Node *pre_ctrl = pre_end->loopnode()->in(LoopNode::EntryControl); 2004 2005 // Ensure the original loop limit is available from the 2006 // pre-loop Opaque1 node. 2007 Node *orig_limit = pre_opaq->original_loop_limit(); 2008 if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP) 2009 return; 2010 2011 // Must know if its a count-up or count-down loop 2012 2013 int stride_con = cl->stride_con(); 2014 Node *zero = _igvn.intcon(0); 2015 Node *one = _igvn.intcon(1); 2016 // Use symmetrical int range [-max_jint,max_jint] 2017 Node *mini = _igvn.intcon(-max_jint); 2018 set_ctrl(zero, C->root()); 2019 set_ctrl(one, C->root()); 2020 set_ctrl(mini, C->root()); 2021 2022 // Range checks that do not dominate the loop backedge (ie. 2023 // conditionally executed) can lengthen the pre loop limit beyond 2024 // the original loop limit. To prevent this, the pre limit is 2025 // (for stride > 0) MINed with the original loop limit (MAXed 2026 // stride < 0) when some range_check (rc) is conditionally 2027 // executed. 2028 bool conditional_rc = false; 2029 2030 // Check loop body for tests of trip-counter plus loop-invariant vs 2031 // loop-invariant. 2032 for( uint i = 0; i < loop->_body.size(); i++ ) { 2033 Node *iff = loop->_body[i]; 2034 if( iff->Opcode() == Op_If ) { // Test? 2035 2036 // Test is an IfNode, has 2 projections. If BOTH are in the loop 2037 // we need loop unswitching instead of iteration splitting. 2038 Node *exit = loop->is_loop_exit(iff); 2039 if( !exit ) continue; 2040 int flip = (exit->Opcode() == Op_IfTrue) ? 1 : 0; 2041 2042 // Get boolean condition to test 2043 Node *i1 = iff->in(1); 2044 if( !i1->is_Bool() ) continue; 2045 BoolNode *bol = i1->as_Bool(); 2046 BoolTest b_test = bol->_test; 2047 // Flip sense of test if exit condition is flipped 2048 if( flip ) 2049 b_test = b_test.negate(); 2050 2051 // Get compare 2052 Node *cmp = bol->in(1); 2053 2054 // Look for trip_counter + offset vs limit 2055 Node *rc_exp = cmp->in(1); 2056 Node *limit = cmp->in(2); 2057 jint scale_con= 1; // Assume trip counter not scaled 2058 2059 Node *limit_c = get_ctrl(limit); 2060 if( loop->is_member(get_loop(limit_c) ) ) { 2061 // Compare might have operands swapped; commute them 2062 b_test = b_test.commute(); 2063 rc_exp = cmp->in(2); 2064 limit = cmp->in(1); 2065 limit_c = get_ctrl(limit); 2066 if( loop->is_member(get_loop(limit_c) ) ) 2067 continue; // Both inputs are loop varying; cannot RCE 2068 } 2069 // Here we know 'limit' is loop invariant 2070 2071 // 'limit' maybe pinned below the zero trip test (probably from a 2072 // previous round of rce), in which case, it can't be used in the 2073 // zero trip test expression which must occur before the zero test's if. 2074 if( limit_c == ctrl ) { 2075 continue; // Don't rce this check but continue looking for other candidates. 2076 } 2077 2078 // Check for scaled induction variable plus an offset 2079 Node *offset = NULL; 2080 2081 if (!is_scaled_iv_plus_offset(rc_exp, trip_counter, &scale_con, &offset)) { 2082 continue; 2083 } 2084 2085 Node *offset_c = get_ctrl(offset); 2086 if( loop->is_member( get_loop(offset_c) ) ) 2087 continue; // Offset is not really loop invariant 2088 // Here we know 'offset' is loop invariant. 2089 2090 // As above for the 'limit', the 'offset' maybe pinned below the 2091 // zero trip test. 2092 if( offset_c == ctrl ) { 2093 continue; // Don't rce this check but continue looking for other candidates. 2094 } 2095 #ifdef ASSERT 2096 if (TraceRangeLimitCheck) { 2097 tty->print_cr("RC bool node%s", flip ? " flipped:" : ":"); 2098 bol->dump(2); 2099 } 2100 #endif 2101 // At this point we have the expression as: 2102 // scale_con * trip_counter + offset :: limit 2103 // where scale_con, offset and limit are loop invariant. Trip_counter 2104 // monotonically increases by stride_con, a constant. Both (or either) 2105 // stride_con and scale_con can be negative which will flip about the 2106 // sense of the test. 2107 2108 // Adjust pre and main loop limits to guard the correct iteration set 2109 if( cmp->Opcode() == Op_CmpU ) {// Unsigned compare is really 2 tests 2110 if( b_test._test == BoolTest::lt ) { // Range checks always use lt 2111 // The underflow and overflow limits: 0 <= scale*I+offset < limit 2112 add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit ); 2113 if (!conditional_rc) { 2114 // (0-offset)/scale could be outside of loop iterations range. 2115 conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck; 2116 } 2117 } else { 2118 #ifndef PRODUCT 2119 if( PrintOpto ) 2120 tty->print_cr("missed RCE opportunity"); 2121 #endif 2122 continue; // In release mode, ignore it 2123 } 2124 } else { // Otherwise work on normal compares 2125 switch( b_test._test ) { 2126 case BoolTest::gt: 2127 // Fall into GE case 2128 case BoolTest::ge: 2129 // Convert (I*scale+offset) >= Limit to (I*(-scale)+(-offset)) <= -Limit 2130 scale_con = -scale_con; 2131 offset = new SubINode( zero, offset ); 2132 register_new_node( offset, pre_ctrl ); 2133 limit = new SubINode( zero, limit ); 2134 register_new_node( limit, pre_ctrl ); 2135 // Fall into LE case 2136 case BoolTest::le: 2137 if (b_test._test != BoolTest::gt) { 2138 // Convert X <= Y to X < Y+1 2139 limit = new AddINode( limit, one ); 2140 register_new_node( limit, pre_ctrl ); 2141 } 2142 // Fall into LT case 2143 case BoolTest::lt: 2144 // The underflow and overflow limits: MIN_INT <= scale*I+offset < limit 2145 // Note: (MIN_INT+1 == -MAX_INT) is used instead of MIN_INT here 2146 // to avoid problem with scale == -1: MIN_INT/(-1) == MIN_INT. 2147 add_constraint( stride_con, scale_con, offset, mini, limit, pre_ctrl, &pre_limit, &main_limit ); 2148 if (!conditional_rc) { 2149 // ((MIN_INT+1)-offset)/scale could be outside of loop iterations range. 2150 // Note: negative offset is replaced with 0 but (MIN_INT+1)/scale could 2151 // still be outside of loop range. 2152 conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck; 2153 } 2154 break; 2155 default: 2156 #ifndef PRODUCT 2157 if( PrintOpto ) 2158 tty->print_cr("missed RCE opportunity"); 2159 #endif 2160 continue; // Unhandled case 2161 } 2162 } 2163 2164 // Kill the eliminated test 2165 C->set_major_progress(); 2166 Node *kill_con = _igvn.intcon( 1-flip ); 2167 set_ctrl(kill_con, C->root()); 2168 _igvn.replace_input_of(iff, 1, kill_con); 2169 // Find surviving projection 2170 assert(iff->is_If(), ""); 2171 ProjNode* dp = ((IfNode*)iff)->proj_out(1-flip); 2172 // Find loads off the surviving projection; remove their control edge 2173 for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) { 2174 Node* cd = dp->fast_out(i); // Control-dependent node 2175 if (cd->is_Load() && cd->depends_only_on_test()) { // Loads can now float around in the loop 2176 // Allow the load to float around in the loop, or before it 2177 // but NOT before the pre-loop. 2178 _igvn.replace_input_of(cd, 0, ctrl); // ctrl, not NULL 2179 --i; 2180 --imax; 2181 } 2182 } 2183 2184 } // End of is IF 2185 2186 } 2187 2188 // Update loop limits 2189 if (conditional_rc) { 2190 pre_limit = (stride_con > 0) ? (Node*)new MinINode(pre_limit, orig_limit) 2191 : (Node*)new MaxINode(pre_limit, orig_limit); 2192 register_new_node(pre_limit, pre_ctrl); 2193 } 2194 _igvn.replace_input_of(pre_opaq, 1, pre_limit); 2195 2196 // Note:: we are making the main loop limit no longer precise; 2197 // need to round up based on stride. 2198 cl->set_nonexact_trip_count(); 2199 if (!LoopLimitCheck && stride_con != 1 && stride_con != -1) { // Cutout for common case 2200 // "Standard" round-up logic: ([main_limit-init+(y-1)]/y)*y+init 2201 // Hopefully, compiler will optimize for powers of 2. 2202 Node *ctrl = get_ctrl(main_limit); 2203 Node *stride = cl->stride(); 2204 Node *init = cl->init_trip()->uncast(); 2205 Node *span = new SubINode(main_limit,init); 2206 register_new_node(span,ctrl); 2207 Node *rndup = _igvn.intcon(stride_con + ((stride_con>0)?-1:1)); 2208 Node *add = new AddINode(span,rndup); 2209 register_new_node(add,ctrl); 2210 Node *div = new DivINode(0,add,stride); 2211 register_new_node(div,ctrl); 2212 Node *mul = new MulINode(div,stride); 2213 register_new_node(mul,ctrl); 2214 Node *newlim = new AddINode(mul,init); 2215 register_new_node(newlim,ctrl); 2216 main_limit = newlim; 2217 } 2218 2219 Node *main_cle = cl->loopexit(); 2220 Node *main_bol = main_cle->in(1); 2221 // Hacking loop bounds; need private copies of exit test 2222 if( main_bol->outcnt() > 1 ) {// BoolNode shared? 2223 main_bol = main_bol->clone();// Clone a private BoolNode 2224 register_new_node( main_bol, main_cle->in(0) ); 2225 _igvn.replace_input_of(main_cle, 1, main_bol); 2226 } 2227 Node *main_cmp = main_bol->in(1); 2228 if( main_cmp->outcnt() > 1 ) { // CmpNode shared? 2229 main_cmp = main_cmp->clone();// Clone a private CmpNode 2230 register_new_node( main_cmp, main_cle->in(0) ); 2231 _igvn.replace_input_of(main_bol, 1, main_cmp); 2232 } 2233 // Hack the now-private loop bounds 2234 _igvn.replace_input_of(main_cmp, 2, main_limit); 2235 // The OpaqueNode is unshared by design 2236 assert( opqzm->outcnt() == 1, "cannot hack shared node" ); 2237 _igvn.replace_input_of(opqzm, 1, main_limit); 2238 } 2239 2240 //------------------------------DCE_loop_body---------------------------------- 2241 // Remove simplistic dead code from loop body 2242 void IdealLoopTree::DCE_loop_body() { 2243 for( uint i = 0; i < _body.size(); i++ ) 2244 if( _body.at(i)->outcnt() == 0 ) 2245 _body.map( i--, _body.pop() ); 2246 } 2247 2248 2249 //------------------------------adjust_loop_exit_prob-------------------------- 2250 // Look for loop-exit tests with the 50/50 (or worse) guesses from the parsing stage. 2251 // Replace with a 1-in-10 exit guess. 2252 void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) { 2253 Node *test = tail(); 2254 while( test != _head ) { 2255 uint top = test->Opcode(); 2256 if( top == Op_IfTrue || top == Op_IfFalse ) { 2257 int test_con = ((ProjNode*)test)->_con; 2258 assert(top == (uint)(test_con? Op_IfTrue: Op_IfFalse), "sanity"); 2259 IfNode *iff = test->in(0)->as_If(); 2260 if( iff->outcnt() == 2 ) { // Ignore dead tests 2261 Node *bol = iff->in(1); 2262 if( bol && bol->req() > 1 && bol->in(1) && 2263 ((bol->in(1)->Opcode() == Op_StorePConditional ) || 2264 (bol->in(1)->Opcode() == Op_StoreIConditional ) || 2265 (bol->in(1)->Opcode() == Op_StoreLConditional ) || 2266 (bol->in(1)->Opcode() == Op_CompareAndSwapI ) || 2267 (bol->in(1)->Opcode() == Op_CompareAndSwapL ) || 2268 (bol->in(1)->Opcode() == Op_CompareAndSwapP ) || 2269 (bol->in(1)->Opcode() == Op_CompareAndSwapN ))) 2270 return; // Allocation loops RARELY take backedge 2271 // Find the OTHER exit path from the IF 2272 Node* ex = iff->proj_out(1-test_con); 2273 float p = iff->_prob; 2274 if( !phase->is_member( this, ex ) && iff->_fcnt == COUNT_UNKNOWN ) { 2275 if( top == Op_IfTrue ) { 2276 if( p < (PROB_FAIR + PROB_UNLIKELY_MAG(3))) { 2277 iff->_prob = PROB_STATIC_FREQUENT; 2278 } 2279 } else { 2280 if( p > (PROB_FAIR - PROB_UNLIKELY_MAG(3))) { 2281 iff->_prob = PROB_STATIC_INFREQUENT; 2282 } 2283 } 2284 } 2285 } 2286 } 2287 test = phase->idom(test); 2288 } 2289 } 2290 2291 #ifdef ASSERT 2292 static CountedLoopNode* locate_pre_from_main(CountedLoopNode *cl) { 2293 Node *ctrl = cl->in(LoopNode::EntryControl); 2294 assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, ""); 2295 Node *iffm = ctrl->in(0); 2296 assert(iffm->Opcode() == Op_If, ""); 2297 Node *p_f = iffm->in(0); 2298 assert(p_f->Opcode() == Op_IfFalse, ""); 2299 CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd(); 2300 assert(pre_end->loopnode()->is_pre_loop(), ""); 2301 return pre_end->loopnode(); 2302 } 2303 #endif 2304 2305 // Remove the main and post loops and make the pre loop execute all 2306 // iterations. Useful when the pre loop is found empty. 2307 void IdealLoopTree::remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *phase) { 2308 CountedLoopEndNode* pre_end = cl->loopexit(); 2309 Node* pre_cmp = pre_end->cmp_node(); 2310 if (pre_cmp->in(2)->Opcode() != Op_Opaque1) { 2311 // Only safe to remove the main loop if the compiler optimized it 2312 // out based on an unknown number of iterations 2313 return; 2314 } 2315 2316 // Can we find the main loop? 2317 if (_next == NULL) { 2318 return; 2319 } 2320 2321 Node* next_head = _next->_head; 2322 if (!next_head->is_CountedLoop()) { 2323 return; 2324 } 2325 2326 CountedLoopNode* main_head = next_head->as_CountedLoop(); 2327 if (!main_head->is_main_loop()) { 2328 return; 2329 } 2330 2331 assert(locate_pre_from_main(main_head) == cl, "bad main loop"); 2332 Node* main_iff = main_head->in(LoopNode::EntryControl)->in(0); 2333 2334 // Remove the Opaque1Node of the pre loop and make it execute all iterations 2335 phase->_igvn.replace_input_of(pre_cmp, 2, pre_cmp->in(2)->in(2)); 2336 // Remove the Opaque1Node of the main loop so it can be optimized out 2337 Node* main_cmp = main_iff->in(1)->in(1); 2338 assert(main_cmp->in(2)->Opcode() == Op_Opaque1, "main loop has no opaque node?"); 2339 phase->_igvn.replace_input_of(main_cmp, 2, main_cmp->in(2)->in(1)); 2340 } 2341 2342 //------------------------------policy_do_remove_empty_loop-------------------- 2343 // Micro-benchmark spamming. Policy is to always remove empty loops. 2344 // The 'DO' part is to replace the trip counter with the value it will 2345 // have on the last iteration. This will break the loop. 2346 bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) { 2347 // Minimum size must be empty loop 2348 if (_body.size() > EMPTY_LOOP_SIZE) 2349 return false; 2350 2351 if (!_head->is_CountedLoop()) 2352 return false; // Dead loop 2353 CountedLoopNode *cl = _head->as_CountedLoop(); 2354 if (!cl->is_valid_counted_loop()) 2355 return false; // Malformed loop 2356 if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue)))) 2357 return false; // Infinite loop 2358 2359 if (cl->is_pre_loop()) { 2360 // If the loop we are removing is a pre-loop then the main and 2361 // post loop can be removed as well 2362 remove_main_post_loops(cl, phase); 2363 } 2364 2365 #ifdef ASSERT 2366 // Ensure only one phi which is the iv. 2367 Node* iv = NULL; 2368 for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) { 2369 Node* n = cl->fast_out(i); 2370 if (n->Opcode() == Op_Phi) { 2371 assert(iv == NULL, "Too many phis" ); 2372 iv = n; 2373 } 2374 } 2375 assert(iv == cl->phi(), "Wrong phi" ); 2376 #endif 2377 2378 // main and post loops have explicitly created zero trip guard 2379 bool needs_guard = !cl->is_main_loop() && !cl->is_post_loop(); 2380 if (needs_guard) { 2381 // Skip guard if values not overlap. 2382 const TypeInt* init_t = phase->_igvn.type(cl->init_trip())->is_int(); 2383 const TypeInt* limit_t = phase->_igvn.type(cl->limit())->is_int(); 2384 int stride_con = cl->stride_con(); 2385 if (stride_con > 0) { 2386 needs_guard = (init_t->_hi >= limit_t->_lo); 2387 } else { 2388 needs_guard = (init_t->_lo <= limit_t->_hi); 2389 } 2390 } 2391 if (needs_guard) { 2392 // Check for an obvious zero trip guard. 2393 Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->in(LoopNode::EntryControl)); 2394 if (inctrl->Opcode() == Op_IfTrue) { 2395 // The test should look like just the backedge of a CountedLoop 2396 Node* iff = inctrl->in(0); 2397 if (iff->is_If()) { 2398 Node* bol = iff->in(1); 2399 if (bol->is_Bool() && bol->as_Bool()->_test._test == cl->loopexit()->test_trip()) { 2400 Node* cmp = bol->in(1); 2401 if (cmp->is_Cmp() && cmp->in(1) == cl->init_trip() && cmp->in(2) == cl->limit()) { 2402 needs_guard = false; 2403 } 2404 } 2405 } 2406 } 2407 } 2408 2409 #ifndef PRODUCT 2410 if (PrintOpto) { 2411 tty->print("Removing empty loop with%s zero trip guard", needs_guard ? "out" : ""); 2412 this->dump_head(); 2413 } else if (TraceLoopOpts) { 2414 tty->print("Empty with%s zero trip guard ", needs_guard ? "out" : ""); 2415 this->dump_head(); 2416 } 2417 #endif 2418 2419 if (needs_guard) { 2420 // Peel the loop to ensure there's a zero trip guard 2421 Node_List old_new; 2422 phase->do_peeling(this, old_new); 2423 } 2424 2425 // Replace the phi at loop head with the final value of the last 2426 // iteration. Then the CountedLoopEnd will collapse (backedge never 2427 // taken) and all loop-invariant uses of the exit values will be correct. 2428 Node *phi = cl->phi(); 2429 Node *exact_limit = phase->exact_limit(this); 2430 if (exact_limit != cl->limit()) { 2431 // We also need to replace the original limit to collapse loop exit. 2432 Node* cmp = cl->loopexit()->cmp_node(); 2433 assert(cl->limit() == cmp->in(2), "sanity"); 2434 phase->_igvn._worklist.push(cmp->in(2)); // put limit on worklist 2435 phase->_igvn.replace_input_of(cmp, 2, exact_limit); // put cmp on worklist 2436 } 2437 // Note: the final value after increment should not overflow since 2438 // counted loop has limit check predicate. 2439 Node *final = new SubINode( exact_limit, cl->stride() ); 2440 phase->register_new_node(final,cl->in(LoopNode::EntryControl)); 2441 phase->_igvn.replace_node(phi,final); 2442 phase->C->set_major_progress(); 2443 return true; 2444 } 2445 2446 //------------------------------policy_do_one_iteration_loop------------------- 2447 // Convert one iteration loop into normal code. 2448 bool IdealLoopTree::policy_do_one_iteration_loop( PhaseIdealLoop *phase ) { 2449 if (!_head->as_Loop()->is_valid_counted_loop()) 2450 return false; // Only for counted loop 2451 2452 CountedLoopNode *cl = _head->as_CountedLoop(); 2453 if (!cl->has_exact_trip_count() || cl->trip_count() != 1) { 2454 return false; 2455 } 2456 2457 #ifndef PRODUCT 2458 if(TraceLoopOpts) { 2459 tty->print("OneIteration "); 2460 this->dump_head(); 2461 } 2462 #endif 2463 2464 Node *init_n = cl->init_trip(); 2465 #ifdef ASSERT 2466 // Loop boundaries should be constant since trip count is exact. 2467 assert(init_n->get_int() + cl->stride_con() >= cl->limit()->get_int(), "should be one iteration"); 2468 #endif 2469 // Replace the phi at loop head with the value of the init_trip. 2470 // Then the CountedLoopEnd will collapse (backedge will not be taken) 2471 // and all loop-invariant uses of the exit values will be correct. 2472 phase->_igvn.replace_node(cl->phi(), cl->init_trip()); 2473 phase->C->set_major_progress(); 2474 return true; 2475 } 2476 2477 //============================================================================= 2478 //------------------------------iteration_split_impl--------------------------- 2479 bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) { 2480 // Compute exact loop trip count if possible. 2481 compute_exact_trip_count(phase); 2482 2483 // Convert one iteration loop into normal code. 2484 if (policy_do_one_iteration_loop(phase)) 2485 return true; 2486 2487 // Check and remove empty loops (spam micro-benchmarks) 2488 if (policy_do_remove_empty_loop(phase)) 2489 return true; // Here we removed an empty loop 2490 2491 bool should_peel = policy_peeling(phase); // Should we peel? 2492 2493 bool should_unswitch = policy_unswitching(phase); 2494 2495 // Non-counted loops may be peeled; exactly 1 iteration is peeled. 2496 // This removes loop-invariant tests (usually null checks). 2497 if (!_head->is_CountedLoop()) { // Non-counted loop 2498 if (PartialPeelLoop && phase->partial_peel(this, old_new)) { 2499 // Partial peel succeeded so terminate this round of loop opts 2500 return false; 2501 } 2502 if (should_peel) { // Should we peel? 2503 #ifndef PRODUCT 2504 if (PrintOpto) tty->print_cr("should_peel"); 2505 #endif 2506 phase->do_peeling(this,old_new); 2507 } else if (should_unswitch) { 2508 phase->do_unswitching(this, old_new); 2509 } 2510 return true; 2511 } 2512 CountedLoopNode *cl = _head->as_CountedLoop(); 2513 2514 if (!cl->is_valid_counted_loop()) return true; // Ignore various kinds of broken loops 2515 2516 // Do nothing special to pre- and post- loops 2517 if (cl->is_pre_loop() || cl->is_post_loop()) return true; 2518 2519 // Compute loop trip count from profile data 2520 compute_profile_trip_cnt(phase); 2521 2522 // Before attempting fancy unrolling, RCE or alignment, see if we want 2523 // to completely unroll this loop or do loop unswitching. 2524 if (cl->is_normal_loop()) { 2525 if (should_unswitch) { 2526 phase->do_unswitching(this, old_new); 2527 return true; 2528 } 2529 bool should_maximally_unroll = policy_maximally_unroll(phase); 2530 if (should_maximally_unroll) { 2531 // Here we did some unrolling and peeling. Eventually we will 2532 // completely unroll this loop and it will no longer be a loop. 2533 phase->do_maximally_unroll(this,old_new); 2534 return true; 2535 } 2536 } 2537 2538 // Skip next optimizations if running low on nodes. Note that 2539 // policy_unswitching and policy_maximally_unroll have this check. 2540 int nodes_left = phase->C->max_node_limit() - phase->C->live_nodes(); 2541 if ((int)(2 * _body.size()) > nodes_left) { 2542 return true; 2543 } 2544 2545 // Counted loops may be peeled, may need some iterations run up 2546 // front for RCE, and may want to align loop refs to a cache 2547 // line. Thus we clone a full loop up front whose trip count is 2548 // at least 1 (if peeling), but may be several more. 2549 2550 // The main loop will start cache-line aligned with at least 1 2551 // iteration of the unrolled body (zero-trip test required) and 2552 // will have some range checks removed. 2553 2554 // A post-loop will finish any odd iterations (leftover after 2555 // unrolling), plus any needed for RCE purposes. 2556 2557 bool should_unroll = policy_unroll(phase); 2558 2559 bool should_rce = policy_range_check(phase); 2560 2561 bool should_align = policy_align(phase); 2562 2563 // If not RCE'ing (iteration splitting) or Aligning, then we do not 2564 // need a pre-loop. We may still need to peel an initial iteration but 2565 // we will not be needing an unknown number of pre-iterations. 2566 // 2567 // Basically, if may_rce_align reports FALSE first time through, 2568 // we will not be able to later do RCE or Aligning on this loop. 2569 bool may_rce_align = !policy_peel_only(phase) || should_rce || should_align; 2570 2571 // If we have any of these conditions (RCE, alignment, unrolling) met, then 2572 // we switch to the pre-/main-/post-loop model. This model also covers 2573 // peeling. 2574 if (should_rce || should_align || should_unroll) { 2575 if (cl->is_normal_loop()) // Convert to 'pre/main/post' loops 2576 phase->insert_pre_post_loops(this,old_new, !may_rce_align); 2577 2578 // Adjust the pre- and main-loop limits to let the pre and post loops run 2579 // with full checks, but the main-loop with no checks. Remove said 2580 // checks from the main body. 2581 if (should_rce) 2582 phase->do_range_check(this,old_new); 2583 2584 // Double loop body for unrolling. Adjust the minimum-trip test (will do 2585 // twice as many iterations as before) and the main body limit (only do 2586 // an even number of trips). If we are peeling, we might enable some RCE 2587 // and we'd rather unroll the post-RCE'd loop SO... do not unroll if 2588 // peeling. 2589 if (should_unroll && !should_peel) { 2590 phase->do_unroll(this, old_new, true); 2591 } 2592 2593 // Adjust the pre-loop limits to align the main body 2594 // iterations. 2595 if (should_align) 2596 Unimplemented(); 2597 2598 } else { // Else we have an unchanged counted loop 2599 if (should_peel) // Might want to peel but do nothing else 2600 phase->do_peeling(this,old_new); 2601 } 2602 return true; 2603 } 2604 2605 2606 //============================================================================= 2607 //------------------------------iteration_split-------------------------------- 2608 bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new ) { 2609 // Recursively iteration split nested loops 2610 if (_child && !_child->iteration_split(phase, old_new)) 2611 return false; 2612 2613 // Clean out prior deadwood 2614 DCE_loop_body(); 2615 2616 2617 // Look for loop-exit tests with my 50/50 guesses from the Parsing stage. 2618 // Replace with a 1-in-10 exit guess. 2619 if (_parent /*not the root loop*/ && 2620 !_irreducible && 2621 // Also ignore the occasional dead backedge 2622 !tail()->is_top()) { 2623 adjust_loop_exit_prob(phase); 2624 } 2625 2626 // Gate unrolling, RCE and peeling efforts. 2627 if (!_child && // If not an inner loop, do not split 2628 !_irreducible && 2629 _allow_optimizations && 2630 !tail()->is_top()) { // Also ignore the occasional dead backedge 2631 if (!_has_call) { 2632 if (!iteration_split_impl(phase, old_new)) { 2633 return false; 2634 } 2635 } else if (policy_unswitching(phase)) { 2636 phase->do_unswitching(this, old_new); 2637 } 2638 } 2639 2640 // Minor offset re-organization to remove loop-fallout uses of 2641 // trip counter when there was no major reshaping. 2642 phase->reorg_offsets(this); 2643 2644 if (_next && !_next->iteration_split(phase, old_new)) 2645 return false; 2646 return true; 2647 } 2648 2649 2650 //============================================================================= 2651 // Process all the loops in the loop tree and replace any fill 2652 // patterns with an intrisc version. 2653 bool PhaseIdealLoop::do_intrinsify_fill() { 2654 bool changed = false; 2655 for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) { 2656 IdealLoopTree* lpt = iter.current(); 2657 changed |= intrinsify_fill(lpt); 2658 } 2659 return changed; 2660 } 2661 2662 2663 // Examine an inner loop looking for a a single store of an invariant 2664 // value in a unit stride loop, 2665 bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value, 2666 Node*& shift, Node*& con) { 2667 const char* msg = NULL; 2668 Node* msg_node = NULL; 2669 2670 store_value = NULL; 2671 con = NULL; 2672 shift = NULL; 2673 2674 // Process the loop looking for stores. If there are multiple 2675 // stores or extra control flow give at this point. 2676 CountedLoopNode* head = lpt->_head->as_CountedLoop(); 2677 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { 2678 Node* n = lpt->_body.at(i); 2679 if (n->outcnt() == 0) continue; // Ignore dead 2680 if (n->is_Store()) { 2681 if (store != NULL) { 2682 msg = "multiple stores"; 2683 break; 2684 } 2685 int opc = n->Opcode(); 2686 if (opc == Op_StoreP || opc == Op_StoreN || opc == Op_StoreNKlass || opc == Op_StoreCM) { 2687 msg = "oop fills not handled"; 2688 break; 2689 } 2690 Node* value = n->in(MemNode::ValueIn); 2691 if (!lpt->is_invariant(value)) { 2692 msg = "variant store value"; 2693 } else if (!_igvn.type(n->in(MemNode::Address))->isa_aryptr()) { 2694 msg = "not array address"; 2695 } 2696 store = n; 2697 store_value = value; 2698 } else if (n->is_If() && n != head->loopexit()) { 2699 msg = "extra control flow"; 2700 msg_node = n; 2701 } 2702 } 2703 2704 if (store == NULL) { 2705 // No store in loop 2706 return false; 2707 } 2708 2709 if (msg == NULL && head->stride_con() != 1) { 2710 // could handle negative strides too 2711 if (head->stride_con() < 0) { 2712 msg = "negative stride"; 2713 } else { 2714 msg = "non-unit stride"; 2715 } 2716 } 2717 2718 if (msg == NULL && !store->in(MemNode::Address)->is_AddP()) { 2719 msg = "can't handle store address"; 2720 msg_node = store->in(MemNode::Address); 2721 } 2722 2723 if (msg == NULL && 2724 (!store->in(MemNode::Memory)->is_Phi() || 2725 store->in(MemNode::Memory)->in(LoopNode::LoopBackControl) != store)) { 2726 msg = "store memory isn't proper phi"; 2727 msg_node = store->in(MemNode::Memory); 2728 } 2729 2730 // Make sure there is an appropriate fill routine 2731 BasicType t = store->as_Mem()->memory_type(); 2732 const char* fill_name; 2733 if (msg == NULL && 2734 StubRoutines::select_fill_function(t, false, fill_name) == NULL) { 2735 msg = "unsupported store"; 2736 msg_node = store; 2737 } 2738 2739 if (msg != NULL) { 2740 #ifndef PRODUCT 2741 if (TraceOptimizeFill) { 2742 tty->print_cr("not fill intrinsic candidate: %s", msg); 2743 if (msg_node != NULL) msg_node->dump(); 2744 } 2745 #endif 2746 return false; 2747 } 2748 2749 // Make sure the address expression can be handled. It should be 2750 // head->phi * elsize + con. head->phi might have a ConvI2L. 2751 Node* elements[4]; 2752 Node* conv = NULL; 2753 bool found_index = false; 2754 int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements)); 2755 for (int e = 0; e < count; e++) { 2756 Node* n = elements[e]; 2757 if (n->is_Con() && con == NULL) { 2758 con = n; 2759 } else if (n->Opcode() == Op_LShiftX && shift == NULL) { 2760 Node* value = n->in(1); 2761 #ifdef _LP64 2762 if (value->Opcode() == Op_ConvI2L) { 2763 conv = value; 2764 value = value->in(1); 2765 } 2766 #endif 2767 if (value != head->phi()) { 2768 msg = "unhandled shift in address"; 2769 } else { 2770 if (type2aelembytes(store->as_Mem()->memory_type(), true) != (1 << n->in(2)->get_int())) { 2771 msg = "scale doesn't match"; 2772 } else { 2773 found_index = true; 2774 shift = n; 2775 } 2776 } 2777 } else if (n->Opcode() == Op_ConvI2L && conv == NULL) { 2778 if (n->in(1) == head->phi()) { 2779 found_index = true; 2780 conv = n; 2781 } else { 2782 msg = "unhandled input to ConvI2L"; 2783 } 2784 } else if (n == head->phi()) { 2785 // no shift, check below for allowed cases 2786 found_index = true; 2787 } else { 2788 msg = "unhandled node in address"; 2789 msg_node = n; 2790 } 2791 } 2792 2793 if (count == -1) { 2794 msg = "malformed address expression"; 2795 msg_node = store; 2796 } 2797 2798 if (!found_index) { 2799 msg = "missing use of index"; 2800 } 2801 2802 // byte sized items won't have a shift 2803 if (msg == NULL && shift == NULL && t != T_BYTE && t != T_BOOLEAN) { 2804 msg = "can't find shift"; 2805 msg_node = store; 2806 } 2807 2808 if (msg != NULL) { 2809 #ifndef PRODUCT 2810 if (TraceOptimizeFill) { 2811 tty->print_cr("not fill intrinsic: %s", msg); 2812 if (msg_node != NULL) msg_node->dump(); 2813 } 2814 #endif 2815 return false; 2816 } 2817 2818 // No make sure all the other nodes in the loop can be handled 2819 VectorSet ok(Thread::current()->resource_area()); 2820 2821 // store related values are ok 2822 ok.set(store->_idx); 2823 ok.set(store->in(MemNode::Memory)->_idx); 2824 2825 CountedLoopEndNode* loop_exit = head->loopexit(); 2826 guarantee(loop_exit != NULL, "no loop exit node"); 2827 2828 // Loop structure is ok 2829 ok.set(head->_idx); 2830 ok.set(loop_exit->_idx); 2831 ok.set(head->phi()->_idx); 2832 ok.set(head->incr()->_idx); 2833 ok.set(loop_exit->cmp_node()->_idx); 2834 ok.set(loop_exit->in(1)->_idx); 2835 2836 // Address elements are ok 2837 if (con) ok.set(con->_idx); 2838 if (shift) ok.set(shift->_idx); 2839 if (conv) ok.set(conv->_idx); 2840 2841 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { 2842 Node* n = lpt->_body.at(i); 2843 if (n->outcnt() == 0) continue; // Ignore dead 2844 if (ok.test(n->_idx)) continue; 2845 // Backedge projection is ok 2846 if (n->is_IfTrue() && n->in(0) == loop_exit) continue; 2847 if (!n->is_AddP()) { 2848 msg = "unhandled node"; 2849 msg_node = n; 2850 break; 2851 } 2852 } 2853 2854 // Make sure no unexpected values are used outside the loop 2855 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { 2856 Node* n = lpt->_body.at(i); 2857 // These values can be replaced with other nodes if they are used 2858 // outside the loop. 2859 if (n == store || n == loop_exit || n == head->incr() || n == store->in(MemNode::Memory)) continue; 2860 for (SimpleDUIterator iter(n); iter.has_next(); iter.next()) { 2861 Node* use = iter.get(); 2862 if (!lpt->_body.contains(use)) { 2863 msg = "node is used outside loop"; 2864 // lpt->_body.dump(); 2865 msg_node = n; 2866 break; 2867 } 2868 } 2869 } 2870 2871 #ifdef ASSERT 2872 if (TraceOptimizeFill) { 2873 if (msg != NULL) { 2874 tty->print_cr("no fill intrinsic: %s", msg); 2875 if (msg_node != NULL) msg_node->dump(); 2876 } else { 2877 tty->print_cr("fill intrinsic for:"); 2878 } 2879 store->dump(); 2880 if (Verbose) { 2881 lpt->_body.dump(); 2882 } 2883 } 2884 #endif 2885 2886 return msg == NULL; 2887 } 2888 2889 2890 2891 bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) { 2892 // Only for counted inner loops 2893 if (!lpt->is_counted() || !lpt->is_inner()) { 2894 return false; 2895 } 2896 2897 // Must have constant stride 2898 CountedLoopNode* head = lpt->_head->as_CountedLoop(); 2899 if (!head->is_valid_counted_loop() || !head->is_normal_loop()) { 2900 return false; 2901 } 2902 2903 // Check that the body only contains a store of a loop invariant 2904 // value that is indexed by the loop phi. 2905 Node* store = NULL; 2906 Node* store_value = NULL; 2907 Node* shift = NULL; 2908 Node* offset = NULL; 2909 if (!match_fill_loop(lpt, store, store_value, shift, offset)) { 2910 return false; 2911 } 2912 2913 #ifndef PRODUCT 2914 if (TraceLoopOpts) { 2915 tty->print("ArrayFill "); 2916 lpt->dump_head(); 2917 } 2918 #endif 2919 2920 // Now replace the whole loop body by a call to a fill routine that 2921 // covers the same region as the loop. 2922 Node* base = store->in(MemNode::Address)->as_AddP()->in(AddPNode::Base); 2923 2924 // Build an expression for the beginning of the copy region 2925 Node* index = head->init_trip(); 2926 #ifdef _LP64 2927 index = new ConvI2LNode(index); 2928 _igvn.register_new_node_with_optimizer(index); 2929 #endif 2930 if (shift != NULL) { 2931 // byte arrays don't require a shift but others do. 2932 index = new LShiftXNode(index, shift->in(2)); 2933 _igvn.register_new_node_with_optimizer(index); 2934 } 2935 index = new AddPNode(base, base, index); 2936 _igvn.register_new_node_with_optimizer(index); 2937 Node* from = new AddPNode(base, index, offset); 2938 _igvn.register_new_node_with_optimizer(from); 2939 // Compute the number of elements to copy 2940 Node* len = new SubINode(head->limit(), head->init_trip()); 2941 _igvn.register_new_node_with_optimizer(len); 2942 2943 BasicType t = store->as_Mem()->memory_type(); 2944 bool aligned = false; 2945 if (offset != NULL && head->init_trip()->is_Con()) { 2946 int element_size = type2aelembytes(t); 2947 aligned = (offset->find_intptr_t_type()->get_con() + head->init_trip()->get_int() * element_size) % HeapWordSize == 0; 2948 } 2949 2950 // Build a call to the fill routine 2951 const char* fill_name; 2952 address fill = StubRoutines::select_fill_function(t, aligned, fill_name); 2953 assert(fill != NULL, "what?"); 2954 2955 // Convert float/double to int/long for fill routines 2956 if (t == T_FLOAT) { 2957 store_value = new MoveF2INode(store_value); 2958 _igvn.register_new_node_with_optimizer(store_value); 2959 } else if (t == T_DOUBLE) { 2960 store_value = new MoveD2LNode(store_value); 2961 _igvn.register_new_node_with_optimizer(store_value); 2962 } 2963 2964 if (CCallingConventionRequiresIntsAsLongs && 2965 // See StubRoutines::select_fill_function for types. FLOAT has been converted to INT. 2966 (t == T_FLOAT || t == T_INT || is_subword_type(t))) { 2967 store_value = new ConvI2LNode(store_value); 2968 _igvn.register_new_node_with_optimizer(store_value); 2969 } 2970 2971 Node* mem_phi = store->in(MemNode::Memory); 2972 Node* result_ctrl; 2973 Node* result_mem; 2974 const TypeFunc* call_type = OptoRuntime::array_fill_Type(); 2975 CallLeafNode *call = new CallLeafNoFPNode(call_type, fill, 2976 fill_name, TypeAryPtr::get_array_body_type(t)); 2977 uint cnt = 0; 2978 call->init_req(TypeFunc::Parms + cnt++, from); 2979 call->init_req(TypeFunc::Parms + cnt++, store_value); 2980 if (CCallingConventionRequiresIntsAsLongs) { 2981 call->init_req(TypeFunc::Parms + cnt++, C->top()); 2982 } 2983 #ifdef _LP64 2984 len = new ConvI2LNode(len); 2985 _igvn.register_new_node_with_optimizer(len); 2986 #endif 2987 call->init_req(TypeFunc::Parms + cnt++, len); 2988 #ifdef _LP64 2989 call->init_req(TypeFunc::Parms + cnt++, C->top()); 2990 #endif 2991 call->init_req(TypeFunc::Control, head->init_control()); 2992 call->init_req(TypeFunc::I_O, C->top()); // Does no I/O. 2993 call->init_req(TypeFunc::Memory, mem_phi->in(LoopNode::EntryControl)); 2994 call->init_req(TypeFunc::ReturnAdr, C->start()->proj_out(TypeFunc::ReturnAdr)); 2995 call->init_req(TypeFunc::FramePtr, C->start()->proj_out(TypeFunc::FramePtr)); 2996 _igvn.register_new_node_with_optimizer(call); 2997 result_ctrl = new ProjNode(call,TypeFunc::Control); 2998 _igvn.register_new_node_with_optimizer(result_ctrl); 2999 result_mem = new ProjNode(call,TypeFunc::Memory); 3000 _igvn.register_new_node_with_optimizer(result_mem); 3001 3002 /* Disable following optimization until proper fix (add missing checks). 3003 3004 // If this fill is tightly coupled to an allocation and overwrites 3005 // the whole body, allow it to take over the zeroing. 3006 AllocateNode* alloc = AllocateNode::Ideal_allocation(base, this); 3007 if (alloc != NULL && alloc->is_AllocateArray()) { 3008 Node* length = alloc->as_AllocateArray()->Ideal_length(); 3009 if (head->limit() == length && 3010 head->init_trip() == _igvn.intcon(0)) { 3011 if (TraceOptimizeFill) { 3012 tty->print_cr("Eliminated zeroing in allocation"); 3013 } 3014 alloc->maybe_set_complete(&_igvn); 3015 } else { 3016 #ifdef ASSERT 3017 if (TraceOptimizeFill) { 3018 tty->print_cr("filling array but bounds don't match"); 3019 alloc->dump(); 3020 head->init_trip()->dump(); 3021 head->limit()->dump(); 3022 length->dump(); 3023 } 3024 #endif 3025 } 3026 } 3027 */ 3028 3029 // Redirect the old control and memory edges that are outside the loop. 3030 Node* exit = head->loopexit()->proj_out(0); 3031 // Sometimes the memory phi of the head is used as the outgoing 3032 // state of the loop. It's safe in this case to replace it with the 3033 // result_mem. 3034 _igvn.replace_node(store->in(MemNode::Memory), result_mem); 3035 _igvn.replace_node(exit, result_ctrl); 3036 _igvn.replace_node(store, result_mem); 3037 // Any uses the increment outside of the loop become the loop limit. 3038 _igvn.replace_node(head->incr(), head->limit()); 3039 3040 // Disconnect the head from the loop. 3041 for (uint i = 0; i < lpt->_body.size(); i++) { 3042 Node* n = lpt->_body.at(i); 3043 _igvn.replace_node(n, C->top()); 3044 } 3045 3046 return true; 3047 }