/*
 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"

//------------------------------is_loop_exit-----------------------------------
// Given an IfNode, return the loop-exiting projection or NULL if both
// arms remain in the loop.
Node *IdealLoopTree::is_loop_exit(Node *iff) const {
  if( iff->outcnt() != 2 ) return NULL;  // Ignore partially dead tests
  PhaseIdealLoop *phase = _phase;
  // Test is an IfNode, has 2 projections.  If BOTH are in the loop
  // we need loop unswitching instead of peeling.
  if( !is_member(phase->get_loop( iff->raw_out(0) )) )
    return iff->raw_out(0);
  if( !is_member(phase->get_loop( iff->raw_out(1) )) )
    return iff->raw_out(1);
  return NULL;
}


//=============================================================================


//------------------------------record_for_igvn----------------------------
// Put loop body on igvn work list
void IdealLoopTree::record_for_igvn() {
  for( uint i = 0; i < _body.size(); i++ ) {
    Node *n = _body.at(i);
    _phase->_igvn._worklist.push(n);
  }
}

//------------------------------compute_exact_trip_count-----------------------
// Compute loop exact trip count if possible. Do not recalculate trip count for
// split loops (pre-main-post) which have their limits and inits behind Opaque node.
void IdealLoopTree::compute_exact_trip_count( PhaseIdealLoop *phase ) {
  if (!_head->as_Loop()->is_valid_counted_loop()) {
    return;
  }
  CountedLoopNode* cl = _head->as_CountedLoop();
  // Trip count may become nonexact for iteration split loops since
  // RCE modifies limits. Note, _trip_count value is not reset since
  // it is used to limit unrolling of main loop.
  cl->set_nonexact_trip_count();

  // Loop's test should be part of loop.
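  // (If the exit test's condition is computed entirely outside the loop, there is
  // no loop-varying exit test and the loop is treated as infinite below.)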
  if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
    return; // Infinite loop

#ifdef ASSERT
  BoolTest::mask bt = cl->loopexit()->test_trip();
  assert(bt == BoolTest::lt || bt == BoolTest::gt ||
         (bt == BoolTest::ne && !LoopLimitCheck), "canonical test is expected");
#endif

  Node* init_n = cl->init_trip();
  Node* limit_n = cl->limit();
  if (init_n != NULL && init_n->is_Con() &&
      limit_n != NULL && limit_n->is_Con()) {
    // Use longs to avoid integer overflow.
    int stride_con = cl->stride_con();
    long init_con = cl->init_trip()->get_int();
    long limit_con = cl->limit()->get_int();
    int stride_m = stride_con - (stride_con > 0 ? 1 : -1);
    long trip_count = (limit_con - init_con + stride_m)/stride_con;
    if (trip_count > 0 && (julong)trip_count < (julong)max_juint) {
      // Set exact trip count.
      cl->set_exact_trip_count((uint)trip_count);
    }
  }
}

//------------------------------compute_profile_trip_cnt----------------------------
// Compute loop trip count from profile data as
//    (backedge_count + loop_exit_count) / loop_exit_count
void IdealLoopTree::compute_profile_trip_cnt( PhaseIdealLoop *phase ) {
  if (!_head->is_CountedLoop()) {
    return;
  }
  CountedLoopNode* head = _head->as_CountedLoop();
  if (head->profile_trip_cnt() != COUNT_UNKNOWN) {
    return; // Already computed
  }
  float trip_cnt = (float)max_jint; // default is big

  Node* back = head->in(LoopNode::LoopBackControl);
  while (back != head) {
    if ((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
        back->in(0) &&
        back->in(0)->is_If() &&
        back->in(0)->as_If()->_fcnt != COUNT_UNKNOWN &&
        back->in(0)->as_If()->_prob != PROB_UNKNOWN) {
      break;
    }
    back = phase->idom(back);
  }
  if (back != head) {
    assert((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
           back->in(0), "if-projection exists");
    IfNode* back_if = back->in(0)->as_If();
    float loop_back_cnt = back_if->_fcnt * back_if->_prob;

    // Now compute a loop exit count
    float loop_exit_cnt = 0.0f;
    for( uint i = 0; i < _body.size(); i++ ) {
      Node *n = _body[i];
      if( n->is_If() ) {
        IfNode *iff = n->as_If();
        if( iff->_fcnt != COUNT_UNKNOWN && iff->_prob != PROB_UNKNOWN ) {
          Node *exit = is_loop_exit(iff);
          if( exit ) {
            float exit_prob = iff->_prob;
            if (exit->Opcode() == Op_IfFalse) exit_prob = 1.0 - exit_prob;
            if (exit_prob > PROB_MIN) {
              float exit_cnt = iff->_fcnt * exit_prob;
              loop_exit_cnt += exit_cnt;
            }
          }
        }
      }
    }
    if (loop_exit_cnt > 0.0f) {
      trip_cnt = (loop_back_cnt + loop_exit_cnt) / loop_exit_cnt;
    } else {
      // No exit count, so use the back-edge count alone.
      trip_cnt = loop_back_cnt;
    }
  }
#ifndef PRODUCT
  if (TraceProfileTripCount) {
    tty->print_cr("compute_profile_trip_cnt lp: %d cnt: %f\n", head->_idx, trip_cnt);
  }
#endif
  head->set_profile_trip_cnt(trip_cnt);
}

//---------------------is_invariant_addition-----------------------------
// Return nonzero index of invariant operand for an Add or Sub
// of (nonconstant) invariant and variant values. Helper for reassociate_invariants.
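// For example, if 'inv' is loop invariant and 'x' is not, AddI(inv, x) returns 1
// and AddI(x, inv) returns 2; if neither or both operands are invariant it returns 0.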
int IdealLoopTree::is_invariant_addition(Node* n, PhaseIdealLoop *phase) {
  int op = n->Opcode();
  if (op == Op_AddI || op == Op_SubI) {
    bool in1_invar = this->is_invariant(n->in(1));
    bool in2_invar = this->is_invariant(n->in(2));
    if (in1_invar && !in2_invar) return 1;
    if (!in1_invar && in2_invar) return 2;
  }
  return 0;
}

//---------------------reassociate_add_sub-----------------------------
// Reassociate invariant add and subtract expressions:
//
// inv1 + (x + inv2)  =>  ( inv1 + inv2) + x
// (x + inv2) + inv1  =>  ( inv1 + inv2) + x
// inv1 + (x - inv2)  =>  ( inv1 - inv2) + x
// inv1 - (inv2 - x)  =>  ( inv1 - inv2) + x
// (x + inv2) - inv1  =>  (-inv1 + inv2) + x
// (x - inv2) + inv1  =>  ( inv1 - inv2) + x
// (x - inv2) - inv1  =>  (-inv1 - inv2) + x
// inv1 + (inv2 - x)  =>  ( inv1 + inv2) - x
// inv1 - (x - inv2)  =>  ( inv1 + inv2) - x
// (inv2 - x) + inv1  =>  ( inv1 + inv2) - x
// (inv2 - x) - inv1  =>  (-inv1 + inv2) - x
// inv1 - (x + inv2)  =>  ( inv1 - inv2) - x
//
Node* IdealLoopTree::reassociate_add_sub(Node* n1, PhaseIdealLoop *phase) {
  if (!n1->is_Add() && !n1->is_Sub() || n1->outcnt() == 0) return NULL;
  if (is_invariant(n1)) return NULL;
  int inv1_idx = is_invariant_addition(n1, phase);
  if (!inv1_idx) return NULL;
  // Don't mess with add of constant (igvn moves them to expression tree root.)
  if (n1->is_Add() && n1->in(2)->is_Con()) return NULL;
  Node* inv1 = n1->in(inv1_idx);
  Node* n2 = n1->in(3 - inv1_idx);
  int inv2_idx = is_invariant_addition(n2, phase);
  if (!inv2_idx) return NULL;
  Node* x    = n2->in(3 - inv2_idx);
  Node* inv2 = n2->in(inv2_idx);

  bool neg_x    = n2->is_Sub() && inv2_idx == 1;
  bool neg_inv2 = n2->is_Sub() && inv2_idx == 2;
  bool neg_inv1 = n1->is_Sub() && inv1_idx == 2;
  if (n1->is_Sub() && inv1_idx == 1) {
    neg_x    = !neg_x;
    neg_inv2 = !neg_inv2;
  }
  Node* inv1_c = phase->get_ctrl(inv1);
  Node* inv2_c = phase->get_ctrl(inv2);
  Node* n_inv1;
  if (neg_inv1) {
    Node *zero = phase->_igvn.intcon(0);
    phase->set_ctrl(zero, phase->C->root());
    n_inv1 = new (phase->C, 3) SubINode(zero, inv1);
    phase->register_new_node(n_inv1, inv1_c);
  } else {
    n_inv1 = inv1;
  }
  Node* inv;
  if (neg_inv2) {
    inv = new (phase->C, 3) SubINode(n_inv1, inv2);
  } else {
    inv = new (phase->C, 3) AddINode(n_inv1, inv2);
  }
  phase->register_new_node(inv, phase->get_early_ctrl(inv));

  Node* addx;
  if (neg_x) {
    addx = new (phase->C, 3) SubINode(inv, x);
  } else {
    addx = new (phase->C, 3) AddINode(x, inv);
  }
  phase->register_new_node(addx, phase->get_ctrl(x));
  phase->_igvn.replace_node(n1, addx);
  assert(phase->get_loop(phase->get_ctrl(n1)) == this, "");
  _body.yank(n1);
  return addx;
}

//---------------------reassociate_invariants-----------------------------
// Reassociate invariant expressions:
void IdealLoopTree::reassociate_invariants(PhaseIdealLoop *phase) {
  for (int i = _body.size() - 1; i >= 0; i--) {
    Node *n = _body.at(i);
    for (int j = 0; j < 5; j++) {
      Node* nn = reassociate_add_sub(n, phase);
      if (nn == NULL) break;
      n = nn; // again
    };
  }
}

//------------------------------policy_peeling---------------------------------
// Return TRUE or FALSE if the loop should be peeled or not.
// Peel if we can
// make some loop-invariant test (usually a null-check) happen before the loop.
bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const {
  Node *test = ((IdealLoopTree*)this)->tail();
  int  body_size = ((IdealLoopTree*)this)->_body.size();
  int  uniq      = phase->C->unique();
  // Peeling does loop cloning which can result in O(N^2) node construction
  if( body_size > 255 /* Prevent overflow for large body_size */
      || (body_size * body_size + uniq > MaxNodeLimit) ) {
    return false;           // too large to safely clone
  }
  while( test != _head ) {      // Scan till run off top of loop
    if( test->is_If() ) {       // Test?
      Node *ctrl = phase->get_ctrl(test->in(1));
      if (ctrl->is_top())
        return false;           // Found dead test on live IF?  No peeling!
      // Standard IF only has one input value to check for loop invariance
      assert( test->Opcode() == Op_If || test->Opcode() == Op_CountedLoopEnd, "Check this code when new subtype is added");
      // Condition is not a member of this loop?
      if( !is_member(phase->get_loop(ctrl)) &&
          is_loop_exit(test) )
        return true;            // Found reason to peel!
    }
    // Walk up dominators to loop _head looking for test which is
    // executed on every path thru loop.
    test = phase->idom(test);
  }
  return false;
}

//------------------------------peeled_dom_test_elim---------------------------
// If we got the effect of peeling, either by actually peeling or by making
// a pre-loop which must execute at least once, we can remove all
// loop-invariant dominated tests in the main body.
void PhaseIdealLoop::peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new ) {
  bool progress = true;
  while( progress ) {
    progress = false;           // Reset for next iteration
    Node *prev = loop->_head->in(LoopNode::LoopBackControl);//loop->tail();
    Node *test = prev->in(0);
    while( test != loop->_head ) { // Scan till run off top of loop

      int p_op = prev->Opcode();
      if( (p_op == Op_IfFalse || p_op == Op_IfTrue) &&
          test->is_If() &&      // Test?
          !test->in(1)->is_Con() && // And not already obvious?
          // Condition is not a member of this loop?
          !loop->is_member(get_loop(get_ctrl(test->in(1))))){
        // Walk loop body looking for instances of this test
        for( uint i = 0; i < loop->_body.size(); i++ ) {
          Node *n = loop->_body.at(i);
          if( n->is_If() && n->in(1) == test->in(1) /*&& n != loop->tail()->in(0)*/ ) {
            // IfNode was dominated by version in peeled loop body
            progress = true;
            dominated_by( old_new[prev->_idx], n );
          }
        }
      }
      prev = test;
      test = idom(test);
    } // End of scan tests in loop

  } // End of while( progress )
}

//------------------------------do_peeling-------------------------------------
// Peel the first iteration of the given loop.
// Step 1: Clone the loop body.  The clone becomes the peeled iteration.
//         The pre-loop illegally has 2 control users (old & new loops).
// Step 2: Make the old-loop fall-in edges point to the peeled iteration.
//         Do this by making the old-loop fall-in edges act as if they came
//         around the loopback from the prior iteration (follow the old-loop
//         backedges) and then map to the new peeled iteration.  This leaves
//         the pre-loop with only 1 user (the new peeled iteration), but the
//         peeled-loop backedge has 2 users.
// Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
//         extra backedge user.
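// The ASCII diagrams below show the graph at each stage: the original loop,
// the graph after clone_loop, the graph after the peel and predicate move,
// and the final shape.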
//
//                   orig
//
//                  stmt1
//                    |
//                    v
//              loop predicate
//                    |
//                    v
//                   loop<----+
//                     |      |
//                   stmt2    |
//                     |      |
//                     v      |
//                    if      ^
//                   / \      |
//                  /   \     |
//                 v     v    |
//               false  true  |
//               /       \    |
//              /         ----+
//             |
//             v
//            exit
//
//
//            after clone loop
//
//                  stmt1
//                    |
//                    v
//              loop predicate
//                 /       \
//        clone  /           \  orig
//              /             \
//             /               \
//            v                 v
//   +---->loop clone          loop<----+
//   |      |                    |      |
//   |   stmt2 clone           stmt2    |
//   |      |                    |      |
//   |      v                    v      |
//   ^      if clone            If      ^
//   |     / \                 / \      |
//   |    /   \               /   \     |
//   |   v     v             v     v    |
//   |  true  false        false  true  |
//   |   /       \          /       \   |
//   +----        \        /        ----+
//                 \      /
//                  1v   v2
//                   region
//                      |
//                      v
//                    exit
//
//
//         after peel and predicate move
//
//                  stmt1
//                   /
//                  /
//        clone    /     orig
//                /
//               /       +----------+
//              /        |          |
//             /    loop predicate  |
//            /          |          |
//           v           v          |
//   TOP-->loop clone   loop<----+  |
//          |             |      |  |
//       stmt2 clone    stmt2    |  |
//          |             |      |  ^
//          v             v      |  |
//          if clone     If      ^  |
//         / \          / \      |  |
//        /   \        /   \     |  |
//       v     v      v     v    |  |
//     true  false  false  true  |  |
//       |      \    /      \    |  |
//       |       \  /       ----+   ^
//       |        \ /             |
//       |        1v v2           |
//       v         region         |
//       |           |            |
//       |           v            |
//       |         exit           |
//       |                        |
//       +--------------->-----------------+
//
//
//              final graph
//
//                  stmt1
//                    |
//                    v
//               stmt2 clone
//                    |
//                    v
//                if clone
//                 / |
//                /  |
//               v   v
//           false  true
//            |      |
//            |      v
//            | loop predicate
//            |      |
//            |      v
//            |     loop<----+
//            |       |      |
//            |     stmt2    |
//            |       |      |
//            |       v      |
//            v      if      ^
//            |     / \      |
//            |    /   \     |
//            |   v     v    |
//            | false  true  |
//            |  |      \    |
//            v  v       --+
//           region
//              |
//              v
//             exit
//
void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) {

  C->set_major_progress();
  // Peeling a 'main' loop in a pre/main/post situation obfuscates the
  // 'pre' loop from the main and the 'pre' can no longer have its
  // iterations adjusted.  Therefore, we need to declare this loop as
  // no longer a 'main' loop; it will need new pre and post loops before
  // we can do further RCE.
#ifndef PRODUCT
  if (TraceLoopOpts) {
    tty->print("Peel ");
    loop->dump_head();
  }
#endif
  Node* head = loop->_head;
  bool counted_loop = head->is_CountedLoop();
  if (counted_loop) {
    CountedLoopNode *cl = head->as_CountedLoop();
    assert(cl->trip_count() > 0, "peeling a fully unrolled loop");
    cl->set_trip_count(cl->trip_count() - 1);
    if (cl->is_main_loop()) {
      cl->set_normal_loop();
#ifndef PRODUCT
      if (PrintOpto && VerifyLoopOptimizations) {
        tty->print("Peeling a 'main' loop; resetting to 'normal' ");
        loop->dump_head();
      }
#endif
    }
  }
  Node* entry = head->in(LoopNode::EntryControl);

  // Step 1: Clone the loop body.  The clone becomes the peeled iteration.
  //         The pre-loop illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dom_depth(head) );

  // Step 2: Make the old-loop fall-in edges point to the peeled iteration.
  //         Do this by making the old-loop fall-in edges act as if they came
  //         around the loopback from the prior iteration (follow the old-loop
  //         backedges) and then map to the new peeled iteration.
  //         This leaves
  //         the pre-loop with only 1 user (the new peeled iteration), but the
  //         peeled-loop backedge has 2 users.
  Node* new_exit_value = old_new[head->in(LoopNode::LoopBackControl)->_idx];
  new_exit_value = move_loop_predicates(entry, new_exit_value, !counted_loop);
  _igvn.hash_delete(head);
  head->set_req(LoopNode::EntryControl, new_exit_value);
  for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
    Node* old = head->fast_out(j);
    if (old->in(0) == loop->_head && old->req() == 3 && old->is_Phi()) {
      new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx];
      if (!new_exit_value )     // Backedge value is ALSO loop invariant?
        // Then loop body backedge value remains the same.
        new_exit_value = old->in(LoopNode::LoopBackControl);
      _igvn.hash_delete(old);
      old->set_req(LoopNode::EntryControl, new_exit_value);
    }
  }


  // Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
  //         extra backedge user.
  Node* new_head = old_new[head->_idx];
  _igvn.hash_delete(new_head);
  new_head->set_req(LoopNode::LoopBackControl, C->top());
  for (DUIterator_Fast j2max, j2 = new_head->fast_outs(j2max); j2 < j2max; j2++) {
    Node* use = new_head->fast_out(j2);
    if (use->in(0) == new_head && use->req() == 3 && use->is_Phi()) {
      _igvn.hash_delete(use);
      use->set_req(LoopNode::LoopBackControl, C->top());
    }
  }


  // Step 4: Correct dom-depth info.  Set to loop-head depth.
  int dd = dom_depth(head);
  set_idom(head, head->in(1), dd);
  for (uint j3 = 0; j3 < loop->_body.size(); j3++) {
    Node *old = loop->_body.at(j3);
    Node *nnn = old_new[old->_idx];
    if (!has_ctrl(nnn))
      set_idom(nnn, idom(nnn), dd-1);
    // While we're at it, remove any SafePoints from the peeled code
    if (old->Opcode() == Op_SafePoint) {
      Node *nnn = old_new[old->_idx];
      lazy_replace(nnn,nnn->in(TypeFunc::Control));
    }
  }

  // Now force out all loop-invariant dominating tests.  The optimizer
  // finds some, but we _know_ they are all useless.
  peeled_dom_test_elim(loop,old_new);

  loop->record_for_igvn();
}

#define EMPTY_LOOP_SIZE 7 // number of nodes in an empty loop

//------------------------------policy_maximally_unroll------------------------
// Calculate exact loop trip count and return true if loop can be maximally
// unrolled.
bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
  CountedLoopNode *cl = _head->as_CountedLoop();
  assert(cl->is_normal_loop(), "");
  if (!cl->is_valid_counted_loop())
    return false; // Malformed counted loop

  if (!cl->has_exact_trip_count()) {
    // Trip count is not exact.
    return false;
  }

  uint trip_count = cl->trip_count();
  // Note, max_juint is used to indicate unknown trip count.
  assert(trip_count > 1, "one iteration loop should be optimized out already");
  assert(trip_count < max_juint, "exact trip_count should be less than max_uint.");

  // Real policy: if we maximally unroll, does it get too big?
  // Allow the unrolled mess to get larger than standard loop
  // size.  After all, it will no longer be a loop.
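  // (The estimate used below is new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count;
  // e.g. a 20-node body tripping 8 times is costed at 7 + 13*8 = 111 nodes.)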
  uint body_size    = _body.size();
  uint unroll_limit = (uint)LoopUnrollLimit * 4;
  assert( (intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits");
  if (trip_count > unroll_limit || body_size > unroll_limit) {
    return false;
  }

  // Fully unroll a loop with few iterations, regardless of the
  // conditions below, since subsequent loop optimizations will split
  // such a loop anyway (pre-main-post).
  if (trip_count <= 3)
    return true;

  // Take into account that after unroll conjoined heads and tails will fold,
  // otherwise policy_unroll() may allow more unrolling than max unrolling.
  uint new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count;
  uint tst_body_size = (new_body_size - EMPTY_LOOP_SIZE) / trip_count + EMPTY_LOOP_SIZE;
  if (body_size != tst_body_size) // Check for int overflow
    return false;
  if (new_body_size > unroll_limit ||
      // Unrolling can result in a large amount of node construction
      new_body_size >= MaxNodeLimit - phase->C->unique()) {
    return false;
  }

  // Do not unroll a loop with String intrinsics code.
  // String intrinsics are large and have loops.
  for (uint k = 0; k < _body.size(); k++) {
    Node* n = _body.at(k);
    switch (n->Opcode()) {
      case Op_StrComp:
      case Op_StrEquals:
      case Op_StrIndexOf:
      case Op_AryEq: {
        return false;
      }
    } // switch
  }

  return true; // Do maximally unroll
}


#define MAX_UNROLL 16 // maximum number of unrolls for main loop

//------------------------------policy_unroll----------------------------------
// Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if
// the loop is a CountedLoop and the body is small enough.
bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {

  CountedLoopNode *cl = _head->as_CountedLoop();
  assert(cl->is_normal_loop() || cl->is_main_loop(), "");

  if (!cl->is_valid_counted_loop())
    return false; // Malformed counted loop

  // Protect against over-unrolling.
  // After split at least one iteration will be executed in pre-loop.
  if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false;

  int future_unroll_ct = cl->unrolled_count() * 2;
  if (future_unroll_ct > MAX_UNROLL) return false;

  // Check for initial stride being a small enough constant
  if (abs(cl->stride_con()) > (1<<2)*future_unroll_ct) return false;

  // Don't unroll if the next round of unrolling would push us
  // over the expected trip count of the loop.  One is subtracted
  // from the expected trip count because the pre-loop normally
  // executes 1 iteration.
  if (UnrollLimitForProfileCheck > 0 &&
      cl->profile_trip_cnt() != COUNT_UNKNOWN &&
      future_unroll_ct > UnrollLimitForProfileCheck &&
      (float)future_unroll_ct > cl->profile_trip_cnt() - 1.0) {
    return false;
  }

  // When unroll count is greater than LoopUnrollMin, don't unroll if:
  //   the residual iterations are more than 10% of the trip count
  //   and rounds of "unroll,optimize" are not making significant progress
  //   Progress defined as current size less than 20% larger than previous size.
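  // (Concretely: bail out when the unroll factor exceeds LoopUnrollMin, the possible
  // residual iterations (future_unroll_ct - 1) exceed 10% of the profiled trip count,
  // and the body has already grown past 1.2x its node count before unrolling.)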
  if (UseSuperWord && cl->node_count_before_unroll() > 0 &&
      future_unroll_ct > LoopUnrollMin &&
      (future_unroll_ct - 1) * 10.0 > cl->profile_trip_cnt() &&
      1.2 * cl->node_count_before_unroll() < (double)_body.size()) {
    return false;
  }

  Node *init_n = cl->init_trip();
  Node *limit_n = cl->limit();
  int stride_con = cl->stride_con();
  // Non-constant bounds.
  // Protect against over-unrolling when init and/or limit are not constant
  // (so that trip_count's init value is maxint) but iv range is known.
  if (init_n == NULL || !init_n->is_Con() ||
      limit_n == NULL || !limit_n->is_Con()) {
    Node* phi = cl->phi();
    if (phi != NULL) {
      assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi.");
      const TypeInt* iv_type = phase->_igvn.type(phi)->is_int();
      int next_stride = stride_con * 2; // stride after this unroll
      if (next_stride > 0) {
        if (iv_type->_lo + next_stride <= iv_type->_lo || // overflow
            iv_type->_lo + next_stride >  iv_type->_hi) {
          return false;  // over-unrolling
        }
      } else if (next_stride < 0) {
        if (iv_type->_hi + next_stride >= iv_type->_hi || // overflow
            iv_type->_hi + next_stride <  iv_type->_lo) {
          return false;  // over-unrolling
        }
      }
    }
  }

  // After unroll the limit will be adjusted: new_limit = limit-stride.
  // Bail out if the adjustment overflows.
  const TypeInt* limit_type = phase->_igvn.type(limit_n)->is_int();
  if (stride_con > 0 && ((limit_type->_hi - stride_con) >= limit_type->_hi) ||
      stride_con < 0 && ((limit_type->_lo - stride_con) <= limit_type->_lo))
    return false;  // overflow

  // Adjust body_size to determine if we unroll or not
  uint body_size = _body.size();
  // Also count ModL, DivL and MulL which expand mightily
  for (uint k = 0; k < _body.size(); k++) {
    Node* n = _body.at(k);
    switch (n->Opcode()) {
      case Op_ModL: body_size += 30; break;
      case Op_DivL: body_size += 30; break;
      case Op_MulL: body_size += 10; break;
      case Op_StrComp:
      case Op_StrEquals:
      case Op_StrIndexOf:
      case Op_AryEq: {
        // Do not unroll a loop with String intrinsics code.
        // String intrinsics are large and have loops.
        return false;
      }
    } // switch
  }

  // Check for being too big
  if (body_size > (uint)LoopUnrollLimit) {
    // Normal case: loop too big
    return false;
  }

  // Unroll once!  (Each trip will soon do double iterations)
  return true;
}

//------------------------------policy_align-----------------------------------
// Return TRUE or FALSE if the loop should be cache-line aligned.  Gather the
// expression that does the alignment.  Note that only one array base can be
// aligned in a loop (unless the VM guarantees mutual alignment).  Note that
// if we vectorize short memory ops into longer memory ops, we may want to
// increase alignment.
bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const {
  return false;
}

//------------------------------policy_range_check-----------------------------
// Return TRUE or FALSE if the loop should be range-check-eliminated.
// Actually we do iteration-splitting, a more powerful form of RCE.
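// A candidate test has the shape 'scale*iv + offset' compared against a
// loop-invariant limit; finding such a test that also exits the loop is the
// signal to split iterations.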
bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const {
  if (!RangeCheckElimination) return false;

  CountedLoopNode *cl = _head->as_CountedLoop();
  // If we unrolled with no intention of doing RCE and we later
  // changed our minds, we got no pre-loop.  Either we need to
  // make a new pre-loop, or we gotta disallow RCE.
  if (cl->is_main_no_pre_loop()) return false; // Disallowed for now.
  Node *trip_counter = cl->phi();

  // Check loop body for tests of trip-counter plus loop-invariant vs
  // loop-invariant.
  for (uint i = 0; i < _body.size(); i++) {
    Node *iff = _body[i];
    if (iff->Opcode() == Op_If) { // Test?

      // Comparing trip+off vs limit
      Node *bol = iff->in(1);
      if (bol->req() != 2) continue; // dead constant test
      if (!bol->is_Bool()) {
        assert(UseLoopPredicate && bol->Opcode() == Op_Conv2B, "predicate check only");
        continue;
      }
      if (bol->as_Bool()->_test._test == BoolTest::ne)
        continue; // not RC

      Node *cmp = bol->in(1);

      Node *rc_exp = cmp->in(1);
      Node *limit  = cmp->in(2);

      Node *limit_c = phase->get_ctrl(limit);
      if( limit_c == phase->C->top() )
        return false;           // Found dead test on live IF?  No RCE!
      if( is_member(phase->get_loop(limit_c) ) ) {
        // Compare might have operands swapped; commute them
        rc_exp = cmp->in(2);
        limit  = cmp->in(1);
        limit_c = phase->get_ctrl(limit);
        if( is_member(phase->get_loop(limit_c) ) )
          continue;             // Both inputs are loop varying; cannot RCE
      }

      if (!phase->is_scaled_iv_plus_offset(rc_exp, trip_counter, NULL, NULL)) {
        continue;
      }
      // Yeah!  Found a test like 'trip+off vs limit'
      // Test is an IfNode, has 2 projections.  If BOTH are in the loop
      // we need loop unswitching instead of iteration splitting.
      if( is_loop_exit(iff) )
        return true;            // Found reason to split iterations
    } // End of is IF
  }

  return false;
}

//------------------------------policy_peel_only-------------------------------
// Return TRUE or FALSE if the loop should NEVER be RCE'd or aligned.  Useful
// for unrolling loops with NO array accesses.
bool IdealLoopTree::policy_peel_only( PhaseIdealLoop *phase ) const {

  for( uint i = 0; i < _body.size(); i++ )
    if( _body[i]->is_Mem() )
      return false;

  // No memory accesses at all!
  return true;
}

//------------------------------clone_up_backedge_goo--------------------------
// If Node n lives in the back_ctrl block and cannot float, we clone a private
// version of n in preheader_ctrl block and return that, otherwise return n.
Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n ) {
  if( get_ctrl(n) != back_ctrl ) return n;

  Node *x = NULL;               // If required, a clone of 'n'
  // Check for 'n' being pinned in the backedge.
  if( n->in(0) && n->in(0) == back_ctrl ) {
    x = n->clone();             // Clone a copy of 'n' to preheader
    x->set_req( 0, preheader_ctrl ); // Fix x's control input to preheader
  }

  // Recursively fix up any other input edges into x.
  // If there are no changes we can just return 'n', otherwise
  // we need to clone a private copy and change it.
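  // (The recursion bottoms out at inputs whose control is not back_ctrl; those
  // are returned unchanged by the early test at the top of this function.)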
  for( uint i = 1; i < n->req(); i++ ) {
    Node *g = clone_up_backedge_goo( back_ctrl, preheader_ctrl, n->in(i) );
    if( g != n->in(i) ) {
      if( !x )
        x = n->clone();
      x->set_req(i, g);
    }
  }
  if( x ) {                     // x can legally float to pre-header location
    register_new_node( x, preheader_ctrl );
    return x;
  } else {                      // raise n to cover LCA of uses
    set_ctrl( n, find_non_split_ctrl(back_ctrl->in(0)) );
  }
  return n;
}

//------------------------------insert_pre_post_loops--------------------------
// Insert pre and post loops.  If peel_only is set, the pre-loop can not have
// more iterations added.  It acts as a 'peel' only, no lower-bound RCE, no
// alignment.  Useful to unroll loops that do no array accesses.
void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ) {

#ifndef PRODUCT
  if (TraceLoopOpts) {
    if (peel_only)
      tty->print("PeelMainPost ");
    else
      tty->print("PreMainPost ");
    loop->dump_head();
  }
#endif
  C->set_major_progress();

  // Find common pieces of the loop being guarded with pre & post loops
  CountedLoopNode *main_head = loop->_head->as_CountedLoop();
  assert( main_head->is_normal_loop(), "" );
  CountedLoopEndNode *main_end = main_head->loopexit();
  assert( main_end->outcnt() == 2, "1 true, 1 false path only" );
  uint dd_main_head = dom_depth(main_head);
  uint max = main_head->outcnt();

  Node *pre_header= main_head->in(LoopNode::EntryControl);
  Node *init      = main_head->init_trip();
  Node *incr      = main_end ->incr();
  Node *limit     = main_end ->limit();
  Node *stride    = main_end ->stride();
  Node *cmp       = main_end ->cmp_node();
  BoolTest::mask b_test = main_end->test_trip();

  // Need only 1 user of 'bol' because I will be hacking the loop bounds.
  Node *bol = main_end->in(CountedLoopEndNode::TestValue);
  if( bol->outcnt() != 1 ) {
    bol = bol->clone();
    register_new_node(bol,main_end->in(CountedLoopEndNode::TestControl));
    _igvn.hash_delete(main_end);
    main_end->set_req(CountedLoopEndNode::TestValue, bol);
  }
  // Need only 1 user of 'cmp' because I will be hacking the loop bounds.
  if( cmp->outcnt() != 1 ) {
    cmp = cmp->clone();
    register_new_node(cmp,main_end->in(CountedLoopEndNode::TestControl));
    _igvn.hash_delete(bol);
    bol->set_req(1, cmp);
  }

  //------------------------------
  // Step A: Create Post-Loop.
  Node* main_exit = main_end->proj_out(false);
  assert( main_exit->Opcode() == Op_IfFalse, "" );
  int dd_main_exit = dom_depth(main_exit);

  // Step A1: Clone the loop body.  The clone becomes the post-loop.  The main
  // loop pre-header illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dd_main_exit );
  assert( old_new[main_end ->_idx]->Opcode() == Op_CountedLoopEnd, "" );
  CountedLoopNode *post_head = old_new[main_head->_idx]->as_CountedLoop();
  post_head->set_post_loop(main_head);

  // Reduce the post-loop trip count.
  CountedLoopEndNode* post_end = old_new[main_end ->_idx]->as_CountedLoopEnd();
  post_end->_prob = PROB_FAIR;

  // Build the main-loop normal exit.
  IfFalseNode *new_main_exit = new (C, 1) IfFalseNode(main_end);
  _igvn.register_new_node_with_optimizer( new_main_exit );
  set_idom(new_main_exit, main_end, dd_main_exit );
  set_loop(new_main_exit, loop->_parent);

  // Step A2: Build a zero-trip guard for the post-loop.
  // After leaving the
  // main-loop, the post-loop may not execute at all.  We 'opaque' the incr
  // (the main-loop trip-counter exit value) because we will be changing
  // the exit value (via unrolling) so we cannot constant-fold away the zero
  // trip guard until all unrolling is done.
  Node *zer_opaq = new (C, 2) Opaque1Node(C, incr);
  Node *zer_cmp  = new (C, 3) CmpINode( zer_opaq, limit );
  Node *zer_bol  = new (C, 2) BoolNode( zer_cmp, b_test );
  register_new_node( zer_opaq, new_main_exit );
  register_new_node( zer_cmp , new_main_exit );
  register_new_node( zer_bol , new_main_exit );

  // Build the IfNode
  IfNode *zer_iff = new (C, 2) IfNode( new_main_exit, zer_bol, PROB_FAIR, COUNT_UNKNOWN );
  _igvn.register_new_node_with_optimizer( zer_iff );
  set_idom(zer_iff, new_main_exit, dd_main_exit);
  set_loop(zer_iff, loop->_parent);

  // Plug in the false-path, taken if we need to skip post-loop
  _igvn.hash_delete( main_exit );
  main_exit->set_req(0, zer_iff);
  _igvn._worklist.push(main_exit);
  set_idom(main_exit, zer_iff, dd_main_exit);
  set_idom(main_exit->unique_out(), zer_iff, dd_main_exit);
  // Make the true-path, must enter the post loop
  Node *zer_taken = new (C, 1) IfTrueNode( zer_iff );
  _igvn.register_new_node_with_optimizer( zer_taken );
  set_idom(zer_taken, zer_iff, dd_main_exit);
  set_loop(zer_taken, loop->_parent);
  // Plug in the true path
  _igvn.hash_delete( post_head );
  post_head->set_req(LoopNode::EntryControl, zer_taken);
  set_idom(post_head, zer_taken, dd_main_exit);

  // Step A3: Make the fall-in values to the post-loop come from the
  // fall-out values of the main-loop.
  for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) {
    Node* main_phi = main_head->fast_out(i);
    if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() >0 ) {
      Node *post_phi = old_new[main_phi->_idx];
      Node *fallmain = clone_up_backedge_goo(main_head->back_control(),
                                             post_head->init_control(),
                                             main_phi->in(LoopNode::LoopBackControl));
      _igvn.hash_delete(post_phi);
      post_phi->set_req( LoopNode::EntryControl, fallmain );
    }
  }

  // Update local caches for next stanza
  main_exit = new_main_exit;


  //------------------------------
  // Step B: Create Pre-Loop.

  // Step B1: Clone the loop body.  The clone becomes the pre-loop.  The main
  // loop pre-header illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dd_main_head );
  CountedLoopNode*    pre_head = old_new[main_head->_idx]->as_CountedLoop();
  CountedLoopEndNode* pre_end  = old_new[main_end ->_idx]->as_CountedLoopEnd();
  pre_head->set_pre_loop(main_head);
  Node *pre_incr = old_new[incr->_idx];

  // Reduce the pre-loop trip count.
  pre_end->_prob = PROB_FAIR;

  // Find the pre-loop normal exit.
  Node* pre_exit = pre_end->proj_out(false);
  assert( pre_exit->Opcode() == Op_IfFalse, "" );
  IfFalseNode *new_pre_exit = new (C, 1) IfFalseNode(pre_end);
  _igvn.register_new_node_with_optimizer( new_pre_exit );
  set_idom(new_pre_exit, pre_end, dd_main_head);
  set_loop(new_pre_exit, loop->_parent);

  // Step B2: Build a zero-trip guard for the main-loop.  After leaving the
  // pre-loop, the main-loop may not execute at all.  Later in life this
  // zero-trip guard will become the minimum-trip guard when we unroll
  // the main-loop.
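  // (The guard compares the pre-loop's incremented trip counter against the limit,
  // with the limit hidden behind an Opaque1 node so the test cannot be folded
  // before unrolling has finished adjusting it.)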
  Node *min_opaq = new (C, 2) Opaque1Node(C, limit);
  Node *min_cmp  = new (C, 3) CmpINode( pre_incr, min_opaq );
  Node *min_bol  = new (C, 2) BoolNode( min_cmp, b_test );
  register_new_node( min_opaq, new_pre_exit );
  register_new_node( min_cmp , new_pre_exit );
  register_new_node( min_bol , new_pre_exit );

  // Build the IfNode (assume the main-loop is executed always).
  IfNode *min_iff = new (C, 2) IfNode( new_pre_exit, min_bol, PROB_ALWAYS, COUNT_UNKNOWN );
  _igvn.register_new_node_with_optimizer( min_iff );
  set_idom(min_iff, new_pre_exit, dd_main_head);
  set_loop(min_iff, loop->_parent);

  // Plug in the false-path, taken if we need to skip main-loop
  _igvn.hash_delete( pre_exit );
  pre_exit->set_req(0, min_iff);
  set_idom(pre_exit, min_iff, dd_main_head);
  set_idom(pre_exit->unique_out(), min_iff, dd_main_head);
  // Make the true-path, must enter the main loop
  Node *min_taken = new (C, 1) IfTrueNode( min_iff );
  _igvn.register_new_node_with_optimizer( min_taken );
  set_idom(min_taken, min_iff, dd_main_head);
  set_loop(min_taken, loop->_parent);
  // Plug in the true path
  _igvn.hash_delete( main_head );
  main_head->set_req(LoopNode::EntryControl, min_taken);
  set_idom(main_head, min_taken, dd_main_head);

  // Step B3: Make the fall-in values to the main-loop come from the
  // fall-out values of the pre-loop.
  for (DUIterator_Fast i2max, i2 = main_head->fast_outs(i2max); i2 < i2max; i2++) {
    Node* main_phi = main_head->fast_out(i2);
    if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) {
      Node *pre_phi = old_new[main_phi->_idx];
      Node *fallpre = clone_up_backedge_goo(pre_head->back_control(),
                                            main_head->init_control(),
                                            pre_phi->in(LoopNode::LoopBackControl));
      _igvn.hash_delete(main_phi);
      main_phi->set_req( LoopNode::EntryControl, fallpre );
    }
  }

  // Step B4: Shorten the pre-loop to run only 1 iteration (for now).
  // RCE and alignment may change this later.
  Node *cmp_end = pre_end->cmp_node();
  assert( cmp_end->in(2) == limit, "" );
  Node *pre_limit = new (C, 3) AddINode( init, stride );

  // Save the original loop limit in this Opaque1 node for
  // use by range check elimination.
  Node *pre_opaq  = new (C, 3) Opaque1Node(C, pre_limit, limit);

  register_new_node( pre_limit, pre_head->in(0) );
  register_new_node( pre_opaq , pre_head->in(0) );

  // Since no other users of pre-loop compare, I can hack limit directly
  assert( cmp_end->outcnt() == 1, "no other users" );
  _igvn.hash_delete(cmp_end);
  cmp_end->set_req(2, peel_only ? pre_limit : pre_opaq);

  // Special case for not-equal loop bounds:
  // Change pre loop test, main loop test, and the
  // main loop guard test to use lt or gt depending on stride
  // direction:
  //   positive stride use <
  //   negative stride use >

  if (pre_end->in(CountedLoopEndNode::TestValue)->as_Bool()->_test._test == BoolTest::ne) {
    assert(!LoopLimitCheck, "only canonical tests (lt or gt) are expected");

    BoolTest::mask new_test = (main_end->stride_con() > 0) ?
        BoolTest::lt : BoolTest::gt;
    // Modify pre loop end condition
    Node* pre_bol = pre_end->in(CountedLoopEndNode::TestValue)->as_Bool();
    BoolNode* new_bol0 = new (C, 2) BoolNode(pre_bol->in(1), new_test);
    register_new_node( new_bol0, pre_head->in(0) );
    _igvn.hash_delete(pre_end);
    pre_end->set_req(CountedLoopEndNode::TestValue, new_bol0);
    // Modify main loop guard condition
    assert(min_iff->in(CountedLoopEndNode::TestValue) == min_bol, "guard okay");
    BoolNode* new_bol1 = new (C, 2) BoolNode(min_bol->in(1), new_test);
    register_new_node( new_bol1, new_pre_exit );
    _igvn.hash_delete(min_iff);
    min_iff->set_req(CountedLoopEndNode::TestValue, new_bol1);
    // Modify main loop end condition
    BoolNode* main_bol = main_end->in(CountedLoopEndNode::TestValue)->as_Bool();
    BoolNode* new_bol2 = new (C, 2) BoolNode(main_bol->in(1), new_test);
    register_new_node( new_bol2, main_end->in(CountedLoopEndNode::TestControl) );
    _igvn.hash_delete(main_end);
    main_end->set_req(CountedLoopEndNode::TestValue, new_bol2);
  }

  // Flag main loop
  main_head->set_main_loop();
  if( peel_only ) main_head->set_main_no_pre_loop();

  // Subtract a trip count for the pre-loop.
  main_head->set_trip_count(main_head->trip_count() - 1);

  // It's difficult to be precise about the trip-counts
  // for the pre/post loops.  They are usually very short,
  // so guess that 4 trips is a reasonable value.
  post_head->set_profile_trip_cnt(4.0);
  pre_head->set_profile_trip_cnt(4.0);

  // Now force out all loop-invariant dominating tests.  The optimizer
  // finds some, but we _know_ they are all useless.
  peeled_dom_test_elim(loop,old_new);
}

//------------------------------is_invariant-----------------------------
// Return true if n is invariant
bool IdealLoopTree::is_invariant(Node* n) const {
  Node *n_c = _phase->has_ctrl(n) ? _phase->get_ctrl(n) : n;
  if (n_c->is_top()) return false;
  return !is_member(_phase->get_loop(n_c));
}


//------------------------------do_unroll--------------------------------------
// Unroll the loop body one step - make each trip do 2 iterations.
void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip ) {
  assert(LoopUnrollLimit, "");
  CountedLoopNode *loop_head = loop->_head->as_CountedLoop();
  CountedLoopEndNode *loop_end = loop_head->loopexit();
  assert(loop_end, "");
#ifndef PRODUCT
  if (PrintOpto && VerifyLoopOptimizations) {
    tty->print("Unrolling ");
    loop->dump_head();
  } else if (TraceLoopOpts) {
    if (loop_head->trip_count() < (uint)LoopUnrollLimit) {
      tty->print("Unroll %d(%2d) ", loop_head->unrolled_count()*2, loop_head->trip_count());
    } else {
      tty->print("Unroll %d ", loop_head->unrolled_count()*2);
    }
    loop->dump_head();
  }
#endif

  // Remember loop node count before unrolling to detect
  // if rounds of unroll,optimize are making progress
  loop_head->set_node_count_before_unroll(loop->_body.size());

  Node *ctrl   = loop_head->in(LoopNode::EntryControl);
  Node *limit  = loop_head->limit();
  Node *init   = loop_head->init_trip();
  Node *stride = loop_head->stride();

  Node *opaq = NULL;
  if (adjust_min_trip) { // If not maximally unrolling, need adjustment
    // Search for zero-trip guard.
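    // (The guard was built by insert_pre_post_loops(): an If whose CmpI takes the
    // Opaque1-wrapped limit as its second input; we recover that Opaque1 node here.)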
    assert( loop_head->is_main_loop(), "" );
    assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" );
    Node *iff = ctrl->in(0);
    assert( iff->Opcode() == Op_If, "" );
    Node *bol = iff->in(1);
    assert( bol->Opcode() == Op_Bool, "" );
    Node *cmp = bol->in(1);
    assert( cmp->Opcode() == Op_CmpI, "" );
    opaq = cmp->in(2);
    // Occasionally it's possible for a zero-trip guard Opaque1 node to be
    // optimized away and then another round of loop opts attempted.
    // We can not optimize this particular loop in that case.
    if (opaq->Opcode() != Op_Opaque1)
      return; // Cannot find zero-trip guard!  Bail out!
    // Zero-trip test uses an 'opaque' node which is not shared.
    assert(opaq->outcnt() == 1 && opaq->in(1) == limit, "");
  }

  C->set_major_progress();

  Node* new_limit = NULL;
  if (UnrollLimitCheck) {
    int stride_con = stride->get_int();
    int stride_p = (stride_con > 0) ? stride_con : -stride_con;
    uint old_trip_count = loop_head->trip_count();
    // Verify that unroll policy result is still valid.
    assert(old_trip_count > 1 &&
           (!adjust_min_trip || stride_p <= (1<<3)*loop_head->unrolled_count()), "sanity");

    // Adjust the loop limit to keep the iteration count valid after unroll.
    // Use (limit - stride) instead of (((limit - init)/stride) & (-2))*stride
    // which may overflow.
    if (!adjust_min_trip) {
      assert(old_trip_count > 1 && (old_trip_count & 1) == 0,
             "odd trip count for maximally unroll");
      // Don't need to adjust limit for maximally unroll since trip count is even.
    } else if (loop_head->has_exact_trip_count() && init->is_Con()) {
      // Loop's limit is constant.  Loop's init could be constant when the pre-loop
      // becomes a peeled iteration.
      long init_con = init->get_int();
      // We can keep old loop limit if iterations count stays the same:
      //   old_trip_count == new_trip_count * 2
      // Note: since old_trip_count >= 2 then new_trip_count >= 1
      // so we also don't need to adjust zero trip test.
      long limit_con = limit->get_int();
      // (stride_con*2) does not overflow since stride_con <= 8.
      int new_stride_con = stride_con * 2;
      int stride_m    = new_stride_con - (stride_con > 0 ? 1 : -1);
      long trip_count = (limit_con - init_con + stride_m)/new_stride_con;
      // New trip count should satisfy next conditions.
      assert(trip_count > 0 && (julong)trip_count < (julong)max_juint/2, "sanity");
      uint new_trip_count = (uint)trip_count;
      adjust_min_trip = (old_trip_count != new_trip_count*2);
    }

    if (adjust_min_trip) {
      // Step 2: Adjust the trip limit if it is called for.
      // The adjustment amount is -stride.  Need to make sure if the
      // adjustment underflows or overflows, then the main loop is skipped.
      Node* cmp = loop_end->cmp_node();
      assert(cmp->in(2) == limit, "sanity");
      assert(opaq != NULL && opaq->in(1) == limit, "sanity");

      // Verify that policy_unroll result is still valid.
      const TypeInt* limit_type = _igvn.type(limit)->is_int();
      assert(stride_con > 0 && ((limit_type->_hi - stride_con) < limit_type->_hi) ||
             stride_con < 0 && ((limit_type->_lo - stride_con) > limit_type->_lo), "sanity");

      if (limit->is_Con()) {
        // The check in policy_unroll and the assert above guarantee
        // no underflow if limit is constant.
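        // (e.g. limit 100, stride 4: the new constant limit is 96, so the doubled
        // body never runs past the original bound and the post-loop picks up the
        // remaining iterations.)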
        new_limit = _igvn.intcon(limit->get_int() - stride_con);
        set_ctrl(new_limit, C->root());
      } else {
        if (stride_con > 0 && ((limit_type->_lo - stride_con) < limit_type->_lo) ||
            stride_con < 0 && ((limit_type->_hi - stride_con) > limit_type->_hi)) {
          // No underflow.
          new_limit = new (C, 3) SubINode(limit, stride);
        } else {
          // (limit - stride) may underflow.
          // Clamp the adjustment value with MININT or MAXINT:
          //
          //   new_limit = limit-stride
          //   if (stride > 0)
          //     new_limit = (limit < new_limit) ? MININT : new_limit;
          //   else
          //     new_limit = (limit > new_limit) ? MAXINT : new_limit;
          //
          BoolTest::mask bt = loop_end->test_trip();
          assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected");
          Node* adj_max = _igvn.intcon((stride_con > 0) ? min_jint : max_jint);
          set_ctrl(adj_max, C->root());
          Node* old_limit = NULL;
          Node* adj_limit = NULL;
          Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : NULL;
          if (loop_head->unrolled_count() > 1 &&
              limit->is_CMove() && limit->Opcode() == Op_CMoveI &&
              limit->in(CMoveNode::IfTrue) == adj_max &&
              bol->as_Bool()->_test._test == bt &&
              bol->in(1)->Opcode() == Op_CmpI &&
              bol->in(1)->in(2) == limit->in(CMoveNode::IfFalse)) {
            // Loop was unrolled before.
            // Optimize the limit to avoid nested CMove:
            // use original limit as old limit.
            old_limit = bol->in(1)->in(1);
            // Adjust previous adjusted limit.
            adj_limit = limit->in(CMoveNode::IfFalse);
            adj_limit = new (C, 3) SubINode(adj_limit, stride);
          } else {
            old_limit = limit;
            adj_limit = new (C, 3) SubINode(limit, stride);
          }
          assert(old_limit != NULL && adj_limit != NULL, "");
          register_new_node( adj_limit, ctrl ); // adjust amount
          Node* adj_cmp = new (C, 3) CmpINode(old_limit, adj_limit);
          register_new_node( adj_cmp, ctrl );
          Node* adj_bool = new (C, 2) BoolNode(adj_cmp, bt);
          register_new_node( adj_bool, ctrl );
          new_limit = new (C, 4) CMoveINode(adj_bool, adj_limit, adj_max, TypeInt::INT);
        }
        register_new_node(new_limit, ctrl);
      }
      assert(new_limit != NULL, "");
      if (limit->outcnt() == 2) {
        // Replace old limit if it is used only in loop tests.
        _igvn.replace_node(limit, new_limit);
      } else {
        // Replace in loop test.
        _igvn.hash_delete(cmp);
        cmp->set_req(2, new_limit);

        // Step 3: Find the min-trip test guaranteed before a 'main' loop.
        // Make it a 1-trip test (means at least 2 trips).

        // Guard test uses an 'opaque' node which is not shared.  Hence I
        // can edit its inputs directly.  Hammer in the new limit for the
        // minimum-trip guard.
        assert(opaq->outcnt() == 1, "");
        _igvn.hash_delete(opaq);
        opaq->set_req(1, new_limit);
      }
    }

    // Adjust max trip count. The trip count is intentionally rounded
    // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
    // the main, unrolled, part of the loop will never execute as it is protected
    // by the min-trip test.  See bug 4834191 for a case where we over-unrolled
    // and later determined that part of the unrolled loop was dead.
    loop_head->set_trip_count(old_trip_count / 2);

    // Double the count of original iterations in the unrolled loop body.
    loop_head->double_unrolled_count();

  } else { // LoopLimitCheck

    // Adjust max trip count.
    // The trip count is intentionally rounded
    // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
    // the main, unrolled, part of the loop will never execute as it is protected
    // by the min-trip test.  See bug 4834191 for a case where we over-unrolled
    // and later determined that part of the unrolled loop was dead.
    loop_head->set_trip_count(loop_head->trip_count() / 2);

    // Double the count of original iterations in the unrolled loop body.
    loop_head->double_unrolled_count();

    // -----------
    // Step 2: Cut back the trip counter for an unroll amount of 2.
    // Loop will normally trip (limit - init)/stride_con.  Since it's a
    // CountedLoop this is exact (stride divides limit-init exactly).
    // We are going to double the loop body, so we want to knock off any
    // odd iteration: (trip_cnt & ~1).  Then back compute a new limit.
    Node *span = new (C, 3) SubINode( limit, init );
    register_new_node( span, ctrl );
    Node *trip = new (C, 3) DivINode( 0, span, stride );
    register_new_node( trip, ctrl );
    Node *mtwo = _igvn.intcon(-2);
    set_ctrl(mtwo, C->root());
    Node *rond = new (C, 3) AndINode( trip, mtwo );
    register_new_node( rond, ctrl );
    Node *spn2 = new (C, 3) MulINode( rond, stride );
    register_new_node( spn2, ctrl );
    new_limit = new (C, 3) AddINode( spn2, init );
    register_new_node( new_limit, ctrl );

    // Hammer in the new limit
    Node *ctrl2 = loop_end->in(0);
    Node *cmp2 = new (C, 3) CmpINode( loop_head->incr(), new_limit );
    register_new_node( cmp2, ctrl2 );
    Node *bol2 = new (C, 2) BoolNode( cmp2, loop_end->test_trip() );
    register_new_node( bol2, ctrl2 );
    _igvn.hash_delete(loop_end);
    loop_end->set_req(CountedLoopEndNode::TestValue, bol2);

    // Step 3: Find the min-trip test guaranteed before a 'main' loop.
    // Make it a 1-trip test (means at least 2 trips).
    if( adjust_min_trip ) {
      assert( new_limit != NULL, "" );
      // Guard test uses an 'opaque' node which is not shared.  Hence I
      // can edit its inputs directly.  Hammer in the new limit for the
      // minimum-trip guard.
      assert( opaq->outcnt() == 1, "" );
      _igvn.hash_delete(opaq);
      opaq->set_req(1, new_limit);
    }
  } // LoopLimitCheck

  // ---------
  // Step 4: Clone the loop body.  Move it inside the loop.  This loop body
  // represents the odd iterations; since the loop trips an even number of
  // times its backedge is never taken.  Kill the backedge.
  uint dd = dom_depth(loop_head);
  clone_loop( loop, old_new, dd );

  // Make backedges of the clone equal to backedges of the original.
  // Make the fall-in from the original come from the fall-out of the clone.
  for (DUIterator_Fast jmax, j = loop_head->fast_outs(jmax); j < jmax; j++) {
    Node* phi = loop_head->fast_out(j);
    if( phi->is_Phi() && phi->in(0) == loop_head && phi->outcnt() > 0 ) {
      Node *newphi = old_new[phi->_idx];
      _igvn.hash_delete( phi );
      _igvn.hash_delete( newphi );

      phi   ->set_req(LoopNode::   EntryControl, newphi->in(LoopNode::LoopBackControl));
      newphi->set_req(LoopNode::LoopBackControl, phi   ->in(LoopNode::LoopBackControl));
      phi   ->set_req(LoopNode::LoopBackControl, C->top());
    }
  }
  Node *clone_head = old_new[loop_head->_idx];
  _igvn.hash_delete( clone_head );
  loop_head ->set_req(LoopNode::   EntryControl, clone_head->in(LoopNode::LoopBackControl));
  clone_head->set_req(LoopNode::LoopBackControl, loop_head ->in(LoopNode::LoopBackControl));
  loop_head ->set_req(LoopNode::LoopBackControl, C->top());
  loop->_head = clone_head;     // New loop header

  set_idom(loop_head,  loop_head ->in(LoopNode::EntryControl), dd);
  set_idom(clone_head, clone_head->in(LoopNode::EntryControl), dd);

  // Kill the clone's backedge
  Node *newcle = old_new[loop_end->_idx];
  _igvn.hash_delete( newcle );
  Node *one = _igvn.intcon(1);
  set_ctrl(one, C->root());
  newcle->set_req(1, one);
  // Force clone into same loop body
  uint max = loop->_body.size();
  for( uint k = 0; k < max; k++ ) {
    Node *old = loop->_body.at(k);
    Node *nnn = old_new[old->_idx];
    loop->_body.push(nnn);
    if (!has_ctrl(old))
      set_loop(nnn, loop);
  }

  loop->record_for_igvn();
}

//------------------------------do_maximally_unroll----------------------------

void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) {
  CountedLoopNode *cl = loop->_head->as_CountedLoop();
  assert(cl->has_exact_trip_count(), "trip count is not exact");
  assert(cl->trip_count() > 0, "");
#ifndef PRODUCT
  if (TraceLoopOpts) {
    tty->print("MaxUnroll %d ", cl->trip_count());
    loop->dump_head();
  }
#endif

  // If loop is tripping an odd number of times, peel odd iteration
  if ((cl->trip_count() & 1) == 1) {
    do_peeling(loop, old_new);
  }

  // Now it's tripping an even number of times remaining.  Double loop body.
  // Do not adjust pre-guards; they are not needed and do not exist.
  if (cl->trip_count() > 0) {
    assert((cl->trip_count() & 1) == 0, "missed peeling");
    do_unroll(loop, old_new, false);
  }
}

//------------------------------dominates_backedge---------------------------------
// Returns true if ctrl is executed on every complete iteration
bool IdealLoopTree::dominates_backedge(Node* ctrl) {
  assert(ctrl->is_CFG(), "must be control");
  Node* backedge = _head->as_Loop()->in(LoopNode::LoopBackControl);
  return _phase->dom_lca_internal(ctrl, backedge) == ctrl;
}

//------------------------------add_constraint---------------------------------
// Constrain the main loop iterations so the conditions:
//    low_limit <= scale_con * I + offset < upper_limit
// always holds true.  That is, either increase the number of iterations in
// the pre-loop or the post-loop until the condition holds true in the main
// loop.  Stride, scale, offset and limit are all loop invariant.  Further,
// stride and scale are constants (offset and limit often are).
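// For example, for a range check '0 <= 1*i + 3 < a.length' inside a stride-1 loop,
// the code below folds (a.length - 3)/1 into the main-loop limit via MIN, and
// (0 - 3)/1 into the pre-loop limit via MAX.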
1455 void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) { 1456 // For positive stride, the pre-loop limit always uses a MAX function 1457 // and the main loop a MIN function. For negative stride these are 1458 // reversed. 1459 1460 // Also for positive stride*scale the affine function is increasing, so the 1461 // pre-loop must check for underflow and the post-loop for overflow. 1462 // Negative stride*scale reverses this; pre-loop checks for overflow and 1463 // post-loop for underflow. 1464 if (stride_con*scale_con > 0) { 1465 // The overflow limit: scale*I+offset < upper_limit 1466 // For main-loop compute 1467 // ( if (scale > 0) /* and stride > 0 */ 1468 // I < (upper_limit-offset)/scale 1469 // else /* scale < 0 and stride < 0 */ 1470 // I > (upper_limit-offset)/scale 1471 // ) 1472 // 1473 // (upper_limit-offset) may overflow when offset < 0. 1474 // But it is fine since the main loop will either have 1475 // fewer iterations or will be skipped in that case. 1476 Node *con = new (C, 3) SubINode(upper_limit, offset); 1477 register_new_node(con, pre_ctrl); 1478 Node *scale = _igvn.intcon(scale_con); 1479 set_ctrl(scale, C->root()); 1480 Node *X = new (C, 3) DivINode(0, con, scale); 1481 register_new_node(X, pre_ctrl); 1482 1483 // Adjust main-loop last iteration 1484 Node *loop_limit = *main_limit; 1485 loop_limit = (stride_con > 0) // scale > 0 1486 ? (Node*)(new (C, 3) MinINode(loop_limit, X)) 1487 : (Node*)(new (C, 3) MaxINode(loop_limit, X)); 1488 register_new_node(loop_limit, pre_ctrl); 1489 *main_limit = loop_limit; 1490 1491 // The underflow limit: low_limit <= scale*I+offset. 1492 // For pre-loop compute 1493 // NOT(scale*I+offset >= low_limit) 1494 // scale*I+offset < low_limit 1495 // ( if (scale > 0) /* and stride > 0 */ 1496 // I < (low_limit-offset)/scale 1497 // else /* scale < 0 and stride < 0 */ 1498 // I > (low_limit-offset)/scale 1499 // ) 1500 1501 if (low_limit->get_int() == -max_jint) { 1502 if (!RangeLimitCheck) return; 1503 // We need this guard when scale*pre_limit+offset >= limit 1504 // due to underflow, so we need to execute the pre-loop until 1505 // scale*I+offset >= min_int. But (low_limit-offset) will 1506 // underflow when offset > 0 and X will be > original_limit. 1507 // To avoid it we replace offset = offset > 0 ? 0 : offset 1508 // and add min(pre_limit, original_limit). 1509 Node* shift = _igvn.intcon(31); 1510 set_ctrl(shift, C->root()); 1511 Node *neg_off = new (C, 3) RShiftINode(offset, shift); 1512 register_new_node(neg_off, pre_ctrl); 1513 offset = new (C, 3) AndINode(offset, neg_off); 1514 register_new_node(offset, pre_ctrl); 1515 } else { 1516 assert(low_limit->get_int() == 0, "wrong low limit for range check"); 1517 // The only problem we have here is when offset == min_int, 1518 // since (0-min_int) == min_int. It may be fine for scale > 0 1519 // but for scale < 0 X will be < original_limit. 1520 } 1521 con = new (C, 3) SubINode(low_limit, offset); 1522 register_new_node(con, pre_ctrl); 1523 scale = _igvn.intcon(scale_con); 1524 set_ctrl(scale, C->root()); 1525 X = new (C, 3) DivINode(0, con, scale); 1526 register_new_node(X, pre_ctrl); 1527 1528 // Adjust pre-loop last iteration 1529 loop_limit = *pre_limit; 1530 loop_limit = (stride_con > 0) // scale > 0 1531 ?
(Node*)(new (C, 3) MaxINode(loop_limit, X)) 1532 : (Node*)(new (C, 3) MinINode(loop_limit, X)); 1533 register_new_node( loop_limit, pre_ctrl ); 1534 *pre_limit = loop_limit; 1535 1536 } else { // stride_con*scale_con < 0 1537 // For negative stride*scale pre-loop checks for overflow and 1538 // post-loop for underflow. 1539 // 1540 // The underflow limit: low_limit <= scale*I+offset. 1541 // For main-loop compute 1542 // scale*I+offset+1 > low_limit 1543 // ( if (scale < 0) /* and stride > 0 */ 1544 // I < (low_limit-(offset+1))/scale 1545 // else /* scale > 0 and stride < 0 */ 1546 // I > (low_limit-(offset+1))/scale 1547 // ) 1548 1549 if (low_limit->get_int() == -max_jint) { 1550 if (!RangeLimitCheck) return; 1551 } else { 1552 assert(low_limit->get_int() == 0, "wrong low limit for range check"); 1553 } 1554 1555 Node *one = _igvn.intcon(1); 1556 set_ctrl(one, C->root()); 1557 Node *plus_one = new (C, 3) AddINode(offset, one); 1558 register_new_node( plus_one, pre_ctrl ); 1559 Node *con = new (C, 3) SubINode(low_limit, plus_one); 1560 register_new_node(con, pre_ctrl); 1561 Node *scale = _igvn.intcon(scale_con); 1562 set_ctrl(scale, C->root()); 1563 Node *X = new (C, 3) DivINode(0, con, scale); 1564 register_new_node(X, pre_ctrl); 1565 1566 // Adjust main-loop last iteration 1567 Node *loop_limit = *main_limit; 1568 loop_limit = (stride_con > 0) // scale < 0 1569 ? (Node*)(new (C, 3) MinINode(loop_limit, X)) 1570 : (Node*)(new (C, 3) MaxINode(loop_limit, X)); 1571 register_new_node(loop_limit, pre_ctrl); 1572 *main_limit = loop_limit; 1573 1574 // The overflow limit: scale*I+offset < upper_limit 1575 // For pre-loop compute 1576 // NOT(scale*I+offset < upper_limit) 1577 // scale*I+offset >= upper_limit 1578 // scale*I+offset+1 > upper_limit 1579 // ( if (scale < 0) /* and stride > 0 */ 1580 // I < (upper_limit-(offset+1))/scale 1581 // else /* scale > 0 and stride < 0 */ 1582 // I > (upper_limit-(offset+1))/scale 1583 // ) 1584 plus_one = new (C, 3) AddINode(offset, one); 1585 register_new_node( plus_one, pre_ctrl ); 1586 con = new (C, 3) SubINode(upper_limit, plus_one); 1587 register_new_node(con, pre_ctrl); 1588 scale = _igvn.intcon(scale_con); 1589 set_ctrl(scale, C->root()); 1590 X = new (C, 3) DivINode(0, con, scale); 1591 register_new_node(X, pre_ctrl); 1592 1593 // Adjust pre-loop last iteration 1594 loop_limit = *pre_limit; 1595 loop_limit = (stride_con > 0) // scale < 0 1596 ?
(Node*)(new (C, 3) MaxINode(loop_limit, X)) 1597 : (Node*)(new (C, 3) MinINode(loop_limit, X)); 1598 register_new_node( loop_limit, pre_ctrl ); 1599 *pre_limit = loop_limit; 1600 1601 } 1602 } 1603 1604 1605 //------------------------------is_scaled_iv--------------------------------- 1606 // Return true if exp is a constant times an induction var 1607 bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, int* p_scale) { 1608 if (exp == iv) { 1609 if (p_scale != NULL) { 1610 *p_scale = 1; 1611 } 1612 return true; 1613 } 1614 int opc = exp->Opcode(); 1615 if (opc == Op_MulI) { 1616 if (exp->in(1) == iv && exp->in(2)->is_Con()) { 1617 if (p_scale != NULL) { 1618 *p_scale = exp->in(2)->get_int(); 1619 } 1620 return true; 1621 } 1622 if (exp->in(2) == iv && exp->in(1)->is_Con()) { 1623 if (p_scale != NULL) { 1624 *p_scale = exp->in(1)->get_int(); 1625 } 1626 return true; 1627 } 1628 } else if (opc == Op_LShiftI) { 1629 if (exp->in(1) == iv && exp->in(2)->is_Con()) { 1630 if (p_scale != NULL) { 1631 *p_scale = 1 << exp->in(2)->get_int(); 1632 } 1633 return true; 1634 } 1635 } 1636 return false; 1637 } 1638 1639 //-----------------------------is_scaled_iv_plus_offset------------------------------ 1640 // Return true if exp is a simple induction variable expression: k1*iv + (invar + k2) 1641 bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth) { 1642 if (is_scaled_iv(exp, iv, p_scale)) { 1643 if (p_offset != NULL) { 1644 Node *zero = _igvn.intcon(0); 1645 set_ctrl(zero, C->root()); 1646 *p_offset = zero; 1647 } 1648 return true; 1649 } 1650 int opc = exp->Opcode(); 1651 if (opc == Op_AddI) { 1652 if (is_scaled_iv(exp->in(1), iv, p_scale)) { 1653 if (p_offset != NULL) { 1654 *p_offset = exp->in(2); 1655 } 1656 return true; 1657 } 1658 if (exp->in(2)->is_Con()) { 1659 Node* offset2 = NULL; 1660 if (depth < 2 && 1661 is_scaled_iv_plus_offset(exp->in(1), iv, p_scale, 1662 p_offset != NULL ? &offset2 : NULL, depth+1)) { 1663 if (p_offset != NULL) { 1664 Node *ctrl_off2 = get_ctrl(offset2); 1665 Node* offset = new (C, 3) AddINode(offset2, exp->in(2)); 1666 register_new_node(offset, ctrl_off2); 1667 *p_offset = offset; 1668 } 1669 return true; 1670 } 1671 } 1672 } else if (opc == Op_SubI) { 1673 if (is_scaled_iv(exp->in(1), iv, p_scale)) { 1674 if (p_offset != NULL) { 1675 Node *zero = _igvn.intcon(0); 1676 set_ctrl(zero, C->root()); 1677 Node *ctrl_off = get_ctrl(exp->in(2)); 1678 Node* offset = new (C, 3) SubINode(zero, exp->in(2)); 1679 register_new_node(offset, ctrl_off); 1680 *p_offset = offset; 1681 } 1682 return true; 1683 } 1684 if (is_scaled_iv(exp->in(2), iv, p_scale)) { 1685 if (p_offset != NULL) { 1686 *p_scale *= -1; 1687 *p_offset = exp->in(1); 1688 } 1689 return true; 1690 } 1691 } 1692 return false; 1693 } 1694 1695 //------------------------------do_range_check--------------------------------- 1696 // Eliminate range-checks and other trip-counter vs loop-invariant tests. 
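// A typical candidate is the bounds check for a[scale*i + offset]: an in-loop If
// over an unsigned compare CmpU(scale*trip_counter + offset, limit) with offset and
// limit loop invariant.  add_constraint() tightens the pre- and main-loop limits so
// the main loop can never fail such a test, after which the test is killed in the
// main body.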
1697 void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) { 1698 #ifndef PRODUCT 1699 if (PrintOpto && VerifyLoopOptimizations) { 1700 tty->print("Range Check Elimination "); 1701 loop->dump_head(); 1702 } else if (TraceLoopOpts) { 1703 tty->print("RangeCheck "); 1704 loop->dump_head(); 1705 } 1706 #endif 1707 assert(RangeCheckElimination, ""); 1708 CountedLoopNode *cl = loop->_head->as_CountedLoop(); 1709 assert(cl->is_main_loop(), ""); 1710 1711 // protect against stride not being a constant 1712 if (!cl->stride_is_con()) 1713 return; 1714 1715 // Find the trip counter; we are iteration splitting based on it 1716 Node *trip_counter = cl->phi(); 1717 // Find the main loop limit; we will trim it's iterations 1718 // to not ever trip end tests 1719 Node *main_limit = cl->limit(); 1720 1721 // Need to find the main-loop zero-trip guard 1722 Node *ctrl = cl->in(LoopNode::EntryControl); 1723 assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, ""); 1724 Node *iffm = ctrl->in(0); 1725 assert(iffm->Opcode() == Op_If, ""); 1726 Node *bolzm = iffm->in(1); 1727 assert(bolzm->Opcode() == Op_Bool, ""); 1728 Node *cmpzm = bolzm->in(1); 1729 assert(cmpzm->is_Cmp(), ""); 1730 Node *opqzm = cmpzm->in(2); 1731 // Can not optimize a loop if zero-trip Opaque1 node is optimized 1732 // away and then another round of loop opts attempted. 1733 if (opqzm->Opcode() != Op_Opaque1) 1734 return; 1735 assert(opqzm->in(1) == main_limit, "do not understand situation"); 1736 1737 // Find the pre-loop limit; we will expand it's iterations to 1738 // not ever trip low tests. 1739 Node *p_f = iffm->in(0); 1740 assert(p_f->Opcode() == Op_IfFalse, ""); 1741 CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd(); 1742 assert(pre_end->loopnode()->is_pre_loop(), ""); 1743 Node *pre_opaq1 = pre_end->limit(); 1744 // Occasionally it's possible for a pre-loop Opaque1 node to be 1745 // optimized away and then another round of loop opts attempted. 1746 // We can not optimize this particular loop in that case. 1747 if (pre_opaq1->Opcode() != Op_Opaque1) 1748 return; 1749 Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1; 1750 Node *pre_limit = pre_opaq->in(1); 1751 1752 // Where do we put new limit calculations 1753 Node *pre_ctrl = pre_end->loopnode()->in(LoopNode::EntryControl); 1754 1755 // Ensure the original loop limit is available from the 1756 // pre-loop Opaque1 node. 1757 Node *orig_limit = pre_opaq->original_loop_limit(); 1758 if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP) 1759 return; 1760 1761 // Must know if its a count-up or count-down loop 1762 1763 int stride_con = cl->stride_con(); 1764 Node *zero = _igvn.intcon(0); 1765 Node *one = _igvn.intcon(1); 1766 // Use symmetrical int range [-max_jint,max_jint] 1767 Node *mini = _igvn.intcon(-max_jint); 1768 set_ctrl(zero, C->root()); 1769 set_ctrl(one, C->root()); 1770 set_ctrl(mini, C->root()); 1771 1772 // Range checks that do not dominate the loop backedge (ie. 1773 // conditionally executed) can lengthen the pre loop limit beyond 1774 // the original loop limit. To prevent this, the pre limit is 1775 // (for stride > 0) MINed with the original loop limit (MAXed 1776 // stride < 0) when some range_check (rc) is conditionally 1777 // executed. 1778 bool conditional_rc = false; 1779 1780 // Check loop body for tests of trip-counter plus loop-invariant vs 1781 // loop-invariant. 1782 for( uint i = 0; i < loop->_body.size(); i++ ) { 1783 Node *iff = loop->_body[i]; 1784 if( iff->Opcode() == Op_If ) { // Test? 
1785 1786 // Test is an IfNode, has 2 projections. If BOTH are in the loop 1787 // we need loop unswitching instead of iteration splitting. 1788 Node *exit = loop->is_loop_exit(iff); 1789 if( !exit ) continue; 1790 int flip = (exit->Opcode() == Op_IfTrue) ? 1 : 0; 1791 1792 // Get boolean condition to test 1793 Node *i1 = iff->in(1); 1794 if( !i1->is_Bool() ) continue; 1795 BoolNode *bol = i1->as_Bool(); 1796 BoolTest b_test = bol->_test; 1797 // Flip sense of test if exit condition is flipped 1798 if( flip ) 1799 b_test = b_test.negate(); 1800 1801 // Get compare 1802 Node *cmp = bol->in(1); 1803 1804 // Look for trip_counter + offset vs limit 1805 Node *rc_exp = cmp->in(1); 1806 Node *limit = cmp->in(2); 1807 jint scale_con= 1; // Assume trip counter not scaled 1808 1809 Node *limit_c = get_ctrl(limit); 1810 if( loop->is_member(get_loop(limit_c) ) ) { 1811 // Compare might have operands swapped; commute them 1812 b_test = b_test.commute(); 1813 rc_exp = cmp->in(2); 1814 limit = cmp->in(1); 1815 limit_c = get_ctrl(limit); 1816 if( loop->is_member(get_loop(limit_c) ) ) 1817 continue; // Both inputs are loop varying; cannot RCE 1818 } 1819 // Here we know 'limit' is loop invariant 1820 1821 // 'limit' maybe pinned below the zero trip test (probably from a 1822 // previous round of rce), in which case, it can't be used in the 1823 // zero trip test expression which must occur before the zero test's if. 1824 if( limit_c == ctrl ) { 1825 continue; // Don't rce this check but continue looking for other candidates. 1826 } 1827 1828 // Check for scaled induction variable plus an offset 1829 Node *offset = NULL; 1830 1831 if (!is_scaled_iv_plus_offset(rc_exp, trip_counter, &scale_con, &offset)) { 1832 continue; 1833 } 1834 1835 Node *offset_c = get_ctrl(offset); 1836 if( loop->is_member( get_loop(offset_c) ) ) 1837 continue; // Offset is not really loop invariant 1838 // Here we know 'offset' is loop invariant. 1839 1840 // As above for the 'limit', the 'offset' maybe pinned below the 1841 // zero trip test. 1842 if( offset_c == ctrl ) { 1843 continue; // Don't rce this check but continue looking for other candidates. 1844 } 1845 #ifdef ASSERT 1846 if (TraceRangeLimitCheck) { 1847 tty->print_cr("RC bool node%s", flip ? " flipped:" : ":"); 1848 bol->dump(2); 1849 } 1850 #endif 1851 // At this point we have the expression as: 1852 // scale_con * trip_counter + offset :: limit 1853 // where scale_con, offset and limit are loop invariant. Trip_counter 1854 // monotonically increases by stride_con, a constant. Both (or either) 1855 // stride_con and scale_con can be negative which will flip about the 1856 // sense of the test. 1857 1858 // Adjust pre and main loop limits to guard the correct iteration set 1859 if( cmp->Opcode() == Op_CmpU ) {// Unsigned compare is really 2 tests 1860 if( b_test._test == BoolTest::lt ) { // Range checks always use lt 1861 // The underflow and overflow limits: 0 <= scale*I+offset < limit 1862 add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit ); 1863 if (!conditional_rc) { 1864 conditional_rc = !loop->dominates_backedge(iff); 1865 // It is also needed if offset->_lo == min_int since 1866 // (0-min_int) == min_int. It may be fine for stride > 0 1867 // but for stride < 0 pre_limit will be < original_limit. 
1868 const TypeInt* offset_t = _igvn.type(offset)->is_int(); 1869 conditional_rc |= RangeLimitCheck && (offset_t->_lo == min_jint) && 1870 (scale_con<0) && (stride_con<0); 1871 } 1872 } else { 1873 #ifndef PRODUCT 1874 if( PrintOpto ) 1875 tty->print_cr("missed RCE opportunity"); 1876 #endif 1877 continue; // In release mode, ignore it 1878 } 1879 } else { // Otherwise work on normal compares 1880 switch( b_test._test ) { 1881 case BoolTest::gt: 1882 // Fall into GE case 1883 case BoolTest::ge: 1884 // Convert (I*scale+offset) >= Limit to (I*(-scale)+(-offset)) <= -Limit 1885 scale_con = -scale_con; 1886 offset = new (C, 3) SubINode( zero, offset ); 1887 register_new_node( offset, pre_ctrl ); 1888 limit = new (C, 3) SubINode( zero, limit ); 1889 register_new_node( limit, pre_ctrl ); 1890 // Fall into LE case 1891 case BoolTest::le: 1892 if (b_test._test != BoolTest::gt) { 1893 // Convert X <= Y to X < Y+1 1894 limit = new (C, 3) AddINode( limit, one ); 1895 register_new_node( limit, pre_ctrl ); 1896 } 1897 // Fall into LT case 1898 case BoolTest::lt: 1899 // The underflow and overflow limits: MIN_INT <= scale*I+offset < limit 1900 add_constraint( stride_con, scale_con, offset, mini, limit, pre_ctrl, &pre_limit, &main_limit ); 1901 if (!conditional_rc) { 1902 conditional_rc = !loop->dominates_backedge(iff); 1903 // It is also needed if scale*pre_limit+offset >= limit 1904 // due to underflow so we need execute pre-loop until 1905 // scale*I+offset >= min_int. But (low_limit-offset) will 1906 // underflow when offset > 0 and X will be > original_limit. 1907 const TypeInt* offset_t = _igvn.type(offset)->is_int(); 1908 conditional_rc |= RangeLimitCheck && (offset_t->_hi > 0) && 1909 (scale_con>0) && (stride_con>0); 1910 } 1911 break; 1912 default: 1913 #ifndef PRODUCT 1914 if( PrintOpto ) 1915 tty->print_cr("missed RCE opportunity"); 1916 #endif 1917 continue; // Unhandled case 1918 } 1919 } 1920 1921 // Kill the eliminated test 1922 C->set_major_progress(); 1923 Node *kill_con = _igvn.intcon( 1-flip ); 1924 set_ctrl(kill_con, C->root()); 1925 _igvn.hash_delete(iff); 1926 iff->set_req(1, kill_con); 1927 _igvn._worklist.push(iff); 1928 // Find surviving projection 1929 assert(iff->is_If(), ""); 1930 ProjNode* dp = ((IfNode*)iff)->proj_out(1-flip); 1931 // Find loads off the surviving projection; remove their control edge 1932 for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) { 1933 Node* cd = dp->fast_out(i); // Control-dependent node 1934 if( cd->is_Load() ) { // Loads can now float around in the loop 1935 _igvn.hash_delete(cd); 1936 // Allow the load to float around in the loop, or before it 1937 // but NOT before the pre-loop. 1938 cd->set_req(0, ctrl); // ctrl, not NULL 1939 _igvn._worklist.push(cd); 1940 --i; 1941 --imax; 1942 } 1943 } 1944 1945 } // End of is IF 1946 1947 } 1948 1949 // Update loop limits 1950 if (conditional_rc) { 1951 pre_limit = (stride_con > 0) ? (Node*)new (C,3) MinINode(pre_limit, orig_limit) 1952 : (Node*)new (C,3) MaxINode(pre_limit, orig_limit); 1953 register_new_node(pre_limit, pre_ctrl); 1954 } 1955 _igvn.hash_delete(pre_opaq); 1956 pre_opaq->set_req(1, pre_limit); 1957 1958 // Note:: we are making the main loop limit no longer precise; 1959 // need to round up based on stride. 1960 cl->set_nonexact_trip_count(); 1961 if (!LoopLimitCheck && stride_con != 1 && stride_con != -1) { // Cutout for common case 1962 // "Standard" round-up logic: ([main_limit-init+(y-1)]/y)*y+init 1963 // Hopefully, compiler will optimize for powers of 2. 
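// e.g. init = 0, main_limit = 13, stride_con = 4:
//   ((13 - 0 + 3) / 4) * 4 + 0 = 16, i.e. the limit is rounded up to the next
//   multiple of the stride so the now-imprecise main-loop limit still covers the
//   same iterations.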
1964 Node *ctrl = get_ctrl(main_limit); 1965 Node *stride = cl->stride(); 1966 Node *init = cl->init_trip(); 1967 Node *span = new (C, 3) SubINode(main_limit,init); 1968 register_new_node(span,ctrl); 1969 Node *rndup = _igvn.intcon(stride_con + ((stride_con>0)?-1:1)); 1970 Node *add = new (C, 3) AddINode(span,rndup); 1971 register_new_node(add,ctrl); 1972 Node *div = new (C, 3) DivINode(0,add,stride); 1973 register_new_node(div,ctrl); 1974 Node *mul = new (C, 3) MulINode(div,stride); 1975 register_new_node(mul,ctrl); 1976 Node *newlim = new (C, 3) AddINode(mul,init); 1977 register_new_node(newlim,ctrl); 1978 main_limit = newlim; 1979 } 1980 1981 Node *main_cle = cl->loopexit(); 1982 Node *main_bol = main_cle->in(1); 1983 // Hacking loop bounds; need private copies of exit test 1984 if( main_bol->outcnt() > 1 ) {// BoolNode shared? 1985 _igvn.hash_delete(main_cle); 1986 main_bol = main_bol->clone();// Clone a private BoolNode 1987 register_new_node( main_bol, main_cle->in(0) ); 1988 main_cle->set_req(1,main_bol); 1989 } 1990 Node *main_cmp = main_bol->in(1); 1991 if( main_cmp->outcnt() > 1 ) { // CmpNode shared? 1992 _igvn.hash_delete(main_bol); 1993 main_cmp = main_cmp->clone();// Clone a private CmpNode 1994 register_new_node( main_cmp, main_cle->in(0) ); 1995 main_bol->set_req(1,main_cmp); 1996 } 1997 // Hack the now-private loop bounds 1998 _igvn.hash_delete(main_cmp); 1999 main_cmp->set_req(2, main_limit); 2000 _igvn._worklist.push(main_cmp); 2001 // The OpaqueNode is unshared by design 2002 _igvn.hash_delete(opqzm); 2003 assert( opqzm->outcnt() == 1, "cannot hack shared node" ); 2004 opqzm->set_req(1,main_limit); 2005 _igvn._worklist.push(opqzm); 2006 } 2007 2008 //------------------------------DCE_loop_body---------------------------------- 2009 // Remove simplistic dead code from loop body 2010 void IdealLoopTree::DCE_loop_body() { 2011 for( uint i = 0; i < _body.size(); i++ ) 2012 if( _body.at(i)->outcnt() == 0 ) 2013 _body.map( i--, _body.pop() ); 2014 } 2015 2016 2017 //------------------------------adjust_loop_exit_prob-------------------------- 2018 // Look for loop-exit tests with the 50/50 (or worse) guesses from the parsing stage. 2019 // Replace with a 1-in-10 exit guess. 2020 void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) { 2021 Node *test = tail(); 2022 while( test != _head ) { 2023 uint top = test->Opcode(); 2024 if( top == Op_IfTrue || top == Op_IfFalse ) { 2025 int test_con = ((ProjNode*)test)->_con; 2026 assert(top == (uint)(test_con? 
Op_IfTrue: Op_IfFalse), "sanity"); 2027 IfNode *iff = test->in(0)->as_If(); 2028 if( iff->outcnt() == 2 ) { // Ignore dead tests 2029 Node *bol = iff->in(1); 2030 if( bol && bol->req() > 1 && bol->in(1) && 2031 ((bol->in(1)->Opcode() == Op_StorePConditional ) || 2032 (bol->in(1)->Opcode() == Op_StoreIConditional ) || 2033 (bol->in(1)->Opcode() == Op_StoreLConditional ) || 2034 (bol->in(1)->Opcode() == Op_CompareAndSwapI ) || 2035 (bol->in(1)->Opcode() == Op_CompareAndSwapL ) || 2036 (bol->in(1)->Opcode() == Op_CompareAndSwapP ) || 2037 (bol->in(1)->Opcode() == Op_CompareAndSwapN ))) 2038 return; // Allocation loops RARELY take backedge 2039 // Find the OTHER exit path from the IF 2040 Node* ex = iff->proj_out(1-test_con); 2041 float p = iff->_prob; 2042 if( !phase->is_member( this, ex ) && iff->_fcnt == COUNT_UNKNOWN ) { 2043 if( top == Op_IfTrue ) { 2044 if( p < (PROB_FAIR + PROB_UNLIKELY_MAG(3))) { 2045 iff->_prob = PROB_STATIC_FREQUENT; 2046 } 2047 } else { 2048 if( p > (PROB_FAIR - PROB_UNLIKELY_MAG(3))) { 2049 iff->_prob = PROB_STATIC_INFREQUENT; 2050 } 2051 } 2052 } 2053 } 2054 } 2055 test = phase->idom(test); 2056 } 2057 } 2058 2059 2060 //------------------------------policy_do_remove_empty_loop-------------------- 2061 // Micro-benchmark spamming. Policy is to always remove empty loops. 2062 // The 'DO' part is to replace the trip counter with the value it will 2063 // have on the last iteration. This will break the loop. 2064 bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) { 2065 // Minimum size must be empty loop 2066 if (_body.size() > EMPTY_LOOP_SIZE) 2067 return false; 2068 2069 if (!_head->is_CountedLoop()) 2070 return false; // Dead loop 2071 CountedLoopNode *cl = _head->as_CountedLoop(); 2072 if (!cl->loopexit()) 2073 return false; // Malformed loop 2074 if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue)))) 2075 return false; // Infinite loop 2076 2077 #ifdef ASSERT 2078 // Ensure only one phi which is the iv. 2079 Node* iv = NULL; 2080 for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) { 2081 Node* n = cl->fast_out(i); 2082 if (n->Opcode() == Op_Phi) { 2083 assert(iv == NULL, "Too many phis" ); 2084 iv = n; 2085 } 2086 } 2087 assert(iv == cl->phi(), "Wrong phi" ); 2088 #endif 2089 2090 // main and post loops have explicitly created zero trip guard 2091 bool needs_guard = !cl->is_main_loop() && !cl->is_post_loop(); 2092 if (needs_guard) { 2093 // Skip guard if values not overlap. 2094 const TypeInt* init_t = phase->_igvn.type(cl->init_trip())->is_int(); 2095 const TypeInt* limit_t = phase->_igvn.type(cl->limit())->is_int(); 2096 int stride_con = cl->stride_con(); 2097 if (stride_con > 0) { 2098 needs_guard = (init_t->_hi >= limit_t->_lo); 2099 } else { 2100 needs_guard = (init_t->_lo <= limit_t->_hi); 2101 } 2102 } 2103 if (needs_guard) { 2104 // Check for an obvious zero trip guard. 
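// "Obvious" here means an IfTrue projection just above the loop entry whose Bool
// applies the loop's own exit test (cl->loopexit()->test_trip()) to a Cmp of
// init_trip against limit, i.e. a guard that already proves at least one trip.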
2105 Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->in(LoopNode::EntryControl)); 2106 if (inctrl->Opcode() == Op_IfTrue) { 2107 // The test should look like just the backedge of a CountedLoop 2108 Node* iff = inctrl->in(0); 2109 if (iff->is_If()) { 2110 Node* bol = iff->in(1); 2111 if (bol->is_Bool() && bol->as_Bool()->_test._test == cl->loopexit()->test_trip()) { 2112 Node* cmp = bol->in(1); 2113 if (cmp->is_Cmp() && cmp->in(1) == cl->init_trip() && cmp->in(2) == cl->limit()) { 2114 needs_guard = false; 2115 } 2116 } 2117 } 2118 } 2119 } 2120 2121 #ifndef PRODUCT 2122 if (PrintOpto) { 2123 tty->print("Removing empty loop with%s zero trip guard", needs_guard ? "out" : ""); 2124 this->dump_head(); 2125 } else if (TraceLoopOpts) { 2126 tty->print("Empty with%s zero trip guard ", needs_guard ? "out" : ""); 2127 this->dump_head(); 2128 } 2129 #endif 2130 2131 if (needs_guard) { 2132 // Peel the loop to ensure there's a zero trip guard 2133 Node_List old_new; 2134 phase->do_peeling(this, old_new); 2135 } 2136 2137 // Replace the phi at loop head with the final value of the last 2138 // iteration. Then the CountedLoopEnd will collapse (backedge never 2139 // taken) and all loop-invariant uses of the exit values will be correct. 2140 Node *phi = cl->phi(); 2141 Node *exact_limit = phase->exact_limit(this); 2142 if (exact_limit != cl->limit()) { 2143 // We also need to replace the original limit to collapse loop exit. 2144 Node* cmp = cl->loopexit()->cmp_node(); 2145 assert(cl->limit() == cmp->in(2), "sanity"); 2146 phase->_igvn._worklist.push(cmp->in(2)); // put limit on worklist 2147 phase->_igvn.hash_delete(cmp); 2148 cmp->set_req(2, exact_limit); 2149 phase->_igvn._worklist.push(cmp); // put cmp on worklist 2150 } 2151 // Note: the final value after increment should not overflow since 2152 // counted loop has limit check predicate. 2153 Node *final = new (phase->C, 3) SubINode( exact_limit, cl->stride() ); 2154 phase->register_new_node(final,cl->in(LoopNode::EntryControl)); 2155 phase->_igvn.replace_node(phi,final); 2156 phase->C->set_major_progress(); 2157 return true; 2158 } 2159 2160 //------------------------------policy_do_one_iteration_loop------------------- 2161 // Convert one iteration loop into normal code. 2162 bool IdealLoopTree::policy_do_one_iteration_loop( PhaseIdealLoop *phase ) { 2163 if (!_head->as_Loop()->is_valid_counted_loop()) 2164 return false; // Only for counted loop 2165 2166 CountedLoopNode *cl = _head->as_CountedLoop(); 2167 if (!cl->has_exact_trip_count() || cl->trip_count() != 1) { 2168 return false; 2169 } 2170 2171 #ifndef PRODUCT 2172 if(TraceLoopOpts) { 2173 tty->print("OneIteration "); 2174 this->dump_head(); 2175 } 2176 #endif 2177 2178 Node *init_n = cl->init_trip(); 2179 #ifdef ASSERT 2180 // Loop boundaries should be constant since trip count is exact. 2181 assert(init_n->get_int() + cl->stride_con() >= cl->limit()->get_int(), "should be one iteration"); 2182 #endif 2183 // Replace the phi at loop head with the value of the init_trip. 2184 // Then the CountedLoopEnd will collapse (backedge will not be taken) 2185 // and all loop-invariant uses of the exit values will be correct. 
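// (With exactly one trip the phi only ever carries init_trip, so this substitution
// preserves every value the single iteration computes.)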
2186 phase->_igvn.replace_node(cl->phi(), cl->init_trip()); 2187 phase->C->set_major_progress(); 2188 return true; 2189 } 2190 2191 //============================================================================= 2192 //------------------------------iteration_split_impl--------------------------- 2193 bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) { 2194 // Compute exact loop trip count if possible. 2195 compute_exact_trip_count(phase); 2196 2197 // Convert one iteration loop into normal code. 2198 if (policy_do_one_iteration_loop(phase)) 2199 return true; 2200 2201 // Check and remove empty loops (spam micro-benchmarks) 2202 if (policy_do_remove_empty_loop(phase)) 2203 return true; // Here we removed an empty loop 2204 2205 bool should_peel = policy_peeling(phase); // Should we peel? 2206 2207 bool should_unswitch = policy_unswitching(phase); 2208 2209 // Non-counted loops may be peeled; exactly 1 iteration is peeled. 2210 // This removes loop-invariant tests (usually null checks). 2211 if (!_head->is_CountedLoop()) { // Non-counted loop 2212 if (PartialPeelLoop && phase->partial_peel(this, old_new)) { 2213 // Partial peel succeeded so terminate this round of loop opts 2214 return false; 2215 } 2216 if (should_peel) { // Should we peel? 2217 #ifndef PRODUCT 2218 if (PrintOpto) tty->print_cr("should_peel"); 2219 #endif 2220 phase->do_peeling(this,old_new); 2221 } else if (should_unswitch) { 2222 phase->do_unswitching(this, old_new); 2223 } 2224 return true; 2225 } 2226 CountedLoopNode *cl = _head->as_CountedLoop(); 2227 2228 if (!cl->loopexit()) return true; // Ignore various kinds of broken loops 2229 2230 // Do nothing special to pre- and post- loops 2231 if (cl->is_pre_loop() || cl->is_post_loop()) return true; 2232 2233 // Compute loop trip count from profile data 2234 compute_profile_trip_cnt(phase); 2235 2236 // Before attempting fancy unrolling, RCE or alignment, see if we want 2237 // to completely unroll this loop or do loop unswitching. 2238 if (cl->is_normal_loop()) { 2239 if (should_unswitch) { 2240 phase->do_unswitching(this, old_new); 2241 return true; 2242 } 2243 bool should_maximally_unroll = policy_maximally_unroll(phase); 2244 if (should_maximally_unroll) { 2245 // Here we did some unrolling and peeling. Eventually we will 2246 // completely unroll this loop and it will no longer be a loop. 2247 phase->do_maximally_unroll(this,old_new); 2248 return true; 2249 } 2250 } 2251 2252 // Skip next optimizations if running low on nodes. Note that 2253 // policy_unswitching and policy_maximally_unroll have this check. 2254 uint nodes_left = MaxNodeLimit - phase->C->unique(); 2255 if ((2 * _body.size()) > nodes_left) { 2256 return true; 2257 } 2258 2259 // Counted loops may be peeled, may need some iterations run up 2260 // front for RCE, and may want to align loop refs to a cache 2261 // line. Thus we clone a full loop up front whose trip count is 2262 // at least 1 (if peeling), but may be several more. 2263 2264 // The main loop will start cache-line aligned with at least 1 2265 // iteration of the unrolled body (zero-trip test required) and 2266 // will have some range checks removed. 2267 2268 // A post-loop will finish any odd iterations (leftover after 2269 // unrolling), plus any needed for RCE purposes. 
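// Schematically, iteration splitting produces:
//   pre-loop  : a few fully-checked iterations, run until RCE/alignment is safe
//   main-loop : the unrolled body with range checks removed
//   post-loop : leftover iterations (odd trips, RCE tail) with full checks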
2270 2271 bool should_unroll = policy_unroll(phase); 2272 2273 bool should_rce = policy_range_check(phase); 2274 2275 bool should_align = policy_align(phase); 2276 2277 // If not RCE'ing (iteration splitting) or Aligning, then we do not 2278 // need a pre-loop. We may still need to peel an initial iteration but 2279 // we will not be needing an unknown number of pre-iterations. 2280 // 2281 // Basically, if may_rce_align reports FALSE first time through, 2282 // we will not be able to later do RCE or Aligning on this loop. 2283 bool may_rce_align = !policy_peel_only(phase) || should_rce || should_align; 2284 2285 // If we have any of these conditions (RCE, alignment, unrolling) met, then 2286 // we switch to the pre-/main-/post-loop model. This model also covers 2287 // peeling. 2288 if (should_rce || should_align || should_unroll) { 2289 if (cl->is_normal_loop()) // Convert to 'pre/main/post' loops 2290 phase->insert_pre_post_loops(this,old_new, !may_rce_align); 2291 2292 // Adjust the pre- and main-loop limits to let the pre and post loops run 2293 // with full checks, but the main-loop with no checks. Remove said 2294 // checks from the main body. 2295 if (should_rce) 2296 phase->do_range_check(this,old_new); 2297 2298 // Double loop body for unrolling. Adjust the minimum-trip test (will do 2299 // twice as many iterations as before) and the main body limit (only do 2300 // an even number of trips). If we are peeling, we might enable some RCE 2301 // and we'd rather unroll the post-RCE'd loop SO... do not unroll if 2302 // peeling. 2303 if (should_unroll && !should_peel) 2304 phase->do_unroll(this,old_new, true); 2305 2306 // Adjust the pre-loop limits to align the main body 2307 // iterations. 2308 if (should_align) 2309 Unimplemented(); 2310 2311 } else { // Else we have an unchanged counted loop 2312 if (should_peel) // Might want to peel but do nothing else 2313 phase->do_peeling(this,old_new); 2314 } 2315 return true; 2316 } 2317 2318 2319 //============================================================================= 2320 //------------------------------iteration_split-------------------------------- 2321 bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new ) { 2322 // Recursively iteration split nested loops 2323 if (_child && !_child->iteration_split(phase, old_new)) 2324 return false; 2325 2326 // Clean out prior deadwood 2327 DCE_loop_body(); 2328 2329 2330 // Look for loop-exit tests with my 50/50 guesses from the Parsing stage. 2331 // Replace with a 1-in-10 exit guess. 2332 if (_parent /*not the root loop*/ && 2333 !_irreducible && 2334 // Also ignore the occasional dead backedge 2335 !tail()->is_top()) { 2336 adjust_loop_exit_prob(phase); 2337 } 2338 2339 // Gate unrolling, RCE and peeling efforts. 2340 if (!_child && // If not an inner loop, do not split 2341 !_irreducible && 2342 _allow_optimizations && 2343 !tail()->is_top()) { // Also ignore the occasional dead backedge 2344 if (!_has_call) { 2345 if (!iteration_split_impl(phase, old_new)) { 2346 return false; 2347 } 2348 } else if (policy_unswitching(phase)) { 2349 phase->do_unswitching(this, old_new); 2350 } 2351 } 2352 2353 // Minor offset re-organization to remove loop-fallout uses of 2354 // trip counter when there was no major reshaping. 
2355 phase->reorg_offsets(this); 2356 2357 if (_next && !_next->iteration_split(phase, old_new)) 2358 return false; 2359 return true; 2360 } 2361 2362 2363 //============================================================================= 2364 // Process all the loops in the loop tree and replace any fill 2365 // patterns with an intrinsic version. 2366 bool PhaseIdealLoop::do_intrinsify_fill() { 2367 bool changed = false; 2368 for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) { 2369 IdealLoopTree* lpt = iter.current(); 2370 changed |= intrinsify_fill(lpt); 2371 } 2372 return changed; 2373 } 2374 2375 2376 // Examine an inner loop looking for a single store of an invariant 2377 // value in a unit stride loop. 2378 bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value, 2379 Node*& shift, Node*& con) { 2380 const char* msg = NULL; 2381 Node* msg_node = NULL; 2382 2383 store_value = NULL; 2384 con = NULL; 2385 shift = NULL; 2386 2387 // Process the loop looking for stores. If there are multiple 2388 // stores or extra control flow, give up at this point. 2389 CountedLoopNode* head = lpt->_head->as_CountedLoop(); 2390 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { 2391 Node* n = lpt->_body.at(i); 2392 if (n->outcnt() == 0) continue; // Ignore dead 2393 if (n->is_Store()) { 2394 if (store != NULL) { 2395 msg = "multiple stores"; 2396 break; 2397 } 2398 int opc = n->Opcode(); 2399 if (opc == Op_StoreP || opc == Op_StoreN || opc == Op_StoreCM) { 2400 msg = "oop fills not handled"; 2401 break; 2402 } 2403 Node* value = n->in(MemNode::ValueIn); 2404 if (!lpt->is_invariant(value)) { 2405 msg = "variant store value"; 2406 } else if (!_igvn.type(n->in(MemNode::Address))->isa_aryptr()) { 2407 msg = "not array address"; 2408 } 2409 store = n; 2410 store_value = value; 2411 } else if (n->is_If() && n != head->loopexit()) { 2412 msg = "extra control flow"; 2413 msg_node = n; 2414 } 2415 } 2416 2417 if (store == NULL) { 2418 // No store in loop 2419 return false; 2420 } 2421 2422 if (msg == NULL && head->stride_con() != 1) { 2423 // could handle negative strides too 2424 if (head->stride_con() < 0) { 2425 msg = "negative stride"; 2426 } else { 2427 msg = "non-unit stride"; 2428 } 2429 } 2430 2431 if (msg == NULL && !store->in(MemNode::Address)->is_AddP()) { 2432 msg = "can't handle store address"; 2433 msg_node = store->in(MemNode::Address); 2434 } 2435 2436 if (msg == NULL && 2437 (!store->in(MemNode::Memory)->is_Phi() || 2438 store->in(MemNode::Memory)->in(LoopNode::LoopBackControl) != store)) { 2439 msg = "store memory isn't proper phi"; 2440 msg_node = store->in(MemNode::Memory); 2441 } 2442 2443 // Make sure there is an appropriate fill routine 2444 BasicType t = store->as_Mem()->memory_type(); 2445 const char* fill_name; 2446 if (msg == NULL && 2447 StubRoutines::select_fill_function(t, false, fill_name) == NULL) { 2448 msg = "unsupported store"; 2449 msg_node = store; 2450 } 2451 2452 if (msg != NULL) { 2453 #ifndef PRODUCT 2454 if (TraceOptimizeFill) { 2455 tty->print_cr("not fill intrinsic candidate: %s", msg); 2456 if (msg_node != NULL) msg_node->dump(); 2457 } 2458 #endif 2459 return false; 2460 } 2461 2462 // Make sure the address expression can be handled. It should be 2463 // head->phi * elsize + con. head->phi might have a ConvI2L.
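// For example, a unit-stride int[] fill typically unpacks into a constant (the
// array body's base offset) plus LShiftX(ConvI2L(head->phi), 2) on LP64 builds;
// the shift count must match the element size and anything else is rejected.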
2464 Node* elements[4]; 2465 Node* conv = NULL; 2466 bool found_index = false; 2467 int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements)); 2468 for (int e = 0; e < count; e++) { 2469 Node* n = elements[e]; 2470 if (n->is_Con() && con == NULL) { 2471 con = n; 2472 } else if (n->Opcode() == Op_LShiftX && shift == NULL) { 2473 Node* value = n->in(1); 2474 #ifdef _LP64 2475 if (value->Opcode() == Op_ConvI2L) { 2476 conv = value; 2477 value = value->in(1); 2478 } 2479 #endif 2480 if (value != head->phi()) { 2481 msg = "unhandled shift in address"; 2482 } else { 2483 if (type2aelembytes(store->as_Mem()->memory_type(), true) != (1 << n->in(2)->get_int())) { 2484 msg = "scale doesn't match"; 2485 } else { 2486 found_index = true; 2487 shift = n; 2488 } 2489 } 2490 } else if (n->Opcode() == Op_ConvI2L && conv == NULL) { 2491 if (n->in(1) == head->phi()) { 2492 found_index = true; 2493 conv = n; 2494 } else { 2495 msg = "unhandled input to ConvI2L"; 2496 } 2497 } else if (n == head->phi()) { 2498 // no shift, check below for allowed cases 2499 found_index = true; 2500 } else { 2501 msg = "unhandled node in address"; 2502 msg_node = n; 2503 } 2504 } 2505 2506 if (count == -1) { 2507 msg = "malformed address expression"; 2508 msg_node = store; 2509 } 2510 2511 if (!found_index) { 2512 msg = "missing use of index"; 2513 } 2514 2515 // byte sized items won't have a shift 2516 if (msg == NULL && shift == NULL && t != T_BYTE && t != T_BOOLEAN) { 2517 msg = "can't find shift"; 2518 msg_node = store; 2519 } 2520 2521 if (msg != NULL) { 2522 #ifndef PRODUCT 2523 if (TraceOptimizeFill) { 2524 tty->print_cr("not fill intrinsic: %s", msg); 2525 if (msg_node != NULL) msg_node->dump(); 2526 } 2527 #endif 2528 return false; 2529 } 2530 2531 // No make sure all the other nodes in the loop can be handled 2532 VectorSet ok(Thread::current()->resource_area()); 2533 2534 // store related values are ok 2535 ok.set(store->_idx); 2536 ok.set(store->in(MemNode::Memory)->_idx); 2537 2538 // Loop structure is ok 2539 ok.set(head->_idx); 2540 ok.set(head->loopexit()->_idx); 2541 ok.set(head->phi()->_idx); 2542 ok.set(head->incr()->_idx); 2543 ok.set(head->loopexit()->cmp_node()->_idx); 2544 ok.set(head->loopexit()->in(1)->_idx); 2545 2546 // Address elements are ok 2547 if (con) ok.set(con->_idx); 2548 if (shift) ok.set(shift->_idx); 2549 if (conv) ok.set(conv->_idx); 2550 2551 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { 2552 Node* n = lpt->_body.at(i); 2553 if (n->outcnt() == 0) continue; // Ignore dead 2554 if (ok.test(n->_idx)) continue; 2555 // Backedge projection is ok 2556 if (n->is_IfTrue() && n->in(0) == head->loopexit()) continue; 2557 if (!n->is_AddP()) { 2558 msg = "unhandled node"; 2559 msg_node = n; 2560 break; 2561 } 2562 } 2563 2564 // Make sure no unexpected values are used outside the loop 2565 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { 2566 Node* n = lpt->_body.at(i); 2567 // These values can be replaced with other nodes if they are used 2568 // outside the loop. 
2569 if (n == store || n == head->loopexit() || n == head->incr() || n == store->in(MemNode::Memory)) continue; 2570 for (SimpleDUIterator iter(n); iter.has_next(); iter.next()) { 2571 Node* use = iter.get(); 2572 if (!lpt->_body.contains(use)) { 2573 msg = "node is used outside loop"; 2574 // lpt->_body.dump(); 2575 msg_node = n; 2576 break; 2577 } 2578 } 2579 } 2580 2581 #ifdef ASSERT 2582 if (TraceOptimizeFill) { 2583 if (msg != NULL) { 2584 tty->print_cr("no fill intrinsic: %s", msg); 2585 if (msg_node != NULL) msg_node->dump(); 2586 } else { 2587 tty->print_cr("fill intrinsic for:"); 2588 } 2589 store->dump(); 2590 if (Verbose) { 2591 lpt->_body.dump(); 2592 } 2593 } 2594 #endif 2595 2596 return msg == NULL; 2597 } 2598 2599 2600 2601 bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) { 2602 // Only for counted inner loops 2603 if (!lpt->is_counted() || !lpt->is_inner()) { 2604 return false; 2605 } 2606 2607 // Must have constant stride 2608 CountedLoopNode* head = lpt->_head->as_CountedLoop(); 2609 if (!head->stride_is_con() || !head->is_normal_loop()) { 2610 return false; 2611 } 2612 2613 // Check that the body only contains a store of a loop invariant 2614 // value that is indexed by the loop phi. 2615 Node* store = NULL; 2616 Node* store_value = NULL; 2617 Node* shift = NULL; 2618 Node* offset = NULL; 2619 if (!match_fill_loop(lpt, store, store_value, shift, offset)) { 2620 return false; 2621 } 2622 2623 #ifndef PRODUCT 2624 if (TraceLoopOpts) { 2625 tty->print("ArrayFill "); 2626 lpt->dump_head(); 2627 } 2628 #endif 2629 2630 // Now replace the whole loop body by a call to a fill routine that 2631 // covers the same region as the loop. 2632 Node* base = store->in(MemNode::Address)->as_AddP()->in(AddPNode::Base); 2633 2634 // Build an expression for the beginning of the copy region 2635 Node* index = head->init_trip(); 2636 #ifdef _LP64 2637 index = new (C, 2) ConvI2LNode(index); 2638 _igvn.register_new_node_with_optimizer(index); 2639 #endif 2640 if (shift != NULL) { 2641 // byte arrays don't require a shift but others do. 
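// (For T_BYTE/T_BOOLEAN the trip counter is already a byte offset; wider elements
// scale the index by the shift recovered in match_fill_loop.)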
2642 index = new (C, 3) LShiftXNode(index, shift->in(2)); 2643 _igvn.register_new_node_with_optimizer(index); 2644 } 2645 index = new (C, 4) AddPNode(base, base, index); 2646 _igvn.register_new_node_with_optimizer(index); 2647 Node* from = new (C, 4) AddPNode(base, index, offset); 2648 _igvn.register_new_node_with_optimizer(from); 2649 // Compute the number of elements to copy 2650 Node* len = new (C, 3) SubINode(head->limit(), head->init_trip()); 2651 _igvn.register_new_node_with_optimizer(len); 2652 2653 BasicType t = store->as_Mem()->memory_type(); 2654 bool aligned = false; 2655 if (offset != NULL && head->init_trip()->is_Con()) { 2656 int element_size = type2aelembytes(t); 2657 aligned = (offset->find_intptr_t_type()->get_con() + head->init_trip()->get_int() * element_size) % HeapWordSize == 0; 2658 } 2659 2660 // Build a call to the fill routine 2661 const char* fill_name; 2662 address fill = StubRoutines::select_fill_function(t, aligned, fill_name); 2663 assert(fill != NULL, "what?"); 2664 2665 // Convert float/double to int/long for fill routines 2666 if (t == T_FLOAT) { 2667 store_value = new (C, 2) MoveF2INode(store_value); 2668 _igvn.register_new_node_with_optimizer(store_value); 2669 } else if (t == T_DOUBLE) { 2670 store_value = new (C, 2) MoveD2LNode(store_value); 2671 _igvn.register_new_node_with_optimizer(store_value); 2672 } 2673 2674 Node* mem_phi = store->in(MemNode::Memory); 2675 Node* result_ctrl; 2676 Node* result_mem; 2677 const TypeFunc* call_type = OptoRuntime::array_fill_Type(); 2678 int size = call_type->domain()->cnt(); 2679 CallLeafNode *call = new (C, size) CallLeafNoFPNode(call_type, fill, 2680 fill_name, TypeAryPtr::get_array_body_type(t)); 2681 call->init_req(TypeFunc::Parms+0, from); 2682 call->init_req(TypeFunc::Parms+1, store_value); 2683 #ifdef _LP64 2684 len = new (C, 2) ConvI2LNode(len); 2685 _igvn.register_new_node_with_optimizer(len); 2686 #endif 2687 call->init_req(TypeFunc::Parms+2, len); 2688 #ifdef _LP64 2689 call->init_req(TypeFunc::Parms+3, C->top()); 2690 #endif 2691 call->init_req( TypeFunc::Control, head->init_control()); 2692 call->init_req( TypeFunc::I_O , C->top() ) ; // does no i/o 2693 call->init_req( TypeFunc::Memory , mem_phi->in(LoopNode::EntryControl) ); 2694 call->init_req( TypeFunc::ReturnAdr, C->start()->proj_out(TypeFunc::ReturnAdr) ); 2695 call->init_req( TypeFunc::FramePtr, C->start()->proj_out(TypeFunc::FramePtr) ); 2696 _igvn.register_new_node_with_optimizer(call); 2697 result_ctrl = new (C, 1) ProjNode(call,TypeFunc::Control); 2698 _igvn.register_new_node_with_optimizer(result_ctrl); 2699 result_mem = new (C, 1) ProjNode(call,TypeFunc::Memory); 2700 _igvn.register_new_node_with_optimizer(result_mem); 2701 2702 // If this fill is tightly coupled to an allocation and overwrites 2703 // the whole body, allow it to take over the zeroing. 
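// i.e. when the fill runs from init_trip == 0 up to the allocation's own length,
// the freshly allocated array is completely overwritten, so the allocation no
// longer needs to zero it (maybe_set_complete below).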
2704 AllocateNode* alloc = AllocateNode::Ideal_allocation(base, this); 2705 if (alloc != NULL && alloc->is_AllocateArray()) { 2706 Node* length = alloc->as_AllocateArray()->Ideal_length(); 2707 if (head->limit() == length && 2708 head->init_trip() == _igvn.intcon(0)) { 2709 if (TraceOptimizeFill) { 2710 tty->print_cr("Eliminated zeroing in allocation"); 2711 } 2712 alloc->maybe_set_complete(&_igvn); 2713 } else { 2714 #ifdef ASSERT 2715 if (TraceOptimizeFill) { 2716 tty->print_cr("filling array but bounds don't match"); 2717 alloc->dump(); 2718 head->init_trip()->dump(); 2719 head->limit()->dump(); 2720 length->dump(); 2721 } 2722 #endif 2723 } 2724 } 2725 2726 // Redirect the old control and memory edges that are outside the loop. 2727 Node* exit = head->loopexit()->proj_out(0); 2728 // Sometimes the memory phi of the head is used as the outgoing 2729 // state of the loop. It's safe in this case to replace it with the 2730 // result_mem. 2731 _igvn.replace_node(store->in(MemNode::Memory), result_mem); 2732 _igvn.replace_node(exit, result_ctrl); 2733 _igvn.replace_node(store, result_mem); 2734 // Any uses the increment outside of the loop become the loop limit. 2735 _igvn.replace_node(head->incr(), head->limit()); 2736 2737 // Disconnect the head from the loop. 2738 for (uint i = 0; i < lpt->_body.size(); i++) { 2739 Node* n = lpt->_body.at(i); 2740 _igvn.replace_node(n, C->top()); 2741 } 2742 2743 return true; 2744 }