/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"

//------------------------------is_loop_exit-----------------------------------
// Given an IfNode, return the loop-exiting projection or NULL if both
// arms remain in the loop.
Node *IdealLoopTree::is_loop_exit(Node *iff) const {
  if( iff->outcnt() != 2 ) return NULL;  // Ignore partially dead tests
  PhaseIdealLoop *phase = _phase;
  // Test is an IfNode, has 2 projections.  If BOTH are in the loop
  // we need loop unswitching instead of peeling.
  if( !is_member(phase->get_loop( iff->raw_out(0) )) )
    return iff->raw_out(0);
  if( !is_member(phase->get_loop( iff->raw_out(1) )) )
    return iff->raw_out(1);
  return NULL;
}


//=============================================================================


//------------------------------record_for_igvn----------------------------
// Put loop body on igvn work list
void IdealLoopTree::record_for_igvn() {
  for( uint i = 0; i < _body.size(); i++ ) {
    Node *n = _body.at(i);
    _phase->_igvn._worklist.push(n);
  }
}

//------------------------------compute_exact_trip_count-----------------------
// Compute the loop's exact trip count if possible.  Do not recalculate the
// trip count for split loops (pre-main-post), which have their limits and
// inits hidden behind an Opaque node.
void IdealLoopTree::compute_exact_trip_count( PhaseIdealLoop *phase ) {
  if (!_head->as_Loop()->is_valid_counted_loop()) {
    return;
  }
  CountedLoopNode* cl = _head->as_CountedLoop();
  // The trip count may become nonexact for iteration-split loops since
  // RCE modifies the limits.  Note, the _trip_count value is not reset
  // since it is used to limit unrolling of the main loop.
  cl->set_nonexact_trip_count();

  // Loop's test should be part of loop.
  if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
    return; // Infinite loop

#ifdef ASSERT
  BoolTest::mask bt = cl->loopexit()->test_trip();
  assert(bt == BoolTest::lt || bt == BoolTest::gt ||
         bt == BoolTest::ne, "canonical test is expected");
#endif

  Node* init_n = cl->init_trip();
  Node* limit_n = cl->limit();
  if (init_n != NULL && init_n->is_Con() &&
      limit_n != NULL && limit_n->is_Con()) {
    // Use longs to avoid integer overflow.
    int stride_con = cl->stride_con();
    jlong init_con = cl->init_trip()->get_int();
    jlong limit_con = cl->limit()->get_int();
    int stride_m = stride_con - (stride_con > 0 ? 1 : -1);
    jlong trip_count = (limit_con - init_con + stride_m)/stride_con;
    if (trip_count > 0 && (julong)trip_count < (julong)max_juint) {
      // Set exact trip count.
      cl->set_exact_trip_count((uint)trip_count);
    }
  }
}

//------------------------------compute_profile_trip_cnt----------------------------
// Compute loop trip count from profile data as
//    (backedge_count + loop_exit_count) / loop_exit_count
void IdealLoopTree::compute_profile_trip_cnt( PhaseIdealLoop *phase ) {
  if (!_head->is_CountedLoop()) {
    return;
  }
  CountedLoopNode* head = _head->as_CountedLoop();
  if (head->profile_trip_cnt() != COUNT_UNKNOWN) {
    return; // Already computed
  }
  float trip_cnt = (float)max_jint; // default is big

  Node* back = head->in(LoopNode::LoopBackControl);
  while (back != head) {
    if ((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
        back->in(0) &&
        back->in(0)->is_If() &&
        back->in(0)->as_If()->_fcnt != COUNT_UNKNOWN &&
        back->in(0)->as_If()->_prob != PROB_UNKNOWN) {
      break;
    }
    back = phase->idom(back);
  }
  if (back != head) {
    assert((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
           back->in(0), "if-projection exists");
    IfNode* back_if = back->in(0)->as_If();
    float loop_back_cnt = back_if->_fcnt * back_if->_prob;

    // Now compute a loop exit count
    float loop_exit_cnt = 0.0f;
    for( uint i = 0; i < _body.size(); i++ ) {
      Node *n = _body[i];
      if( n->is_If() ) {
        IfNode *iff = n->as_If();
        if( iff->_fcnt != COUNT_UNKNOWN && iff->_prob != PROB_UNKNOWN ) {
          Node *exit = is_loop_exit(iff);
          if( exit ) {
            float exit_prob = iff->_prob;
            if (exit->Opcode() == Op_IfFalse) exit_prob = 1.0 - exit_prob;
            if (exit_prob > PROB_MIN) {
              float exit_cnt = iff->_fcnt * exit_prob;
              loop_exit_cnt += exit_cnt;
            }
          }
        }
      }
    }
    if (loop_exit_cnt > 0.0f) {
      trip_cnt = (loop_back_cnt + loop_exit_cnt) / loop_exit_cnt;
    } else {
      // No exit count, so use the backedge count as an estimate.
      trip_cnt = loop_back_cnt;
    }
  }
#ifndef PRODUCT
  if (TraceProfileTripCount) {
    tty->print_cr("compute_profile_trip_cnt  lp: %d cnt: %f\n", head->_idx, trip_cnt);
  }
#endif
  head->set_profile_trip_cnt(trip_cnt);
}

//---------------------is_invariant_addition-----------------------------
// Return nonzero index of invariant operand for an Add or Sub
// of (nonconstant) invariant and variant values. Helper for reassociate_invariants.
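// Illustrative example (not from the original comments): with loop-variant
// trip counter i and loop-invariant value inv, AddI(inv, i) returns 1 (input 1
// is the invariant operand), AddI(i, inv) returns 2, and AddI(i, j) with both
// inputs variant (or both invariant) returns 0.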
int IdealLoopTree::is_invariant_addition(Node* n, PhaseIdealLoop *phase) {
  int op = n->Opcode();
  if (op == Op_AddI || op == Op_SubI) {
    bool in1_invar = this->is_invariant(n->in(1));
    bool in2_invar = this->is_invariant(n->in(2));
    if (in1_invar && !in2_invar) return 1;
    if (!in1_invar && in2_invar) return 2;
  }
  return 0;
}

//---------------------reassociate_add_sub-----------------------------
// Reassociate invariant add and subtract expressions:
//
// inv1 + (x + inv2)  =>  ( inv1 + inv2) + x
// (x + inv2) + inv1  =>  ( inv1 + inv2) + x
// inv1 + (x - inv2)  =>  ( inv1 - inv2) + x
// inv1 - (inv2 - x)  =>  ( inv1 - inv2) + x
// (x + inv2) - inv1  =>  (-inv1 + inv2) + x
// (x - inv2) + inv1  =>  ( inv1 - inv2) + x
// (x - inv2) - inv1  =>  (-inv1 - inv2) + x
// inv1 + (inv2 - x)  =>  ( inv1 + inv2) - x
// inv1 - (x - inv2)  =>  ( inv1 + inv2) - x
// (inv2 - x) + inv1  =>  ( inv1 + inv2) - x
// (inv2 - x) - inv1  =>  (-inv1 + inv2) - x
// inv1 - (x + inv2)  =>  ( inv1 - inv2) - x
//
Node* IdealLoopTree::reassociate_add_sub(Node* n1, PhaseIdealLoop *phase) {
  if ((!n1->is_Add() && !n1->is_Sub()) || n1->outcnt() == 0) return NULL;
  if (is_invariant(n1)) return NULL;
  int inv1_idx = is_invariant_addition(n1, phase);
  if (!inv1_idx) return NULL;
  // Don't mess with add of constant (igvn moves them to expression tree root.)
  if (n1->is_Add() && n1->in(2)->is_Con()) return NULL;
  Node* inv1 = n1->in(inv1_idx);
  Node* n2 = n1->in(3 - inv1_idx);
  int inv2_idx = is_invariant_addition(n2, phase);
  if (!inv2_idx) return NULL;
  Node* x    = n2->in(3 - inv2_idx);
  Node* inv2 = n2->in(inv2_idx);

  bool neg_x    = n2->is_Sub() && inv2_idx == 1;
  bool neg_inv2 = n2->is_Sub() && inv2_idx == 2;
  bool neg_inv1 = n1->is_Sub() && inv1_idx == 2;
  if (n1->is_Sub() && inv1_idx == 1) {
    neg_x    = !neg_x;
    neg_inv2 = !neg_inv2;
  }
  Node* inv1_c = phase->get_ctrl(inv1);
  Node* inv2_c = phase->get_ctrl(inv2);
  Node* n_inv1;
  if (neg_inv1) {
    Node *zero = phase->_igvn.intcon(0);
    phase->set_ctrl(zero, phase->C->root());
    n_inv1 = new SubINode(zero, inv1);
    phase->register_new_node(n_inv1, inv1_c);
  } else {
    n_inv1 = inv1;
  }
  Node* inv;
  if (neg_inv2) {
    inv = new SubINode(n_inv1, inv2);
  } else {
    inv = new AddINode(n_inv1, inv2);
  }
  phase->register_new_node(inv, phase->get_early_ctrl(inv));

  Node* addx;
  if (neg_x) {
    addx = new SubINode(inv, x);
  } else {
    addx = new AddINode(x, inv);
  }
  phase->register_new_node(addx, phase->get_ctrl(x));
  phase->_igvn.replace_node(n1, addx);
  assert(phase->get_loop(phase->get_ctrl(n1)) == this, "");
  _body.yank(n1);
  return addx;
}

//---------------------reassociate_invariants-----------------------------
// Reassociate invariant expressions:
void IdealLoopTree::reassociate_invariants(PhaseIdealLoop *phase) {
  for (int i = _body.size() - 1; i >= 0; i--) {
    Node *n = _body.at(i);
    for (int j = 0; j < 5; j++) {
      Node* nn = reassociate_add_sub(n, phase);
      if (nn == NULL) break;
      n = nn; // again
    }
  }
}

//------------------------------policy_peeling---------------------------------
// Return TRUE or FALSE if the loop should be peeled or not.  Peel if we can
// make some loop-invariant test (usually a null-check) happen before the loop.
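// Source-level sketch (illustrative only) of the shape this policy looks for:
//
//   while (i < n) { if (p == null) uncommon_trap; sum += p.a[i]; i++; }
//
// The null check on the loop-invariant 'p' exits the loop, so peeling one
// iteration lets peeled_dom_test_elim() remove the check from the remaining
// loop body.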
bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const {
  Node *test = ((IdealLoopTree*)this)->tail();
  int  body_size = ((IdealLoopTree*)this)->_body.size();
  int  live_node_count = phase->C->live_nodes();
  // Peeling does loop cloning which can result in O(N^2) node construction
  if( body_size > 255 /* Prevent overflow for large body_size */
      || (body_size * body_size + live_node_count > MaxNodeLimit) ) {
    return false;           // too large to safely clone
  }
  while( test != _head ) {      // Scan till run off top of loop
    if( test->is_If() ) {       // Test?
      Node *ctrl = phase->get_ctrl(test->in(1));
      if (ctrl->is_top())
        return false;           // Found dead test on live IF?  No peeling!
      // Standard IF only has one input value to check for loop invariance
      assert( test->Opcode() == Op_If || test->Opcode() == Op_CountedLoopEnd, "Check this code when new subtype is added");
      // Condition is not a member of this loop?
      if( !is_member(phase->get_loop(ctrl)) &&
          is_loop_exit(test) )
        return true;            // Found reason to peel!
    }
    // Walk up dominators to loop _head looking for test which is
    // executed on every path thru loop.
    test = phase->idom(test);
  }
  return false;
}

//------------------------------peeled_dom_test_elim---------------------------
// If we got the effect of peeling, either by actually peeling or by making
// a pre-loop which must execute at least once, we can remove all
// loop-invariant dominated tests in the main body.
void PhaseIdealLoop::peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new ) {
  bool progress = true;
  while( progress ) {
    progress = false;           // Reset for next iteration
    Node *prev = loop->_head->in(LoopNode::LoopBackControl);//loop->tail();
    Node *test = prev->in(0);
    while( test != loop->_head ) { // Scan till run off top of loop

      int p_op = prev->Opcode();
      if( (p_op == Op_IfFalse || p_op == Op_IfTrue) &&
          test->is_If() &&      // Test?
          !test->in(1)->is_Con() && // And not already obvious?
          // Condition is not a member of this loop?
          !loop->is_member(get_loop(get_ctrl(test->in(1))))){
        // Walk loop body looking for instances of this test
        for( uint i = 0; i < loop->_body.size(); i++ ) {
          Node *n = loop->_body.at(i);
          if( n->is_If() && n->in(1) == test->in(1) /*&& n != loop->tail()->in(0)*/ ) {
            // IfNode was dominated by version in peeled loop body
            progress = true;
            dominated_by( old_new[prev->_idx], n );
          }
        }
      }
      prev = test;
      test = idom(test);
    } // End of scan tests in loop

  } // End of while( progress )
}

//------------------------------do_peeling-------------------------------------
// Peel the first iteration of the given loop.
// Step 1: Clone the loop body.  The clone becomes the peeled iteration.
//         The pre-loop illegally has 2 control users (old & new loops).
// Step 2: Make the old-loop fall-in edges point to the peeled iteration.
//         Do this by making the old-loop fall-in edges act as if they came
//         around the loopback from the prior iteration (follow the old-loop
//         backedges) and then map to the new peeled iteration.  This leaves
//         the pre-loop with only 1 user (the new peeled iteration), but the
//         peeled-loop backedge has 2 users.
// Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
//         extra backedge user.
347 // 348 // orig 349 // 350 // stmt1 351 // | 352 // v 353 // loop predicate 354 // | 355 // v 356 // loop<----+ 357 // | | 358 // stmt2 | 359 // | | 360 // v | 361 // if ^ 362 // / \ | 363 // / \ | 364 // v v | 365 // false true | 366 // / \ | 367 // / ----+ 368 // | 369 // v 370 // exit 371 // 372 // 373 // after clone loop 374 // 375 // stmt1 376 // | 377 // v 378 // loop predicate 379 // / \ 380 // clone / \ orig 381 // / \ 382 // / \ 383 // v v 384 // +---->loop clone loop<----+ 385 // | | | | 386 // | stmt2 clone stmt2 | 387 // | | | | 388 // | v v | 389 // ^ if clone If ^ 390 // | / \ / \ | 391 // | / \ / \ | 392 // | v v v v | 393 // | true false false true | 394 // | / \ / \ | 395 // +---- \ / ----+ 396 // \ / 397 // 1v v2 398 // region 399 // | 400 // v 401 // exit 402 // 403 // 404 // after peel and predicate move 405 // 406 // stmt1 407 // / 408 // / 409 // clone / orig 410 // / 411 // / +----------+ 412 // / | | 413 // / loop predicate | 414 // / | | 415 // v v | 416 // TOP-->loop clone loop<----+ | 417 // | | | | 418 // stmt2 clone stmt2 | | 419 // | | | ^ 420 // v v | | 421 // if clone If ^ | 422 // / \ / \ | | 423 // / \ / \ | | 424 // v v v v | | 425 // true false false true | | 426 // | \ / \ | | 427 // | \ / ----+ ^ 428 // | \ / | 429 // | 1v v2 | 430 // v region | 431 // | | | 432 // | v | 433 // | exit | 434 // | | 435 // +--------------->-----------------+ 436 // 437 // 438 // final graph 439 // 440 // stmt1 441 // | 442 // v 443 // stmt2 clone 444 // | 445 // v 446 // if clone 447 // / | 448 // / | 449 // v v 450 // false true 451 // | | 452 // | v 453 // | loop predicate 454 // | | 455 // | v 456 // | loop<----+ 457 // | | | 458 // | stmt2 | 459 // | | | 460 // | v | 461 // v if ^ 462 // | / \ | 463 // | / \ | 464 // | v v | 465 // | false true | 466 // | | \ | 467 // v v --+ 468 // region 469 // | 470 // v 471 // exit 472 // 473 void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) { 474 475 C->set_major_progress(); 476 // Peeling a 'main' loop in a pre/main/post situation obfuscates the 477 // 'pre' loop from the main and the 'pre' can no longer have it's 478 // iterations adjusted. Therefore, we need to declare this loop as 479 // no longer a 'main' loop; it will need new pre and post loops before 480 // we can do further RCE. 481 #ifndef PRODUCT 482 if (TraceLoopOpts) { 483 tty->print("Peel "); 484 loop->dump_head(); 485 } 486 #endif 487 Node* head = loop->_head; 488 bool counted_loop = head->is_CountedLoop(); 489 if (counted_loop) { 490 CountedLoopNode *cl = head->as_CountedLoop(); 491 assert(cl->trip_count() > 0, "peeling a fully unrolled loop"); 492 cl->set_trip_count(cl->trip_count() - 1); 493 if (cl->is_main_loop()) { 494 cl->set_normal_loop(); 495 #ifndef PRODUCT 496 if (PrintOpto && VerifyLoopOptimizations) { 497 tty->print("Peeling a 'main' loop; resetting to 'normal' "); 498 loop->dump_head(); 499 } 500 #endif 501 } 502 } 503 Node* entry = head->in(LoopNode::EntryControl); 504 505 // Step 1: Clone the loop body. The clone becomes the peeled iteration. 506 // The pre-loop illegally has 2 control users (old & new loops). 507 clone_loop( loop, old_new, dom_depth(head) ); 508 509 // Step 2: Make the old-loop fall-in edges point to the peeled iteration. 510 // Do this by making the old-loop fall-in edges act as if they came 511 // around the loopback from the prior iteration (follow the old-loop 512 // backedges) and then map to the new peeled iteration. 
  //         the pre-loop with only 1 user (the new peeled iteration), but the
  //         peeled-loop backedge has 2 users.
  Node* new_entry = old_new[head->in(LoopNode::LoopBackControl)->_idx];
  _igvn.hash_delete(head);
  head->set_req(LoopNode::EntryControl, new_entry);
  for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
    Node* old = head->fast_out(j);
    if (old->in(0) == loop->_head && old->req() == 3 && old->is_Phi()) {
      Node* new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx];
      if (!new_exit_value )     // Backedge value is ALSO loop invariant?
        // Then loop body backedge value remains the same.
        new_exit_value = old->in(LoopNode::LoopBackControl);
      _igvn.hash_delete(old);
      old->set_req(LoopNode::EntryControl, new_exit_value);
    }
  }


  // Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
  //         extra backedge user.
  Node* new_head = old_new[head->_idx];
  _igvn.hash_delete(new_head);
  new_head->set_req(LoopNode::LoopBackControl, C->top());
  for (DUIterator_Fast j2max, j2 = new_head->fast_outs(j2max); j2 < j2max; j2++) {
    Node* use = new_head->fast_out(j2);
    if (use->in(0) == new_head && use->req() == 3 && use->is_Phi()) {
      _igvn.hash_delete(use);
      use->set_req(LoopNode::LoopBackControl, C->top());
    }
  }


  // Step 4: Correct dom-depth info.  Set to loop-head depth.
  int dd = dom_depth(head);
  set_idom(head, head->in(1), dd);
  for (uint j3 = 0; j3 < loop->_body.size(); j3++) {
    Node *old = loop->_body.at(j3);
    Node *nnn = old_new[old->_idx];
    if (!has_ctrl(nnn))
      set_idom(nnn, idom(nnn), dd-1);
  }

  // Now force out all loop-invariant dominating tests.  The optimizer
  // finds some, but we _know_ they are all useless.
  peeled_dom_test_elim(loop,old_new);

  loop->record_for_igvn();
}

#define EMPTY_LOOP_SIZE 7 // number of nodes in an empty loop

//------------------------------policy_maximally_unroll------------------------
// Calculate exact loop trip count and return true if loop can be maximally
// unrolled.
bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
  CountedLoopNode *cl = _head->as_CountedLoop();
  assert(cl->is_normal_loop(), "");
  if (!cl->is_valid_counted_loop())
    return false; // Malformed counted loop

  if (!cl->has_exact_trip_count()) {
    // Trip count is not exact.
    return false;
  }

  uint trip_count = cl->trip_count();
  // Note, max_juint is used to indicate unknown trip count.
  assert(trip_count > 1, "one iteration loop should be optimized out already");
  assert(trip_count < max_juint, "exact trip_count should be less than max_uint.");

  // Real policy: if we maximally unroll, does it get too big?
  // Allow the unrolled mess to get larger than standard loop
  // size.  After all, it will no longer be a loop.
  uint body_size    = _body.size();
  uint unroll_limit = (uint)LoopUnrollLimit * 4;
  assert( (intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits");
  if (trip_count > unroll_limit || body_size > unroll_limit) {
    return false;
  }

  // Fully unroll a loop with few iterations, regardless of the following
  // conditions, since later loop optimizations will split such a loop
  // anyway (pre-main-post).
  if (trip_count <= 3)
    return true;

  // Take into account that after unroll conjoined heads and tails will fold,
  // otherwise policy_unroll() may allow more unrolling than max unrolling.
  uint new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count;
  uint tst_body_size = (new_body_size - EMPTY_LOOP_SIZE) / trip_count + EMPTY_LOOP_SIZE;
  if (body_size != tst_body_size) // Check for int overflow
    return false;
  if (new_body_size > unroll_limit ||
      // Unrolling can result in a large amount of node construction
      new_body_size >= MaxNodeLimit - (uint) phase->C->live_nodes()) {
    return false;
  }

  // Do not unroll a loop with String intrinsics code.
  // String intrinsics are large and have loops.
  for (uint k = 0; k < _body.size(); k++) {
    Node* n = _body.at(k);
    switch (n->Opcode()) {
      case Op_StrComp:
      case Op_StrEquals:
      case Op_StrIndexOf:
      case Op_EncodeISOArray:
      case Op_AryEq: {
        return false;
      }
#if INCLUDE_RTM_OPT
      case Op_FastLock:
      case Op_FastUnlock: {
        // Don't unroll RTM locking code because it is large.
        if (UseRTMLocking) {
          return false;
        }
      }
#endif
    } // switch
  }

  return true; // Do maximally unroll
}


//------------------------------policy_unroll----------------------------------
// Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if
// the loop is a CountedLoop and the body is small enough.
bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {

  CountedLoopNode *cl = _head->as_CountedLoop();
  assert(cl->is_normal_loop() || cl->is_main_loop(), "");

  if (!cl->is_valid_counted_loop())
    return false; // Malformed counted loop

  // Protect against over-unrolling.
  // After split at least one iteration will be executed in pre-loop.
  if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false;

  int future_unroll_ct = cl->unrolled_count() * 2;
  if (future_unroll_ct > LoopMaxUnroll) return false;

  // Check for initial stride being a small enough constant
  if (abs(cl->stride_con()) > (1<<2)*future_unroll_ct) return false;

  // Don't unroll if the next round of unrolling would push us
  // over the expected trip count of the loop.  One is subtracted
  // from the expected trip count because the pre-loop normally
  // executes 1 iteration.
  if (UnrollLimitForProfileCheck > 0 &&
      cl->profile_trip_cnt() != COUNT_UNKNOWN &&
      future_unroll_ct > UnrollLimitForProfileCheck &&
      (float)future_unroll_ct > cl->profile_trip_cnt() - 1.0) {
    return false;
  }

  // When unroll count is greater than LoopUnrollMin, don't unroll if:
  //   the residual iterations are more than 10% of the trip count
  //   and rounds of "unroll,optimize" are not making significant progress
  //   Progress defined as current size less than 20% larger than previous size.
  if (UseSuperWord && cl->node_count_before_unroll() > 0 &&
      future_unroll_ct > LoopUnrollMin &&
      (future_unroll_ct - 1) * 10.0 > cl->profile_trip_cnt() &&
      1.2 * cl->node_count_before_unroll() < (double)_body.size()) {
    return false;
  }

  Node *init_n = cl->init_trip();
  Node *limit_n = cl->limit();
  int stride_con = cl->stride_con();
  // Non-constant bounds.
  // Protect against over-unrolling when init and/or limit are not constant
  // (so that trip_count's init value is maxint) but iv range is known.
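  // Worked example (illustrative, not from the original comments): if the iv
  // phi's type is [0, 100] and stride_con is 4, then next_stride is 8 and
  // 0 + 8 <= 100, so the check below allows unrolling; with an iv type of
  // [0, 5] the same check would detect over-unrolling and refuse.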
  if (init_n == NULL || !init_n->is_Con() ||
      limit_n == NULL || !limit_n->is_Con()) {
    Node* phi = cl->phi();
    if (phi != NULL) {
      assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi.");
      const TypeInt* iv_type = phase->_igvn.type(phi)->is_int();
      int next_stride = stride_con * 2; // stride after this unroll
      if (next_stride > 0) {
        if (iv_type->_lo + next_stride <= iv_type->_lo || // overflow
            iv_type->_lo + next_stride >  iv_type->_hi) {
          return false;  // over-unrolling
        }
      } else if (next_stride < 0) {
        if (iv_type->_hi + next_stride >= iv_type->_hi || // overflow
            iv_type->_hi + next_stride <  iv_type->_lo) {
          return false;  // over-unrolling
        }
      }
    }
  }

  // After unrolling, the limit will be adjusted: new_limit = limit-stride.
  // Bail out if the adjustment overflows.
  const TypeInt* limit_type = phase->_igvn.type(limit_n)->is_int();
  if (stride_con > 0 && ((limit_type->_hi - stride_con) >= limit_type->_hi) ||
      stride_con < 0 && ((limit_type->_lo - stride_con) <= limit_type->_lo))
    return false;  // overflow

  // Adjust body_size to determine if we unroll or not
  uint body_size = _body.size();
  // Key test to unroll loop in CRC32 java code
  int xors_in_loop = 0;
  // Also count ModL, DivL and MulL which expand mightily
  for (uint k = 0; k < _body.size(); k++) {
    Node* n = _body.at(k);
    switch (n->Opcode()) {
      case Op_XorI: xors_in_loop++; break; // CRC32 java code
      case Op_ModL: body_size += 30; break;
      case Op_DivL: body_size += 30; break;
      case Op_MulL: body_size += 10; break;
      case Op_StrComp:
      case Op_StrEquals:
      case Op_StrIndexOf:
      case Op_EncodeISOArray:
      case Op_AryEq: {
        // Do not unroll a loop with String intrinsics code.
        // String intrinsics are large and have loops.
        return false;
      }
#if INCLUDE_RTM_OPT
      case Op_FastLock:
      case Op_FastUnlock: {
        // Don't unroll RTM locking code because it is large.
        if (UseRTMLocking) {
          return false;
        }
      }
#endif
    } // switch
  }

  // Check for being too big
  if (body_size > (uint)LoopUnrollLimit) {
    if (xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true;
    // Normal case: loop too big
    return false;
  }

  // Unroll once!  (Each trip will soon do double iterations)
  return true;
}

//------------------------------policy_align-----------------------------------
// Return TRUE or FALSE if the loop should be cache-line aligned.  Gather the
// expression that does the alignment.  Note that only one array base can be
// aligned in a loop (unless the VM guarantees mutual alignment).  Note that
// if we vectorize short memory ops into longer memory ops, we may want to
// increase alignment.
bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const {
  return false;
}

//------------------------------policy_range_check-----------------------------
// Return TRUE or FALSE if the loop should be range-check-eliminated.
// Actually we do iteration-splitting, a more powerful form of RCE.
bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const {
  if (!RangeCheckElimination) return false;

  CountedLoopNode *cl = _head->as_CountedLoop();
  // If we unrolled with no intention of doing RCE and we later
  // changed our minds, we got no pre-loop.  Either we need to
  // make a new pre-loop, or we gotta disallow RCE.
  if (cl->is_main_no_pre_loop()) return false; // Disallowed for now.
  Node *trip_counter = cl->phi();

  // Check loop body for tests of trip-counter plus loop-invariant vs
  // loop-invariant.
  for (uint i = 0; i < _body.size(); i++) {
    Node *iff = _body[i];
    if (iff->Opcode() == Op_If) { // Test?

      // Comparing trip+off vs limit
      Node *bol = iff->in(1);
      if (bol->req() != 2) continue; // dead constant test
      if (!bol->is_Bool()) {
        assert(UseLoopPredicate && bol->Opcode() == Op_Conv2B, "predicate check only");
        continue;
      }
      if (bol->as_Bool()->_test._test == BoolTest::ne)
        continue; // not RC

      Node *cmp = bol->in(1);
      Node *rc_exp = cmp->in(1);
      Node *limit = cmp->in(2);

      Node *limit_c = phase->get_ctrl(limit);
      if( limit_c == phase->C->top() )
        return false;           // Found dead test on live IF?  No RCE!
      if( is_member(phase->get_loop(limit_c) ) ) {
        // Compare might have operands swapped; commute them
        rc_exp = cmp->in(2);
        limit  = cmp->in(1);
        limit_c = phase->get_ctrl(limit);
        if( is_member(phase->get_loop(limit_c) ) )
          continue;             // Both inputs are loop varying; cannot RCE
      }

      if (!phase->is_scaled_iv_plus_offset(rc_exp, trip_counter, NULL, NULL)) {
        continue;
      }
      // Yeah!  Found a test like 'trip+off vs limit'
      // Test is an IfNode, has 2 projections.  If BOTH are in the loop
      // we need loop unswitching instead of iteration splitting.
      if( is_loop_exit(iff) )
        return true;            // Found reason to split iterations
    } // End of is IF
  }

  return false;
}

//------------------------------policy_peel_only-------------------------------
// Return TRUE or FALSE if the loop should NEVER be RCE'd or aligned.  Useful
// for unrolling loops with NO array accesses.
bool IdealLoopTree::policy_peel_only( PhaseIdealLoop *phase ) const {

  for( uint i = 0; i < _body.size(); i++ )
    if( _body[i]->is_Mem() )
      return false;

  // No memory accesses at all!
  return true;
}

//------------------------------clone_up_backedge_goo--------------------------
// If Node n lives in the back_ctrl block and cannot float, we clone a private
// version of n in preheader_ctrl block and return that, otherwise return n.
Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones ) {
  if( get_ctrl(n) != back_ctrl ) return n;

  // Only visit once
  if (visited.test_set(n->_idx)) {
    Node *x = clones.find(n->_idx);
    if (x != NULL)
      return x;
    return n;
  }

  Node *x = NULL;               // If required, a clone of 'n'
  // Check for 'n' being pinned in the backedge.
  if( n->in(0) && n->in(0) == back_ctrl ) {
    assert(clones.find(n->_idx) == NULL, "dead loop");
    x = n->clone();             // Clone a copy of 'n' to preheader
    clones.push(x, n->_idx);
    x->set_req( 0, preheader_ctrl ); // Fix x's control input to preheader
  }

  // Recursively fix up any other input edges into x.
  // If there are no changes we can just return 'n', otherwise
  // we need to clone a private copy and change it.
  for( uint i = 1; i < n->req(); i++ ) {
    Node *g = clone_up_backedge_goo( back_ctrl, preheader_ctrl, n->in(i), visited, clones );
    if( g != n->in(i) ) {
      if( !x ) {
        assert(clones.find(n->_idx) == NULL, "dead loop");
        x = n->clone();
        clones.push(x, n->_idx);
      }
      x->set_req(i, g);
    }
  }
  if( x ) {                     // x can legally float to pre-header location
    register_new_node( x, preheader_ctrl );
    return x;
  } else {                      // raise n to cover LCA of uses
    set_ctrl( n, find_non_split_ctrl(back_ctrl->in(0)) );
  }
  return n;
}

//------------------------------insert_pre_post_loops--------------------------
// Insert pre and post loops.  If peel_only is set, the pre-loop can not have
// more iterations added.  It acts as a 'peel' only, no lower-bound RCE, no
// alignment.  Useful to unroll loops that do no array accesses.
void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ) {

#ifndef PRODUCT
  if (TraceLoopOpts) {
    if (peel_only)
      tty->print("PeelMainPost ");
    else
      tty->print("PreMainPost  ");
    loop->dump_head();
  }
#endif
  C->set_major_progress();

  // Find common pieces of the loop being guarded with pre & post loops
  CountedLoopNode *main_head = loop->_head->as_CountedLoop();
  assert( main_head->is_normal_loop(), "" );
  CountedLoopEndNode *main_end = main_head->loopexit();
  guarantee(main_end != NULL, "no loop exit node");
  assert( main_end->outcnt() == 2, "1 true, 1 false path only" );
  uint dd_main_head = dom_depth(main_head);
  uint max = main_head->outcnt();

  Node *pre_header = main_head->in(LoopNode::EntryControl);
  Node *init       = main_head->init_trip();
  Node *incr       = main_end ->incr();
  Node *limit      = main_end ->limit();
  Node *stride     = main_end ->stride();
  Node *cmp        = main_end ->cmp_node();
  BoolTest::mask b_test = main_end->test_trip();

  // Need only 1 user of 'bol' because I will be hacking the loop bounds.
  Node *bol = main_end->in(CountedLoopEndNode::TestValue);
  if( bol->outcnt() != 1 ) {
    bol = bol->clone();
    register_new_node(bol,main_end->in(CountedLoopEndNode::TestControl));
    _igvn.hash_delete(main_end);
    main_end->set_req(CountedLoopEndNode::TestValue, bol);
  }
  // Need only 1 user of 'cmp' because I will be hacking the loop bounds.
  if( cmp->outcnt() != 1 ) {
    cmp = cmp->clone();
    register_new_node(cmp,main_end->in(CountedLoopEndNode::TestControl));
    _igvn.hash_delete(bol);
    bol->set_req(1, cmp);
  }

  //------------------------------
  // Step A: Create Post-Loop.
  Node* main_exit = main_end->proj_out(false);
  assert( main_exit->Opcode() == Op_IfFalse, "" );
  int dd_main_exit = dom_depth(main_exit);

  // Step A1: Clone the loop body.  The clone becomes the post-loop.  The main
  // loop pre-header illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dd_main_exit );
  assert( old_new[main_end ->_idx]->Opcode() == Op_CountedLoopEnd, "" );
  CountedLoopNode *post_head = old_new[main_head->_idx]->as_CountedLoop();
  post_head->set_post_loop(main_head);

  // Reduce the post-loop trip count.
  CountedLoopEndNode* post_end = old_new[main_end ->_idx]->as_CountedLoopEnd();
  post_end->_prob = PROB_FAIR;

  // Build the main-loop normal exit.
  IfFalseNode *new_main_exit = new IfFalseNode(main_end);
  _igvn.register_new_node_with_optimizer( new_main_exit );
  set_idom(new_main_exit, main_end, dd_main_exit );
  set_loop(new_main_exit, loop->_parent);

  // Step A2: Build a zero-trip guard for the post-loop.  After leaving the
  // main-loop, the post-loop may not execute at all.  We 'opaque' the incr
  // (the main-loop trip-counter exit value) because we will be changing
  // the exit value (via unrolling) so we cannot constant-fold away the zero
  // trip guard until all unrolling is done.
  Node *zer_opaq = new Opaque1Node(C, incr);
  Node *zer_cmp  = new CmpINode( zer_opaq, limit );
  Node *zer_bol  = new BoolNode( zer_cmp, b_test );
  register_new_node( zer_opaq, new_main_exit );
  register_new_node( zer_cmp , new_main_exit );
  register_new_node( zer_bol , new_main_exit );

  // Build the IfNode
  IfNode *zer_iff = new IfNode( new_main_exit, zer_bol, PROB_FAIR, COUNT_UNKNOWN );
  _igvn.register_new_node_with_optimizer( zer_iff );
  set_idom(zer_iff, new_main_exit, dd_main_exit);
  set_loop(zer_iff, loop->_parent);

  // Plug in the false-path, taken if we need to skip post-loop
  _igvn.replace_input_of(main_exit, 0, zer_iff);
  set_idom(main_exit, zer_iff, dd_main_exit);
  set_idom(main_exit->unique_out(), zer_iff, dd_main_exit);
  // Make the true-path, must enter the post loop
  Node *zer_taken = new IfTrueNode( zer_iff );
  _igvn.register_new_node_with_optimizer( zer_taken );
  set_idom(zer_taken, zer_iff, dd_main_exit);
  set_loop(zer_taken, loop->_parent);
  // Plug in the true path
  _igvn.hash_delete( post_head );
  post_head->set_req(LoopNode::EntryControl, zer_taken);
  set_idom(post_head, zer_taken, dd_main_exit);

  Arena *a = Thread::current()->resource_area();
  VectorSet visited(a);
  Node_Stack clones(a, main_head->back_control()->outcnt());
  // Step A3: Make the fall-in values to the post-loop come from the
  // fall-out values of the main-loop.
  for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) {
    Node* main_phi = main_head->fast_out(i);
    if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) {
      Node *post_phi = old_new[main_phi->_idx];
      Node *fallmain = clone_up_backedge_goo(main_head->back_control(),
                                             post_head->init_control(),
                                             main_phi->in(LoopNode::LoopBackControl),
                                             visited, clones);
      _igvn.hash_delete(post_phi);
      post_phi->set_req( LoopNode::EntryControl, fallmain );
    }
  }

  // Update local caches for next stanza
  main_exit = new_main_exit;


  //------------------------------
  // Step B: Create Pre-Loop.

  // Step B1: Clone the loop body.  The clone becomes the pre-loop.  The main
  // loop pre-header illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dd_main_head );
  CountedLoopNode*    pre_head = old_new[main_head->_idx]->as_CountedLoop();
  CountedLoopEndNode* pre_end  = old_new[main_end ->_idx]->as_CountedLoopEnd();
  pre_head->set_pre_loop(main_head);
  Node *pre_incr = old_new[incr->_idx];

  // Reduce the pre-loop trip count.
  pre_end->_prob = PROB_FAIR;

  // Find the pre-loop normal exit.
  Node* pre_exit = pre_end->proj_out(false);
  assert( pre_exit->Opcode() == Op_IfFalse, "" );
  IfFalseNode *new_pre_exit = new IfFalseNode(pre_end);
  _igvn.register_new_node_with_optimizer( new_pre_exit );
  set_idom(new_pre_exit, pre_end, dd_main_head);
  set_loop(new_pre_exit, loop->_parent);

  // Step B2: Build a zero-trip guard for the main-loop.  After leaving the
  // pre-loop, the main-loop may not execute at all.  Later in life this
  // zero-trip guard will become the minimum-trip guard when we unroll
  // the main-loop.
  Node *min_opaq = new Opaque1Node(C, limit);
  Node *min_cmp  = new CmpINode( pre_incr, min_opaq );
  Node *min_bol  = new BoolNode( min_cmp, b_test );
  register_new_node( min_opaq, new_pre_exit );
  register_new_node( min_cmp , new_pre_exit );
  register_new_node( min_bol , new_pre_exit );

  // Build the IfNode (assume the main-loop is executed always).
  IfNode *min_iff = new IfNode( new_pre_exit, min_bol, PROB_ALWAYS, COUNT_UNKNOWN );
  _igvn.register_new_node_with_optimizer( min_iff );
  set_idom(min_iff, new_pre_exit, dd_main_head);
  set_loop(min_iff, loop->_parent);

  // Plug in the false-path, taken if we need to skip main-loop
  _igvn.hash_delete( pre_exit );
  pre_exit->set_req(0, min_iff);
  set_idom(pre_exit, min_iff, dd_main_head);
  set_idom(pre_exit->unique_out(), min_iff, dd_main_head);
  // Make the true-path, must enter the main loop
  Node *min_taken = new IfTrueNode( min_iff );
  _igvn.register_new_node_with_optimizer( min_taken );
  set_idom(min_taken, min_iff, dd_main_head);
  set_loop(min_taken, loop->_parent);
  // Plug in the true path
  _igvn.hash_delete( main_head );
  main_head->set_req(LoopNode::EntryControl, min_taken);
  set_idom(main_head, min_taken, dd_main_head);

  visited.Clear();
  clones.clear();
  // Step B3: Make the fall-in values to the main-loop come from the
  // fall-out values of the pre-loop.
  for (DUIterator_Fast i2max, i2 = main_head->fast_outs(i2max); i2 < i2max; i2++) {
    Node* main_phi = main_head->fast_out(i2);
    if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) {
      Node *pre_phi = old_new[main_phi->_idx];
      Node *fallpre = clone_up_backedge_goo(pre_head->back_control(),
                                            main_head->init_control(),
                                            pre_phi->in(LoopNode::LoopBackControl),
                                            visited, clones);
      _igvn.hash_delete(main_phi);
      main_phi->set_req( LoopNode::EntryControl, fallpre );
    }
  }

  // Step B4: Shorten the pre-loop to run only 1 iteration (for now).
  // RCE and alignment may change this later.
  Node *cmp_end = pre_end->cmp_node();
  assert( cmp_end->in(2) == limit, "" );
  Node *pre_limit = new AddINode( init, stride );

  // Save the original loop limit in this Opaque1 node for
  // use by range check elimination.
  Node *pre_opaq  = new Opaque1Node(C, pre_limit, limit);

  register_new_node( pre_limit, pre_head->in(0) );
  register_new_node( pre_opaq , pre_head->in(0) );

  // Since no other users of pre-loop compare, I can hack limit directly
  assert( cmp_end->outcnt() == 1, "no other users" );
  _igvn.hash_delete(cmp_end);
  cmp_end->set_req(2, peel_only ? pre_limit : pre_opaq);

  // Special case for not-equal loop bounds:
  // Change pre loop test, main loop test, and the
  // main loop guard test to use lt or gt depending on stride
  // direction:
  //   positive stride use <
  //   negative stride use >
  //
  // not-equal test is kept for post loop to handle case
  // when init > limit when stride > 0 (and reverse).

  if (pre_end->in(CountedLoopEndNode::TestValue)->as_Bool()->_test._test == BoolTest::ne) {

    BoolTest::mask new_test = (main_end->stride_con() > 0) ? BoolTest::lt : BoolTest::gt;
    // Modify pre loop end condition
    Node* pre_bol = pre_end->in(CountedLoopEndNode::TestValue)->as_Bool();
    BoolNode* new_bol0 = new BoolNode(pre_bol->in(1), new_test);
    register_new_node( new_bol0, pre_head->in(0) );
    _igvn.hash_delete(pre_end);
    pre_end->set_req(CountedLoopEndNode::TestValue, new_bol0);
    // Modify main loop guard condition
    assert(min_iff->in(CountedLoopEndNode::TestValue) == min_bol, "guard okay");
    BoolNode* new_bol1 = new BoolNode(min_bol->in(1), new_test);
    register_new_node( new_bol1, new_pre_exit );
    _igvn.hash_delete(min_iff);
    min_iff->set_req(CountedLoopEndNode::TestValue, new_bol1);
    // Modify main loop end condition
    BoolNode* main_bol = main_end->in(CountedLoopEndNode::TestValue)->as_Bool();
    BoolNode* new_bol2 = new BoolNode(main_bol->in(1), new_test);
    register_new_node( new_bol2, main_end->in(CountedLoopEndNode::TestControl) );
    _igvn.hash_delete(main_end);
    main_end->set_req(CountedLoopEndNode::TestValue, new_bol2);
  }

  // Flag main loop
  main_head->set_main_loop();
  if( peel_only ) main_head->set_main_no_pre_loop();

  // Subtract a trip count for the pre-loop.
  main_head->set_trip_count(main_head->trip_count() - 1);

  // It's difficult to be precise about the trip-counts
  // for the pre/post loops.  They are usually very short,
  // so guess that 4 trips is a reasonable value.
  post_head->set_profile_trip_cnt(4.0);
  pre_head->set_profile_trip_cnt(4.0);

  // Now force out all loop-invariant dominating tests.  The optimizer
  // finds some, but we _know_ they are all useless.
  peeled_dom_test_elim(loop,old_new);
  loop->record_for_igvn();
}

//------------------------------is_invariant-----------------------------
// Return true if n is invariant
bool IdealLoopTree::is_invariant(Node* n) const {
  Node *n_c = _phase->has_ctrl(n) ? _phase->get_ctrl(n) : n;
  if (n_c->is_top()) return false;
  return !is_member(_phase->get_loop(n_c));
}


//------------------------------do_unroll--------------------------------------
// Unroll the loop body one step - make each trip do 2 iterations.
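// Source-level sketch (illustrative only) of the overall effect on a simple
// counted loop, assuming the adjusted limit described in Step 2 below:
//
//   for (int i = init; i < limit; i += stride)            { S(i); }
// becomes, roughly,
//   for (int i = init; i < limit - stride; i += 2*stride) { S(i); S(i + stride); }
//
// with any leftover iteration handled by the post-loop created in
// insert_pre_post_loops().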
void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip ) {
  assert(LoopUnrollLimit, "");
  CountedLoopNode *loop_head = loop->_head->as_CountedLoop();
  CountedLoopEndNode *loop_end = loop_head->loopexit();
  assert(loop_end, "");
#ifndef PRODUCT
  if (PrintOpto && VerifyLoopOptimizations) {
    tty->print("Unrolling ");
    loop->dump_head();
  } else if (TraceLoopOpts) {
    if (loop_head->trip_count() < (uint)LoopUnrollLimit) {
      tty->print("Unroll %d(%2d) ", loop_head->unrolled_count()*2, loop_head->trip_count());
    } else {
      tty->print("Unroll %d ", loop_head->unrolled_count()*2);
    }
    loop->dump_head();
  }
#endif

  // Remember loop node count before unrolling to detect
  // if rounds of unroll,optimize are making progress
  loop_head->set_node_count_before_unroll(loop->_body.size());

  Node *ctrl   = loop_head->in(LoopNode::EntryControl);
  Node *limit  = loop_head->limit();
  Node *init   = loop_head->init_trip();
  Node *stride = loop_head->stride();

  Node *opaq = NULL;
  if (adjust_min_trip) {       // If not maximally unrolling, need adjustment
    // Search for zero-trip guard.
    assert( loop_head->is_main_loop(), "" );
    assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" );
    Node *iff = ctrl->in(0);
    assert( iff->Opcode() == Op_If, "" );
    Node *bol = iff->in(1);
    assert( bol->Opcode() == Op_Bool, "" );
    Node *cmp = bol->in(1);
    assert( cmp->Opcode() == Op_CmpI, "" );
    opaq = cmp->in(2);
    // Occasionally it's possible for a zero-trip guard Opaque1 node to be
    // optimized away and then another round of loop opts attempted.
    // We cannot optimize this particular loop in that case.
    if (opaq->Opcode() != Op_Opaque1)
      return; // Cannot find zero-trip guard!  Bail out!
    // Zero-trip test uses an 'opaque' node which is not shared.
    assert(opaq->outcnt() == 1 && opaq->in(1) == limit, "");
  }

  C->set_major_progress();

  Node* new_limit = NULL;
  if (UnrollLimitCheck) {
    int stride_con = stride->get_int();
    int stride_p = (stride_con > 0) ? stride_con : -stride_con;
    uint old_trip_count = loop_head->trip_count();
    // Verify that unroll policy result is still valid.
    assert(old_trip_count > 1 &&
           (!adjust_min_trip || stride_p <= (1<<3)*loop_head->unrolled_count()), "sanity");

    // Adjust the loop limit to keep the iteration count valid after unrolling.
    // Use (limit - stride) instead of (((limit - init)/stride) & (-2))*stride
    // which may overflow.
    if (!adjust_min_trip) {
      assert(old_trip_count > 1 && (old_trip_count & 1) == 0,
             "odd trip count for maximally unroll");
      // Don't need to adjust limit for maximally unroll since trip count is even.
    } else if (loop_head->has_exact_trip_count() && init->is_Con()) {
      // Loop's limit is constant.  Loop's init could be constant when the
      // pre-loop becomes the peeled iteration.
      jlong init_con = init->get_int();
      // We can keep old loop limit if iterations count stays the same:
      //   old_trip_count == new_trip_count * 2
      // Note: since old_trip_count >= 2 then new_trip_count >= 1
      // so we also don't need to adjust zero trip test.
      jlong limit_con = limit->get_int();
      // (stride_con*2) does not overflow since stride_con <= 8.
      int new_stride_con = stride_con * 2;
      int stride_m = new_stride_con - (stride_con > 0 ? 1 : -1);
      jlong trip_count = (limit_con - init_con + stride_m)/new_stride_con;
      // New trip count should satisfy the following conditions.
      assert(trip_count > 0 && (julong)trip_count < (julong)max_juint/2, "sanity");
      uint new_trip_count = (uint)trip_count;
      adjust_min_trip = (old_trip_count != new_trip_count*2);
    }

    if (adjust_min_trip) {
      // Step 2: Adjust the trip limit if it is called for.
      // The adjustment amount is -stride.  Need to make sure if the
      // adjustment underflows or overflows, then the main loop is skipped.
      Node* cmp = loop_end->cmp_node();
      assert(cmp->in(2) == limit, "sanity");
      assert(opaq != NULL && opaq->in(1) == limit, "sanity");

      // Verify that policy_unroll result is still valid.
      const TypeInt* limit_type = _igvn.type(limit)->is_int();
      assert(stride_con > 0 && ((limit_type->_hi - stride_con) < limit_type->_hi) ||
             stride_con < 0 && ((limit_type->_lo - stride_con) > limit_type->_lo), "sanity");

      if (limit->is_Con()) {
        // The check in policy_unroll and the assert above guarantee
        // no underflow if limit is constant.
        new_limit = _igvn.intcon(limit->get_int() - stride_con);
        set_ctrl(new_limit, C->root());
      } else {
        // Limit is not constant.
        if (loop_head->unrolled_count() == 1) { // only for first unroll
          // Separate limit by Opaque node in case it is an incremented
          // variable from previous loop to avoid using pre-incremented
          // value which could increase register pressure.
          // Otherwise reorg_offsets() optimization will create a separate
          // Opaque node for each use of trip-counter and as result
          // zero trip guard limit will be different from loop limit.
          assert(has_ctrl(opaq), "should have it");
          Node* opaq_ctrl = get_ctrl(opaq);
          limit = new Opaque2Node( C, limit );
          register_new_node( limit, opaq_ctrl );
        }
        if (stride_con > 0 && ((limit_type->_lo - stride_con) < limit_type->_lo) ||
            stride_con < 0 && ((limit_type->_hi - stride_con) > limit_type->_hi)) {
          // No underflow.
          new_limit = new SubINode(limit, stride);
        } else {
          // (limit - stride) may underflow.
          // Clamp the adjustment value with MININT or MAXINT:
          //
          //   new_limit = limit-stride
          //   if (stride > 0)
          //     new_limit = (limit < new_limit) ? MININT : new_limit;
          //   else
          //     new_limit = (limit > new_limit) ? MAXINT : new_limit;
          //
          BoolTest::mask bt = loop_end->test_trip();
          assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected");
          Node* adj_max = _igvn.intcon((stride_con > 0) ? min_jint : max_jint);
          set_ctrl(adj_max, C->root());
          Node* old_limit = NULL;
          Node* adj_limit = NULL;
          Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : NULL;
          if (loop_head->unrolled_count() > 1 &&
              limit->is_CMove() && limit->Opcode() == Op_CMoveI &&
              limit->in(CMoveNode::IfTrue) == adj_max &&
              bol->as_Bool()->_test._test == bt &&
              bol->in(1)->Opcode() == Op_CmpI &&
              bol->in(1)->in(2) == limit->in(CMoveNode::IfFalse)) {
            // Loop was unrolled before.
            // Optimize the limit to avoid nested CMove:
            // use original limit as old limit.
            old_limit = bol->in(1)->in(1);
            // Adjust previous adjusted limit.
            adj_limit = limit->in(CMoveNode::IfFalse);
            adj_limit = new SubINode(adj_limit, stride);
          } else {
            old_limit = limit;
            adj_limit = new SubINode(limit, stride);
          }
          assert(old_limit != NULL && adj_limit != NULL, "");
          register_new_node( adj_limit, ctrl ); // adjust amount
          Node* adj_cmp = new CmpINode(old_limit, adj_limit);
          register_new_node( adj_cmp, ctrl );
          Node* adj_bool = new BoolNode(adj_cmp, bt);
          register_new_node( adj_bool, ctrl );
          new_limit = new CMoveINode(adj_bool, adj_limit, adj_max, TypeInt::INT);
        }
        register_new_node(new_limit, ctrl);
      }
      assert(new_limit != NULL, "");
      // Replace in loop test.
      assert(loop_end->in(1)->in(1) == cmp, "sanity");
      if (cmp->outcnt() == 1 && loop_end->in(1)->outcnt() == 1) {
        // Don't need to create new test since only one user.
        _igvn.hash_delete(cmp);
        cmp->set_req(2, new_limit);
      } else {
        // Create new test since it is shared.
        Node* ctrl2 = loop_end->in(0);
        Node* cmp2  = cmp->clone();
        cmp2->set_req(2, new_limit);
        register_new_node(cmp2, ctrl2);
        Node* bol2 = loop_end->in(1)->clone();
        bol2->set_req(1, cmp2);
        register_new_node(bol2, ctrl2);
        _igvn.hash_delete(loop_end);
        loop_end->set_req(1, bol2);
      }
      // Step 3: Find the min-trip test guaranteed before a 'main' loop.
      // Make it a 1-trip test (means at least 2 trips).

      // Guard test uses an 'opaque' node which is not shared.  Hence I
      // can edit its inputs directly.  Hammer in the new limit for the
      // minimum-trip guard.
      assert(opaq->outcnt() == 1, "");
      _igvn.hash_delete(opaq);
      opaq->set_req(1, new_limit);
    }

    // Adjust max trip count. The trip count is intentionally rounded
    // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
    // the main, unrolled, part of the loop will never execute as it is protected
    // by the min-trip test.  See bug 4834191 for a case where we over-unrolled
    // and later determined that part of the unrolled loop was dead.
    loop_head->set_trip_count(old_trip_count / 2);

    // Double the count of original iterations in the unrolled loop body.
    loop_head->double_unrolled_count();

  } else { // LoopLimitCheck

    // Adjust max trip count. The trip count is intentionally rounded
    // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
    // the main, unrolled, part of the loop will never execute as it is protected
    // by the min-trip test.  See bug 4834191 for a case where we over-unrolled
    // and later determined that part of the unrolled loop was dead.
    loop_head->set_trip_count(loop_head->trip_count() / 2);

    // Double the count of original iterations in the unrolled loop body.
    loop_head->double_unrolled_count();

    // -----------
    // Step 2: Cut back the trip counter for an unroll amount of 2.
    // Loop will normally trip (limit - init)/stride_con.  Since it's a
    // CountedLoop this is exact (stride divides limit-init exactly).
    // We are going to double the loop body, so we want to knock off any
    // odd iteration: (trip_cnt & ~1).  Then back compute a new limit.
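    // Worked example (illustrative): init=0, limit=15, stride=3 gives
    // span=15, trip=5, rond=4 (5 & ~1), spn2=12, and new_limit=12.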
    Node *span = new SubINode( limit, init );
    register_new_node( span, ctrl );
    Node *trip = new DivINode( 0, span, stride );
    register_new_node( trip, ctrl );
    Node *mtwo = _igvn.intcon(-2);
    set_ctrl(mtwo, C->root());
    Node *rond = new AndINode( trip, mtwo );
    register_new_node( rond, ctrl );
    Node *spn2 = new MulINode( rond, stride );
    register_new_node( spn2, ctrl );
    new_limit = new AddINode( spn2, init );
    register_new_node( new_limit, ctrl );

    // Hammer in the new limit
    Node *ctrl2 = loop_end->in(0);
    Node *cmp2  = new CmpINode( loop_head->incr(), new_limit );
    register_new_node( cmp2, ctrl2 );
    Node *bol2  = new BoolNode( cmp2, loop_end->test_trip() );
    register_new_node( bol2, ctrl2 );
    _igvn.hash_delete(loop_end);
    loop_end->set_req(CountedLoopEndNode::TestValue, bol2);

    // Step 3: Find the min-trip test guaranteed before a 'main' loop.
    // Make it a 1-trip test (means at least 2 trips).
    if( adjust_min_trip ) {
      assert( new_limit != NULL, "" );
      // Guard test uses an 'opaque' node which is not shared.  Hence I
      // can edit its inputs directly.  Hammer in the new limit for the
      // minimum-trip guard.
      assert( opaq->outcnt() == 1, "" );
      _igvn.hash_delete(opaq);
      opaq->set_req(1, new_limit);
    }
  } // LoopLimitCheck

  // ---------
  // Step 4: Clone the loop body.  Move it inside the loop.  This loop body
  // represents the odd iterations; since the loop trips an even number of
  // times its backedge is never taken.  Kill the backedge.
  uint dd = dom_depth(loop_head);
  clone_loop( loop, old_new, dd );

  // Make backedges of the clone equal to backedges of the original.
  // Make the fall-in from the original come from the fall-out of the clone.
  for (DUIterator_Fast jmax, j = loop_head->fast_outs(jmax); j < jmax; j++) {
    Node* phi = loop_head->fast_out(j);
    if( phi->is_Phi() && phi->in(0) == loop_head && phi->outcnt() > 0 ) {
      Node *newphi = old_new[phi->_idx];
      _igvn.hash_delete( phi );
      _igvn.hash_delete( newphi );

      phi   ->set_req(LoopNode::   EntryControl, newphi->in(LoopNode::LoopBackControl));
      newphi->set_req(LoopNode::LoopBackControl, phi   ->in(LoopNode::LoopBackControl));
      phi   ->set_req(LoopNode::LoopBackControl, C->top());
    }
  }
  Node *clone_head = old_new[loop_head->_idx];
  _igvn.hash_delete( clone_head );
  loop_head ->set_req(LoopNode::   EntryControl, clone_head->in(LoopNode::LoopBackControl));
  clone_head->set_req(LoopNode::LoopBackControl, loop_head ->in(LoopNode::LoopBackControl));
  loop_head ->set_req(LoopNode::LoopBackControl, C->top());
  loop->_head = clone_head;     // New loop header

  set_idom(loop_head,  loop_head ->in(LoopNode::EntryControl), dd);
  set_idom(clone_head, clone_head->in(LoopNode::EntryControl), dd);

  // Kill the clone's backedge
  Node *newcle = old_new[loop_end->_idx];
  _igvn.hash_delete( newcle );
  Node *one = _igvn.intcon(1);
  set_ctrl(one, C->root());
  newcle->set_req(1, one);
  // Force clone into same loop body
  uint max = loop->_body.size();
  for( uint k = 0; k < max; k++ ) {
    Node *old = loop->_body.at(k);
    Node *nnn = old_new[old->_idx];
    loop->_body.push(nnn);
    if (!has_ctrl(old))
      set_loop(nnn, loop);
  }

  loop->record_for_igvn();
}

//------------------------------do_maximally_unroll----------------------------

void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) {
  CountedLoopNode *cl = loop->_head->as_CountedLoop();
  assert(cl->has_exact_trip_count(), "trip count is not exact");
  assert(cl->trip_count() > 0, "");
#ifndef PRODUCT
  if (TraceLoopOpts) {
    tty->print("MaxUnroll  %d ", cl->trip_count());
    loop->dump_head();
  }
#endif

  // If loop is tripping an odd number of times, peel odd iteration
  if ((cl->trip_count() & 1) == 1) {
    do_peeling(loop, old_new);
  }

  // Now it's tripping an even number of times remaining.  Double loop body.
  // Do not adjust pre-guards; they are not needed and do not exist.
  if (cl->trip_count() > 0) {
    assert((cl->trip_count() & 1) == 0, "missed peeling");
    do_unroll(loop, old_new, false);
  }
}

//------------------------------dominates_backedge---------------------------------
// Returns true if ctrl is executed on every complete iteration
bool IdealLoopTree::dominates_backedge(Node* ctrl) {
  assert(ctrl->is_CFG(), "must be control");
  Node* backedge = _head->as_Loop()->in(LoopNode::LoopBackControl);
  return _phase->dom_lca_internal(ctrl, backedge) == ctrl;
}

//------------------------------adjust_limit-----------------------------------
// Helper function for add_constraint().
Node* PhaseIdealLoop::adjust_limit(int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl) {
  // Compute "I :: (limit-offset)/scale"
  Node *con = new SubINode(rc_limit, offset);
  register_new_node(con, pre_ctrl);
  Node *X = new DivINode(0, con, scale);
  register_new_node(X, pre_ctrl);

  // Adjust loop limit
  loop_limit = (stride_con > 0)
(Node*)(new MinINode(loop_limit, X))
1522     : (Node*)(new MaxINode(loop_limit, X));
1523   register_new_node(loop_limit, pre_ctrl);
1524   return loop_limit;
1525 }
1526 
1527 //------------------------------add_constraint---------------------------------
1528 // Constrain the main loop iterations so the condition:
1529 //    low_limit <= scale_con * I + offset < upper_limit
1530 // always holds true. That is, either increase the number of iterations in
1531 // the pre-loop or the post-loop until the condition holds true in the main
1532 // loop. Stride, scale, offset and limit are all loop invariant. Further,
1533 // stride and scale are constants (offset and limit often are).
1534 void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) {
1535   // For positive stride, the pre-loop limit always uses a MAX function
1536   // and the main loop a MIN function. For negative stride these are
1537   // reversed.
1538 
1539   // Also for positive stride*scale the affine function is increasing, so the
1540   // pre-loop must check for underflow and the post-loop for overflow.
1541   // Negative stride*scale reverses this; pre-loop checks for overflow and
1542   // post-loop for underflow.
1543 
1544   Node *scale = _igvn.intcon(scale_con);
1545   set_ctrl(scale, C->root());
1546 
1547   if ((stride_con^scale_con) >= 0) { // Use XOR to avoid overflow
1548     // The overflow limit: scale*I+offset < upper_limit
1549     // For main-loop compute
1550     //   ( if (scale > 0) /* and stride > 0 */
1551     //       I < (upper_limit-offset)/scale
1552     //     else /* scale < 0 and stride < 0 */
1553     //       I > (upper_limit-offset)/scale
1554     //   )
1555     //
1556     // (upper_limit-offset) may overflow or underflow.
1557     // But that is fine since the main loop will either run
1558     // fewer iterations or will be skipped in that case.
1559     *main_limit = adjust_limit(stride_con, scale, offset, upper_limit, *main_limit, pre_ctrl);
1560 
1561     // The underflow limit: low_limit <= scale*I+offset.
1562     // For pre-loop compute
1563     //   NOT(scale*I+offset >= low_limit)
1564     //   scale*I+offset < low_limit
1565     //   ( if (scale > 0) /* and stride > 0 */
1566     //       I < (low_limit-offset)/scale
1567     //     else /* scale < 0 and stride < 0 */
1568     //       I > (low_limit-offset)/scale
1569     //   )
1570 
1571     if (low_limit->get_int() == -max_jint) {
1572       if (!RangeLimitCheck) return;
1573       // We need this guard when scale*pre_limit+offset >= limit
1574       // due to underflow. So we need to execute the pre-loop until
1575       // scale*I+offset >= min_int. But (min_int-offset) will
1576       // underflow when offset > 0 and X will be > original_limit
1577       // when stride > 0. To avoid this we replace a positive offset with 0.
1578       //
1579       // Also (min_int+1 == -max_int) is used instead of min_int here
1580       // to avoid problem with scale == -1 (min_int/(-1) == min_int).
1581       Node* shift = _igvn.intcon(31);
1582       set_ctrl(shift, C->root());
1583       Node* sign = new RShiftINode(offset, shift);
1584       register_new_node(sign, pre_ctrl);
1585       offset = new AndINode(offset, sign);
1586       register_new_node(offset, pre_ctrl);
1587     } else {
1588       assert(low_limit->get_int() == 0, "wrong low limit for range check");
1589       // The only problem we have here is when offset == min_int
1590       // since (0-min_int) == min_int. It may be fine for stride > 0
1591       // but for stride < 0 X will be < original_limit. To avoid this,
1592       // max(pre_limit, original_limit) is used in do_range_check().
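      // For example, with stride_con == 1, scale == 1, offset == -5 and
      // low_limit == 0 (a range check of the form 0 <= I - 5), the call
      // below computes (0 - (-5))/1 == 5 and raises the pre-loop limit to
      // MAX(pre_limit, 5), so the main loop only runs trips with I >= 5
      // and the lower bound can no longer underflow.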
1593     }
1594     // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
1595     *pre_limit = adjust_limit((-stride_con), scale, offset, low_limit, *pre_limit, pre_ctrl);
1596 
1597   } else { // stride_con*scale_con < 0
1598     // For negative stride*scale pre-loop checks for overflow and
1599     // post-loop for underflow.
1600     //
1601     // The overflow limit: scale*I+offset < upper_limit
1602     // For pre-loop compute
1603     //   NOT(scale*I+offset < upper_limit)
1604     //   scale*I+offset >= upper_limit
1605     //   scale*I+offset+1 > upper_limit
1606     //   ( if (scale < 0) /* and stride > 0 */
1607     //       I < (upper_limit-(offset+1))/scale
1608     //     else /* scale > 0 and stride < 0 */
1609     //       I > (upper_limit-(offset+1))/scale
1610     //   )
1611     //
1612     // (upper_limit-offset-1) may underflow or overflow.
1613     // To avoid this, min(pre_limit, original_limit) is used
1614     // in do_range_check() for stride > 0 and max() for stride < 0.
1615     Node *one = _igvn.intcon(1);
1616     set_ctrl(one, C->root());
1617 
1618     Node *plus_one = new AddINode(offset, one);
1619     register_new_node( plus_one, pre_ctrl );
1620     // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
1621     *pre_limit = adjust_limit((-stride_con), scale, plus_one, upper_limit, *pre_limit, pre_ctrl);
1622 
1623     if (low_limit->get_int() == -max_jint) {
1624       if (!RangeLimitCheck) return;
1625       // We need this guard when scale*main_limit+offset >= limit
1626       // due to underflow. So we need to execute the main-loop while
1627       // scale*I+offset+1 > min_int. But (min_int-offset-1) will
1628       // underflow when (offset+1) > 0 and X will be < main_limit
1629       // when scale < 0 (and stride > 0). To avoid this we replace a
1630       // positive (offset+1) with 0.
1631       //
1632       // Also (min_int+1 == -max_int) is used instead of min_int here
1633       // to avoid problem with scale == -1 (min_int/(-1) == min_int).
1634       Node* shift = _igvn.intcon(31);
1635       set_ctrl(shift, C->root());
1636       Node* sign = new RShiftINode(plus_one, shift);
1637       register_new_node(sign, pre_ctrl);
1638       plus_one = new AndINode(plus_one, sign);
1639       register_new_node(plus_one, pre_ctrl);
1640     } else {
1641       assert(low_limit->get_int() == 0, "wrong low limit for range check");
1642       // The only problem we have here is when offset == max_int
1643       // since (max_int+1) == min_int and (0-min_int) == min_int.
1644       // But that is fine since the main loop will either run
1645       // fewer iterations or will be skipped in that case.
1646     }
1647     // The underflow limit: low_limit <= scale*I+offset.
1648 // For main-loop compute 1649 // scale*I+offset+1 > low_limit 1650 // ( if (scale < 0) /* and stride > 0 */ 1651 // I < (low_limit-(offset+1))/scale 1652 // else /* scale > 0 and stride < 0 */ 1653 // I > (low_limit-(offset+1))/scale 1654 // ) 1655 1656 *main_limit = adjust_limit(stride_con, scale, plus_one, low_limit, *main_limit, pre_ctrl); 1657 } 1658 } 1659 1660 1661 //------------------------------is_scaled_iv--------------------------------- 1662 // Return true if exp is a constant times an induction var 1663 bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, int* p_scale) { 1664 if (exp == iv) { 1665 if (p_scale != NULL) { 1666 *p_scale = 1; 1667 } 1668 return true; 1669 } 1670 int opc = exp->Opcode(); 1671 if (opc == Op_MulI) { 1672 if (exp->in(1) == iv && exp->in(2)->is_Con()) { 1673 if (p_scale != NULL) { 1674 *p_scale = exp->in(2)->get_int(); 1675 } 1676 return true; 1677 } 1678 if (exp->in(2) == iv && exp->in(1)->is_Con()) { 1679 if (p_scale != NULL) { 1680 *p_scale = exp->in(1)->get_int(); 1681 } 1682 return true; 1683 } 1684 } else if (opc == Op_LShiftI) { 1685 if (exp->in(1) == iv && exp->in(2)->is_Con()) { 1686 if (p_scale != NULL) { 1687 *p_scale = 1 << exp->in(2)->get_int(); 1688 } 1689 return true; 1690 } 1691 } 1692 return false; 1693 } 1694 1695 //-----------------------------is_scaled_iv_plus_offset------------------------------ 1696 // Return true if exp is a simple induction variable expression: k1*iv + (invar + k2) 1697 bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth) { 1698 if (is_scaled_iv(exp, iv, p_scale)) { 1699 if (p_offset != NULL) { 1700 Node *zero = _igvn.intcon(0); 1701 set_ctrl(zero, C->root()); 1702 *p_offset = zero; 1703 } 1704 return true; 1705 } 1706 int opc = exp->Opcode(); 1707 if (opc == Op_AddI) { 1708 if (is_scaled_iv(exp->in(1), iv, p_scale)) { 1709 if (p_offset != NULL) { 1710 *p_offset = exp->in(2); 1711 } 1712 return true; 1713 } 1714 if (exp->in(2)->is_Con()) { 1715 Node* offset2 = NULL; 1716 if (depth < 2 && 1717 is_scaled_iv_plus_offset(exp->in(1), iv, p_scale, 1718 p_offset != NULL ? &offset2 : NULL, depth+1)) { 1719 if (p_offset != NULL) { 1720 Node *ctrl_off2 = get_ctrl(offset2); 1721 Node* offset = new AddINode(offset2, exp->in(2)); 1722 register_new_node(offset, ctrl_off2); 1723 *p_offset = offset; 1724 } 1725 return true; 1726 } 1727 } 1728 } else if (opc == Op_SubI) { 1729 if (is_scaled_iv(exp->in(1), iv, p_scale)) { 1730 if (p_offset != NULL) { 1731 Node *zero = _igvn.intcon(0); 1732 set_ctrl(zero, C->root()); 1733 Node *ctrl_off = get_ctrl(exp->in(2)); 1734 Node* offset = new SubINode(zero, exp->in(2)); 1735 register_new_node(offset, ctrl_off); 1736 *p_offset = offset; 1737 } 1738 return true; 1739 } 1740 if (is_scaled_iv(exp->in(2), iv, p_scale)) { 1741 if (p_offset != NULL) { 1742 *p_scale *= -1; 1743 *p_offset = exp->in(1); 1744 } 1745 return true; 1746 } 1747 } 1748 return false; 1749 } 1750 1751 //------------------------------do_range_check--------------------------------- 1752 // Eliminate range-checks and other trip-counter vs loop-invariant tests. 
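// For example, given a main loop of the form
//   for (int i = init; i < limit; i += stride) { ... a[scale*i + offset] ... }
// every array access carries an implicit check 0 <= scale*i + offset < a.length.
// Rather than testing this on each iteration, the pre-loop limit is raised and
// the main-loop limit is lowered (via add_constraint above) so that any i
// reaching the main body already satisfies the check; the test is then removed
// from the main body while the pre- and post-loops keep the full checks for the
// boundary iterations.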
1753 void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
1754 #ifndef PRODUCT
1755   if (PrintOpto && VerifyLoopOptimizations) {
1756     tty->print("Range Check Elimination ");
1757     loop->dump_head();
1758   } else if (TraceLoopOpts) {
1759     tty->print("RangeCheck ");
1760     loop->dump_head();
1761   }
1762 #endif
1763   assert(RangeCheckElimination, "");
1764   CountedLoopNode *cl = loop->_head->as_CountedLoop();
1765   assert(cl->is_main_loop(), "");
1766 
1767   // protect against stride not being a constant
1768   if (!cl->stride_is_con())
1769     return;
1770 
1771   // Find the trip counter; we are iteration splitting based on it
1772   Node *trip_counter = cl->phi();
1773   // Find the main loop limit; we will trim its iterations
1774   // so they never trip the end tests
1775   Node *main_limit = cl->limit();
1776 
1777   // Need to find the main-loop zero-trip guard
1778   Node *ctrl = cl->in(LoopNode::EntryControl);
1779   assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "");
1780   Node *iffm = ctrl->in(0);
1781   assert(iffm->Opcode() == Op_If, "");
1782   Node *bolzm = iffm->in(1);
1783   assert(bolzm->Opcode() == Op_Bool, "");
1784   Node *cmpzm = bolzm->in(1);
1785   assert(cmpzm->is_Cmp(), "");
1786   Node *opqzm = cmpzm->in(2);
1787   // Cannot optimize a loop if zero-trip Opaque1 node is optimized
1788   // away and then another round of loop opts attempted.
1789   if (opqzm->Opcode() != Op_Opaque1)
1790     return;
1791   assert(opqzm->in(1) == main_limit, "do not understand situation");
1792 
1793   // Find the pre-loop limit; we will expand its iterations so
1794   // they never trip the low tests.
1795   Node *p_f = iffm->in(0);
1796   assert(p_f->Opcode() == Op_IfFalse, "");
1797   CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
1798   assert(pre_end->loopnode()->is_pre_loop(), "");
1799   Node *pre_opaq1 = pre_end->limit();
1800   // Occasionally it's possible for a pre-loop Opaque1 node to be
1801   // optimized away and then another round of loop opts attempted.
1802   // We cannot optimize this particular loop in that case.
1803   if (pre_opaq1->Opcode() != Op_Opaque1)
1804     return;
1805   Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1;
1806   Node *pre_limit = pre_opaq->in(1);
1807 
1808   // Where do we put new limit calculations
1809   Node *pre_ctrl = pre_end->loopnode()->in(LoopNode::EntryControl);
1810 
1811   // Ensure the original loop limit is available from the
1812   // pre-loop Opaque1 node.
1813   Node *orig_limit = pre_opaq->original_loop_limit();
1814   if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP)
1815     return;
1816 
1817   // Must know whether it's a count-up or count-down loop
1818 
1819   int stride_con = cl->stride_con();
1820   Node *zero = _igvn.intcon(0);
1821   Node *one = _igvn.intcon(1);
1822   // Use symmetrical int range [-max_jint,max_jint]
1823   Node *mini = _igvn.intcon(-max_jint);
1824   set_ctrl(zero, C->root());
1825   set_ctrl(one, C->root());
1826   set_ctrl(mini, C->root());
1827 
1828   // Range checks that do not dominate the loop backedge (i.e.
1829   // conditionally executed) can lengthen the pre-loop limit beyond
1830   // the original loop limit. To prevent this, the pre limit is
1831   // (for stride > 0) MINed with the original loop limit (MAXed for
1832   // stride < 0) when some range_check (rc) is conditionally
1833   // executed.
1834   bool conditional_rc = false;
1835 
1836   // Check loop body for tests of trip-counter plus loop-invariant vs
1837   // loop-invariant.
1838   for( uint i = 0; i < loop->_body.size(); i++ ) {
1839     Node *iff = loop->_body[i];
1840     if( iff->Opcode() == Op_If ) { // Test?
1841 1842 // Test is an IfNode, has 2 projections. If BOTH are in the loop 1843 // we need loop unswitching instead of iteration splitting. 1844 Node *exit = loop->is_loop_exit(iff); 1845 if( !exit ) continue; 1846 int flip = (exit->Opcode() == Op_IfTrue) ? 1 : 0; 1847 1848 // Get boolean condition to test 1849 Node *i1 = iff->in(1); 1850 if( !i1->is_Bool() ) continue; 1851 BoolNode *bol = i1->as_Bool(); 1852 BoolTest b_test = bol->_test; 1853 // Flip sense of test if exit condition is flipped 1854 if( flip ) 1855 b_test = b_test.negate(); 1856 1857 // Get compare 1858 Node *cmp = bol->in(1); 1859 1860 // Look for trip_counter + offset vs limit 1861 Node *rc_exp = cmp->in(1); 1862 Node *limit = cmp->in(2); 1863 jint scale_con= 1; // Assume trip counter not scaled 1864 1865 Node *limit_c = get_ctrl(limit); 1866 if( loop->is_member(get_loop(limit_c) ) ) { 1867 // Compare might have operands swapped; commute them 1868 b_test = b_test.commute(); 1869 rc_exp = cmp->in(2); 1870 limit = cmp->in(1); 1871 limit_c = get_ctrl(limit); 1872 if( loop->is_member(get_loop(limit_c) ) ) 1873 continue; // Both inputs are loop varying; cannot RCE 1874 } 1875 // Here we know 'limit' is loop invariant 1876 1877 // 'limit' maybe pinned below the zero trip test (probably from a 1878 // previous round of rce), in which case, it can't be used in the 1879 // zero trip test expression which must occur before the zero test's if. 1880 if( limit_c == ctrl ) { 1881 continue; // Don't rce this check but continue looking for other candidates. 1882 } 1883 1884 // Check for scaled induction variable plus an offset 1885 Node *offset = NULL; 1886 1887 if (!is_scaled_iv_plus_offset(rc_exp, trip_counter, &scale_con, &offset)) { 1888 continue; 1889 } 1890 1891 Node *offset_c = get_ctrl(offset); 1892 if( loop->is_member( get_loop(offset_c) ) ) 1893 continue; // Offset is not really loop invariant 1894 // Here we know 'offset' is loop invariant. 1895 1896 // As above for the 'limit', the 'offset' maybe pinned below the 1897 // zero trip test. 1898 if( offset_c == ctrl ) { 1899 continue; // Don't rce this check but continue looking for other candidates. 1900 } 1901 #ifdef ASSERT 1902 if (TraceRangeLimitCheck) { 1903 tty->print_cr("RC bool node%s", flip ? " flipped:" : ":"); 1904 bol->dump(2); 1905 } 1906 #endif 1907 // At this point we have the expression as: 1908 // scale_con * trip_counter + offset :: limit 1909 // where scale_con, offset and limit are loop invariant. Trip_counter 1910 // monotonically increases by stride_con, a constant. Both (or either) 1911 // stride_con and scale_con can be negative which will flip about the 1912 // sense of the test. 1913 1914 // Adjust pre and main loop limits to guard the correct iteration set 1915 if( cmp->Opcode() == Op_CmpU ) {// Unsigned compare is really 2 tests 1916 if( b_test._test == BoolTest::lt ) { // Range checks always use lt 1917 // The underflow and overflow limits: 0 <= scale*I+offset < limit 1918 add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit ); 1919 if (!conditional_rc) { 1920 // (0-offset)/scale could be outside of loop iterations range. 
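        // In that case the adjusted pre-limit can end up past the original
        // loop limit, so conditional_rc requests the MIN/MAX clamp against
        // orig_limit applied after this scan of the loop body.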
1921 conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck; 1922 } 1923 } else { 1924 #ifndef PRODUCT 1925 if( PrintOpto ) 1926 tty->print_cr("missed RCE opportunity"); 1927 #endif 1928 continue; // In release mode, ignore it 1929 } 1930 } else { // Otherwise work on normal compares 1931 switch( b_test._test ) { 1932 case BoolTest::gt: 1933 // Fall into GE case 1934 case BoolTest::ge: 1935 // Convert (I*scale+offset) >= Limit to (I*(-scale)+(-offset)) <= -Limit 1936 scale_con = -scale_con; 1937 offset = new SubINode( zero, offset ); 1938 register_new_node( offset, pre_ctrl ); 1939 limit = new SubINode( zero, limit ); 1940 register_new_node( limit, pre_ctrl ); 1941 // Fall into LE case 1942 case BoolTest::le: 1943 if (b_test._test != BoolTest::gt) { 1944 // Convert X <= Y to X < Y+1 1945 limit = new AddINode( limit, one ); 1946 register_new_node( limit, pre_ctrl ); 1947 } 1948 // Fall into LT case 1949 case BoolTest::lt: 1950 // The underflow and overflow limits: MIN_INT <= scale*I+offset < limit 1951 // Note: (MIN_INT+1 == -MAX_INT) is used instead of MIN_INT here 1952 // to avoid problem with scale == -1: MIN_INT/(-1) == MIN_INT. 1953 add_constraint( stride_con, scale_con, offset, mini, limit, pre_ctrl, &pre_limit, &main_limit ); 1954 if (!conditional_rc) { 1955 // ((MIN_INT+1)-offset)/scale could be outside of loop iterations range. 1956 // Note: negative offset is replaced with 0 but (MIN_INT+1)/scale could 1957 // still be outside of loop range. 1958 conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck; 1959 } 1960 break; 1961 default: 1962 #ifndef PRODUCT 1963 if( PrintOpto ) 1964 tty->print_cr("missed RCE opportunity"); 1965 #endif 1966 continue; // Unhandled case 1967 } 1968 } 1969 1970 // Kill the eliminated test 1971 C->set_major_progress(); 1972 Node *kill_con = _igvn.intcon( 1-flip ); 1973 set_ctrl(kill_con, C->root()); 1974 _igvn.replace_input_of(iff, 1, kill_con); 1975 // Find surviving projection 1976 assert(iff->is_If(), ""); 1977 ProjNode* dp = ((IfNode*)iff)->proj_out(1-flip); 1978 // Find loads off the surviving projection; remove their control edge 1979 for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) { 1980 Node* cd = dp->fast_out(i); // Control-dependent node 1981 if (cd->is_Load() && cd->depends_only_on_test()) { // Loads can now float around in the loop 1982 // Allow the load to float around in the loop, or before it 1983 // but NOT before the pre-loop. 1984 _igvn.replace_input_of(cd, 0, ctrl); // ctrl, not NULL 1985 --i; 1986 --imax; 1987 } 1988 } 1989 1990 } // End of is IF 1991 1992 } 1993 1994 // Update loop limits 1995 if (conditional_rc) { 1996 pre_limit = (stride_con > 0) ? (Node*)new MinINode(pre_limit, orig_limit) 1997 : (Node*)new MaxINode(pre_limit, orig_limit); 1998 register_new_node(pre_limit, pre_ctrl); 1999 } 2000 _igvn.hash_delete(pre_opaq); 2001 pre_opaq->set_req(1, pre_limit); 2002 2003 // Note:: we are making the main loop limit no longer precise; 2004 // need to round up based on stride. 2005 cl->set_nonexact_trip_count(); 2006 if (!LoopLimitCheck && stride_con != 1 && stride_con != -1) { // Cutout for common case 2007 // "Standard" round-up logic: ([main_limit-init+(y-1)]/y)*y+init 2008 // Hopefully, compiler will optimize for powers of 2. 
2009 Node *ctrl = get_ctrl(main_limit); 2010 Node *stride = cl->stride(); 2011 Node *init = cl->init_trip(); 2012 Node *span = new SubINode(main_limit,init); 2013 register_new_node(span,ctrl); 2014 Node *rndup = _igvn.intcon(stride_con + ((stride_con>0)?-1:1)); 2015 Node *add = new AddINode(span,rndup); 2016 register_new_node(add,ctrl); 2017 Node *div = new DivINode(0,add,stride); 2018 register_new_node(div,ctrl); 2019 Node *mul = new MulINode(div,stride); 2020 register_new_node(mul,ctrl); 2021 Node *newlim = new AddINode(mul,init); 2022 register_new_node(newlim,ctrl); 2023 main_limit = newlim; 2024 } 2025 2026 Node *main_cle = cl->loopexit(); 2027 Node *main_bol = main_cle->in(1); 2028 // Hacking loop bounds; need private copies of exit test 2029 if( main_bol->outcnt() > 1 ) {// BoolNode shared? 2030 _igvn.hash_delete(main_cle); 2031 main_bol = main_bol->clone();// Clone a private BoolNode 2032 register_new_node( main_bol, main_cle->in(0) ); 2033 main_cle->set_req(1,main_bol); 2034 } 2035 Node *main_cmp = main_bol->in(1); 2036 if( main_cmp->outcnt() > 1 ) { // CmpNode shared? 2037 _igvn.hash_delete(main_bol); 2038 main_cmp = main_cmp->clone();// Clone a private CmpNode 2039 register_new_node( main_cmp, main_cle->in(0) ); 2040 main_bol->set_req(1,main_cmp); 2041 } 2042 // Hack the now-private loop bounds 2043 _igvn.replace_input_of(main_cmp, 2, main_limit); 2044 // The OpaqueNode is unshared by design 2045 assert( opqzm->outcnt() == 1, "cannot hack shared node" ); 2046 _igvn.replace_input_of(opqzm, 1, main_limit); 2047 } 2048 2049 //------------------------------DCE_loop_body---------------------------------- 2050 // Remove simplistic dead code from loop body 2051 void IdealLoopTree::DCE_loop_body() { 2052 for( uint i = 0; i < _body.size(); i++ ) 2053 if( _body.at(i)->outcnt() == 0 ) 2054 _body.map( i--, _body.pop() ); 2055 } 2056 2057 2058 //------------------------------adjust_loop_exit_prob-------------------------- 2059 // Look for loop-exit tests with the 50/50 (or worse) guesses from the parsing stage. 2060 // Replace with a 1-in-10 exit guess. 2061 void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) { 2062 Node *test = tail(); 2063 while( test != _head ) { 2064 uint top = test->Opcode(); 2065 if( top == Op_IfTrue || top == Op_IfFalse ) { 2066 int test_con = ((ProjNode*)test)->_con; 2067 assert(top == (uint)(test_con? 
Op_IfTrue: Op_IfFalse), "sanity"); 2068 IfNode *iff = test->in(0)->as_If(); 2069 if( iff->outcnt() == 2 ) { // Ignore dead tests 2070 Node *bol = iff->in(1); 2071 if( bol && bol->req() > 1 && bol->in(1) && 2072 ((bol->in(1)->Opcode() == Op_StorePConditional ) || 2073 (bol->in(1)->Opcode() == Op_StoreIConditional ) || 2074 (bol->in(1)->Opcode() == Op_StoreLConditional ) || 2075 (bol->in(1)->Opcode() == Op_CompareAndSwapI ) || 2076 (bol->in(1)->Opcode() == Op_CompareAndSwapL ) || 2077 (bol->in(1)->Opcode() == Op_CompareAndSwapP ) || 2078 (bol->in(1)->Opcode() == Op_CompareAndSwapN ))) 2079 return; // Allocation loops RARELY take backedge 2080 // Find the OTHER exit path from the IF 2081 Node* ex = iff->proj_out(1-test_con); 2082 float p = iff->_prob; 2083 if( !phase->is_member( this, ex ) && iff->_fcnt == COUNT_UNKNOWN ) { 2084 if( top == Op_IfTrue ) { 2085 if( p < (PROB_FAIR + PROB_UNLIKELY_MAG(3))) { 2086 iff->_prob = PROB_STATIC_FREQUENT; 2087 } 2088 } else { 2089 if( p > (PROB_FAIR - PROB_UNLIKELY_MAG(3))) { 2090 iff->_prob = PROB_STATIC_INFREQUENT; 2091 } 2092 } 2093 } 2094 } 2095 } 2096 test = phase->idom(test); 2097 } 2098 } 2099 2100 2101 //------------------------------policy_do_remove_empty_loop-------------------- 2102 // Micro-benchmark spamming. Policy is to always remove empty loops. 2103 // The 'DO' part is to replace the trip counter with the value it will 2104 // have on the last iteration. This will break the loop. 2105 bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) { 2106 // Minimum size must be empty loop 2107 if (_body.size() > EMPTY_LOOP_SIZE) 2108 return false; 2109 2110 if (!_head->is_CountedLoop()) 2111 return false; // Dead loop 2112 CountedLoopNode *cl = _head->as_CountedLoop(); 2113 if (!cl->is_valid_counted_loop()) 2114 return false; // Malformed loop 2115 if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue)))) 2116 return false; // Infinite loop 2117 2118 #ifdef ASSERT 2119 // Ensure only one phi which is the iv. 2120 Node* iv = NULL; 2121 for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) { 2122 Node* n = cl->fast_out(i); 2123 if (n->Opcode() == Op_Phi) { 2124 assert(iv == NULL, "Too many phis" ); 2125 iv = n; 2126 } 2127 } 2128 assert(iv == cl->phi(), "Wrong phi" ); 2129 #endif 2130 2131 // main and post loops have explicitly created zero trip guard 2132 bool needs_guard = !cl->is_main_loop() && !cl->is_post_loop(); 2133 if (needs_guard) { 2134 // Skip guard if values not overlap. 2135 const TypeInt* init_t = phase->_igvn.type(cl->init_trip())->is_int(); 2136 const TypeInt* limit_t = phase->_igvn.type(cl->limit())->is_int(); 2137 int stride_con = cl->stride_con(); 2138 if (stride_con > 0) { 2139 needs_guard = (init_t->_hi >= limit_t->_lo); 2140 } else { 2141 needs_guard = (init_t->_lo <= limit_t->_hi); 2142 } 2143 } 2144 if (needs_guard) { 2145 // Check for an obvious zero trip guard. 
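    // The pattern recognized below is an entry guard of the form
    //   if (init_trip <trip-test> limit) { <loop> }
    // i.e. an IfTrue projection whose Bool uses the same test as the loop
    // exit and whose Cmp compares cl->init_trip() against cl->limit().
    // If such a guard is already present there is no need to peel one off.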
2146 Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->in(LoopNode::EntryControl)); 2147 if (inctrl->Opcode() == Op_IfTrue) { 2148 // The test should look like just the backedge of a CountedLoop 2149 Node* iff = inctrl->in(0); 2150 if (iff->is_If()) { 2151 Node* bol = iff->in(1); 2152 if (bol->is_Bool() && bol->as_Bool()->_test._test == cl->loopexit()->test_trip()) { 2153 Node* cmp = bol->in(1); 2154 if (cmp->is_Cmp() && cmp->in(1) == cl->init_trip() && cmp->in(2) == cl->limit()) { 2155 needs_guard = false; 2156 } 2157 } 2158 } 2159 } 2160 } 2161 2162 #ifndef PRODUCT 2163 if (PrintOpto) { 2164 tty->print("Removing empty loop with%s zero trip guard", needs_guard ? "out" : ""); 2165 this->dump_head(); 2166 } else if (TraceLoopOpts) { 2167 tty->print("Empty with%s zero trip guard ", needs_guard ? "out" : ""); 2168 this->dump_head(); 2169 } 2170 #endif 2171 2172 if (needs_guard) { 2173 // Peel the loop to ensure there's a zero trip guard 2174 Node_List old_new; 2175 phase->do_peeling(this, old_new); 2176 } 2177 2178 // Replace the phi at loop head with the final value of the last 2179 // iteration. Then the CountedLoopEnd will collapse (backedge never 2180 // taken) and all loop-invariant uses of the exit values will be correct. 2181 Node *phi = cl->phi(); 2182 Node *exact_limit = phase->exact_limit(this); 2183 if (exact_limit != cl->limit()) { 2184 // We also need to replace the original limit to collapse loop exit. 2185 Node* cmp = cl->loopexit()->cmp_node(); 2186 assert(cl->limit() == cmp->in(2), "sanity"); 2187 phase->_igvn._worklist.push(cmp->in(2)); // put limit on worklist 2188 phase->_igvn.replace_input_of(cmp, 2, exact_limit); // put cmp on worklist 2189 } 2190 // Note: the final value after increment should not overflow since 2191 // counted loop has limit check predicate. 2192 Node *final = new SubINode( exact_limit, cl->stride() ); 2193 phase->register_new_node(final,cl->in(LoopNode::EntryControl)); 2194 phase->_igvn.replace_node(phi,final); 2195 phase->C->set_major_progress(); 2196 return true; 2197 } 2198 2199 //------------------------------policy_do_one_iteration_loop------------------- 2200 // Convert one iteration loop into normal code. 2201 bool IdealLoopTree::policy_do_one_iteration_loop( PhaseIdealLoop *phase ) { 2202 if (!_head->as_Loop()->is_valid_counted_loop()) 2203 return false; // Only for counted loop 2204 2205 CountedLoopNode *cl = _head->as_CountedLoop(); 2206 if (!cl->has_exact_trip_count() || cl->trip_count() != 1) { 2207 return false; 2208 } 2209 2210 #ifndef PRODUCT 2211 if(TraceLoopOpts) { 2212 tty->print("OneIteration "); 2213 this->dump_head(); 2214 } 2215 #endif 2216 2217 Node *init_n = cl->init_trip(); 2218 #ifdef ASSERT 2219 // Loop boundaries should be constant since trip count is exact. 2220 assert(init_n->get_int() + cl->stride_con() >= cl->limit()->get_int(), "should be one iteration"); 2221 #endif 2222 // Replace the phi at loop head with the value of the init_trip. 2223 // Then the CountedLoopEnd will collapse (backedge will not be taken) 2224 // and all loop-invariant uses of the exit values will be correct. 2225 phase->_igvn.replace_node(cl->phi(), cl->init_trip()); 2226 phase->C->set_major_progress(); 2227 return true; 2228 } 2229 2230 //============================================================================= 2231 //------------------------------iteration_split_impl--------------------------- 2232 bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) { 2233 // Compute exact loop trip count if possible. 
2234 compute_exact_trip_count(phase); 2235 2236 // Convert one iteration loop into normal code. 2237 if (policy_do_one_iteration_loop(phase)) 2238 return true; 2239 2240 // Check and remove empty loops (spam micro-benchmarks) 2241 if (policy_do_remove_empty_loop(phase)) 2242 return true; // Here we removed an empty loop 2243 2244 bool should_peel = policy_peeling(phase); // Should we peel? 2245 2246 bool should_unswitch = policy_unswitching(phase); 2247 2248 // Non-counted loops may be peeled; exactly 1 iteration is peeled. 2249 // This removes loop-invariant tests (usually null checks). 2250 if (!_head->is_CountedLoop()) { // Non-counted loop 2251 if (PartialPeelLoop && phase->partial_peel(this, old_new)) { 2252 // Partial peel succeeded so terminate this round of loop opts 2253 return false; 2254 } 2255 if (should_peel) { // Should we peel? 2256 #ifndef PRODUCT 2257 if (PrintOpto) tty->print_cr("should_peel"); 2258 #endif 2259 phase->do_peeling(this,old_new); 2260 } else if (should_unswitch) { 2261 phase->do_unswitching(this, old_new); 2262 } 2263 return true; 2264 } 2265 CountedLoopNode *cl = _head->as_CountedLoop(); 2266 2267 if (!cl->is_valid_counted_loop()) return true; // Ignore various kinds of broken loops 2268 2269 // Do nothing special to pre- and post- loops 2270 if (cl->is_pre_loop() || cl->is_post_loop()) return true; 2271 2272 // Compute loop trip count from profile data 2273 compute_profile_trip_cnt(phase); 2274 2275 // Before attempting fancy unrolling, RCE or alignment, see if we want 2276 // to completely unroll this loop or do loop unswitching. 2277 if (cl->is_normal_loop()) { 2278 if (should_unswitch) { 2279 phase->do_unswitching(this, old_new); 2280 return true; 2281 } 2282 bool should_maximally_unroll = policy_maximally_unroll(phase); 2283 if (should_maximally_unroll) { 2284 // Here we did some unrolling and peeling. Eventually we will 2285 // completely unroll this loop and it will no longer be a loop. 2286 phase->do_maximally_unroll(this,old_new); 2287 return true; 2288 } 2289 } 2290 2291 // Skip next optimizations if running low on nodes. Note that 2292 // policy_unswitching and policy_maximally_unroll have this check. 2293 uint nodes_left = MaxNodeLimit - (uint) phase->C->live_nodes(); 2294 if ((2 * _body.size()) > nodes_left) { 2295 return true; 2296 } 2297 2298 // Counted loops may be peeled, may need some iterations run up 2299 // front for RCE, and may want to align loop refs to a cache 2300 // line. Thus we clone a full loop up front whose trip count is 2301 // at least 1 (if peeling), but may be several more. 2302 2303 // The main loop will start cache-line aligned with at least 1 2304 // iteration of the unrolled body (zero-trip test required) and 2305 // will have some range checks removed. 2306 2307 // A post-loop will finish any odd iterations (leftover after 2308 // unrolling), plus any needed for RCE purposes. 2309 2310 bool should_unroll = policy_unroll(phase); 2311 2312 bool should_rce = policy_range_check(phase); 2313 2314 bool should_align = policy_align(phase); 2315 2316 // If not RCE'ing (iteration splitting) or Aligning, then we do not 2317 // need a pre-loop. We may still need to peel an initial iteration but 2318 // we will not be needing an unknown number of pre-iterations. 2319 // 2320 // Basically, if may_rce_align reports FALSE first time through, 2321 // we will not be able to later do RCE or Aligning on this loop. 
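  // In other words, may_rce_align is false only for a peel-only loop with no
  // range checks to eliminate and no alignment request; in that case
  // insert_pre_post_loops below is told (via !may_rce_align) that only a
  // peeled iteration is required rather than a full pre/main/post structure.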
2322 bool may_rce_align = !policy_peel_only(phase) || should_rce || should_align; 2323 2324 // If we have any of these conditions (RCE, alignment, unrolling) met, then 2325 // we switch to the pre-/main-/post-loop model. This model also covers 2326 // peeling. 2327 if (should_rce || should_align || should_unroll) { 2328 if (cl->is_normal_loop()) // Convert to 'pre/main/post' loops 2329 phase->insert_pre_post_loops(this,old_new, !may_rce_align); 2330 2331 // Adjust the pre- and main-loop limits to let the pre and post loops run 2332 // with full checks, but the main-loop with no checks. Remove said 2333 // checks from the main body. 2334 if (should_rce) 2335 phase->do_range_check(this,old_new); 2336 2337 // Double loop body for unrolling. Adjust the minimum-trip test (will do 2338 // twice as many iterations as before) and the main body limit (only do 2339 // an even number of trips). If we are peeling, we might enable some RCE 2340 // and we'd rather unroll the post-RCE'd loop SO... do not unroll if 2341 // peeling. 2342 if (should_unroll && !should_peel) 2343 phase->do_unroll(this,old_new, true); 2344 2345 // Adjust the pre-loop limits to align the main body 2346 // iterations. 2347 if (should_align) 2348 Unimplemented(); 2349 2350 } else { // Else we have an unchanged counted loop 2351 if (should_peel) // Might want to peel but do nothing else 2352 phase->do_peeling(this,old_new); 2353 } 2354 return true; 2355 } 2356 2357 2358 //============================================================================= 2359 //------------------------------iteration_split-------------------------------- 2360 bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new ) { 2361 // Recursively iteration split nested loops 2362 if (_child && !_child->iteration_split(phase, old_new)) 2363 return false; 2364 2365 // Clean out prior deadwood 2366 DCE_loop_body(); 2367 2368 2369 // Look for loop-exit tests with my 50/50 guesses from the Parsing stage. 2370 // Replace with a 1-in-10 exit guess. 2371 if (_parent /*not the root loop*/ && 2372 !_irreducible && 2373 // Also ignore the occasional dead backedge 2374 !tail()->is_top()) { 2375 adjust_loop_exit_prob(phase); 2376 } 2377 2378 // Gate unrolling, RCE and peeling efforts. 2379 if (!_child && // If not an inner loop, do not split 2380 !_irreducible && 2381 _allow_optimizations && 2382 !tail()->is_top()) { // Also ignore the occasional dead backedge 2383 if (!_has_call) { 2384 if (!iteration_split_impl(phase, old_new)) { 2385 return false; 2386 } 2387 } else if (policy_unswitching(phase)) { 2388 phase->do_unswitching(this, old_new); 2389 } 2390 } 2391 2392 // Minor offset re-organization to remove loop-fallout uses of 2393 // trip counter when there was no major reshaping. 2394 phase->reorg_offsets(this); 2395 2396 if (_next && !_next->iteration_split(phase, old_new)) 2397 return false; 2398 return true; 2399 } 2400 2401 2402 //============================================================================= 2403 // Process all the loops in the loop tree and replace any fill 2404 // patterns with an intrisc version. 
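// The canonical shape recognized by match_fill_loop() is
//   for (int i = init; i < limit; i++)  a[i] = value;   // value loop invariant
// which intrinsify_fill() replaces with a single call to an array-fill stub
// covering the same index range.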
2405 bool PhaseIdealLoop::do_intrinsify_fill() { 2406 bool changed = false; 2407 for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) { 2408 IdealLoopTree* lpt = iter.current(); 2409 changed |= intrinsify_fill(lpt); 2410 } 2411 return changed; 2412 } 2413 2414 2415 // Examine an inner loop looking for a a single store of an invariant 2416 // value in a unit stride loop, 2417 bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value, 2418 Node*& shift, Node*& con) { 2419 const char* msg = NULL; 2420 Node* msg_node = NULL; 2421 2422 store_value = NULL; 2423 con = NULL; 2424 shift = NULL; 2425 2426 // Process the loop looking for stores. If there are multiple 2427 // stores or extra control flow give at this point. 2428 CountedLoopNode* head = lpt->_head->as_CountedLoop(); 2429 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { 2430 Node* n = lpt->_body.at(i); 2431 if (n->outcnt() == 0) continue; // Ignore dead 2432 if (n->is_Store()) { 2433 if (store != NULL) { 2434 msg = "multiple stores"; 2435 break; 2436 } 2437 int opc = n->Opcode(); 2438 if (opc == Op_StoreP || opc == Op_StoreN || opc == Op_StoreNKlass || opc == Op_StoreCM) { 2439 msg = "oop fills not handled"; 2440 break; 2441 } 2442 Node* value = n->in(MemNode::ValueIn); 2443 if (!lpt->is_invariant(value)) { 2444 msg = "variant store value"; 2445 } else if (!_igvn.type(n->in(MemNode::Address))->isa_aryptr()) { 2446 msg = "not array address"; 2447 } 2448 store = n; 2449 store_value = value; 2450 } else if (n->is_If() && n != head->loopexit()) { 2451 msg = "extra control flow"; 2452 msg_node = n; 2453 } 2454 } 2455 2456 if (store == NULL) { 2457 // No store in loop 2458 return false; 2459 } 2460 2461 if (msg == NULL && head->stride_con() != 1) { 2462 // could handle negative strides too 2463 if (head->stride_con() < 0) { 2464 msg = "negative stride"; 2465 } else { 2466 msg = "non-unit stride"; 2467 } 2468 } 2469 2470 if (msg == NULL && !store->in(MemNode::Address)->is_AddP()) { 2471 msg = "can't handle store address"; 2472 msg_node = store->in(MemNode::Address); 2473 } 2474 2475 if (msg == NULL && 2476 (!store->in(MemNode::Memory)->is_Phi() || 2477 store->in(MemNode::Memory)->in(LoopNode::LoopBackControl) != store)) { 2478 msg = "store memory isn't proper phi"; 2479 msg_node = store->in(MemNode::Memory); 2480 } 2481 2482 // Make sure there is an appropriate fill routine 2483 BasicType t = store->as_Mem()->memory_type(); 2484 const char* fill_name; 2485 if (msg == NULL && 2486 StubRoutines::select_fill_function(t, false, fill_name) == NULL) { 2487 msg = "unsupported store"; 2488 msg_node = store; 2489 } 2490 2491 if (msg != NULL) { 2492 #ifndef PRODUCT 2493 if (TraceOptimizeFill) { 2494 tty->print_cr("not fill intrinsic candidate: %s", msg); 2495 if (msg_node != NULL) msg_node->dump(); 2496 } 2497 #endif 2498 return false; 2499 } 2500 2501 // Make sure the address expression can be handled. It should be 2502 // head->phi * elsize + con. head->phi might have a ConvI2L. 
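  // For example, for an int[] store a[i] = v the AddP offset typically
  // unpacks into a constant (the array header offset), an LShiftX of the
  // index by 2 and, on 64-bit platforms, a ConvI2L wrapping head->phi();
  // the loop below classifies exactly these element kinds.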
2503 Node* elements[4]; 2504 Node* conv = NULL; 2505 bool found_index = false; 2506 int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements)); 2507 for (int e = 0; e < count; e++) { 2508 Node* n = elements[e]; 2509 if (n->is_Con() && con == NULL) { 2510 con = n; 2511 } else if (n->Opcode() == Op_LShiftX && shift == NULL) { 2512 Node* value = n->in(1); 2513 #ifdef _LP64 2514 if (value->Opcode() == Op_ConvI2L) { 2515 conv = value; 2516 value = value->in(1); 2517 } 2518 #endif 2519 if (value != head->phi()) { 2520 msg = "unhandled shift in address"; 2521 } else { 2522 if (type2aelembytes(store->as_Mem()->memory_type(), true) != (1 << n->in(2)->get_int())) { 2523 msg = "scale doesn't match"; 2524 } else { 2525 found_index = true; 2526 shift = n; 2527 } 2528 } 2529 } else if (n->Opcode() == Op_ConvI2L && conv == NULL) { 2530 if (n->in(1) == head->phi()) { 2531 found_index = true; 2532 conv = n; 2533 } else { 2534 msg = "unhandled input to ConvI2L"; 2535 } 2536 } else if (n == head->phi()) { 2537 // no shift, check below for allowed cases 2538 found_index = true; 2539 } else { 2540 msg = "unhandled node in address"; 2541 msg_node = n; 2542 } 2543 } 2544 2545 if (count == -1) { 2546 msg = "malformed address expression"; 2547 msg_node = store; 2548 } 2549 2550 if (!found_index) { 2551 msg = "missing use of index"; 2552 } 2553 2554 // byte sized items won't have a shift 2555 if (msg == NULL && shift == NULL && t != T_BYTE && t != T_BOOLEAN) { 2556 msg = "can't find shift"; 2557 msg_node = store; 2558 } 2559 2560 if (msg != NULL) { 2561 #ifndef PRODUCT 2562 if (TraceOptimizeFill) { 2563 tty->print_cr("not fill intrinsic: %s", msg); 2564 if (msg_node != NULL) msg_node->dump(); 2565 } 2566 #endif 2567 return false; 2568 } 2569 2570 // No make sure all the other nodes in the loop can be handled 2571 VectorSet ok(Thread::current()->resource_area()); 2572 2573 // store related values are ok 2574 ok.set(store->_idx); 2575 ok.set(store->in(MemNode::Memory)->_idx); 2576 2577 CountedLoopEndNode* loop_exit = head->loopexit(); 2578 guarantee(loop_exit != NULL, "no loop exit node"); 2579 2580 // Loop structure is ok 2581 ok.set(head->_idx); 2582 ok.set(loop_exit->_idx); 2583 ok.set(head->phi()->_idx); 2584 ok.set(head->incr()->_idx); 2585 ok.set(loop_exit->cmp_node()->_idx); 2586 ok.set(loop_exit->in(1)->_idx); 2587 2588 // Address elements are ok 2589 if (con) ok.set(con->_idx); 2590 if (shift) ok.set(shift->_idx); 2591 if (conv) ok.set(conv->_idx); 2592 2593 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { 2594 Node* n = lpt->_body.at(i); 2595 if (n->outcnt() == 0) continue; // Ignore dead 2596 if (ok.test(n->_idx)) continue; 2597 // Backedge projection is ok 2598 if (n->is_IfTrue() && n->in(0) == loop_exit) continue; 2599 if (!n->is_AddP()) { 2600 msg = "unhandled node"; 2601 msg_node = n; 2602 break; 2603 } 2604 } 2605 2606 // Make sure no unexpected values are used outside the loop 2607 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { 2608 Node* n = lpt->_body.at(i); 2609 // These values can be replaced with other nodes if they are used 2610 // outside the loop. 
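    // Specifically, intrinsify_fill() later replaces the store and its
    // memory phi with the fill call's memory projection, the loop exit with
    // the call's control projection, and uses of the incremented trip
    // counter with the loop limit, so such uses outside the loop stay valid.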
2611 if (n == store || n == loop_exit || n == head->incr() || n == store->in(MemNode::Memory)) continue; 2612 for (SimpleDUIterator iter(n); iter.has_next(); iter.next()) { 2613 Node* use = iter.get(); 2614 if (!lpt->_body.contains(use)) { 2615 msg = "node is used outside loop"; 2616 // lpt->_body.dump(); 2617 msg_node = n; 2618 break; 2619 } 2620 } 2621 } 2622 2623 #ifdef ASSERT 2624 if (TraceOptimizeFill) { 2625 if (msg != NULL) { 2626 tty->print_cr("no fill intrinsic: %s", msg); 2627 if (msg_node != NULL) msg_node->dump(); 2628 } else { 2629 tty->print_cr("fill intrinsic for:"); 2630 } 2631 store->dump(); 2632 if (Verbose) { 2633 lpt->_body.dump(); 2634 } 2635 } 2636 #endif 2637 2638 return msg == NULL; 2639 } 2640 2641 2642 2643 bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) { 2644 // Only for counted inner loops 2645 if (!lpt->is_counted() || !lpt->is_inner()) { 2646 return false; 2647 } 2648 2649 // Must have constant stride 2650 CountedLoopNode* head = lpt->_head->as_CountedLoop(); 2651 if (!head->is_valid_counted_loop() || !head->is_normal_loop()) { 2652 return false; 2653 } 2654 2655 // Check that the body only contains a store of a loop invariant 2656 // value that is indexed by the loop phi. 2657 Node* store = NULL; 2658 Node* store_value = NULL; 2659 Node* shift = NULL; 2660 Node* offset = NULL; 2661 if (!match_fill_loop(lpt, store, store_value, shift, offset)) { 2662 return false; 2663 } 2664 2665 #ifndef PRODUCT 2666 if (TraceLoopOpts) { 2667 tty->print("ArrayFill "); 2668 lpt->dump_head(); 2669 } 2670 #endif 2671 2672 // Now replace the whole loop body by a call to a fill routine that 2673 // covers the same region as the loop. 2674 Node* base = store->in(MemNode::Address)->as_AddP()->in(AddPNode::Base); 2675 2676 // Build an expression for the beginning of the copy region 2677 Node* index = head->init_trip(); 2678 #ifdef _LP64 2679 index = new ConvI2LNode(index); 2680 _igvn.register_new_node_with_optimizer(index); 2681 #endif 2682 if (shift != NULL) { 2683 // byte arrays don't require a shift but others do. 2684 index = new LShiftXNode(index, shift->in(2)); 2685 _igvn.register_new_node_with_optimizer(index); 2686 } 2687 index = new AddPNode(base, base, index); 2688 _igvn.register_new_node_with_optimizer(index); 2689 Node* from = new AddPNode(base, index, offset); 2690 _igvn.register_new_node_with_optimizer(from); 2691 // Compute the number of elements to copy 2692 Node* len = new SubINode(head->limit(), head->init_trip()); 2693 _igvn.register_new_node_with_optimizer(len); 2694 2695 BasicType t = store->as_Mem()->memory_type(); 2696 bool aligned = false; 2697 if (offset != NULL && head->init_trip()->is_Con()) { 2698 int element_size = type2aelembytes(t); 2699 aligned = (offset->find_intptr_t_type()->get_con() + head->init_trip()->get_int() * element_size) % HeapWordSize == 0; 2700 } 2701 2702 // Build a call to the fill routine 2703 const char* fill_name; 2704 address fill = StubRoutines::select_fill_function(t, aligned, fill_name); 2705 assert(fill != NULL, "what?"); 2706 2707 // Convert float/double to int/long for fill routines 2708 if (t == T_FLOAT) { 2709 store_value = new MoveF2INode(store_value); 2710 _igvn.register_new_node_with_optimizer(store_value); 2711 } else if (t == T_DOUBLE) { 2712 store_value = new MoveD2LNode(store_value); 2713 _igvn.register_new_node_with_optimizer(store_value); 2714 } 2715 2716 if (CCallingConventionRequiresIntsAsLongs && 2717 // See StubRoutines::select_fill_function for types. FLOAT has been converted to INT. 
2718 (t == T_FLOAT || t == T_INT || is_subword_type(t))) { 2719 store_value = new ConvI2LNode(store_value); 2720 _igvn.register_new_node_with_optimizer(store_value); 2721 } 2722 2723 Node* mem_phi = store->in(MemNode::Memory); 2724 Node* result_ctrl; 2725 Node* result_mem; 2726 const TypeFunc* call_type = OptoRuntime::array_fill_Type(); 2727 CallLeafNode *call = new CallLeafNoFPNode(call_type, fill, 2728 fill_name, TypeAryPtr::get_array_body_type(t)); 2729 uint cnt = 0; 2730 call->init_req(TypeFunc::Parms + cnt++, from); 2731 call->init_req(TypeFunc::Parms + cnt++, store_value); 2732 if (CCallingConventionRequiresIntsAsLongs) { 2733 call->init_req(TypeFunc::Parms + cnt++, C->top()); 2734 } 2735 #ifdef _LP64 2736 len = new ConvI2LNode(len); 2737 _igvn.register_new_node_with_optimizer(len); 2738 #endif 2739 call->init_req(TypeFunc::Parms + cnt++, len); 2740 #ifdef _LP64 2741 call->init_req(TypeFunc::Parms + cnt++, C->top()); 2742 #endif 2743 call->init_req(TypeFunc::Control, head->init_control()); 2744 call->init_req(TypeFunc::I_O, C->top()); // Does no I/O. 2745 call->init_req(TypeFunc::Memory, mem_phi->in(LoopNode::EntryControl)); 2746 call->init_req(TypeFunc::ReturnAdr, C->start()->proj_out(TypeFunc::ReturnAdr)); 2747 call->init_req(TypeFunc::FramePtr, C->start()->proj_out(TypeFunc::FramePtr)); 2748 _igvn.register_new_node_with_optimizer(call); 2749 result_ctrl = new ProjNode(call,TypeFunc::Control); 2750 _igvn.register_new_node_with_optimizer(result_ctrl); 2751 result_mem = new ProjNode(call,TypeFunc::Memory); 2752 _igvn.register_new_node_with_optimizer(result_mem); 2753 2754 /* Disable following optimization until proper fix (add missing checks). 2755 2756 // If this fill is tightly coupled to an allocation and overwrites 2757 // the whole body, allow it to take over the zeroing. 2758 AllocateNode* alloc = AllocateNode::Ideal_allocation(base, this); 2759 if (alloc != NULL && alloc->is_AllocateArray()) { 2760 Node* length = alloc->as_AllocateArray()->Ideal_length(); 2761 if (head->limit() == length && 2762 head->init_trip() == _igvn.intcon(0)) { 2763 if (TraceOptimizeFill) { 2764 tty->print_cr("Eliminated zeroing in allocation"); 2765 } 2766 alloc->maybe_set_complete(&_igvn); 2767 } else { 2768 #ifdef ASSERT 2769 if (TraceOptimizeFill) { 2770 tty->print_cr("filling array but bounds don't match"); 2771 alloc->dump(); 2772 head->init_trip()->dump(); 2773 head->limit()->dump(); 2774 length->dump(); 2775 } 2776 #endif 2777 } 2778 } 2779 */ 2780 2781 // Redirect the old control and memory edges that are outside the loop. 2782 Node* exit = head->loopexit()->proj_out(0); 2783 // Sometimes the memory phi of the head is used as the outgoing 2784 // state of the loop. It's safe in this case to replace it with the 2785 // result_mem. 2786 _igvn.replace_node(store->in(MemNode::Memory), result_mem); 2787 _igvn.replace_node(exit, result_ctrl); 2788 _igvn.replace_node(store, result_mem); 2789 // Any uses the increment outside of the loop become the loop limit. 2790 _igvn.replace_node(head->incr(), head->limit()); 2791 2792 // Disconnect the head from the loop. 2793 for (uint i = 0; i < lpt->_body.size(); i++) { 2794 Node* n = lpt->_body.at(i); 2795 _igvn.replace_node(n, C->top()); 2796 } 2797 2798 return true; 2799 }