/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/vectornode.hpp"

//------------------------------is_loop_exit-----------------------------------
// Given an IfNode, return the loop-exiting projection or NULL if both
// arms remain in the loop.
Node *IdealLoopTree::is_loop_exit(Node *iff) const {
  if( iff->outcnt() != 2 ) return NULL;  // Ignore partially dead tests
  PhaseIdealLoop *phase = _phase;
  // Test is an IfNode, has 2 projections.  If BOTH are in the loop
  // we need loop unswitching instead of peeling.
  if( !is_member(phase->get_loop( iff->raw_out(0) )) )
    return iff->raw_out(0);
  if( !is_member(phase->get_loop( iff->raw_out(1) )) )
    return iff->raw_out(1);
  return NULL;
}


//=============================================================================


//------------------------------record_for_igvn----------------------------
// Put loop body on igvn work list
void IdealLoopTree::record_for_igvn() {
  for( uint i = 0; i < _body.size(); i++ ) {
    Node *n = _body.at(i);
    _phase->_igvn._worklist.push(n);
  }
}

//------------------------------compute_exact_trip_count-----------------------
// Compute loop exact trip count if possible. Do not recalculate trip count for
// split loops (pre-main-post) which have their limits and inits behind Opaque node.
void IdealLoopTree::compute_exact_trip_count( PhaseIdealLoop *phase ) {
  if (!_head->as_Loop()->is_valid_counted_loop()) {
    return;
  }
  CountedLoopNode* cl = _head->as_CountedLoop();
  // Trip count may become nonexact for iteration split loops since
  // RCE modifies limits. Note, _trip_count value is not reset since
  // it is used to limit unrolling of main loop.
  cl->set_nonexact_trip_count();

  // Loop's test should be part of loop.
  if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
    return; // Infinite loop

#ifdef ASSERT
  BoolTest::mask bt = cl->loopexit()->test_trip();
  assert(bt == BoolTest::lt || bt == BoolTest::gt ||
         bt == BoolTest::ne, "canonical test is expected");
#endif

  Node* init_n = cl->init_trip();
  Node* limit_n = cl->limit();
  if (init_n != NULL && init_n->is_Con() &&
      limit_n != NULL && limit_n->is_Con()) {
    // Use longs to avoid integer overflow.
    int stride_con = cl->stride_con();
    jlong init_con = cl->init_trip()->get_int();
    jlong limit_con = cl->limit()->get_int();
    int stride_m = stride_con - (stride_con > 0 ? 1 : -1);
    jlong trip_count = (limit_con - init_con + stride_m)/stride_con;
    if (trip_count > 0 && (julong)trip_count < (julong)max_juint) {
      // Set exact trip count.
      cl->set_exact_trip_count((uint)trip_count);
    }
  }
}

//------------------------------compute_profile_trip_cnt----------------------------
// Compute loop trip count from profile data as
//    (backedge_count + loop_exit_count) / loop_exit_count
void IdealLoopTree::compute_profile_trip_cnt( PhaseIdealLoop *phase ) {
  if (!_head->is_CountedLoop()) {
    return;
  }
  CountedLoopNode* head = _head->as_CountedLoop();
  if (head->profile_trip_cnt() != COUNT_UNKNOWN) {
    return; // Already computed
  }
  float trip_cnt = (float)max_jint; // default is big

  Node* back = head->in(LoopNode::LoopBackControl);
  while (back != head) {
    if ((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
        back->in(0) &&
        back->in(0)->is_If() &&
        back->in(0)->as_If()->_fcnt != COUNT_UNKNOWN &&
        back->in(0)->as_If()->_prob != PROB_UNKNOWN) {
      break;
    }
    back = phase->idom(back);
  }
  if (back != head) {
    assert((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
           back->in(0), "if-projection exists");
    IfNode* back_if = back->in(0)->as_If();
    float loop_back_cnt = back_if->_fcnt * back_if->_prob;

    // Now compute a loop exit count
    float loop_exit_cnt = 0.0f;
    for( uint i = 0; i < _body.size(); i++ ) {
      Node *n = _body[i];
      if( n->is_If() ) {
        IfNode *iff = n->as_If();
        if( iff->_fcnt != COUNT_UNKNOWN && iff->_prob != PROB_UNKNOWN ) {
          Node *exit = is_loop_exit(iff);
          if( exit ) {
            float exit_prob = iff->_prob;
            if (exit->Opcode() == Op_IfFalse) exit_prob = 1.0 - exit_prob;
            if (exit_prob > PROB_MIN) {
              float exit_cnt = iff->_fcnt * exit_prob;
              loop_exit_cnt += exit_cnt;
            }
          }
        }
      }
    }
    if (loop_exit_cnt > 0.0f) {
      trip_cnt = (loop_back_cnt + loop_exit_cnt) / loop_exit_cnt;
    } else {
      // No exit count, so use the backedge count.
      trip_cnt = loop_back_cnt;
    }
  }
#ifndef PRODUCT
  if (TraceProfileTripCount) {
    tty->print_cr("compute_profile_trip_cnt  lp: %d cnt: %f\n", head->_idx, trip_cnt);
  }
#endif
  head->set_profile_trip_cnt(trip_cnt);
}

//---------------------is_invariant_addition-----------------------------
// Return nonzero index of invariant operand for an Add or Sub
// of (nonconstant) invariant and variant values. Helper for reassociate_invariants.
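// For example, is_invariant_addition() returns 1 for (inv + x), 2 for (x + inv),
// and 0 when neither or both inputs are loop-invariant (nothing useful to hoist).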
int IdealLoopTree::is_invariant_addition(Node* n, PhaseIdealLoop *phase) {
  int op = n->Opcode();
  if (op == Op_AddI || op == Op_SubI) {
    bool in1_invar = this->is_invariant(n->in(1));
    bool in2_invar = this->is_invariant(n->in(2));
    if (in1_invar && !in2_invar) return 1;
    if (!in1_invar && in2_invar) return 2;
  }
  return 0;
}

//---------------------reassociate_add_sub-----------------------------
// Reassociate invariant add and subtract expressions:
//
// inv1 + (x + inv2)  =>  ( inv1 + inv2) + x
// (x + inv2) + inv1  =>  ( inv1 + inv2) + x
// inv1 + (x - inv2)  =>  ( inv1 - inv2) + x
// inv1 - (inv2 - x)  =>  ( inv1 - inv2) + x
// (x + inv2) - inv1  =>  (-inv1 + inv2) + x
// (x - inv2) + inv1  =>  ( inv1 - inv2) + x
// (x - inv2) - inv1  =>  (-inv1 - inv2) + x
// inv1 + (inv2 - x)  =>  ( inv1 + inv2) - x
// inv1 - (x - inv2)  =>  ( inv1 + inv2) - x
// (inv2 - x) + inv1  =>  ( inv1 + inv2) - x
// (inv2 - x) - inv1  =>  (-inv1 + inv2) - x
// inv1 - (x + inv2)  =>  ( inv1 - inv2) - x
//
Node* IdealLoopTree::reassociate_add_sub(Node* n1, PhaseIdealLoop *phase) {
  if (!n1->is_Add() && !n1->is_Sub() || n1->outcnt() == 0) return NULL;
  if (is_invariant(n1)) return NULL;
  int inv1_idx = is_invariant_addition(n1, phase);
  if (!inv1_idx) return NULL;
  // Don't mess with add of constant (igvn moves them to expression tree root.)
  if (n1->is_Add() && n1->in(2)->is_Con()) return NULL;
  Node* inv1 = n1->in(inv1_idx);
  Node* n2 = n1->in(3 - inv1_idx);
  int inv2_idx = is_invariant_addition(n2, phase);
  if (!inv2_idx) return NULL;
  Node* x    = n2->in(3 - inv2_idx);
  Node* inv2 = n2->in(inv2_idx);

  bool neg_x    = n2->is_Sub() && inv2_idx == 1;
  bool neg_inv2 = n2->is_Sub() && inv2_idx == 2;
  bool neg_inv1 = n1->is_Sub() && inv1_idx == 2;
  if (n1->is_Sub() && inv1_idx == 1) {
    neg_x    = !neg_x;
    neg_inv2 = !neg_inv2;
  }
  Node* inv1_c = phase->get_ctrl(inv1);
  Node* inv2_c = phase->get_ctrl(inv2);
  Node* n_inv1;
  if (neg_inv1) {
    Node *zero = phase->_igvn.intcon(0);
    phase->set_ctrl(zero, phase->C->root());
    n_inv1 = new SubINode(zero, inv1);
    phase->register_new_node(n_inv1, inv1_c);
  } else {
    n_inv1 = inv1;
  }
  Node* inv;
  if (neg_inv2) {
    inv = new SubINode(n_inv1, inv2);
  } else {
    inv = new AddINode(n_inv1, inv2);
  }
  phase->register_new_node(inv, phase->get_early_ctrl(inv));

  Node* addx;
  if (neg_x) {
    addx = new SubINode(inv, x);
  } else {
    addx = new AddINode(x, inv);
  }
  phase->register_new_node(addx, phase->get_ctrl(x));
  phase->_igvn.replace_node(n1, addx);
  assert(phase->get_loop(phase->get_ctrl(n1)) == this, "");
  _body.yank(n1);
  return addx;
}

//---------------------reassociate_invariants-----------------------------
// Reassociate invariant expressions:
void IdealLoopTree::reassociate_invariants(PhaseIdealLoop *phase) {
  for (int i = _body.size() - 1; i >= 0; i--) {
    Node *n = _body.at(i);
    for (int j = 0; j < 5; j++) {
      Node* nn = reassociate_add_sub(n, phase);
      if (nn == NULL) break;
      n = nn; // again
    };
  }
}

//------------------------------policy_peeling---------------------------------
// Return TRUE or FALSE if the loop should be peeled or not.  Peel if we can
// make some loop-invariant test (usually a null-check) happen before the loop.
bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const {
  Node *test = ((IdealLoopTree*)this)->tail();
  int  body_size = ((IdealLoopTree*)this)->_body.size();
  // Peeling does loop cloning which can result in O(N^2) node construction
  if( body_size > 255 /* Prevent overflow for large body_size */
      || (body_size * body_size + phase->C->live_nodes()) > phase->C->max_node_limit() ) {
    return false;           // too large to safely clone
  }
  while( test != _head ) {      // Scan till run off top of loop
    if( test->is_If() ) {       // Test?
      Node *ctrl = phase->get_ctrl(test->in(1));
      if (ctrl->is_top())
        return false;           // Found dead test on live IF?  No peeling!
      // Standard IF only has one input value to check for loop invariance
      assert( test->Opcode() == Op_If || test->Opcode() == Op_CountedLoopEnd, "Check this code when new subtype is added");
      // Condition is not a member of this loop?
      if( !is_member(phase->get_loop(ctrl)) &&
          is_loop_exit(test) )
        return true;            // Found reason to peel!
    }
    // Walk up dominators to loop _head looking for test which is
    // executed on every path thru loop.
    test = phase->idom(test);
  }
  return false;
}

//------------------------------peeled_dom_test_elim---------------------------
// If we got the effect of peeling, either by actually peeling or by making
// a pre-loop which must execute at least once, we can remove all
// loop-invariant dominated tests in the main body.
void PhaseIdealLoop::peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new ) {
  bool progress = true;
  while( progress ) {
    progress = false;           // Reset for next iteration
    Node *prev = loop->_head->in(LoopNode::LoopBackControl);//loop->tail();
    Node *test = prev->in(0);
    while( test != loop->_head ) { // Scan till run off top of loop

      int p_op = prev->Opcode();
      if( (p_op == Op_IfFalse || p_op == Op_IfTrue) &&
          test->is_If() &&      // Test?
          !test->in(1)->is_Con() && // And not already obvious?
          // Condition is not a member of this loop?
          !loop->is_member(get_loop(get_ctrl(test->in(1))))){
        // Walk loop body looking for instances of this test
        for( uint i = 0; i < loop->_body.size(); i++ ) {
          Node *n = loop->_body.at(i);
          if( n->is_If() && n->in(1) == test->in(1) /*&& n != loop->tail()->in(0)*/ ) {
            // IfNode was dominated by version in peeled loop body
            progress = true;
            dominated_by( old_new[prev->_idx], n );
          }
        }
      }
      prev = test;
      test = idom(test);
    } // End of scan tests in loop

  } // End of while( progress )
}

//------------------------------do_peeling-------------------------------------
// Peel the first iteration of the given loop.
// Step 1: Clone the loop body.  The clone becomes the peeled iteration.
//         The pre-loop illegally has 2 control users (old & new loops).
// Step 2: Make the old-loop fall-in edges point to the peeled iteration.
//         Do this by making the old-loop fall-in edges act as if they came
//         around the loopback from the prior iteration (follow the old-loop
//         backedges) and then map to the new peeled iteration.  This leaves
//         the pre-loop with only 1 user (the new peeled iteration), but the
//         peeled-loop backedge has 2 users.
// Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
//         extra backedge user.
//
//                   orig
//
//                  stmt1
//                    |
//                    v
//              loop predicate
//                    |
//                    v
//                   loop<----+
//                     |      |
//                   stmt2    |
//                     |      |
//                     v      |
//                    if      ^
//                   / \      |
//                  /   \     |
//                 v     v    |
//               false  true  |
//               /        \   |
//              /          ----+
//             |
//             v
//           exit
//
//
//            after clone loop
//
//                  stmt1
//                    |
//                    v
//              loop predicate
//                 /       \
//        clone   /         \   orig
//               /           \
//              /             \
//             v               v
//   +---->loop clone          loop<----+
//   |      |                    |      |
//   |    stmt2 clone          stmt2    |
//   |      |                    |      |
//   |      v                    v      |
//   ^      if clone            If      ^
//   |      / \                / \      |
//   |     /   \              /   \     |
//   |    v     v            v     v    |
//   |    true  false      false  true  |
//   |    /         \      /       \    |
//   +----           \    /         ----+
//                     \  /
//                    1v  v2
//                     region
//                       |
//                       v
//                      exit
//
//
//         after peel and predicate move
//
//                  stmt1
//                   /
//                  /
//        clone    /            orig
//                /
//               /              +----------+
//              /               |          |
//             /          loop predicate   |
//            /                 |          |
//           v                  v          |
//   TOP-->loop clone          loop<----+  |
//          |                    |      |  |
//        stmt2 clone          stmt2    |  |
//          |                    |      |  ^
//          v                    v      |  |
//          if clone            If      ^  |
//          / \                / \      |  |
//         /   \              /   \     |  |
//        v     v            v     v    |  |
//        true  false      false  true  |  |
//        |         \      /       \    |  |
//        |          \    /         ----+  ^
//        |           \  /                 |
//        |           1v  v2               |
//        v            region              |
//        |              |                 |
//        |              v                 |
//        |             exit               |
//        |                                |
//        +--------------->-----------------+
//
//
//              final graph
//
//                  stmt1
//                    |
//                    v
//                  stmt2 clone
//                    |
//                    v
//                   if clone
//                  / |
//                 /  |
//                v   v
//            false   true
//             |       |
//             |       v
//             | loop predicate
//             |       |
//             |       v
//             |      loop<----+
//             |        |      |
//             |      stmt2    |
//             |        |      |
//             |        v      |
//             v       if      ^
//             |      / \      |
//             |     /   \     |
//             |    v     v    |
//             |  false  true  |
//             |    |      \   |
//             v    v       --+
//              region
//                 |
//                 v
//               exit
//
void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) {

  C->set_major_progress();
  // Peeling a 'main' loop in a pre/main/post situation obfuscates the
  // 'pre' loop from the main and the 'pre' can no longer have its
  // iterations adjusted.  Therefore, we need to declare this loop as
  // no longer a 'main' loop; it will need new pre and post loops before
  // we can do further RCE.
#ifndef PRODUCT
  if (TraceLoopOpts) {
    tty->print("Peel ");
    loop->dump_head();
  }
#endif
  Node* head = loop->_head;
  bool counted_loop = head->is_CountedLoop();
  if (counted_loop) {
    CountedLoopNode *cl = head->as_CountedLoop();
    assert(cl->trip_count() > 0, "peeling a fully unrolled loop");
    cl->set_trip_count(cl->trip_count() - 1);
    if (cl->is_main_loop()) {
      cl->set_normal_loop();
#ifndef PRODUCT
      if (PrintOpto && VerifyLoopOptimizations) {
        tty->print("Peeling a 'main' loop; resetting to 'normal' ");
        loop->dump_head();
      }
#endif
    }
  }
  Node* entry = head->in(LoopNode::EntryControl);

  // Step 1: Clone the loop body.  The clone becomes the peeled iteration.
  //         The pre-loop illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dom_depth(head) );

  // Step 2: Make the old-loop fall-in edges point to the peeled iteration.
  //         Do this by making the old-loop fall-in edges act as if they came
  //         around the loopback from the prior iteration (follow the old-loop
  //         backedges) and then map to the new peeled iteration.  This leaves
  //         the pre-loop with only 1 user (the new peeled iteration), but the
  //         peeled-loop backedge has 2 users.
  Node* new_entry = old_new[head->in(LoopNode::LoopBackControl)->_idx];
  _igvn.hash_delete(head);
  head->set_req(LoopNode::EntryControl, new_entry);
  for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
    Node* old = head->fast_out(j);
    if (old->in(0) == loop->_head && old->req() == 3 && old->is_Phi()) {
      Node* new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx];
      if (!new_exit_value )     // Backedge value is ALSO loop invariant?
        // Then loop body backedge value remains the same.
        new_exit_value = old->in(LoopNode::LoopBackControl);
      _igvn.hash_delete(old);
      old->set_req(LoopNode::EntryControl, new_exit_value);
    }
  }


  // Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
  //         extra backedge user.
  Node* new_head = old_new[head->_idx];
  _igvn.hash_delete(new_head);
  new_head->set_req(LoopNode::LoopBackControl, C->top());
  for (DUIterator_Fast j2max, j2 = new_head->fast_outs(j2max); j2 < j2max; j2++) {
    Node* use = new_head->fast_out(j2);
    if (use->in(0) == new_head && use->req() == 3 && use->is_Phi()) {
      _igvn.hash_delete(use);
      use->set_req(LoopNode::LoopBackControl, C->top());
    }
  }


  // Step 4: Correct dom-depth info.  Set to loop-head depth.
  int dd = dom_depth(head);
  set_idom(head, head->in(1), dd);
  for (uint j3 = 0; j3 < loop->_body.size(); j3++) {
    Node *old = loop->_body.at(j3);
    Node *nnn = old_new[old->_idx];
    if (!has_ctrl(nnn))
      set_idom(nnn, idom(nnn), dd-1);
  }

  // Now force out all loop-invariant dominating tests.  The optimizer
  // finds some, but we _know_ they are all useless.
  peeled_dom_test_elim(loop,old_new);

  loop->record_for_igvn();
}

#define EMPTY_LOOP_SIZE 7 // number of nodes in an empty loop

//------------------------------policy_maximally_unroll------------------------
// Calculate exact loop trip count and return true if loop can be maximally
// unrolled.
bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
  CountedLoopNode *cl = _head->as_CountedLoop();
  assert(cl->is_normal_loop(), "");
  if (!cl->is_valid_counted_loop())
    return false; // Malformed counted loop

  if (!cl->has_exact_trip_count()) {
    // Trip count is not exact.
    return false;
  }

  uint trip_count = cl->trip_count();
  // Note, max_juint is used to indicate unknown trip count.
  assert(trip_count > 1, "one iteration loop should be optimized out already");
  assert(trip_count < max_juint, "exact trip_count should be less than max_uint.");

  // Real policy: if we maximally unroll, does it get too big?
  // Allow the unrolled mess to get larger than standard loop
  // size.  After all, it will no longer be a loop.
  uint body_size    = _body.size();
  uint unroll_limit = (uint)LoopUnrollLimit * 4;
  assert( (intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits");
  if (trip_count > unroll_limit || body_size > unroll_limit) {
    return false;
  }

  // Fully unroll a loop with few iterations, regardless of the next
  // conditions, since following loop optimizations will split
  // such loop anyway (pre-main-post).
  if (trip_count <= 3)
    return true;

  // Take into account that after unroll conjoined heads and tails will fold,
  // otherwise policy_unroll() may allow more unrolling than max unrolling.
  uint new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count;
  uint tst_body_size = (new_body_size - EMPTY_LOOP_SIZE) / trip_count + EMPTY_LOOP_SIZE;
  if (body_size != tst_body_size) // Check for int overflow
    return false;
  if (new_body_size > unroll_limit ||
      // Unrolling can result in a large amount of node construction
      new_body_size >= phase->C->max_node_limit() - phase->C->live_nodes()) {
    return false;
  }

  // Do not unroll a loop with String intrinsics code.
  // String intrinsics are large and have loops.
  for (uint k = 0; k < _body.size(); k++) {
    Node* n = _body.at(k);
    switch (n->Opcode()) {
      case Op_StrComp:
      case Op_StrEquals:
      case Op_StrIndexOf:
      case Op_EncodeISOArray:
      case Op_AryEq: {
        return false;
      }
#if INCLUDE_RTM_OPT
      case Op_FastLock:
      case Op_FastUnlock: {
        // Don't unroll RTM locking code because it is large.
        if (UseRTMLocking) {
          return false;
        }
      }
#endif
    } // switch
  }

  return true; // Do maximally unroll
}


//------------------------------policy_unroll----------------------------------
// Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if
// the loop is a CountedLoop and the body is small enough.
bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {

  CountedLoopNode *cl = _head->as_CountedLoop();
  assert(cl->is_normal_loop() || cl->is_main_loop(), "");

  if (!cl->is_valid_counted_loop())
    return false; // Malformed counted loop

  // Protect against over-unrolling.
  // After split at least one iteration will be executed in pre-loop.
  if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false;

  int future_unroll_ct = cl->unrolled_count() * 2;
  if (future_unroll_ct > LoopMaxUnroll) return false;

  // Check for initial stride being a small enough constant
  if (abs(cl->stride_con()) > (1<<2)*future_unroll_ct) return false;

  // Don't unroll if the next round of unrolling would push us
  // over the expected trip count of the loop.  One is subtracted
  // from the expected trip count because the pre-loop normally
  // executes 1 iteration.
  if (UnrollLimitForProfileCheck > 0 &&
      cl->profile_trip_cnt() != COUNT_UNKNOWN &&
      future_unroll_ct > UnrollLimitForProfileCheck &&
      (float)future_unroll_ct > cl->profile_trip_cnt() - 1.0) {
    return false;
  }

  // When unroll count is greater than LoopUnrollMin, don't unroll if:
  //   the residual iterations are more than 10% of the trip count
  //   and rounds of "unroll,optimize" are not making significant progress
  //   Progress defined as current size less than 20% larger than previous size.
  if (UseSuperWord && cl->node_count_before_unroll() > 0 &&
      future_unroll_ct > LoopUnrollMin &&
      (future_unroll_ct - 1) * 10.0 > cl->profile_trip_cnt() &&
      1.2 * cl->node_count_before_unroll() < (double)_body.size()) {
    return false;
  }

  Node *init_n = cl->init_trip();
  Node *limit_n = cl->limit();
  int stride_con = cl->stride_con();
  // Non-constant bounds.
  // Protect against over-unrolling when init and/or limit are not constant
  // (so that trip_count's init value is maxint) but iv range is known.
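  // For example, if the iv is known to stay within [0, 3] and the current
  // stride is 2, the doubled stride of 4 would already step past _hi, so the
  // range check below refuses further unrolling.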
  if (init_n == NULL || !init_n->is_Con() ||
      limit_n == NULL || !limit_n->is_Con()) {
    Node* phi = cl->phi();
    if (phi != NULL) {
      assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi.");
      const TypeInt* iv_type = phase->_igvn.type(phi)->is_int();
      int next_stride = stride_con * 2; // stride after this unroll
      if (next_stride > 0) {
        if (iv_type->_lo + next_stride <= iv_type->_lo || // overflow
            iv_type->_lo + next_stride >  iv_type->_hi) {
          return false;  // over-unrolling
        }
      } else if (next_stride < 0) {
        if (iv_type->_hi + next_stride >= iv_type->_hi || // overflow
            iv_type->_hi + next_stride <  iv_type->_lo) {
          return false;  // over-unrolling
        }
      }
    }
  }

  // After unrolling, the limit will be adjusted: new_limit = limit-stride.
  // Bail out if the adjustment overflows.
  const TypeInt* limit_type = phase->_igvn.type(limit_n)->is_int();
  if (stride_con > 0 && ((limit_type->_hi - stride_con) >= limit_type->_hi) ||
      stride_con < 0 && ((limit_type->_lo - stride_con) <= limit_type->_lo))
    return false;  // overflow

  // Adjust body_size to determine if we unroll or not
  uint body_size = _body.size();
  // Key test to unroll loop in CRC32 java code
  int xors_in_loop = 0;
  // Also count ModL, DivL and MulL which expand mightily
  for (uint k = 0; k < _body.size(); k++) {
    Node* n = _body.at(k);
    switch (n->Opcode()) {
      case Op_XorI: xors_in_loop++; break; // CRC32 java code
      case Op_ModL: body_size += 30; break;
      case Op_DivL: body_size += 30; break;
      case Op_MulL: body_size += 10; break;
      case Op_StrComp:
      case Op_StrEquals:
      case Op_StrIndexOf:
      case Op_EncodeISOArray:
      case Op_AryEq: {
        // Do not unroll a loop with String intrinsics code.
        // String intrinsics are large and have loops.
        return false;
      }
#if INCLUDE_RTM_OPT
      case Op_FastLock:
      case Op_FastUnlock: {
        // Don't unroll RTM locking code because it is large.
        if (UseRTMLocking) {
          return false;
        }
      }
#endif
    } // switch
  }

  // Check for being too big
  if (body_size > (uint)LoopUnrollLimit) {
    if (xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true;
    // Normal case: loop too big
    return false;
  }

  // Unroll once!  (Each trip will soon do double iterations)
  return true;
}

//------------------------------policy_align-----------------------------------
// Return TRUE or FALSE if the loop should be cache-line aligned.  Gather the
// expression that does the alignment.  Note that only one array base can be
// aligned in a loop (unless the VM guarantees mutual alignment).  Note that
// if we vectorize short memory ops into longer memory ops, we may want to
// increase alignment.
bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const {
  return false;
}

//------------------------------policy_range_check-----------------------------
// Return TRUE or FALSE if the loop should be range-check-eliminated.
// Actually we do iteration-splitting, a more powerful form of RCE.
bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const {
  if (!RangeCheckElimination) return false;

  CountedLoopNode *cl = _head->as_CountedLoop();
  // If we unrolled with no intention of doing RCE and we later
  // changed our minds, we got no pre-loop.  Either we need to
  // make a new pre-loop, or we gotta disallow RCE.
  if (cl->is_main_no_pre_loop()) return false; // Disallowed for now.
  Node *trip_counter = cl->phi();

  // Check loop body for tests of trip-counter plus loop-invariant vs
  // loop-invariant.
  for (uint i = 0; i < _body.size(); i++) {
    Node *iff = _body[i];
    if (iff->Opcode() == Op_If) { // Test?

      // Comparing trip+off vs limit
      Node *bol = iff->in(1);
      if (bol->req() != 2) continue; // dead constant test
      if (!bol->is_Bool()) {
        assert(UseLoopPredicate && bol->Opcode() == Op_Conv2B, "predicate check only");
        continue;
      }
      if (bol->as_Bool()->_test._test == BoolTest::ne)
        continue; // not RC

      Node *cmp = bol->in(1);
      Node *rc_exp = cmp->in(1);
      Node *limit = cmp->in(2);

      Node *limit_c = phase->get_ctrl(limit);
      if( limit_c == phase->C->top() )
        return false;           // Found dead test on live IF?  No RCE!
      if( is_member(phase->get_loop(limit_c) ) ) {
        // Compare might have operands swapped; commute them
        rc_exp = cmp->in(2);
        limit  = cmp->in(1);
        limit_c = phase->get_ctrl(limit);
        if( is_member(phase->get_loop(limit_c) ) )
          continue;             // Both inputs are loop varying; cannot RCE
      }

      if (!phase->is_scaled_iv_plus_offset(rc_exp, trip_counter, NULL, NULL)) {
        continue;
      }
      // Yeah!  Found a test like 'trip+off vs limit'
      // Test is an IfNode, has 2 projections.  If BOTH are in the loop
      // we need loop unswitching instead of iteration splitting.
      if( is_loop_exit(iff) )
        return true; // Found reason to split iterations
    } // End of is IF
  }

  return false;
}

//------------------------------policy_peel_only-------------------------------
// Return TRUE or FALSE if the loop should NEVER be RCE'd or aligned.  Useful
// for unrolling loops with NO array accesses.
bool IdealLoopTree::policy_peel_only( PhaseIdealLoop *phase ) const {

  for( uint i = 0; i < _body.size(); i++ )
    if( _body[i]->is_Mem() )
      return false;

  // No memory accesses at all!
  return true;
}

//------------------------------clone_up_backedge_goo--------------------------
// If Node n lives in the back_ctrl block and cannot float, we clone a private
// version of n in preheader_ctrl block and return that, otherwise return n.
Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones ) {
  if( get_ctrl(n) != back_ctrl ) return n;

  // Only visit once
  if (visited.test_set(n->_idx)) {
    Node *x = clones.find(n->_idx);
    if (x != NULL)
      return x;
    return n;
  }

  Node *x = NULL;               // If required, a clone of 'n'
  // Check for 'n' being pinned in the backedge.
  if( n->in(0) && n->in(0) == back_ctrl ) {
    assert(clones.find(n->_idx) == NULL, "dead loop");
    x = n->clone();             // Clone a copy of 'n' to preheader
    clones.push(x, n->_idx);
    x->set_req( 0, preheader_ctrl ); // Fix x's control input to preheader
  }

  // Recursively fix up any other input edges into x.
  // If there are no changes we can just return 'n', otherwise
  // we need to clone a private copy and change it.
  for( uint i = 1; i < n->req(); i++ ) {
    Node *g = clone_up_backedge_goo( back_ctrl, preheader_ctrl, n->in(i), visited, clones );
    if( g != n->in(i) ) {
      if( !x ) {
        assert(clones.find(n->_idx) == NULL, "dead loop");
        x = n->clone();
        clones.push(x, n->_idx);
      }
      x->set_req(i, g);
    }
  }
  if( x ) {                     // x can legally float to pre-header location
    register_new_node( x, preheader_ctrl );
    return x;
  } else {                      // raise n to cover LCA of uses
    set_ctrl( n, find_non_split_ctrl(back_ctrl->in(0)) );
  }
  return n;
}

bool PhaseIdealLoop::cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop) {
  Node* castii = new CastIINode(incr, TypeInt::INT, true);
  castii->set_req(0, ctrl);
  register_new_node(castii, ctrl);
  for (DUIterator_Fast imax, i = incr->fast_outs(imax); i < imax; i++) {
    Node* n = incr->fast_out(i);
    if (n->is_Phi() && n->in(0) == loop) {
      int nrep = n->replace_edge(incr, castii);
      return true;
    }
  }
  return false;
}

//------------------------------insert_pre_post_loops--------------------------
// Insert pre and post loops.  If peel_only is set, the pre-loop can not have
// more iterations added.  It acts as a 'peel' only, no lower-bound RCE, no
// alignment.  Useful to unroll loops that do no array accesses.
void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ) {

#ifndef PRODUCT
  if (TraceLoopOpts) {
    if (peel_only)
      tty->print("PeelMainPost ");
    else
      tty->print("PreMainPost ");
    loop->dump_head();
  }
#endif
  C->set_major_progress();

  // Find common pieces of the loop being guarded with pre & post loops
  CountedLoopNode *main_head = loop->_head->as_CountedLoop();
  assert( main_head->is_normal_loop(), "" );
  CountedLoopEndNode *main_end = main_head->loopexit();
  guarantee(main_end != NULL, "no loop exit node");
  assert( main_end->outcnt() == 2, "1 true, 1 false path only" );
  uint dd_main_head = dom_depth(main_head);
  uint max = main_head->outcnt();

  Node *pre_header= main_head->in(LoopNode::EntryControl);
  Node *init      = main_head->init_trip();
  Node *incr      = main_end ->incr();
  Node *limit     = main_end ->limit();
  Node *stride    = main_end ->stride();
  Node *cmp       = main_end ->cmp_node();
  BoolTest::mask b_test = main_end->test_trip();

  // Need only 1 user of 'bol' because I will be hacking the loop bounds.
  Node *bol = main_end->in(CountedLoopEndNode::TestValue);
  if( bol->outcnt() != 1 ) {
    bol = bol->clone();
    register_new_node(bol,main_end->in(CountedLoopEndNode::TestControl));
    _igvn.replace_input_of(main_end, CountedLoopEndNode::TestValue, bol);
  }
  // Need only 1 user of 'cmp' because I will be hacking the loop bounds.
  if( cmp->outcnt() != 1 ) {
    cmp = cmp->clone();
    register_new_node(cmp,main_end->in(CountedLoopEndNode::TestControl));
    _igvn.replace_input_of(bol, 1, cmp);
  }

  //------------------------------
  // Step A: Create Post-Loop.
  Node* main_exit = main_end->proj_out(false);
  assert( main_exit->Opcode() == Op_IfFalse, "" );
  int dd_main_exit = dom_depth(main_exit);

  // Step A1: Clone the loop body.  The clone becomes the post-loop.  The main
  // loop pre-header illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dd_main_exit );
  assert( old_new[main_end ->_idx]->Opcode() == Op_CountedLoopEnd, "" );
  CountedLoopNode *post_head = old_new[main_head->_idx]->as_CountedLoop();
  post_head->set_post_loop(main_head);

  // Reduce the post-loop trip count.
  CountedLoopEndNode* post_end = old_new[main_end ->_idx]->as_CountedLoopEnd();
  post_end->_prob = PROB_FAIR;

  // Build the main-loop normal exit.
  IfFalseNode *new_main_exit = new IfFalseNode(main_end);
  _igvn.register_new_node_with_optimizer( new_main_exit );
  set_idom(new_main_exit, main_end, dd_main_exit );
  set_loop(new_main_exit, loop->_parent);

  // Step A2: Build a zero-trip guard for the post-loop.  After leaving the
  // main-loop, the post-loop may not execute at all.  We 'opaque' the incr
  // (the main-loop trip-counter exit value) because we will be changing
  // the exit value (via unrolling) so we cannot constant-fold away the zero
  // trip guard until all unrolling is done.
  Node *zer_opaq = new Opaque1Node(C, incr);
  Node *zer_cmp  = new CmpINode( zer_opaq, limit );
  Node *zer_bol  = new BoolNode( zer_cmp, b_test );
  register_new_node( zer_opaq, new_main_exit );
  register_new_node( zer_cmp , new_main_exit );
  register_new_node( zer_bol , new_main_exit );

  // Build the IfNode
  IfNode *zer_iff = new IfNode( new_main_exit, zer_bol, PROB_FAIR, COUNT_UNKNOWN );
  _igvn.register_new_node_with_optimizer( zer_iff );
  set_idom(zer_iff, new_main_exit, dd_main_exit);
  set_loop(zer_iff, loop->_parent);

  // Plug in the false-path, taken if we need to skip post-loop
  _igvn.replace_input_of(main_exit, 0, zer_iff);
  set_idom(main_exit, zer_iff, dd_main_exit);
  set_idom(main_exit->unique_out(), zer_iff, dd_main_exit);
  // Make the true-path, must enter the post loop
  Node *zer_taken = new IfTrueNode( zer_iff );
  _igvn.register_new_node_with_optimizer( zer_taken );
  set_idom(zer_taken, zer_iff, dd_main_exit);
  set_loop(zer_taken, loop->_parent);
  // Plug in the true path
  _igvn.hash_delete( post_head );
  post_head->set_req(LoopNode::EntryControl, zer_taken);
  set_idom(post_head, zer_taken, dd_main_exit);

  Arena *a = Thread::current()->resource_area();
  VectorSet visited(a);
  Node_Stack clones(a, main_head->back_control()->outcnt());
  // Step A3: Make the fall-in values to the post-loop come from the
  // fall-out values of the main-loop.
  for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) {
    Node* main_phi = main_head->fast_out(i);
    if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() >0 ) {
      Node *post_phi = old_new[main_phi->_idx];
      Node *fallmain = clone_up_backedge_goo(main_head->back_control(),
                                             post_head->init_control(),
                                             main_phi->in(LoopNode::LoopBackControl),
                                             visited, clones);
      _igvn.hash_delete(post_phi);
      post_phi->set_req( LoopNode::EntryControl, fallmain );
    }
  }

  // Update local caches for next stanza
  main_exit = new_main_exit;


  //------------------------------
  // Step B: Create Pre-Loop.

  // Step B1: Clone the loop body.  The clone becomes the pre-loop.  The main
  // loop pre-header illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dd_main_head );
  CountedLoopNode*    pre_head = old_new[main_head->_idx]->as_CountedLoop();
  CountedLoopEndNode* pre_end  = old_new[main_end ->_idx]->as_CountedLoopEnd();
  pre_head->set_pre_loop(main_head);
  Node *pre_incr = old_new[incr->_idx];

  // Reduce the pre-loop trip count.
  pre_end->_prob = PROB_FAIR;

  // Find the pre-loop normal exit.
  Node* pre_exit = pre_end->proj_out(false);
  assert( pre_exit->Opcode() == Op_IfFalse, "" );
  IfFalseNode *new_pre_exit = new IfFalseNode(pre_end);
  _igvn.register_new_node_with_optimizer( new_pre_exit );
  set_idom(new_pre_exit, pre_end, dd_main_head);
  set_loop(new_pre_exit, loop->_parent);

  // Step B2: Build a zero-trip guard for the main-loop.  After leaving the
  // pre-loop, the main-loop may not execute at all.  Later in life this
  // zero-trip guard will become the minimum-trip guard when we unroll
  // the main-loop.
  Node *min_opaq = new Opaque1Node(C, limit);
  Node *min_cmp  = new CmpINode( pre_incr, min_opaq );
  Node *min_bol  = new BoolNode( min_cmp, b_test );
  register_new_node( min_opaq, new_pre_exit );
  register_new_node( min_cmp , new_pre_exit );
  register_new_node( min_bol , new_pre_exit );

  // Build the IfNode (assume the main-loop is executed always).
  IfNode *min_iff = new IfNode( new_pre_exit, min_bol, PROB_ALWAYS, COUNT_UNKNOWN );
  _igvn.register_new_node_with_optimizer( min_iff );
  set_idom(min_iff, new_pre_exit, dd_main_head);
  set_loop(min_iff, loop->_parent);

  // Plug in the false-path, taken if we need to skip main-loop
  _igvn.hash_delete( pre_exit );
  pre_exit->set_req(0, min_iff);
  set_idom(pre_exit, min_iff, dd_main_head);
  set_idom(pre_exit->unique_out(), min_iff, dd_main_head);
  // Make the true-path, must enter the main loop
  Node *min_taken = new IfTrueNode( min_iff );
  _igvn.register_new_node_with_optimizer( min_taken );
  set_idom(min_taken, min_iff, dd_main_head);
  set_loop(min_taken, loop->_parent);
  // Plug in the true path
  _igvn.hash_delete( main_head );
  main_head->set_req(LoopNode::EntryControl, min_taken);
  set_idom(main_head, min_taken, dd_main_head);

  visited.Clear();
  clones.clear();
  // Step B3: Make the fall-in values to the main-loop come from the
  // fall-out values of the pre-loop.
  for (DUIterator_Fast i2max, i2 = main_head->fast_outs(i2max); i2 < i2max; i2++) {
    Node* main_phi = main_head->fast_out(i2);
    if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) {
      Node *pre_phi = old_new[main_phi->_idx];
      Node *fallpre = clone_up_backedge_goo(pre_head->back_control(),
                                            main_head->init_control(),
                                            pre_phi->in(LoopNode::LoopBackControl),
                                            visited, clones);
      _igvn.hash_delete(main_phi);
      main_phi->set_req( LoopNode::EntryControl, fallpre );
    }
  }

  // Nodes inside the loop may be control dependent on a predicate
  // that was moved before the preloop. If the back branch of the main
  // or post loops becomes dead, those nodes won't be dependent on the
  // test that guards that loop nest anymore which could lead to an
  // incorrect array access because it executes independently of the
  // test that was guarding the loop nest. We add a special CastII on
  // the if branch that enters the loop, between the input induction
  // variable value and the induction variable Phi to preserve correct
  // dependencies.

  // CastII for the post loop:
  bool inserted = cast_incr_before_loop(zer_opaq->in(1), zer_taken, post_head);
  assert(inserted, "no castII inserted");

  // CastII for the main loop:
  inserted = cast_incr_before_loop(pre_incr, min_taken, main_head);
  assert(inserted, "no castII inserted");

  // Step B4: Shorten the pre-loop to run only 1 iteration (for now).
  // RCE and alignment may change this later.
  Node *cmp_end = pre_end->cmp_node();
  assert( cmp_end->in(2) == limit, "" );
  Node *pre_limit = new AddINode( init, stride );

  // Save the original loop limit in this Opaque1 node for
  // use by range check elimination.
  Node *pre_opaq  = new Opaque1Node(C, pre_limit, limit);

  register_new_node( pre_limit, pre_head->in(0) );
  register_new_node( pre_opaq , pre_head->in(0) );

  // Since no other users of pre-loop compare, I can hack limit directly
  assert( cmp_end->outcnt() == 1, "no other users" );
  _igvn.hash_delete(cmp_end);
  cmp_end->set_req(2, peel_only ? pre_limit : pre_opaq);

  // Special case for not-equal loop bounds:
  // Change pre loop test, main loop test, and the
  // main loop guard test to use lt or gt depending on stride
  // direction:
  // positive stride use <
  // negative stride use >
  //
  // not-equal test is kept for post loop to handle case
  // when init > limit when stride > 0 (and reverse).

  if (pre_end->in(CountedLoopEndNode::TestValue)->as_Bool()->_test._test == BoolTest::ne) {

    BoolTest::mask new_test = (main_end->stride_con() > 0) ? BoolTest::lt : BoolTest::gt;
    // Modify pre loop end condition
    Node* pre_bol = pre_end->in(CountedLoopEndNode::TestValue)->as_Bool();
    BoolNode* new_bol0 = new BoolNode(pre_bol->in(1), new_test);
    register_new_node( new_bol0, pre_head->in(0) );
    _igvn.replace_input_of(pre_end, CountedLoopEndNode::TestValue, new_bol0);
    // Modify main loop guard condition
    assert(min_iff->in(CountedLoopEndNode::TestValue) == min_bol, "guard okay");
    BoolNode* new_bol1 = new BoolNode(min_bol->in(1), new_test);
    register_new_node( new_bol1, new_pre_exit );
    _igvn.hash_delete(min_iff);
    min_iff->set_req(CountedLoopEndNode::TestValue, new_bol1);
    // Modify main loop end condition
    BoolNode* main_bol = main_end->in(CountedLoopEndNode::TestValue)->as_Bool();
    BoolNode* new_bol2 = new BoolNode(main_bol->in(1), new_test);
    register_new_node( new_bol2, main_end->in(CountedLoopEndNode::TestControl) );
    _igvn.replace_input_of(main_end, CountedLoopEndNode::TestValue, new_bol2);
  }

  // Flag main loop
  main_head->set_main_loop();
  if( peel_only ) main_head->set_main_no_pre_loop();

  // Subtract a trip count for the pre-loop.
  main_head->set_trip_count(main_head->trip_count() - 1);

  // It's difficult to be precise about the trip-counts
  // for the pre/post loops.  They are usually very short,
  // so guess that 4 trips is a reasonable value.
  post_head->set_profile_trip_cnt(4.0);
  pre_head->set_profile_trip_cnt(4.0);

  // Now force out all loop-invariant dominating tests.  The optimizer
  // finds some, but we _know_ they are all useless.
  peeled_dom_test_elim(loop,old_new);
  loop->record_for_igvn();
}

//------------------------------is_invariant-----------------------------
// Return true if n is invariant
bool IdealLoopTree::is_invariant(Node* n) const {
  Node *n_c = _phase->has_ctrl(n) ? _phase->get_ctrl(n) : n;
  if (n_c->is_top()) return false;
  return !is_member(_phase->get_loop(n_c));
}


//------------------------------do_unroll--------------------------------------
// Unroll the loop body one step - make each trip do 2 iterations.
void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip ) {
  assert(LoopUnrollLimit, "");
  CountedLoopNode *loop_head = loop->_head->as_CountedLoop();
  CountedLoopEndNode *loop_end = loop_head->loopexit();
  assert(loop_end, "");
#ifndef PRODUCT
  if (PrintOpto && VerifyLoopOptimizations) {
    tty->print("Unrolling ");
    loop->dump_head();
  } else if (TraceLoopOpts) {
    if (loop_head->trip_count() < (uint)LoopUnrollLimit) {
      tty->print("Unroll %d(%2d) ", loop_head->unrolled_count()*2, loop_head->trip_count());
    } else {
      tty->print("Unroll %d ", loop_head->unrolled_count()*2);
    }
    loop->dump_head();
  }

  if (C->do_vector_loop() && (PrintOpto && VerifyLoopOptimizations || TraceLoopOpts)) {
    Arena* arena = Thread::current()->resource_area();
    Node_Stack stack(arena, C->unique() >> 2);
    Node_List rpo_list;
    VectorSet visited(arena);
    visited.set(loop_head->_idx);
    rpo( loop_head, stack, visited, rpo_list );
    dump(loop, rpo_list.size(), rpo_list );
  }
#endif

  // Remember loop node count before unrolling to detect
  // if rounds of unroll,optimize are making progress
  loop_head->set_node_count_before_unroll(loop->_body.size());

  Node *ctrl  = loop_head->in(LoopNode::EntryControl);
  Node *limit = loop_head->limit();
  Node *init  = loop_head->init_trip();
  Node *stride = loop_head->stride();

  Node *opaq = NULL;
  if (adjust_min_trip) {       // If not maximally unrolling, need adjustment
    // Search for zero-trip guard.
    assert( loop_head->is_main_loop(), "" );
    assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" );
    Node *iff = ctrl->in(0);
    assert( iff->Opcode() == Op_If, "" );
    Node *bol = iff->in(1);
    assert( bol->Opcode() == Op_Bool, "" );
    Node *cmp = bol->in(1);
    assert( cmp->Opcode() == Op_CmpI, "" );
    opaq = cmp->in(2);
    // Occasionally it's possible for a zero-trip guard Opaque1 node to be
    // optimized away and then another round of loop opts attempted.
    // We can not optimize this particular loop in that case.
    if (opaq->Opcode() != Op_Opaque1)
      return; // Cannot find zero-trip guard!  Bail out!
    // Zero-trip test uses an 'opaque' node which is not shared.
    assert(opaq->outcnt() == 1 && opaq->in(1) == limit, "");
  }

  C->set_major_progress();

  Node* new_limit = NULL;
  if (UnrollLimitCheck) {
    int stride_con = stride->get_int();
    int stride_p = (stride_con > 0) ? stride_con : -stride_con;
    uint old_trip_count = loop_head->trip_count();
    // Verify that unroll policy result is still valid.
    assert(old_trip_count > 1 &&
           (!adjust_min_trip || stride_p <= (1<<3)*loop_head->unrolled_count()), "sanity");

    // Adjust the loop limit to keep a valid iteration count after unrolling.
    // Use (limit - stride) instead of (((limit - init)/stride) & (-2))*stride
    // which may overflow.
    if (!adjust_min_trip) {
      assert(old_trip_count > 1 && (old_trip_count & 1) == 0,
             "odd trip count for maximally unroll");
      // Don't need to adjust limit for maximally unroll since trip count is even.
    } else if (loop_head->has_exact_trip_count() && init->is_Con()) {
      // Loop's limit is constant. Loop's init could be constant when pre-loop
      // becomes the peeled iteration.
      jlong init_con = init->get_int();
      // We can keep old loop limit if iterations count stays the same:
      //   old_trip_count == new_trip_count * 2
      // Note: since old_trip_count >= 2 then new_trip_count >= 1
      // so we also don't need to adjust zero trip test.
      jlong limit_con  = limit->get_int();
      // (stride_con*2) does not overflow since stride_con <= 8.
      int new_stride_con = stride_con * 2;
      int stride_m    = new_stride_con - (stride_con > 0 ? 1 : -1);
      jlong trip_count = (limit_con - init_con + stride_m)/new_stride_con;
      // New trip count should satisfy next conditions.
      assert(trip_count > 0 && (julong)trip_count < (julong)max_juint/2, "sanity");
      uint new_trip_count = (uint)trip_count;
      adjust_min_trip = (old_trip_count != new_trip_count*2);
    }

    if (adjust_min_trip) {
      // Step 2: Adjust the trip limit if it is called for.
      // The adjustment amount is -stride. Need to make sure if the
      // adjustment underflows or overflows, then the main loop is skipped.
      Node* cmp = loop_end->cmp_node();
      assert(cmp->in(2) == limit, "sanity");
      assert(opaq != NULL && opaq->in(1) == limit, "sanity");

      // Verify that policy_unroll result is still valid.
      const TypeInt* limit_type = _igvn.type(limit)->is_int();
      assert(stride_con > 0 && ((limit_type->_hi - stride_con) < limit_type->_hi) ||
             stride_con < 0 && ((limit_type->_lo - stride_con) > limit_type->_lo), "sanity");

      if (limit->is_Con()) {
        // The check in policy_unroll and the assert above guarantee
        // no underflow if limit is constant.
        new_limit = _igvn.intcon(limit->get_int() - stride_con);
        set_ctrl(new_limit, C->root());
      } else {
        // Limit is not constant.
        if (loop_head->unrolled_count() == 1) { // only for first unroll
          // Separate limit by Opaque node in case it is an incremented
          // variable from previous loop to avoid using pre-incremented
          // value which could increase register pressure.
          // Otherwise reorg_offsets() optimization will create a separate
          // Opaque node for each use of trip-counter and as result
          // zero trip guard limit will be different from loop limit.
          assert(has_ctrl(opaq), "should have it");
          Node* opaq_ctrl = get_ctrl(opaq);
          limit = new Opaque2Node( C, limit );
          register_new_node( limit, opaq_ctrl );
        }
        if (stride_con > 0 && ((limit_type->_lo - stride_con) < limit_type->_lo) ||
            stride_con < 0 && ((limit_type->_hi - stride_con) > limit_type->_hi)) {
          // No underflow.
          new_limit = new SubINode(limit, stride);
        } else {
          // (limit - stride) may underflow.
          // Clamp the adjustment value with MININT or MAXINT:
          //
          //   new_limit = limit-stride
          //   if (stride > 0)
          //     new_limit = (limit < new_limit) ? MININT : new_limit;
          //   else
          //     new_limit = (limit > new_limit) ? MAXINT : new_limit;
          //
          BoolTest::mask bt = loop_end->test_trip();
          assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected");
          Node* adj_max = _igvn.intcon((stride_con > 0) ? min_jint : max_jint);
          set_ctrl(adj_max, C->root());
          Node* old_limit = NULL;
          Node* adj_limit = NULL;
          Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : NULL;
          if (loop_head->unrolled_count() > 1 &&
              limit->is_CMove() && limit->Opcode() == Op_CMoveI &&
              limit->in(CMoveNode::IfTrue) == adj_max &&
              bol->as_Bool()->_test._test == bt &&
              bol->in(1)->Opcode() == Op_CmpI &&
              bol->in(1)->in(2) == limit->in(CMoveNode::IfFalse)) {
            // Loop was unrolled before.
            // Optimize the limit to avoid nested CMove:
            // use original limit as old limit.
            old_limit = bol->in(1)->in(1);
            // Adjust previous adjusted limit.
            adj_limit = limit->in(CMoveNode::IfFalse);
            adj_limit = new SubINode(adj_limit, stride);
          } else {
            old_limit = limit;
            adj_limit = new SubINode(limit, stride);
          }
          assert(old_limit != NULL && adj_limit != NULL, "");
          register_new_node( adj_limit, ctrl ); // adjust amount
          Node* adj_cmp = new CmpINode(old_limit, adj_limit);
          register_new_node( adj_cmp, ctrl );
          Node* adj_bool = new BoolNode(adj_cmp, bt);
          register_new_node( adj_bool, ctrl );
          new_limit = new CMoveINode(adj_bool, adj_limit, adj_max, TypeInt::INT);
        }
        register_new_node(new_limit, ctrl);
      }
      assert(new_limit != NULL, "");
      // Replace in loop test.
      assert(loop_end->in(1)->in(1) == cmp, "sanity");
      if (cmp->outcnt() == 1 && loop_end->in(1)->outcnt() == 1) {
        // Don't need to create new test since only one user.
        _igvn.hash_delete(cmp);
        cmp->set_req(2, new_limit);
      } else {
        // Create new test since it is shared.
        Node* ctrl2 = loop_end->in(0);
        Node* cmp2  = cmp->clone();
        cmp2->set_req(2, new_limit);
        register_new_node(cmp2, ctrl2);
        Node* bol2  = loop_end->in(1)->clone();
        bol2->set_req(1, cmp2);
        register_new_node(bol2, ctrl2);
        _igvn.replace_input_of(loop_end, 1, bol2);
      }
      // Step 3: Find the min-trip test guaranteed before a 'main' loop.
      // Make it a 1-trip test (means at least 2 trips).

      // Guard test uses an 'opaque' node which is not shared.  Hence I
      // can edit its inputs directly.  Hammer in the new limit for the
      // minimum-trip guard.
      assert(opaq->outcnt() == 1, "");
      _igvn.replace_input_of(opaq, 1, new_limit);
    }

    // Adjust max trip count. The trip count is intentionally rounded
    // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
    // the main, unrolled, part of the loop will never execute as it is protected
    // by the min-trip test.  See bug 4834191 for a case where we over-unrolled
    // and later determined that part of the unrolled loop was dead.
    loop_head->set_trip_count(old_trip_count / 2);

    // Double the count of original iterations in the unrolled loop body.
    loop_head->double_unrolled_count();

  } else { // LoopLimitCheck

    // Adjust max trip count. The trip count is intentionally rounded
    // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
    // the main, unrolled, part of the loop will never execute as it is protected
    // by the min-trip test.  See bug 4834191 for a case where we over-unrolled
    // and later determined that part of the unrolled loop was dead.
    loop_head->set_trip_count(loop_head->trip_count() / 2);

    // Double the count of original iterations in the unrolled loop body.
    loop_head->double_unrolled_count();

    // -----------
    // Step 2: Cut back the trip counter for an unroll amount of 2.
    // Loop will normally trip (limit - init)/stride_con.  Since it's a
    // CountedLoop this is exact (stride divides limit-init exactly).
    // We are going to double the loop body, so we want to knock off any
    // odd iteration: (trip_cnt & ~1).  Then back compute a new limit.
    Node *span = new SubINode( limit, init );
    register_new_node( span, ctrl );
    Node *trip = new DivINode( 0, span, stride );
    register_new_node( trip, ctrl );
    Node *mtwo = _igvn.intcon(-2);
    set_ctrl(mtwo, C->root());
    Node *rond = new AndINode( trip, mtwo );
    register_new_node( rond, ctrl );
    Node *spn2 = new MulINode( rond, stride );
    register_new_node( spn2, ctrl );
    new_limit = new AddINode( spn2, init );
    register_new_node( new_limit, ctrl );

    // Hammer in the new limit
    Node *ctrl2 = loop_end->in(0);
    Node *cmp2 = new CmpINode( loop_head->incr(), new_limit );
    register_new_node( cmp2, ctrl2 );
    Node *bol2 = new BoolNode( cmp2, loop_end->test_trip() );
    register_new_node( bol2, ctrl2 );
    _igvn.replace_input_of(loop_end, CountedLoopEndNode::TestValue, bol2);

    // Step 3: Find the min-trip test guaranteed before a 'main' loop.
    // Make it a 1-trip test (means at least 2 trips).
    if( adjust_min_trip ) {
      assert( new_limit != NULL, "" );
      // Guard test uses an 'opaque' node which is not shared.  Hence I
      // can edit its inputs directly.  Hammer in the new limit for the
      // minimum-trip guard.
      assert( opaq->outcnt() == 1, "" );
      _igvn.hash_delete(opaq);
      opaq->set_req(1, new_limit);
    }
  } // LoopLimitCheck

  // ---------
  // Step 4: Clone the loop body.  Move it inside the loop.  This loop body
  // represents the odd iterations; since the loop trips an even number of
  // times its backedge is never taken.  Kill the backedge.
  uint dd = dom_depth(loop_head);
  clone_loop( loop, old_new, dd );

  // Make backedges of the clone equal to backedges of the original.
  // Make the fall-in from the original come from the fall-out of the clone.
1471 for (DUIterator_Fast jmax, j = loop_head->fast_outs(jmax); j < jmax; j++) { 1472 Node* phi = loop_head->fast_out(j); 1473 if( phi->is_Phi() && phi->in(0) == loop_head && phi->outcnt() > 0 ) { 1474 Node *newphi = old_new[phi->_idx]; 1475 _igvn.hash_delete( phi ); 1476 _igvn.hash_delete( newphi ); 1477 1478 phi ->set_req(LoopNode:: EntryControl, newphi->in(LoopNode::LoopBackControl)); 1479 newphi->set_req(LoopNode::LoopBackControl, phi ->in(LoopNode::LoopBackControl)); 1480 phi ->set_req(LoopNode::LoopBackControl, C->top()); 1481 } 1482 } 1483 Node *clone_head = old_new[loop_head->_idx]; 1484 _igvn.hash_delete( clone_head ); 1485 loop_head ->set_req(LoopNode:: EntryControl, clone_head->in(LoopNode::LoopBackControl)); 1486 clone_head->set_req(LoopNode::LoopBackControl, loop_head ->in(LoopNode::LoopBackControl)); 1487 loop_head ->set_req(LoopNode::LoopBackControl, C->top()); 1488 loop->_head = clone_head; // New loop header 1489 1490 set_idom(loop_head, loop_head ->in(LoopNode::EntryControl), dd); 1491 set_idom(clone_head, clone_head->in(LoopNode::EntryControl), dd); 1492 1493 // Kill the clone's backedge 1494 Node *newcle = old_new[loop_end->_idx]; 1495 _igvn.hash_delete( newcle ); 1496 Node *one = _igvn.intcon(1); 1497 set_ctrl(one, C->root()); 1498 newcle->set_req(1, one); 1499 // Force clone into same loop body 1500 uint max = loop->_body.size(); 1501 for( uint k = 0; k < max; k++ ) { 1502 Node *old = loop->_body.at(k); 1503 Node *nnn = old_new[old->_idx]; 1504 loop->_body.push(nnn); 1505 if (!has_ctrl(old)) 1506 set_loop(nnn, loop); 1507 } 1508 1509 loop->record_for_igvn(); 1510 1511 #ifndef PRODUCT 1512 if (C->do_vector_loop() && (PrintOpto && VerifyLoopOptimizations || TraceLoopOpts)) { 1513 tty->print("\nnew loop after unroll\n"); loop->dump_head(); 1514 for (uint i = 0; i < loop->_body.size(); i++) { 1515 loop->_body.at(i)->dump(); 1516 } 1517 if(C->clone_map().is_debug()) { 1518 tty->print("\nCloneMap\n"); 1519 Dict* dict = C->clone_map().dict(); 1520 DictI i(dict); 1521 tty->print_cr("Dict@%p[%d] = ", dict, dict->Size()); 1522 for (int ii = 0; i.test(); ++i, ++ii) { 1523 NodeCloneInfo cl((uint64_t)dict->operator[]((void*)i._key)); 1524 tty->print("%d->%d:%d,", (int)(intptr_t)i._key, cl.idx(), cl.gen()); 1525 if (ii % 10 == 9) { 1526 tty->print_cr(" "); 1527 } 1528 } 1529 tty->print_cr(" "); 1530 } 1531 } 1532 #endif 1533 1534 } 1535 1536 //------------------------------do_maximally_unroll---------------------------- 1537 1538 void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) { 1539 CountedLoopNode *cl = loop->_head->as_CountedLoop(); 1540 assert(cl->has_exact_trip_count(), "trip count is not exact"); 1541 assert(cl->trip_count() > 0, ""); 1542 #ifndef PRODUCT 1543 if (TraceLoopOpts) { 1544 tty->print("MaxUnroll %d ", cl->trip_count()); 1545 loop->dump_head(); 1546 } 1547 #endif 1548 1549 // If loop is tripping an odd number of times, peel odd iteration 1550 if ((cl->trip_count() & 1) == 1) { 1551 do_peeling(loop, old_new); 1552 } 1553 1554 // Now its tripping an even number of times remaining. Double loop body. 1555 // Do not adjust pre-guards; they are not needed and do not exist. 
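// For example, an exact trip count of 7 is peeled above down to 6; the
// assert below then checks that an even count remains before the body is
// doubled (with adjust_min_trip == false, since there is no pre-guard).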
1556 if (cl->trip_count() > 0) { 1557 assert((cl->trip_count() & 1) == 0, "missed peeling"); 1558 do_unroll(loop, old_new, false); 1559 } 1560 } 1561 1562 void PhaseIdealLoop::mark_reductions(IdealLoopTree *loop) { 1563 if (SuperWordReductions == false) return; 1564 1565 CountedLoopNode* loop_head = loop->_head->as_CountedLoop(); 1566 if (loop_head->unrolled_count() > 1) { 1567 return; 1568 } 1569 1570 Node* trip_phi = loop_head->phi(); 1571 for (DUIterator_Fast imax, i = loop_head->fast_outs(imax); i < imax; i++) { 1572 Node* phi = loop_head->fast_out(i); 1573 if (phi->is_Phi() && phi->outcnt() > 0 && phi != trip_phi) { 1574 // For definitions which are loop inclusive and not tripcounts. 1575 Node* def_node = phi->in(LoopNode::LoopBackControl); 1576 1577 if (def_node != NULL) { 1578 Node* n_ctrl = get_ctrl(def_node); 1579 if (n_ctrl != NULL && loop->is_member(get_loop(n_ctrl))) { 1580 // Now test it to see if it fits the standard pattern for a reduction operator. 1581 int opc = def_node->Opcode(); 1582 if (opc != ReductionNode::opcode(opc, def_node->bottom_type()->basic_type())) { 1583 if (!def_node->is_reduction()) { // Not marked yet 1584 // To be a reduction, the arithmetic node must have the phi as input and provide a def to it 1585 bool ok = false; 1586 for (unsigned j = 1; j < def_node->req(); j++) { 1587 Node* in = def_node->in(j); 1588 if (in == phi) { 1589 ok = true; 1590 break; 1591 } 1592 } 1593 1594 // do nothing if we did not match the initial criteria 1595 if (ok == false) { 1596 continue; 1597 } 1598 1599 // The result of the reduction must not be used in the loop 1600 for (DUIterator_Fast imax, i = def_node->fast_outs(imax); i < imax && ok; i++) { 1601 Node* u = def_node->fast_out(i); 1602 if (has_ctrl(u) && !loop->is_member(get_loop(get_ctrl(u)))) { 1603 continue; 1604 } 1605 if (u == phi) { 1606 continue; 1607 } 1608 ok = false; 1609 } 1610 1611 // iff the uses conform 1612 if (ok) { 1613 def_node->add_flag(Node::Flag_is_reduction); 1614 } 1615 } 1616 } 1617 } 1618 } 1619 } 1620 } 1621 } 1622 1623 //------------------------------dominates_backedge--------------------------------- 1624 // Returns true if ctrl is executed on every complete iteration 1625 bool IdealLoopTree::dominates_backedge(Node* ctrl) { 1626 assert(ctrl->is_CFG(), "must be control"); 1627 Node* backedge = _head->as_Loop()->in(LoopNode::LoopBackControl); 1628 return _phase->dom_lca_internal(ctrl, backedge) == ctrl; 1629 } 1630 1631 //------------------------------adjust_limit----------------------------------- 1632 // Helper function for add_constraint(). 1633 Node* PhaseIdealLoop::adjust_limit(int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl) { 1634 // Compute "I :: (limit-offset)/scale" 1635 Node *con = new SubINode(rc_limit, offset); 1636 register_new_node(con, pre_ctrl); 1637 Node *X = new DivINode(0, con, scale); 1638 register_new_node(X, pre_ctrl); 1639 1640 // Adjust loop limit 1641 loop_limit = (stride_con > 0) 1642 ? (Node*)(new MinINode(loop_limit, X)) 1643 : (Node*)(new MaxINode(loop_limit, X)); 1644 register_new_node(loop_limit, pre_ctrl); 1645 return loop_limit; 1646 } 1647 1648 //------------------------------add_constraint--------------------------------- 1649 // Constrain the main loop iterations so the conditions: 1650 // low_limit <= scale_con * I + offset < upper_limit 1651 // always holds true. That is, either increase the number of iterations in 1652 // the pre-loop or the post-loop until the condition holds true in the main 1653 // loop. 
Stride, scale, offset and limit are all loop invariant. Further, 1654 // stride and scale are constants (offset and limit often are). 1655 void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) { 1656 // For positive stride, the pre-loop limit always uses a MAX function 1657 // and the main loop a MIN function. For negative stride these are 1658 // reversed. 1659 1660 // Also for positive stride*scale the affine function is increasing, so the 1661 // pre-loop must check for underflow and the post-loop for overflow. 1662 // Negative stride*scale reverses this; pre-loop checks for overflow and 1663 // post-loop for underflow. 1664 1665 Node *scale = _igvn.intcon(scale_con); 1666 set_ctrl(scale, C->root()); 1667 1668 if ((stride_con^scale_con) >= 0) { // Use XOR to avoid overflow 1669 // The overflow limit: scale*I+offset < upper_limit 1670 // For main-loop compute 1671 // ( if (scale > 0) /* and stride > 0 */ 1672 // I < (upper_limit-offset)/scale 1673 // else /* scale < 0 and stride < 0 */ 1674 // I > (upper_limit-offset)/scale 1675 // ) 1676 // 1677 // (upper_limit-offset) may overflow or underflow. 1678 // But it is fine since the main loop will either have 1679 // fewer iterations or will be skipped in such a case. 1680 *main_limit = adjust_limit(stride_con, scale, offset, upper_limit, *main_limit, pre_ctrl); 1681 1682 // The underflow limit: low_limit <= scale*I+offset. 1683 // For pre-loop compute 1684 // NOT(scale*I+offset >= low_limit) 1685 // scale*I+offset < low_limit 1686 // ( if (scale > 0) /* and stride > 0 */ 1687 // I < (low_limit-offset)/scale 1688 // else /* scale < 0 and stride < 0 */ 1689 // I > (low_limit-offset)/scale 1690 // ) 1691 1692 if (low_limit->get_int() == -max_jint) { 1693 if (!RangeLimitCheck) return; 1694 // We need this guard when scale*pre_limit+offset >= limit 1695 // due to underflow. So we need to execute the pre-loop until 1696 // scale*I+offset >= min_int. But (min_int-offset) will 1697 // underflow when offset > 0 and X will be > original_limit 1698 // when stride > 0. To avoid it we replace positive offset with 0. 1699 // 1700 // Also (min_int+1 == -max_int) is used instead of min_int here 1701 // to avoid problem with scale == -1 (min_int/(-1) == min_int). 1702 Node* shift = _igvn.intcon(31); 1703 set_ctrl(shift, C->root()); 1704 Node* sign = new RShiftINode(offset, shift); 1705 register_new_node(sign, pre_ctrl); 1706 offset = new AndINode(offset, sign); 1707 register_new_node(offset, pre_ctrl); 1708 } else { 1709 assert(low_limit->get_int() == 0, "wrong low limit for range check"); 1710 // The only problem we have here is when offset == min_int 1711 // since (0-min_int) == min_int. It may be fine for stride > 0 1712 // but for stride < 0 X will be < original_limit. To avoid it 1713 // max(pre_limit, original_limit) is used in do_range_check(). 1714 } 1715 // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond); 1716 *pre_limit = adjust_limit((-stride_con), scale, offset, low_limit, *pre_limit, pre_ctrl); 1717 1718 } else { // stride_con*scale_con < 0 1719 // For negative stride*scale pre-loop checks for overflow and 1720 // post-loop for underflow.
1721 // 1722 // The overflow limit: scale*I+offset < upper_limit 1723 // For pre-loop compute 1724 // NOT(scale*I+offset < upper_limit) 1725 // scale*I+offset >= upper_limit 1726 // scale*I+offset+1 > upper_limit 1727 // ( if (scale < 0) /* and stride > 0 */ 1728 // I < (upper_limit-(offset+1))/scale 1729 // else /* scale > 0 and stride < 0 */ 1730 // I > (upper_limit-(offset+1))/scale 1731 // ) 1732 // 1733 // (upper_limit-offset-1) may underflow or overflow. 1734 // To avoid it min(pre_limit, original_limit) is used 1735 // in do_range_check() for stride > 0 and max() for < 0. 1736 Node *one = _igvn.intcon(1); 1737 set_ctrl(one, C->root()); 1738 1739 Node *plus_one = new AddINode(offset, one); 1740 register_new_node( plus_one, pre_ctrl ); 1741 // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond); 1742 *pre_limit = adjust_limit((-stride_con), scale, plus_one, upper_limit, *pre_limit, pre_ctrl); 1743 1744 if (low_limit->get_int() == -max_jint) { 1745 if (!RangeLimitCheck) return; 1746 // We need this guard when scale*main_limit+offset >= limit 1747 // due to underflow. So we need to execute the main-loop while 1748 // scale*I+offset+1 > min_int. But (min_int-offset-1) will 1749 // underflow when (offset+1) > 0 and X will be < main_limit 1750 // when scale < 0 (and stride > 0). To avoid it we replace 1751 // positive (offset+1) with 0. 1752 // 1753 // Also (min_int+1 == -max_int) is used instead of min_int here 1754 // to avoid problem with scale == -1 (min_int/(-1) == min_int). 1755 Node* shift = _igvn.intcon(31); 1756 set_ctrl(shift, C->root()); 1757 Node* sign = new RShiftINode(plus_one, shift); 1758 register_new_node(sign, pre_ctrl); 1759 plus_one = new AndINode(plus_one, sign); 1760 register_new_node(plus_one, pre_ctrl); 1761 } else { 1762 assert(low_limit->get_int() == 0, "wrong low limit for range check"); 1763 // The only problem we have here is when offset == max_int 1764 // since (max_int+1) == min_int and (0-min_int) == min_int. 1765 // But it is fine since the main loop will either have 1766 // fewer iterations or will be skipped in such a case. 1767 } 1768 // The underflow limit: low_limit <= scale*I+offset.
1769 // For main-loop compute 1770 // scale*I+offset+1 > low_limit 1771 // ( if (scale < 0) /* and stride > 0 */ 1772 // I < (low_limit-(offset+1))/scale 1773 // else /* scale > 0 and stride < 0 */ 1774 // I > (low_limit-(offset+1))/scale 1775 // ) 1776 1777 *main_limit = adjust_limit(stride_con, scale, plus_one, low_limit, *main_limit, pre_ctrl); 1778 } 1779 } 1780 1781 1782 //------------------------------is_scaled_iv--------------------------------- 1783 // Return true if exp is a constant times an induction var 1784 bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, int* p_scale) { 1785 if (exp == iv) { 1786 if (p_scale != NULL) { 1787 *p_scale = 1; 1788 } 1789 return true; 1790 } 1791 int opc = exp->Opcode(); 1792 if (opc == Op_MulI) { 1793 if (exp->in(1) == iv && exp->in(2)->is_Con()) { 1794 if (p_scale != NULL) { 1795 *p_scale = exp->in(2)->get_int(); 1796 } 1797 return true; 1798 } 1799 if (exp->in(2) == iv && exp->in(1)->is_Con()) { 1800 if (p_scale != NULL) { 1801 *p_scale = exp->in(1)->get_int(); 1802 } 1803 return true; 1804 } 1805 } else if (opc == Op_LShiftI) { 1806 if (exp->in(1) == iv && exp->in(2)->is_Con()) { 1807 if (p_scale != NULL) { 1808 *p_scale = 1 << exp->in(2)->get_int(); 1809 } 1810 return true; 1811 } 1812 } 1813 return false; 1814 } 1815 1816 //-----------------------------is_scaled_iv_plus_offset------------------------------ 1817 // Return true if exp is a simple induction variable expression: k1*iv + (invar + k2) 1818 bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth) { 1819 if (is_scaled_iv(exp, iv, p_scale)) { 1820 if (p_offset != NULL) { 1821 Node *zero = _igvn.intcon(0); 1822 set_ctrl(zero, C->root()); 1823 *p_offset = zero; 1824 } 1825 return true; 1826 } 1827 int opc = exp->Opcode(); 1828 if (opc == Op_AddI) { 1829 if (is_scaled_iv(exp->in(1), iv, p_scale)) { 1830 if (p_offset != NULL) { 1831 *p_offset = exp->in(2); 1832 } 1833 return true; 1834 } 1835 if (exp->in(2)->is_Con()) { 1836 Node* offset2 = NULL; 1837 if (depth < 2 && 1838 is_scaled_iv_plus_offset(exp->in(1), iv, p_scale, 1839 p_offset != NULL ? &offset2 : NULL, depth+1)) { 1840 if (p_offset != NULL) { 1841 Node *ctrl_off2 = get_ctrl(offset2); 1842 Node* offset = new AddINode(offset2, exp->in(2)); 1843 register_new_node(offset, ctrl_off2); 1844 *p_offset = offset; 1845 } 1846 return true; 1847 } 1848 } 1849 } else if (opc == Op_SubI) { 1850 if (is_scaled_iv(exp->in(1), iv, p_scale)) { 1851 if (p_offset != NULL) { 1852 Node *zero = _igvn.intcon(0); 1853 set_ctrl(zero, C->root()); 1854 Node *ctrl_off = get_ctrl(exp->in(2)); 1855 Node* offset = new SubINode(zero, exp->in(2)); 1856 register_new_node(offset, ctrl_off); 1857 *p_offset = offset; 1858 } 1859 return true; 1860 } 1861 if (is_scaled_iv(exp->in(2), iv, p_scale)) { 1862 if (p_offset != NULL) { 1863 *p_scale *= -1; 1864 *p_offset = exp->in(1); 1865 } 1866 return true; 1867 } 1868 } 1869 return false; 1870 } 1871 1872 //------------------------------do_range_check--------------------------------- 1873 // Eliminate range-checks and other trip-counter vs loop-invariant tests. 
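// For a typical Java range check of the form
//   if (scale*i + offset <u a.length) a[...] = ...;  else uncommon trap
// the pre- and main-loop limits are adjusted through add_constraint() so
// that the main loop only runs iterations on which the check must pass;
// the now-redundant test is then replaced by a constant and goes dead.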
1874 void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) { 1875 #ifndef PRODUCT 1876 if (PrintOpto && VerifyLoopOptimizations) { 1877 tty->print("Range Check Elimination "); 1878 loop->dump_head(); 1879 } else if (TraceLoopOpts) { 1880 tty->print("RangeCheck "); 1881 loop->dump_head(); 1882 } 1883 #endif 1884 assert(RangeCheckElimination, ""); 1885 CountedLoopNode *cl = loop->_head->as_CountedLoop(); 1886 assert(cl->is_main_loop(), ""); 1887 1888 // protect against stride not being a constant 1889 if (!cl->stride_is_con()) 1890 return; 1891 1892 // Find the trip counter; we are iteration splitting based on it 1893 Node *trip_counter = cl->phi(); 1894 // Find the main loop limit; we will trim its iterations 1895 // to not ever trip end tests 1896 Node *main_limit = cl->limit(); 1897 1898 // Need to find the main-loop zero-trip guard 1899 Node *ctrl = cl->in(LoopNode::EntryControl); 1900 assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, ""); 1901 Node *iffm = ctrl->in(0); 1902 assert(iffm->Opcode() == Op_If, ""); 1903 Node *bolzm = iffm->in(1); 1904 assert(bolzm->Opcode() == Op_Bool, ""); 1905 Node *cmpzm = bolzm->in(1); 1906 assert(cmpzm->is_Cmp(), ""); 1907 Node *opqzm = cmpzm->in(2); 1908 // Cannot optimize a loop if the zero-trip Opaque1 node is optimized 1909 // away and then another round of loop opts attempted. 1910 if (opqzm->Opcode() != Op_Opaque1) 1911 return; 1912 assert(opqzm->in(1) == main_limit, "do not understand situation"); 1913 1914 // Find the pre-loop limit; we will expand its iterations to 1915 // not ever trip low tests. 1916 Node *p_f = iffm->in(0); 1917 // pre loop may have been optimized out 1918 if (p_f->Opcode() != Op_IfFalse) { 1919 return; 1920 } 1921 CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd(); 1922 assert(pre_end->loopnode()->is_pre_loop(), ""); 1923 Node *pre_opaq1 = pre_end->limit(); 1924 // Occasionally it's possible for a pre-loop Opaque1 node to be 1925 // optimized away and then another round of loop opts attempted. 1926 // We cannot optimize this particular loop in that case. 1927 if (pre_opaq1->Opcode() != Op_Opaque1) 1928 return; 1929 Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1; 1930 Node *pre_limit = pre_opaq->in(1); 1931 1932 // Where do we put new limit calculations 1933 Node *pre_ctrl = pre_end->loopnode()->in(LoopNode::EntryControl); 1934 1935 // Ensure the original loop limit is available from the 1936 // pre-loop Opaque1 node. 1937 Node *orig_limit = pre_opaq->original_loop_limit(); 1938 if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP) 1939 return; 1940 1941 // Must know if it's a count-up or count-down loop 1942 1943 int stride_con = cl->stride_con(); 1944 Node *zero = _igvn.intcon(0); 1945 Node *one = _igvn.intcon(1); 1946 // Use symmetrical int range [-max_jint,max_jint] 1947 Node *mini = _igvn.intcon(-max_jint); 1948 set_ctrl(zero, C->root()); 1949 set_ctrl(one, C->root()); 1950 set_ctrl(mini, C->root()); 1951 1952 // Range checks that do not dominate the loop backedge (i.e. 1953 // conditionally executed) can lengthen the pre loop limit beyond 1954 // the original loop limit. To prevent this, the pre limit is 1955 // (for stride > 0) MINed with the original loop limit (MAXed for 1956 // stride < 0) when some range_check (rc) is conditionally 1957 // executed. 1958 bool conditional_rc = false; 1959 1960 // Check loop body for tests of trip-counter plus loop-invariant vs 1961 // loop-invariant.
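// Array range checks appear as unsigned compares (CmpU with BoolTest::lt);
// ordinary signed compares (gt/ge/le) are normalized to 'lt' in the switch
// below before add_constraint() is applied.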
1962 for( uint i = 0; i < loop->_body.size(); i++ ) { 1963 Node *iff = loop->_body[i]; 1964 if( iff->Opcode() == Op_If ) { // Test? 1965 1966 // Test is an IfNode, has 2 projections. If BOTH are in the loop 1967 // we need loop unswitching instead of iteration splitting. 1968 Node *exit = loop->is_loop_exit(iff); 1969 if( !exit ) continue; 1970 int flip = (exit->Opcode() == Op_IfTrue) ? 1 : 0; 1971 1972 // Get boolean condition to test 1973 Node *i1 = iff->in(1); 1974 if( !i1->is_Bool() ) continue; 1975 BoolNode *bol = i1->as_Bool(); 1976 BoolTest b_test = bol->_test; 1977 // Flip sense of test if exit condition is flipped 1978 if( flip ) 1979 b_test = b_test.negate(); 1980 1981 // Get compare 1982 Node *cmp = bol->in(1); 1983 1984 // Look for trip_counter + offset vs limit 1985 Node *rc_exp = cmp->in(1); 1986 Node *limit = cmp->in(2); 1987 jint scale_con= 1; // Assume trip counter not scaled 1988 1989 Node *limit_c = get_ctrl(limit); 1990 if( loop->is_member(get_loop(limit_c) ) ) { 1991 // Compare might have operands swapped; commute them 1992 b_test = b_test.commute(); 1993 rc_exp = cmp->in(2); 1994 limit = cmp->in(1); 1995 limit_c = get_ctrl(limit); 1996 if( loop->is_member(get_loop(limit_c) ) ) 1997 continue; // Both inputs are loop varying; cannot RCE 1998 } 1999 // Here we know 'limit' is loop invariant 2000 2001 // 'limit' maybe pinned below the zero trip test (probably from a 2002 // previous round of rce), in which case, it can't be used in the 2003 // zero trip test expression which must occur before the zero test's if. 2004 if( limit_c == ctrl ) { 2005 continue; // Don't rce this check but continue looking for other candidates. 2006 } 2007 2008 // Check for scaled induction variable plus an offset 2009 Node *offset = NULL; 2010 2011 if (!is_scaled_iv_plus_offset(rc_exp, trip_counter, &scale_con, &offset)) { 2012 continue; 2013 } 2014 2015 Node *offset_c = get_ctrl(offset); 2016 if( loop->is_member( get_loop(offset_c) ) ) 2017 continue; // Offset is not really loop invariant 2018 // Here we know 'offset' is loop invariant. 2019 2020 // As above for the 'limit', the 'offset' maybe pinned below the 2021 // zero trip test. 2022 if( offset_c == ctrl ) { 2023 continue; // Don't rce this check but continue looking for other candidates. 2024 } 2025 #ifdef ASSERT 2026 if (TraceRangeLimitCheck) { 2027 tty->print_cr("RC bool node%s", flip ? " flipped:" : ":"); 2028 bol->dump(2); 2029 } 2030 #endif 2031 // At this point we have the expression as: 2032 // scale_con * trip_counter + offset :: limit 2033 // where scale_con, offset and limit are loop invariant. Trip_counter 2034 // monotonically increases by stride_con, a constant. Both (or either) 2035 // stride_con and scale_con can be negative which will flip about the 2036 // sense of the test. 2037 2038 // Adjust pre and main loop limits to guard the correct iteration set 2039 if( cmp->Opcode() == Op_CmpU ) {// Unsigned compare is really 2 tests 2040 if( b_test._test == BoolTest::lt ) { // Range checks always use lt 2041 // The underflow and overflow limits: 0 <= scale*I+offset < limit 2042 add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit ); 2043 if (!conditional_rc) { 2044 // (0-offset)/scale could be outside of loop iterations range. 
2045 conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck; 2046 } 2047 } else { 2048 #ifndef PRODUCT 2049 if( PrintOpto ) 2050 tty->print_cr("missed RCE opportunity"); 2051 #endif 2052 continue; // In release mode, ignore it 2053 } 2054 } else { // Otherwise work on normal compares 2055 switch( b_test._test ) { 2056 case BoolTest::gt: 2057 // Fall into GE case 2058 case BoolTest::ge: 2059 // Convert (I*scale+offset) >= Limit to (I*(-scale)+(-offset)) <= -Limit 2060 scale_con = -scale_con; 2061 offset = new SubINode( zero, offset ); 2062 register_new_node( offset, pre_ctrl ); 2063 limit = new SubINode( zero, limit ); 2064 register_new_node( limit, pre_ctrl ); 2065 // Fall into LE case 2066 case BoolTest::le: 2067 if (b_test._test != BoolTest::gt) { 2068 // Convert X <= Y to X < Y+1 2069 limit = new AddINode( limit, one ); 2070 register_new_node( limit, pre_ctrl ); 2071 } 2072 // Fall into LT case 2073 case BoolTest::lt: 2074 // The underflow and overflow limits: MIN_INT <= scale*I+offset < limit 2075 // Note: (MIN_INT+1 == -MAX_INT) is used instead of MIN_INT here 2076 // to avoid problem with scale == -1: MIN_INT/(-1) == MIN_INT. 2077 add_constraint( stride_con, scale_con, offset, mini, limit, pre_ctrl, &pre_limit, &main_limit ); 2078 if (!conditional_rc) { 2079 // ((MIN_INT+1)-offset)/scale could be outside of loop iterations range. 2080 // Note: negative offset is replaced with 0 but (MIN_INT+1)/scale could 2081 // still be outside of loop range. 2082 conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck; 2083 } 2084 break; 2085 default: 2086 #ifndef PRODUCT 2087 if( PrintOpto ) 2088 tty->print_cr("missed RCE opportunity"); 2089 #endif 2090 continue; // Unhandled case 2091 } 2092 } 2093 2094 // Kill the eliminated test 2095 C->set_major_progress(); 2096 Node *kill_con = _igvn.intcon( 1-flip ); 2097 set_ctrl(kill_con, C->root()); 2098 _igvn.replace_input_of(iff, 1, kill_con); 2099 // Find surviving projection 2100 assert(iff->is_If(), ""); 2101 ProjNode* dp = ((IfNode*)iff)->proj_out(1-flip); 2102 // Find loads off the surviving projection; remove their control edge 2103 for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) { 2104 Node* cd = dp->fast_out(i); // Control-dependent node 2105 if (cd->is_Load() && cd->depends_only_on_test()) { // Loads can now float around in the loop 2106 // Allow the load to float around in the loop, or before it 2107 // but NOT before the pre-loop. 2108 _igvn.replace_input_of(cd, 0, ctrl); // ctrl, not NULL 2109 --i; 2110 --imax; 2111 } 2112 } 2113 2114 } // End of is IF 2115 2116 } 2117 2118 // Update loop limits 2119 if (conditional_rc) { 2120 pre_limit = (stride_con > 0) ? (Node*)new MinINode(pre_limit, orig_limit) 2121 : (Node*)new MaxINode(pre_limit, orig_limit); 2122 register_new_node(pre_limit, pre_ctrl); 2123 } 2124 _igvn.replace_input_of(pre_opaq, 1, pre_limit); 2125 2126 // Note:: we are making the main loop limit no longer precise; 2127 // need to round up based on stride. 2128 cl->set_nonexact_trip_count(); 2129 if (!LoopLimitCheck && stride_con != 1 && stride_con != -1) { // Cutout for common case 2130 // "Standard" round-up logic: ([main_limit-init+(y-1)]/y)*y+init 2131 // Hopefully, compiler will optimize for powers of 2. 
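// For example, with init == 0, stride == 4 and main_limit == 17 the nodes
// below compute ((17 + 3) / 4) * 4 + 0 == 20, i.e. (main_limit - init) is
// rounded up to a whole multiple of the stride.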
2132 Node *ctrl = get_ctrl(main_limit); 2133 Node *stride = cl->stride(); 2134 Node *init = cl->init_trip()->uncast(); 2135 Node *span = new SubINode(main_limit,init); 2136 register_new_node(span,ctrl); 2137 Node *rndup = _igvn.intcon(stride_con + ((stride_con>0)?-1:1)); 2138 Node *add = new AddINode(span,rndup); 2139 register_new_node(add,ctrl); 2140 Node *div = new DivINode(0,add,stride); 2141 register_new_node(div,ctrl); 2142 Node *mul = new MulINode(div,stride); 2143 register_new_node(mul,ctrl); 2144 Node *newlim = new AddINode(mul,init); 2145 register_new_node(newlim,ctrl); 2146 main_limit = newlim; 2147 } 2148 2149 Node *main_cle = cl->loopexit(); 2150 Node *main_bol = main_cle->in(1); 2151 // Hacking loop bounds; need private copies of exit test 2152 if( main_bol->outcnt() > 1 ) {// BoolNode shared? 2153 main_bol = main_bol->clone();// Clone a private BoolNode 2154 register_new_node( main_bol, main_cle->in(0) ); 2155 _igvn.replace_input_of(main_cle, 1, main_bol); 2156 } 2157 Node *main_cmp = main_bol->in(1); 2158 if( main_cmp->outcnt() > 1 ) { // CmpNode shared? 2159 main_cmp = main_cmp->clone();// Clone a private CmpNode 2160 register_new_node( main_cmp, main_cle->in(0) ); 2161 _igvn.replace_input_of(main_bol, 1, main_cmp); 2162 } 2163 // Hack the now-private loop bounds 2164 _igvn.replace_input_of(main_cmp, 2, main_limit); 2165 // The OpaqueNode is unshared by design 2166 assert( opqzm->outcnt() == 1, "cannot hack shared node" ); 2167 _igvn.replace_input_of(opqzm, 1, main_limit); 2168 } 2169 2170 //------------------------------DCE_loop_body---------------------------------- 2171 // Remove simplistic dead code from loop body 2172 void IdealLoopTree::DCE_loop_body() { 2173 for( uint i = 0; i < _body.size(); i++ ) 2174 if( _body.at(i)->outcnt() == 0 ) 2175 _body.map( i--, _body.pop() ); 2176 } 2177 2178 2179 //------------------------------adjust_loop_exit_prob-------------------------- 2180 // Look for loop-exit tests with the 50/50 (or worse) guesses from the parsing stage. 2181 // Replace with a 1-in-10 exit guess. 2182 void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) { 2183 Node *test = tail(); 2184 while( test != _head ) { 2185 uint top = test->Opcode(); 2186 if( top == Op_IfTrue || top == Op_IfFalse ) { 2187 int test_con = ((ProjNode*)test)->_con; 2188 assert(top == (uint)(test_con? 
Op_IfTrue: Op_IfFalse), "sanity"); 2189 IfNode *iff = test->in(0)->as_If(); 2190 if( iff->outcnt() == 2 ) { // Ignore dead tests 2191 Node *bol = iff->in(1); 2192 if( bol && bol->req() > 1 && bol->in(1) && 2193 ((bol->in(1)->Opcode() == Op_StorePConditional ) || 2194 (bol->in(1)->Opcode() == Op_StoreIConditional ) || 2195 (bol->in(1)->Opcode() == Op_StoreLConditional ) || 2196 (bol->in(1)->Opcode() == Op_CompareAndSwapI ) || 2197 (bol->in(1)->Opcode() == Op_CompareAndSwapL ) || 2198 (bol->in(1)->Opcode() == Op_CompareAndSwapP ) || 2199 (bol->in(1)->Opcode() == Op_CompareAndSwapN ))) 2200 return; // Allocation loops RARELY take backedge 2201 // Find the OTHER exit path from the IF 2202 Node* ex = iff->proj_out(1-test_con); 2203 float p = iff->_prob; 2204 if( !phase->is_member( this, ex ) && iff->_fcnt == COUNT_UNKNOWN ) { 2205 if( top == Op_IfTrue ) { 2206 if( p < (PROB_FAIR + PROB_UNLIKELY_MAG(3))) { 2207 iff->_prob = PROB_STATIC_FREQUENT; 2208 } 2209 } else { 2210 if( p > (PROB_FAIR - PROB_UNLIKELY_MAG(3))) { 2211 iff->_prob = PROB_STATIC_INFREQUENT; 2212 } 2213 } 2214 } 2215 } 2216 } 2217 test = phase->idom(test); 2218 } 2219 } 2220 2221 2222 //------------------------------policy_do_remove_empty_loop-------------------- 2223 // Micro-benchmark spamming. Policy is to always remove empty loops. 2224 // The 'DO' part is to replace the trip counter with the value it will 2225 // have on the last iteration. This will break the loop. 2226 bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) { 2227 // Minimum size must be empty loop 2228 if (_body.size() > EMPTY_LOOP_SIZE) 2229 return false; 2230 2231 if (!_head->is_CountedLoop()) 2232 return false; // Dead loop 2233 CountedLoopNode *cl = _head->as_CountedLoop(); 2234 if (!cl->is_valid_counted_loop()) 2235 return false; // Malformed loop 2236 if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue)))) 2237 return false; // Infinite loop 2238 2239 #ifdef ASSERT 2240 // Ensure only one phi which is the iv. 2241 Node* iv = NULL; 2242 for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) { 2243 Node* n = cl->fast_out(i); 2244 if (n->Opcode() == Op_Phi) { 2245 assert(iv == NULL, "Too many phis" ); 2246 iv = n; 2247 } 2248 } 2249 assert(iv == cl->phi(), "Wrong phi" ); 2250 #endif 2251 2252 // main and post loops have explicitly created zero trip guard 2253 bool needs_guard = !cl->is_main_loop() && !cl->is_post_loop(); 2254 if (needs_guard) { 2255 // Skip guard if values not overlap. 2256 const TypeInt* init_t = phase->_igvn.type(cl->init_trip())->is_int(); 2257 const TypeInt* limit_t = phase->_igvn.type(cl->limit())->is_int(); 2258 int stride_con = cl->stride_con(); 2259 if (stride_con > 0) { 2260 needs_guard = (init_t->_hi >= limit_t->_lo); 2261 } else { 2262 needs_guard = (init_t->_lo <= limit_t->_hi); 2263 } 2264 } 2265 if (needs_guard) { 2266 // Check for an obvious zero trip guard. 
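// The pattern matched below is an entry If whose Bool uses the same test
// as the loop exit and compares init_trip against limit; if such a guard
// exists, entering the loop already implies at least one trip.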
2267 Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->in(LoopNode::EntryControl)); 2268 if (inctrl->Opcode() == Op_IfTrue) { 2269 // The test should look like just the backedge of a CountedLoop 2270 Node* iff = inctrl->in(0); 2271 if (iff->is_If()) { 2272 Node* bol = iff->in(1); 2273 if (bol->is_Bool() && bol->as_Bool()->_test._test == cl->loopexit()->test_trip()) { 2274 Node* cmp = bol->in(1); 2275 if (cmp->is_Cmp() && cmp->in(1) == cl->init_trip() && cmp->in(2) == cl->limit()) { 2276 needs_guard = false; 2277 } 2278 } 2279 } 2280 } 2281 } 2282 2283 #ifndef PRODUCT 2284 if (PrintOpto) { 2285 tty->print("Removing empty loop with%s zero trip guard", needs_guard ? "out" : ""); 2286 this->dump_head(); 2287 } else if (TraceLoopOpts) { 2288 tty->print("Empty with%s zero trip guard ", needs_guard ? "out" : ""); 2289 this->dump_head(); 2290 } 2291 #endif 2292 2293 if (needs_guard) { 2294 // Peel the loop to ensure there's a zero trip guard 2295 Node_List old_new; 2296 phase->do_peeling(this, old_new); 2297 } 2298 2299 // Replace the phi at loop head with the final value of the last 2300 // iteration. Then the CountedLoopEnd will collapse (backedge never 2301 // taken) and all loop-invariant uses of the exit values will be correct. 2302 Node *phi = cl->phi(); 2303 Node *exact_limit = phase->exact_limit(this); 2304 if (exact_limit != cl->limit()) { 2305 // We also need to replace the original limit to collapse loop exit. 2306 Node* cmp = cl->loopexit()->cmp_node(); 2307 assert(cl->limit() == cmp->in(2), "sanity"); 2308 phase->_igvn._worklist.push(cmp->in(2)); // put limit on worklist 2309 phase->_igvn.replace_input_of(cmp, 2, exact_limit); // put cmp on worklist 2310 } 2311 // Note: the final value after increment should not overflow since 2312 // counted loop has limit check predicate. 2313 Node *final = new SubINode( exact_limit, cl->stride() ); 2314 phase->register_new_node(final,cl->in(LoopNode::EntryControl)); 2315 phase->_igvn.replace_node(phi,final); 2316 phase->C->set_major_progress(); 2317 return true; 2318 } 2319 2320 //------------------------------policy_do_one_iteration_loop------------------- 2321 // Convert one iteration loop into normal code. 2322 bool IdealLoopTree::policy_do_one_iteration_loop( PhaseIdealLoop *phase ) { 2323 if (!_head->as_Loop()->is_valid_counted_loop()) 2324 return false; // Only for counted loop 2325 2326 CountedLoopNode *cl = _head->as_CountedLoop(); 2327 if (!cl->has_exact_trip_count() || cl->trip_count() != 1) { 2328 return false; 2329 } 2330 2331 #ifndef PRODUCT 2332 if(TraceLoopOpts) { 2333 tty->print("OneIteration "); 2334 this->dump_head(); 2335 } 2336 #endif 2337 2338 Node *init_n = cl->init_trip(); 2339 #ifdef ASSERT 2340 // Loop boundaries should be constant since trip count is exact. 2341 assert(init_n->get_int() + cl->stride_con() >= cl->limit()->get_int(), "should be one iteration"); 2342 #endif 2343 // Replace the phi at loop head with the value of the init_trip. 2344 // Then the CountedLoopEnd will collapse (backedge will not be taken) 2345 // and all loop-invariant uses of the exit values will be correct. 2346 phase->_igvn.replace_node(cl->phi(), cl->init_trip()); 2347 phase->C->set_major_progress(); 2348 return true; 2349 } 2350 2351 //============================================================================= 2352 //------------------------------iteration_split_impl--------------------------- 2353 bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) { 2354 // Compute exact loop trip count if possible. 
2355 compute_exact_trip_count(phase); 2356 2357 // Convert one iteration loop into normal code. 2358 if (policy_do_one_iteration_loop(phase)) 2359 return true; 2360 2361 // Check and remove empty loops (spam micro-benchmarks) 2362 if (policy_do_remove_empty_loop(phase)) 2363 return true; // Here we removed an empty loop 2364 2365 bool should_peel = policy_peeling(phase); // Should we peel? 2366 2367 bool should_unswitch = policy_unswitching(phase); 2368 2369 // Non-counted loops may be peeled; exactly 1 iteration is peeled. 2370 // This removes loop-invariant tests (usually null checks). 2371 if (!_head->is_CountedLoop()) { // Non-counted loop 2372 if (PartialPeelLoop && phase->partial_peel(this, old_new)) { 2373 // Partial peel succeeded so terminate this round of loop opts 2374 return false; 2375 } 2376 if (should_peel) { // Should we peel? 2377 #ifndef PRODUCT 2378 if (PrintOpto) tty->print_cr("should_peel"); 2379 #endif 2380 phase->do_peeling(this,old_new); 2381 } else if (should_unswitch) { 2382 phase->do_unswitching(this, old_new); 2383 } 2384 return true; 2385 } 2386 CountedLoopNode *cl = _head->as_CountedLoop(); 2387 2388 if (!cl->is_valid_counted_loop()) return true; // Ignore various kinds of broken loops 2389 2390 // Do nothing special to pre- and post- loops 2391 if (cl->is_pre_loop() || cl->is_post_loop()) return true; 2392 2393 // Compute loop trip count from profile data 2394 compute_profile_trip_cnt(phase); 2395 2396 // Before attempting fancy unrolling, RCE or alignment, see if we want 2397 // to completely unroll this loop or do loop unswitching. 2398 if (cl->is_normal_loop()) { 2399 if (should_unswitch) { 2400 phase->do_unswitching(this, old_new); 2401 return true; 2402 } 2403 bool should_maximally_unroll = policy_maximally_unroll(phase); 2404 if (should_maximally_unroll) { 2405 // Here we did some unrolling and peeling. Eventually we will 2406 // completely unroll this loop and it will no longer be a loop. 2407 phase->do_maximally_unroll(this,old_new); 2408 return true; 2409 } 2410 } 2411 2412 // Skip next optimizations if running low on nodes. Note that 2413 // policy_unswitching and policy_maximally_unroll have this check. 2414 int nodes_left = phase->C->max_node_limit() - phase->C->live_nodes(); 2415 if ((int)(2 * _body.size()) > nodes_left) { 2416 return true; 2417 } 2418 2419 // Counted loops may be peeled, may need some iterations run up 2420 // front for RCE, and may want to align loop refs to a cache 2421 // line. Thus we clone a full loop up front whose trip count is 2422 // at least 1 (if peeling), but may be several more. 2423 2424 // The main loop will start cache-line aligned with at least 1 2425 // iteration of the unrolled body (zero-trip test required) and 2426 // will have some range checks removed. 2427 2428 // A post-loop will finish any odd iterations (leftover after 2429 // unrolling), plus any needed for RCE purposes. 2430 2431 bool should_unroll = policy_unroll(phase); 2432 2433 bool should_rce = policy_range_check(phase); 2434 2435 bool should_align = policy_align(phase); 2436 2437 // If not RCE'ing (iteration splitting) or Aligning, then we do not 2438 // need a pre-loop. We may still need to peel an initial iteration but 2439 // we will not be needing an unknown number of pre-iterations. 2440 // 2441 // Basically, if may_rce_align reports FALSE first time through, 2442 // we will not be able to later do RCE or Aligning on this loop. 
2443 bool may_rce_align = !policy_peel_only(phase) || should_rce || should_align; 2444 2445 // If we have any of these conditions (RCE, alignment, unrolling) met, then 2446 // we switch to the pre-/main-/post-loop model. This model also covers 2447 // peeling. 2448 if (should_rce || should_align || should_unroll) { 2449 if (cl->is_normal_loop()) // Convert to 'pre/main/post' loops 2450 phase->insert_pre_post_loops(this,old_new, !may_rce_align); 2451 2452 // Adjust the pre- and main-loop limits to let the pre and post loops run 2453 // with full checks, but the main-loop with no checks. Remove said 2454 // checks from the main body. 2455 if (should_rce) 2456 phase->do_range_check(this,old_new); 2457 2458 // Double loop body for unrolling. Adjust the minimum-trip test (will do 2459 // twice as many iterations as before) and the main body limit (only do 2460 // an even number of trips). If we are peeling, we might enable some RCE 2461 // and we'd rather unroll the post-RCE'd loop SO... do not unroll if 2462 // peeling. 2463 if (should_unroll && !should_peel) { 2464 phase->mark_reductions(this); 2465 phase->do_unroll(this, old_new, true); 2466 } 2467 2468 // Adjust the pre-loop limits to align the main body 2469 // iterations. 2470 if (should_align) 2471 Unimplemented(); 2472 2473 } else { // Else we have an unchanged counted loop 2474 if (should_peel) // Might want to peel but do nothing else 2475 phase->do_peeling(this,old_new); 2476 } 2477 return true; 2478 } 2479 2480 2481 //============================================================================= 2482 //------------------------------iteration_split-------------------------------- 2483 bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new ) { 2484 // Recursively iteration split nested loops 2485 if (_child && !_child->iteration_split(phase, old_new)) 2486 return false; 2487 2488 // Clean out prior deadwood 2489 DCE_loop_body(); 2490 2491 2492 // Look for loop-exit tests with my 50/50 guesses from the Parsing stage. 2493 // Replace with a 1-in-10 exit guess. 2494 if (_parent /*not the root loop*/ && 2495 !_irreducible && 2496 // Also ignore the occasional dead backedge 2497 !tail()->is_top()) { 2498 adjust_loop_exit_prob(phase); 2499 } 2500 2501 // Gate unrolling, RCE and peeling efforts. 2502 if (!_child && // If not an inner loop, do not split 2503 !_irreducible && 2504 _allow_optimizations && 2505 !tail()->is_top()) { // Also ignore the occasional dead backedge 2506 if (!_has_call) { 2507 if (!iteration_split_impl(phase, old_new)) { 2508 return false; 2509 } 2510 } else if (policy_unswitching(phase)) { 2511 phase->do_unswitching(this, old_new); 2512 } 2513 } 2514 2515 // Minor offset re-organization to remove loop-fallout uses of 2516 // trip counter when there was no major reshaping. 2517 phase->reorg_offsets(this); 2518 2519 if (_next && !_next->iteration_split(phase, old_new)) 2520 return false; 2521 return true; 2522 } 2523 2524 2525 //============================================================================= 2526 // Process all the loops in the loop tree and replace any fill 2527 // patterns with an intrisc version. 
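// A typical loop recognized here is, roughly,
//   for (int i = init; i < limit; i++) a[i] = v;   // v loop invariant
// which is replaced by a single call to an array-fill stub routine.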
2528 bool PhaseIdealLoop::do_intrinsify_fill() { 2529 bool changed = false; 2530 for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) { 2531 IdealLoopTree* lpt = iter.current(); 2532 changed |= intrinsify_fill(lpt); 2533 } 2534 return changed; 2535 } 2536 2537 2538 // Examine an inner loop looking for a single store of an invariant 2539 // value in a unit stride loop. 2540 bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value, 2541 Node*& shift, Node*& con) { 2542 const char* msg = NULL; 2543 Node* msg_node = NULL; 2544 2545 store_value = NULL; 2546 con = NULL; 2547 shift = NULL; 2548 2549 // Process the loop looking for stores. If there are multiple 2550 // stores or extra control flow, give up at this point. 2551 CountedLoopNode* head = lpt->_head->as_CountedLoop(); 2552 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { 2553 Node* n = lpt->_body.at(i); 2554 if (n->outcnt() == 0) continue; // Ignore dead 2555 if (n->is_Store()) { 2556 if (store != NULL) { 2557 msg = "multiple stores"; 2558 break; 2559 } 2560 int opc = n->Opcode(); 2561 if (opc == Op_StoreP || opc == Op_StoreN || opc == Op_StoreNKlass || opc == Op_StoreCM) { 2562 msg = "oop fills not handled"; 2563 break; 2564 } 2565 Node* value = n->in(MemNode::ValueIn); 2566 if (!lpt->is_invariant(value)) { 2567 msg = "variant store value"; 2568 } else if (!_igvn.type(n->in(MemNode::Address))->isa_aryptr()) { 2569 msg = "not array address"; 2570 } 2571 store = n; 2572 store_value = value; 2573 } else if (n->is_If() && n != head->loopexit()) { 2574 msg = "extra control flow"; 2575 msg_node = n; 2576 } 2577 } 2578 2579 if (store == NULL) { 2580 // No store in loop 2581 return false; 2582 } 2583 2584 if (msg == NULL && head->stride_con() != 1) { 2585 // could handle negative strides too 2586 if (head->stride_con() < 0) { 2587 msg = "negative stride"; 2588 } else { 2589 msg = "non-unit stride"; 2590 } 2591 } 2592 2593 if (msg == NULL && !store->in(MemNode::Address)->is_AddP()) { 2594 msg = "can't handle store address"; 2595 msg_node = store->in(MemNode::Address); 2596 } 2597 2598 if (msg == NULL && 2599 (!store->in(MemNode::Memory)->is_Phi() || 2600 store->in(MemNode::Memory)->in(LoopNode::LoopBackControl) != store)) { 2601 msg = "store memory isn't proper phi"; 2602 msg_node = store->in(MemNode::Memory); 2603 } 2604 2605 // Make sure there is an appropriate fill routine 2606 BasicType t = store->as_Mem()->memory_type(); 2607 const char* fill_name; 2608 if (msg == NULL && 2609 StubRoutines::select_fill_function(t, false, fill_name) == NULL) { 2610 msg = "unsupported store"; 2611 msg_node = store; 2612 } 2613 2614 if (msg != NULL) { 2615 #ifndef PRODUCT 2616 if (TraceOptimizeFill) { 2617 tty->print_cr("not fill intrinsic candidate: %s", msg); 2618 if (msg_node != NULL) msg_node->dump(); 2619 } 2620 #endif 2621 return false; 2622 } 2623 2624 // Make sure the address expression can be handled. It should be 2625 // head->phi * elsize + con. head->phi might have a ConvI2L.
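// For an int[] store the unpacked address is typically
//   base + (ConvI2L(head->phi) << 2) + array_header_offset
// and the shift amount must match the element size, which is checked below.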
2626 Node* elements[4]; 2627 Node* conv = NULL; 2628 bool found_index = false; 2629 int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements)); 2630 for (int e = 0; e < count; e++) { 2631 Node* n = elements[e]; 2632 if (n->is_Con() && con == NULL) { 2633 con = n; 2634 } else if (n->Opcode() == Op_LShiftX && shift == NULL) { 2635 Node* value = n->in(1); 2636 #ifdef _LP64 2637 if (value->Opcode() == Op_ConvI2L) { 2638 conv = value; 2639 value = value->in(1); 2640 } 2641 #endif 2642 if (value != head->phi()) { 2643 msg = "unhandled shift in address"; 2644 } else { 2645 if (type2aelembytes(store->as_Mem()->memory_type(), true) != (1 << n->in(2)->get_int())) { 2646 msg = "scale doesn't match"; 2647 } else { 2648 found_index = true; 2649 shift = n; 2650 } 2651 } 2652 } else if (n->Opcode() == Op_ConvI2L && conv == NULL) { 2653 if (n->in(1) == head->phi()) { 2654 found_index = true; 2655 conv = n; 2656 } else { 2657 msg = "unhandled input to ConvI2L"; 2658 } 2659 } else if (n == head->phi()) { 2660 // no shift, check below for allowed cases 2661 found_index = true; 2662 } else { 2663 msg = "unhandled node in address"; 2664 msg_node = n; 2665 } 2666 } 2667 2668 if (count == -1) { 2669 msg = "malformed address expression"; 2670 msg_node = store; 2671 } 2672 2673 if (!found_index) { 2674 msg = "missing use of index"; 2675 } 2676 2677 // byte sized items won't have a shift 2678 if (msg == NULL && shift == NULL && t != T_BYTE && t != T_BOOLEAN) { 2679 msg = "can't find shift"; 2680 msg_node = store; 2681 } 2682 2683 if (msg != NULL) { 2684 #ifndef PRODUCT 2685 if (TraceOptimizeFill) { 2686 tty->print_cr("not fill intrinsic: %s", msg); 2687 if (msg_node != NULL) msg_node->dump(); 2688 } 2689 #endif 2690 return false; 2691 } 2692 2693 // No make sure all the other nodes in the loop can be handled 2694 VectorSet ok(Thread::current()->resource_area()); 2695 2696 // store related values are ok 2697 ok.set(store->_idx); 2698 ok.set(store->in(MemNode::Memory)->_idx); 2699 2700 CountedLoopEndNode* loop_exit = head->loopexit(); 2701 guarantee(loop_exit != NULL, "no loop exit node"); 2702 2703 // Loop structure is ok 2704 ok.set(head->_idx); 2705 ok.set(loop_exit->_idx); 2706 ok.set(head->phi()->_idx); 2707 ok.set(head->incr()->_idx); 2708 ok.set(loop_exit->cmp_node()->_idx); 2709 ok.set(loop_exit->in(1)->_idx); 2710 2711 // Address elements are ok 2712 if (con) ok.set(con->_idx); 2713 if (shift) ok.set(shift->_idx); 2714 if (conv) ok.set(conv->_idx); 2715 2716 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { 2717 Node* n = lpt->_body.at(i); 2718 if (n->outcnt() == 0) continue; // Ignore dead 2719 if (ok.test(n->_idx)) continue; 2720 // Backedge projection is ok 2721 if (n->is_IfTrue() && n->in(0) == loop_exit) continue; 2722 if (!n->is_AddP()) { 2723 msg = "unhandled node"; 2724 msg_node = n; 2725 break; 2726 } 2727 } 2728 2729 // Make sure no unexpected values are used outside the loop 2730 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { 2731 Node* n = lpt->_body.at(i); 2732 // These values can be replaced with other nodes if they are used 2733 // outside the loop. 
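// (The store, the loop exit, the incremented trip counter and the memory
// phi are rewired in intrinsify_fill() to the fill call's projections or
// to the loop limit, so uses of them outside the loop remain valid.)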
2734 if (n == store || n == loop_exit || n == head->incr() || n == store->in(MemNode::Memory)) continue; 2735 for (SimpleDUIterator iter(n); iter.has_next(); iter.next()) { 2736 Node* use = iter.get(); 2737 if (!lpt->_body.contains(use)) { 2738 msg = "node is used outside loop"; 2739 // lpt->_body.dump(); 2740 msg_node = n; 2741 break; 2742 } 2743 } 2744 } 2745 2746 #ifdef ASSERT 2747 if (TraceOptimizeFill) { 2748 if (msg != NULL) { 2749 tty->print_cr("no fill intrinsic: %s", msg); 2750 if (msg_node != NULL) msg_node->dump(); 2751 } else { 2752 tty->print_cr("fill intrinsic for:"); 2753 } 2754 store->dump(); 2755 if (Verbose) { 2756 lpt->_body.dump(); 2757 } 2758 } 2759 #endif 2760 2761 return msg == NULL; 2762 } 2763 2764 2765 2766 bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) { 2767 // Only for counted inner loops 2768 if (!lpt->is_counted() || !lpt->is_inner()) { 2769 return false; 2770 } 2771 2772 // Must have constant stride 2773 CountedLoopNode* head = lpt->_head->as_CountedLoop(); 2774 if (!head->is_valid_counted_loop() || !head->is_normal_loop()) { 2775 return false; 2776 } 2777 2778 // Check that the body only contains a store of a loop invariant 2779 // value that is indexed by the loop phi. 2780 Node* store = NULL; 2781 Node* store_value = NULL; 2782 Node* shift = NULL; 2783 Node* offset = NULL; 2784 if (!match_fill_loop(lpt, store, store_value, shift, offset)) { 2785 return false; 2786 } 2787 2788 #ifndef PRODUCT 2789 if (TraceLoopOpts) { 2790 tty->print("ArrayFill "); 2791 lpt->dump_head(); 2792 } 2793 #endif 2794 2795 // Now replace the whole loop body by a call to a fill routine that 2796 // covers the same region as the loop. 2797 Node* base = store->in(MemNode::Address)->as_AddP()->in(AddPNode::Base); 2798 2799 // Build an expression for the beginning of the copy region 2800 Node* index = head->init_trip(); 2801 #ifdef _LP64 2802 index = new ConvI2LNode(index); 2803 _igvn.register_new_node_with_optimizer(index); 2804 #endif 2805 if (shift != NULL) { 2806 // byte arrays don't require a shift but others do. 2807 index = new LShiftXNode(index, shift->in(2)); 2808 _igvn.register_new_node_with_optimizer(index); 2809 } 2810 index = new AddPNode(base, base, index); 2811 _igvn.register_new_node_with_optimizer(index); 2812 Node* from = new AddPNode(base, index, offset); 2813 _igvn.register_new_node_with_optimizer(from); 2814 // Compute the number of elements to copy 2815 Node* len = new SubINode(head->limit(), head->init_trip()); 2816 _igvn.register_new_node_with_optimizer(len); 2817 2818 BasicType t = store->as_Mem()->memory_type(); 2819 bool aligned = false; 2820 if (offset != NULL && head->init_trip()->is_Con()) { 2821 int element_size = type2aelembytes(t); 2822 aligned = (offset->find_intptr_t_type()->get_con() + head->init_trip()->get_int() * element_size) % HeapWordSize == 0; 2823 } 2824 2825 // Build a call to the fill routine 2826 const char* fill_name; 2827 address fill = StubRoutines::select_fill_function(t, aligned, fill_name); 2828 assert(fill != NULL, "what?"); 2829 2830 // Convert float/double to int/long for fill routines 2831 if (t == T_FLOAT) { 2832 store_value = new MoveF2INode(store_value); 2833 _igvn.register_new_node_with_optimizer(store_value); 2834 } else if (t == T_DOUBLE) { 2835 store_value = new MoveD2LNode(store_value); 2836 _igvn.register_new_node_with_optimizer(store_value); 2837 } 2838 2839 if (CCallingConventionRequiresIntsAsLongs && 2840 // See StubRoutines::select_fill_function for types. FLOAT has been converted to INT. 
2841 (t == T_FLOAT || t == T_INT || is_subword_type(t))) { 2842 store_value = new ConvI2LNode(store_value); 2843 _igvn.register_new_node_with_optimizer(store_value); 2844 } 2845 2846 Node* mem_phi = store->in(MemNode::Memory); 2847 Node* result_ctrl; 2848 Node* result_mem; 2849 const TypeFunc* call_type = OptoRuntime::array_fill_Type(); 2850 CallLeafNode *call = new CallLeafNoFPNode(call_type, fill, 2851 fill_name, TypeAryPtr::get_array_body_type(t)); 2852 uint cnt = 0; 2853 call->init_req(TypeFunc::Parms + cnt++, from); 2854 call->init_req(TypeFunc::Parms + cnt++, store_value); 2855 if (CCallingConventionRequiresIntsAsLongs) { 2856 call->init_req(TypeFunc::Parms + cnt++, C->top()); 2857 } 2858 #ifdef _LP64 2859 len = new ConvI2LNode(len); 2860 _igvn.register_new_node_with_optimizer(len); 2861 #endif 2862 call->init_req(TypeFunc::Parms + cnt++, len); 2863 #ifdef _LP64 2864 call->init_req(TypeFunc::Parms + cnt++, C->top()); 2865 #endif 2866 call->init_req(TypeFunc::Control, head->init_control()); 2867 call->init_req(TypeFunc::I_O, C->top()); // Does no I/O. 2868 call->init_req(TypeFunc::Memory, mem_phi->in(LoopNode::EntryControl)); 2869 call->init_req(TypeFunc::ReturnAdr, C->start()->proj_out(TypeFunc::ReturnAdr)); 2870 call->init_req(TypeFunc::FramePtr, C->start()->proj_out(TypeFunc::FramePtr)); 2871 _igvn.register_new_node_with_optimizer(call); 2872 result_ctrl = new ProjNode(call,TypeFunc::Control); 2873 _igvn.register_new_node_with_optimizer(result_ctrl); 2874 result_mem = new ProjNode(call,TypeFunc::Memory); 2875 _igvn.register_new_node_with_optimizer(result_mem); 2876 2877 /* Disable following optimization until proper fix (add missing checks). 2878 2879 // If this fill is tightly coupled to an allocation and overwrites 2880 // the whole body, allow it to take over the zeroing. 2881 AllocateNode* alloc = AllocateNode::Ideal_allocation(base, this); 2882 if (alloc != NULL && alloc->is_AllocateArray()) { 2883 Node* length = alloc->as_AllocateArray()->Ideal_length(); 2884 if (head->limit() == length && 2885 head->init_trip() == _igvn.intcon(0)) { 2886 if (TraceOptimizeFill) { 2887 tty->print_cr("Eliminated zeroing in allocation"); 2888 } 2889 alloc->maybe_set_complete(&_igvn); 2890 } else { 2891 #ifdef ASSERT 2892 if (TraceOptimizeFill) { 2893 tty->print_cr("filling array but bounds don't match"); 2894 alloc->dump(); 2895 head->init_trip()->dump(); 2896 head->limit()->dump(); 2897 length->dump(); 2898 } 2899 #endif 2900 } 2901 } 2902 */ 2903 2904 // Redirect the old control and memory edges that are outside the loop. 2905 Node* exit = head->loopexit()->proj_out(0); 2906 // Sometimes the memory phi of the head is used as the outgoing 2907 // state of the loop. It's safe in this case to replace it with the 2908 // result_mem. 2909 _igvn.replace_node(store->in(MemNode::Memory), result_mem); 2910 _igvn.replace_node(exit, result_ctrl); 2911 _igvn.replace_node(store, result_mem); 2912 // Any uses the increment outside of the loop become the loop limit. 2913 _igvn.replace_node(head->incr(), head->limit()); 2914 2915 // Disconnect the head from the loop. 2916 for (uint i = 0; i < lpt->_body.size(); i++) { 2917 Node* n = lpt->_body.at(i); 2918 _igvn.replace_node(n, C->top()); 2919 } 2920 2921 return true; 2922 }