1 /* 2 * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "opto/loopnode.hpp" 27 #include "opto/addnode.hpp" 28 #include "opto/callnode.hpp" 29 #include "opto/connode.hpp" 30 #include "opto/convertnode.hpp" 31 #include "opto/loopnode.hpp" 32 #include "opto/matcher.hpp" 33 #include "opto/mulnode.hpp" 34 #include "opto/opaquenode.hpp" 35 #include "opto/rootnode.hpp" 36 #include "opto/subnode.hpp" 37 #include <fenv.h> 38 #include <math.h> 39 40 /* 41 * The general idea of Loop Predication is to insert a predicate on the entry 42 * path to a loop, and raise a uncommon trap if the check of the condition fails. 43 * The condition checks are promoted from inside the loop body, and thus 44 * the checks inside the loop could be eliminated. Currently, loop predication 45 * optimization has been applied to remove array range check and loop invariant 46 * checks (such as null checks). 
47 */ 48 49 //-------------------------------register_control------------------------- 50 void PhaseIdealLoop::register_control(Node* n, IdealLoopTree *loop, Node* pred) { 51 assert(n->is_CFG(), "must be control node"); 52 _igvn.register_new_node_with_optimizer(n); 53 loop->_body.push(n); 54 set_loop(n, loop); 55 // When called from beautify_loops() idom is not constructed yet. 56 if (_idom != NULL) { 57 set_idom(n, pred, dom_depth(pred)); 58 } 59 } 60 61 //------------------------------create_new_if_for_predicate------------------------ 62 // create a new if above the uct_if_pattern for the predicate to be promoted. 63 // 64 // before after 65 // ---------- ---------- 66 // ctrl ctrl 67 // | | 68 // | | 69 // v v 70 // iff new_iff 71 // / \ / \ 72 // / \ / \ 73 // v v v v 74 // uncommon_proj cont_proj if_uct if_cont 75 // \ | | | | 76 // \ | | | | 77 // v v v | v 78 // rgn loop | iff 79 // | | / \ 80 // | | / \ 81 // v | v v 82 // uncommon_trap | uncommon_proj cont_proj 83 // \ \ | | 84 // \ \ | | 85 // v v v v 86 // rgn loop 87 // | 88 // | 89 // v 90 // uncommon_trap 91 // 92 // 93 // We will create a region to guard the uct call if there is no one there. 94 // The true projecttion (if_cont) of the new_iff is returned. 95 // This code is also used to clone predicates to cloned loops. 
96 ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, 97 Deoptimization::DeoptReason reason, 98 int opcode) { 99 assert(cont_proj->is_uncommon_trap_if_pattern(reason), "must be a uct if pattern!"); 100 IfNode* iff = cont_proj->in(0)->as_If(); 101 102 ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con); 103 Node *rgn = uncommon_proj->unique_ctrl_out(); 104 assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct"); 105 106 uint proj_index = 1; // region's edge corresponding to uncommon_proj 107 if (!rgn->is_Region()) { // create a region to guard the call 108 assert(rgn->is_Call(), "must be call uct"); 109 CallNode* call = rgn->as_Call(); 110 IdealLoopTree* loop = get_loop(call); 111 rgn = new RegionNode(1); 112 rgn->add_req(uncommon_proj); 113 register_control(rgn, loop, uncommon_proj); 114 _igvn.replace_input_of(call, 0, rgn); 115 // When called from beautify_loops() idom is not constructed yet. 116 if (_idom != NULL) { 117 set_idom(call, rgn, dom_depth(rgn)); 118 } 119 for (DUIterator_Fast imax, i = uncommon_proj->fast_outs(imax); i < imax; i++) { 120 Node* n = uncommon_proj->fast_out(i); 121 if (n->is_Load() || n->is_Store()) { 122 _igvn.replace_input_of(n, 0, rgn); 123 --i; --imax; 124 } 125 } 126 } else { 127 // Find region's edge corresponding to uncommon_proj 128 for (; proj_index < rgn->req(); proj_index++) 129 if (rgn->in(proj_index) == uncommon_proj) break; 130 assert(proj_index < rgn->req(), "sanity"); 131 } 132 133 Node* entry = iff->in(0); 134 if (new_entry != NULL) { 135 // Clonning the predicate to new location. 
136 entry = new_entry; 137 } 138 // Create new_iff 139 IdealLoopTree* lp = get_loop(entry); 140 IfNode* new_iff = NULL; 141 if (opcode == Op_If) { 142 new_iff = new IfNode(entry, iff->in(1), iff->_prob, iff->_fcnt); 143 } else { 144 assert(opcode == Op_RangeCheck, "no other if variant here"); 145 new_iff = new RangeCheckNode(entry, iff->in(1), iff->_prob, iff->_fcnt); 146 } 147 register_control(new_iff, lp, entry); 148 Node *if_cont = new IfTrueNode(new_iff); 149 Node *if_uct = new IfFalseNode(new_iff); 150 if (cont_proj->is_IfFalse()) { 151 // Swap 152 Node* tmp = if_uct; if_uct = if_cont; if_cont = tmp; 153 } 154 register_control(if_cont, lp, new_iff); 155 register_control(if_uct, get_loop(rgn), new_iff); 156 157 // if_uct to rgn 158 _igvn.hash_delete(rgn); 159 rgn->add_req(if_uct); 160 // When called from beautify_loops() idom is not constructed yet. 161 if (_idom != NULL) { 162 Node* ridom = idom(rgn); 163 Node* nrdom = dom_lca(ridom, new_iff); 164 set_idom(rgn, nrdom, dom_depth(rgn)); 165 } 166 167 // If rgn has phis add new edges which has the same 168 // value as on original uncommon_proj pass. 
169 assert(rgn->in(rgn->req() -1) == if_uct, "new edge should be last"); 170 bool has_phi = false; 171 for (DUIterator_Fast imax, i = rgn->fast_outs(imax); i < imax; i++) { 172 Node* use = rgn->fast_out(i); 173 if (use->is_Phi() && use->outcnt() > 0) { 174 assert(use->in(0) == rgn, ""); 175 _igvn.rehash_node_delayed(use); 176 use->add_req(use->in(proj_index)); 177 has_phi = true; 178 } 179 } 180 assert(!has_phi || rgn->req() > 3, "no phis when region is created"); 181 182 if (new_entry == NULL) { 183 // Attach if_cont to iff 184 _igvn.replace_input_of(iff, 0, if_cont); 185 if (_idom != NULL) { 186 set_idom(iff, if_cont, dom_depth(iff)); 187 } 188 } 189 return if_cont->as_Proj(); 190 } 191 192 //------------------------------create_new_if_for_predicate------------------------ 193 // Create a new if below new_entry for the predicate to be cloned (IGVN optimization) 194 ProjNode* PhaseIterGVN::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, 195 Deoptimization::DeoptReason reason, 196 int opcode) { 197 assert(new_entry != 0, "only used for clone predicate"); 198 assert(cont_proj->is_uncommon_trap_if_pattern(reason), "must be a uct if pattern!"); 199 IfNode* iff = cont_proj->in(0)->as_If(); 200 201 ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con); 202 Node *rgn = uncommon_proj->unique_ctrl_out(); 203 assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct"); 204 205 uint proj_index = 1; // region's edge corresponding to uncommon_proj 206 if (!rgn->is_Region()) { // create a region to guard the call 207 assert(rgn->is_Call(), "must be call uct"); 208 CallNode* call = rgn->as_Call(); 209 rgn = new RegionNode(1); 210 register_new_node_with_optimizer(rgn); 211 rgn->add_req(uncommon_proj); 212 replace_input_of(call, 0, rgn); 213 } else { 214 // Find region's edge corresponding to uncommon_proj 215 for (; proj_index < rgn->req(); proj_index++) 216 if (rgn->in(proj_index) == uncommon_proj) break; 217 assert(proj_index < 
rgn->req(), "sanity"); 218 } 219 220 // Create new_iff in new location. 221 IfNode* new_iff = NULL; 222 if (opcode == Op_If) { 223 new_iff = new IfNode(new_entry, iff->in(1), iff->_prob, iff->_fcnt); 224 } else { 225 assert(opcode == Op_RangeCheck, "no other if variant here"); 226 new_iff = new RangeCheckNode(new_entry, iff->in(1), iff->_prob, iff->_fcnt); 227 } 228 229 register_new_node_with_optimizer(new_iff); 230 Node *if_cont = new IfTrueNode(new_iff); 231 Node *if_uct = new IfFalseNode(new_iff); 232 if (cont_proj->is_IfFalse()) { 233 // Swap 234 Node* tmp = if_uct; if_uct = if_cont; if_cont = tmp; 235 } 236 register_new_node_with_optimizer(if_cont); 237 register_new_node_with_optimizer(if_uct); 238 239 // if_uct to rgn 240 hash_delete(rgn); 241 rgn->add_req(if_uct); 242 243 // If rgn has phis add corresponding new edges which has the same 244 // value as on original uncommon_proj pass. 245 assert(rgn->in(rgn->req() -1) == if_uct, "new edge should be last"); 246 bool has_phi = false; 247 for (DUIterator_Fast imax, i = rgn->fast_outs(imax); i < imax; i++) { 248 Node* use = rgn->fast_out(i); 249 if (use->is_Phi() && use->outcnt() > 0) { 250 rehash_node_delayed(use); 251 use->add_req(use->in(proj_index)); 252 has_phi = true; 253 } 254 } 255 assert(!has_phi || rgn->req() > 3, "no phis when region is created"); 256 257 return if_cont->as_Proj(); 258 } 259 260 //--------------------------clone_predicate----------------------- 261 ProjNode* PhaseIdealLoop::clone_predicate(ProjNode* predicate_proj, Node* new_entry, 262 Deoptimization::DeoptReason reason, 263 PhaseIdealLoop* loop_phase, 264 PhaseIterGVN* igvn) { 265 ProjNode* new_predicate_proj; 266 if (loop_phase != NULL) { 267 new_predicate_proj = loop_phase->create_new_if_for_predicate(predicate_proj, new_entry, reason, Op_If); 268 } else { 269 new_predicate_proj = igvn->create_new_if_for_predicate(predicate_proj, new_entry, reason, Op_If); 270 } 271 IfNode* iff = new_predicate_proj->in(0)->as_If(); 272 Node* ctrl = 
iff->in(0); 273 274 // Match original condition since predicate's projections could be swapped. 275 assert(predicate_proj->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be"); 276 Node* opq = new Opaque1Node(igvn->C, predicate_proj->in(0)->in(1)->in(1)->in(1)); 277 igvn->C->add_predicate_opaq(opq); 278 279 Node* bol = new Conv2BNode(opq); 280 if (loop_phase != NULL) { 281 loop_phase->register_new_node(opq, ctrl); 282 loop_phase->register_new_node(bol, ctrl); 283 } else { 284 igvn->register_new_node_with_optimizer(opq); 285 igvn->register_new_node_with_optimizer(bol); 286 } 287 igvn->hash_delete(iff); 288 iff->set_req(1, bol); 289 return new_predicate_proj; 290 } 291 292 293 //--------------------------clone_loop_predicates----------------------- 294 // Interface from IGVN 295 Node* PhaseIterGVN::clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check) { 296 return PhaseIdealLoop::clone_loop_predicates(old_entry, new_entry, clone_limit_check, NULL, this); 297 } 298 299 // Interface from PhaseIdealLoop 300 Node* PhaseIdealLoop::clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check) { 301 return clone_loop_predicates(old_entry, new_entry, clone_limit_check, this, &this->_igvn); 302 } 303 304 // Clone loop predicates to cloned loops (peeled, unswitched, split_if). 
305 Node* PhaseIdealLoop::clone_loop_predicates(Node* old_entry, Node* new_entry, 306 bool clone_limit_check, 307 PhaseIdealLoop* loop_phase, 308 PhaseIterGVN* igvn) { 309 #ifdef ASSERT 310 if (new_entry == NULL || !(new_entry->is_Proj() || new_entry->is_Region() || new_entry->is_SafePoint())) { 311 if (new_entry != NULL) 312 new_entry->dump(); 313 assert(false, "not IfTrue, IfFalse, Region or SafePoint"); 314 } 315 #endif 316 // Search original predicates 317 Node* entry = old_entry; 318 ProjNode* limit_check_proj = NULL; 319 limit_check_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); 320 if (limit_check_proj != NULL) { 321 entry = entry->in(0)->in(0); 322 } 323 ProjNode* profile_predicate_proj = NULL; 324 ProjNode* predicate_proj = NULL; 325 if (UseProfiledLoopPredicate) { 326 profile_predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate); 327 if (profile_predicate_proj != NULL) { 328 entry = skip_loop_predicates(entry); 329 } 330 } 331 if (UseLoopPredicate) { 332 predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); 333 } 334 if (predicate_proj != NULL) { // right pattern that can be used by loop predication 335 // clone predicate 336 new_entry = clone_predicate(predicate_proj, new_entry, 337 Deoptimization::Reason_predicate, 338 loop_phase, igvn); 339 assert(new_entry != NULL && new_entry->is_Proj(), "IfTrue or IfFalse after clone predicate"); 340 if (TraceLoopPredicate) { 341 tty->print("Loop Predicate cloned: "); 342 debug_only( new_entry->in(0)->dump(); ); 343 } 344 } 345 if (profile_predicate_proj != NULL) { // right pattern that can be used by loop predication 346 // clone predicate 347 new_entry = clone_predicate(profile_predicate_proj, new_entry, 348 Deoptimization::Reason_profile_predicate, 349 loop_phase, igvn); 350 assert(new_entry != NULL && new_entry->is_Proj(), "IfTrue or IfFalse after clone predicate"); 351 if (TraceLoopPredicate) { 
352 tty->print("Loop Predicate cloned: "); 353 debug_only( new_entry->in(0)->dump(); ); 354 } 355 } 356 if (limit_check_proj != NULL && clone_limit_check) { 357 // Clone loop limit check last to insert it before loop. 358 // Don't clone a limit check which was already finalized 359 // for this counted loop (only one limit check is needed). 360 new_entry = clone_predicate(limit_check_proj, new_entry, 361 Deoptimization::Reason_loop_limit_check, 362 loop_phase, igvn); 363 assert(new_entry != NULL && new_entry->is_Proj(), "IfTrue or IfFalse after clone limit check"); 364 if (TraceLoopLimitCheck) { 365 tty->print("Loop Limit Check cloned: "); 366 debug_only( new_entry->in(0)->dump(); ) 367 } 368 } 369 return new_entry; 370 } 371 372 //--------------------------skip_loop_predicates------------------------------ 373 // Skip related predicates. 374 Node* PhaseIdealLoop::skip_loop_predicates(Node* entry) { 375 IfNode* iff = entry->in(0)->as_If(); 376 ProjNode* uncommon_proj = iff->proj_out(1 - entry->as_Proj()->_con); 377 Node* rgn = uncommon_proj->unique_ctrl_out(); 378 assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct"); 379 entry = entry->in(0)->in(0); 380 while (entry != NULL && entry->is_Proj() && entry->in(0)->is_If()) { 381 uncommon_proj = entry->in(0)->as_If()->proj_out(1 - entry->as_Proj()->_con); 382 if (uncommon_proj->unique_ctrl_out() != rgn) 383 break; 384 entry = entry->in(0)->in(0); 385 } 386 return entry; 387 } 388 389 Node* PhaseIdealLoop::skip_all_loop_predicates(Node* entry) { 390 Node* predicate = NULL; 391 predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); 392 if (predicate != NULL) { 393 entry = entry->in(0)->in(0); 394 } 395 if (UseProfiledLoopPredicate) { 396 predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate); 397 if (predicate != NULL) { // right pattern that can be used by loop predication 398 entry = skip_loop_predicates(entry); 399 } 400 } 
401 if (UseLoopPredicate) { 402 predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); 403 if (predicate != NULL) { // right pattern that can be used by loop predication 404 entry = skip_loop_predicates(entry); 405 } 406 } 407 return entry; 408 } 409 410 //--------------------------find_predicate_insertion_point------------------- 411 // Find a good location to insert a predicate 412 ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason) { 413 if (start_c == NULL || !start_c->is_Proj()) 414 return NULL; 415 if (start_c->as_Proj()->is_uncommon_trap_if_pattern(reason)) { 416 return start_c->as_Proj(); 417 } 418 return NULL; 419 } 420 421 //--------------------------find_predicate------------------------------------ 422 // Find a predicate 423 Node* PhaseIdealLoop::find_predicate(Node* entry) { 424 Node* predicate = NULL; 425 predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); 426 if (predicate != NULL) { // right pattern that can be used by loop predication 427 return entry; 428 } 429 if (UseLoopPredicate) { 430 predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); 431 if (predicate != NULL) { // right pattern that can be used by loop predication 432 return entry; 433 } 434 } 435 if (UseProfiledLoopPredicate) { 436 predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate); 437 if (predicate != NULL) { // right pattern that can be used by loop predication 438 return entry; 439 } 440 } 441 return NULL; 442 } 443 444 //------------------------------Invariance----------------------------------- 445 // Helper class for loop_predication_impl to compute invariance on the fly and 446 // clone invariants. 
447 class Invariance : public StackObj { 448 VectorSet _visited, _invariant; 449 Node_Stack _stack; 450 VectorSet _clone_visited; 451 Node_List _old_new; // map of old to new (clone) 452 IdealLoopTree* _lpt; 453 PhaseIdealLoop* _phase; 454 455 // Helper function to set up the invariance for invariance computation 456 // If n is a known invariant, set up directly. Otherwise, look up the 457 // the possibility to push n onto the stack for further processing. 458 void visit(Node* use, Node* n) { 459 if (_lpt->is_invariant(n)) { // known invariant 460 _invariant.set(n->_idx); 461 } else if (!n->is_CFG()) { 462 Node *n_ctrl = _phase->ctrl_or_self(n); 463 Node *u_ctrl = _phase->ctrl_or_self(use); // self if use is a CFG 464 if (_phase->is_dominator(n_ctrl, u_ctrl)) { 465 _stack.push(n, n->in(0) == NULL ? 1 : 0); 466 } 467 } 468 } 469 470 // Compute invariance for "the_node" and (possibly) all its inputs recursively 471 // on the fly 472 void compute_invariance(Node* n) { 473 assert(_visited.test(n->_idx), "must be"); 474 visit(n, n); 475 while (_stack.is_nonempty()) { 476 Node* n = _stack.node(); 477 uint idx = _stack.index(); 478 if (idx == n->req()) { // all inputs are processed 479 _stack.pop(); 480 // n is invariant if it's inputs are all invariant 481 bool all_inputs_invariant = true; 482 for (uint i = 0; i < n->req(); i++) { 483 Node* in = n->in(i); 484 if (in == NULL) continue; 485 assert(_visited.test(in->_idx), "must have visited input"); 486 if (!_invariant.test(in->_idx)) { // bad guy 487 all_inputs_invariant = false; 488 break; 489 } 490 } 491 if (all_inputs_invariant) { 492 // If n's control is a predicate that was moved out of the 493 // loop, it was marked invariant but n is only invariant if 494 // it depends only on that test. Otherwise, unless that test 495 // is out of the loop, it's not invariant. 
496 if (n->is_CFG() || n->depends_only_on_test() || n->in(0) == NULL || !_phase->is_member(_lpt, n->in(0))) { 497 _invariant.set(n->_idx); // I am a invariant too 498 } 499 } 500 } else { // process next input 501 _stack.set_index(idx + 1); 502 Node* m = n->in(idx); 503 if (m != NULL && !_visited.test_set(m->_idx)) { 504 visit(n, m); 505 } 506 } 507 } 508 } 509 510 // Helper function to set up _old_new map for clone_nodes. 511 // If n is a known invariant, set up directly ("clone" of n == n). 512 // Otherwise, push n onto the stack for real cloning. 513 void clone_visit(Node* n) { 514 assert(_invariant.test(n->_idx), "must be invariant"); 515 if (_lpt->is_invariant(n)) { // known invariant 516 _old_new.map(n->_idx, n); 517 } else { // to be cloned 518 assert(!n->is_CFG(), "should not see CFG here"); 519 _stack.push(n, n->in(0) == NULL ? 1 : 0); 520 } 521 } 522 523 // Clone "n" and (possibly) all its inputs recursively 524 void clone_nodes(Node* n, Node* ctrl) { 525 clone_visit(n); 526 while (_stack.is_nonempty()) { 527 Node* n = _stack.node(); 528 uint idx = _stack.index(); 529 if (idx == n->req()) { // all inputs processed, clone n! 
530 _stack.pop(); 531 // clone invariant node 532 Node* n_cl = n->clone(); 533 _old_new.map(n->_idx, n_cl); 534 _phase->register_new_node(n_cl, ctrl); 535 for (uint i = 0; i < n->req(); i++) { 536 Node* in = n_cl->in(i); 537 if (in == NULL) continue; 538 n_cl->set_req(i, _old_new[in->_idx]); 539 } 540 } else { // process next input 541 _stack.set_index(idx + 1); 542 Node* m = n->in(idx); 543 if (m != NULL && !_clone_visited.test_set(m->_idx)) { 544 clone_visit(m); // visit the input 545 } 546 } 547 } 548 } 549 550 public: 551 Invariance(Arena* area, IdealLoopTree* lpt) : 552 _lpt(lpt), _phase(lpt->_phase), 553 _visited(area), _invariant(area), _stack(area, 10 /* guess */), 554 _clone_visited(area), _old_new(area) 555 { 556 LoopNode* head = _lpt->_head->as_Loop(); 557 Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl); 558 if (entry->outcnt() != 1) { 559 // If a node is pinned between the predicates and the loop 560 // entry, we won't be able to move any node in the loop that 561 // depends on it above it in a predicate. Mark all those nodes 562 // as non loop invariatnt. 
563 Unique_Node_List wq; 564 wq.push(entry); 565 for (uint next = 0; next < wq.size(); ++next) { 566 Node *n = wq.at(next); 567 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 568 Node* u = n->fast_out(i); 569 if (!u->is_CFG()) { 570 Node* c = _phase->get_ctrl(u); 571 if (_lpt->is_member(_phase->get_loop(c)) || _phase->is_dominator(c, head)) { 572 _visited.set(u->_idx); 573 wq.push(u); 574 } 575 } 576 } 577 } 578 } 579 } 580 581 // Map old to n for invariance computation and clone 582 void map_ctrl(Node* old, Node* n) { 583 assert(old->is_CFG() && n->is_CFG(), "must be"); 584 _old_new.map(old->_idx, n); // "clone" of old is n 585 _invariant.set(old->_idx); // old is invariant 586 _clone_visited.set(old->_idx); 587 } 588 589 // Driver function to compute invariance 590 bool is_invariant(Node* n) { 591 if (!_visited.test_set(n->_idx)) 592 compute_invariance(n); 593 return (_invariant.test(n->_idx) != 0); 594 } 595 596 // Driver function to clone invariant 597 Node* clone(Node* n, Node* ctrl) { 598 assert(ctrl->is_CFG(), "must be"); 599 assert(_invariant.test(n->_idx), "must be an invariant"); 600 if (!_clone_visited.test(n->_idx)) 601 clone_nodes(n, ctrl); 602 return _old_new[n->_idx]; 603 } 604 }; 605 606 //------------------------------is_range_check_if ----------------------------------- 607 // Returns true if the predicate of iff is in "scale*iv + offset u< load_range(ptr)" format 608 // Note: this function is particularly designed for loop predication. 
We require load_range 609 // and offset to be loop invariant computed on the fly by "invar" 610 bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const { 611 if (!is_loop_exit(iff)) { 612 return false; 613 } 614 if (!iff->in(1)->is_Bool()) { 615 return false; 616 } 617 const BoolNode *bol = iff->in(1)->as_Bool(); 618 if (bol->_test._test != BoolTest::lt) { 619 return false; 620 } 621 if (!bol->in(1)->is_Cmp()) { 622 return false; 623 } 624 const CmpNode *cmp = bol->in(1)->as_Cmp(); 625 if (cmp->Opcode() != Op_CmpU) { 626 return false; 627 } 628 Node* range = cmp->in(2); 629 if (range->Opcode() != Op_LoadRange && !iff->is_RangeCheck()) { 630 const TypeInt* tint = phase->_igvn.type(range)->isa_int(); 631 if (tint == NULL || tint->empty() || tint->_lo < 0) { 632 // Allow predication on positive values that aren't LoadRanges. 633 // This allows optimization of loops where the length of the 634 // array is a known value and doesn't need to be loaded back 635 // from the array. 636 return false; 637 } 638 } 639 if (!invar.is_invariant(range)) { 640 return false; 641 } 642 Node *iv = _head->as_CountedLoop()->phi(); 643 int scale = 0; 644 Node *offset = NULL; 645 if (!phase->is_scaled_iv_plus_offset(cmp->in(1), iv, &scale, &offset)) { 646 return false; 647 } 648 if (offset && !invar.is_invariant(offset)) { // offset must be invariant 649 return false; 650 } 651 return true; 652 } 653 654 //------------------------------rc_predicate----------------------------------- 655 // Create a range check predicate 656 // 657 // for (i = init; i < limit; i += stride) { 658 // a[scale*i+offset] 659 // } 660 // 661 // Compute max(scale*i + offset) for init <= i < limit and build the predicate 662 // as "max(scale*i + offset) u< a.length". 
663 // 664 // There are two cases for max(scale*i + offset): 665 // (1) stride*scale > 0 666 // max(scale*i + offset) = scale*(limit-stride) + offset 667 // (2) stride*scale < 0 668 // max(scale*i + offset) = scale*init + offset 669 BoolNode* PhaseIdealLoop::rc_predicate(IdealLoopTree *loop, Node* ctrl, 670 int scale, Node* offset, 671 Node* init, Node* limit, jint stride, 672 Node* range, bool upper, bool &overflow) { 673 jint con_limit = (limit != NULL && limit->is_Con()) ? limit->get_int() : 0; 674 jint con_init = init->is_Con() ? init->get_int() : 0; 675 jint con_offset = offset->is_Con() ? offset->get_int() : 0; 676 677 stringStream* predString = NULL; 678 if (TraceLoopPredicate) { 679 predString = new stringStream(); 680 predString->print("rc_predicate "); 681 } 682 683 overflow = false; 684 Node* max_idx_expr = NULL; 685 const TypeInt* idx_type = TypeInt::INT; 686 if ((stride > 0) == (scale > 0) == upper) { 687 if (TraceLoopPredicate) { 688 if (limit->is_Con()) { 689 predString->print("(%d ", con_limit); 690 } else { 691 predString->print("(limit "); 692 } 693 predString->print("- %d) ", stride); 694 } 695 // Check if (limit - stride) may overflow 696 const TypeInt* limit_type = _igvn.type(limit)->isa_int(); 697 jint limit_lo = limit_type->_lo; 698 jint limit_hi = limit_type->_hi; 699 if ((stride > 0 && (java_subtract(limit_lo, stride) < limit_lo)) || 700 (stride < 0 && (java_subtract(limit_hi, stride) > limit_hi))) { 701 // No overflow possible 702 ConINode* con_stride = _igvn.intcon(stride); 703 set_ctrl(con_stride, C->root()); 704 max_idx_expr = new SubINode(limit, con_stride); 705 idx_type = TypeInt::make(limit_lo - stride, limit_hi - stride, limit_type->_widen); 706 } else { 707 // May overflow 708 overflow = true; 709 limit = new ConvI2LNode(limit); 710 register_new_node(limit, ctrl); 711 ConLNode* con_stride = _igvn.longcon(stride); 712 set_ctrl(con_stride, C->root()); 713 max_idx_expr = new SubLNode(limit, con_stride); 714 } 715 
register_new_node(max_idx_expr, ctrl); 716 } else { 717 if (TraceLoopPredicate) { 718 if (init->is_Con()) { 719 predString->print("%d ", con_init); 720 } else { 721 predString->print("init "); 722 } 723 } 724 idx_type = _igvn.type(init)->isa_int(); 725 max_idx_expr = init; 726 } 727 728 if (scale != 1) { 729 ConNode* con_scale = _igvn.intcon(scale); 730 set_ctrl(con_scale, C->root()); 731 if (TraceLoopPredicate) { 732 predString->print("* %d ", scale); 733 } 734 // Check if (scale * max_idx_expr) may overflow 735 const TypeInt* scale_type = TypeInt::make(scale); 736 MulINode* mul = new MulINode(max_idx_expr, con_scale); 737 idx_type = (TypeInt*)mul->mul_ring(idx_type, scale_type); 738 if (overflow || TypeInt::INT->higher_equal(idx_type)) { 739 // May overflow 740 mul->destruct(); 741 if (!overflow) { 742 max_idx_expr = new ConvI2LNode(max_idx_expr); 743 register_new_node(max_idx_expr, ctrl); 744 } 745 overflow = true; 746 con_scale = _igvn.longcon(scale); 747 set_ctrl(con_scale, C->root()); 748 max_idx_expr = new MulLNode(max_idx_expr, con_scale); 749 } else { 750 // No overflow possible 751 max_idx_expr = mul; 752 } 753 register_new_node(max_idx_expr, ctrl); 754 } 755 756 if (offset && (!offset->is_Con() || con_offset != 0)){ 757 if (TraceLoopPredicate) { 758 if (offset->is_Con()) { 759 predString->print("+ %d ", con_offset); 760 } else { 761 predString->print("+ offset"); 762 } 763 } 764 // Check if (max_idx_expr + offset) may overflow 765 const TypeInt* offset_type = _igvn.type(offset)->isa_int(); 766 jint lo = java_add(idx_type->_lo, offset_type->_lo); 767 jint hi = java_add(idx_type->_hi, offset_type->_hi); 768 if (overflow || (lo > hi) || 769 ((idx_type->_lo & offset_type->_lo) < 0 && lo >= 0) || 770 ((~(idx_type->_hi | offset_type->_hi)) < 0 && hi < 0)) { 771 // May overflow 772 if (!overflow) { 773 max_idx_expr = new ConvI2LNode(max_idx_expr); 774 register_new_node(max_idx_expr, ctrl); 775 } 776 overflow = true; 777 offset = new ConvI2LNode(offset); 778 
register_new_node(offset, ctrl); 779 max_idx_expr = new AddLNode(max_idx_expr, offset); 780 } else { 781 // No overflow possible 782 max_idx_expr = new AddINode(max_idx_expr, offset); 783 } 784 register_new_node(max_idx_expr, ctrl); 785 } 786 787 CmpNode* cmp = NULL; 788 if (overflow) { 789 // Integer expressions may overflow, do long comparison 790 range = new ConvI2LNode(range); 791 register_new_node(range, ctrl); 792 cmp = new CmpULNode(max_idx_expr, range); 793 } else { 794 cmp = new CmpUNode(max_idx_expr, range); 795 } 796 register_new_node(cmp, ctrl); 797 BoolNode* bol = new BoolNode(cmp, BoolTest::lt); 798 register_new_node(bol, ctrl); 799 800 if (TraceLoopPredicate) { 801 predString->print_cr("<u range"); 802 tty->print("%s", predString->as_string()); 803 } 804 return bol; 805 } 806 807 // Should loop predication look not only in the path from tail to head 808 // but also in branches of the loop body? 809 bool PhaseIdealLoop::loop_predication_should_follow_branches(IdealLoopTree *loop, ProjNode *predicate_proj, float& loop_trip_cnt) { 810 if (!UseProfiledLoopPredicate) { 811 return false; 812 } 813 814 if (predicate_proj == NULL) { 815 return false; 816 } 817 818 LoopNode* head = loop->_head->as_Loop(); 819 bool follow_branches = true; 820 IdealLoopTree* l = loop->_child; 821 // For leaf loops and loops with a single inner loop 822 while (l != NULL && follow_branches) { 823 IdealLoopTree* child = l; 824 if (child->_child != NULL && 825 child->_head->is_OuterStripMinedLoop()) { 826 assert(child->_child->_next == NULL, "only one inner loop for strip mined loop"); 827 assert(child->_child->_head->is_CountedLoop() && child->_child->_head->as_CountedLoop()->is_strip_mined(), "inner loop should be strip mined"); 828 child = child->_child; 829 } 830 if (child->_child != NULL || child->_irreducible) { 831 follow_branches = false; 832 } 833 l = l->_next; 834 } 835 if (follow_branches) { 836 loop->compute_profile_trip_cnt(this); 837 if 
(head->is_profile_trip_failed()) { // trip-count profiling failed for this loop: don't trust the counts
      follow_branches = false;
    } else {
      loop_trip_cnt = head->profile_trip_cnt();
      if (head->is_CountedLoop()) {
        CountedLoopNode* cl = head->as_CountedLoop();
        if (cl->phi() != NULL) {
          // Clamp the profiled trip count by the worst case implied by the
          // iv's type range and stride.
          const TypeInt* t = _igvn.type(cl->phi())->is_int();
          float worst_case_trip_cnt = ((float)t->_hi - t->_lo) / ABS(cl->stride_con());
          if (worst_case_trip_cnt < loop_trip_cnt) {
            loop_trip_cnt = worst_case_trip_cnt;
          }
        }
      }
    }
  }
  return follow_branches;
}

// Compute probability of reaching some CFG node from a fixed
// dominating CFG node
class PathFrequency {
private:
  Node* _dom; // frequencies are computed relative to this node
  Node_Stack _stack;                 // iterative post-order walk state
  GrowableArray<float> _freqs_stack; // keep track of intermediate result at regions
  GrowableArray<float> _freqs;       // cache frequencies, indexed by node _idx (-1 = not computed)
  PhaseIdealLoop* _phase;

public:
  PathFrequency(Node* dom, PhaseIdealLoop* phase)
    : _dom(dom), _stack(0), _phase(phase) {
  }

  // Return the estimated frequency (a value asserted to stay in [0,1]) with
  // which 'n' is reached from '_dom'.  Walks the CFG backwards from 'n' to
  // '_dom', multiplying branch probabilities along if/jump projections and
  // summing the incoming-path frequencies at Region nodes.  Completed results
  // are memoized in '_freqs'.  The FP rounding mode is temporarily switched
  // (toward-zero for products/sums, upward for the loop-exit-count sum) so
  // rounding cannot push a frequency above 1; it is restored to round-to-
  // nearest before returning.
  float to(Node* n) {
    // post order walk on the CFG graph from n to _dom
    fesetround(FE_TOWARDZERO); // make sure rounding doesn't push frequency above 1
    IdealLoopTree* loop = _phase->get_loop(_dom);
    Node* c = n;
    for (;;) {
      assert(_phase->get_loop(c) == loop, "have to be in the same loop");
      if (c == _dom || _freqs.at_grow(c->_idx, -1) >= 0) {
        // Reached the dominator (frequency 1 by definition) or a node whose
        // frequency is already cached: propagate it back up the walk stack.
        float f = c == _dom ? 1 : _freqs.at(c->_idx);
        Node* prev = c;
        while (_stack.size() > 0 && prev == c) {
          // NOTE: 'n' below shadows the method parameter; it is the node on
          // top of the walk stack, not the query node.
          Node* n = _stack.node();
          if (!n->is_Region()) {
            if (_phase->get_loop(n) != _phase->get_loop(n->in(0))) {
              // Found an inner loop: compute frequency of reaching this
              // exit from the loop head by looking at the number of
              // times each loop exit was taken
              IdealLoopTree* inner_loop = _phase->get_loop(n->in(0));
              LoopNode* inner_head = inner_loop->_head->as_Loop();
              assert(_phase->get_loop(n) == loop, "only 1 inner loop");
              if (inner_head->is_OuterStripMinedLoop()) {
                // Step through the strip-mined wrapper to the real inner loop.
                inner_head->verify_strip_mined(1);
                if (n->in(0) == inner_head->in(LoopNode::LoopBackControl)->in(0)) {
                  n = n->in(0)->in(0)->in(0);
                }
                inner_loop = inner_loop->_child;
                inner_head = inner_loop->_head->as_Loop();
                inner_head->verify_strip_mined(1);
              }
              fesetround(FE_UPWARD); // make sure rounding doesn't push frequency above 1
              float loop_exit_cnt = 0.0f;
              for (uint i = 0; i < inner_loop->_body.size(); i++) {
                Node *n = inner_loop->_body[i];
                float c = inner_loop->compute_profile_trip_cnt_helper(n);
                loop_exit_cnt += c;
              }
              fesetround(FE_TOWARDZERO);
              // Count how often this particular exit was taken, from the
              // exit test's probability and frequency.
              float cnt = -1;
              if (n->in(0)->is_If()) {
                IfNode* iff = n->in(0)->as_If();
                float p = n->in(0)->as_If()->_prob;
                if (n->Opcode() == Op_IfFalse) {
                  p = 1 - p;
                }
                if (p > PROB_MIN) {
                  cnt = p * iff->_fcnt;
                } else {
                  cnt = 0;
                }
              } else {
                assert(n->in(0)->is_Jump(), "unsupported node kind");
                JumpNode* jmp = n->in(0)->as_Jump();
                float p = n->in(0)->as_Jump()->_probs[n->as_JumpProj()->_con];
                cnt = p * jmp->_fcnt;
              }
              float this_exit_f = cnt > 0 ? cnt / loop_exit_cnt : 0;
              assert(this_exit_f <= 1 && this_exit_f >= 0, "Incorrect frequency");
              f = f * this_exit_f;
              assert(f <= 1 && f >= 0, "Incorrect frequency");
            } else {
              // Ordinary in-loop projection: multiply by the branch probability.
              float p = -1;
              if (n->in(0)->is_If()) {
                p = n->in(0)->as_If()->_prob;
                if (n->Opcode() == Op_IfFalse) {
                  p = 1 - p;
                }
              } else {
                assert(n->in(0)->is_Jump(), "unsupported node kind");
                p = n->in(0)->as_Jump()->_probs[n->as_JumpProj()->_con];
              }
              f = f * p;
              assert(f <= 1 && f >= 0, "Incorrect frequency");
            }
            _freqs.at_put_grow(n->_idx, (float)f, -1);
            _stack.pop();
          } else {
            // Region: accumulate this input path's frequency with what the
            // earlier inputs contributed, then move on to the next input.
            float prev_f = _freqs_stack.pop();
            float new_f = f;
            f = new_f + prev_f;
            assert(f <= 1 && f >= 0, "Incorrect frequency");
            uint i = _stack.index();
            if (i < n->req()) {
              c = n->in(i);
              _stack.set_index(i+1);
              _freqs_stack.push(f);
            } else {
              _freqs.at_put_grow(n->_idx, f, -1);
              _stack.pop();
            }
          }
        }
        if (_stack.size() == 0) {
          fesetround(FE_TONEAREST); // restore default rounding before returning
          assert(f >= 0 && f <= 1, "should have been computed");
          return f;
        }
      } else if (c->is_Loop()) {
        ShouldNotReachHere();
        c = c->in(LoopNode::EntryControl);
      } else if (c->is_Region()) {
        // Start summing over the region's inputs (input 1 first).
        _freqs_stack.push(0);
        _stack.push(c, 2);
        c = c->in(1);
      } else {
        if (c->is_IfProj()) {
          IfNode* iff = c->in(0)->as_If();
          if (iff->_prob == PROB_UNKNOWN) {
            // assume never taken
            _freqs.at_put_grow(c->_idx, 0, -1);
          } else if (_phase->get_loop(c) != _phase->get_loop(iff)) {
            if (iff->_fcnt == COUNT_UNKNOWN) {
              // assume never taken
              _freqs.at_put_grow(c->_idx, 0, -1);
            } else {
              // skip over loop
              _stack.push(c, 1);
              c = _phase->get_loop(c->in(0))->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl);
            }
          } else {
            _stack.push(c, 1);
            c = iff;
          }
        } else if (c->is_JumpProj()) {
          JumpNode* jmp = c->in(0)->as_Jump();
          if (_phase->get_loop(c) != _phase->get_loop(jmp)) {
            if (jmp->_fcnt == COUNT_UNKNOWN) {
              // assume never taken
              _freqs.at_put_grow(c->_idx, 0, -1);
            } else {
              // skip over loop
              _stack.push(c, 1);
              c = _phase->get_loop(c->in(0))->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl);
            }
          } else {
            _stack.push(c, 1);
            c = jmp;
          }
        } else if (c->Opcode() == Op_CatchProj &&
                   c->in(0)->Opcode() == Op_Catch &&
                   c->in(0)->in(0)->is_Proj() &&
                   c->in(0)->in(0)->in(0)->is_Call()) {
          // assume exceptions are never thrown
          uint con = c->as_Proj()->_con;
          if (con == CatchProjNode::fall_through_index) {
            Node* call = c->in(0)->in(0)->in(0)->in(0);
            if (_phase->get_loop(call) != _phase->get_loop(c)) {
              _freqs.at_put_grow(c->_idx, 0, -1);
            } else {
              c = call;
            }
          } else {
            assert(con >= CatchProjNode::catch_all_index, "what else?");
            _freqs.at_put_grow(c->_idx, 0, -1);
          }
        } else if (c->unique_ctrl_out() == NULL && !c->is_If() && !c->is_Jump()) {
          ShouldNotReachHere();
        } else {
          c = c->in(0);
        }
      }
    }
    ShouldNotReachHere();
    return -1;
  }
};

// Walk the CFG inside 'loop' starting at region 'n', away from the loop tail,
// and collect in 'if_proj_list' the projections of uncommon-trap if-patterns
// that profile data predicts are reached at least once per execution of the
// loop (pf.to(in) * loop_trip_cnt >= 1).  'seen' prevents re-visiting nodes;
// 'stack' holds the regions/projections still being explored.
void PhaseIdealLoop::loop_predication_follow_branches(Node *n, IdealLoopTree *loop, float loop_trip_cnt,
                                                      PathFrequency& pf, Node_Stack& stack, VectorSet& seen,
                                                      Node_List& if_proj_list) {
  assert(n->is_Region(), "start from a region");
  Node* tail = loop->tail();
  stack.push(n, 1);
  do {
    Node* c = stack.node();
    assert(c->is_Region() || c->is_IfProj(), "only region here");
    uint i = stack.index();

    if (i < c->req()) {
      stack.set_index(i+1);
      Node* in = c->in(i);
      // Follow this input upward until we hit a dominator of the tail or a
      // node we've already visited.
      while (!is_dominator(in, tail) && !seen.test_set(in->_idx)) {
        IdealLoopTree* in_loop = get_loop(in);
        if (in_loop != loop) {
          // Entered an inner loop: jump over it via its entry control.
          in = in_loop->_head->in(LoopNode::EntryControl);
        } else if (in->is_Region()) {
          stack.push(in, 1);
          break;
        } else if (in->is_IfProj() &&
                   in->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
          // Candidate trap projection: keep it only if frequent enough.
          if (pf.to(in) * loop_trip_cnt >= 1) {
            stack.push(in, 1);
          }
          in = in->in(0);
        } else {
          in = in->in(0);
        }
      }
    } else {
      // All inputs processed; emit kept if-projections on the way out.
      if (c->is_IfProj()) {
        if_proj_list.push(c);
      }
      stack.pop();
    }

  } while (stack.size() > 0);
}


// Try to hoist the test guarded by 'proj' (a projection of an If inside
// 'loop') above the loop as a predicate attached at 'predicate_proj'.
// Two cases are handled:
//  - a fully loop-invariant test, cloned above the loop as-is;
//  - a range check in a counted loop ('cl' non-NULL), replaced by explicit
//    lower- and upper-bound checks above the loop (plus a skeleton predicate
//    for RangeCheck nodes, used later by pre/main/post loop creation).
// On success the old in-loop If is made redundant via dominated_by() and
// true is returned; otherwise the graph is unchanged and false is returned.
bool PhaseIdealLoop::loop_predication_impl_helper(IdealLoopTree *loop, ProjNode* proj, ProjNode *predicate_proj,
                                                  CountedLoopNode *cl, ConNode* zero, Invariance& invar,
                                                  Deoptimization::DeoptReason reason) {
  // Following are changed to nonnull when a predicate can be hoisted
  ProjNode* new_predicate_proj = NULL;
  IfNode* iff = proj->in(0)->as_If();
  Node* test = iff->in(1);
  if (!test->is_Bool()){ //Conv2B, ...
    return false;
  }
  BoolNode* bol = test->as_Bool();
  if (invar.is_invariant(bol)) {
    // Invariant test
    new_predicate_proj = create_new_if_for_predicate(predicate_proj, NULL,
                                                     reason,
                                                     iff->Opcode());
    Node* ctrl = new_predicate_proj->in(0)->as_If()->in(0);
    BoolNode* new_predicate_bol = invar.clone(bol, ctrl)->as_Bool();

    // Negate test if necessary
    bool negated = false;
    if (proj->_con != predicate_proj->_con) {
      new_predicate_bol = new BoolNode(new_predicate_bol->in(1), new_predicate_bol->_test.negate());
      register_new_node(new_predicate_bol, ctrl);
      negated = true;
    }
    IfNode* new_predicate_iff = new_predicate_proj->in(0)->as_If();
    _igvn.hash_delete(new_predicate_iff);
    new_predicate_iff->set_req(1, new_predicate_bol);
#ifndef PRODUCT
    if (TraceLoopPredicate) {
      tty->print("Predicate invariant if%s: %d ", negated ? " negated" : "", new_predicate_iff->_idx);
      loop->dump_head();
    } else if (TraceLoopOpts) {
      tty->print("Predicate IC ");
      loop->dump_head();
    }
#endif
  } else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) {
    // Range check for counted loops
    const Node* cmp = bol->in(1)->as_Cmp();
    Node* idx = cmp->in(1);
    assert(!invar.is_invariant(idx), "index is variant");
    Node* rng = cmp->in(2);
    assert(rng->Opcode() == Op_LoadRange || iff->is_RangeCheck() || _igvn.type(rng)->is_int()->_lo >= 0, "must be");
    assert(invar.is_invariant(rng), "range must be invariant");
    int scale = 1;
    Node* offset = zero;
    // Decompose the index as scale*iv + offset.
    bool ok = is_scaled_iv_plus_offset(idx, cl->phi(), &scale, &offset);
    assert(ok, "must be index expression");

    Node* init = cl->init_trip();
    // Limit is not exact.
    // Calculate exact limit here.
    // Note, counted loop's test is '<' or '>'.
    Node* limit = exact_limit(loop);
    int stride = cl->stride()->get_int();

    // Build if's for the upper and lower bound tests. The
    // lower_bound test will dominate the upper bound test and all
    // cloned or created nodes will use the lower bound test as
    // their declared control.

    // Perform cloning to keep Invariance state correct since the
    // late schedule will place invariant things in the loop.
    Node *ctrl = predicate_proj->in(0)->as_If()->in(0);
    rng = invar.clone(rng, ctrl);
    if (offset && offset != zero) {
      assert(invar.is_invariant(offset), "offset must be loop invariant");
      offset = invar.clone(offset, ctrl);
    }
    // If predicate expressions may overflow in the integer range, longs are used.
    bool overflow = false;

    // Test the lower bound
    BoolNode* lower_bound_bol = rc_predicate(loop, ctrl, scale, offset, init, limit, stride, rng, false, overflow);
    // Negate test if necessary
    bool negated = false;
    if (proj->_con != predicate_proj->_con) {
      lower_bound_bol = new BoolNode(lower_bound_bol->in(1), lower_bound_bol->_test.negate());
      register_new_node(lower_bound_bol, ctrl);
      negated = true;
    }
    ProjNode* lower_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode());
    IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If();
    _igvn.hash_delete(lower_bound_iff);
    lower_bound_iff->set_req(1, lower_bound_bol);
    if (TraceLoopPredicate) tty->print_cr("lower bound check if: %s %d ", negated ? " negated" : "", lower_bound_iff->_idx);

    // Test the upper bound
    BoolNode* upper_bound_bol = rc_predicate(loop, lower_bound_proj, scale, offset, init, limit, stride, rng, true, overflow);
    negated = false;
    if (proj->_con != predicate_proj->_con) {
      upper_bound_bol = new BoolNode(upper_bound_bol->in(1), upper_bound_bol->_test.negate());
      register_new_node(upper_bound_bol, ctrl);
      negated = true;
    }
    ProjNode* upper_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode());
    assert(upper_bound_proj->in(0)->as_If()->in(0) == lower_bound_proj, "should dominate");
    IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If();
    _igvn.hash_delete(upper_bound_iff);
    upper_bound_iff->set_req(1, upper_bound_bol);
    // NOTE(review): this trace prints lower_bound_iff->_idx under the
    // "upper bound" label; looks like a copy/paste slip (upper_bound_iff->_idx
    // expected) -- trace-only, verify before changing.
    if (TraceLoopPredicate) tty->print_cr("upper bound check if: %s %d ", negated ? " negated" : "", lower_bound_iff->_idx);

    // Fall through into rest of the clean up code which will move
    // any dependent nodes onto the upper bound test.
    new_predicate_proj = upper_bound_proj;

    if (iff->is_RangeCheck()) {
      new_predicate_proj = insert_skeleton_predicate(iff, loop, proj, predicate_proj, upper_bound_proj, scale, offset, init, limit, stride, rng, overflow, reason);
    }

#ifndef PRODUCT
    if (TraceLoopOpts && !TraceLoopPredicate) {
      tty->print("Predicate RC ");
      loop->dump_head();
    }
#endif
  } else {
    // Loop variant check (for example, range check in non-counted loop)
    // with uncommon trap.
    return false;
  }
  assert(new_predicate_proj != NULL, "sanity");
  // Success - attach condition (new_predicate_bol) to predicate if
  invar.map_ctrl(proj, new_predicate_proj); // so that invariance test can be appropriate

  // Eliminate the old If in the loop body
  dominated_by( new_predicate_proj, iff, proj->_con != new_predicate_proj->_con );

  C->set_major_progress();
  return true;
}


// After pre/main/post loops are created, we'll put a copy of some
// range checks between the pre and main loop to validate the initial
// value of the induction variable for the main loop. Make a copy of
// the predicates here with an opaque node as a place holder for the
// initial value.
// Build the skeleton predicate for a hoisted range check: a copy of the
// range-check predicate whose init value is hidden behind an Opaque1Node
// (a placeholder replaced after pre/main/post loops exist) and whose
// condition is wrapped in an Opaque4Node so it is not folded until loop
// opts are over.  Returns the new predicate's true projection.
ProjNode* PhaseIdealLoop::insert_skeleton_predicate(IfNode* iff, IdealLoopTree *loop,
                                                    ProjNode* proj, ProjNode *predicate_proj,
                                                    ProjNode* upper_bound_proj,
                                                    int scale, Node* offset,
                                                    Node* init, Node* limit, jint stride,
                                                    Node* rng, bool &overflow,
                                                    Deoptimization::DeoptReason reason) {
  assert(proj->_con && predicate_proj->_con, "not a range check?");
  Node* opaque_init = new Opaque1Node(C, init);
  register_new_node(opaque_init, upper_bound_proj);
  BoolNode* bol = rc_predicate(loop, upper_bound_proj, scale, offset, opaque_init, limit, stride, rng, (stride > 0) != (scale > 0), overflow);
  Node* opaque_bol = new Opaque4Node(C, bol, _igvn.intcon(1)); // This will go away once loop opts are over
  register_new_node(opaque_bol, upper_bound_proj);
  ProjNode* new_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode());
  _igvn.replace_input_of(new_proj->in(0), 1, opaque_bol);
  assert(opaque_init->outcnt() > 0, "should be used");
  return new_proj;
}

//------------------------------ loop_predication_impl--------------------------
// Insert loop predicates for null checks and range checks
// Collects the if-projections inside 'loop' that dominate the tail, then for
// each uncommon-trap if-pattern tries to hoist its test above the loop as a
// regular predicate, and (when profile data warrants it) also follows
// branches to hoist less-frequent checks as profile predicates.
// Returns true if at least one predicate was hoisted.
bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
  if (!UseLoopPredicate) return false;

  if (!loop->_head->is_Loop()) {
    // Could be a simple region when irreducible loops are present.
    return false;
  }
  LoopNode* head = loop->_head->as_Loop();

  if (head->unique_ctrl_out()->Opcode() == Op_NeverBranch) {
    // do nothing for infinite loops
    return false;
  }

  if (head->is_OuterStripMinedLoop()) {
    // The inner strip-mined loop will be handled instead.
    return false;
  }

  CountedLoopNode *cl = NULL;
  if (head->is_valid_counted_loop()) {
    cl = head->as_CountedLoop();
    // do nothing for iteration-splitted loops
    if (!cl->is_normal_loop()) return false;
    // Avoid RCE if Counted loop's test is '!='.
    BoolTest::mask bt = cl->loopexit()->test_trip();
    if (bt != BoolTest::lt && bt != BoolTest::gt)
      cl = NULL; // cl == NULL disables the range-check case in the helper
  }

  // Locate the predicate insertion points above the loop entry:
  // loop-limit check, profile predicates, then regular predicates.
  Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
  ProjNode *loop_limit_proj = NULL;
  ProjNode *predicate_proj = NULL;
  ProjNode *profile_predicate_proj = NULL;
  // Loop limit check predicate should be near the loop.
  loop_limit_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
  if (loop_limit_proj != NULL) {
    entry = loop_limit_proj->in(0)->in(0);
  }
  bool has_profile_predicates = false;
  profile_predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate);
  if (profile_predicate_proj != NULL) {
    Node* n = skip_loop_predicates(entry);
    // Check if predicates were already added to the profile predicate
    // block
    if (n != entry->in(0)->in(0)) {
      has_profile_predicates = true;
    }
    entry = n;
  }
  predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);

  float loop_trip_cnt = -1;
  bool follow_branches = loop_predication_should_follow_branches(loop, profile_predicate_proj, loop_trip_cnt);
  assert(!follow_branches || loop_trip_cnt >= 0, "negative trip count?");

  if (predicate_proj == NULL && !follow_branches) {
#ifndef PRODUCT
    if (TraceLoopPredicate) {
      tty->print("missing predicate:");
      loop->dump_head();
      head->dump(1);
    }
#endif
    return false;
  }
  ConNode* zero = _igvn.intcon(0);
  set_ctrl(zero, C->root());

  ResourceArea *area = Thread::current()->resource_area();
  Invariance invar(area, loop);

  // Create list of if-projs such that a newer proj dominates all older
  // projs in the list, and they all dominate loop->tail()
  Node_List if_proj_list(area);
  Node_List regions(area);
  Node *current_proj = loop->tail(); //start from tail


  Node_List controls(area);
  while (current_proj != head) {
    if (loop == get_loop(current_proj) && // still in the loop ?
        current_proj->is_Proj() && // is a projection ?
        (current_proj->in(0)->Opcode() == Op_If ||
         current_proj->in(0)->Opcode() == Op_RangeCheck)) { // is a if projection ?
      if_proj_list.push(current_proj);
    }
    if (follow_branches &&
        current_proj->Opcode() == Op_Region &&
        loop == get_loop(current_proj)) {
      regions.push(current_proj);
    }
    current_proj = idom(current_proj);
  }

  bool hoisted = false; // true if at least one proj is promoted

  if (!has_profile_predicates) {
    while (if_proj_list.size() > 0) {
      Node* n = if_proj_list.pop();

      ProjNode* proj = n->as_Proj();
      IfNode* iff = proj->in(0)->as_If();

      CallStaticJavaNode* call = proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
      if (call == NULL) {
        if (loop->is_loop_exit(iff)) {
          // stop processing the remaining projs in the list because the execution of them
          // depends on the condition of "iff" (iff->in(1)).
          break;
        } else {
          // Both arms are inside the loop. There are two cases:
          // (1) there is one backward branch. In this case, any remaining proj
          //     in the if_proj list post-dominates "iff". So, the condition of "iff"
          //     does not determine the execution the remining projs directly, and we
          //     can safely continue.
          // (2) both arms are forwarded, i.e. a diamond shape. In this case, "proj"
          //     does not dominate loop->tail(), so it can not be in the if_proj list.
          continue;
        }
      }
      Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(call->uncommon_trap_request());
      if (reason == Deoptimization::Reason_predicate) {
        // Already a predicate trap: nothing above it can be hoisted further.
        break;
      }

      if (predicate_proj != NULL) {
        // Note: non-short-circuit '|' so the helper always runs.
        hoisted = loop_predication_impl_helper(loop, proj, predicate_proj, cl, zero, invar, Deoptimization::Reason_predicate) | hoisted;
      }
    } // end while
  }

  Node_List if_proj_list_freq(area);
  if (follow_branches) {
    PathFrequency pf(loop->_head, this);

    // Some projections were skipped by regular predicates because of
    // an early loop exit. Try them with profile data.
    while (if_proj_list.size() > 0) {
      Node* proj = if_proj_list.pop();
      float f = pf.to(proj);
      if (proj->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
          f * loop_trip_cnt >= 1) {
        hoisted = loop_predication_impl_helper(loop, proj->as_Proj(), profile_predicate_proj, cl, zero, invar, Deoptimization::Reason_profile_predicate) | hoisted;
      }
    }

    // And look into all branches
    Node_Stack stack(0);
    VectorSet seen(Thread::current()->resource_area());
    while (regions.size() > 0) {
      Node* c = regions.pop();
      loop_predication_follow_branches(c, loop, loop_trip_cnt, pf, stack, seen, if_proj_list_freq);
    }

    for (uint i = 0; i < if_proj_list_freq.size(); i++) {
      ProjNode* proj = if_proj_list_freq.at(i)->as_Proj();
      hoisted = loop_predication_impl_helper(loop, proj, profile_predicate_proj, cl, zero, invar, Deoptimization::Reason_profile_predicate) | hoisted;
    }
  }

#ifndef PRODUCT
  // report that the loop predication has been actually performed
  // for this loop
  if (TraceLoopPredicate && hoisted) {
    tty->print("Loop Predication Performed:");
    loop->dump_head();
  }
#endif

  head->verify_strip_mined(1);

  return hoisted;
}

//------------------------------loop_predication-------------------------------- 1415 // driver routine for loop predication optimization 1416 bool IdealLoopTree::loop_predication( PhaseIdealLoop *phase) { 1417 bool hoisted = false; 1418 // Recursively promote predicates 1419 if (_child) { 1420 hoisted = _child->loop_predication( phase); 1421 } 1422 1423 // self 1424 if (!_irreducible && !tail()->is_top()) { 1425 hoisted |= phase->loop_predication_impl(this); 1426 } 1427 1428 if (_next) { //sibling 1429 hoisted |= _next->loop_predication( phase); 1430 } 1431 1432 return hoisted; 1433 }