1 /*
   2  * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "memory/allocation.inline.hpp"
  27 #include "opto/mulnode.hpp"
  28 #include "opto/addnode.hpp"
  29 #include "opto/connode.hpp"
  30 #include "opto/convertnode.hpp"
  31 #include "opto/loopnode.hpp"
  32 #include "opto/opaquenode.hpp"
  33 #include "opto/rootnode.hpp"
  34 
  35 //================= Loop Unswitching =====================
  36 //
  37 // orig:                       transformed:
  38 //                               if (invariant-test) then
  39 //  predicate                      predicate
  40 //  loop                           loop
  41 //    stmt1                          stmt1
  42 //    if (invariant-test) then       stmt2
  43 //      stmt2                        stmt4
  44 //    else                         endloop
  45 //      stmt3                    else
  46 //    endif                        predicate [clone]
  47 //    stmt4                        loop [clone]
  48 //  endloop                          stmt1 [clone]
  49 //                                   stmt3
  50 //                                   stmt4 [clone]
  51 //                                 endloop
  52 //                               endif
  53 //
  54 // Note: the "else" clause may be empty
  55 
  56 static bool is_flattened_array_check(Node* iff, PhaseTransform* phase) {
  57   if (iff->Opcode() != Op_If) {
  58     return false;
  59   }
  60   Node* bol = iff->in(1);
  61   if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
  62     return false;
  63   }
  64   Node* cmp = bol->in(1);
  65   if (cmp->Opcode() != Op_CmpI) {
  66     return false;
  67   }
  68   Node* cmp_in1 = cmp->in(1);
  69   Node* cmp_in2 = cmp->in(2);
  70   if ((unsigned int)cmp_in2->find_int_con(0) != Klass::_lh_array_tag_vt_value) {
  71     return false;
  72   }
  73   if (cmp_in1->Opcode() != Op_RShiftI) {
  74     return false;
  75   }
  76   Node* shift_in1 = cmp_in1->in(1);
  77   Node* shift_in2 = cmp_in1->in(2);
  78   if ((unsigned int)shift_in2->find_int_con(0) != Klass::_lh_array_tag_shift) {
  79     return false;
  80   }
  81   if (shift_in1->Opcode() != Op_LoadI) {
  82     return false;
  83   }
  84   intptr_t offset;
  85   Node* addr = AddPNode::Ideal_base_and_offset(shift_in1->in(MemNode::Address), phase, offset);
  86   if (addr == NULL || offset != in_bytes(Klass::layout_helper_offset())) {
  87     return false;
  88   }
  89   if (!phase->type(addr)->isa_klassptr()) {
  90     return false;
  91   }
  92 
  93   return true;
  94 }
  95 
  96 //------------------------------policy_unswitching-----------------------------
  97 // Return TRUE or FALSE if the loop should be unswitched
  98 // (ie. clone loop with an invariant test that does not exit the loop)
  99 bool IdealLoopTree::policy_unswitching( PhaseIdealLoop *phase ) const {
 100   if (!LoopUnswitching) {
 101     return false;
 102   }
 103   if (!_head->is_Loop()) {
 104     return false;
 105   }
 106 
 107   // If nodes are depleted, some transform has miscalculated its needs.
 108   assert(!phase->exceeding_node_budget(), "sanity");
 109 
 110   // check for vectorized loops, any unswitching was already applied
 111   if (_head->is_CountedLoop() && _head->as_CountedLoop()->is_unroll_only()) {
 112     return false;
 113   }
 114 
 115   LoopNode* head = _head->as_Loop();
 116   if (head->unswitch_count() + 1 > head->unswitch_max()) {
 117     return false;
 118   }
 119 
 120   if (head->is_flattened_arrays()) {
 121     return false;
 122   }
 123 
 124   Node_List flattened_checks;
 125   if (phase->find_unswitching_candidate(this, flattened_checks) == NULL && flattened_checks.size() == 0) {
 126     return false;
 127   }
 128 
 129   // Too speculative if running low on nodes.
 130   return phase->may_require_nodes(est_loop_clone_sz(2));
 131 }
 132 
 133 //------------------------------find_unswitching_candidate-----------------------------
 134 // Find candidate "if" for unswitching
 135 IfNode* PhaseIdealLoop::find_unswitching_candidate(const IdealLoopTree *loop, Node_List& flattened_checks) const {
 136 
 137   // Find first invariant test that doesn't exit the loop
 138   LoopNode *head = loop->_head->as_Loop();
 139   IfNode* unswitch_iff = NULL;
 140   Node* n = head->in(LoopNode::LoopBackControl);
 141   while (n != head) {
 142     Node* n_dom = idom(n);
 143     if (n->is_Region()) {
 144       if (n_dom->is_If()) {
 145         IfNode* iff = n_dom->as_If();
 146         if (iff->in(1)->is_Bool()) {
 147           BoolNode* bol = iff->in(1)->as_Bool();
 148           if (bol->in(1)->is_Cmp()) {
 149             // If condition is invariant and not a loop exit,
 150             // then found reason to unswitch.
 151             if (loop->is_invariant(bol) && !loop->is_loop_exit(iff)) {
 152               unswitch_iff = iff;
 153             }
 154           }
 155         }
 156       }
 157     }
 158     n = n_dom;
 159   }
 160 
 161   if (unswitch_iff == NULL || is_flattened_array_check(unswitch_iff, &_igvn)) {
 162     // collect all flattened array checks
 163     for (uint i = 0; i < loop->_body.size(); i++) {
 164       Node* n = loop->_body.at(i);
 165       if (is_flattened_array_check(n, &_igvn) &&
 166           loop->is_invariant(n->in(1)) &&
 167           !loop->is_loop_exit(n)) {
 168         flattened_checks.push(n);
 169       }
 170     }
 171     unswitch_iff = NULL;
 172   }
 173 
 174   return unswitch_iff;
 175 }
 176 
//------------------------------do_unswitching-----------------------------
// Clone loop with an invariant test (that does not exit) and
// insert a clone of the test that selects which version to
// execute.
void PhaseIdealLoop::do_unswitching(IdealLoopTree *loop, Node_List &old_new) {

  // Find first invariant test that doesn't exit the loop
  LoopNode *head = loop->_head->as_Loop();

  // Either a single invariant If is returned, or (when NULL) a list of
  // flattened array checks that are all unswitched on together.
  Node_List flattened_checks;
  IfNode* unswitch_iff = find_unswitching_candidate((const IdealLoopTree *)loop, flattened_checks);
  assert(unswitch_iff != NULL || flattened_checks.size() > 0, "should be at least one");
  if (unswitch_iff == NULL) {
    // Use the first flattened array check as the template for the
    // hoisted test that is built below.
    unswitch_iff = flattened_checks.at(0)->as_If();
  }

#ifndef PRODUCT
  if (TraceLoopOpts) {
    tty->print("Unswitch   %d ", head->unswitch_count()+1);
    loop->dump_head();
  }
#endif

  // Need to revert back to normal loop
  if (head->is_CountedLoop() && !head->as_CountedLoop()->is_normal_loop()) {
    head->as_CountedLoop()->set_normal_loop();
  }

  // Clone the loop and insert the selector If. proj_true is the true
  // projection of that If and leads into the fast (original) loop copy.
  ProjNode* proj_true = create_slow_version_of_loop(loop, old_new, unswitch_iff->Opcode(), CloneIncludesStripMined);

#ifdef ASSERT
  // Check that proj_true reaches the fast loop head, possibly through
  // cloned (regular and/or profiled) predicates and the strip-mined
  // outer loop.
  Node* uniqc = proj_true->unique_ctrl_out();
  Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
  Node* predicate = find_predicate(entry);
  if (predicate != NULL) {
    entry = skip_loop_predicates(entry);
  }
  if (predicate != NULL && UseLoopPredicate) {
    // We may have two predicates, find first.
    Node* n = find_predicate(entry);
    if (n != NULL) {
      predicate = n;
      entry = skip_loop_predicates(entry);
    }
  }
  if (predicate != NULL && UseProfiledLoopPredicate) {
    entry = find_predicate(entry);
    if (entry != NULL) predicate = entry;
  }
  if (predicate != NULL) predicate = predicate->in(0);
  assert(proj_true->is_IfTrue() &&
         (predicate == NULL && uniqc == head && !head->is_strip_mined() ||
          predicate == NULL && uniqc == head->in(LoopNode::EntryControl) && head->is_strip_mined() ||
          predicate != NULL && uniqc == predicate), "by construction");
#endif
  // Increment unswitch count
  LoopNode* head_clone = old_new[head->_idx]->as_Loop();
  int nct = head->unswitch_count() + 1;
  head->set_unswitch_count(nct);
  head_clone->set_unswitch_count(nct);
  // The slow copy keeps the flattened array checks; mark it so it is not
  // unswitched on them again (see policy_unswitching).
  head_clone->mark_flattened_arrays();

  // Add test to new "if" outside of loop
  IfNode* invar_iff   = proj_true->in(0)->as_If();
  Node* invar_iff_c   = invar_iff->in(0);
  invar_iff->_prob    = unswitch_iff->_prob;
  if (flattened_checks.size() > 0) {
    // Flattened array checks are used in
    // Parse::array_store()/Parse::array_load() to switch between a
    // legacy object array access and a flattened value array
    // access. We want the performance impact on legacy accesses to be
    // as small as possible so we make 2 copies of the loops: a fast
    // one where all accesses are known to be legacy, a slow one where
    // some accesses are to flattened arrays. Flattened array checks
    // can be removed from the first one but not from the second one
    // as it can have a mix of flattened/legacy accesses.
    //
    // The hoisted condition is built by OR-ing together the tag bits of
    // every checked array klass: the fast path is taken only when no
    // layout helper has the value-array tag bit pattern set.
    BoolNode* bol       = unswitch_iff->in(1)->clone()->as_Bool();
    register_new_node(bol, invar_iff->in(0));
    Node* cmp = bol->in(1)->clone();
    register_new_node(cmp, invar_iff->in(0));
    bol->set_req(1, cmp);
    Node* in1 = NULL;
    for (uint i = 0; i < flattened_checks.size(); i++) {
      // in(1)->in(1)->in(1) navigates If -> Bool -> Cmp -> shifted tag value.
      Node* v = flattened_checks.at(i)->in(1)->in(1)->in(1);
      v = new AndINode(v, _igvn.intcon(Klass::_lh_array_tag_vt_value));
      register_new_node(v, invar_iff->in(0));
      if (in1 == NULL) {
        in1 = v;
      } else {
        in1 = new OrINode(in1, v);
        register_new_node(in1, invar_iff->in(0));
      }
    }
    cmp->set_req(1, in1);
    invar_iff->set_req(1, bol);
  } else {
    // Single candidate: reuse its Bool directly on the hoisted If.
    BoolNode* bol       = unswitch_iff->in(1)->as_Bool();
    invar_iff->set_req(1, bol);
  }

  // False projection of the hoisted If: entry to the slow loop copy.
  ProjNode* proj_false = invar_iff->proj_out(0)->as_Proj();

  // Hoist invariant casts out of each loop to the appropriate
  // control projection.

  Node_List worklist;

  for (DUIterator_Fast imax, i = unswitch_iff->fast_outs(imax); i < imax; i++) {
    ProjNode* proj= unswitch_iff->fast_out(i)->as_Proj();
    // Copy to a worklist for easier manipulation
    for (DUIterator_Fast jmax, j = proj->fast_outs(jmax); j < jmax; j++) {
      Node* use = proj->fast_out(j);
      if (use->Opcode() == Op_CheckCastPP && loop->is_invariant(use->in(1))) {
        worklist.push(use);
      }
    }
    // Re-pin each invariant CheckCastPP on the matching projection of
    // the hoisted If, in both the fast and the slow loop copies.
    ProjNode* invar_proj = invar_iff->proj_out(proj->_con)->as_Proj();
    while (worklist.size() > 0) {
      Node* use = worklist.pop();
      Node* nuse = use->clone();
      nuse->set_req(0, invar_proj);
      _igvn.replace_input_of(use, 1, nuse);
      register_new_node(nuse, invar_proj);
      // Same for the clone
      Node* use_clone = old_new[use->_idx];
      _igvn.replace_input_of(use_clone, 1, nuse);
    }
  }

  IfNode* unswitch_iff_clone = old_new[unswitch_iff->_idx]->as_If();
  if (flattened_checks.size() > 0) {
    // Remove the now-redundant checks from the fast loop only; the slow
    // loop may still see a mix of flattened/legacy accesses.
    for (uint i = 0; i < flattened_checks.size(); i++) {
      IfNode* iff = flattened_checks.at(i)->as_If();
      _igvn.rehash_node_delayed(iff);
      short_circuit_if(iff, proj_true);
    }
  } else {
    // Hardwire the control paths in the loops into if(true) and if(false)
    _igvn.rehash_node_delayed(unswitch_iff);
    short_circuit_if(unswitch_iff, proj_true);

    _igvn.rehash_node_delayed(unswitch_iff_clone);
    short_circuit_if(unswitch_iff_clone, proj_false);
  }

  // Reoptimize loops
  loop->record_for_igvn();
  for(int i = loop->_body.size() - 1; i >= 0 ; i--) {
    Node *n = loop->_body[i];
    Node *n_clone = old_new[n->_idx];
    _igvn._worklist.push(n_clone);
  }

#ifndef PRODUCT
  if (TraceLoopUnswitching) {
    tty->print_cr("Loop unswitching orig: %d @ %d  new: %d @ %d",
                  head->_idx,                unswitch_iff->_idx,
                  old_new[head->_idx]->_idx, unswitch_iff_clone->_idx);
  }
#endif

  C->set_major_progress();
}
 340 
//-------------------------create_slow_version_of_loop------------------------
// Create a slow version of the loop by cloning the loop
// and inserting an if to select fast-slow versions.
// Return control projection of the entry to the fast version.
ProjNode* PhaseIdealLoop::create_slow_version_of_loop(IdealLoopTree *loop,
                                                      Node_List &old_new,
                                                      int opcode,
                                                      CloneLoopMode mode) {
  LoopNode* head  = loop->_head->as_Loop();
  bool counted_loop = head->is_CountedLoop();
  Node*     entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
  _igvn.rehash_node_delayed(entry);
  IdealLoopTree* outer_loop = loop->_parent;

  head->verify_strip_mined(1);

  // Build the selector If with a placeholder always-true condition:
  // Opaque1(ConI(1)) -> Conv2B hides the constant from IGVN so the If is
  // not folded before do_unswitching installs the real invariant test.
  Node *cont      = _igvn.intcon(1);
  set_ctrl(cont, C->root());
  Node* opq       = new Opaque1Node(C, cont);
  register_node(opq, outer_loop, entry, dom_depth(entry));
  Node *bol       = new Conv2BNode(opq);
  register_node(bol, outer_loop, entry, dom_depth(entry));
  // Preserve the candidate's node kind so range-check smearing and
  // related logic still recognize the hoisted test.
  IfNode* iff = (opcode == Op_RangeCheck) ? new RangeCheckNode(entry, bol, PROB_MAX, COUNT_UNKNOWN) :
    new IfNode(entry, bol, PROB_MAX, COUNT_UNKNOWN);
  register_node(iff, outer_loop, entry, dom_depth(entry));
  ProjNode* iffast = new IfTrueNode(iff);
  register_node(iffast, outer_loop, iff, dom_depth(iff));
  ProjNode* ifslow = new IfFalseNode(iff);
  register_node(ifslow, outer_loop, iff, dom_depth(iff));

  // Clone the loop body.  The clone becomes the fast loop.  The
  // original pre-header will (illegally) have 3 control users
  // (old & new loops & new if).
  clone_loop(loop, old_new, dom_depth(head->skip_strip_mined()), mode, iff);
  assert(old_new[head->_idx]->is_Loop(), "" );

  // Fast (true) control
  Node* iffast_pred = clone_loop_predicates(entry, iffast, !counted_loop);

  // Slow (false) control
  Node* ifslow_pred = clone_loop_predicates(entry, ifslow, !counted_loop);

  // Rewire both loop entries below their respective cloned predicate chains.
  Node* l = head->skip_strip_mined();
  _igvn.replace_input_of(l, LoopNode::EntryControl, iffast_pred);
  set_idom(l, iffast_pred, dom_depth(l));
  LoopNode* slow_l = old_new[head->_idx]->as_Loop()->skip_strip_mined();
  _igvn.replace_input_of(slow_l, LoopNode::EntryControl, ifslow_pred);
  // NOTE(review): dom_depth(l) (not slow_l) is used here; looks intentional
  // since both entries sit at the same depth, and recompute_dom_depth()
  // below refreshes all depths anyway -- confirm.
  set_idom(slow_l, ifslow_pred, dom_depth(l));

  recompute_dom_depth();

  return iffast;
}
 394 
// Clone the loop to create a "reserved" copy guarded by an If on a
// constant-true condition: the original loop runs, the clone is kept
// unreachable. CountedLoopReserveKit later flips the condition to
// constant false to roll back to the untouched clone if the transform
// applied to the original is abandoned. Returns the reserved loop head.
LoopNode* PhaseIdealLoop::create_reserve_version_of_loop(IdealLoopTree *loop, CountedLoopReserveKit* lk) {
  Node_List old_new;
  LoopNode* head  = loop->_head->as_Loop();
  bool counted_loop = head->is_CountedLoop();
  Node*     entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
  _igvn.rehash_node_delayed(entry);
  // For a strip-mined loop the new If goes above the outer loop as well.
  IdealLoopTree* outer_loop = head->is_strip_mined() ? loop->_parent->_parent : loop->_parent;

  // Selector If with a plain ConI(1) condition (no Opaque1 here: the kit
  // itself controls when/if the condition is flipped, see ~CountedLoopReserveKit).
  ConINode* const_1 = _igvn.intcon(1);
  set_ctrl(const_1, C->root());
  IfNode* iff = new IfNode(entry, const_1, PROB_MAX, COUNT_UNKNOWN);
  register_node(iff, outer_loop, entry, dom_depth(entry));
  ProjNode* iffast = new IfTrueNode(iff);
  register_node(iffast, outer_loop, iff, dom_depth(iff));
  ProjNode* ifslow = new IfFalseNode(iff);
  register_node(ifslow, outer_loop, iff, dom_depth(iff));

  // Clone the loop body.  The clone becomes the fast loop.  The
  // original pre-header will (illegally) have 3 control users
  // (old & new loops & new if).
  clone_loop(loop, old_new, dom_depth(head), CloneIncludesStripMined, iff);
  assert(old_new[head->_idx]->is_Loop(), "" );

  LoopNode* slow_head = old_new[head->_idx]->as_Loop();

#ifndef PRODUCT
  if (TraceLoopOpts) {
    tty->print_cr("PhaseIdealLoop::create_reserve_version_of_loop:");
    tty->print("\t iff = %d, ", iff->_idx); iff->dump();
    tty->print("\t iffast = %d, ", iffast->_idx); iffast->dump();
    tty->print("\t ifslow = %d, ", ifslow->_idx); ifslow->dump();
    tty->print("\t before replace_input_of: head = %d, ", head->_idx); head->dump();
    tty->print("\t before replace_input_of: slow_head = %d, ", slow_head->_idx); slow_head->dump();
  }
#endif

  // Fast (true) control
  _igvn.replace_input_of(head->skip_strip_mined(), LoopNode::EntryControl, iffast);
  // Slow (false) control
  _igvn.replace_input_of(slow_head->skip_strip_mined(), LoopNode::EntryControl, ifslow);

  recompute_dom_depth();

  // Remember the selector so the kit's destructor can flip it.
  lk->set_iff(iff);

#ifndef PRODUCT
  if (TraceLoopOpts ) {
    tty->print("\t after  replace_input_of: head = %d, ", head->_idx); head->dump();
    tty->print("\t after  replace_input_of: slow_head = %d, ", slow_head->_idx); slow_head->dump();
  }
#endif

  return slow_head->as_Loop();
}
 449 
 450 CountedLoopReserveKit::CountedLoopReserveKit(PhaseIdealLoop* phase, IdealLoopTree *loop, bool active = true) :
 451   _phase(phase),
 452   _lpt(loop),
 453   _lp(NULL),
 454   _iff(NULL),
 455   _lp_reserved(NULL),
 456   _has_reserved(false),
 457   _use_new(false),
 458   _active(active)
 459   {
 460     create_reserve();
 461   };
 462 
 463 CountedLoopReserveKit::~CountedLoopReserveKit() {
 464   if (!_active) {
 465     return;
 466   }
 467 
 468   if (_has_reserved && !_use_new) {
 469     // intcon(0)->iff-node reverts CF to the reserved copy
 470     ConINode* const_0 = _phase->_igvn.intcon(0);
 471     _phase->set_ctrl(const_0, _phase->C->root());
 472     _iff->set_req(1, const_0);
 473 
 474     #ifndef PRODUCT
 475       if (TraceLoopOpts) {
 476         tty->print_cr("CountedLoopReserveKit::~CountedLoopReserveKit()");
 477         tty->print("\t discard loop %d and revert to the reserved loop clone %d: ", _lp->_idx, _lp_reserved->_idx);
 478         _lp_reserved->dump();
 479       }
 480     #endif
 481   }
 482 }
 483 
 484 bool CountedLoopReserveKit::create_reserve() {
 485   if (!_active) {
 486     return false;
 487   }
 488 
 489   if(!_lpt->_head->is_CountedLoop()) {
 490     if (TraceLoopOpts) {
 491       tty->print_cr("CountedLoopReserveKit::create_reserve: %d not counted loop", _lpt->_head->_idx);
 492     }
 493     return false;
 494   }
 495   CountedLoopNode *cl = _lpt->_head->as_CountedLoop();
 496   if (!cl->is_valid_counted_loop()) {
 497     if (TraceLoopOpts) {
 498       tty->print_cr("CountedLoopReserveKit::create_reserve: %d not valid counted loop", cl->_idx);
 499     }
 500     return false; // skip malformed counted loop
 501   }
 502   if (!cl->is_main_loop()) {
 503     bool loop_not_canonical = true;
 504     if (cl->is_post_loop() && (cl->slp_max_unroll() > 0)) {
 505       loop_not_canonical = false;
 506     }
 507     // only reject some loop forms
 508     if (loop_not_canonical) {
 509       if (TraceLoopOpts) {
 510         tty->print_cr("CountedLoopReserveKit::create_reserve: %d not canonical loop", cl->_idx);
 511       }
 512       return false; // skip normal, pre, and post (conditionally) loops
 513     }
 514   }
 515 
 516   _lp = _lpt->_head->as_Loop();
 517   _lp_reserved = _phase->create_reserve_version_of_loop(_lpt, this);
 518 
 519   if (!_lp_reserved->is_CountedLoop()) {
 520     return false;
 521   }
 522 
 523   Node* ifslow_pred = _lp_reserved->skip_strip_mined()->in(LoopNode::EntryControl);
 524 
 525   if (!ifslow_pred->is_IfFalse()) {
 526     return false;
 527   }
 528 
 529   Node* iff = ifslow_pred->in(0);
 530   if (!iff->is_If() || iff != _iff) {
 531     return false;
 532   }
 533 
 534   if (iff->in(1)->Opcode() != Op_ConI) {
 535     return false;
 536   }
 537 
 538   return _has_reserved = true;
 539 }