/*
 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_LOOPNODE_HPP
#define SHARE_VM_OPTO_LOOPNODE_HPP

#include "opto/cfgnode.hpp"
#include "opto/multnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"

class CmpNode;
class CountedLoopEndNode;
class CountedLoopNode;
class IdealLoopTree;
class LoopNode;
class Node;
class PhaseIdealLoop;
class CountedLoopReserveKit;
class VectorSet;
class Invariance;
struct small_cache;

//
//                  I D E A L I Z E D   L O O P S
//
// Idealized loops are the set of loops I perform more interesting
// transformations on, beyond simple hoisting.

//------------------------------LoopNode---------------------------------------
// Simple loop header.  Fall in path on left, loop-back path on right.
class LoopNode : public RegionNode {
  // Size is bigger to hold the flags.  However, the flags do not change
  // the semantics, so they do not appear in the hash & cmp functions.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  short _loop_flags;
  // Names for flag bitfields
  enum { Normal=0, Pre=1, Main=2, Post=3, PreMainPostFlagsMask=3,
         MainHasNoPreLoop=4,
         HasExactTripCount=8,
         InnerLoop=16,
         PartialPeelLoop=32,
         PartialPeelFailed=64,
         HasReductions=128,
         WasSlpAnalyzed=256,
         PassedSlpAnalysis=512,
         DoUnrollOnly=1024,
         VectorizedLoop=2048,
         HasAtomicPostLoop=4096,
         HasRangeChecks=8192,
         IsMultiversioned=16384};
  char _unswitch_count;
  enum { _unswitch_max=3 };

public:
  // Names for edge indices
  enum { Self=0, EntryControl, LoopBackControl };

  int is_inner_loop() const { return _loop_flags & InnerLoop; }
  void set_inner_loop() { _loop_flags |= InnerLoop; }

  int has_range_checks() const { return _loop_flags & HasRangeChecks; }
  int is_multiversioned() const { return _loop_flags & IsMultiversioned; }
  int is_vectorized_loop() const { return _loop_flags & VectorizedLoop; }
  int is_partial_peel_loop() const { return _loop_flags & PartialPeelLoop; }
  void set_partial_peel_loop() { _loop_flags |= PartialPeelLoop; }
  int partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; }

  void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; }
  void mark_has_reductions() { _loop_flags |= HasReductions; }
  void mark_was_slp() { _loop_flags |= WasSlpAnalyzed; }
  void mark_passed_slp() { _loop_flags |= PassedSlpAnalysis; }
  void mark_do_unroll_only() { _loop_flags |= DoUnrollOnly; }
  void mark_loop_vectorized() { _loop_flags |= VectorizedLoop; }
  void mark_has_atomic_post_loop() { _loop_flags |= HasAtomicPostLoop; }
  void mark_has_range_checks() { _loop_flags |= HasRangeChecks; }
  void mark_is_multiversioned() { _loop_flags |= IsMultiversioned; }

  int unswitch_max() { return _unswitch_max; }
  int unswitch_count() { return _unswitch_count; }
  void set_unswitch_count(int val) {
    assert(val <= unswitch_max(), "too many unswitches");
    _unswitch_count = val;
  }

  LoopNode( Node *entry, Node *backedge ) : RegionNode(3), _loop_flags(0), _unswitch_count(0) {
    init_class_id(Class_Loop);
    init_req(EntryControl, entry);
    init_req(LoopBackControl, backedge);
  }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int Opcode() const;
  bool can_be_counted_loop(PhaseTransform* phase) const {
    return req() == 3 && in(0) != NULL &&
      in(1) != NULL && phase->type(in(1)) != Type::TOP &&
      in(2) != NULL && phase->type(in(2)) != Type::TOP;
  }
  bool is_valid_counted_loop() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------Counted Loops----------------------------------
// Counted loops are all trip-counted loops, with exactly 1 trip-counter exit
// path (and maybe some other exit paths).  The trip-counter exit is always
// last in the loop.  The trip-counter has to stride by a constant;
// the exit value is also loop invariant.

// CountedLoopNodes and CountedLoopEndNodes come in matched pairs.  The
// CountedLoopNode has the incoming loop control and the loop-back-control
// which is always the IfTrue before the matching CountedLoopEndNode.  The
// CountedLoopEndNode has an incoming control (possibly not the
// CountedLoopNode if there is control flow in the loop), the post-increment
// trip-counter value, and the limit.
// The trip-counter value is always of the form (Op old-trip-counter stride).
// The old-trip-counter is produced by a Phi connected to the CountedLoopNode.
// The stride is constant.  The Op is any commutable opcode, including Add,
// Mul, Xor.  The CountedLoopEndNode also takes in the loop-invariant limit
// value.

// From a CountedLoopNode I can reach the matching CountedLoopEndNode via the
// loop-back control.  From CountedLoopEndNodes I can reach CountedLoopNodes
// via the old-trip-counter from the Op node.

//------------------------------CountedLoopNode--------------------------------
// CountedLoopNodes head simple counted loops.  CountedLoopNodes have as
// inputs the incoming loop-start control and the loop-back control, so they
// act like RegionNodes.  They also take in the initial trip counter, the
// loop-invariant stride and the loop-invariant limit value.  CountedLoopNodes
// produce a loop-body control and the trip counter value.  Since
// CountedLoopNodes behave like RegionNodes I still have a standard CFG model.

class CountedLoopNode : public LoopNode {
  // Size is bigger to hold _main_idx.  However, _main_idx does not change
  // the semantics so it does not appear in the hash & cmp functions.
  virtual uint size_of() const { return sizeof(*this); }

  // For Pre- and Post-loops during debugging ONLY, this holds the index of
  // the Main CountedLoop.  Used to assert that we understand the graph shape.
  node_idx_t _main_idx;

  // Known trip count calculated by compute_exact_trip_count()
  uint  _trip_count;

  // Expected trip count from profile data
  float _profile_trip_cnt;

  // Log2 of original loop bodies in unrolled loop
  int _unrolled_count_log2;

  // Node count prior to last unrolling - used to decide if
  // unroll,optimize,unroll,optimize,... is making progress
  int _node_count_before_unroll;

  // If slp analysis is performed we record the maximum
  // vector mapped unroll factor here
  int _slp_maximum_unroll_factor;

public:
  CountedLoopNode( Node *entry, Node *backedge )
    : LoopNode(entry, backedge), _main_idx(0), _trip_count(max_juint),
      _profile_trip_cnt(COUNT_UNKNOWN), _unrolled_count_log2(0),
      _node_count_before_unroll(0), _slp_maximum_unroll_factor(0) {
    init_class_id(Class_CountedLoop);
    // Initialize _trip_count to the largest possible value.
    // Will be reset (lower) if the loop's trip count is known.
  }

  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  Node *init_control() const { return in(EntryControl); }
  Node *back_control() const { return in(LoopBackControl); }
  CountedLoopEndNode *loopexit() const;
  Node *init_trip() const;
  Node *stride() const;
  int   stride_con() const;
  bool  stride_is_con() const;
  Node *limit() const;
  Node *incr() const;
  Node *phi() const;

  // Match increment with optional truncation
  static Node* match_incr_with_optional_truncation(Node* expr, Node** trunc1, Node** trunc2, const TypeInt** trunc_type);

  // A 'main' loop has a pre-loop and a post-loop.  The 'main' loop
  // can run short a few iterations and may start a few iterations in.
  // It will be RCE'd and unrolled and aligned.

  // A following 'post' loop will run any remaining iterations.  Used
  // during Range Check Elimination, the 'post' loop will do any final
  // iterations with full checks.
  // Also used by Loop Unrolling, where the 'post' loop will do any epilog
  // iterations needed.  Basically, a 'post' loop can not profitably be
  // further unrolled or RCE'd.

  // A preceding 'pre' loop will run at least 1 iteration (to do peeling),
  // it may do under-flow checks for RCE and may do alignment iterations
  // so the following main loop 'knows' that it is striding down cache
  // lines.

  // A 'main' loop that is ONLY unrolled or peeled, never RCE'd or
  // Aligned, may be missing its pre-loop.
  int is_normal_loop   () const { return (_loop_flags&PreMainPostFlagsMask) == Normal; }
  int is_pre_loop      () const { return (_loop_flags&PreMainPostFlagsMask) == Pre;    }
  int is_main_loop     () const { return (_loop_flags&PreMainPostFlagsMask) == Main;   }
  int is_post_loop     () const { return (_loop_flags&PreMainPostFlagsMask) == Post;   }
  int is_reduction_loop() const { return (_loop_flags&HasReductions) == HasReductions; }
  int was_slp_analyzed () const { return (_loop_flags&WasSlpAnalyzed) == WasSlpAnalyzed; }
  int has_passed_slp   () const { return (_loop_flags&PassedSlpAnalysis) == PassedSlpAnalysis; }
  int do_unroll_only   () const { return (_loop_flags&DoUnrollOnly) == DoUnrollOnly; }
  int is_main_no_pre_loop() const { return _loop_flags & MainHasNoPreLoop; }
  int has_atomic_post_loop() const { return (_loop_flags & HasAtomicPostLoop) == HasAtomicPostLoop; }
  void set_main_no_pre_loop() { _loop_flags |= MainHasNoPreLoop; }

  int main_idx() const { return _main_idx; }


  void set_pre_loop  (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Pre ; _main_idx = main->_idx; }
  void set_main_loop (                     ) { assert(is_normal_loop(),""); _loop_flags |= Main;                         }
  void set_post_loop (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Post; _main_idx = main->_idx; }
  void set_normal_loop(                    ) { _loop_flags &= ~PreMainPostFlagsMask; }

  void set_trip_count(uint tc) { _trip_count = tc; }
  uint trip_count()            { return _trip_count; }

  bool has_exact_trip_count() const { return (_loop_flags & HasExactTripCount) != 0; }
  void set_exact_trip_count(uint tc) {
    _trip_count = tc;
    _loop_flags |= HasExactTripCount;
  }
  void set_nonexact_trip_count() {
    _loop_flags &= ~HasExactTripCount;
  }
  void set_notpassed_slp() {
    _loop_flags &= ~PassedSlpAnalysis;
  }

  void set_profile_trip_cnt(float ptc) { _profile_trip_cnt = ptc; }
  float profile_trip_cnt()             { return _profile_trip_cnt; }

  void double_unrolled_count() { _unrolled_count_log2++; }
  int  unrolled_count()        { return 1 << MIN2(_unrolled_count_log2, BitsPerInt-3); }

  void set_node_count_before_unroll(int ct)  { _node_count_before_unroll = ct; }
  int  node_count_before_unroll()            { return _node_count_before_unroll; }
  void set_slp_max_unroll(int unroll_factor) { _slp_maximum_unroll_factor = unroll_factor; }
  int  slp_max_unroll() const                { return _slp_maximum_unroll_factor; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CountedLoopEndNode-----------------------------
// CountedLoopEndNodes end simple trip counted loops.  They act much like
// IfNodes.
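//
// A sketch of the assumed canonical shape (data and control edges are mixed
// freely; the names are the node classes described above, 'init', 'stride'
// and 'limit' are the loop-invariant inputs):
//
//         entry                  incr = AddI(phi, stride)
//           |                    phi  = Phi(CountedLoop, init, incr)
//           v
//    CountedLoopNode <------------------------------+
//           |                                       |
//          ... loop body ...                        |
//           |                                       |
//    CountedLoopEndNode(Bool(CmpI(incr, limit)))    |
//         /          \                              |
//     IfFalse       IfTrue (loop-back control) -----+
//         |
//        exit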
class CountedLoopEndNode : public IfNode {
public:
  enum { TestControl, TestValue };

  CountedLoopEndNode( Node *control, Node *test, float prob, float cnt )
    : IfNode( control, test, prob, cnt) {
    init_class_id(Class_CountedLoopEnd);
  }
  virtual int Opcode() const;

  Node *cmp_node() const            { return (in(TestValue)->req() >=2) ? in(TestValue)->in(1) : NULL; }
  Node *incr() const                { Node *tmp = cmp_node(); return (tmp && tmp->req()==3) ? tmp->in(1) : NULL; }
  Node *limit() const               { Node *tmp = cmp_node(); return (tmp && tmp->req()==3) ? tmp->in(2) : NULL; }
  Node *stride() const              { Node *tmp = incr    (); return (tmp && tmp->req()==3) ? tmp->in(2) : NULL; }
  Node *init_trip() const           { Node *tmp = phi     (); return (tmp && tmp->req()==3) ? tmp->in(1) : NULL; }
  int stride_con() const;
  bool stride_is_con() const        { Node *tmp = stride(); return (tmp != NULL && tmp->is_Con()); }
  BoolTest::mask test_trip() const  { return in(TestValue)->as_Bool()->_test._test; }
  PhiNode *phi() const {
    Node *tmp = incr();
    if (tmp && tmp->req() == 3) {
      Node* phi = tmp->in(1);
      if (phi->is_Phi()) {
        return phi->as_Phi();
      }
    }
    return NULL;
  }
  CountedLoopNode *loopnode() const {
    // The CountedLoopNode that goes with this CountedLoopEndNode may
    // have been optimized out by the IGVN so be cautious with the
    // pattern matching on the graph
    PhiNode* iv_phi = phi();
    if (iv_phi == NULL) {
      return NULL;
    }
    Node *ln = iv_phi->in(0);
    if (ln->is_CountedLoop() && ln->as_CountedLoop()->loopexit() == this) {
      return (CountedLoopNode*)ln;
    }
    return NULL;
  }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


inline CountedLoopEndNode *CountedLoopNode::loopexit() const {
  Node *bc = back_control();
  if( bc == NULL ) return NULL;
  Node *le = bc->in(0);
  if( le->Opcode() != Op_CountedLoopEnd )
    return NULL;
  return (CountedLoopEndNode*)le;
}
inline Node *CountedLoopNode::init_trip() const { return loopexit() ? loopexit()->init_trip() : NULL; }
inline Node *CountedLoopNode::stride() const { return loopexit() ? loopexit()->stride() : NULL; }
inline int CountedLoopNode::stride_con() const { return loopexit() ? loopexit()->stride_con() : 0; }
inline bool CountedLoopNode::stride_is_con() const { return loopexit() && loopexit()->stride_is_con(); }
inline Node *CountedLoopNode::limit() const { return loopexit() ? loopexit()->limit() : NULL; }
inline Node *CountedLoopNode::incr() const { return loopexit() ? loopexit()->incr() : NULL; }
inline Node *CountedLoopNode::phi() const { return loopexit() ? loopexit()->phi() : NULL; }

//------------------------------LoopLimitNode-----------------------------
// Counted Loop limit node which represents exact final iterator value:
//   trip_count  = (limit - init_trip + stride - 1)/stride
//   final_value = trip_count * stride + init_trip
// For example, with init_trip = 0, limit = 10, stride = 3:
//   trip_count = (10 - 0 + 3 - 1)/3 = 4 and final_value = 4*3 + 0 = 12.
// Use HW instructions to calculate it when it can overflow in integer.
// Note, final_value should fit into integer since counted loop has
// limit check: limit <= max_int-stride.
class LoopLimitNode : public Node {
  enum { Init=1, Limit=2, Stride=3 };
public:
  LoopLimitNode( Compile* C, Node *init, Node *limit, Node *stride ) : Node(0,init,limit,stride) {
    // Put it on the Macro nodes list to optimize during macro nodes expansion.
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeInt::INT; }
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase);
};

// -----------------------------IdealLoopTree----------------------------------
class IdealLoopTree : public ResourceObj {
public:
  IdealLoopTree *_parent;       // Parent in loop tree
  IdealLoopTree *_next;         // Next sibling in loop tree
  IdealLoopTree *_child;        // First child in loop tree

  // The head-tail backedge defines the loop.
  // If tail is NULL then this loop has multiple backedges as part of the
  // same loop.  During cleanup I'll peel off the multiple backedges; merge
  // them at the loop bottom and flow 1 real backedge into the loop.
  Node *_head;                  // Head of loop
  Node *_tail;                  // Tail of loop
  inline Node *tail();          // Handle lazy update of _tail field
  PhaseIdealLoop* _phase;
  int _local_loop_unroll_limit;
  int _local_loop_unroll_factor;

  Node_List _body;              // Loop body for inner loops

  uint8_t _nest;                // Nesting depth
  uint8_t _irreducible:1,       // True if irreducible
          _has_call:1,          // True if has call safepoint
          _has_sfpt:1,          // True if has non-call safepoint
          _rce_candidate:1;     // True if candidate for range check elimination

  Node_List* _safepts;          // List of safepoints in this loop
  Node_List* _required_safept;  // An inner loop cannot delete these safepts;
  bool  _allow_optimizations;   // Allow loop optimizations

  IdealLoopTree( PhaseIdealLoop* phase, Node *head, Node *tail )
    : _parent(0), _next(0), _child(0),
      _head(head), _tail(tail),
      _phase(phase),
      _safepts(NULL),
      _required_safept(NULL),
      _allow_optimizations(true),
      _nest(0), _irreducible(0), _has_call(0), _has_sfpt(0), _rce_candidate(0),
      _local_loop_unroll_limit(0), _local_loop_unroll_factor(0)
  { }

  // Is 'l' a member of 'this'?
  bool is_member(const IdealLoopTree *l) const; // Test for nested membership

  // Set loop nesting depth.  Accumulate has_call bits.
  int set_nest( uint depth );

  // Split out multiple fall-in edges from the loop header.  Move them to a
  // private RegionNode before the loop.  This becomes the loop landing pad.
  void split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt );

  // Split out the outermost loop from this shared header.
  void split_outer_loop( PhaseIdealLoop *phase );

  // Merge all the backedges from the shared header into a private Region.
  // Feed that region as the one backedge to this loop.
  void merge_many_backedges( PhaseIdealLoop *phase );

  // Split shared headers and insert loop landing pads.
  // Insert a LoopNode to replace the RegionNode.
  // Returns TRUE if loop tree is structurally changed.
  bool beautify_loops( PhaseIdealLoop *phase );

  // Perform optimization to use the loop predicates for null checks and range checks.
  // Applies to any loop level (not just the innermost one)
  bool loop_predication( PhaseIdealLoop *phase);

  // Perform iteration-splitting on inner loops.  Split iterations to
  // avoid range checks or one-shot null checks.  Returns false if the
  // current round of loop opts should stop.
  bool iteration_split( PhaseIdealLoop *phase, Node_List &old_new );

  // Driver for various flavors of iteration splitting.  Returns false
  // if the current round of loop opts should stop.
  bool iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new );

  // Given dominators, try to find loops with calls that must always be
  // executed (call dominates loop tail).  These loops do not need non-call
  // safepoints (ncsfpt).
  void check_safepts(VectorSet &visited, Node_List &stack);

  // Allpaths backwards scan from loop tail, terminating each path at first safepoint
  // encountered.
  void allpaths_check_safepts(VectorSet &visited, Node_List &stack);

  // Remove safepoints from loop.  Optionally keeping one.
  void remove_safepoints(PhaseIdealLoop* phase, bool keep_one);

  // Convert to counted loops where possible
  void counted_loop( PhaseIdealLoop *phase );

  // Check for Node being a loop-breaking test
  Node *is_loop_exit(Node *iff) const;

  // Returns true if ctrl is executed on every complete iteration
  bool dominates_backedge(Node* ctrl);

  // Remove simplistic dead code from loop body
  void DCE_loop_body();

  // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
  // Replace with a 1-in-10 exit guess.
  void adjust_loop_exit_prob( PhaseIdealLoop *phase );

  // Return TRUE or FALSE if the loop should never be RCE'd or aligned.
  // Useful for unrolling loops with NO array accesses.
  bool policy_peel_only( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be unswitched -- clone
  // loop with an invariant test
  bool policy_unswitching( PhaseIdealLoop *phase ) const;

  // Micro-benchmark spamming.  Remove empty loops.
  bool policy_do_remove_empty_loop( PhaseIdealLoop *phase );

  // Convert one iteration loop into normal code.
  bool policy_do_one_iteration_loop( PhaseIdealLoop *phase );

  // Return TRUE or FALSE if the loop should be peeled or not.  Peel if we can
  // make some loop-invariant test (usually a null-check) happen before the
  // loop.
  bool policy_peeling( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be maximally unrolled.  Stash any
  // known trip count in the counted loop node.
  bool policy_maximally_unroll( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if
  // the loop is a CountedLoop and the body is small enough.
  bool policy_unroll(PhaseIdealLoop *phase);

  // Loop analyses to map to a maximal superword unrolling for vectorization.
  void policy_unroll_slp_analysis(CountedLoopNode *cl, PhaseIdealLoop *phase, int future_unroll_ct);

  // Return TRUE or FALSE if the loop should be range-check-eliminated.
  // Gather a list of IF tests that are dominated by iteration splitting;
  // also gather the end of the first split and the start of the 2nd split.
  bool policy_range_check( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be cache-line aligned.
  // Gather the expression that does the alignment.  Note that only
  // one array base can be aligned in a loop (unless the VM guarantees
  // mutual alignment).  Note that if we vectorize short memory ops
  // into longer memory ops, we may want to increase alignment.
  bool policy_align( PhaseIdealLoop *phase ) const;

  // Return TRUE if "iff" is a range check.
  bool is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const;

  // Compute loop exact trip count if possible
  void compute_exact_trip_count( PhaseIdealLoop *phase );

  // Compute loop trip count from profile data
  void compute_profile_trip_cnt( PhaseIdealLoop *phase );

  // Reassociate invariant expressions.
  void reassociate_invariants(PhaseIdealLoop *phase);
  // Reassociate invariant add and subtract expressions.
  Node* reassociate_add_sub(Node* n1, PhaseIdealLoop *phase);
  // Return nonzero index of invariant operand if invariant and variant
  // are combined with an Add or Sub. Helper for reassociate_invariants.
  int is_invariant_addition(Node* n, PhaseIdealLoop *phase);

  // Return true if n is invariant
  bool is_invariant(Node* n) const;

  // Put loop body on igvn work list
  void record_for_igvn();

  bool is_loop()    { return !_irreducible && _tail && !_tail->is_top(); }
  bool is_inner()   { return is_loop() && _child == NULL; }
  bool is_counted() { return is_loop() && _head != NULL && _head->is_CountedLoop(); }

  void remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *phase);

#ifndef PRODUCT
  void dump_head( ) const;              // Dump loop head only
  void dump() const;                    // Dump this loop recursively
  void verify_tree(IdealLoopTree *loop, const IdealLoopTree *parent) const;
#endif

};

class PostLoopInfo {
public:
  Node *new_main_exit;
  CountedLoopNode *post_head;

  PostLoopInfo() { init(); }

  void init() { new_main_exit = NULL; post_head = NULL; }
};

// -----------------------------PhaseIdealLoop---------------------------------
// Computes the mapping from Nodes to IdealLoopTrees.  Organizes IdealLoopTrees
// into a loop tree.  Drives the loop-based transformations on the ideal graph.
class PhaseIdealLoop : public PhaseTransform {
  friend class IdealLoopTree;
  friend class SuperWord;
  friend class CountedLoopReserveKit;

  // Pre-computed def-use info
  PhaseIterGVN &_igvn;

  // Head of loop tree
  IdealLoopTree *_ltree_root;

  // Array of pre-order numbers, plus post-visited bit.
  // ZERO for not pre-visited.  EVEN for pre-visited but not post-visited.
  // ODD for post-visited.  Other bits are the pre-order number.
  uint *_preorders;
  uint _max_preorder;

  const PhaseIdealLoop* _verify_me;
  bool _verify_only;

  // Allocate _preorders[] array
  void allocate_preorders() {
    _max_preorder = C->unique()+8;
    _preorders = NEW_RESOURCE_ARRAY(uint, _max_preorder);
    memset(_preorders, 0, sizeof(uint) * _max_preorder);
  }

  // Reallocate _preorders[] array
  void reallocate_preorders() {
    if ( _max_preorder < C->unique() ) {
      _preorders = REALLOC_RESOURCE_ARRAY(uint, _preorders, _max_preorder, C->unique());
      _max_preorder = C->unique();
    }
    memset(_preorders, 0, sizeof(uint) * _max_preorder);
  }

  // Check to grow _preorders[] array for the case when build_loop_tree_impl()
  // adds new nodes.
  void check_grow_preorders( ) {
    if ( _max_preorder < C->unique() ) {
      uint newsize = _max_preorder<<1;  // double size of array
      _preorders = REALLOC_RESOURCE_ARRAY(uint, _preorders, _max_preorder, newsize);
      memset(&_preorders[_max_preorder],0,sizeof(uint)*(newsize-_max_preorder));
      _max_preorder = newsize;
    }
  }
  // Check for pre-visited.  Zero for NOT visited; non-zero for visited.
  int is_visited( Node *n ) const { return _preorders[n->_idx]; }
  // Pre-order numbers are written to the Nodes array as low-bit-set values.
  void set_preorder_visited( Node *n, int pre_order ) {
    assert( !is_visited( n ), "already set" );
    _preorders[n->_idx] = (pre_order<<1);
  };
  // Return pre-order number.
  int get_preorder( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]>>1; }

  // Check for being post-visited.
  // Should be previsited already (checked with assert(is_visited(n))).
  int is_postvisited( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]&1; }

  // Mark as post visited
  void set_postvisited( Node *n ) { assert( !is_postvisited( n ), "" ); _preorders[n->_idx] |= 1; }

  // Set/get control node out.  Set lower bit to distinguish from IdealLoopTree
  // Returns true if "n" is a data node, false if it's a control node.
  bool has_ctrl( Node *n ) const { return ((intptr_t)_nodes[n->_idx]) & 1; }

  // clear out dead code after build_loop_late
  Node_List _deadlist;

  // Support for faster execution of get_late_ctrl()/dom_lca()
  // when a node has many uses and dominator depth is deep.
  Node_Array _dom_lca_tags;
  void   init_dom_lca_tags();
  void   clear_dom_lca_tags();

  // Helper for debugging bad dominance relationships
  bool verify_dominance(Node* n, Node* use, Node* LCA, Node* early);

  Node* compute_lca_of_uses(Node* n, Node* early, bool verify = false);

  // Inline wrapper for frequent cases:
  // 1) only one use
  // 2) a use is the same as the current LCA passed as 'n1'
  Node *dom_lca_for_get_late_ctrl( Node *lca, Node *n, Node *tag ) {
    assert( n->is_CFG(), "" );
    // Fast-path NULL lca
    if( lca != NULL && lca != n ) {
      assert( lca->is_CFG(), "" );
      // find LCA of all uses
      n = dom_lca_for_get_late_ctrl_internal( lca, n, tag );
    }
    return find_non_split_ctrl(n);
  }
  Node *dom_lca_for_get_late_ctrl_internal( Node *lca, Node *n, Node *tag );

  // Helper function for directing control inputs away from CFG split
  // points.
  Node *find_non_split_ctrl( Node *ctrl ) const {
    if (ctrl != NULL) {
      if (ctrl->is_MultiBranch()) {
        ctrl = ctrl->in(0);
      }
      assert(ctrl->is_CFG(), "CFG");
    }
    return ctrl;
  }

  bool cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop);

public:
  bool has_node( Node* n ) const {
    guarantee(n != NULL, "No Node.");
    return _nodes[n->_idx] != NULL;
  }
  // check if transform created new nodes that need _ctrl recorded
  Node *get_late_ctrl( Node *n, Node *early );
  Node *get_early_ctrl( Node *n );
  Node *get_early_ctrl_for_expensive(Node *n, Node* earliest);
  void set_early_ctrl( Node *n );
  void set_subtree_ctrl( Node *root );
  void set_ctrl( Node *n, Node *ctrl ) {
    assert( !has_node(n) || has_ctrl(n), "" );
    assert( ctrl->in(0), "cannot set dead control node" );
    assert( ctrl == find_non_split_ctrl(ctrl), "must set legal ctrl" );
    _nodes.map( n->_idx, (Node*)((intptr_t)ctrl + 1) );
  }
  // Set control and update loop membership
  void set_ctrl_and_loop(Node* n, Node* ctrl) {
    IdealLoopTree* old_loop = get_loop(get_ctrl(n));
    IdealLoopTree* new_loop = get_loop(ctrl);
    if (old_loop != new_loop) {
      if (old_loop->_child == NULL) old_loop->_body.yank(n);
      if (new_loop->_child == NULL) new_loop->_body.push(n);
    }
    set_ctrl(n, ctrl);
  }
  // Control nodes can be replaced or subsumed.  During this pass they
  // get their replacement Node in slot 1.  Instead of updating the block
  // location of all Nodes in the subsumed block, we lazily do it.  As we
  // pull such a subsumed block out of the array, we write back the final
  // correct block.
  Node *get_ctrl( Node *i ) {
    assert(has_node(i), "");
    Node *n = get_ctrl_no_update(i);
    _nodes.map( i->_idx, (Node*)((intptr_t)n + 1) );
    assert(has_node(i) && has_ctrl(i), "");
    assert(n == find_non_split_ctrl(n), "must return legal ctrl" );
    return n;
  }
  // true if CFG node d dominates CFG node n
  bool is_dominator(Node *d, Node *n);
  // return get_ctrl for a data node and self(n) for a CFG node
  Node* ctrl_or_self(Node* n) {
    if (has_ctrl(n))
      return get_ctrl(n);
    else {
      assert (n->is_CFG(), "must be a CFG node");
      return n;
    }
  }

private:
  Node *get_ctrl_no_update_helper(Node *i) const {
    assert(has_ctrl(i), "should be control, not loop");
    return (Node*)(((intptr_t)_nodes[i->_idx]) & ~1);
  }

  Node *get_ctrl_no_update(Node *i) const {
    assert( has_ctrl(i), "" );
    Node *n = get_ctrl_no_update_helper(i);
    if (!n->in(0)) {
      // Skip dead CFG nodes
      do {
        n = get_ctrl_no_update_helper(n);
      } while (!n->in(0));
      n = find_non_split_ctrl(n);
    }
    return n;
  }

  // Check for loop being set
  // "n" must be a control node.  Returns true if "n" is known to be in a loop.
  bool has_loop( Node *n ) const {
    assert(!has_node(n) || !has_ctrl(n), "");
    return has_node(n);
  }
  // Set loop
  void set_loop( Node *n, IdealLoopTree *loop ) {
    _nodes.map(n->_idx, (Node*)loop);
  }
  // Lazy-dazy update of 'get_ctrl' and 'idom_at' mechanisms.  Replace
  // the 'old_node' with 'new_node'.  Kill old-node.  Add a reference
  // from old_node to new_node to support the lazy update.  Reference
  // replaces loop reference, since that is not needed for dead node.
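  //
  // A summary sketch of the side-array encoding implied by has_ctrl(),
  // set_ctrl(), set_loop() and lazy_update() (not an additional invariant):
  //   _nodes[idx] == (IdealLoopTree*)loop               -- CFG node: its loop, low bit clear
  //   _nodes[idx] == (Node*)((intptr_t)ctrl     + 1)    -- data node: its control, low bit set
  //   _nodes[idx] == (Node*)((intptr_t)new_node + 1)    -- dead node: forwarding pointer set by lazy_update()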
public:
  void lazy_update(Node *old_node, Node *new_node) {
    assert(old_node != new_node, "no cycles please");
    // Re-use the side array slot for this node to provide the
    // forwarding pointer.
    _nodes.map(old_node->_idx, (Node*)((intptr_t)new_node + 1));
  }
  void lazy_replace(Node *old_node, Node *new_node) {
    _igvn.replace_node(old_node, new_node);
    lazy_update(old_node, new_node);
  }

private:

  // Place 'n' in some loop nest, where 'n' is a CFG node
  void build_loop_tree();
  int build_loop_tree_impl( Node *n, int pre_order );
  // Insert loop into the existing loop tree.  'innermost' is a leaf of the
  // loop tree, not the root.
  IdealLoopTree *sort( IdealLoopTree *loop, IdealLoopTree *innermost );

  // Place Data nodes in some loop nest
  void build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
  void build_loop_late ( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
  void build_loop_late_post ( Node* n );

  // Array of immediate dominance info for each CFG node indexed by node idx
private:
  uint _idom_size;
  Node **_idom;                  // Array of immediate dominators
  uint *_dom_depth;              // Used for fast LCA test
  GrowableArray<uint>* _dom_stk; // For recomputation of dom depth

  Node* idom_no_update(Node* d) const {
    assert(d->_idx < _idom_size, "oob");
    Node* n = _idom[d->_idx];
    assert(n != NULL,"Bad immediate dominator info.");
    while (n->in(0) == NULL) {   // Skip dead CFG nodes
      //n = n->in(1);
      n = (Node*)(((intptr_t)_nodes[n->_idx]) & ~1);
      assert(n != NULL,"Bad immediate dominator info.");
    }
    return n;
  }
  Node *idom(Node* d) const {
    uint didx = d->_idx;
    Node *n = idom_no_update(d);
    _idom[didx] = n;             // Lazily remove dead CFG nodes from table.
    return n;
  }
  uint dom_depth(Node* d) const {
    guarantee(d != NULL, "Null dominator info.");
    guarantee(d->_idx < _idom_size, "");
    return _dom_depth[d->_idx];
  }
  void set_idom(Node* d, Node* n, uint dom_depth);
  // Locally compute IDOM using dom_lca call
  Node *compute_idom( Node *region ) const;
  // Recompute dom_depth
  void recompute_dom_depth();

  // Is safept not required by an outer loop?
  bool is_deleteable_safept(Node* sfpt);

  // Replace parallel induction variable (parallel to trip counter)
  void replace_parallel_iv(IdealLoopTree *loop);

  // Perform verification that the graph is valid.
  PhaseIdealLoop( PhaseIterGVN &igvn) :
    PhaseTransform(Ideal_Loop),
    _igvn(igvn),
    _dom_lca_tags(arena()), // Thread::resource_area
    _verify_me(NULL),
    _verify_only(true) {
    build_and_optimize(false, false);
  }

  // build the loop tree and perform any requested optimizations
  void build_and_optimize(bool do_split_if, bool skip_loop_opts);

public:
  // Dominators for the sea of nodes
  void Dominators();
  Node *dom_lca( Node *n1, Node *n2 ) const {
    return find_non_split_ctrl(dom_lca_internal(n1, n2));
  }
  Node *dom_lca_internal( Node *n1, Node *n2 ) const;

  // Compute the Ideal Node to Loop mapping
  PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs, bool skip_loop_opts = false) :
    PhaseTransform(Ideal_Loop),
    _igvn(igvn),
    _dom_lca_tags(arena()), // Thread::resource_area
    _verify_me(NULL),
    _verify_only(false) {
    build_and_optimize(do_split_ifs, skip_loop_opts);
  }

  // Verify that verify_me made the same decisions as a fresh run.
  PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify_me) :
    PhaseTransform(Ideal_Loop),
    _igvn(igvn),
    _dom_lca_tags(arena()), // Thread::resource_area
    _verify_me(verify_me),
    _verify_only(false) {
    build_and_optimize(false, false);
  }

  // Build and verify the loop tree without modifying the graph.  This
  // is useful to verify that all inputs properly dominate their uses.
  static void verify(PhaseIterGVN& igvn) {
#ifdef ASSERT
    PhaseIdealLoop v(igvn);
#endif
  }

  // True if the method has at least 1 irreducible loop
  bool _has_irreducible_loops;

  // Per-Node transform
  virtual Node *transform( Node *a_node ) { return 0; }

  bool is_counted_loop( Node *x, IdealLoopTree *loop );

  Node* exact_limit( IdealLoopTree *loop );

  // Return a post-walked LoopNode
  IdealLoopTree *get_loop( Node *n ) const {
    // Dead nodes have no loop, so return the top level loop instead
    if (!has_node(n))  return _ltree_root;
    assert(!has_ctrl(n), "");
    return (IdealLoopTree*)_nodes[n->_idx];
  }

  // Is 'n' a (nested) member of 'loop'?
  int is_member( const IdealLoopTree *loop, Node *n ) const {
    return loop->is_member(get_loop(n)); }

  // This is the basic building block of the loop optimizations.  It clones an
  // entire loop body.  It makes an old_new loop body mapping; with this
  // mapping you can find the new-loop equivalent to an old-loop node.  All
  // new-loop nodes are exactly equal to their old-loop counterparts, all
  // edges are the same.  All exits from the old-loop now have a RegionNode
  // that merges the equivalent new-loop path.  This is true even for the
  // normal "loop-exit" condition.  All uses of loop-invariant old-loop values
  // now come from (one or more) Phis that merge their new-loop equivalents.
  // Parameter side_by_side_idom:
  //   When side_by_side_idom is NULL, the dominator tree is constructed for
  //   the clone loop to dominate the original.  Used in construction of
  //   pre-main-post loop sequence.
  //   When nonnull, the clone and original are side-by-side, both are
  //   dominated by the passed in side_by_side_idom node.  Used in
  //   construction of unswitched loops.
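  //
  // A sketch of how callers are expected to use the mapping (assuming the
  // Node_List is indexed by the old node's _idx; 'orig' is illustrative):
  //   Node_List old_new;
  //   clone_loop(loop, old_new, dom_depth(loop->_head));
  //   Node* copy = old_new[orig->_idx];   // new-loop counterpart of old-loop node 'orig'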
  void clone_loop( IdealLoopTree *loop, Node_List &old_new, int dom_depth,
                   Node* side_by_side_idom = NULL);

  // If we got the effect of peeling, either by actually peeling or by
  // making a pre-loop which must execute at least once, we can remove
  // all loop-invariant dominated tests in the main body.
  void peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new );

  // Generate code to do a loop peel for the given loop (and body).
  // old_new is a temp array.
  void do_peeling( IdealLoopTree *loop, Node_List &old_new );

  // Add pre and post loops around the given loop.  These loops are used
  // during RCE, unrolling and aligning loops.
  void insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only );

  // Add post loop after the given loop.
  void insert_post_loop( IdealLoopTree *loop, Node_List &old_new,
                         CountedLoopNode *main_head, CountedLoopEndNode *main_end,
                         Node *incr, Node *limit, PostLoopInfo &post_loop_info );

  // Add an RCE'd post loop which we will multi-version and adapt for run-time test path usage
  void insert_scalar_rced_post_loop( IdealLoopTree *loop, Node_List &old_new );

  // Add a vector post loop between a vector main loop and the current post loop
  void insert_vector_post_loop(IdealLoopTree *loop, Node_List &old_new);
  // If Node n lives in the back_ctrl block, we clone a private version of n
  // in preheader_ctrl block and return that, otherwise return n.
  Node *clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones );

  // Take steps to maximally unroll the loop.  Peel any odd iterations, then
  // unroll to do double iterations.  The next round of major loop transforms
  // will repeat till the doubled loop body does all remaining iterations in 1
  // pass.
  void do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new );

  // Unroll the loop body one step - make each trip do 2 iterations.
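  // A source-level sketch of what one unroll step achieves ('body' and
  // 'adjusted_limit' are illustrative names, not actual nodes):
  //   for (int i = init; i < limit; i += stride)            { body(i); }
  // becomes roughly
  //   for (int i = init; i < adjusted_limit; i += 2*stride) { body(i); body(i + stride); }
  // with adjust_min_trip governing how the trip guard/limit is adjusted.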
  void do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip );

  // Mark vector reduction candidates before loop unrolling
  void mark_reductions( IdealLoopTree *loop );

  // Return true if exp is a constant times an induction var
  bool is_scaled_iv(Node* exp, Node* iv, int* p_scale);

  // Return true if exp is a scaled induction var plus (or minus) constant
  bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0);

  // Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted
  ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
                                        Deoptimization::DeoptReason reason,
                                        int opcode);
  void register_control(Node* n, IdealLoopTree *loop, Node* pred);

  // Clone loop predicates to cloned loops (peeled, unswitched)
  static ProjNode* clone_predicate(ProjNode* predicate_proj, Node* new_entry,
                                   Deoptimization::DeoptReason reason,
                                   PhaseIdealLoop* loop_phase,
                                   PhaseIterGVN* igvn);

  static Node* clone_loop_predicates(Node* old_entry, Node* new_entry,
                                     bool clone_limit_check,
                                     PhaseIdealLoop* loop_phase,
                                     PhaseIterGVN* igvn);
  Node* clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check);

  static Node* skip_loop_predicates(Node* entry);

  // Find a good location to insert a predicate
  static ProjNode* find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason);
  // Find a predicate
  static Node* find_predicate(Node* entry);
  // Construct a range check for a predicate if
  BoolNode* rc_predicate(IdealLoopTree *loop, Node* ctrl,
                         int scale, Node* offset,
                         Node* init, Node* limit, Node* stride,
                         Node* range, bool upper);

  // Implementation of the loop predication to promote checks outside the loop
  bool loop_predication_impl(IdealLoopTree *loop);

  // Helper function to collect the predicates, for eliminating the useless ones
  void collect_potentially_useful_predicates(IdealLoopTree *loop, Unique_Node_List &predicate_opaque1);
  void eliminate_useless_predicates();

  // Change the control input of expensive nodes to allow commoning by
  // IGVN when it is guaranteed to not result in a more frequent
  // execution of the expensive node.  Return true if progress.
  bool process_expensive_nodes();

  // Check whether node has become unreachable
  bool is_node_unreachable(Node *n) const {
    return !has_node(n) || n->is_unreachable(_igvn);
  }

  // Eliminate range-checks and other trip-counter vs loop-invariant tests.
  void do_range_check( IdealLoopTree *loop, Node_List &old_new );

  // Check to see if do_range_check(...) cleaned the main loop of range-checks
  void has_range_checks(IdealLoopTree *loop);

  // Process post loops which have range checks and try to build a multi-version
  // guard to safely determine if we can execute the post loop which was RCE'd.
  bool multi_version_post_loops(IdealLoopTree *rce_loop, IdealLoopTree *legacy_loop);

  // Cause the RCE'd post loop to be optimized away; this happens if we cannot
  // complete multiversioning.
  void poison_rce_post_loop(IdealLoopTree *rce_loop);

  // Create a slow version of the loop by cloning the loop
  // and inserting an if to select fast-slow versions.
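  // A sketch of the assumed resulting shape (compare the reserve-copy
  // diagram before create_reserve_version_of_loop below):
  //
  //   ...entry --> If(run-time selector) --> one projection   --> original (fast) loop
  //                                      --> other projection --> cloned   (slow) loop
  //
  // where the selector is a condition that picks the version at run time.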
  ProjNode* create_slow_version_of_loop(IdealLoopTree *loop,
                                        Node_List &old_new,
                                        int opcode);

  // Clone a loop and return the clone head (clone_loop_head).
  // Added nodes include int(1), int(0) - disconnected, If, IfTrue, IfFalse.
  // This routine was created for usage in CountedLoopReserveKit.
  //
  //    int(1) -> If -> IfTrue -> original_loop_head
  //              |
  //              V
  //           IfFalse -> clone_loop_head (returned by this function)
  //
  LoopNode* create_reserve_version_of_loop(IdealLoopTree *loop, CountedLoopReserveKit* lk);
  // Clone loop with an invariant test (that does not exit) and
  // insert a clone of the test that selects which version to
  // execute.
  void do_unswitching (IdealLoopTree *loop, Node_List &old_new);

  // Find candidate "if" for unswitching
  IfNode* find_unswitching_candidate(const IdealLoopTree *loop) const;

  // Range Check Elimination uses this function!
  // Constrain the main loop iterations so the affine function:
  //    low_limit <= scale_con * I + offset < upper_limit
  // always holds true.  That is, either increase the number of iterations in
  // the pre-loop or the post-loop until the condition holds true in the main
  // loop.  Scale_con, offset and limit are all loop invariant.
  void add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit );
  // Helper function for add_constraint().
  Node* adjust_limit( int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl );

  // Partially peel loop up through last_peel node.
  bool partial_peel( IdealLoopTree *loop, Node_List &old_new );

  // Create a scheduled list of nodes control dependent on ctrl set.
  void scheduled_nodelist( IdealLoopTree *loop, VectorSet& ctrl, Node_List &sched );
  // Has a use in the vector set
  bool has_use_in_set( Node* n, VectorSet& vset );
  // Has use internal to the vector set (ie. not in a phi at the loop head)
  bool has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop );
  // clone "n" for uses that are outside of loop
  int  clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist );
  // clone "n" for special uses that are in the not_peeled region
  void clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
                                          VectorSet& not_peel, Node_List& sink_list, Node_List& worklist );
  // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
  void insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp );
#ifdef ASSERT
  // Validate the loop partition sets: peel and not_peel
  bool is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list, VectorSet& not_peel );
  // Ensure that uses outside of loop are of the right form
  bool is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list,
                                 uint orig_exit_idx, uint clone_exit_idx);
  bool is_valid_clone_loop_exit_use( IdealLoopTree *loop, Node* use, uint exit_idx);
#endif

  // Returns a nonzero constant stride if the if-node is a possible iv test (otherwise returns zero).
  int stride_of_possible_iv( Node* iff );
  bool is_possible_iv_test( Node* iff ) { return stride_of_possible_iv(iff) != 0; }
  // Return the (unique) control output node that's in the loop (if it exists.)
  Node* stay_in_loop( Node* n, IdealLoopTree *loop);
  // Insert a signed compare loop exit cloned from an unsigned compare.
  IfNode* insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *loop);
  void remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop);
  // Utility to register node "n" with PhaseIdealLoop
  void register_node(Node* n, IdealLoopTree *loop, Node* pred, int ddepth);
  // Utility to create an if-projection
  ProjNode* proj_clone(ProjNode* p, IfNode* iff);
  // Force the iff control output to be the live_proj
  Node* short_circuit_if(IfNode* iff, ProjNode* live_proj);
  // Insert a region before an if projection
  RegionNode* insert_region_before_proj(ProjNode* proj);
  // Insert a new if before an if projection
  ProjNode* insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj);

  // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
  // "Nearly" because all Nodes have been cloned from the original in the loop,
  // but the fall-in edges to the Cmp are different.  Clone bool/Cmp pairs
  // through the Phi recursively, and return a Bool.
  BoolNode *clone_iff( PhiNode *phi, IdealLoopTree *loop );
  CmpNode *clone_bool( PhiNode *phi, IdealLoopTree *loop );


  // Rework addressing expressions to get the most loop-invariant stuff
  // moved out.  We'd like to do all associative operators, but it's especially
  // important (common) to do address expressions.
  Node *remix_address_expressions( Node *n );

  // Attempt to use a conditional move instead of a phi/branch
  Node *conditional_move( Node *n );

  // Reorganize offset computations to lower register pressure.
  // Mostly prevent loop-fallout uses of the pre-incremented trip counter
  // (which are then alive with the post-incremented trip counter
  // forcing an extra register move)
  void reorg_offsets( IdealLoopTree *loop );

  // Check for aggressive application of 'split-if' optimization,
  // using basic block level info.
  void split_if_with_blocks     ( VectorSet &visited, Node_Stack &nstack );
  Node *split_if_with_blocks_pre ( Node *n );
  void split_if_with_blocks_post( Node *n );
  Node *has_local_phi_input( Node *n );
  // Mark an IfNode as being dominated by a prior test,
  // without actually altering the CFG (and hence IDOM info).
  void dominated_by( Node *prevdom, Node *iff, bool flip = false, bool exclude_loop_predicate = false );

  // Split Node 'n' through merge point
  Node *split_thru_region( Node *n, Node *region );
  // Split Node 'n' through merge point if there is enough win.
  Node *split_thru_phi( Node *n, Node *region, int policy );
  // Found an If getting its condition-code input from a Phi in the
  // same block.  Split thru the Region.
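  // A sketch of the assumed transformation (p1/p2, v1/v2 and k are illustrative):
  //
  //   before:   Region(p1, p2)    Phi(Region, v1, v2)
  //                  \                 |
  //                   If(Bool(Cmp(Phi, k)))
  //
  //   after:    each predecessor path p1/p2 tests its own Cmp(v1, k)/Cmp(v2, k)
  //             with its own If, and a new Region merges the resulting control,
  //             so the original Phi and If can go away.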
  void do_split_if( Node *iff );

  // Conversion of fill/copy patterns into intrinsic versions
  bool do_intrinsify_fill();
  bool intrinsify_fill(IdealLoopTree* lpt);
  bool match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
                       Node*& shift, Node*& offset);

private:
  // Return a type based on condition control flow
  const TypeInt* filtered_type( Node *n, Node* n_ctrl);
  const TypeInt* filtered_type( Node *n ) { return filtered_type(n, NULL); }
  // Helpers for filtered type
  const TypeInt* filtered_type_from_dominators( Node* val, Node *val_ctrl);

  // Helper functions
  Node *spinup( Node *iff, Node *new_false, Node *new_true, Node *region, Node *phi, small_cache *cache );
  Node *find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true );
  void handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true );
  bool split_up( Node *n, Node *blk1, Node *blk2 );
  void sink_use( Node *use, Node *post_loop );
  Node *place_near_use( Node *useblock ) const;
  Node* try_move_store_before_loop(Node* n, Node *n_ctrl);
  void try_move_store_after_loop(Node* n);
  bool identical_backtoback_ifs(Node *n);
  bool can_split_if(Node *n_ctrl);

  bool _created_loop_node;
public:
  void set_created_loop_node() { _created_loop_node = true; }
  bool created_loop_node()     { return _created_loop_node; }
  void register_new_node( Node *n, Node *blk );

#ifdef ASSERT
  void dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA);
#endif

#ifndef PRODUCT
  void dump( ) const;
  void dump( IdealLoopTree *loop, uint rpo_idx, Node_List &rpo_list ) const;
  void rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const;
  void verify() const;          // Major slow  :-)
  void verify_compare( Node *n, const PhaseIdealLoop *loop_verify, VectorSet &visited ) const;
  IdealLoopTree *get_loop_idx(Node* n) const {
    // Dead nodes have no loop, so return the top level loop instead
    return _nodes[n->_idx] ? (IdealLoopTree*)_nodes[n->_idx] : _ltree_root;
  }
  // Print some stats
  static void print_statistics();
  static int _loop_invokes;     // Count of PhaseIdealLoop invokes
  static int _loop_work;        // Sum of PhaseIdealLoop x _unique
#endif
};

// This kit may be used for making a reserved copy of a loop before this loop
// undergoes non-reversible changes.
//
// Function create_reserve() creates a reserved copy (clone) of the loop.
// The reserved copy is created by calling
// PhaseIdealLoop::create_reserve_version_of_loop - see there how
// the original and reserved loops are connected in the outer graph.
// If create_reserve succeeded, it returns 'true' and _has_reserved is set to 'true'.
//
// By default the reserved copy (clone) of the loop is created as dead code - it is
// dominated in the outer loop by this node chain:
//   intcon(1)->If->IfFalse->reserved_copy.
// The original loop is dominated by the same node chain but IfTrue projection:
//   intcon(1)->If->IfTrue->original_loop.
//
// In this implementation of CountedLoopReserveKit the ctor calls create_reserve()
// and the dtor checks the _use_new value.
// If _use_new == false, the dtor "switches" control to the reserved copy of the
// loop by simply replacing the intcon(1) node with intcon(0).
//
// Here is a proposed example of usage (see also SuperWord::output in superword.cpp).
//
// void CountedLoopReserveKit_example()
// {
//    CountedLoopReserveKit lrk(phase, lpt, DoReserveCopy = true); // create local object
//    if (DoReserveCopy && !lrk.has_reserved()) {
//      return; // failed to create reserved loop copy
//    }
//    ...
//    // something is wrong, switch to original loop
//    if (something_is_wrong) return; // ~CountedLoopReserveKit makes the switch
//    ...
//    // everything worked ok, return with the newly modified loop
//    lrk.use_new();
//    return; // ~CountedLoopReserveKit does nothing once use_new() was called
// }
//
// Keep in mind that, by default, if create_reserve() is not followed by use_new()
// the dtor will "switch to the original" loop.
// NOTE: If you modify outside of the original loop, this class is no help.
//
class CountedLoopReserveKit {
  private:
    PhaseIdealLoop* _phase;
    IdealLoopTree*  _lpt;
    LoopNode*       _lp;
    IfNode*         _iff;
    LoopNode*       _lp_reserved;
    bool            _has_reserved;
    bool            _use_new;
    const bool      _active; // may be set to false in ctor, then the object is dummy

  public:
    CountedLoopReserveKit(PhaseIdealLoop* phase, IdealLoopTree *loop, bool active);
    ~CountedLoopReserveKit();
    void use_new()                {_use_new = true;}
    void set_iff(IfNode* x)       {_iff = x;}
    bool has_reserved()     const { return _active && _has_reserved;}
  private:
    bool create_reserve();
};// class CountedLoopReserveKit

inline Node* IdealLoopTree::tail() {
  // Handle lazy update of _tail field
  Node *n = _tail;
  //while( !n->in(0) )  // Skip dead CFG nodes
    //n = n->in(1);
  if (n->in(0) == NULL)
    n = _phase->get_ctrl(n);
  _tail = n;
  return n;
}


// Iterate over the loop tree using a preorder, left-to-right traversal.
//
// Example that visits all counted loops from within PhaseIdealLoop
//
//  for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
//    IdealLoopTree* lpt = iter.current();
//    if (!lpt->is_counted()) continue;
//    ...
class LoopTreeIterator : public StackObj {
private:
  IdealLoopTree* _root;
  IdealLoopTree* _curnt;

public:
  LoopTreeIterator(IdealLoopTree* root) : _root(root), _curnt(root) {}

  bool done() { return _curnt == NULL; }       // Finished iterating?

  void next();                                 // Advance to next loop tree

  IdealLoopTree* current() { return _curnt; }  // Return current value of iterator.
};

#endif // SHARE_VM_OPTO_LOOPNODE_HPP