45 //
46 // I D E A L I Z E D L O O P S
47 //
48 // Idealized loops are the set of loops I perform more interesting
49 // transformations on, beyond simple hoisting.
50
51 //------------------------------LoopNode---------------------------------------
52 // Simple loop header. Fall in path on left, loop-back path on right.
53 class LoopNode : public RegionNode {
54 // Size is bigger to hold the flags. However, the flags do not change
55 // the semantics, so they do not appear in the hash & cmp functions.
56 virtual uint size_of() const { return sizeof(*this); }
57 protected:
58 short _loop_flags;
59 // Names for flag bitfields
60 enum { Normal=0, Pre=1, Main=2, Post=3, PreMainPostFlagsMask=3,
61 MainHasNoPreLoop=4,
62 HasExactTripCount=8,
63 InnerLoop=16,
64 PartialPeelLoop=32,
65 PartialPeelFailed=64,
66 HasReductions=128,
67 PassedSlpAnalysis=256 };
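  // Added commentary (not in the original source): the low two bits of
  // _loop_flags hold the Normal/Pre/Main/Post state selected through
  // PreMainPostFlagsMask, while the remaining enum values are independent
  // single-bit flags.  A hypothetical combination:
  //
  //   short f = 0;
  //   f |= Main;                              // a 'main' loop
  //   f |= HasReductions | PassedSlpAnalysis; // independent bits
  //   bool is_main = (f & PreMainPostFlagsMask) == Main;  // true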
68 char _unswitch_count;
69 enum { _unswitch_max=3 };
70
71 public:
72 // Names for edge indices
73 enum { Self=0, EntryControl, LoopBackControl };
74
75 int is_inner_loop() const { return _loop_flags & InnerLoop; }
76 void set_inner_loop() { _loop_flags |= InnerLoop; }
77
78 int is_partial_peel_loop() const { return _loop_flags & PartialPeelLoop; }
79 void set_partial_peel_loop() { _loop_flags |= PartialPeelLoop; }
80 int partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; }
81 void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; }
82 void mark_has_reductions() { _loop_flags |= HasReductions; }
83 void mark_passed_slp() { _loop_flags |= PassedSlpAnalysis; }
84
85 int unswitch_max() { return _unswitch_max; }
86 int unswitch_count() { return _unswitch_count; }
87 void set_unswitch_count(int val) {
88 assert (val <= unswitch_max(), "too many unswitches");
89 _unswitch_count = val;
90 }
91
92 LoopNode( Node *entry, Node *backedge ) : RegionNode(3), _loop_flags(0), _unswitch_count(0) {
93 init_class_id(Class_Loop);
94 init_req(EntryControl, entry);
95 init_req(LoopBackControl, backedge);
96 }
97
98 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
99 virtual int Opcode() const;
100 bool can_be_counted_loop(PhaseTransform* phase) const {
101 return req() == 3 && in(0) != NULL &&
102 in(1) != NULL && phase->type(in(1)) != Type::TOP &&
103 in(2) != NULL && phase->type(in(2)) != Type::TOP;
142 // the semantics, so they do not appear in the hash & cmp functions.
143 virtual uint size_of() const { return sizeof(*this); }
144
145 // For Pre- and Post-loops during debugging ONLY, this holds the index of
146 // the Main CountedLoop. Used to assert that we understand the graph shape.
147 node_idx_t _main_idx;
148
149 // Known trip count calculated by compute_exact_trip_count()
150 uint _trip_count;
151
152 // Expected trip count from profile data
153 float _profile_trip_cnt;
154
155 // Log2 of original loop bodies in unrolled loop
156 int _unrolled_count_log2;
157
158 // Node count prior to last unrolling - used to decide if
159 // unroll,optimize,unroll,optimize,... is making progress
160 int _node_count_before_unroll;
161
162 // If SLP analysis is performed we record the maximum
163 // vector-mapped unroll factor here
164 int _slp_maximum_unroll_factor;
165
166 public:
167 CountedLoopNode( Node *entry, Node *backedge )
168 : LoopNode(entry, backedge), _main_idx(0), _trip_count(max_juint),
169 _profile_trip_cnt(COUNT_UNKNOWN), _unrolled_count_log2(0),
170 _node_count_before_unroll(0), _slp_maximum_unroll_factor(0) {
171 init_class_id(Class_CountedLoop);
172 // Initialize _trip_count to the largest possible value.
173 // Will be reset (lower) if the loop's trip count is known.
174 }
175
176 virtual int Opcode() const;
177 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
178
179 Node *init_control() const { return in(EntryControl); }
180 Node *back_control() const { return in(LoopBackControl); }
181 CountedLoopEndNode *loopexit() const;
182 Node *init_trip() const;
183 Node *stride() const;
184 int stride_con() const;
185 bool stride_is_con() const;
190 // Match increment with optional truncation
191 static Node* match_incr_with_optional_truncation(Node* expr, Node** trunc1, Node** trunc2, const TypeInt** trunc_type);
192
193 // A 'main' loop has a pre-loop and a post-loop. The 'main' loop
194 // can run short a few iterations and may start a few iterations in.
195 // It will be RCE'd and unrolled and aligned.
196
197 // A following 'post' loop will run any remaining iterations. Used
198 // during Range Check Elimination, the 'post' loop will do any final
199 // iterations with full checks. Also used by Loop Unrolling, where
200 // the 'post' loop will do any epilog iterations needed. Basically,
201 // a 'post' loop cannot profitably be further unrolled or RCE'd.
202
203 // A preceding 'pre' loop will run at least 1 iteration (to do peeling);
204 // it may do under-flow checks for RCE and may do alignment iterations
205 // so the following main loop 'knows' that it is striding down cache
206 // lines.
207
208 // A 'main' loop that is ONLY unrolled or peeled, never RCE'd or
209 // Aligned, may be missing its pre-loop.
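  // Illustrative sketch (added commentary, not part of the original source):
  // conceptually, the pre/main/post arrangement described above turns a single
  // counted loop such as
  //
  //   for (int i = 0; i < n; i++) a[i] = b[i];
  //
  // into roughly the following shape, where pre_limit and main_limit are
  // hypothetical names and the real bounds logic is more involved:
  //
  //   int i = 0;
  //   for (; i < pre_limit; i++)     a[i] = b[i];   // 'pre': peeling/alignment
  //   for (; i < main_limit; i += 4) {              // 'main': unrolled, RCE'd
  //     a[i] = b[i]; a[i+1] = b[i+1]; a[i+2] = b[i+2]; a[i+3] = b[i+3];
  //   }
  //   for (; i < n; i++)             a[i] = b[i];   // 'post': leftover iterations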
210 int is_normal_loop () const { return (_loop_flags&PreMainPostFlagsMask) == Normal; }
211 int is_pre_loop () const { return (_loop_flags&PreMainPostFlagsMask) == Pre; }
212 int is_main_loop () const { return (_loop_flags&PreMainPostFlagsMask) == Main; }
213 int is_post_loop () const { return (_loop_flags&PreMainPostFlagsMask) == Post; }
214 int is_reduction_loop() const { return (_loop_flags&HasReductions) == HasReductions; }
215 int has_passed_slp () const { return (_loop_flags&PassedSlpAnalysis) == PassedSlpAnalysis; }
216 int is_main_no_pre_loop() const { return _loop_flags & MainHasNoPreLoop; }
217 void set_main_no_pre_loop() { _loop_flags |= MainHasNoPreLoop; }
218
219 int main_idx() const { return _main_idx; }
220
221
222 void set_pre_loop (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Pre ; _main_idx = main->_idx; }
223 void set_main_loop ( ) { assert(is_normal_loop(),""); _loop_flags |= Main; }
224 void set_post_loop (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Post; _main_idx = main->_idx; }
225 void set_normal_loop( ) { _loop_flags &= ~PreMainPostFlagsMask; }
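  // Illustrative usage (added commentary, not in the original source): when
  // iteration splitting clones a counted loop, the three resulting headers are
  // tagged roughly like this (pre_head/main_head/post_head are hypothetical
  // names), with the pre- and post-loop remembering the main loop's node index:
  //
  //   pre_head ->set_pre_loop(main_head);
  //   main_head->set_main_loop();
  //   post_head->set_post_loop(main_head);
  //
  // Each setter asserts the header is still a 'normal' loop, so a header is
  // tagged at most once.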
226
227 void set_trip_count(uint tc) { _trip_count = tc; }
228 uint trip_count() { return _trip_count; }
229
230 bool has_exact_trip_count() const { return (_loop_flags & HasExactTripCount) != 0; }
231 void set_exact_trip_count(uint tc) {
232 _trip_count = tc;
233 _loop_flags |= HasExactTripCount;
234 }
235 void set_nonexact_trip_count() {
236 _loop_flags &= ~HasExactTripCount;
237 }
238
239 void set_profile_trip_cnt(float ptc) { _profile_trip_cnt = ptc; }
240 float profile_trip_cnt() { return _profile_trip_cnt; }
241
242 void double_unrolled_count() { _unrolled_count_log2++; }
243 int unrolled_count() { return 1 << MIN2(_unrolled_count_log2, BitsPerInt-3); }
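  // Added commentary (not in the original source): _unrolled_count_log2 counts
  // doublings, so after two calls to double_unrolled_count() the method above
  // returns 1 << 2 == 4.  The MIN2 clamp at BitsPerInt-3 (29 for 32-bit ints)
  // keeps the shift safely below the width of an int.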
244
245 void set_node_count_before_unroll(int ct) { _node_count_before_unroll = ct; }
246 int node_count_before_unroll() { return _node_count_before_unroll; }
247 void set_slp_max_unroll(int unroll_factor) { _slp_maximum_unroll_factor = unroll_factor; }
248 int slp_max_unroll() { return _slp_maximum_unroll_factor; }
249
250 #ifndef PRODUCT
251 virtual void dump_spec(outputStream *st) const;
252 #endif
253 };
254
255 //------------------------------CountedLoopEndNode-----------------------------
256 // CountedLoopEndNodes end simple trip counted loops. They act much like
257 // IfNodes.
258 class CountedLoopEndNode : public IfNode {
259 public:
260 enum { TestControl, TestValue };
261
262 CountedLoopEndNode( Node *control, Node *test, float prob, float cnt )
263 : IfNode( control, test, prob, cnt) {
264 init_class_id(Class_CountedLoopEnd);
265 }
266 virtual int Opcode() const;
267
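  // Added commentary (not in the original source): in(TestValue) is expected to
  // be a BoolNode whose first data input, in(1), is the Cmp node comparing the
  // trip counter against the limit; the req() >= 2 guard below merely protects
  // against a malformed or dead test.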
268 Node *cmp_node() const { return (in(TestValue)->req() >=2) ? in(TestValue)->in(1) : NULL; }
331 virtual const Type *Value( PhaseTransform *phase ) const;
332 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
333 virtual Node *Identity( PhaseTransform *phase );
334 };
335
336 // -----------------------------IdealLoopTree----------------------------------
337 class IdealLoopTree : public ResourceObj {
338 public:
339 IdealLoopTree *_parent; // Parent in loop tree
340 IdealLoopTree *_next; // Next sibling in loop tree
341 IdealLoopTree *_child; // First child in loop tree
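  // Added commentary (not in the original source): _parent, _child and _next
  // encode the loop tree in first-child/next-sibling form.  For
  //
  //   for (...) {       // loop A
  //     for (...) { }   // loop B
  //     for (...) { }   // loop C
  //   }
  //
  // A's _child reaches B and C (one directly, the other through _next), and
  // both B and C have _parent pointing back at A.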
342
343 // The head-tail backedge defines the loop.
344 // If tail is NULL then this loop has multiple backedges as part of the
345 // same loop. During cleanup I'll peel off the multiple backedges; merge
346 // them at the loop bottom and flow 1 real backedge into the loop.
347 Node *_head; // Head of loop
348 Node *_tail; // Tail of loop
349 inline Node *tail(); // Handle lazy update of _tail field
350 PhaseIdealLoop* _phase;
351 int _local_loop_unroll_limit;   // Per-loop node budget consulted by the unrolling policy
352 int _local_loop_unroll_factor;  // Unroll factor chosen for this loop (e.g. by SLP analysis)
353
354 Node_List _body; // Loop body for inner loops
355
356 uint8_t _nest; // Nesting depth
357 uint8_t _irreducible:1, // True if irreducible
358 _has_call:1, // True if has call safepoint
359 _has_sfpt:1, // True if has non-call safepoint
360 _rce_candidate:1; // True if candidate for range check elimination
361
362 Node_List* _safepts; // List of safepoints in this loop
363 Node_List* _required_safept; // An inner loop cannot delete these safepts;
364 bool _allow_optimizations; // Allow loop optimizations
365
366 IdealLoopTree( PhaseIdealLoop* phase, Node *head, Node *tail )
367 : _parent(0), _next(0), _child(0),
368 _head(head), _tail(tail),
369 _phase(phase),
370 _safepts(NULL),
371 _required_safept(NULL),
372 _allow_optimizations(true),
373 _nest(0), _irreducible(0), _has_call(0), _has_sfpt(0), _rce_candidate(0),
374 _local_loop_unroll_limit(0), _local_loop_unroll_factor(0)
375 { }
376
377 // Is 'l' a member of 'this'?
378 int is_member( const IdealLoopTree *l ) const; // Test for nested membership
379
380 // Set loop nesting depth. Accumulate has_call bits.
381 int set_nest( uint depth );
382
383 // Split out multiple fall-in edges from the loop header. Move them to a
384 // private RegionNode before the loop. This becomes the loop landing pad.
385 void split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt );
386
387 // Split out the outermost loop from this shared header.
388 void split_outer_loop( PhaseIdealLoop *phase );
389
390 // Merge all the backedges from the shared header into a private Region.
391 // Feed that region as the one backedge to this loop.
392 void merge_many_backedges( PhaseIdealLoop *phase );
393
394 // Split shared headers and insert loop landing pads.
442 // loop with an invariant test
443 bool policy_unswitching( PhaseIdealLoop *phase ) const;
444
445 // Micro-benchmark spamming. Remove empty loops.
446 bool policy_do_remove_empty_loop( PhaseIdealLoop *phase );
447
448 // Convert one iteration loop into normal code.
449 bool policy_do_one_iteration_loop( PhaseIdealLoop *phase );
450
451 // Return TRUE or FALSE if the loop should be peeled or not. Peel if we can
452 // make some loop-invariant test (usually a null-check) happen before the
453 // loop.
454 bool policy_peeling( PhaseIdealLoop *phase ) const;
455
456 // Return TRUE or FALSE if the loop should be maximally unrolled. Stash any
457 // known trip count in the counted loop node.
458 bool policy_maximally_unroll( PhaseIdealLoop *phase ) const;
459
460 // Return TRUE or FALSE if the loop should be unrolled or not. Unroll if
461 // the loop is a CountedLoop and the body is small enough.
462 bool policy_unroll( PhaseIdealLoop *phase );
463
464 // Analyze the loop to see whether it maps to a maximal superword (SLP)
465 // unrolling for vectorization; the result is recorded rather than returned.
466 void policy_unroll_slp_analysis(CountedLoopNode *cl, PhaseIdealLoop *phase, int future_unroll_ct);
467
468 // Return TRUE or FALSE if the loop should be range-check-eliminated.
469 // Gather a list of IF tests that are dominated by iteration splitting;
470 // also gather the end of the first split and the start of the 2nd split.
471 bool policy_range_check( PhaseIdealLoop *phase ) const;
472
473 // Return TRUE or FALSE if the loop should be cache-line aligned.
474 // Gather the expression that does the alignment. Note that only
475 // one array base can be aligned in a loop (unless the VM guarantees
476 // mutual alignment). Note that if we vectorize short memory ops
477 // into longer memory ops, we may want to increase alignment.
478 bool policy_align( PhaseIdealLoop *phase ) const;
479
480 // Return TRUE if "iff" is a range check.
481 bool is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const;
482
483 // Compute loop exact trip count if possible
484 void compute_exact_trip_count( PhaseIdealLoop *phase );
485
486 // Compute loop trip count from profile data