2082 }
2083 }
2084 }
2085 test = phase->idom(test);
2086 }
2087 }
2088
2089
2090 //------------------------------policy_do_remove_empty_loop--------------------
2091 // Micro-benchmark spamming. Policy is to always remove empty loops.
2092 // The 'DO' part is to replace the trip counter with the value it will
2093 // have on the last iteration. This will break the loop.
// NOTE(review): this is the "before" half of a before/after pair (a second
// copy of the same chunk follows the "|" separator further down). Each content
// line keeps its original file line number; review comments carry no number.
// Policy: always remove empty loops; the DO step replaces the trip counter
// with its last-iteration value, which breaks the loop (per header comment).
2094 bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) {
2095 // Minimum size must be empty loop
2096 if (_body.size() > EMPTY_LOOP_SIZE)
2097 return false;
2098
2099 if (!_head->is_CountedLoop())
2100 return false; // Dead loop
2101 CountedLoopNode *cl = _head->as_CountedLoop();
// Pre-patch check: only a NULL loopexit() is rejected. The post-patch copy
// below tightens this to cl->is_valid_counted_loop(), which presumably also
// validates the loop's phi/incr structure -- confirm against loopnode.hpp.
2102 if (!cl->loopexit())
2103 return false; // Malformed loop
// Safe to dereference loopexit() here: the NULL case returned above.
2104 if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
2105 return false; // Infinite loop
2106
2107 #ifdef ASSERT
2108 // Ensure only one phi which is the iv.
2109 Node* iv = NULL;
2110 for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) {
2111 Node* n = cl->fast_out(i);
2112 if (n->Opcode() == Op_Phi) {
2113 assert(iv == NULL, "Too many phis" );
2114 iv = n;
2115 }
2116 }
2117 assert(iv == cl->phi(), "Wrong phi" );
2118 #endif
2119
2120 // main and post loops have explicitly created zero trip guard
2121 bool needs_guard = !cl->is_main_loop() && !cl->is_post_loop();
// NOTE(review): function body is truncated here (source jumps from line 2122
// to 2238); the zero-trip-guard insertion logic is not visible in this chunk.
2122 if (needs_guard) {
2238
// NOTE(review): interior fragment of a larger loop-optimization driver; it
// starts and ends mid-function, so the enclosing signature is not visible.
// From the locals used (old_new, should_peel, should_unswitch) this looks
// like IdealLoopTree::iteration_split_impl -- confirm in the full file.
2239 // Non-counted loops may be peeled; exactly 1 iteration is peeled.
2240 // This removes loop-invariant tests (usually null checks).
2241 if (!_head->is_CountedLoop()) { // Non-counted loop
2242 if (PartialPeelLoop && phase->partial_peel(this, old_new)) {
2243 // Partial peel succeeded so terminate this round of loop opts
2244 return false;
2245 }
2246 if (should_peel) { // Should we peel?
2247 #ifndef PRODUCT
2248 if (PrintOpto) tty->print_cr("should_peel");
2249 #endif
2250 phase->do_peeling(this,old_new);
2251 } else if (should_unswitch) {
2252 phase->do_unswitching(this, old_new);
2253 }
2254 return true;
2255 }
2256 CountedLoopNode *cl = _head->as_CountedLoop();
2257
// Pre-patch check: only a NULL loopexit() marks the loop as broken; the
// post-patch copy below uses cl->is_valid_counted_loop() instead.
2258 if (!cl->loopexit()) return true; // Ignore various kinds of broken loops
2259
2260 // Do nothing special to pre- and post- loops
2261 if (cl->is_pre_loop() || cl->is_post_loop()) return true;
2262
2263 // Compute loop trip count from profile data
2264 compute_profile_trip_cnt(phase);
2265
2266 // Before attempting fancy unrolling, RCE or alignment, see if we want
2267 // to completely unroll this loop or do loop unswitching.
2268 if (cl->is_normal_loop()) {
2269 if (should_unswitch) {
2270 phase->do_unswitching(this, old_new);
2271 return true;
2272 }
2273 bool should_maximally_unroll = policy_maximally_unroll(phase);
2274 if (should_maximally_unroll) {
2275 // Here we did some unrolling and peeling. Eventually we will
2276 // completely unroll this loop and it will no longer be a loop.
2277 phase->do_maximally_unroll(this,old_new);
// NOTE(review): fragment truncated here (source jumps from 2278 to 2619).
2278 return true;
2619 store->dump();
2620 if (Verbose) {
2621 lpt->_body.dump();
2622 }
2623 }
2624 #endif
2625
2626 return msg == NULL;
2627 }
2628
2629
2630
// Recognize a counted inner loop whose body is a single array-fill store and
// (in code not visible here) replace it with a fill intrinsic.
// NOTE(review): truncated -- only the guard/match prologue is visible; the
// actual transformation after the TraceLoopOpts dump is cut off.
2631 bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
2632 // Only for counted inner loops
2633 if (!lpt->is_counted() || !lpt->is_inner()) {
2634 return false;
2635 }
2636
2637 // Must have constant stride
2638 CountedLoopNode* head = lpt->_head->as_CountedLoop();
// Pre-patch guard: constant stride + normal loop. The post-patch copy below
// replaces stride_is_con() with head->is_valid_counted_loop().
2639 if (!head->stride_is_con() || !head->is_normal_loop()) {
2640 return false;
2641 }
2642
2643 // Check that the body only contains a store of a loop invariant
2644 // value that is indexed by the loop phi.
// The four Nodes are out-params filled in by match_fill_loop on success.
2645 Node* store = NULL;
2646 Node* store_value = NULL;
2647 Node* shift = NULL;
2648 Node* offset = NULL;
2649 if (!match_fill_loop(lpt, store, store_value, shift, offset)) {
2650 return false;
2651 }
2652
2653 #ifndef PRODUCT
2654 if (TraceLoopOpts) {
2655 tty->print("ArrayFill ");
2656 lpt->dump_head();
2657 }
2658 #endif
2659
|
2082 }
2083 }
2084 }
2085 test = phase->idom(test);
2086 }
2087 }
2088
2089
2090 //------------------------------policy_do_remove_empty_loop--------------------
2091 // Micro-benchmark spamming. Policy is to always remove empty loops.
2092 // The 'DO' part is to replace the trip counter with the value it will
2093 // have on the last iteration. This will break the loop.
// NOTE(review): this is the "after" (post-patch) half of the before/after
// pair; it differs from the first copy only at line 2102 below.
// Policy: always remove empty loops; the DO step replaces the trip counter
// with its last-iteration value, which breaks the loop (per header comment).
2094 bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) {
2095 // Minimum size must be empty loop
2096 if (_body.size() > EMPTY_LOOP_SIZE)
2097 return false;
2098
2099 if (!_head->is_CountedLoop())
2100 return false; // Dead loop
2101 CountedLoopNode *cl = _head->as_CountedLoop();
// Post-patch check: is_valid_counted_loop() replaces the bare NULL-loopexit()
// test of the first copy. The dereference of loopexit() two lines down relies
// on is_valid_counted_loop() implying a non-NULL loopexit -- confirm against
// its definition in loopnode.hpp.
2102 if (!cl->is_valid_counted_loop())
2103 return false; // Malformed loop
2104 if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
2105 return false; // Infinite loop
2106
2107 #ifdef ASSERT
2108 // Ensure only one phi which is the iv.
2109 Node* iv = NULL;
2110 for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) {
2111 Node* n = cl->fast_out(i);
2112 if (n->Opcode() == Op_Phi) {
2113 assert(iv == NULL, "Too many phis" );
2114 iv = n;
2115 }
2116 }
2117 assert(iv == cl->phi(), "Wrong phi" );
2118 #endif
2119
2120 // main and post loops have explicitly created zero trip guard
2121 bool needs_guard = !cl->is_main_loop() && !cl->is_post_loop();
// NOTE(review): function body is truncated here (source jumps from line 2122
// to 2238); the zero-trip-guard insertion logic is not visible in this chunk.
2122 if (needs_guard) {
2238
// NOTE(review): post-patch copy of the interior driver fragment; differs from
// the first copy only at line 2258. Starts and ends mid-function, so the
// enclosing signature is not visible (presumably iteration_split_impl).
2239 // Non-counted loops may be peeled; exactly 1 iteration is peeled.
2240 // This removes loop-invariant tests (usually null checks).
2241 if (!_head->is_CountedLoop()) { // Non-counted loop
2242 if (PartialPeelLoop && phase->partial_peel(this, old_new)) {
2243 // Partial peel succeeded so terminate this round of loop opts
2244 return false;
2245 }
2246 if (should_peel) { // Should we peel?
2247 #ifndef PRODUCT
2248 if (PrintOpto) tty->print_cr("should_peel");
2249 #endif
2250 phase->do_peeling(this,old_new);
2251 } else if (should_unswitch) {
2252 phase->do_unswitching(this, old_new);
2253 }
2254 return true;
2255 }
2256 CountedLoopNode *cl = _head->as_CountedLoop();
2257
// Post-patch check: is_valid_counted_loop() replaces the bare NULL-loopexit()
// test used in the first copy, rejecting more malformed loop shapes.
2258 if (!cl->is_valid_counted_loop()) return true; // Ignore various kinds of broken loops
2259
2260 // Do nothing special to pre- and post- loops
2261 if (cl->is_pre_loop() || cl->is_post_loop()) return true;
2262
2263 // Compute loop trip count from profile data
2264 compute_profile_trip_cnt(phase);
2265
2266 // Before attempting fancy unrolling, RCE or alignment, see if we want
2267 // to completely unroll this loop or do loop unswitching.
2268 if (cl->is_normal_loop()) {
2269 if (should_unswitch) {
2270 phase->do_unswitching(this, old_new);
2271 return true;
2272 }
2273 bool should_maximally_unroll = policy_maximally_unroll(phase);
2274 if (should_maximally_unroll) {
2275 // Here we did some unrolling and peeling. Eventually we will
2276 // completely unroll this loop and it will no longer be a loop.
2277 phase->do_maximally_unroll(this,old_new);
// NOTE(review): fragment truncated here (source jumps from 2278 to 2619).
2278 return true;
2619 store->dump();
2620 if (Verbose) {
2621 lpt->_body.dump();
2622 }
2623 }
2624 #endif
2625
2626 return msg == NULL;
2627 }
2628
2629
2630
// Recognize a counted inner loop whose body is a single array-fill store and
// (in code not visible here) replace it with a fill intrinsic.
// NOTE(review): post-patch copy; differs from the first copy only at line
// 2639. Truncated -- the transformation after the dump is cut off.
2631 bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
2632 // Only for counted inner loops
2633 if (!lpt->is_counted() || !lpt->is_inner()) {
2634 return false;
2635 }
2636
2637 // Must have constant stride
2638 CountedLoopNode* head = lpt->_head->as_CountedLoop();
// Post-patch guard: is_valid_counted_loop() replaces stride_is_con(); this
// comment ("constant stride") now presumably holds via the validity check --
// confirm that is_valid_counted_loop() implies a constant stride.
2639 if (!head->is_valid_counted_loop() || !head->is_normal_loop()) {
2640 return false;
2641 }
2642
2643 // Check that the body only contains a store of a loop invariant
2644 // value that is indexed by the loop phi.
// The four Nodes are out-params filled in by match_fill_loop on success.
2645 Node* store = NULL;
2646 Node* store_value = NULL;
2647 Node* shift = NULL;
2648 Node* offset = NULL;
2649 if (!match_fill_loop(lpt, store, store_value, shift, offset)) {
2650 return false;
2651 }
2652
2653 #ifndef PRODUCT
2654 if (TraceLoopOpts) {
2655 tty->print("ArrayFill ");
2656 lpt->dump_head();
2657 }
2658 #endif
2659
|