
src/share/vm/opto/loopTransform.cpp


*** 122,142 ****
  }
  float trip_cnt = (float)max_jint; // default is big

  Node* back = head->in(LoopNode::LoopBackControl);
  while (back != head) {
!   if ((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
        back->in(0) &&
        back->in(0)->is_If() &&
        back->in(0)->as_If()->_fcnt != COUNT_UNKNOWN &&
        back->in(0)->as_If()->_prob != PROB_UNKNOWN) {
      break;
    }
    back = phase->idom(back);
  }
  if (back != head) {
!   assert((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
           back->in(0), "if-projection exists");
    IfNode* back_if = back->in(0)->as_If();
    float loop_back_cnt = back_if->_fcnt * back_if->_prob;

    // Now compute a loop exit count
--- 122,142 ----
  }
  float trip_cnt = (float)max_jint; // default is big

  Node* back = head->in(LoopNode::LoopBackControl);
  while (back != head) {
!   if ((back->Opcode() == Opcodes::Op_IfTrue || back->Opcode() == Opcodes::Op_IfFalse) &&
        back->in(0) &&
        back->in(0)->is_If() &&
        back->in(0)->as_If()->_fcnt != COUNT_UNKNOWN &&
        back->in(0)->as_If()->_prob != PROB_UNKNOWN) {
      break;
    }
    back = phase->idom(back);
  }
  if (back != head) {
!   assert((back->Opcode() == Opcodes::Op_IfTrue || back->Opcode() == Opcodes::Op_IfFalse) &&
           back->in(0), "if-projection exists");
    IfNode* back_if = back->in(0)->as_If();
    float loop_back_cnt = back_if->_fcnt * back_if->_prob;

    // Now compute a loop exit count
*** 147,157 ****
        IfNode *iff = n->as_If();
        if( iff->_fcnt != COUNT_UNKNOWN && iff->_prob != PROB_UNKNOWN ) {
          Node *exit = is_loop_exit(iff);
          if( exit ) {
            float exit_prob = iff->_prob;
!           if (exit->Opcode() == Op_IfFalse) exit_prob = 1.0 - exit_prob;
            if (exit_prob > PROB_MIN) {
              float exit_cnt = iff->_fcnt * exit_prob;
              loop_exit_cnt += exit_cnt;
            }
          }
--- 147,157 ----
        IfNode *iff = n->as_If();
        if( iff->_fcnt != COUNT_UNKNOWN && iff->_prob != PROB_UNKNOWN ) {
          Node *exit = is_loop_exit(iff);
          if( exit ) {
            float exit_prob = iff->_prob;
!           if (exit->Opcode() == Opcodes::Op_IfFalse) exit_prob = 1.0 - exit_prob;
            if (exit_prob > PROB_MIN) {
              float exit_cnt = iff->_fcnt * exit_prob;
              loop_exit_cnt += exit_cnt;
            }
          }
*** 175,186 ****
  //---------------------is_invariant_addition-----------------------------
  // Return nonzero index of invariant operand for an Add or Sub
  // of (nonconstant) invariant and variant values. Helper for reassociate_invariants.
  int IdealLoopTree::is_invariant_addition(Node* n, PhaseIdealLoop *phase) {
!   int op = n->Opcode();
!   if (op == Op_AddI || op == Op_SubI) {
      bool in1_invar = this->is_invariant(n->in(1));
      bool in2_invar = this->is_invariant(n->in(2));
      if (in1_invar && !in2_invar) return 1;
      if (!in1_invar && in2_invar) return 2;
    }
--- 175,186 ----
  //---------------------is_invariant_addition-----------------------------
  // Return nonzero index of invariant operand for an Add or Sub
  // of (nonconstant) invariant and variant values. Helper for reassociate_invariants.
  int IdealLoopTree::is_invariant_addition(Node* n, PhaseIdealLoop *phase) {
!   Opcodes op = n->Opcode();
!   if (op == Opcodes::Op_AddI || op == Opcodes::Op_SubI) {
      bool in1_invar = this->is_invariant(n->in(1));
      bool in2_invar = this->is_invariant(n->in(2));
      if (in1_invar && !in2_invar) return 1;
      if (!in1_invar && in2_invar) return 2;
    }
*** 288,298 ****
    if( test->is_If() ) {       // Test?
      Node *ctrl = phase->get_ctrl(test->in(1));
      if (ctrl->is_top())
        return false;           // Found dead test on live IF? No peeling!
      // Standard IF only has one input value to check for loop invariance
!     assert(test->Opcode() == Op_If || test->Opcode() == Op_CountedLoopEnd || test->Opcode() == Op_RangeCheck, "Check this code when new subtype is added");
      // Condition is not a member of this loop?
      if( !is_member(phase->get_loop(ctrl)) &&
          is_loop_exit(test) )
        return true;            // Found reason to peel!
    }
--- 288,298 ----
    if( test->is_If() ) {       // Test?
      Node *ctrl = phase->get_ctrl(test->in(1));
      if (ctrl->is_top())
        return false;           // Found dead test on live IF? No peeling!
      // Standard IF only has one input value to check for loop invariance
!     assert(test->Opcode() == Opcodes::Op_If || test->Opcode() == Opcodes::Op_CountedLoopEnd || test->Opcode() == Opcodes::Op_RangeCheck, "Check this code when new subtype is added");
      // Condition is not a member of this loop?
      if( !is_member(phase->get_loop(ctrl)) &&
          is_loop_exit(test) )
        return true;            // Found reason to peel!
    }
*** 313,324 ****
    progress = false;           // Reset for next iteration
    Node *prev = loop->_head->in(LoopNode::LoopBackControl);//loop->tail();
    Node *test = prev->in(0);
    while( test != loop->_head ) { // Scan till run off top of loop

!     int p_op = prev->Opcode();
!     if( (p_op == Op_IfFalse || p_op == Op_IfTrue) &&
          test->is_If() &&      // Test?
          !test->in(1)->is_Con() && // And not already obvious?
          // Condition is not a member of this loop?
          !loop->is_member(get_loop(get_ctrl(test->in(1))))){
        // Walk loop body looking for instances of this test
--- 313,324 ----
    progress = false;           // Reset for next iteration
    Node *prev = loop->_head->in(LoopNode::LoopBackControl);//loop->tail();
    Node *test = prev->in(0);
    while( test != loop->_head ) { // Scan till run off top of loop

!     Opcodes p_op = prev->Opcode();
!     if( (p_op == Opcodes::Op_IfFalse || p_op == Opcodes::Op_IfTrue) &&
          test->is_If() &&      // Test?
          !test->in(1)->is_Con() && // And not already obvious?
          // Condition is not a member of this loop?
          !loop->is_member(get_loop(get_ctrl(test->in(1))))){
        // Walk loop body looking for instances of this test
*** 617,638 ****
      // Do not unroll a loop with String intrinsics code.
      // String intrinsics are large and have loops.
      for (uint k = 0; k < _body.size(); k++) {
        Node* n = _body.at(k);
        switch (n->Opcode()) {
!         case Op_StrComp:
!         case Op_StrEquals:
!         case Op_StrIndexOf:
!         case Op_StrIndexOfChar:
!         case Op_EncodeISOArray:
!         case Op_AryEq:
!         case Op_HasNegatives: {
            return false;
          }
  #if INCLUDE_RTM_OPT
!         case Op_FastLock:
!         case Op_FastUnlock: {
            // Don't unroll RTM locking code because it is large.
            if (UseRTMLocking) {
              return false;
            }
          }
--- 617,638 ----
      // Do not unroll a loop with String intrinsics code.
      // String intrinsics are large and have loops.
      for (uint k = 0; k < _body.size(); k++) {
        Node* n = _body.at(k);
        switch (n->Opcode()) {
!         case Opcodes::Op_StrComp:
!         case Opcodes::Op_StrEquals:
!         case Opcodes::Op_StrIndexOf:
!         case Opcodes::Op_StrIndexOfChar:
!         case Opcodes::Op_EncodeISOArray:
!         case Opcodes::Op_AryEq:
!         case Opcodes::Op_HasNegatives: {
            return false;
          }
  #if INCLUDE_RTM_OPT
!         case Opcodes::Op_FastLock:
!         case Opcodes::Op_FastUnlock: {
            // Don't unroll RTM locking code because it is large.
            if (UseRTMLocking) {
              return false;
            }
          }
*** 735,762 ****
    int xors_in_loop = 0;
    // Also count ModL, DivL and MulL which expand mightly
    for (uint k = 0; k < _body.size(); k++) {
      Node* n = _body.at(k);
      switch (n->Opcode()) {
!       case Op_XorI: xors_in_loop++; break; // CRC32 java code
!       case Op_ModL: body_size += 30; break;
!       case Op_DivL: body_size += 30; break;
!       case Op_MulL: body_size += 10; break;
!       case Op_StrComp:
!       case Op_StrEquals:
!       case Op_StrIndexOf:
!       case Op_StrIndexOfChar:
!       case Op_EncodeISOArray:
!       case Op_AryEq:
!       case Op_HasNegatives: {
          // Do not unroll a loop with String intrinsics code.
          // String intrinsics are large and have loops.
          return false;
        }
  #if INCLUDE_RTM_OPT
!       case Op_FastLock:
!       case Op_FastUnlock: {
          // Don't unroll RTM locking code because it is large.
          if (UseRTMLocking) {
            return false;
          }
        }
--- 735,762 ----
    int xors_in_loop = 0;
    // Also count ModL, DivL and MulL which expand mightly
    for (uint k = 0; k < _body.size(); k++) {
      Node* n = _body.at(k);
      switch (n->Opcode()) {
!       case Opcodes::Op_XorI: xors_in_loop++; break; // CRC32 java code
!       case Opcodes::Op_ModL: body_size += 30; break;
!       case Opcodes::Op_DivL: body_size += 30; break;
!       case Opcodes::Op_MulL: body_size += 10; break;
!       case Opcodes::Op_StrComp:
!       case Opcodes::Op_StrEquals:
!       case Opcodes::Op_StrIndexOf:
!       case Opcodes::Op_StrIndexOfChar:
!       case Opcodes::Op_EncodeISOArray:
!       case Opcodes::Op_AryEq:
!       case Opcodes::Op_HasNegatives: {
          // Do not unroll a loop with String intrinsics code.
          // String intrinsics are large and have loops.
          return false;
        }
  #if INCLUDE_RTM_OPT
!       case Opcodes::Op_FastLock:
!       case Opcodes::Op_FastUnlock: {
          // Don't unroll RTM locking code because it is large.
          if (UseRTMLocking) {
            return false;
          }
        }
*** 859,876 ****
    // Check loop body for tests of trip-counter plus loop-invariant vs
    // loop-invariant.
    for (uint i = 0; i < _body.size(); i++) {
      Node *iff = _body[i];
!     if (iff->Opcode() == Op_If ||
!         iff->Opcode() == Op_RangeCheck) { // Test?

        // Comparing trip+off vs limit
        Node *bol = iff->in(1);
        if (bol->req() != 2) continue; // dead constant test
        if (!bol->is_Bool()) {
!         assert(bol->Opcode() == Op_Conv2B, "predicate check only");
          continue;
        }
        if (bol->as_Bool()->_test._test == BoolTest::ne) continue; // not RC
--- 859,876 ----
    // Check loop body for tests of trip-counter plus loop-invariant vs
    // loop-invariant.
    for (uint i = 0; i < _body.size(); i++) {
      Node *iff = _body[i];
!     if (iff->Opcode() == Opcodes::Op_If ||
!         iff->Opcode() == Opcodes::Op_RangeCheck) { // Test?

        // Comparing trip+off vs limit
        Node *bol = iff->in(1);
        if (bol->req() != 2) continue; // dead constant test
        if (!bol->is_Bool()) {
!         assert(bol->Opcode() == Opcodes::Op_Conv2B, "predicate check only");
          continue;
        }
        if (bol->as_Bool()->_test._test == BoolTest::ne) continue; // not RC
*** 1045,1055 ****
    // Reduce the pre-loop trip count.
    pre_end->_prob = PROB_FAIR;

    // Find the pre-loop normal exit.
    Node* pre_exit = pre_end->proj_out(false);
!   assert( pre_exit->Opcode() == Op_IfFalse, "" );
    IfFalseNode *new_pre_exit = new IfFalseNode(pre_end);
    _igvn.register_new_node_with_optimizer( new_pre_exit );
    set_idom(new_pre_exit, pre_end, dd_main_head);
    set_loop(new_pre_exit, loop->_parent);
--- 1045,1055 ----
    // Reduce the pre-loop trip count.
    pre_end->_prob = PROB_FAIR;

    // Find the pre-loop normal exit.
    Node* pre_exit = pre_end->proj_out(false);
!   assert( pre_exit->Opcode() == Opcodes::Op_IfFalse, "" );
    IfFalseNode *new_pre_exit = new IfFalseNode(pre_end);
    _igvn.register_new_node_with_optimizer( new_pre_exit );
    set_idom(new_pre_exit, pre_end, dd_main_head);
    set_loop(new_pre_exit, loop->_parent);
*** 1300,1316 ****
                   Node *incr, Node *limit, CountedLoopNode *&post_head) {
    //------------------------------
    // Step A: Create a new post-Loop.
    Node* main_exit = main_end->proj_out(false);
!   assert(main_exit->Opcode() == Op_IfFalse, "");
    int dd_main_exit = dom_depth(main_exit);

    // Step A1: Clone the loop body of main. The clone becomes the vector post-loop.
    // The main loop pre-header illegally has 2 control users (old & new loops).
    clone_loop(loop, old_new, dd_main_exit);
!   assert(old_new[main_end->_idx]->Opcode() == Op_CountedLoopEnd, "");
    post_head = old_new[main_head->_idx]->as_CountedLoop();
    post_head->set_normal_loop();
    post_head->set_post_loop(main_head);

    // Reduce the post-loop trip count.
--- 1300,1316 ----
                   Node *incr, Node *limit, CountedLoopNode *&post_head) {
    //------------------------------
    // Step A: Create a new post-Loop.
    Node* main_exit = main_end->proj_out(false);
!   assert(main_exit->Opcode() == Opcodes::Op_IfFalse, "");
    int dd_main_exit = dom_depth(main_exit);

    // Step A1: Clone the loop body of main. The clone becomes the vector post-loop.
    // The main loop pre-header illegally has 2 control users (old & new loops).
    clone_loop(loop, old_new, dd_main_exit);
!   assert(old_new[main_end->_idx]->Opcode() == Opcodes::Op_CountedLoopEnd, "");
    post_head = old_new[main_head->_idx]->as_CountedLoop();
    post_head->set_normal_loop();
    post_head->set_post_loop(main_head);

    // Reduce the post-loop trip count.
--- 1300,1316 ----
*** 1532,1545 ****
    set_ctrl(adj_max, C->root());
    Node* old_limit = NULL;
    Node* adj_limit = NULL;
    Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : NULL;
    if (loop_head->unrolled_count() > 1 &&
!       limit->is_CMove() && limit->Opcode() == Op_CMoveI &&
        limit->in(CMoveNode::IfTrue) == adj_max &&
        bol->as_Bool()->_test._test == bt &&
!       bol->in(1)->Opcode() == Op_CmpI &&
        bol->in(1)->in(2) == limit->in(CMoveNode::IfFalse)) {
      // Loop was unrolled before.
      // Optimize the limit to avoid nested CMove:
      // use original limit as old limit.
      old_limit = bol->in(1)->in(1);
--- 1532,1545 ----
    set_ctrl(adj_max, C->root());
    Node* old_limit = NULL;
    Node* adj_limit = NULL;
    Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : NULL;
    if (loop_head->unrolled_count() > 1 &&
!       limit->is_CMove() && limit->Opcode() == Opcodes::Op_CMoveI &&
        limit->in(CMoveNode::IfTrue) == adj_max &&
        bol->as_Bool()->_test._test == bt &&
!       bol->in(1)->Opcode() == Opcodes::Op_CmpI &&
        bol->in(1)->in(2) == limit->in(CMoveNode::IfFalse)) {
      // Loop was unrolled before.
      // Optimize the limit to avoid nested CMove:
      // use original limit as old limit.
      old_limit = bol->in(1)->in(1);
*** 1715,1725 ****
      if (def_node != NULL) {
        Node* n_ctrl = get_ctrl(def_node);
        if (n_ctrl != NULL && loop->is_member(get_loop(n_ctrl))) {
          // Now test it to see if it fits the standard pattern for a reduction operator.
!         int opc = def_node->Opcode();
          if (opc != ReductionNode::opcode(opc, def_node->bottom_type()->basic_type())) {
            if (!def_node->is_reduction()) { // Not marked yet
              // To be a reduction, the arithmetic node must have the phi as input and provide a def to it
              bool ok = false;
              for (unsigned j = 1; j < def_node->req(); j++) {
--- 1715,1725 ----
      if (def_node != NULL) {
        Node* n_ctrl = get_ctrl(def_node);
        if (n_ctrl != NULL && loop->is_member(get_loop(n_ctrl))) {
          // Now test it to see if it fits the standard pattern for a reduction operator.
!         Opcodes opc = def_node->Opcode();
          if (opc != ReductionNode::opcode(opc, def_node->bottom_type()->basic_type())) {
            if (!def_node->is_reduction()) { // Not marked yet
              // To be a reduction, the arithmetic node must have the phi as input and provide a def to it
              bool ok = false;
              for (unsigned j = 1; j < def_node->req(); j++) {
*** 1916,1927 ****
      if (p_scale != NULL) {
        *p_scale = 1;
      }
      return true;
    }
!   int opc = exp->Opcode();
!   if (opc == Op_MulI) {
      if (exp->in(1) == iv && exp->in(2)->is_Con()) {
        if (p_scale != NULL) {
          *p_scale = exp->in(2)->get_int();
        }
        return true;
--- 1916,1927 ----
      if (p_scale != NULL) {
        *p_scale = 1;
      }
      return true;
    }
!   Opcodes opc = exp->Opcode();
!   if (opc == Opcodes::Op_MulI) {
      if (exp->in(1) == iv && exp->in(2)->is_Con()) {
        if (p_scale != NULL) {
          *p_scale = exp->in(2)->get_int();
        }
        return true;
*** 1930,1940 ****
        if (p_scale != NULL) {
          *p_scale = exp->in(1)->get_int();
        }
        return true;
      }
!   } else if (opc == Op_LShiftI) {
      if (exp->in(1) == iv && exp->in(2)->is_Con()) {
        if (p_scale != NULL) {
          *p_scale = 1 << exp->in(2)->get_int();
        }
        return true;
--- 1930,1940 ----
        if (p_scale != NULL) {
          *p_scale = exp->in(1)->get_int();
        }
        return true;
      }
!   } else if (opc == Opcodes::Op_LShiftI) {
      if (exp->in(1) == iv && exp->in(2)->is_Con()) {
        if (p_scale != NULL) {
          *p_scale = 1 << exp->in(2)->get_int();
        }
        return true;
*** 1952,1963 ****
        set_ctrl(zero, C->root());
        *p_offset = zero;
      }
      return true;
    }
!   int opc = exp->Opcode();
!   if (opc == Op_AddI) {
      if (is_scaled_iv(exp->in(1), iv, p_scale)) {
        if (p_offset != NULL) {
          *p_offset = exp->in(2);
        }
        return true;
--- 1952,1963 ----
        set_ctrl(zero, C->root());
        *p_offset = zero;
      }
      return true;
    }
!   Opcodes opc = exp->Opcode();
!   if (opc == Opcodes::Op_AddI) {
      if (is_scaled_iv(exp->in(1), iv, p_scale)) {
        if (p_offset != NULL) {
          *p_offset = exp->in(2);
        }
        return true;
*** 1980,1990 ****
          *p_offset = offset;
        }
        return true;
      }
    }
!   } else if (opc == Op_SubI) {
      if (is_scaled_iv(exp->in(1), iv, p_scale)) {
        if (p_offset != NULL) {
          Node *zero = _igvn.intcon(0);
          set_ctrl(zero, C->root());
          Node *ctrl_off = get_ctrl(exp->in(2));
--- 1980,1990 ----
          *p_offset = offset;
        }
        return true;
      }
    }
!   } else if (opc == Opcodes::Op_SubI) {
      if (is_scaled_iv(exp->in(1), iv, p_scale)) {
        if (p_offset != NULL) {
          Node *zero = _igvn.intcon(0);
          set_ctrl(zero, C->root());
          Node *ctrl_off = get_ctrl(exp->in(2));
*** 2047,2066 ****
    // Find the pre-loop limit; we will expand its iterations to
    // not ever trip low tests.
    Node *p_f = iffm->in(0);
    // pre loop may have been optimized out
!   if (p_f->Opcode() != Op_IfFalse) {
      return closed_range_checks;
    }
    CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
    assert(pre_end->loopnode()->is_pre_loop(), "");
    Node *pre_opaq1 = pre_end->limit();
    // Occasionally it's possible for a pre-loop Opaque1 node to be
    // optimized away and then another round of loop opts attempted.
    // We can not optimize this particular loop in that case.
!   if (pre_opaq1->Opcode() != Op_Opaque1)
      return closed_range_checks;
    Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1;
    Node *pre_limit = pre_opaq->in(1);

    // Where do we put new limit calculations
--- 2047,2066 ----
    // Find the pre-loop limit; we will expand its iterations to
    // not ever trip low tests.
    Node *p_f = iffm->in(0);
    // pre loop may have been optimized out
!   if (p_f->Opcode() != Opcodes::Op_IfFalse) {
      return closed_range_checks;
    }
    CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
    assert(pre_end->loopnode()->is_pre_loop(), "");
    Node *pre_opaq1 = pre_end->limit();
    // Occasionally it's possible for a pre-loop Opaque1 node to be
    // optimized away and then another round of loop opts attempted.
    // We can not optimize this particular loop in that case.
!   if (pre_opaq1->Opcode() != Opcodes::Op_Opaque1)
      return closed_range_checks;
    Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1;
    Node *pre_limit = pre_opaq->in(1);

    // Where do we put new limit calculations
*** 2097,2114 ****
    // Check loop body for tests of trip-counter plus loop-invariant vs
    // loop-invariant.
    for( uint i = 0; i < loop->_body.size(); i++ ) {
      Node *iff = loop->_body[i];
!     if (iff->Opcode() == Op_If ||
!         iff->Opcode() == Op_RangeCheck) { // Test?
        // Test is an IfNode, has 2 projections. If BOTH are in the loop
        // we need loop unswitching instead of iteration splitting.
        closed_range_checks++;
        Node *exit = loop->is_loop_exit(iff);
        if( !exit ) continue;
!       int flip = (exit->Opcode() == Op_IfTrue) ? 1 : 0;

        // Get boolean condition to test
        Node *i1 = iff->in(1);
        if( !i1->is_Bool() ) continue;
        BoolNode *bol = i1->as_Bool();
--- 2097,2114 ----
    // Check loop body for tests of trip-counter plus loop-invariant vs
    // loop-invariant.
    for( uint i = 0; i < loop->_body.size(); i++ ) {
      Node *iff = loop->_body[i];
!     if (iff->Opcode() == Opcodes::Op_If ||
!         iff->Opcode() == Opcodes::Op_RangeCheck) { // Test?
        // Test is an IfNode, has 2 projections. If BOTH are in the loop
        // we need loop unswitching instead of iteration splitting.
        closed_range_checks++;
        Node *exit = loop->is_loop_exit(iff);
        if( !exit ) continue;
!       int flip = (exit->Opcode() == Opcodes::Op_IfTrue) ? 1 : 0;

        // Get boolean condition to test
        Node *i1 = iff->in(1);
        if( !i1->is_Bool() ) continue;
        BoolNode *bol = i1->as_Bool();
*** 2173,2183 ****
        // monotonically increases by stride_con, a constant. Both (or either)
        // stride_con and scale_con can be negative which will flip about the
        // sense of the test.

        // Adjust pre and main loop limits to guard the correct iteration set
!       if( cmp->Opcode() == Op_CmpU ) {// Unsigned compare is really 2 tests
          if( b_test._test == BoolTest::lt ) { // Range checks always use lt
            // The underflow and overflow limits: 0 <= scale*I+offset < limit
            add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit );
            // (0-offset)/scale could be outside of loop iterations range.
            conditional_rc = true;
--- 2173,2183 ----
        // monotonically increases by stride_con, a constant. Both (or either)
        // stride_con and scale_con can be negative which will flip about the
        // sense of the test.

        // Adjust pre and main loop limits to guard the correct iteration set
!       if( cmp->Opcode() == Opcodes::Op_CmpU ) {// Unsigned compare is really 2 tests
          if( b_test._test == BoolTest::lt ) { // Range checks always use lt
            // The underflow and overflow limits: 0 <= scale*I+offset < limit
            add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit );
            // (0-offset)/scale could be outside of loop iterations range.
            conditional_rc = true;
*** 2241,2251 ****
              _igvn.replace_input_of(cd, 0, ctrl); // ctrl, not NULL
              --i;
              --imax;
            }
          }

!         if (limit->Opcode() == Op_LoadRange) {
            closed_range_checks--;
          }

        } // End of is IF
--- 2241,2251 ----
              _igvn.replace_input_of(cd, 0, ctrl); // ctrl, not NULL
              --i;
              --imax;
            }
          }

!         if (limit->Opcode() == Opcodes::Op_LoadRange) {
            closed_range_checks--;
          }

        } // End of is IF
*** 2299,2310 ****
    if (cl->has_been_range_checked()) return;

    // Now check for existance of range checks
    for (uint i = 0; i < loop->_body.size(); i++) {
      Node *iff = loop->_body[i];
!     int iff_opc = iff->Opcode();
!     if (iff_opc == Op_If || iff_opc == Op_RangeCheck) {
        cl->mark_has_range_checks();
        break;
      }
    }
    cl->set_has_been_range_checked();
--- 2299,2310 ----
    if (cl->has_been_range_checked()) return;

    // Now check for existance of range checks
    for (uint i = 0; i < loop->_body.size(); i++) {
      Node *iff = loop->_body[i];
!     Opcodes iff_opc = iff->Opcode();
!     if (iff_opc == Opcodes::Op_If || iff_opc == Opcodes::Op_RangeCheck) {
        cl->mark_has_range_checks();
        break;
      }
    }
    cl->set_has_been_range_checked();
*** 2321,2332 ****
    // Check for existance of range checks using the unique instance to make a guard with
    Unique_Node_List worklist;
    for (uint i = 0; i < legacy_loop->_body.size(); i++) {
      Node *iff = legacy_loop->_body[i];
!     int iff_opc = iff->Opcode();
!     if (iff_opc == Op_If || iff_opc == Op_RangeCheck) {
        worklist.push(iff);
      }
    }

    // Find RCE'd post loop so that we can stage its guard.
--- 2321,2332 ----
    // Check for existance of range checks using the unique instance to make a guard with
    Unique_Node_List worklist;
    for (uint i = 0; i < legacy_loop->_body.size(); i++) {
      Node *iff = legacy_loop->_body[i];
!     Opcodes iff_opc = iff->Opcode();
!     if (iff_opc == Opcodes::Op_If || iff_opc == Opcodes::Op_RangeCheck) {
        worklist.push(iff);
      }
    }

    // Find RCE'd post loop so that we can stage its guard.
*** 2380,2390 ****
        Node *rc_bolzm = rc_iffm->in(1);
        if (rc_bolzm->is_Bool()) {
          Node *rc_cmpzm = rc_bolzm->in(1);
          if (rc_cmpzm->is_Cmp()) {
            Node *rc_left = rc_cmpzm->in(2);
!           if (rc_left->Opcode() != Op_LoadRange) {
              multi_version_succeeded = false;
              break;
            }
            if (first_time) {
              last_min = rc_left;
--- 2380,2390 ----
        Node *rc_bolzm = rc_iffm->in(1);
        if (rc_bolzm->is_Bool()) {
          Node *rc_cmpzm = rc_bolzm->in(1);
          if (rc_cmpzm->is_Cmp()) {
            Node *rc_left = rc_cmpzm->in(2);
!           if (rc_left->Opcode() != Opcodes::Op_LoadRange) {
              multi_version_succeeded = false;
              break;
            }
            if (first_time) {
              last_min = rc_left;
*** 2459,2503 ****
  // Look for loop-exit tests with the 50/50 (or worse) guesses from the parsing stage.
  // Replace with a 1-in-10 exit guess.
  void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) {
    Node *test = tail();
    while( test != _head ) {
!     uint top = test->Opcode();
!     if( top == Op_IfTrue || top == Op_IfFalse ) {
        int test_con = ((ProjNode*)test)->_con;
!       assert(top == (uint)(test_con? Op_IfTrue: Op_IfFalse), "sanity");
        IfNode *iff = test->in(0)->as_If();
        if( iff->outcnt() == 2 ) {        // Ignore dead tests
          Node *bol = iff->in(1);
          if( bol && bol->req() > 1 && bol->in(1) &&
!             ((bol->in(1)->Opcode() == Op_StorePConditional ) ||
!              (bol->in(1)->Opcode() == Op_StoreIConditional ) ||
!              (bol->in(1)->Opcode() == Op_StoreLConditional ) ||
!              (bol->in(1)->Opcode() == Op_CompareAndExchangeB ) ||
!              (bol->in(1)->Opcode() == Op_CompareAndExchangeS ) ||
!              (bol->in(1)->Opcode() == Op_CompareAndExchangeI ) ||
!              (bol->in(1)->Opcode() == Op_CompareAndExchangeL ) ||
!              (bol->in(1)->Opcode() == Op_CompareAndExchangeP ) ||
!              (bol->in(1)->Opcode() == Op_CompareAndExchangeN ) ||
!              (bol->in(1)->Opcode() == Op_WeakCompareAndSwapB ) ||
!              (bol->in(1)->Opcode() == Op_WeakCompareAndSwapS ) ||
!              (bol->in(1)->Opcode() == Op_WeakCompareAndSwapI ) ||
!              (bol->in(1)->Opcode() == Op_WeakCompareAndSwapL ) ||
!              (bol->in(1)->Opcode() == Op_WeakCompareAndSwapP ) ||
!              (bol->in(1)->Opcode() == Op_WeakCompareAndSwapN ) ||
!              (bol->in(1)->Opcode() == Op_CompareAndSwapB ) ||
!              (bol->in(1)->Opcode() == Op_CompareAndSwapS ) ||
!              (bol->in(1)->Opcode() == Op_CompareAndSwapI ) ||
!              (bol->in(1)->Opcode() == Op_CompareAndSwapL ) ||
!              (bol->in(1)->Opcode() == Op_CompareAndSwapP ) ||
!              (bol->in(1)->Opcode() == Op_CompareAndSwapN )))
            return;         // Allocation loops RARELY take backedge
          // Find the OTHER exit path from the IF
          Node* ex = iff->proj_out(1-test_con);
          float p = iff->_prob;
          if( !phase->is_member( this, ex ) && iff->_fcnt == COUNT_UNKNOWN ) {
!           if( top == Op_IfTrue ) {
              if( p < (PROB_FAIR + PROB_UNLIKELY_MAG(3))) {
                iff->_prob = PROB_STATIC_FREQUENT;
              }
            } else {
              if( p > (PROB_FAIR - PROB_UNLIKELY_MAG(3))) {
--- 2459,2503 ----
  // Look for loop-exit tests with the 50/50 (or worse) guesses from the parsing stage.
  // Replace with a 1-in-10 exit guess.
  void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) {
    Node *test = tail();
    while( test != _head ) {
!     Opcodes top = test->Opcode();
!     if( top == Opcodes::Op_IfTrue || top == Opcodes::Op_IfFalse ) {
        int test_con = ((ProjNode*)test)->_con;
!       assert(top == (test_con? Opcodes::Op_IfTrue: Opcodes::Op_IfFalse), "sanity");
        IfNode *iff = test->in(0)->as_If();
        if( iff->outcnt() == 2 ) {        // Ignore dead tests
          Node *bol = iff->in(1);
          if( bol && bol->req() > 1 && bol->in(1) &&
!             ((bol->in(1)->Opcode() == Opcodes::Op_StorePConditional ) ||
!              (bol->in(1)->Opcode() == Opcodes::Op_StoreIConditional ) ||
!              (bol->in(1)->Opcode() == Opcodes::Op_StoreLConditional ) ||
!              (bol->in(1)->Opcode() == Opcodes::Op_CompareAndExchangeB ) ||
!              (bol->in(1)->Opcode() == Opcodes::Op_CompareAndExchangeS ) ||
!              (bol->in(1)->Opcode() == Opcodes::Op_CompareAndExchangeI ) ||
!              (bol->in(1)->Opcode() == Opcodes::Op_CompareAndExchangeL ) ||
!              (bol->in(1)->Opcode() == Opcodes::Op_CompareAndExchangeP ) ||
!              (bol->in(1)->Opcode() == Opcodes::Op_CompareAndExchangeN ) ||
!              (bol->in(1)->Opcode() == Opcodes::Op_WeakCompareAndSwapB ) ||
!              (bol->in(1)->Opcode() == Opcodes::Op_WeakCompareAndSwapS ) ||
!              (bol->in(1)->Opcode() == Opcodes::Op_WeakCompareAndSwapI ) ||
!              (bol->in(1)->Opcode() == Opcodes::Op_WeakCompareAndSwapL ) ||
!              (bol->in(1)->Opcode() == Opcodes::Op_WeakCompareAndSwapP ) ||
!              (bol->in(1)->Opcode() == Opcodes::Op_WeakCompareAndSwapN ) ||
!              (bol->in(1)->Opcode() == Opcodes::Op_CompareAndSwapB ) ||
!              (bol->in(1)->Opcode() == Opcodes::Op_CompareAndSwapS ) ||
!              (bol->in(1)->Opcode() == Opcodes::Op_CompareAndSwapI ) ||
!              (bol->in(1)->Opcode() == Opcodes::Op_CompareAndSwapL ) ||
!              (bol->in(1)->Opcode() == Opcodes::Op_CompareAndSwapP ) ||
!              (bol->in(1)->Opcode() == Opcodes::Op_CompareAndSwapN )))
            return;         // Allocation loops RARELY take backedge
          // Find the OTHER exit path from the IF
          Node* ex = iff->proj_out(1-test_con);
          float p = iff->_prob;
          if( !phase->is_member( this, ex ) && iff->_fcnt == COUNT_UNKNOWN ) {
!           if( top == Opcodes::Op_IfTrue ) {
              if( p < (PROB_FAIR + PROB_UNLIKELY_MAG(3))) {
                iff->_prob = PROB_STATIC_FREQUENT;
              }
            } else {
              if( p > (PROB_FAIR - PROB_UNLIKELY_MAG(3))) {
*** 2512,2526 ****
  }

  #ifdef ASSERT
  static CountedLoopNode* locate_pre_from_main(CountedLoopNode *cl) {
    Node *ctrl = cl->in(LoopNode::EntryControl);
!   assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "");
    Node *iffm = ctrl->in(0);
!   assert(iffm->Opcode() == Op_If, "");
    Node *p_f = iffm->in(0);
!   assert(p_f->Opcode() == Op_IfFalse, "");
    CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
    assert(pre_end->loopnode()->is_pre_loop(), "");
    return pre_end->loopnode();
  }
  #endif
--- 2512,2526 ----
  }

  #ifdef ASSERT
  static CountedLoopNode* locate_pre_from_main(CountedLoopNode *cl) {
    Node *ctrl = cl->in(LoopNode::EntryControl);
!   assert(ctrl->Opcode() == Opcodes::Op_IfTrue || ctrl->Opcode() == Opcodes::Op_IfFalse, "");
    Node *iffm = ctrl->in(0);
!   assert(iffm->Opcode() == Opcodes::Op_If, "");
    Node *p_f = iffm->in(0);
!   assert(p_f->Opcode() == Opcodes::Op_IfFalse, "");
    CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
    assert(pre_end->loopnode()->is_pre_loop(), "");
    return pre_end->loopnode();
  }
  #endif
*** 2528,2538 ****
  // Remove the main and post loops and make the pre loop execute all
  // iterations. Useful when the pre loop is found empty.
  void IdealLoopTree::remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *phase) {
    CountedLoopEndNode* pre_end = cl->loopexit();
    Node* pre_cmp = pre_end->cmp_node();
!   if (pre_cmp->in(2)->Opcode() != Op_Opaque1) {
      // Only safe to remove the main loop if the compiler optimized it
      // out based on an unknown number of iterations
      return;
    }
--- 2528,2538 ----
  // Remove the main and post loops and make the pre loop execute all
  // iterations. Useful when the pre loop is found empty.
  void IdealLoopTree::remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *phase) {
    CountedLoopEndNode* pre_end = cl->loopexit();
    Node* pre_cmp = pre_end->cmp_node();
!   if (pre_cmp->in(2)->Opcode() != Opcodes::Op_Opaque1) {
      // Only safe to remove the main loop if the compiler optimized it
      // out based on an unknown number of iterations
      return;
    }
*** 2556,2566 ****
    // Remove the Opaque1Node of the pre loop and make it execute all iterations
    phase->_igvn.replace_input_of(pre_cmp, 2, pre_cmp->in(2)->in(2));
    // Remove the Opaque1Node of the main loop so it can be optimized out
    Node* main_cmp = main_iff->in(1)->in(1);
!   assert(main_cmp->in(2)->Opcode() == Op_Opaque1, "main loop has no opaque node?");
    phase->_igvn.replace_input_of(main_cmp, 2, main_cmp->in(2)->in(1));
  }

  //------------------------------policy_do_remove_empty_loop--------------------
  // Micro-benchmark spamming. Policy is to always remove empty loops.
--- 2556,2566 ----
    // Remove the Opaque1Node of the pre loop and make it execute all iterations
    phase->_igvn.replace_input_of(pre_cmp, 2, pre_cmp->in(2)->in(2));
    // Remove the Opaque1Node of the main loop so it can be optimized out
    Node* main_cmp = main_iff->in(1)->in(1);
!   assert(main_cmp->in(2)->Opcode() == Opcodes::Op_Opaque1, "main loop has no opaque node?");
    phase->_igvn.replace_input_of(main_cmp, 2, main_cmp->in(2)->in(1));
  }

  //------------------------------policy_do_remove_empty_loop--------------------
  // Micro-benchmark spamming. Policy is to always remove empty loops.
*** 2588,2598 ****
  #ifdef ASSERT
    // Ensure only one phi which is the iv.
    Node* iv = NULL;
    for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) {
      Node* n = cl->fast_out(i);
!     if (n->Opcode() == Op_Phi) {
        assert(iv == NULL, "Too many phis" );
        iv = n;
      }
    }
    assert(iv == cl->phi(), "Wrong phi" );
--- 2588,2598 ----
  #ifdef ASSERT
    // Ensure only one phi which is the iv.
    Node* iv = NULL;
    for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) {
      Node* n = cl->fast_out(i);
!     if (n->Opcode() == Opcodes::Op_Phi) {
        assert(iv == NULL, "Too many phis" );
        iv = n;
      }
    }
    assert(iv == cl->phi(), "Wrong phi" );
*** 2612,2623 ****
        }
      }
      if (needs_guard) {
        // Check for an obvious zero trip guard.
        Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->in(LoopNode::EntryControl));
!       if (inctrl->Opcode() == Op_IfTrue || inctrl->Opcode() == Op_IfFalse) {
!         bool maybe_swapped = (inctrl->Opcode() == Op_IfFalse);
          // The test should look like just the backedge of a CountedLoop
          Node* iff = inctrl->in(0);
          if (iff->is_If()) {
            Node* bol = iff->in(1);
            if (bol->is_Bool()) {
--- 2612,2623 ----
        }
      }
      if (needs_guard) {
        // Check for an obvious zero trip guard.
        Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->in(LoopNode::EntryControl));
!       if (inctrl->Opcode() == Opcodes::Op_IfTrue || inctrl->Opcode() == Opcodes::Op_IfFalse) {
!         bool maybe_swapped = (inctrl->Opcode() == Opcodes::Op_IfFalse);
          // The test should look like just the backedge of a CountedLoop
          Node* iff = inctrl->in(0);
          if (iff->is_If()) {
            Node* bol = iff->in(1);
            if (bol->is_Bool()) {
*** 2926,2937 ****
      if (n->is_Store()) {
        if (store != NULL) {
          msg = "multiple stores";
          break;
        }
!       int opc = n->Opcode();
!       if (opc == Op_StoreP || opc == Op_StoreN || opc == Op_StoreNKlass || opc == Op_StoreCM) {
          msg = "oop fills not handled";
          break;
        }
        Node* value = n->in(MemNode::ValueIn);
        if (!lpt->is_invariant(value)) {
--- 2926,2937 ----
      if (n->is_Store()) {
        if (store != NULL) {
          msg = "multiple stores";
          break;
        }
!       Opcodes opc = n->Opcode();
!       if (opc == Opcodes::Op_StoreP || opc == Opcodes::Op_StoreN || opc == Opcodes::Op_StoreNKlass || opc == Opcodes::Op_StoreCM) {
          msg = "oop fills not handled";
          break;
        }
        Node* value = n->in(MemNode::ValueIn);
        if (!lpt->is_invariant(value)) {
*** 3001,3018 ****
    int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements));
    for (int e = 0; e < count; e++) {
      Node* n = elements[e];
      if (n->is_Con() && con == NULL) {
        con = n;
!     } else if (n->Opcode() == Op_LShiftX && shift == NULL) {
        Node* value = n->in(1);
  #ifdef _LP64
!       if (value->Opcode() == Op_ConvI2L) {
          conv = value;
          value = value->in(1);
        }
!       if (value->Opcode() == Op_CastII &&
            value->as_CastII()->has_range_check()) {
          // Skip range check dependent CastII nodes
          cast = value;
          value = value->in(1);
        }
--- 3001,3018 ----
    int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements));
    for (int e = 0; e < count; e++) {
      Node* n = elements[e];
      if (n->is_Con() && con == NULL) {
        con = n;
!     } else if (n->Opcode() == Opcodes::Op_LShiftX && shift == NULL) {
        Node* value = n->in(1);
  #ifdef _LP64
!       if (value->Opcode() == Opcodes::Op_ConvI2L) {
          conv = value;
          value = value->in(1);
        }
!       if (value->Opcode() == Opcodes::Op_CastII &&
            value->as_CastII()->has_range_check()) {
          // Skip range check dependent CastII nodes
          cast = value;
          value = value->in(1);
        }
*** 3025,3038 ****
        } else {
          found_index = true;
          shift = n;
        }
      }
!   } else if (n->Opcode() == Op_ConvI2L && conv == NULL) {
      conv = n;
      n = n->in(1);
!     if (n->Opcode() == Op_CastII &&
          n->as_CastII()->has_range_check()) {
        // Skip range check dependent CastII nodes
        cast = n;
        n = n->in(1);
      }
--- 3025,3038 ----
        } else {
          found_index = true;
          shift = n;
        }
      }
!   } else if (n->Opcode() == Opcodes::Op_ConvI2L && conv == NULL) {
      conv = n;
      n = n->in(1);
!     if (n->Opcode() == Opcodes::Op_CastII &&
          n->as_CastII()->has_range_check()) {
        // Skip range check dependent CastII nodes
        cast = n;
        n = n->in(1);
      }
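
The hunks above all apply one mechanical transformation: opcode values that used to be held in plain int/uint variables and compared against unqualified Op_* constants are now held in an Opcodes-typed variable and compared against scope-qualified enumerators (Opcodes::Op_*). The standalone sketch below illustrates that pattern only; it assumes Opcodes is a C++11 scoped enumeration, and the enumerator subset, the helper name is_invariant_addition_style, and the printf driver are illustrative, not part of this webrev or of HotSpot's real opcodes header.

// Minimal sketch of the scoped-enum opcode style used throughout this change.
// Names below are hypothetical stand-ins for the real Opcodes type.
#include <cstdio>

enum class Opcodes {
  Op_If,
  Op_IfTrue,
  Op_IfFalse,
  Op_AddI,
  Op_SubI
};

// Before: "int op = n->Opcode(); if (op == Op_AddI || ...)".
// After: the variable is strongly typed and every enumerator is qualified.
static int is_invariant_addition_style(Opcodes op) {
  if (op == Opcodes::Op_AddI || op == Opcodes::Op_SubI) {
    return 1;  // the real code would go on to inspect the operands
  }
  return 0;
}

int main() {
  Opcodes op = Opcodes::Op_AddI;   // was: int op = n->Opcode();
  std::printf("%d\n", is_invariant_addition_style(op));
  return 0;
}

A scoped enum makes such comparisons type-checked: mixing an Opcodes value with a bare integer or an unqualified constant no longer compiles, which is why every Op_* reference in the diff gains the Opcodes:: prefix.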