// NOTE(review): this chunk is the interior of a larger function (its head and
// tail lie outside this view — looks like HotSpot C2 SuperWord::vector_opd;
// TODO confirm).  Code is left byte-identical; only comments are added.
//
// First part: 'cnt'/'opd' is the scalar shift count of a vector-shift pack
// headed by 'p0'; it is normalized into [0, mask].  'mask' is defined above
// this chunk — presumably element_bits - 1 for p0's element type (verify).
// Case 1: the count's type 't' is a known compile-time constant.
2728 if (t != NULL && t->is_con()) {
// Reading the constant into a juint makes a negative count appear as a huge
// unsigned value, so the single unsigned compare below catches both
// "too large" and "negative", folding the count to (shift & mask).
2729 juint shift = t->get_con();
2730 if (shift > mask) { // Unsigned cmp
2731 cnt = ConNode::make(TypeInt::make(shift & mask));
2732 }
// Case 2: the count is not a known constant.
2733 } else {
// If its int range [_lo, _hi] might fall outside [0, mask] (or the type is
// unknown), clamp at runtime: cnt = opd & mask via a fresh AndINode, each new
// node registered with IGVN and given the same ctrl as opd.
2734 if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
2735 cnt = ConNode::make(TypeInt::make(mask));
2736 _igvn.register_new_node_with_optimizer(cnt);
2737 cnt = new AndINode(opd, cnt);
2738 _igvn.register_new_node_with_optimizer(cnt);
2739 _phase->set_ctrl(cnt, _phase->get_ctrl(opd));
2740 }
// Shift counts must be int-typed: assert in debug builds; in product builds
// bail out of vectorization (return NULL) instead of miscompiling.
2741 assert(opd->bottom_type()->isa_int(), "int type only");
2742 if (!opd->bottom_type()->isa_int()) {
2743 NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("Should be int type only");})
2744 return NULL;
2745 }
2746 }
2747 // Move shift count into vector register.
// Wrap the normalized scalar count in a shift-count node sized for p0's
// vector (vlen lanes of velt_basic_type(p0)) and return it to the caller.
2748 cnt = VectorNode::shift_count(p0, cnt, vlen, velt_basic_type(p0));
2749 _igvn.register_new_node_with_optimizer(cnt);
2750 _phase->set_ctrl(cnt, _phase->get_ctrl(opd));
2751 return cnt;
2752 }
// Non-shift path: a StoreVector operand is never expected here — assert in
// debug, bail out (return NULL) in product.
2753 assert(!opd->is_StoreVector(), "such vector is not expected here");
2754 if (opd->is_StoreVector()) {
2755 NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("StoreVector is not expected here");})
2756 return NULL;
2757 }
2758 // Convert scalar input to vector with the same number of elements as
2759 // p0's vector. Use p0's type because size of operand's container in
2760 // vector should match p0's size regardless operand's size.
2761 const Type* p0_t = velt_type(p0);
2762 VectorNode* vn = VectorNode::scalar2vector(opd, vlen, p0_t);
2763
2764 _igvn.register_new_node_with_optimizer(vn);
2765 _phase->set_ctrl(vn, _phase->get_ctrl(opd));
// Debug-only trace of each freshly created vector node (the traced block
// continues past the end of this chunk).
2766 #ifdef ASSERT
2767 if (TraceNewVectors) {
2768 tty->print("new Vector node: ");
|
// NOTE(review): interior of a larger function (head and tail outside this
// view — looks like HotSpot C2 SuperWord::vector_opd; TODO confirm).  Code is
// left byte-identical; only comments are added.
//
// Shift-count normalization: clamp the scalar count 'cnt'/'opd' of the
// vector-shift pack headed by 'p0' into [0, mask].  'mask' is defined above
// this chunk — presumably element_bits - 1 for p0's element type (verify).
// Constant-count case:
2728 if (t != NULL && t->is_con()) {
// The juint cast turns a negative constant into a huge unsigned value, so
// one unsigned compare covers both "negative" and "> mask"; the constant is
// refolded to (shift & mask).
2729 juint shift = t->get_con();
2730 if (shift > mask) { // Unsigned cmp
2731 cnt = ConNode::make(TypeInt::make(shift & mask));
2732 }
// Variable-count case:
2733 } else {
// When the count's int range may escape [0, mask] (or t is unknown), mask it
// at runtime with a new AndINode; new nodes are IGVN-registered and take
// opd's ctrl.
2734 if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
2735 cnt = ConNode::make(TypeInt::make(mask));
2736 _igvn.register_new_node_with_optimizer(cnt);
2737 cnt = new AndINode(opd, cnt);
2738 _igvn.register_new_node_with_optimizer(cnt);
2739 _phase->set_ctrl(cnt, _phase->get_ctrl(opd));
2740 }
// Counts must be int-typed: debug assert plus a product-build bail-out
// (return NULL) rather than a miscompile.
2741 assert(opd->bottom_type()->isa_int(), "int type only");
2742 if (!opd->bottom_type()->isa_int()) {
2743 NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("Should be int type only");})
2744 return NULL;
2745 }
2746 }
2747 // Move shift count into vector register.
// Build the shift-count node for a vlen-lane vector of velt_basic_type(p0).
// Note: this copy of the fragment passes the pack head's opcode
// (p0->Opcode()) rather than the node itself — matching a shift_count
// overload that takes an int opcode (its signature is not visible in this
// chunk; TODO confirm).
2748 cnt = VectorNode::shift_count(p0->Opcode(), cnt, vlen, velt_basic_type(p0));
2749 _igvn.register_new_node_with_optimizer(cnt);
2750 _phase->set_ctrl(cnt, _phase->get_ctrl(opd));
2751 return cnt;
2752 }
// Non-shift path: a StoreVector must never reach here — debug assert plus
// product bail-out.
2753 assert(!opd->is_StoreVector(), "such vector is not expected here");
2754 if (opd->is_StoreVector()) {
2755 NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("StoreVector is not expected here");})
2756 return NULL;
2757 }
2758 // Convert scalar input to vector with the same number of elements as
2759 // p0's vector. Use p0's type because size of operand's container in
2760 // vector should match p0's size regardless operand's size.
2761 const Type* p0_t = velt_type(p0);
2762 VectorNode* vn = VectorNode::scalar2vector(opd, vlen, p0_t);
2763
2764 _igvn.register_new_node_with_optimizer(vn);
2765 _phase->set_ctrl(vn, _phase->get_ctrl(opd));
// Debug-only trace of each new vector node (block continues past the end of
// this chunk).
2766 #ifdef ASSERT
2767 if (TraceNewVectors) {
2768 tty->print("new Vector node: ");
|