< prev index next >

src/hotspot/cpu/x86/x86.ad

Print this page

        

@@ -1164,18 +1164,10 @@
     return 5 + NativeJump::instruction_size; // pushl(); jmp;
   }
 #endif
 };
 
-class Node::PD {
-public:
-  enum NodeFlags {
-    Flag_intel_jcc_erratum = Node::_last_flag << 1,
-    _last_flag             = Flag_intel_jcc_erratum
-  };
-};
-
 
 inline uint vector_length(const Node* n) {
   const TypeVect* vt = n->bottom_type()->is_vect();
   return vt->length();
 }

@@ -1230,10 +1222,18 @@
   uint def_idx = use->operand_index(opnd);
   Node* def = use->in(def_idx);
   return vector_length_encoding(def);
 }
 
+class Node::PD {
+public:
+  enum NodeFlags {
+    Flag_intel_jcc_erratum = Node::_last_flag << 1,
+    _last_flag             = Flag_intel_jcc_erratum
+  };
+};
+
 %} // end source_hpp
 
 source %{
 
 #include "opto/addnode.hpp"

@@ -1503,18 +1503,10 @@
     case Op_RoundDoubleModeV:
       if (VM_Version::supports_avx() == false) {
         return false; // 128bit vroundpd is not available
       }
       break;
-    case Op_MacroLogicV:
-      if (UseAVX < 3 || !UseVectorMacroLogic) {
-        return false;
-      }
-      break;
-    case Op_VLShiftV:
-    case Op_VRShiftV:
-    case Op_VURShiftV:
     case Op_LoadVectorGather:
       if (UseAVX < 2) {
         return false;
       }
       break;

@@ -1522,10 +1514,15 @@
     case Op_FmaVF:
       if (!UseFMA) {
         return false;
       }
       break;
+    case Op_MacroLogicV:
+      if (UseAVX < 3 || !UseVectorMacroLogic) {
+        return false;
+      }
+      break;
 #ifndef _LP64
     case Op_AddReductionVF:
     case Op_AddReductionVD:
     case Op_MulReductionVF:
     case Op_MulReductionVD:

@@ -1560,11 +1557,10 @@
   //   * AVX2 supports 256bit vectors for all types;
   //   * AVX512F supports 512bit vectors only for INT, FLOAT, and DOUBLE types;
   //   * AVX512BW supports 512bit vectors for BYTE, SHORT, and CHAR types.
   // There's also a limit on minimum vector size supported: 2 elements (or 4 bytes for BYTE).
   // And MaxVectorSize is taken into account as well.
-
   if (!vector_size_supported(bt, vlen)) {
     return false;
   }
   // Special cases which require vector length follow:
   //   * implementation limitations

@@ -1793,10 +1789,14 @@
   }
 }
 
 //------------------------------------------------------------------------
 
+bool Matcher::supports_vector_variable_shifts(void) {
+  return (UseAVX >= 2);
+}
+
 const bool Matcher::has_predicated_vectors(void) {
   bool ret_value = false;
   if (UseAVX > 2) {
     ret_value = VM_Version::supports_avx512vl();
   }

@@ -4191,16 +4191,16 @@
 %}
 
 // ====================VECTOR INSERT=======================================
 
 instruct insert(vec dst, rRegI val, immU8 idx) %{
-  predicate(vector_length_in_bytes(n) >=  8 &&
-            vector_length_in_bytes(n) <= 16);
+  predicate(vector_length_in_bytes(n) < 32);
   match(Set dst (VectorInsert (Binary dst val) idx));
   format %{ "vector_insert $dst,$val,$idx" %}
   ins_encode %{
     assert(UseSSE >= 4, "required");
+    assert(vector_length_in_bytes(this) >= 8, "required");
 
     BasicType elem_bt = vector_element_basic_type(this);
 
     assert(is_integral_type(elem_bt), "");
     assert($idx$$constant < (int)vector_length(this), "out of bounds");

@@ -4226,11 +4226,11 @@
 
     uint x_idx = $idx$$constant & right_n_bits(log2epr);
     uint y_idx = ($idx$$constant >> log2epr) & 1;
     __ vextracti128($vtmp$$XMMRegister, $src$$XMMRegister, y_idx);
     __ vinsert(elem_bt, $vtmp$$XMMRegister, $vtmp$$XMMRegister, $val$$Register, x_idx);
-    __ vinserti128($dst$$XMMRegister, $dst$$XMMRegister, $vtmp$$XMMRegister, y_idx);
+    __ vinserti128($dst$$XMMRegister, $src$$XMMRegister, $vtmp$$XMMRegister, y_idx);
   %}
   ins_pipe( pipe_slow );
 %}
 
 instruct insert64(vec dst, vec src, rRegI val, immU8 idx, legVec vtmp) %{

@@ -4250,11 +4250,11 @@
 
     uint x_idx = $idx$$constant & right_n_bits(log2epr);
     uint y_idx = ($idx$$constant >> log2epr) & 3;
     __ vextracti32x4($vtmp$$XMMRegister, $src$$XMMRegister, y_idx);
     __ vinsert(elem_bt, $vtmp$$XMMRegister, $vtmp$$XMMRegister, $val$$Register, x_idx);
-    __ vinserti32x4($dst$$XMMRegister, $dst$$XMMRegister, $vtmp$$XMMRegister, y_idx);
+    __ vinserti32x4($dst$$XMMRegister, $src$$XMMRegister, $vtmp$$XMMRegister, y_idx);
   %}
   ins_pipe( pipe_slow );
 %}
 
 #ifdef _LP64

@@ -4284,11 +4284,11 @@
     uint x_idx = $idx$$constant & right_n_bits(1);
     uint y_idx = ($idx$$constant >> 1) & 1;
     int vlen_enc = Assembler::AVX_256bit;
     __ vextracti128($vtmp$$XMMRegister, $src$$XMMRegister, y_idx);
     __ vpinsrq($vtmp$$XMMRegister, $vtmp$$XMMRegister, $val$$Register, x_idx);
-    __ vinserti128($dst$$XMMRegister, $dst$$XMMRegister, $vtmp$$XMMRegister, y_idx);
+    __ vinserti128($dst$$XMMRegister, $src$$XMMRegister, $vtmp$$XMMRegister, y_idx);
   %}
   ins_pipe( pipe_slow );
 %}
 
 instruct insert8L(vec dst, vec src, rRegL val, immU8 idx, legVec vtmp) %{

@@ -4302,19 +4302,18 @@
 
     uint x_idx = $idx$$constant & right_n_bits(1);
     uint y_idx = ($idx$$constant >> 1) & 3;
     __ vextracti32x4($vtmp$$XMMRegister, $src$$XMMRegister, y_idx);
     __ vpinsrq($vtmp$$XMMRegister, $vtmp$$XMMRegister, $val$$Register, x_idx);
-    __ vinserti32x4($dst$$XMMRegister, $dst$$XMMRegister, $vtmp$$XMMRegister, y_idx);
+    __ vinserti32x4($dst$$XMMRegister, $src$$XMMRegister, $vtmp$$XMMRegister, y_idx);
   %}
   ins_pipe( pipe_slow );
 %}
 #endif
 
 instruct insertF(vec dst, regF val, immU8 idx) %{
-  predicate(vector_length(n) >= 2 &&
-            vector_length(n) <= 4);
+  predicate(vector_length(n) < 8);
   match(Set dst (VectorInsert (Binary dst val) idx));
   format %{ "vector_insert $dst,$val,$idx" %}
   ins_encode %{
     assert(UseSSE >= 4, "sanity");
 

@@ -4340,17 +4339,17 @@
     if (vlen == 8) {
       uint y_idx = ($idx$$constant >> 2) & 1;
       int vlen_enc = Assembler::AVX_256bit;
       __ vextracti128($vtmp$$XMMRegister, $src$$XMMRegister, y_idx);
       __ vinsertps($vtmp$$XMMRegister, $vtmp$$XMMRegister, $val$$XMMRegister, x_idx);
-      __ vinserti128($dst$$XMMRegister, $dst$$XMMRegister, $vtmp$$XMMRegister, y_idx);
+      __ vinserti128($dst$$XMMRegister, $src$$XMMRegister, $vtmp$$XMMRegister, y_idx);
     } else {
       assert(vlen == 16, "sanity");
       uint y_idx = ($idx$$constant >> 2) & 3;
       __ vextracti32x4($vtmp$$XMMRegister, $src$$XMMRegister, y_idx);
       __ vinsertps($vtmp$$XMMRegister, $vtmp$$XMMRegister, $val$$XMMRegister, x_idx);
-      __ vinserti32x4($dst$$XMMRegister, $dst$$XMMRegister, $vtmp$$XMMRegister, y_idx);
+      __ vinserti32x4($dst$$XMMRegister, $src$$XMMRegister, $vtmp$$XMMRegister, y_idx);
     }
   %}
   ins_pipe( pipe_slow );
 %}
 

@@ -4384,11 +4383,11 @@
     uint y_idx = ($idx$$constant >> 1) & 1;
     int vlen_enc = Assembler::AVX_256bit;
     __ movq($tmp$$Register, $val$$XMMRegister);
     __ vextracti128($vtmp$$XMMRegister, $src$$XMMRegister, y_idx);
     __ vpinsrq($vtmp$$XMMRegister, $vtmp$$XMMRegister, $tmp$$Register, x_idx);
-    __ vinserti128($dst$$XMMRegister, $dst$$XMMRegister, $vtmp$$XMMRegister, y_idx);
+    __ vinserti128($dst$$XMMRegister, $src$$XMMRegister, $vtmp$$XMMRegister, y_idx);
   %}
   ins_pipe( pipe_slow );
 %}
 
 instruct insert8D(vec dst, vec src, regD val, immI idx, rRegL tmp, legVec vtmp) %{

@@ -4403,11 +4402,11 @@
     uint x_idx = $idx$$constant & right_n_bits(1);
     uint y_idx = ($idx$$constant >> 1) & 3;
     __ movq($tmp$$Register, $val$$XMMRegister);
     __ vextracti32x4($vtmp$$XMMRegister, $src$$XMMRegister, y_idx);
     __ vpinsrq($vtmp$$XMMRegister, $vtmp$$XMMRegister, $tmp$$Register, x_idx);
-    __ vinserti32x4($dst$$XMMRegister, $dst$$XMMRegister, $vtmp$$XMMRegister, y_idx);
+    __ vinserti32x4($dst$$XMMRegister, $src$$XMMRegister, $vtmp$$XMMRegister, y_idx);
   %}
   ins_pipe( pipe_slow );
 %}
 #endif
 

@@ -5902,40 +5901,41 @@
   ins_pipe( pipe_slow );
 %}
 
 // Byte vector shift
 instruct vshiftB(vec dst, vec src, vec shift, vec tmp, rRegI scratch) %{
-  predicate(vector_length(n) <= 8);
+  predicate(vector_length(n) <= 8 && VectorNode::is_vshift_cnt(n->in(2)));
   match(Set dst ( LShiftVB src shift));
   match(Set dst ( RShiftVB src shift));
   match(Set dst (URShiftVB src shift));
   effect(TEMP dst, USE src, USE shift, TEMP tmp, TEMP scratch);
   format %{"vector_byte_shift $dst,$src,$shift" %}
   ins_encode %{
     assert(UseSSE > 3, "required");
     int opcode = this->ideal_Opcode();
-    bool sign = (opcode == Op_URShiftVB) ? false : true;
+    bool sign = (opcode != Op_URShiftVB);
     __ vextendbw(sign, $tmp$$XMMRegister, $src$$XMMRegister);
     __ vshiftw(opcode, $tmp$$XMMRegister, $shift$$XMMRegister);
     __ movdqu($dst$$XMMRegister, ExternalAddress(vector_short_to_byte_mask()), $scratch$$Register);
     __ pand($dst$$XMMRegister, $tmp$$XMMRegister);
     __ packuswb($dst$$XMMRegister, $dst$$XMMRegister);
   %}
   ins_pipe( pipe_slow );
 %}
 
 instruct vshift16B(vec dst, vec src, vec shift, vec tmp1, vec tmp2, rRegI scratch) %{
-  predicate(vector_length(n) == 16 && UseAVX <= 1);
+  predicate(vector_length(n) == 16 && VectorNode::is_vshift_cnt(n->in(2)) &&
+            UseAVX <= 1);
   match(Set dst ( LShiftVB src shift));
   match(Set dst ( RShiftVB src shift));
   match(Set dst (URShiftVB src shift));
   effect(TEMP dst, USE src, USE shift, TEMP tmp1, TEMP tmp2, TEMP scratch);
   format %{"vector_byte_shift $dst,$src,$shift" %}
   ins_encode %{
     assert(UseSSE > 3, "required");
     int opcode = this->ideal_Opcode();
-    bool sign = (opcode == Op_URShiftVB) ? false : true;
+    bool sign = (opcode != Op_URShiftVB);
     __ vextendbw(sign, $tmp1$$XMMRegister, $src$$XMMRegister);
     __ vshiftw(opcode, $tmp1$$XMMRegister, $shift$$XMMRegister);
     __ pshufd($tmp2$$XMMRegister, $src$$XMMRegister, 0xE);
     __ vextendbw(sign, $tmp2$$XMMRegister, $tmp2$$XMMRegister);
     __ vshiftw(opcode, $tmp2$$XMMRegister, $shift$$XMMRegister);

@@ -5946,19 +5946,20 @@
   %}
   ins_pipe( pipe_slow );
 %}
 
 instruct vshift16B_avx(vec dst, vec src, vec shift, vec tmp, rRegI scratch) %{
-  predicate(vector_length(n) == 16 && UseAVX > 1);
+  predicate(vector_length(n) == 16 && VectorNode::is_vshift_cnt(n->in(2)) &&
+            UseAVX > 1);
   match(Set dst ( LShiftVB src shift));
   match(Set dst ( RShiftVB src shift));
   match(Set dst (URShiftVB src shift));
   effect(TEMP dst, TEMP tmp, TEMP scratch);
   format %{"vector_byte_shift $dst,$src,$shift" %}
   ins_encode %{
     int opcode = this->ideal_Opcode();
-    bool sign = (opcode == Op_URShiftVB) ? false : true;
+    bool sign = (opcode != Op_URShiftVB);
     int vlen_enc = Assembler::AVX_256bit;
     __ vextendbw(sign, $tmp$$XMMRegister, $src$$XMMRegister, vlen_enc);
     __ vshiftw(opcode, $tmp$$XMMRegister, $tmp$$XMMRegister, $shift$$XMMRegister, vlen_enc);
     __ vpand($tmp$$XMMRegister, $tmp$$XMMRegister, ExternalAddress(vector_short_to_byte_mask()), vlen_enc, $scratch$$Register);
     __ vextracti128_high($dst$$XMMRegister, $tmp$$XMMRegister);

@@ -5966,20 +5967,20 @@
   %}
   ins_pipe( pipe_slow );
 %}
 
 instruct vshift32B_avx(vec dst, vec src, vec shift, vec tmp, rRegI scratch) %{
-  predicate(vector_length(n) == 32);
+  predicate(vector_length(n) == 32 && VectorNode::is_vshift_cnt(n->in(2)));
   match(Set dst ( LShiftVB src shift));
   match(Set dst ( RShiftVB src shift));
   match(Set dst (URShiftVB src shift));
   effect(TEMP dst, TEMP tmp, TEMP scratch);
   format %{"vector_byte_shift $dst,$src,$shift" %}
   ins_encode %{
     assert(UseAVX > 1, "required");
     int opcode = this->ideal_Opcode();
-    bool sign = (opcode == Op_URShiftVB) ? false : true;
+    bool sign = (opcode != Op_URShiftVB);
     int vlen_enc = Assembler::AVX_256bit;
     __ vextracti128_high($tmp$$XMMRegister, $src$$XMMRegister);
     __ vextendbw(sign, $tmp$$XMMRegister, $tmp$$XMMRegister, vlen_enc);
     __ vextendbw(sign, $dst$$XMMRegister, $src$$XMMRegister, vlen_enc);
     __ vshiftw(opcode, $tmp$$XMMRegister, $tmp$$XMMRegister, $shift$$XMMRegister, vlen_enc);

@@ -5991,20 +5992,20 @@
   %}
   ins_pipe( pipe_slow );
 %}
 
 instruct vshift64B_avx(vec dst, vec src, vec shift, vec tmp1, vec tmp2, rRegI scratch) %{
-  predicate(vector_length(n) == 64);
+  predicate(vector_length(n) == 64 && VectorNode::is_vshift_cnt(n->in(2)));
   match(Set dst ( LShiftVB src shift));
   match(Set dst  (RShiftVB src shift));
   match(Set dst (URShiftVB src shift));
   effect(TEMP dst, TEMP tmp1, TEMP tmp2, TEMP scratch);
   format %{"vector_byte_shift $dst,$src,$shift" %}
   ins_encode %{
     assert(UseAVX > 2, "required");
     int opcode = this->ideal_Opcode();
-    bool sign = (opcode == Op_URShiftVB) ? false : true;
+    bool sign = (opcode != Op_URShiftVB);
     int vlen_enc = Assembler::AVX_512bit;
     __ vextracti64x4($tmp1$$XMMRegister, $src$$XMMRegister, 1);
     __ vextendbw(sign, $tmp1$$XMMRegister, $tmp1$$XMMRegister, vlen_enc);
     __ vextendbw(sign, $tmp2$$XMMRegister, $src$$XMMRegister, vlen_enc);
     __ vshiftw(opcode, $tmp1$$XMMRegister, $tmp1$$XMMRegister, $shift$$XMMRegister, vlen_enc);

@@ -6024,10 +6025,11 @@
 // for negative data because java code convert short value into int with
 // sign extension before a shift. But char vectors are fine since chars are
 // unsigned values.
 // Shorts/Chars vector left shift
 instruct vshiftS(vec dst, vec src, vec shift) %{
+  predicate(VectorNode::is_vshift_cnt(n->in(2)));
   match(Set dst ( LShiftVS src shift));
   match(Set dst ( RShiftVS src shift));
   match(Set dst (URShiftVS src shift));
   effect(TEMP dst, USE src, USE shift);
   format %{ "vshiftw  $dst,$src,$shift\t! shift packedS" %}

@@ -6054,10 +6056,11 @@
   ins_pipe( pipe_slow );
 %}
 
 // Integers vector left shift
 instruct vshiftI(vec dst, vec src, vec shift) %{
+  predicate(VectorNode::is_vshift_cnt(n->in(2)));
   match(Set dst ( LShiftVI src shift));
   match(Set dst ( RShiftVI src shift));
   match(Set dst (URShiftVI src shift));
   effect(TEMP dst, USE src, USE shift);
   format %{ "vshiftd  $dst,$src,$shift\t! shift packedI" %}

@@ -6081,10 +6084,11 @@
   ins_pipe( pipe_slow );
 %}
 
 // Longs vector shift
 instruct vshiftL(vec dst, vec src, vec shift) %{
+  predicate(VectorNode::is_vshift_cnt(n->in(2)));
   match(Set dst ( LShiftVL src shift));
   match(Set dst (URShiftVL src shift));
   effect(TEMP dst, USE src, USE shift);
   format %{ "vshiftq  $dst,$src,$shift\t! shift packedL" %}
   ins_encode %{

@@ -6102,11 +6106,11 @@
 %}
 
 // -------------------ArithmeticRightShift -----------------------------------
 // Long vector arithmetic right shift
 instruct vshiftL_arith_reg(vec dst, vec src, vec shift, vec tmp, rRegI scratch) %{
-  predicate(UseAVX <= 2);
+  predicate(VectorNode::is_vshift_cnt(n->in(2)) && UseAVX <= 2);
   match(Set dst (RShiftVL src shift));
   effect(TEMP dst, TEMP tmp, TEMP scratch);
   format %{ "vshiftq $dst,$src,$shift" %}
   ins_encode %{
     uint vlen = vector_length(this);

@@ -6131,11 +6135,11 @@
   %}
   ins_pipe( pipe_slow );
 %}
 
 instruct vshiftL_arith_reg_evex(vec dst, vec src, vec shift) %{
-  predicate(UseAVX > 2);
+  predicate(VectorNode::is_vshift_cnt(n->in(2)) && UseAVX > 2);
   match(Set dst (RShiftVL src shift));
   format %{ "vshiftq $dst,$src,$shift" %}
   ins_encode %{
     int vlen_enc = vector_length_encoding(this);
     __ evpsraq($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vlen_enc);

@@ -6144,15 +6148,16 @@
 %}
 
 // ------------------- Variable Shift -----------------------------
 // Byte variable shift
 instruct vshift8B_var_nobw(vec dst, vec src, vec shift, vec vtmp, rRegP scratch) %{
-  predicate(vector_length(n) <= 8 && vector_element_basic_type(n) == T_BYTE &&
+  predicate(vector_length(n) <= 8 &&
+            !VectorNode::is_vshift_cnt(n->in(2)) &&
             !VM_Version::supports_avx512bw());
-  match(Set dst ( VLShiftV src shift));
-  match(Set dst ( VRShiftV src shift));
-  match(Set dst (VURShiftV src shift));
+  match(Set dst ( LShiftVB src shift));
+  match(Set dst ( RShiftVB src shift));
+  match(Set dst (URShiftVB src shift));
   effect(TEMP dst, TEMP vtmp, TEMP scratch);
   format %{ "vector_varshift_byte $dst, $src, $shift\n\t! using $vtmp, $scratch as TEMP" %}
   ins_encode %{
     assert(UseAVX >= 2, "required");
 

@@ -6163,15 +6168,16 @@
   %}
   ins_pipe( pipe_slow );
 %}
 
 instruct vshift16B_var_nobw(vec dst, vec src, vec shift, vec vtmp1, vec vtmp2, rRegP scratch) %{
-  predicate(vector_length(n) == 16 && vector_element_basic_type(n) == T_BYTE &&
+  predicate(vector_length(n) == 16 &&
+            !VectorNode::is_vshift_cnt(n->in(2)) &&
             !VM_Version::supports_avx512bw());
-  match(Set dst ( VLShiftV src shift));
-  match(Set dst ( VRShiftV src shift));
-  match(Set dst (VURShiftV src shift));
+  match(Set dst ( LShiftVB src shift));
+  match(Set dst ( RShiftVB src shift));
+  match(Set dst (URShiftVB src shift));
   effect(TEMP dst, TEMP vtmp1, TEMP vtmp2, TEMP scratch);
   format %{ "vector_varshift_byte $dst, $src, $shift\n\t! using $vtmp1, $vtmp2 and $scratch as TEMP" %}
   ins_encode %{
     assert(UseAVX >= 2, "required");
 

@@ -6190,15 +6196,16 @@
   %}
   ins_pipe( pipe_slow );
 %}
 
 instruct vshift32B_var_nobw(vec dst, vec src, vec shift, vec vtmp1, vec vtmp2, vec vtmp3, vec vtmp4, rRegP scratch) %{
-  predicate(vector_length(n) == 32 && vector_element_basic_type(n) == T_BYTE &&
+  predicate(vector_length(n) == 32 &&
+            !VectorNode::is_vshift_cnt(n->in(2)) &&
             !VM_Version::supports_avx512bw());
-  match(Set dst ( VLShiftV src shift));
-  match(Set dst ( VRShiftV src shift));
-  match(Set dst (VURShiftV src shift));
+  match(Set dst ( LShiftVB src shift));
+  match(Set dst ( RShiftVB src shift));
+  match(Set dst (URShiftVB src shift));
   effect(TEMP dst, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP scratch);
  format %{ "vector_varshift_byte $dst, $src, $shift\n\t! using $vtmp1, $vtmp2, $vtmp3, $vtmp4 and $scratch as TEMP" %}
   ins_encode %{
     assert(UseAVX >= 2, "required");
 

@@ -6225,15 +6232,16 @@
   %}
   ins_pipe( pipe_slow );
 %}
 
 instruct vshiftB_var_evex_bw(vec dst, vec src, vec shift, vec vtmp, rRegP scratch) %{
-  predicate(vector_length(n) <= 32 && vector_element_basic_type(n) == T_BYTE &&
+  predicate(vector_length(n) <= 32 &&
+            !VectorNode::is_vshift_cnt(n->in(2)) &&
             VM_Version::supports_avx512bw());
-  match(Set dst ( VLShiftV src shift));
-  match(Set dst ( VRShiftV src shift));
-  match(Set dst (VURShiftV src shift));
+  match(Set dst ( LShiftVB src shift));
+  match(Set dst ( RShiftVB src shift));
+  match(Set dst (URShiftVB src shift));
   effect(TEMP dst, TEMP vtmp, TEMP scratch);
   format %{ "vector_varshift_byte $dst, $src, $shift\n\t! using $vtmp, $scratch as TEMP" %}
   ins_encode %{
     assert(UseAVX > 2, "required");
 

@@ -6243,15 +6251,16 @@
   %}
   ins_pipe( pipe_slow );
 %}
 
 instruct vshift64B_var_evex_bw(vec dst, vec src, vec shift, vec vtmp1, vec vtmp2, rRegP scratch) %{
-  predicate(vector_length(n) == 64 && vector_element_basic_type(n) == T_BYTE &&
+  predicate(vector_length(n) == 64 &&
+            !VectorNode::is_vshift_cnt(n->in(2)) &&
             VM_Version::supports_avx512bw());
-  match(Set dst ( VLShiftV src shift));
-  match(Set dst ( VRShiftV src shift));
-  match(Set dst (VURShiftV src shift));
+  match(Set dst ( LShiftVB src shift));
+  match(Set dst ( RShiftVB src shift));
+  match(Set dst (URShiftVB src shift));
   effect(TEMP dst, TEMP vtmp1, TEMP vtmp2, TEMP scratch);
   format %{ "vector_varshift_byte $dst, $src, $shift\n\t! using $vtmp1, $vtmp2 and $scratch as TEMP" %}
   ins_encode %{
     assert(UseAVX > 2, "required");
 

@@ -6266,22 +6275,23 @@
   ins_pipe( pipe_slow );
 %}
 
 // Short variable shift
 instruct vshift8S_var_nobw(vec dst, vec src, vec shift, vec vtmp, rRegP scratch) %{
-  predicate(vector_length(n) <= 8 && vector_element_basic_type(n) == T_SHORT &&
+  predicate(vector_length(n) <= 8 &&
+            !VectorNode::is_vshift_cnt(n->in(2)) &&
             !VM_Version::supports_avx512bw());
-  match(Set dst (VLShiftV  src shift));
-  match(Set dst (VRShiftV  src shift));
-  match(Set dst (VURShiftV src shift));
+  match(Set dst ( LShiftVS src shift));
+  match(Set dst ( RShiftVS src shift));
+  match(Set dst (URShiftVS src shift));
   effect(TEMP dst, TEMP vtmp, TEMP scratch);
   format %{ "vector_var_shift_left_short $dst, $src, $shift\n\t" %}
   ins_encode %{
     assert(UseAVX >= 2, "required");
 
     int opcode = this->ideal_Opcode();
-    bool sign = (opcode == Op_VURShiftV) ? false : true;
+    bool sign = (opcode != Op_URShiftVS);
     int vlen_enc = Assembler::AVX_256bit;
     __ vextendwd(sign, $dst$$XMMRegister, $src$$XMMRegister, 1);
     __ vpmovzxwd($vtmp$$XMMRegister, $shift$$XMMRegister, 1);
     __ varshiftd(opcode, $dst$$XMMRegister, $dst$$XMMRegister, $vtmp$$XMMRegister, vlen_enc);
     __ vpand($dst$$XMMRegister, $dst$$XMMRegister, ExternalAddress(vector_int_to_short_mask()), vlen_enc, $scratch$$Register);

@@ -6290,22 +6300,23 @@
   %}
   ins_pipe( pipe_slow );
 %}
 
 instruct vshift16S_var_nobw(vec dst, vec src, vec shift, vec vtmp1, vec vtmp2, rRegP scratch) %{
-  predicate(vector_length(n) == 16 && vector_element_basic_type(n) == T_SHORT &&
+  predicate(vector_length(n) == 16 &&
+            !VectorNode::is_vshift_cnt(n->in(2)) &&
             !VM_Version::supports_avx512bw());
-  match(Set dst (VLShiftV  src shift));
-  match(Set dst (VRShiftV  src shift));
-  match(Set dst (VURShiftV src shift));
+  match(Set dst ( LShiftVS src shift));
+  match(Set dst ( RShiftVS src shift));
+  match(Set dst (URShiftVS src shift));
   effect(TEMP dst, TEMP vtmp1, TEMP vtmp2, TEMP scratch);
   format %{ "vector_var_shift_left_short $dst, $src, $shift\n\t" %}
   ins_encode %{
     assert(UseAVX >= 2, "required");
 
     int opcode = this->ideal_Opcode();
-    bool sign = (opcode == Op_VURShiftV) ? false : true;
+    bool sign = (opcode != Op_URShiftVS);
     int vlen_enc = Assembler::AVX_256bit;
    // Shift lower half, with result in vtmp2 using vtmp1 as TEMP
     __ vextendwd(sign, $vtmp2$$XMMRegister, $src$$XMMRegister, vlen_enc);
     __ vpmovzxwd($vtmp1$$XMMRegister, $shift$$XMMRegister, vlen_enc);
     __ varshiftd(opcode, $vtmp2$$XMMRegister, $vtmp2$$XMMRegister, $vtmp1$$XMMRegister, vlen_enc);

@@ -6325,15 +6336,15 @@
   %}
   ins_pipe( pipe_slow );
 %}
 
 instruct vshift16S_var_evex_bw(vec dst, vec src, vec shift) %{
-  predicate(vector_element_basic_type(n) == T_SHORT &&
+  predicate(!VectorNode::is_vshift_cnt(n->in(2)) &&
             VM_Version::supports_avx512bw());
-  match(Set dst (VLShiftV src shift));
-  match(Set dst (VRShiftV src shift));
-  match(Set dst (VURShiftV src shift));
+  match(Set dst ( LShiftVS src shift));
+  match(Set dst ( RShiftVS src shift));
+  match(Set dst (URShiftVS src shift));
   format %{ "vector_varshift_short $dst,$src,$shift\t!" %}
   ins_encode %{
     assert(UseAVX > 2, "required");
 
     int opcode = this->ideal_Opcode();

@@ -6346,14 +6357,14 @@
   ins_pipe( pipe_slow );
 %}
 
 //Integer variable shift
 instruct vshiftI_var(vec dst, vec src, vec shift) %{
-  predicate(vector_element_basic_type(n) == T_INT);
-  match(Set dst ( VLShiftV src shift));
-  match(Set dst ( VRShiftV src shift));
-  match(Set dst (VURShiftV src shift));
+  predicate(!VectorNode::is_vshift_cnt(n->in(2)));
+  match(Set dst ( LShiftVI src shift));
+  match(Set dst ( RShiftVI src shift));
+  match(Set dst (URShiftVI src shift));
   format %{ "vector_varshift_int $dst,$src,$shift\t!" %}
   ins_encode %{
     assert(UseAVX >= 2, "required");
 
     int opcode = this->ideal_Opcode();

@@ -6363,13 +6374,13 @@
   ins_pipe( pipe_slow );
 %}
 
 //Long variable shift
 instruct vshiftL_var(vec dst, vec src, vec shift) %{
-  predicate(vector_element_basic_type(n) == T_LONG);
-  match(Set dst ( VLShiftV src shift));
-  match(Set dst (VURShiftV src shift));
+  predicate(!VectorNode::is_vshift_cnt(n->in(2)));
+  match(Set dst ( LShiftVL src shift));
+  match(Set dst (URShiftVL src shift));
   format %{ "vector_varshift_long $dst,$src,$shift\t!" %}
   ins_encode %{
     assert(UseAVX >= 2, "required");
 
     int opcode = this->ideal_Opcode();

@@ -6379,13 +6390,14 @@
   ins_pipe( pipe_slow );
 %}
 
 //Long variable right shift arithmetic
 instruct vshiftL_arith_var(vec dst, vec src, vec shift, vec vtmp) %{
-  predicate(vector_length(n) <= 4 && vector_element_basic_type(n) == T_LONG &&
+  predicate(vector_length(n) <= 4 &&
+            !VectorNode::is_vshift_cnt(n->in(2)) &&
             UseAVX == 2);
-  match(Set dst (VRShiftV src shift));
+  match(Set dst (RShiftVL src shift));
   effect(TEMP dst, TEMP vtmp);
   format %{ "vector_varshift_long  $dst,$src,$shift\n\t! using $vtmp as TEMP" %}
   ins_encode %{
     int opcode = this->ideal_Opcode();
     int vlen_enc = vector_length_encoding(this);

@@ -6394,13 +6406,13 @@
   %}
   ins_pipe( pipe_slow );
 %}
 
 instruct vshiftL_arith_var_evex(vec dst, vec src, vec shift) %{
-  predicate(vector_element_basic_type(n) == T_LONG &&
+  predicate(!VectorNode::is_vshift_cnt(n->in(2)) &&
             UseAVX > 2);
-  match(Set dst (VRShiftV src shift));
+  match(Set dst (RShiftVL src shift));
  format %{ "vector_varshift_long $dst,$src,$shift\t!" %}
   ins_encode %{
     int opcode = this->ideal_Opcode();
     int vlen_enc = vector_length_encoding(this);
     __ varshiftq(opcode, $dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vlen_enc);

@@ -6832,14 +6844,15 @@
   match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
   format %{ "vector_compare $dst,$src1,$src2,$cond\t!" %}
   ins_encode %{
     int vlen_enc = vector_length_encoding(this, $src1);
     Assembler::ComparisonPredicateFP cmp = booltest_pred_to_comparison_pred_fp($cond$$constant);
-    if (vector_element_basic_type(this, $src1) == T_FLOAT)
+    if (vector_element_basic_type(this, $src1) == T_FLOAT) {
       __ vcmpps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, cmp, vlen_enc);
-    else
+    } else {
       __ vcmppd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, cmp, vlen_enc);
+    }
   %}
   ins_pipe( pipe_slow );
 %}
 
 instruct evcmpFD(vec dst, vec src1, vec src2, immI8 cond, rRegP scratch) %{

@@ -7214,52 +7227,10 @@
     }
   %}
   ins_pipe( pipe_slow );
 %}
 
-//------------------------------------- NOT --------------------------------------------
-
-instruct vnotB(vec dst, vec src) %{
-  predicate(UseAVX == 0);
-  match(Set dst (NotV src));
-  effect(TEMP dst);
-  format %{ "vector_not  $dst,$src\t!" %}
-  ins_encode %{
-    int vlen = vector_length_in_bytes(this);
-    switch(vlen) {
-      default:
-        assert(0, "Incorrect vector length");
-        break;
-      case 4: {
-        __ movdl($dst$$XMMRegister, ExternalAddress(vector_all_bits_set()));
-        __ pxor($dst$$XMMRegister, $src$$XMMRegister);
-      } break;
-      case 8: {
-        __ movq($dst$$XMMRegister, ExternalAddress(vector_all_bits_set()));
-        __ pxor($dst$$XMMRegister, $src$$XMMRegister);
-      } break;
-      case 16: {
-        __ movdqu($dst$$XMMRegister, ExternalAddress(vector_all_bits_set()));
-        __ pxor($dst$$XMMRegister, $src$$XMMRegister);
-      } break;
-    }
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct vnotB_reg(vec dst, vec src, rRegP scratch) %{
-  predicate(UseAVX > 0);
-  match(Set dst (NotV src));
-  effect(TEMP scratch);
-  format %{ "vector_not  $dst,$src\t! using $scratch as rRegP" %}
-  ins_encode %{
-    int vlen_enc = vector_length_encoding(this);
-    __ vpxor($dst$$XMMRegister, $src$$XMMRegister, ExternalAddress(vector_all_bits_set()), vlen_enc, $scratch$$Register);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 //------------------------------------- VectorTest --------------------------------------------
 
 #ifdef _LP64
 instruct vptest_alltrue(rRegI dst, legVec src1, legVec src2, rFlagsReg cr) %{
   predicate(static_cast<const VectorTestNode*>(n)->get_predicate() == BoolTest::overflow);
< prev index next >