
src/hotspot/cpu/x86/assembler_x86.cpp

rev 61869 : manual merge with vectorIntrinsics


 967     }
 968     ip++; // skip opcode
 969     debug_only(has_disp32 = true); // has both kinds of operands!
 970     break;
 971 
 972   case 0x62: // EVEX_4bytes
 973     assert(VM_Version::supports_evex(), "shouldn't have EVEX prefix");
 974     assert(ip == inst+1, "no prefixes allowed");
 975     // no EVEX collisions, all instructions that have 0x62 opcodes
 976     // have EVEX versions and are subopcodes of 0x66
 977     ip++; // skip P0 and examine W in P1
 978     is_64bit = ((VEX_W & *ip) == VEX_W);
 979     ip++; // move to P2
 980     ip++; // skip P2, move to opcode
 981     // To find the end of the instruction (which == end_pc_operand).
 982     switch (0xFF & *ip) {
 983     case 0x22: // pinsrd r, r/a, #8
 984     case 0x61: // pcmpestri r, r/a, #8
 985     case 0x70: // pshufd r, r/a, #8
 986     case 0x73: // psrldq r, #8


 987       tail_size = 1;  // the imm8
 988       break;
 989     default:
 990       break;
 991     }
 992     ip++; // skip opcode
 993     debug_only(has_disp32 = true); // has both kinds of operands!
 994     break;
 995 
 996   case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
 997   case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
 998   case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
 999   case 0xDD: // fld_d a; fst_d a; fstp_d a
1000   case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
1001   case 0xDF: // fild_d a; fistp_d a
1002   case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
1003   case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
1004   case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
1005     debug_only(has_disp32 = true);
1006     break;
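As an aside on the EVEX case above: a minimal sketch of how the W bit is read out of the 4-byte prefix, assuming the layout 0x62 P0 P1 P2 opcode and VEX_W == 0x80 (bit 7 of P1). Illustrative only, not HotSpot code.

  // hypothetical helper; mirrors the ip++ walk in the decode loop above
  static bool evex_w_is_set(const unsigned char* inst) {
    assert(inst[0] == 0x62, "not an EVEX-prefixed instruction");
    return (inst[2] & 0x80) != 0;   // inst[1] is P0; W lives in bit 7 of P1
  }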


1192 
1193 void Assembler::adcl(Register dst, Register src) {
1194   (void) prefix_and_encode(dst->encoding(), src->encoding());
1195   emit_arith(0x13, 0xC0, dst, src);
1196 }
1197 
1198 void Assembler::addl(Address dst, int32_t imm32) {
1199   InstructionMark im(this);
1200   prefix(dst);
1201   emit_arith_operand(0x81, rax, dst, imm32);
1202 }
1203 
1204 void Assembler::addb(Address dst, int imm8) {
1205   InstructionMark im(this);
1206   prefix(dst);
1207   emit_int8((unsigned char)0x80);
1208   emit_operand(rax, dst, 1);
1209   emit_int8(imm8);
1210 }
1211 
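For reference, a sketch of the immediate-width selection that emit_arith_operand performs for addl(Address, int32_t) above, assuming the usual group-1 encodings (0x81 with imm32, 0x83 with a sign-extended imm8). This illustrates the idea, not the exact helper:

  if (is8bit(imm32)) {
    emit_int8(0x83);                 // 0x81 | 0x02: same operation, imm8 form
    emit_operand(rax, dst, 1);       // rax encodes the /0 (add) digit
    emit_int8(imm32 & 0xFF);
  } else {
    emit_int8((unsigned char)0x81);
    emit_operand(rax, dst, 4);
    emit_int32(imm32);
  }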





1212 void Assembler::addw(Address dst, int imm16) {
1213   InstructionMark im(this);
1214   emit_int8(0x66);
1215   prefix(dst);
1216   emit_int8((unsigned char)0x81);
1217   emit_operand(rax, dst, 2);
1218   emit_int16(imm16);
1219 }
1220 
1221 void Assembler::addl(Address dst, Register src) {
1222   InstructionMark im(this);
1223   prefix(dst, src);
1224   emit_int8(0x01);
1225   emit_operand(src, dst);
1226 }
1227 
1228 void Assembler::addl(Register dst, int32_t imm32) {
1229   prefix(dst);
1230   emit_arith(0x81, 0xC0, dst, imm32);
1231 }


1398   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1399   emit_int8((unsigned char)0xDD);
1400   emit_operand(dst, src);
1401 }
1402 
1403 void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
1404   assert(VM_Version::supports_aes(), "");
1405   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
1406   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1407   emit_int16((unsigned char)0xDD, (0xC0 | encode));
1408 }
1409 
1410 void Assembler::vaesenclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1411   assert(VM_Version::supports_avx512_vaes(), "requires vaes support/enabling");
1412   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
1413   attributes.set_is_evex_instruction();
1414   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1415   emit_int16((unsigned char)0xDD, (0xC0 | encode));
1416 }
1417 
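A hypothetical usage sketch for the two forms above, picking the EVEX-encoded vaesenclast when VAES is available (register choices arbitrary; __ is the usual MacroAssembler shorthand):

  if (VM_Version::supports_avx512_vaes()) {
    __ vaesenclast(xmm0, xmm0, xmm15, Assembler::AVX_512bit);  // four blocks per op
  } else {
    __ aesenclast(xmm0, xmm15);                                // one 128-bit block
  }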





1418 void Assembler::andl(Address dst, int32_t imm32) {
1419   InstructionMark im(this);
1420   prefix(dst);
1421   emit_int8((unsigned char)0x81);
1422   emit_operand(rsp, dst, 4);
1423   emit_int32(imm32);
1424 }
1425 
1426 void Assembler::andl(Register dst, int32_t imm32) {
1427   prefix(dst);
1428   emit_arith(0x81, 0xE0, dst, imm32);
1429 }
1430 
1431 void Assembler::andl(Register dst, Address src) {
1432   InstructionMark im(this);
1433   prefix(src, dst);
1434   emit_int8(0x23);
1435   emit_operand(dst, src);
1436 }
1437 
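Worth noting for the group-1 immediate forms in this file (addl above with rax, andl with rsp, orl below with rcx): the Register passed to emit_operand/emit_arith_operand is not a real operand, it only supplies the ModRM /digit. Standard assignments, with the registers whose encodings happen to match:

  // add -> /0 (rax)   or  -> /1 (rcx)   adc -> /2 (rdx)   sbb -> /3 (rbx)
  // and -> /4 (rsp)   sub -> /5 (rbp)   xor -> /6 (rsi)   cmp -> /7 (rdi)
  emit_operand(rsp, dst, 4);   // rsp.encoding() == 4 selects the AND digit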


1766   LP64_ONLY(case 8:)
1767     // This instruction is not valid in 32 bits
1768     p = REX_W;
1769     break;
1770   default:
1771     assert(0, "Unsupported value for a sizeInBytes argument");
1772     break;
1773   }
1774   LP64_ONLY(prefix(crc, adr, p);)
1775   emit_int24(0x0F, 0x38, (0xF0 | w));
1776   emit_operand(crc, adr);
1777 }
1778 
1779 void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
1780   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1781   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
1782   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
1783   emit_int16((unsigned char)0xE6, (0xC0 | encode));
1784 }
1785 







1786 void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
1787   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1788   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
1789   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
1790   emit_int16(0x5B, (0xC0 | encode));
1791 }
1792 







1793 void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
1794   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1795   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
1796   attributes.set_rex_vex_w_reverted();
1797   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
1798   emit_int16(0x5A, (0xC0 | encode));
1799 }
1800 
1801 void Assembler::cvtsd2ss(XMMRegister dst, Address src) {
1802   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1803   InstructionMark im(this);
1804   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
1805   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
1806   attributes.set_rex_vex_w_reverted();
1807   simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
1808   emit_int8(0x5A);
1809   emit_operand(dst, src);
1810 }
1811 
1812 void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {


1895   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
1896   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1897   emit_int16(0x1C, (0xC0 | encode));
1898 }
1899 
1900 void Assembler::pabsw(XMMRegister dst, XMMRegister src) {
1901   assert(VM_Version::supports_ssse3(), "");
1902   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
1903   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1904   emit_int16(0x1D, (0xC0 | encode));
1905 }
1906 
1907 void Assembler::pabsd(XMMRegister dst, XMMRegister src) {
1908   assert(VM_Version::supports_ssse3(), "");
1909   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
1910   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1911   emit_int16(0x1E, (0xC0 | encode));
1912 }
1913 
1914 void Assembler::vpabsb(XMMRegister dst, XMMRegister src, int vector_len) {
1915   assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
1916          vector_len == AVX_256bit? VM_Version::supports_avx2() :
1917          vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
1918   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
1919   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1920   emit_int16(0x1C, (0xC0 | encode));
1921 }
1922 
1923 void Assembler::vpabsw(XMMRegister dst, XMMRegister src, int vector_len) {
1924   assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
1925          vector_len == AVX_256bit? VM_Version::supports_avx2() :
1926          vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
1927   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
1928   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1929   emit_int16(0x1D, (0xC0 | encode));
1930 }
1931 
1932 void Assembler::vpabsd(XMMRegister dst, XMMRegister src, int vector_len) {
1933   assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
1934          vector_len == AVX_256bit? VM_Version::supports_avx2() :
1935          vector_len == AVX_512bit? VM_Version::supports_evex() : 0, "");
1936   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
1937   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1938   emit_int16(0x1E, (0xC0 | encode));
1939 }
1940 
1941 void Assembler::evpabsq(XMMRegister dst, XMMRegister src, int vector_len) {
1942   assert(UseAVX > 2, "");
1943   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
1944   attributes.set_is_evex_instruction();
1945   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1946   emit_int16(0x1F, (0xC0 | encode));
1947 }
1948 
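A hypothetical selection sketch for the pabs family above (lane counts are for byte elements; the guards follow the asserts in the vpabsb forms, and SSSE3 is assumed for the fallback):

  if (UseAVX > 2 && VM_Version::supports_avx512bw()) {
    __ vpabsb(xmm0, xmm1, Assembler::AVX_512bit);   // EVEX, 64 lanes
  } else if (UseAVX > 1) {
    __ vpabsb(xmm0, xmm1, Assembler::AVX_256bit);   // AVX2, 32 lanes
  } else {
    __ pabsb(xmm0, xmm1);                           // SSSE3, 16 lanes
  }
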
1949 void Assembler::decl(Address dst) {
1950   // Don't use it directly. Use MacroAssembler::decrement() instead.
1951   InstructionMark im(this);
1952   prefix(dst);
1953   emit_int8((unsigned char)0xFF);
1954   emit_operand(rcx, dst);
1955 }
1956 
1957 void Assembler::divsd(XMMRegister dst, Address src) {
1958   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1959   InstructionMark im(this);
1960   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
1961   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
1962   attributes.set_rex_vex_w_reverted();
1963   simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
1964   emit_int8(0x5E);
1965   emit_operand(dst, src);
1966 }
1967 
1968 void Assembler::divsd(XMMRegister dst, XMMRegister src) {


2526   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2527   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2528   emit_int8(0x6F);
2529   emit_operand(dst, src);
2530 }
2531 
2532 void Assembler::vmovdqu(Address dst, XMMRegister src) {
2533   assert(UseAVX > 0, "");
2534   InstructionMark im(this);
2535   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2536   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2537   attributes.reset_is_clear_context();
2538   // swap src<->dst for encoding
2539   assert(src != xnoreg, "sanity");
2540   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2541   emit_int8(0x7F);
2542   emit_operand(src, dst);
2543 }
2544 
2545 // Move Unaligned EVEX enabled Vector (programmable element size: 8, 16, 32 or 64 bits)
2546 void Assembler::evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) {
2547   assert(VM_Version::supports_evex(), "");
2548   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
2549   attributes.set_is_evex_instruction();



2550   int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
2551   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
2552   emit_int16(0x6F, (0xC0 | encode));
2553 }
2554 
2555 void Assembler::evmovdqub(XMMRegister dst, Address src, int vector_len) {
2556   assert(VM_Version::supports_evex(), "");
2557   InstructionMark im(this);
2558   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
2559   int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
2560   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2561   attributes.set_is_evex_instruction();



2562   vex_prefix(src, 0, dst->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
2563   emit_int8(0x6F);
2564   emit_operand(dst, src);
2565 }
2566 
2567 void Assembler::evmovdqub(Address dst, XMMRegister src, int vector_len) {
2568   assert(VM_Version::supports_evex(), "");
2569   assert(src != xnoreg, "sanity");
2570   InstructionMark im(this);
2571   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
2572   int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
2573   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2574   attributes.set_is_evex_instruction();



2575   vex_prefix(dst, 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
2576   emit_int8(0x7F);
2577   emit_operand(src, dst);
2578 }
2579 
2580 void Assembler::evmovdqub(XMMRegister dst, KRegister mask, Address src, int vector_len) {
2581   assert(VM_Version::supports_avx512vlbw(), "");
2582   InstructionMark im(this);
2583   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
2584   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2585   attributes.set_embedded_opmask_register_specifier(mask);
2586   attributes.set_is_evex_instruction();



2587   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
2588   emit_int8(0x6F);
2589   emit_operand(dst, src);
2590 }
2591 
2592 void Assembler::evmovdquw(XMMRegister dst, Address src, int vector_len) {
2593   assert(VM_Version::supports_evex(), "");
2594   InstructionMark im(this);
2595   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
2596   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2597   attributes.set_is_evex_instruction();



2598   int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
2599   vex_prefix(src, 0, dst->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
2600   emit_int8(0x6F);
2601   emit_operand(dst, src);
2602 }
2603 
2604 void Assembler::evmovdquw(XMMRegister dst, KRegister mask, Address src, int vector_len) {
2605   assert(VM_Version::supports_avx512vlbw(), "");
2606   InstructionMark im(this);
2607   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
2608   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2609   attributes.set_embedded_opmask_register_specifier(mask);
2610   attributes.set_is_evex_instruction();



2611   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
2612   emit_int8(0x6F);
2613   emit_operand(dst, src);
2614 }
2615 
2616 void Assembler::evmovdquw(Address dst, XMMRegister src, int vector_len) {
2617   assert(VM_Version::supports_evex(), "");
2618   assert(src != xnoreg, "sanity");
2619   InstructionMark im(this);
2620   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
2621   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2622   attributes.set_is_evex_instruction();



2623   int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
2624   vex_prefix(dst, 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
2625   emit_int8(0x7F);
2626   emit_operand(src, dst);
2627 }
2628 
2629 void Assembler::evmovdquw(Address dst, KRegister mask, XMMRegister src, int vector_len) {
2630   assert(VM_Version::supports_avx512vlbw(), "");
2631   assert(src != xnoreg, "sanity");
2632   InstructionMark im(this);
2633   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
2634   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2635   attributes.reset_is_clear_context();
2636   attributes.set_embedded_opmask_register_specifier(mask);
2637   attributes.set_is_evex_instruction();



2638   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
2639   emit_int8(0x7F);
2640   emit_operand(src, dst);
2641 }
2642 
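Semantics sketch for the masked store above (standard EVEX merging-masking; reset_is_clear_context keeps the z bit clear, so masked-off lanes are merged rather than zeroed):

  // for each 16-bit lane i of the vector:
  //   if (mask[i]) mem[i] = src[i];   else leave mem[i] untouched
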
2643 void Assembler::evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {





2644   assert(VM_Version::supports_evex(), "");
2645   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);

2646   attributes.set_is_evex_instruction();



2647   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2648   emit_int16(0x6F, (0xC0 | encode));
2649 }
2650 
2651 void Assembler::evmovdqul(XMMRegister dst, Address src, int vector_len) {





2652   assert(VM_Version::supports_evex(), "");
2653   InstructionMark im(this);
2654   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true , /* uses_vl */ true);
2655   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);

2656   attributes.set_is_evex_instruction();



2657   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2658   emit_int8(0x6F);
2659   emit_operand(dst, src);
2660 }
2661 
2662 void Assembler::evmovdqul(Address dst, XMMRegister src, int vector_len) {





2663   assert(VM_Version::supports_evex(), "");
2664   assert(src != xnoreg, "sanity");
2665   InstructionMark im(this);
2666   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2667   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2668   attributes.reset_is_clear_context();
2669   attributes.set_is_evex_instruction();



2670   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2671   emit_int8(0x7F);
2672   emit_operand(src, dst);
2673 }
2674 
2675 void Assembler::evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {






2676   assert(VM_Version::supports_evex(), "");
2677   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);

2678   attributes.set_is_evex_instruction();



2679   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2680   emit_int16(0x6F, (0xC0 | encode));
2681 }
2682 
2683 void Assembler::evmovdquq(XMMRegister dst, Address src, int vector_len) {





2684   assert(VM_Version::supports_evex(), "");
2685   InstructionMark im(this);
2686   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2687   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);

2688   attributes.set_is_evex_instruction();



2689   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2690   emit_int8(0x6F);
2691   emit_operand(dst, src);
2692 }
2693 
2694 void Assembler::evmovdquq(Address dst, XMMRegister src, int vector_len) {





2695   assert(VM_Version::supports_evex(), "");
2696   assert(src != xnoreg, "sanity");
2697   InstructionMark im(this);
2698   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2699   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);


2700   attributes.reset_is_clear_context();

2701   attributes.set_is_evex_instruction();
2702   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2703   emit_int8(0x7F);
2704   emit_operand(src, dst);
2705 }
2706 
2707 // Uses zero extension on 64-bit
2708 
2709 void Assembler::movl(Register dst, int32_t imm32) {
2710   int encode = prefix_and_encode(dst->encoding());
2711   emit_int8(0xB8 | encode);
2712   emit_int32(imm32);
2713 }
2714 
2715 void Assembler::movl(Register dst, Register src) {
2716   int encode = prefix_and_encode(dst->encoding(), src->encoding());
2717   emit_int16((unsigned char)0x8B, (0xC0 | encode));
2718 }
2719 
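Byte-level sketch of what the two movl forms above emit (assuming no REX prefix is needed for the chosen registers):

  // movl(rax, 0x2A)  ->  B8 2A 00 00 00      (0xB8 | reg, then imm32)
  // movl(rcx, rdx)   ->  8B CA               (8B /r; ModRM = 0xC0 | dst<<3 | src)
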
2720 void Assembler::movl(Register dst, Address src) {


2758   InstructionMark im(this);
2759   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2760   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
2761   attributes.set_rex_vex_w_reverted();
2762   simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2763   emit_int8(0x7E);
2764   emit_operand(dst, src);
2765 }
2766 
2767 void Assembler::movq(Address dst, XMMRegister src) {
2768   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2769   InstructionMark im(this);
2770   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2771   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
2772   attributes.set_rex_vex_w_reverted();
2773   simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
2774   emit_int8((unsigned char)0xD6);
2775   emit_operand(src, dst);
2776 }
2777 
2778 void Assembler::movsbl(Register dst, Address src) { // movsxb
2779   InstructionMark im(this);
2780   prefix(src, dst);
2781   emit_int16(0x0F, (unsigned char)0xBE);
2782   emit_operand(dst, src);
2783 }
2784 
2785 void Assembler::movsbl(Register dst, Register src) { // movsxb
2786   NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
2787   int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true);
2788   emit_int24(0x0F, (unsigned char)0xBE, (0xC0 | encode));
2789 }
2790 
2791 void Assembler::movsd(XMMRegister dst, XMMRegister src) {
2792   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2793   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2794   attributes.set_rex_vex_w_reverted();
2795   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
2796   emit_int16(0x10, (0xC0 | encode));
2797 }


3257   switch (i) {
3258     case 4:
3259       emit_int8(0x66);
3260     case 3:
3261       emit_int8(0x66);
3262     case 2:
3263       emit_int8(0x66);
3264     case 1:
3265       emit_int8((unsigned char)0x90);
3266       break;
3267     default:
3268       assert(i == 0, " ");
3269   }
3270 }
3271 
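For reference, the padding sequences the deliberate fall-through above produces, built from 0x66 operand-size prefixes in front of a single 0x90 nop:

  // i == 1  ->  90
  // i == 2  ->  66 90
  // i == 3  ->  66 66 90
  // i == 4  ->  66 66 66 90
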
3272 void Assembler::notl(Register dst) {
3273   int encode = prefix_and_encode(dst->encoding());
3274   emit_int16((unsigned char)0xF7, (0xD0 | encode));
3275 }
3276 





3277 void Assembler::orl(Address dst, int32_t imm32) {
3278   InstructionMark im(this);
3279   prefix(dst);
3280   emit_arith_operand(0x81, rcx, dst, imm32);
3281 }
3282 
3283 void Assembler::orl(Register dst, int32_t imm32) {
3284   prefix(dst);
3285   emit_arith(0x81, 0xC8, dst, imm32);
3286 }
3287 
3288 void Assembler::orl(Register dst, Address src) {
3289   InstructionMark im(this);
3290   prefix(src, dst);
3291   emit_int8(0x0B);
3292   emit_operand(dst, src);
3293 }
3294 
3295 void Assembler::orl(Register dst, Register src) {
3296   (void) prefix_and_encode(dst->encoding(), src->encoding());
3297   emit_arith(0x0B, 0xC0, dst, src);
3298 }
3299 
3300 void Assembler::orl(Address dst, Register src) {
3301   InstructionMark im(this);
3302   prefix(dst, src);
3303   emit_int8(0x09);
3304   emit_operand(src, dst);
3305 }
3306 
3307 void Assembler::orb(Address dst, int imm8) {
3308   InstructionMark im(this);
3309   prefix(dst);
3310   emit_int8((unsigned char)0x80);
3311   emit_operand(rcx, dst, 1);
3312   emit_int8(imm8);
3313 }
3314 
3315 void Assembler::packuswb(XMMRegister dst, Address src) {
3316   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3317   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
3318   InstructionMark im(this);
3319   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3320   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
3321   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3322   emit_int8(0x67);
3323   emit_operand(dst, src);
3324 }
3325 
3326 void Assembler::packuswb(XMMRegister dst, XMMRegister src) {
3327   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3328   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3329   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3330   emit_int16(0x67, (0xC0 | encode));
3331 }
3332 
3333 void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3334   assert(UseAVX > 0, "some form of AVX must be enabled");
3335   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3336   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3337   emit_int16(0x67, (0xC0 | encode));
3338 }
3339 
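A hypothetical usage sketch: vpackuswb packs within each 128-bit lane, so when the packed halves should end up in linear order, a cross-lane fix-up such as vpermq with selector 0xD8 (see below) is typically emitted afterwards:

  __ vpackuswb(xmm0, xmm1, xmm2, Assembler::AVX_256bit);  // saturate words to bytes
  __ vpermq(xmm0, xmm0, 0xD8, Assembler::AVX_256bit);     // reorder the 64-bit blocks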














3340 void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
3341   assert(VM_Version::supports_avx2(), "");


3342   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3343   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3344   emit_int24(0x00, (0xC0 | encode), imm8);
3345 }
3346 
3347 void Assembler::vpermq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3348   assert(UseAVX > 2, "requires AVX512F");

3349   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3350   attributes.set_is_evex_instruction();
3351   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3352   emit_int16(0x36, (0xC0 | encode));
3353 }
3354 
3355 void Assembler::vperm2i128(XMMRegister dst,  XMMRegister nds, XMMRegister src, int imm8) {
3356   assert(VM_Version::supports_avx2(), "");
3357   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3358   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3359   emit_int24(0x46, (0xC0 | encode), imm8);
3360 }
3361 
3362 void Assembler::vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
3363   assert(VM_Version::supports_avx(), "");
3364   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3365   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3366   emit_int24(0x06, (0xC0 | encode), imm8);
3367 }
3368 
3369 void Assembler::evpermi2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3370   assert(VM_Version::supports_evex(), "");
3371   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3372   attributes.set_is_evex_instruction();
3373   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3374   emit_int16(0x76, (0xC0 | encode));
3375 }
3376 
3377 
3378 void Assembler::pause() {
3379   emit_int16((unsigned char)0xF3, (unsigned char)0x90);
3380 }
3381 
3382 void Assembler::ud2() {
3383   emit_int16(0x0F, 0x0B);
3384 }
3385 
3386 void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
3387   assert(VM_Version::supports_sse4_2(), "");
3388   InstructionMark im(this);
3389   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3390   simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3391   emit_int8(0x61);
3392   emit_operand(dst, src);
3393   emit_int8(imm8);
3394 }
3395 
3396 void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
3397   assert(VM_Version::supports_sse4_2(), "");
3398   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3399   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3400   emit_int24(0x61, (0xC0 | encode), imm8);
3401 }
3402 
3403 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
3404 void Assembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
3405   assert(VM_Version::supports_sse2(), "");
3406   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3407   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3408   emit_int16(0x74, (0xC0 | encode));
3409 }
3410 
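A scalar model of the comparison above (standard SSE2 semantics), for reference:

  // for (int i = 0; i < 16; i++)
  //   dst.byte[i] = (dst.byte[i] == src.byte[i]) ? 0xFF : 0x00;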








3411 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
3412 void Assembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3413   assert(VM_Version::supports_avx(), "");

3414   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3415   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3416   emit_int16(0x74, (0xC0 | encode));
3417 }
3418 
3419 // In this context, kdst is written with the mask used to process the equal components
3420 void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
3421   assert(VM_Version::supports_avx512bw(), "");
3422   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3423   attributes.set_is_evex_instruction();
3424   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3425   emit_int16(0x74, (0xC0 | encode));
3426 }
3427 
3428 void Assembler::evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
3429   assert(VM_Version::supports_avx512vlbw(), "");
3430   InstructionMark im(this);
3431   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3432   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3433   attributes.set_is_evex_instruction();


3480   emit_int8(0x3E);
3481   emit_operand(as_Register(dst_enc), src);
3482   emit_int8(vcc);
3483 }
3484 
3485 void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
3486   assert(VM_Version::supports_avx512bw(), "");
3487   InstructionMark im(this);
3488   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3489   attributes.set_is_evex_instruction();
3490   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3491   int dst_enc = kdst->encoding();
3492   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3493   emit_int8(0x74);
3494   emit_operand(as_Register(dst_enc), src);
3495 }
3496 
3497 void Assembler::evpcmpeqb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
3498   assert(VM_Version::supports_avx512vlbw(), "");
3499   InstructionMark im(this);
3500   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
3501   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3502   attributes.reset_is_clear_context();
3503   attributes.set_embedded_opmask_register_specifier(mask);
3504   attributes.set_is_evex_instruction();
3505   vex_prefix(src, nds->encoding(), kdst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3506   emit_int8(0x74);
3507   emit_operand(as_Register(kdst->encoding()), src);
3508 }
3509 
3510 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
3511 void Assembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
3512   assert(VM_Version::supports_sse2(), "");
3513   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3514   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3515   emit_int16(0x75, (0xC0 | encode));
3516 }
3517 
3518 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
3519 void Assembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3520   assert(VM_Version::supports_avx(), "");

3521   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3522   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3523   emit_int16(0x75, (0xC0 | encode));
3524 }
3525 
3526 // In this context, kdst is written with the mask used to process the equal components
3527 void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
3528   assert(VM_Version::supports_avx512bw(), "");
3529   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3530   attributes.set_is_evex_instruction();
3531   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3532   emit_int16(0x75, (0xC0 | encode));
3533 }
3534 
3535 void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
3536   assert(VM_Version::supports_avx512bw(), "");
3537   InstructionMark im(this);
3538   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3539   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3540   attributes.set_is_evex_instruction();
3541   int dst_enc = kdst->encoding();
3542   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3543   emit_int8(0x75);
3544   emit_operand(as_Register(dst_enc), src);
3545 }
3546 
3547 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
3548 void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
3549   assert(VM_Version::supports_sse2(), "");
3550   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3551   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3552   emit_int16(0x76, (0xC0 | encode));
3553 }
3554 
3555 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
3556 void Assembler::vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3557   assert(VM_Version::supports_avx(), "");
3558   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);

3559   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3560   emit_int16(0x76, (0xC0 | encode));
3561 }
3562 
3563 // In this context, kdst is written with the mask used to process the equal components
3564 void Assembler::evpcmpeqd(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
3565   assert(VM_Version::supports_evex(), "");
3566   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3567   attributes.set_is_evex_instruction();
3568   attributes.reset_is_clear_context();

3569   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3570   emit_int16(0x76, (0xC0 | encode));
3571 }
3572 
3573 void Assembler::evpcmpeqd(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
3574   assert(VM_Version::supports_evex(), "");
3575   InstructionMark im(this);
3576   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3577   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
3578   attributes.reset_is_clear_context();
3579   attributes.set_is_evex_instruction();


3580   int dst_enc = kdst->encoding();
3581   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3582   emit_int8(0x76);
3583   emit_operand(as_Register(dst_enc), src);
3584 }
3585 
3586 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
3587 void Assembler::pcmpeqq(XMMRegister dst, XMMRegister src) {
3588   assert(VM_Version::supports_sse4_1(), "");
3589   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3590   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3591   emit_int16(0x29, (0xC0 | encode));
3592 }
3593 







3594 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
3595 void Assembler::vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3596   assert(VM_Version::supports_avx(), "");
3597   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3598   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3599   emit_int16(0x29, (0xC0 | encode));
3600 }
3601 
3602 // In this context, kdst is written with the mask used to process the equal components
3603 void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
3604   assert(VM_Version::supports_evex(), "");
3605   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3606   attributes.reset_is_clear_context();
3607   attributes.set_is_evex_instruction();
3608   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3609   emit_int16(0x29, (0xC0 | encode));
3610 }
3611 
3612 // In this context, kdst is written with the mask used to process the equal components
3613 void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
3614   assert(VM_Version::supports_evex(), "");
3615   InstructionMark im(this);
3616   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3617   attributes.reset_is_clear_context();
3618   attributes.set_is_evex_instruction();
3619   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
3620   int dst_enc = kdst->encoding();
3621   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3622   emit_int8(0x29);
3623   emit_operand(as_Register(dst_enc), src);
3624 }
3625 
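A hypothetical follow-up sketch: materializing an opmask produced by the evpcmpeqq forms above in a general register (k2/rax arbitrary; assumes the kmovql(Register, KRegister) form declared elsewhere in this assembler):

  __ evpcmpeqq(k2, xmm0, xmm1, Assembler::AVX_512bit);
  __ kmovql(rax, k2);     // bit i set  <=>  64-bit lane i compared equal
  __ testl(rax, rax);     // any lane equal?
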
3626 void Assembler::pmovmskb(Register dst, XMMRegister src) {
3627   assert(VM_Version::supports_sse2(), "");
3628   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3629   int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3630   emit_int16((unsigned char)0xD7, (0xC0 | encode));
3631 }
3632 
3633 void Assembler::vpmovmskb(Register dst, XMMRegister src) {
3634   assert(VM_Version::supports_avx2(), "");
3635   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3636   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3637   emit_int16((unsigned char)0xD7, (0xC0 | encode));
3638 }
3639 
3640 void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
3641   assert(VM_Version::supports_sse4_1(), "");
3642   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
3643   int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3644   emit_int24(0x16, (0xC0 | encode), imm8);
3645 }
3646 
3647 void Assembler::pextrd(Address dst, XMMRegister src, int imm8) {
3648   assert(VM_Version::supports_sse4_1(), "");
3649   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
3650   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
3651   simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3652   emit_int8(0x16);
3653   emit_operand(src, dst);
3654   emit_int8(imm8);
3655 }
3656 
3657 void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
3658   assert(VM_Version::supports_sse4_1(), "");
3659   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
3660   int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3661   emit_int24(0x16, (0xC0 | encode), imm8);
3662 }
3663 
3664 void Assembler::pextrq(Address dst, XMMRegister src, int imm8) {
3665   assert(VM_Version::supports_sse4_1(), "");
3666   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
3667   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
3668   simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3669   emit_int8(0x16);
3670   emit_operand(src, dst);
3671   emit_int8(imm8);
3672 }
3673 
3674 void Assembler::pextrw(Register dst, XMMRegister src, int imm8) {
3675   assert(VM_Version::supports_sse2(), "");
3676   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3677   int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3678   emit_int24((unsigned char)0xC5, (0xC0 | encode), imm8);
3679 }
3680 
3681 void Assembler::pextrw(Address dst, XMMRegister src, int imm8) {
3682   assert(VM_Version::supports_sse4_1(), "");
3683   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3684   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
3685   simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3686   emit_int8(0x15);
3687   emit_operand(src, dst);
3688   emit_int8(imm8);
3689 }
3690 







3691 void Assembler::pextrb(Address dst, XMMRegister src, int imm8) {
3692   assert(VM_Version::supports_sse4_1(), "");
3693   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3694   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
3695   simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3696   emit_int8(0x14);
3697   emit_operand(src, dst);
3698   emit_int8(imm8);
3699 }
3700 
3701 void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
3702   assert(VM_Version::supports_sse4_1(), "");
3703   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
3704   int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3705   emit_int24(0x22, (0xC0 | encode), imm8);
3706 }
3707 
3708 void Assembler::pinsrd(XMMRegister dst, Address src, int imm8) {
3709   assert(VM_Version::supports_sse4_1(), "");
3710   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
3711   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
3712   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3713   emit_int8(0x22);
3714   emit_operand(dst,src);
3715   emit_int8(imm8);
3716 }
3717 







3718 void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
3719   assert(VM_Version::supports_sse4_1(), "");
3720   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
3721   int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3722   emit_int24(0x22, (0xC0 | encode), imm8);
3723 }
3724 
3725 void Assembler::pinsrq(XMMRegister dst, Address src, int imm8) {
3726   assert(VM_Version::supports_sse4_1(), "");
3727   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
3728   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
3729   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3730   emit_int8(0x22);
3731   emit_operand(dst, src);
3732   emit_int8(imm8);
3733 }
3734 
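A hypothetical usage sketch for the pinsr forms: splicing two GPR values into one 128-bit register (LP64 assumed; movdq is taken to be the 64-bit GPR-to-XMM move defined elsewhere in this file):

  __ movdq(xmm0, rax);       // lane 0
  __ pinsrq(xmm0, rbx, 1);   // lane 1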







3735 void Assembler::pinsrw(XMMRegister dst, Register src, int imm8) {
3736   assert(VM_Version::supports_sse2(), "");
3737   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3738   int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3739   emit_int24((unsigned char)0xC4, (0xC0 | encode), imm8);
3740 }
3741 
3742 void Assembler::pinsrw(XMMRegister dst, Address src, int imm8) {
3743   assert(VM_Version::supports_sse2(), "");
3744   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3745   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
3746   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3747   emit_int8((unsigned char)0xC4);
3748   emit_operand(dst, src);
3749   emit_int8(imm8);
3750 }
3751 







3752 void Assembler::pinsrb(XMMRegister dst, Address src, int imm8) {
3753   assert(VM_Version::supports_sse4_1(), "");
3754   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3755   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
3756   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3757   emit_int8(0x20);
3758   emit_operand(dst, src);
3759   emit_int8(imm8);
3760 }
3761 
3762 void Assembler::pmovzxbw(XMMRegister dst, Address src) {
3763   assert(VM_Version::supports_sse4_1(), "");
3764   InstructionMark im(this);
3765   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3766   attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
3767   simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3768   emit_int8(0x30);
3769   emit_operand(dst, src);
3770 }
3771 
3772 void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
3773   assert(VM_Version::supports_sse4_1(), "");
3774   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3775   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3776   emit_int16(0x30, (0xC0 | encode));
3777 }
3778 
3779 void Assembler::pmovsxbw(XMMRegister dst, XMMRegister src) {
3780   assert(VM_Version::supports_sse4_1(), "");
3781   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3782   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3783   emit_int16(0x20, (0xC0 | encode));
3784 }
3785 
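Widening semantics of the two forms above in scalar terms (standard SSE4.1 definitions):

  // pmovzxbw: dst.word[i] = (uint16_t)(uint8_t)src.byte[i]   // zero-extend, i = 0..7
  // pmovsxbw: dst.word[i] = (int16_t)(int8_t)src.byte[i]     // sign-extend, i = 0..7
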
3786 void Assembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
3787   assert(VM_Version::supports_avx(), "");
3788   InstructionMark im(this);
3789   assert(dst != xnoreg, "sanity");
3790   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3791   attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
3792   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3793   emit_int8(0x30);
3794   emit_operand(dst, src);
3795 }
3796 
3797 void Assembler::vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) {
3798   assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
3799          vector_len == AVX_256bit? VM_Version::supports_avx2() :
3800          vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
3801   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3802   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3803   emit_int16(0x30, (0xC0 | encode));
3804 }
3805 
3806 void Assembler::vpmovsxbw(XMMRegister dst, XMMRegister src, int vector_len) {
3807   assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
3808          vector_len == AVX_256bit? VM_Version::supports_avx2() :
3809          vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
3810   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3811   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3812   emit_int16(0x20, (0xC0 | encode));
3813 }
3814 
3815 void Assembler::evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len) {
3816   assert(VM_Version::supports_avx512vlbw(), "");
3817   assert(dst != xnoreg, "sanity");
3818   InstructionMark im(this);
3819   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
3820   attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
3821   attributes.set_embedded_opmask_register_specifier(mask);
3822   attributes.set_is_evex_instruction();
3823   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3824   emit_int8(0x30);
3825   emit_operand(dst, src);
3826 }

3827 void Assembler::evpmovwb(Address dst, XMMRegister src, int vector_len) {
3828   assert(VM_Version::supports_avx512vlbw(), "");
3829   assert(src != xnoreg, "sanity");
3830   InstructionMark im(this);
3831   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3832   attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
3833   attributes.set_is_evex_instruction();
3834   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
3835   emit_int8(0x30);
3836   emit_operand(src, dst);
3837 }
3838 
3839 void Assembler::evpmovwb(Address dst, KRegister mask, XMMRegister src, int vector_len) {
3840   assert(VM_Version::supports_avx512vlbw(), "");
3841   assert(src != xnoreg, "sanity");
3842   InstructionMark im(this);
3843   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
3844   attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
3845   attributes.reset_is_clear_context();
3846   attributes.set_embedded_opmask_register_specifier(mask);


4033          (vector_len == AVX_512bit? VM_Version::supports_evex() : 0)), "");
4034   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4035   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4036   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4037   emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
4038 }
4039 
4040 void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
4041   assert(isByte(mode), "invalid value");
4042   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4043   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
4044   InstructionMark im(this);
4045   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4046   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
4047   simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4048   emit_int8(0x70);
4049   emit_operand(dst, src);
4050   emit_int8(mode & 0xFF);
4051 }
4052 








4053 void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
4054   assert(isByte(mode), "invalid value");
4055   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4056   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4057   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
4058   emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
4059 }
4060 
4061 void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
4062   assert(isByte(mode), "invalid value");
4063   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4064   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
4065   InstructionMark im(this);
4066   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4067   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
4068   simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
4069   emit_int8(0x70);
4070   emit_operand(dst, src);
4071   emit_int8(mode & 0xFF);
4072 }
4073 
4074 void Assembler::evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
4075   assert(VM_Version::supports_evex(), "requires EVEX support");
4076   assert(vector_len == Assembler::AVX_256bit || vector_len == Assembler::AVX_512bit, "");
4077   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4078   attributes.set_is_evex_instruction();
4079   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4080   emit_int24(0x43, (0xC0 | encode), imm8 & 0xFF);
4081 }
4082 
4083 void Assembler::psrldq(XMMRegister dst, int shift) {
4084   // Shift right 128 bit value in dst XMMRegister by shift number of bytes.
4085   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4086   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4087   int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); // XMM3 is for /3 encoding: 66 0F 73 /3 ib
4088   emit_int24(0x73, (0xC0 | encode), shift);
4089 }
4090 
4091 void Assembler::vpsrldq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
4092   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
4093          vector_len == AVX_256bit ? VM_Version::supports_avx2() :
4094          vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, "");
4095   InstructionAttr attributes(vector_len, /*vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4096   int encode = vex_prefix_and_encode(xmm3->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); // XMM3 is for /3 encoding: 66 0F 73 /3 ib
4097   emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
4098 }
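
Despite the opcode-extension plumbing, the shift here is by whole bytes within each 128-bit lane. A scalar sketch under that assumption (hypothetical helper):

#include <cstdint>
#include <cstring>

static void psrldq_model(uint8_t lane[16], int shift) {
  uint8_t r[16] = {0};
  if (shift < 16) memcpy(r, lane + shift, 16 - shift);  // counts >= 16 clear the lane
  memcpy(lane, r, 16);
}
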
4099 
4100 void Assembler::pslldq(XMMRegister dst, int shift) {
4101   // Shift left 128 bit value in dst XMMRegister by shift number of bytes.
4102   NOT_LP64(assert(VM_Version::supports_sse2(), ""));


4134 }
4135 
4136 void Assembler::vptest(XMMRegister dst, Address src) {
4137   assert(VM_Version::supports_avx(), "");
4138   InstructionMark im(this);
4139   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
4140   assert(dst != xnoreg, "sanity");
4141   // swap src<->dst for encoding
4142   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4143   emit_int8(0x17);
4144   emit_operand(dst, src);
4145 }
4146 
4147 void Assembler::vptest(XMMRegister dst, XMMRegister src) {
4148   assert(VM_Version::supports_avx(), "");
4149   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
4150   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4151   emit_int16(0x17, (0xC0 | encode));
4152 }
4153 
4154 void Assembler::punpcklbw(XMMRegister dst, Address src) {
4155   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4156   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
4157   InstructionMark im(this);
4158   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true);
4159   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
4160   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4161   emit_int8(0x60);
4162   emit_operand(dst, src);
4163 }
4164 
4165 void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
4166   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4167   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true);
4168   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4169   emit_int16(0x60, (0xC0 | encode));
4170 }
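
PUNPCKLBW interleaves the low halves of the two sources, first-operand bytes landing in the even positions. A scalar sketch (hypothetical helper):

#include <cstdint>

static void punpcklbw_model(uint8_t dst[16], const uint8_t src[16]) {
  uint8_t r[16];
  for (int i = 0; i < 8; i++) {
    r[2 * i]     = dst[i];  // low 8 bytes of dst -> even slots
    r[2 * i + 1] = src[i];  // low 8 bytes of src -> odd slots
  }
  for (int i = 0; i < 16; i++) dst[i] = r[i];
}
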
4171 
4172 void Assembler::punpckldq(XMMRegister dst, Address src) {
4173   NOT_LP64(assert(VM_Version::supports_sse2(), ""));


4802 
4803 void Assembler::xorl(Register dst, Address src) {
4804   InstructionMark im(this);
4805   prefix(src, dst);
4806   emit_int8(0x33);
4807   emit_operand(dst, src);
4808 }
4809 
4810 void Assembler::xorl(Register dst, Register src) {
4811   (void) prefix_and_encode(dst->encoding(), src->encoding());
4812   emit_arith(0x33, 0xC0, dst, src);
4813 }
4814 
4815 void Assembler::xorb(Register dst, Address src) {
4816   InstructionMark im(this);
4817   prefix(src, dst);
4818   emit_int8(0x32);
4819   emit_operand(dst, src);
4820 }
4821 
4822 // AVX 3-operand scalar floating-point arithmetic instructions
4823 
4824 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
4825   assert(VM_Version::supports_avx(), "");
4826   InstructionMark im(this);
4827   InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
4828   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
4829   attributes.set_rex_vex_w_reverted();
4830   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
4831   emit_int8(0x58);
4832   emit_operand(dst, src);
4833 }
4834 
4835 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
4836   assert(VM_Version::supports_avx(), "");
4837   InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
4838   attributes.set_rex_vex_w_reverted();
4839   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
4840   emit_int16(0x58, (0xC0 | encode));
4841 }


5715   attributes.set_rex_vex_w_reverted();
5716   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5717   emit_int8((unsigned char)0xFB);
5718   emit_operand(dst, src);
5719 }
5720 
5721 void Assembler::pmullw(XMMRegister dst, XMMRegister src) {
5722   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5723   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5724   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5725   emit_int16((unsigned char)0xD5, (0xC0 | encode));
5726 }
5727 
5728 void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
5729   assert(VM_Version::supports_sse4_1(), "");
5730   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5731   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5732   emit_int16(0x40, (0xC0 | encode));
5733 }
5734 
5735 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
5736   assert(UseAVX > 0, "requires some form of AVX");
5737   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5738   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5739   emit_int16((unsigned char)0xD5, (0xC0 | encode));
5740 }
5741 
5742 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
5743   assert(UseAVX > 0, "requires some form of AVX");
5744   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5745   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5746   emit_int16(0x40, (0xC0 | encode));
5747 }
5748 
5749 void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
5750   assert(UseAVX > 2, "requires some form of EVEX");
5751   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
5752   attributes.set_is_evex_instruction();
5753   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5754   emit_int16(0x40, (0xC0 | encode));
5755 }
5756 
5757 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
5758   assert(UseAVX > 0, "requires some form of AVX");
5759   InstructionMark im(this);
5760   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5761   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
5762   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5763   emit_int8((unsigned char)0xD5);
5764   emit_operand(dst, src);
5765 }
5766 
5767 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
5768   assert(UseAVX > 0, "requires some form of AVX");
5769   InstructionMark im(this);
5770   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5771   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
5772   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5773   emit_int8(0x40);
5774   emit_operand(dst, src);
5775 }
5776 
5777 void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
5778   assert(UseAVX > 2, "requires some form of EVEX");
5779   InstructionMark im(this);
5780   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
5781   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
5782   attributes.set_is_evex_instruction();
5783   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5784   emit_int8(0x40);
5785   emit_operand(dst, src);
5786 }
5787 
5788 // Shift packed integers left by specified number of bits.
5789 void Assembler::psllw(XMMRegister dst, int shift) {
5790   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5791   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5792   // XMM6 is for /6 encoding: 66 0F 71 /6 ib
5793   int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5794   emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
5795 }
5796 
5797 void Assembler::pslld(XMMRegister dst, int shift) {
5798   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5799   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5800   // XMM6 is for /6 encoding: 66 0F 72 /6 ib
5801   int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5802   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
5803 }
5804 
5805 void Assembler::psllq(XMMRegister dst, int shift) {
5806   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5807   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5808   // XMM6 is for /6 encoding: 66 0F 73 /6 ib
5809   int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5810   emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
5811 }
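
The xmm6 passed as the reg operand above is not a real source: for the immediate shift forms the ModRM reg field carries the /6 opcode extension, so any register object with encoding 6 produces the right byte. A sketch of the register-form ModRM byte under the usual mod/reg/rm layout (hypothetical helper):

#include <cstdint>

static uint8_t modrm_reg_form(int opcode_ext, int rm_reg) {
  return (uint8_t)(0xC0 | ((opcode_ext & 7) << 3) | (rm_reg & 7));  // mod=11
}
// psllq xmm2, imm8: modrm_reg_form(6, 2) == 0xF2, completing 66 0F 73 /6 ib.
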
5812 
5813 void Assembler::psllw(XMMRegister dst, XMMRegister shift) {
5814   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5815   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5816   int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5817   emit_int16((unsigned char)0xF1, (0xC0 | encode));
5818 }
5819 
5820 void Assembler::pslld(XMMRegister dst, XMMRegister shift) {
5821   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5822   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5823   int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5824   emit_int16((unsigned char)0xF2, (0xC0 | encode));
5825 }
5826 
5827 void Assembler::psllq(XMMRegister dst, XMMRegister shift) {
5828   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5829   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5830   attributes.set_rex_vex_w_reverted();
5831   int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5832   emit_int16((unsigned char)0xF3, (0xC0 | encode));
5833 }
5834 
5835 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
5836   assert(UseAVX > 0, "requires some form of AVX");
5837   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5838   // XMM6 is for /6 encoding: 66 0F 71 /6 ib
5839   int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5840   emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
5841 }
5842 
5843 void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
5844   assert(UseAVX > 0, "requires some form of AVX");
5845   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5846   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5847   // XMM6 is for /6 encoding: 66 0F 72 /6 ib
5848   int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5849   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
5850 }
5851 
5852 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
5853   assert(UseAVX > 0, "requires some form of AVX");
5854   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5855   attributes.set_rex_vex_w_reverted();
5856   // XMM6 is for /6 encoding: 66 0F 73 /6 ib
5857   int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5858   emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
5859 }
5860 
5861 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
5862   assert(UseAVX > 0, "requires some form of AVX");
5863   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5864   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5865   emit_int16((unsigned char)0xF1, (0xC0 | encode));
5866 }
5867 
5868 void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
5869   assert(UseAVX > 0, "requires some form of AVX");
5870   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5871   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5872   emit_int16((unsigned char)0xF2, (0xC0 | encode));
5873 }
5874 
5875 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
5876   assert(UseAVX > 0, "requires some form of AVX");
5877   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5878   attributes.set_rex_vex_w_reverted();
5879   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5880   emit_int16((unsigned char)0xF3, (0xC0 | encode));
5881 }


6089   emit_int16((unsigned char)0xDB, (0xC0 | encode));
6090 }
6091 
6092 void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
6093   assert(UseAVX > 0, "requires some form of AVX");
6094   InstructionMark im(this);
6095   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6096   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
6097   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6098   emit_int8((unsigned char)0xDB);
6099   emit_operand(dst, src);
6100 }
6101 
6102 void Assembler::vpandq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6103   assert(VM_Version::supports_evex(), "");
6104   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6105   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6106   emit_int16((unsigned char)0xDB, (0xC0 | encode));
6107 }
6108 
6109 void Assembler::vpshldvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
6110   assert(VM_Version::supports_avx512_vbmi2(), "requires vbmi2");
6111   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6112   attributes.set_is_evex_instruction();
6113   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6114   emit_int8(0x71);
6115   emit_int8((0xC0 | encode));
6116 }
6117 
6118 void Assembler::vpshrdvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
6119   assert(VM_Version::supports_avx512_vbmi2(), "requires vbmi2");
6120   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6121   attributes.set_is_evex_instruction();
6122   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6123   emit_int16(0x73, (0xC0 | encode));
6124 }
6125 
6126 void Assembler::pandn(XMMRegister dst, XMMRegister src) {
6127   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
6128   InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6129   attributes.set_rex_vex_w_reverted();
6130   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6131   emit_int16((unsigned char)0xDF, (0xC0 | encode));
6132 }
6133 
6134 void Assembler::vpandn(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6135   assert(UseAVX > 0, "requires some form of AVX");
6136   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6137   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6138   emit_int16((unsigned char)0xDF, (0xC0 | encode));
6139 }
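
PANDN complements its first operand, not its second; a classic pitfall. A one-line scalar sketch (hypothetical helper):

#include <cstdint>

static uint64_t pandn_model(uint64_t dst, uint64_t src) {
  return ~dst & src;  // NOT applies to the destination operand
}
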
6140 
6141 
6142 void Assembler::por(XMMRegister dst, XMMRegister src) {
6143   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
6144   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6145   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6146   emit_int16((unsigned char)0xEB, (0xC0 | encode));
6147 }
6148 
6149 void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6150   assert(UseAVX > 0, "requires some form of AVX");
6151   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6152   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6153   emit_int16((unsigned char)0xEB, (0xC0 | encode));
6154 }
6155 
6156 void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
6157   assert(UseAVX > 0, "requires some form of AVX");
6158   InstructionMark im(this);
6159   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6160   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
6161   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6162   emit_int8((unsigned char)0xEB);
6163   emit_operand(dst, src);
6164 }
6165 
6166 void Assembler::vporq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6167   assert(VM_Version::supports_evex(), "");
6168   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6169   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6170   emit_int16((unsigned char)0xEB, (0xC0 | encode));
6171 }
6172 
6173 
6174 void Assembler::pxor(XMMRegister dst, XMMRegister src) {
6175   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
6176   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6177   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6178   emit_int16((unsigned char)0xEF, (0xC0 | encode));
6179 }
6180 
6181 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6182   assert(UseAVX > 0, "requires some form of AVX");
6183   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6184   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6185   emit_int16((unsigned char)0xEF, (0xC0 | encode));
6186 }
6187 
6188 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
6189   assert(UseAVX > 0, "requires some form of AVX");
6190   InstructionMark im(this);
6191   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6192   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
6193   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6194   emit_int8((unsigned char)0xEF);
6195   emit_operand(dst, src);
6196 }
6197 
6198 void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6199   assert(VM_Version::supports_evex(), "requires EVEX support");
6200   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6201   attributes.set_is_evex_instruction();
6202   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6203   emit_int8((unsigned char)0xEF);
6204   emit_int8((0xC0 | encode));
6205 }
6206 
6207 void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
6208   assert(VM_Version::supports_evex(), "requires EVEX support");
6209   assert(dst != xnoreg, "sanity");
6210   InstructionMark im(this);
6211   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6212   attributes.set_is_evex_instruction();
6213   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
6214   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6215   emit_int8((unsigned char)0xEF);
6216   emit_operand(dst, src);
6217 }
6218 
6219 void Assembler::vpternlogd(XMMRegister dst, int imm8, XMMRegister src2, XMMRegister src3, int vector_len) {
6220   assert(VM_Version::supports_evex(), "requires EVEX support");
6221   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
6222   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6223   attributes.set_is_evex_instruction();
6224   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);


6809   emit_int16(0x7B, (0xC0 | encode));
6810 }
6811 
6812 // duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
6813 void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) {
6814   assert(VM_Version::supports_evex(), "");
6815   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6816   attributes.set_is_evex_instruction();
6817   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6818   emit_int16(0x7C, (0xC0 | encode));
6819 }
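
Both broadcasts here replicate a general-purpose register into every lane of the destination. A scalar sketch for the dword case (hypothetical helper):

#include <cstdint>

static void broadcastd_model(uint32_t dst[], int lanes, uint32_t src) {
  for (int i = 0; i < lanes; i++) dst[i] = src;  // 4/8/16 lanes for 128/256/512 bits
}
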
6820 
6821 // duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL
6822 void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) {
6823   assert(VM_Version::supports_evex(), "");
6824   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6825   attributes.set_is_evex_instruction();
6826   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6827   emit_int16(0x7C, (0xC0 | encode));
6828 }
6829 void Assembler::evpgatherdd(XMMRegister dst, KRegister mask, Address src, int vector_len) {
6830   assert(VM_Version::supports_evex(), "");
6831   assert(dst != xnoreg, "sanity");
6832   InstructionMark im(this);
6833   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
6834   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
6835   attributes.reset_is_clear_context();
6836   attributes.set_embedded_opmask_register_specifier(mask);
6837   attributes.set_is_evex_instruction();
6838   // swap src<->dst for encoding
6839   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6840   emit_int8((unsigned char)0x90);
6841   emit_operand(dst, src);
6842 }
6843 // Carry-Less Multiplication Quadword
6844 void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
6845   assert(VM_Version::supports_clmul(), "");
6846   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
6847   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
6848   emit_int24(0x44, (0xC0 | encode), (unsigned char)mask);
6849 }
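
Carry-less multiplication is ordinary long multiplication with XOR in place of addition (GF(2) arithmetic). A scalar sketch of one 64x64->128 product (hypothetical helper); the instruction's mask byte selects which source quadwords participate (bit 0 for the first operand, bit 4 for the second):

#include <cstdint>

static void clmul64_model(uint64_t a, uint64_t b, uint64_t* hi, uint64_t* lo) {
  uint64_t h = 0, l = 0;
  for (int i = 0; i < 64; i++) {
    if ((b >> i) & 1) {
      l ^= a << i;                      // XOR instead of ADD: no carry chain
      if (i != 0) h ^= a >> (64 - i);
    }
  }
  *hi = h; *lo = l;
}
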
6850 
6851 // Carry-Less Multiplication Quadword
6852 void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
6853   assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
6854   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
6855   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
6856   emit_int24(0x44, (0xC0 | encode), (unsigned char)mask);
6857 }
6858 
6859 void Assembler::evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len) {
6860   assert(VM_Version::supports_avx512_vpclmulqdq(), "Requires vector carryless multiplication support");
6861   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6862   attributes.set_is_evex_instruction();
6863   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
6864   emit_int24(0x44, (0xC0 | encode), (unsigned char)mask);
6865 }
6866 
6867 void Assembler::vzeroupper_uncached() {
6868   if (VM_Version::supports_vzeroupper()) {
6869     InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
6870     (void)vex_prefix_and_encode(0, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
6871     emit_int8(0x77);
6872   }
6873 }
6874 
6875 #ifndef _LP64
6876 // 32bit only pieces of the assembler
6877 


7420   int byte3 = ((~nds_enc) & 0xf) << 3;
7421   // p[10] is always 1
7422   byte3 |= EVEX_F;
7423   byte3 |= (vex_w & 1) << 7;
7424   // confine pre opcode extensions in pp bits to lower two bits
7425   // of form {66, F3, F2}
7426   byte3 |= pre;
7427 
7428   // P2: byte 4 as zL'Lbv'aaa
7429   // kregs are implemented in the low 3 bits as aaa
7430   int byte4 = (_attributes->is_no_reg_mask()) ?
7431               0 :
7432               _attributes->get_embedded_opmask_register_specifier();
7433   // EVEX.v` for extending EVEX.vvvv or VIDX
7434   byte4 |= (evex_v ? 0: EVEX_V);
7435   // third EVEX.b for broadcast actions
7436   byte4 |= (_attributes->is_extended_context() ? EVEX_Rb : 0);
7437   // fourth EVEX.L'L for vector length : 0 is 128, 1 is 256, 2 is 512, currently we do not support 1024
7438   byte4 |= ((_attributes->get_vector_len())& 0x3) << 5;
7439   // last is EVEX.z for zero/merge actions
7440   if (_attributes->is_no_reg_mask() == false) {
7441     byte4 |= (_attributes->is_clear_context() ? EVEX_Z : 0);
7442   }
7443 
7444   emit_int32(EVEX_4bytes, byte2, byte3, byte4);
7445 }
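
One concrete reading of the z|L'L|b|V'|aaa layout assembled above (a sketch assuming the field positions documented in the comments, not this file's constant values): a 512-bit merge-masked operation under k1 with an in-range vvvv and no broadcast gives

#include <cstdint>

static uint8_t evex_p2_example() {
  int z = 0, LL = 2 /* 512-bit */, b = 0, Vp = 1, aaa = 1 /* k1 */;
  return (uint8_t)((z << 7) | (LL << 5) | (b << 4) | (Vp << 3) | aaa);  // 0x49
}
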
7446 
7447 void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) {
7448   bool vex_r = (xreg_enc & 8) == 8;
7449   bool vex_b = adr.base_needs_rex();
7450   bool vex_x;
7451   if (adr.isxmmindex()) {
7452     vex_x = adr.xmmindex_needs_rex();
7453   } else {
7454     vex_x = adr.index_needs_rex();
7455   }
7456   set_attributes(attributes);
7457   attributes->set_current_assembler(this);
7458 
7459   // For an EVEX-encodable instruction that is not marked as pure EVEX, check whether
7460   // it is allowed in legacy mode and its resources fit there.


7588   attributes.set_rex_vex_w_reverted();
7589   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
7590   emit_int16(0x5F, (0xC0 | encode));
7591 }
7592 
7593 void Assembler::vminss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
7594   assert(VM_Version::supports_avx(), "");
7595   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7596   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
7597   emit_int16(0x5D, (0xC0 | encode));
7598 }
7599 
7600 void Assembler::vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
7601   assert(VM_Version::supports_avx(), "");
7602   InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7603   attributes.set_rex_vex_w_reverted();
7604   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
7605   emit_int16(0x5D, (0xC0 | encode));
7606 }
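
x86 scalar min is not commutative: when either input is NaN, or the inputs are +0.0 and -0.0, the second source wins. A scalar sketch (hypothetical helper):

static double minsd_model(double a, double b) {
  return (a < b) ? a : b;  // NaN compares false, so b is returned
}
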
7607 
7608 void Assembler::cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) {
7609   assert(VM_Version::supports_avx(), "");
7610   assert(vector_len <= AVX_256bit, "");
7611   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
7612   int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7613   emit_int24((unsigned char)0xC2, (0xC0 | encode), (0xF & cop));
7614 }
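
The cop nibble selects the comparison predicate. A hypothetical mirror of the classic SSE encodings (the VEX forms extend the range up to 0x1F, beyond what the 0xF mask here expresses):

enum CmpPredicateModel {
  CMPP_EQ = 0, CMPP_LT = 1, CMPP_LE = 2, CMPP_UNORD = 3,
  CMPP_NEQ = 4, CMPP_NLT = 5, CMPP_NLE = 6, CMPP_ORD = 7
};
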
7615 
7616 void Assembler::blendvpb(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
7617   assert(VM_Version::supports_avx(), "");
7618   assert(vector_len <= AVX_256bit, "");
7619   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
7620   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
7621   int src2_enc = src2->encoding();
7622   emit_int24(0x4C, (0xC0 | encode), (0xF0 & src2_enc << 4));
7623 }
7624 
7625 void Assembler::blendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
7626   assert(VM_Version::supports_avx(), "");
7627   assert(vector_len <= AVX_256bit, "");
7628   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
7629   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
7630   int src2_enc = src2->encoding();
7631   emit_int24(0x4B, (0xC0 | encode), (0xF0 & src2_enc << 4));
7632 }
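
The trailing byte's high nibble names the mask register (src2); each result lane is chosen by the sign bit of the corresponding mask lane. A scalar sketch for the double-precision case, lanes viewed as raw bits (hypothetical helper):

#include <cstdint>

static uint64_t blendvpd_lane_model(uint64_t first, uint64_t second, uint64_t mask) {
  return (mask >> 63) ? second : first;  // mask sign bit set -> second source
}
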
7633 
7634 void Assembler::cmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) {
7635   assert(VM_Version::supports_avx(), "");
7636   assert(vector_len <= AVX_256bit, "");
7637   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
7638   int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
7639   emit_int24((unsigned char)0xC2, (0xC0 | encode), (0xF & cop));
7640 }
7641 
7642 void Assembler::blendvps(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
7643   assert(VM_Version::supports_avx(), "");
7644   assert(vector_len <= AVX_256bit, "");
7645   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
7646   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
7647   int src2_enc = src2->encoding();
7648   emit_int24(0x4A, (0xC0 | encode), (0xF0 & src2_enc << 4));
7649 }
7650 
7651 void Assembler::vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
7652   assert(VM_Version::supports_avx2(), "");
7653   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
7654   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
7655   emit_int24(0x02, (0xC0 | encode), (unsigned char)imm8);
7656 }
7657 
7658 void Assembler::shlxl(Register dst, Register src1, Register src2) {
7659   assert(VM_Version::supports_bmi2(), "");
7660   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
7661   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7662   emit_int16((unsigned char)0xF7, (0xC0 | encode));
7663 }
7664 
7665 void Assembler::shlxq(Register dst, Register src1, Register src2) {
7666   assert(VM_Version::supports_bmi2(), "");
7667   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
7668   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7669   emit_int16((unsigned char)0xF7, (0xC0 | encode));
7670 }
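
Unlike SHL, the BMI2 SHLX takes its count from a register and leaves EFLAGS untouched, which lets the JIT schedule it freely. A scalar sketch (hypothetical helper):

#include <cstdint>

static uint32_t shlxl_model(uint32_t src, uint32_t count) {
  return src << (count & 31);  // shlxq masks the count with 63 instead
}
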
7671 
7672 #ifndef _LP64
7673 
7674 void Assembler::incl(Register dst) {
7675   // Don't use it directly. Use MacroAssembler::incrementl() instead.


 967     }
 968     ip++; // skip opcode
 969     debug_only(has_disp32 = true); // has both kinds of operands!
 970     break;
 971 
 972   case 0x62: // EVEX_4bytes
 973     assert(VM_Version::supports_evex(), "shouldn't have EVEX prefix");
 974     assert(ip == inst+1, "no prefixes allowed");
 975     // no EVEX collisions, all instructions that have 0x62 opcodes
 976     // have EVEX versions and are subopcodes of 0x66
 977     ip++; // skip P0 and examine W in P1
 978     is_64bit = ((VEX_W & *ip) == VEX_W);
 979     ip++; // move to P2
 980     ip++; // skip P2, move to opcode
 981     // To find the end of instruction (which == end_pc_operand).
 982     switch (0xFF & *ip) {
 983     case 0x22: // pinsrd r, r/a, #8
 984     case 0x61: // pcmpestri r, r/a, #8
 985     case 0x70: // pshufd r, r/a, #8
 986     case 0x73: // psrldq r, #8
 987     case 0x1F: // evpcmpd/evpcmpq
 988     case 0x3F: // evpcmpb/evpcmpw
 989       tail_size = 1;  // the imm8
 990       break;
 991     default:
 992       break;
 993     }
 994     ip++; // skip opcode
 995     debug_only(has_disp32 = true); // has both kinds of operands!
 996     break;
 997 
 998   case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
 999   case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
1000   case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
1001   case 0xDD: // fld_d a; fst_d a; fstp_d a
1002   case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
1003   case 0xDF: // fild_d a; fistp_d a
1004   case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
1005   case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
1006   case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
1007     debug_only(has_disp32 = true);
1008     break;


1194 
1195 void Assembler::adcl(Register dst, Register src) {
1196   (void) prefix_and_encode(dst->encoding(), src->encoding());
1197   emit_arith(0x13, 0xC0, dst, src);
1198 }
1199 
1200 void Assembler::addl(Address dst, int32_t imm32) {
1201   InstructionMark im(this);
1202   prefix(dst);
1203   emit_arith_operand(0x81, rax, dst, imm32);
1204 }
1205 
1206 void Assembler::addb(Address dst, int imm8) {
1207   InstructionMark im(this);
1208   prefix(dst);
1209   emit_int8((unsigned char)0x80);
1210   emit_operand(rax, dst, 1);
1211   emit_int8(imm8);
1212 }
1213 
1214 void Assembler::addw(Register dst, Register src) {
1215   (void)prefix_and_encode(dst->encoding(), src->encoding());
1216   emit_arith(0x03, 0xC0, dst, src);
1217 }
1218 
1219 void Assembler::addw(Address dst, int imm16) {
1220   InstructionMark im(this);
1221   emit_int8(0x66);
1222   prefix(dst);
1223   emit_int8((unsigned char)0x81);
1224   emit_operand(rax, dst, 2);
1225   emit_int16(imm16);
1226 }
1227 
1228 void Assembler::addl(Address dst, Register src) {
1229   InstructionMark im(this);
1230   prefix(dst, src);
1231   emit_int8(0x01);
1232   emit_operand(src, dst);
1233 }
1234 
1235 void Assembler::addl(Register dst, int32_t imm32) {
1236   prefix(dst);
1237   emit_arith(0x81, 0xC0, dst, imm32);
1238 }


1405   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1406   emit_int8((unsigned char)0xDD);
1407   emit_operand(dst, src);
1408 }
1409 
1410 void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
1411   assert(VM_Version::supports_aes(), "");
1412   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
1413   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1414   emit_int16((unsigned char)0xDD, (0xC0 | encode));
1415 }
1416 
1417 void Assembler::vaesenclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1418   assert(VM_Version::supports_avx512_vaes(), "requires vaes support/enabling");
1419   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
1420   attributes.set_is_evex_instruction();
1421   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1422   emit_int16((unsigned char)0xDD, (0xC0 | encode));
1423 }
1424 
1425 void Assembler::andw(Register dst, Register src) {
1426   (void)prefix_and_encode(dst->encoding(), src->encoding());
1427   emit_arith(0x23, 0xC0, dst, src);
1428 }
1429 
1430 void Assembler::andl(Address dst, int32_t imm32) {
1431   InstructionMark im(this);
1432   prefix(dst);
1433   emit_int8((unsigned char)0x81);
1434   emit_operand(rsp, dst, 4);
1435   emit_int32(imm32);
1436 }
1437 
1438 void Assembler::andl(Register dst, int32_t imm32) {
1439   prefix(dst);
1440   emit_arith(0x81, 0xE0, dst, imm32);
1441 }
1442 
1443 void Assembler::andl(Register dst, Address src) {
1444   InstructionMark im(this);
1445   prefix(src, dst);
1446   emit_int8(0x23);
1447   emit_operand(dst, src);
1448 }
1449 


1778   LP64_ONLY(case 8:)
1779     // This instruction is not valid in 32 bits
1780     p = REX_W;
1781     break;
1782   default:
1783     assert(0, "Unsupported value for a sizeInBytes argument");
1784     break;
1785   }
1786   LP64_ONLY(prefix(crc, adr, p);)
1787   emit_int24(0x0F, 0x38, (0xF0 | w));
1788   emit_operand(crc, adr);
1789 }
1790 
1791 void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
1792   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1793   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
1794   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
1795   emit_int16((unsigned char)0xE6, (0xC0 | encode));
1796 }
1797 
1798 void Assembler::vcvtdq2pd(XMMRegister dst, XMMRegister src, int vector_len) {
1799   assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
1800   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
1801   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
1802   emit_int16((unsigned char)0xE6, (0xC0 | encode));
1803 }
1804 
1805 void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
1806   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1807   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
1808   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
1809   emit_int16(0x5B, (0xC0 | encode));
1810 }
1811 
1812 void Assembler::vcvtdq2ps(XMMRegister dst, XMMRegister src, int vector_len) {
1813   assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
1814   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
1815   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
1816   emit_int16(0x5B, (0xC0 | encode));
1817 }
1818 
1819 void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
1820   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1821   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
1822   attributes.set_rex_vex_w_reverted();
1823   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
1824   emit_int16(0x5A, (0xC0 | encode));
1825 }
1826 
1827 void Assembler::cvtsd2ss(XMMRegister dst, Address src) {
1828   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1829   InstructionMark im(this);
1830   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
1831   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
1832   attributes.set_rex_vex_w_reverted();
1833   simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
1834   emit_int8(0x5A);
1835   emit_operand(dst, src);
1836 }
1837 
1838 void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {


1921   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
1922   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1923   emit_int16(0x1C, (0xC0 | encode));
1924 }
1925 
1926 void Assembler::pabsw(XMMRegister dst, XMMRegister src) {
1927   assert(VM_Version::supports_ssse3(), "");
1928   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
1929   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1930   emit_int16(0x1D, (0xC0 | encode));
1931 }
1932 
1933 void Assembler::pabsd(XMMRegister dst, XMMRegister src) {
1934   assert(VM_Version::supports_ssse3(), "");
1935   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
1936   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1937   emit_int16(0x1E, (0xC0 | encode));
1938 }
1939 
1940 void Assembler::vpabsb(XMMRegister dst, XMMRegister src, int vector_len) {
1941   assert(vector_len == AVX_128bit ? VM_Version::supports_avx()      :
1942          vector_len == AVX_256bit ? VM_Version::supports_avx2()     :
1943          vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : false, "not supported");
1944   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
1945   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1946   emit_int16(0x1C, (0xC0 | encode));
1947 }
1948 
1949 void Assembler::vpabsw(XMMRegister dst, XMMRegister src, int vector_len) {
1950   assert(vector_len == AVX_128bit ? VM_Version::supports_avx()      :
1951          vector_len == AVX_256bit ? VM_Version::supports_avx2()     :
1952          vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : false, "");
1953   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
1954   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1955   emit_int16(0x1D, (0xC0 | encode));
1956 }
1957 
1958 void Assembler::vpabsd(XMMRegister dst, XMMRegister src, int vector_len) {
1959   assert(vector_len == AVX_128bit ? VM_Version::supports_avx()  :
1960          vector_len == AVX_256bit ? VM_Version::supports_avx2() :
1961          vector_len == AVX_512bit ? VM_Version::supports_evex() : 0, "");
1962   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
1963   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1964   emit_int16(0x1E, (0xC0 | encode));
1965 }
1966 
1967 void Assembler::evpabsq(XMMRegister dst, XMMRegister src, int vector_len) {
1968   assert(UseAVX > 2, "");
1969   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
1970   attributes.set_is_evex_instruction();
1971   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1972   emit_int16(0x1F, (0xC0 | encode));
1973 }
1974 
1975 void Assembler::vcvtps2pd(XMMRegister dst, XMMRegister src, int vector_len) {
1976   assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
1977   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
1978   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
1979   emit_int16(0x5A, (0xC0 | encode));
1980 }
1981 
1982 void Assembler::vcvtpd2ps(XMMRegister dst, XMMRegister src, int vector_len) {
1983   assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
1984   InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
1985   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
1986   attributes.set_rex_vex_w_reverted();
1987   emit_int16(0x5A, (0xC0 | encode));
1988 }
1989 
1990 void Assembler::evcvtqq2ps(XMMRegister dst, XMMRegister src, int vector_len) {
1991   assert(UseAVX > 2 && VM_Version::supports_avx512dq(), "");
1992   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
1993   attributes.set_is_evex_instruction();
1994   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
1995   emit_int16(0x5B, (0xC0 | encode));
1996 }
1997 
1998 void Assembler::evcvtqq2pd(XMMRegister dst, XMMRegister src, int vector_len) {
1999   assert(UseAVX > 2 && VM_Version::supports_avx512dq(), "");
2000   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2001   attributes.set_is_evex_instruction();
2002   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2003   emit_int16((unsigned char)0xE6, (0xC0 | encode));
2004 }
2005 
2006 void Assembler::evpmovwb(XMMRegister dst, XMMRegister src, int vector_len) {
2007   assert(UseAVX > 2 && VM_Version::supports_avx512bw(), "");
2008   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2009   attributes.set_is_evex_instruction();
2010   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
2011   emit_int16(0x30, (0xC0 | encode));
2012 }
2013 
2014 void Assembler::evpmovdw(XMMRegister dst, XMMRegister src, int vector_len) {
2015   assert(UseAVX > 2, "");
2016   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2017   attributes.set_is_evex_instruction();
2018   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
2019   emit_int16(0x33, (0xC0 | encode));
2020 }
2021 
2022 void Assembler::evpmovdb(XMMRegister dst, XMMRegister src, int vector_len) {
2023   assert(UseAVX > 2, "");
2024   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2025   attributes.set_is_evex_instruction();
2026   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
2027   emit_int16(0x31, (0xC0 | encode));
2028 }
2029 
2030 void Assembler::evpmovqd(XMMRegister dst, XMMRegister src, int vector_len) {
2031   assert(UseAVX > 2, "");
2032   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2033   attributes.set_is_evex_instruction();
2034   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
2035   emit_int16(0x35, (0xC0 | encode));
2036 }
2037 
2038 void Assembler::evpmovqb(XMMRegister dst, XMMRegister src, int vector_len) {
2039   assert(UseAVX > 2, "");
2040   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2041   attributes.set_is_evex_instruction();
2042   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
2043   emit_int16(0x32, (0xC0 | encode));
2044 }
2045 
2046 void Assembler::evpmovqw(XMMRegister dst, XMMRegister src, int vector_len) {
2047   assert(UseAVX > 2, "");
2048   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2049   attributes.set_is_evex_instruction();
2050   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
2051   emit_int16(0x34, (0xC0 | encode));
2052 }
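
All of the VPMOV down-converts above simply truncate each wide lane; the saturating variants are separate opcodes. A scalar sketch for the word->byte case (hypothetical helper):

#include <cstdint>

static void pmovwb_model(uint8_t* dst, const uint16_t* src, int lanes) {
  for (int i = 0; i < lanes; i++) {
    dst[i] = (uint8_t)src[i];  // keep the low byte only
  }
}
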
2053 
2054 void Assembler::decl(Address dst) {
2055   // Don't use it directly. Use MacroAssembler::decrement() instead.
2056   InstructionMark im(this);
2057   prefix(dst);
2058   emit_int8((unsigned char)0xFF);
2059   emit_operand(rcx, dst);
2060 }
2061 
2062 void Assembler::divsd(XMMRegister dst, Address src) {
2063   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2064   InstructionMark im(this);
2065   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2066   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
2067   attributes.set_rex_vex_w_reverted();
2068   simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
2069   emit_int8(0x5E);
2070   emit_operand(dst, src);
2071 }
2072 
2073 void Assembler::divsd(XMMRegister dst, XMMRegister src) {


2631   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2632   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2633   emit_int8(0x6F);
2634   emit_operand(dst, src);
2635 }
2636 
2637 void Assembler::vmovdqu(Address dst, XMMRegister src) {
2638   assert(UseAVX > 0, "");
2639   InstructionMark im(this);
2640   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2641   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2642   attributes.reset_is_clear_context();
2643   // swap src<->dst for encoding
2644   assert(src != xnoreg, "sanity");
2645   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2646   emit_int8(0x7F);
2647   emit_operand(src, dst);
2648 }
2649 
2650 // Move Unaligned EVEX-enabled Vector (programmable element size: 8, 16, 32, 64 bits)
2651 void Assembler::evmovdqub(XMMRegister dst, XMMRegister src, bool merge, int vector_len) {
2652   assert(VM_Version::supports_evex(), "");
2653   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
2654   attributes.set_is_evex_instruction();
2655   if (merge) {
2656     attributes.reset_is_clear_context();
2657   }
2658   int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
2659   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
2660   emit_int16(0x6F, (0xC0 | encode));
2661 }
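
// Illustrative model, not part of the assembler: what the 'merge' flag in the
// evmovdqu* entry points above means for a masked EVEX move. With merging,
// lanes whose opmask bit is clear keep their old destination value; with
// zeroing they are cleared. A minimal sketch, assuming byte lanes and a plain
// 64-bit integer standing in for a k register (lanes <= 64).
static inline void masked_move_model(unsigned char* dst, const unsigned char* src,
                                     unsigned long long kmask, int lanes, bool merge) {
  for (int i = 0; i < lanes; i++) {
    if ((kmask >> i) & 1) {
      dst[i] = src[i];   // opmask bit set: lane is written
    } else if (!merge) {
      dst[i] = 0;        // zeroing: masked-off lane is cleared
    }                    // merging: masked-off lane is preserved
  }
}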
2662 
2663 void Assembler::evmovdqub(XMMRegister dst, Address src, bool merge, int vector_len) {
2664   assert(VM_Version::supports_evex(), "");
2665   InstructionMark im(this);
2666   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
2667   int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
2668   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2669   attributes.set_is_evex_instruction();
2670   if (merge) {
2671     attributes.reset_is_clear_context();
2672   }
2673   vex_prefix(src, 0, dst->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
2674   emit_int8(0x6F);
2675   emit_operand(dst, src);
2676 }
2677 
2678 void Assembler::evmovdqub(Address dst, XMMRegister src, bool merge, int vector_len) {
2679   assert(VM_Version::supports_evex(), "");
2680   assert(src != xnoreg, "sanity");
2681   InstructionMark im(this);
2682   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
2683   int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
2684   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2685   attributes.set_is_evex_instruction();
2686   if (merge) {
2687     attributes.reset_is_clear_context();
2688   }
2689   vex_prefix(dst, 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
2690   emit_int8(0x7F);
2691   emit_operand(src, dst);
2692 }
2693 
2694 void Assembler::evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
2695   assert(VM_Version::supports_avx512vlbw(), "");
2696   InstructionMark im(this);
2697   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
2698   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2699   attributes.set_embedded_opmask_register_specifier(mask);
2700   attributes.set_is_evex_instruction();
2701   if (merge) {
2702     attributes.reset_is_clear_context();
2703   }
2704   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
2705   emit_int8(0x6F);
2706   emit_operand(dst, src);
2707 }
2708 
2709 void Assembler::evmovdquw(XMMRegister dst, Address src, bool merge, int vector_len) {
2710   assert(VM_Version::supports_evex(), "");
2711   InstructionMark im(this);
2712   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
2713   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2714   attributes.set_is_evex_instruction();
2715   if (merge) {
2716     attributes.reset_is_clear_context();
2717   }
2718   int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
2719   vex_prefix(src, 0, dst->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
2720   emit_int8(0x6F);
2721   emit_operand(dst, src);
2722 }
2723 
2724 void Assembler::evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
2725   assert(VM_Version::supports_avx512vlbw(), "");
2726   InstructionMark im(this);
2727   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
2728   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2729   attributes.set_embedded_opmask_register_specifier(mask);
2730   attributes.set_is_evex_instruction();
2731   if (merge) {
2732     attributes.reset_is_clear_context();
2733   }
2734   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
2735   emit_int8(0x6F);
2736   emit_operand(dst, src);
2737 }
2738 
2739 void Assembler::evmovdquw(Address dst, XMMRegister src, bool merge, int vector_len) {
2740   assert(VM_Version::supports_evex(), "");
2741   assert(src != xnoreg, "sanity");
2742   InstructionMark im(this);
2743   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
2744   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2745   attributes.set_is_evex_instruction();
2746   if (merge) {
2747     attributes.reset_is_clear_context();
2748   }
2749   int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
2750   vex_prefix(dst, 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
2751   emit_int8(0x7F);
2752   emit_operand(src, dst);
2753 }
2754 
2755 void Assembler::evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
2756   assert(VM_Version::supports_avx512vlbw(), "");
2757   assert(src != xnoreg, "sanity");
2758   InstructionMark im(this);
2759   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
2760   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);

2761   attributes.set_embedded_opmask_register_specifier(mask);
2762   attributes.set_is_evex_instruction();
2763   if (merge) {
2764     attributes.reset_is_clear_context();
2765   }
2766   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
2767   emit_int8(0x7F);
2768   emit_operand(src, dst);
2769 }
2770 
2771 void Assembler::evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
2772   // Unmasked instruction
2773   evmovdqul(dst, k0, src, /*merge*/ false, vector_len);
2774 }
2775 
2776 void Assembler::evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
2777   assert(VM_Version::supports_evex(), "");
2778   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
2779   attributes.set_embedded_opmask_register_specifier(mask);
2780   attributes.set_is_evex_instruction();
2781   if (merge) {
2782     attributes.reset_is_clear_context();
2783   }
2784   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2785   emit_int16(0x6F, (0xC0 | encode));
2786 }
2787 
2788 void Assembler::evmovdqul(XMMRegister dst, Address src, int vector_len) {
2789   // Unmasked instruction
2790   evmovdqul(dst, k0, src, /*merge*/ false, vector_len);
2791 }
2792 
2793 void Assembler::evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
2794   assert(VM_Version::supports_evex(), "");
2795   InstructionMark im(this);
2796   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
2797   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2798   attributes.set_embedded_opmask_register_specifier(mask);
2799   attributes.set_is_evex_instruction();
2800   if (merge) {
2801     attributes.reset_is_clear_context();
2802   }
2803   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2804   emit_int8(0x6F);
2805   emit_operand(dst, src);
2806 }
2807 
2808 void Assembler::evmovdqul(Address dst, XMMRegister src, int vector_len) {
2809   // Unmasked instruction
2810   evmovdqul(dst, k0, src, /*merge*/ true, vector_len);
2811 }
2812 
2813 void Assembler::evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
2814   assert(VM_Version::supports_evex(), "");
2815   assert(src != xnoreg, "sanity");
2816   InstructionMark im(this);
2817   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
2818   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2819   attributes.set_embedded_opmask_register_specifier(mask);
2820   attributes.set_is_evex_instruction();
2821   if (merge) {
2822     attributes.reset_is_clear_context();
2823   }
2824   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2825   emit_int8(0x7F);
2826   emit_operand(src, dst);
2827 }
2828 
2829 void Assembler::evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
2830   // Unmasked instruction
2831   if (dst->encoding() == src->encoding()) return;
2832   evmovdquq(dst, k0, src, /*merge*/ false, vector_len);
2833 }
2834 
2835 void Assembler::evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
2836   assert(VM_Version::supports_evex(), "");
2837   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
2838   attributes.set_embedded_opmask_register_specifier(mask);
2839   attributes.set_is_evex_instruction();
2840   if (merge) {
2841     attributes.reset_is_clear_context();
2842   }
2843   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2844   emit_int16(0x6F, (0xC0 | encode));
2845 }
2846 
2847 void Assembler::evmovdquq(XMMRegister dst, Address src, int vector_len) {
2848   // Unmasked instruction
2849   evmovdquq(dst, k0, src, /*merge*/ false, vector_len);
2850 }
2851 
2852 void Assembler::evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
2853   assert(VM_Version::supports_evex(), "");
2854   InstructionMark im(this);
2855   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
2856   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2857   attributes.set_embedded_opmask_register_specifier(mask);
2858   attributes.set_is_evex_instruction();
2859   if (merge) {
2860     attributes.reset_is_clear_context();
2861   }
2862   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2863   emit_int8(0x6F);
2864   emit_operand(dst, src);
2865 }
2866 
2867 void Assembler::evmovdquq(Address dst, XMMRegister src, int vector_len) {
2868   // Unmasked instruction
2869   evmovdquq(dst, k0, src, /*merge*/ true, vector_len);
2870 }
2871 
2872 void Assembler::evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
2873   assert(VM_Version::supports_evex(), "");
2874   assert(src != xnoreg, "sanity");
2875   InstructionMark im(this);
2876   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
2877   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2878   attributes.set_embedded_opmask_register_specifier(mask);
2879   attributes.set_is_evex_instruction();
2880   if (merge) {
2881     attributes.reset_is_clear_context();
2882   }
2883   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2884   emit_int8(0x7F);
2885   emit_operand(src, dst);
2886 }
2887 
2888 // Uses zero extension on 64-bit
2889 
2890 void Assembler::movl(Register dst, int32_t imm32) {
2891   int encode = prefix_and_encode(dst->encoding());
2892   emit_int8(0xB8 | encode);
2893   emit_int32(imm32);
2894 }
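
// Illustrative model, not part of the assembler: the zero extension noted
// above. On x86-64 a 32-bit MOV always clears the upper half of the 64-bit
// destination, so movl(dst, imm32) behaves like this minimal sketch.
static inline unsigned long long movl_model(unsigned int imm32) {
  return (unsigned long long)imm32;  // upper 32 bits become zero, never sign-extended
}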
2895 
2896 void Assembler::movl(Register dst, Register src) {
2897   int encode = prefix_and_encode(dst->encoding(), src->encoding());
2898   emit_int16((unsigned char)0x8B, (0xC0 | encode));
2899 }
2900 
2901 void Assembler::movl(Register dst, Address src) {


2939   InstructionMark im(this);
2940   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2941   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
2942   attributes.set_rex_vex_w_reverted();
2943   simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2944   emit_int8(0x7E);
2945   emit_operand(dst, src);
2946 }
2947 
2948 void Assembler::movq(Address dst, XMMRegister src) {
2949   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2950   InstructionMark im(this);
2951   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2952   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
2953   attributes.set_rex_vex_w_reverted();
2954   simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
2955   emit_int8((unsigned char)0xD6);
2956   emit_operand(src, dst);
2957 }
2958 
2959 void Assembler::movq(XMMRegister dst, XMMRegister src) {
2960   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2961   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2962   attributes.set_rex_vex_w_reverted();
2963   int encode = simd_prefix_and_encode(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
2964   emit_int16((unsigned char)0xD6, (0xC0 | encode));
2965 }
2966 
2967 void Assembler::movq(Register dst, XMMRegister src) {
2968   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2969   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2970   // swap src/dst to get correct prefix
2971   int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
2972   emit_int16(0x7E, (0xC0 | encode));
2973 }
2974 
2975 void Assembler::movq(XMMRegister dst, Register src) {
2976   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2977   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2978   int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
2979   emit_int16(0x6E, (0xC0 | encode));
2980 }
2981 
2982 void Assembler::movsbl(Register dst, Address src) { // movsxb
2983   InstructionMark im(this);
2984   prefix(src, dst);
2985   emit_int16(0x0F, (unsigned char)0xBE);
2986   emit_operand(dst, src);
2987 }
2988 
2989 void Assembler::movsbl(Register dst, Register src) { // movsxb
2990   NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
2991   int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true);
2992   emit_int24(0x0F, (unsigned char)0xBE, (0xC0 | encode));
2993 }
2994 
2995 void Assembler::movsd(XMMRegister dst, XMMRegister src) {
2996   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2997   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2998   attributes.set_rex_vex_w_reverted();
2999   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
3000   emit_int16(0x10, (0xC0 | encode));
3001 }


3461   switch (i) {
3462     case 4:
3463       emit_int8(0x66);
3464     case 3:
3465       emit_int8(0x66);
3466     case 2:
3467       emit_int8(0x66);
3468     case 1:
3469       emit_int8((unsigned char)0x90);
3470       break;
3471     default:
3472       assert(i == 0, " ");
3473   }
3474 }
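
// Illustrative model, not part of the assembler: the switch above pads with
// i bytes (0 <= i <= 4) by stacking 0x66 operand-size prefixes in front of a
// single one-byte NOP (0x90), falling through one case per prefix. A minimal
// sketch that writes the same byte sequence into a plain buffer.
static inline int nop_pad_model(unsigned char* buf, int i) {
  for (int k = 0; k < i - 1; k++) {
    buf[k] = 0x66;       // each extra byte is an operand-size prefix
  }
  if (i > 0) {
    buf[i - 1] = 0x90;   // the NOP itself
  }
  return i;              // bytes written
}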
3475 
3476 void Assembler::notl(Register dst) {
3477   int encode = prefix_and_encode(dst->encoding());
3478   emit_int16((unsigned char)0xF7, (0xD0 | encode));
3479 }
3480 
3481 void Assembler::orw(Register dst, Register src) {
3482   (void)prefix_and_encode(dst->encoding(), src->encoding());
3483   emit_arith(0x0B, 0xC0, dst, src);
3484 }
3485 
3486 void Assembler::orl(Address dst, int32_t imm32) {
3487   InstructionMark im(this);
3488   prefix(dst);
3489   emit_arith_operand(0x81, rcx, dst, imm32);
3490 }
3491 
3492 void Assembler::orl(Register dst, int32_t imm32) {
3493   prefix(dst);
3494   emit_arith(0x81, 0xC8, dst, imm32);
3495 }
3496 
3497 void Assembler::orl(Register dst, Address src) {
3498   InstructionMark im(this);
3499   prefix(src, dst);
3500   emit_int8(0x0B);
3501   emit_operand(dst, src);
3502 }
3503 
3504 void Assembler::orl(Register dst, Register src) {
3505   (void) prefix_and_encode(dst->encoding(), src->encoding());
3506   emit_arith(0x0B, 0xC0, dst, src);
3507 }
3508 
3509 void Assembler::orl(Address dst, Register src) {
3510   InstructionMark im(this);
3511   prefix(dst, src);
3512   emit_int8(0x09);
3513   emit_operand(src, dst);
3514 }
3515 
3516 void Assembler::orb(Address dst, int imm8) {
3517   InstructionMark im(this);
3518   prefix(dst);
3519   emit_int8((unsigned char)0x80);
3520   emit_operand(rcx, dst, 1);
3521   emit_int8(imm8);
3522 }
3523 
3524 void Assembler::packsswb(XMMRegister dst, XMMRegister src) {
3525   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3526   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3527   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3528   emit_int16(0x63, (0xC0 | encode));
3529 }
3530 
3531 void Assembler::vpacksswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3532   assert(UseAVX > 0, "some form of AVX must be enabled");
3533   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3534   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3535   emit_int16(0x63, (0xC0 | encode));
3536 }
3537 
3538 void Assembler::packssdw(XMMRegister dst, XMMRegister src) {
3539   assert(VM_Version::supports_sse2(), "");
3540   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3541   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3542   emit_int16(0x6B, (0xC0 | encode));
3543 }
3544 
3545 void Assembler::vpackssdw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3546   assert(UseAVX > 0, "some form of AVX must be enabled");
3547   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3548   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3549   emit_int16(0x6B, (0xC0 | encode));
3550 }
3551 
3552 void Assembler::packuswb(XMMRegister dst, Address src) {
3553   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3554   assert((UseAVX > 0), "SSE mode requires 16-byte address alignment");
3555   InstructionMark im(this);
3556   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3557   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
3558   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3559   emit_int8(0x67);
3560   emit_operand(dst, src);
3561 }
3562 
3563 void Assembler::packuswb(XMMRegister dst, XMMRegister src) {
3564   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3565   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3566   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3567   emit_int16(0x67, (0xC0 | encode));
3568 }
3569 
3570 void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3571   assert(UseAVX > 0, "some form of AVX must be enabled");
3572   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3573   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3574   emit_int16(0x67, (0xC0 | encode));
3575 }
3576 
3577 void Assembler::packusdw(XMMRegister dst, XMMRegister src) {
3578   assert(VM_Version::supports_sse4_1(), "");
3579   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3580   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3581   emit_int16(0x2B, (0xC0 | encode));
3582 }
3583 
3584 void Assembler::vpackusdw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3585   assert(UseAVX > 0, "some form of AVX must be enabled");
3586   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3587   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3588   emit_int16(0x2B, (0xC0 | encode));
3589 }
3590 
3591 void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
3592   assert(VM_Version::supports_avx2(), "");
3593   assert(vector_len != AVX_128bit, "");
3594   // VEX.256.66.0F3A.W1 00 /r ib
3595   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3596   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3597   emit_int24(0x00, (0xC0 | encode), imm8);
3598 }
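
// Illustrative model, not part of the assembler: the immediate form of vpermq
// above selects each destination quadword by a 2-bit field of imm8. A minimal
// sketch over the four 64-bit lanes of a 256-bit register.
static inline void vpermq_model(unsigned long long dst[4], const unsigned long long src[4], int imm8) {
  for (int i = 0; i < 4; i++) {
    dst[i] = src[(imm8 >> (2 * i)) & 3];  // bits [2i+1:2i] of imm8 pick the source lane
  }
}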
3599 
3600 void Assembler::vpermq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3601   assert(vector_len == AVX_256bit ? VM_Version::supports_avx512vl() :
3602          vector_len == AVX_512bit ? VM_Version::supports_evex()     : false, "not supported");
3603   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3604   attributes.set_is_evex_instruction();
3605   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3606   emit_int16(0x36, (0xC0 | encode));
3607 }
3608 
3609 void Assembler::vpermb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3610   assert(VM_Version::supports_avx512_vbmi(), "");
3611   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3612   attributes.set_is_evex_instruction();
3613   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3614   emit_int16((unsigned char)0x8D, (0xC0 | encode));
3615 }
3616 
3617 void Assembler::vpermw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3618   assert(vector_len == AVX_128bit ? VM_Version::supports_avx512vlbw() :
3619          vector_len == AVX_256bit ? VM_Version::supports_avx512vlbw() :
3620          vector_len == AVX_512bit ? VM_Version::supports_avx512bw()   : false, "not supported");
3621   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
3622   attributes.set_is_evex_instruction();
3623   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3624   emit_int16((unsigned char)0x8D, (0xC0 | encode));
3625 }
3626 
3627 void Assembler::vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3628   assert(vector_len <= AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex(), "");
3629   // VEX.NDS.256.66.0F38.W0 36 /r
3630   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3631   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3632   emit_int16(0x36, (0xC0 | encode));
3633 }
3634 
3635 void Assembler::vpermd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3636   assert(vector_len <= AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex(), "");
3637   // VEX.NDS.256.66.0F38.W0 36 /r
3638   InstructionMark im(this);
3639   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3640   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3641   emit_int8(0x36);
3642   emit_operand(dst, src);
3643 }
3644 
3645 void Assembler::vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
3646   assert(VM_Version::supports_avx2(), "");
3647   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3648   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3649   emit_int24(0x46, (0xC0 | encode), imm8);
3650 }
3651 
3652 void Assembler::vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
3653   assert(VM_Version::supports_avx(), "");
3654   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3655   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3656   emit_int24(0x06, (0xC0 | encode), imm8);
3657 }
3658 
3659 void Assembler::vpermilps(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
3660   assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
3661   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3662   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3663   emit_int24(0x04, (0xC0 | encode), imm8);
3664 }
3665 
3666 void Assembler::vpermilpd(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
3667   assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
3668   InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3669   attributes.set_rex_vex_w_reverted();
3670   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3671   emit_int24(0x05, (0xC0 | encode), imm8);
3672 }
3673 
3674 void Assembler::vpermpd(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
3675   assert(vector_len <= AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex(), "");
3676   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3677   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3678   emit_int24(0x01, (0xC0 | encode), imm8);
3679 }
3680 
3681 void Assembler::evpermi2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3682   assert(VM_Version::supports_evex(), "");
3683   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3684   attributes.set_is_evex_instruction();
3685   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3686   emit_int16(0x76, (0xC0 | encode));
3687 }
3688 

3689 void Assembler::pause() {
3690   emit_int16((unsigned char)0xF3, (unsigned char)0x90);
3691 }
3692 
3693 void Assembler::ud2() {
3694   emit_int16(0x0F, 0x0B);
3695 }
3696 
3697 void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
3698   assert(VM_Version::supports_sse4_2(), "");
3699   InstructionMark im(this);
3700   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3701   simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3702   emit_int8(0x61);
3703   emit_operand(dst, src);
3704   emit_int8(imm8);
3705 }
3706 
3707 void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
3708   assert(VM_Version::supports_sse4_2(), "");
3709   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3710   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3711   emit_int24(0x61, (0xC0 | encode), imm8);
3712 }
3713 
3714 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
3715 void Assembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
3716   assert(VM_Version::supports_sse2(), "");
3717   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3718   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3719   emit_int16(0x74, (0xC0 | encode));
3720 }
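
// Illustrative model, not part of the assembler: the lane-wise semantics of
// pcmpeqb above. Equal byte lanes become all ones, the rest become zero; no
// flags are set. A minimal sketch over the 16 lanes of an XMM register.
static inline void pcmpeqb_model(unsigned char dst[16], const unsigned char src[16]) {
  for (int i = 0; i < 16; i++) {
    dst[i] = (dst[i] == src[i]) ? 0xFF : 0x00;  // per-lane equality result
  }
}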
3721 
3722 void Assembler::vpcmpCCbwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, int vector_len) {
3723   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
3724   assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
3725   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3726   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3727   emit_int16(cond_encoding, (0xC0 | encode));
3728 }
3729 
3730 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
3731 void Assembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3732   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
3733   assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
3734   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3735   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3736   emit_int16(0x74, (0xC0 | encode));
3737 }
3738 
3739 // In this context, kdst is written with the mask used to process the equal components
3740 void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
3741   assert(VM_Version::supports_avx512bw(), "");
3742   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3743   attributes.set_is_evex_instruction();
3744   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3745   emit_int16(0x74, (0xC0 | encode));
3746 }
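
// Illustrative model, not part of the assembler: the EVEX form above writes
// one bit per equal byte lane into a k register instead of widening the
// result into a vector. A minimal sketch, assuming a 64-bit integer stands in
// for kdst and lanes is 16/32/64 depending on vector_len.
static inline unsigned long long evpcmpeqb_model(const unsigned char* a, const unsigned char* b, int lanes) {
  unsigned long long k = 0;
  for (int i = 0; i < lanes; i++) {
    if (a[i] == b[i]) {
      k |= 1ULL << i;    // bit i set when lane i compares equal
    }
  }
  return k;
}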
3747 
3748 void Assembler::evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
3749   assert(VM_Version::supports_avx512vlbw(), "");
3750   InstructionMark im(this);
3751   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3752   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3753   attributes.set_is_evex_instruction();


3800   emit_int8(0x3E);
3801   emit_operand(as_Register(dst_enc), src);
3802   emit_int8(vcc);
3803 }
3804 
3805 void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
3806   assert(VM_Version::supports_avx512bw(), "");
3807   InstructionMark im(this);
3808   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3809   attributes.set_is_evex_instruction();
3810   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3811   int dst_enc = kdst->encoding();
3812   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3813   emit_int8(0x74);
3814   emit_operand(as_Register(dst_enc), src);
3815 }
3816 
3817 void Assembler::evpcmpeqb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
3818   assert(VM_Version::supports_avx512vlbw(), "");
3819   InstructionMark im(this);
3820   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
3821   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3822   attributes.reset_is_clear_context();
3823   attributes.set_embedded_opmask_register_specifier(mask);
3824   attributes.set_is_evex_instruction();
3825   vex_prefix(src, nds->encoding(), kdst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3826   emit_int8(0x74);
3827   emit_operand(as_Register(kdst->encoding()), src);
3828 }
3829 
3830 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
3831 void Assembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
3832   assert(VM_Version::supports_sse2(), "");
3833   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3834   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3835   emit_int16(0x75, (0xC0 | encode));
3836 }
3837 
3838 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
3839 void Assembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3840   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
3841   assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
3842   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3843   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3844   emit_int16(0x75, (0xC0 | encode));
3845 }
3846 
3847 // In this context, kdst is written with the mask used to process the equal components
3848 void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
3849   assert(VM_Version::supports_avx512bw(), "");
3850   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3851   attributes.set_is_evex_instruction();
3852   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3853   emit_int16(0x75, (0xC0 | encode));
3854 }
3855 
3856 void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
3857   assert(VM_Version::supports_avx512bw(), "");
3858   InstructionMark im(this);
3859   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3860   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3861   attributes.set_is_evex_instruction();
3862   int dst_enc = kdst->encoding();
3863   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3864   emit_int8(0x75);
3865   emit_operand(as_Register(dst_enc), src);
3866 }
3867 
3868 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
3869 void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
3870   assert(VM_Version::supports_sse2(), "");
3871   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3872   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3873   emit_int16(0x76, (0xC0 | encode));
3874 }
3875 
3876 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
3877 void Assembler::vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3878   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
3879   assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
3880   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3881   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3882   emit_int16(0x76, (0xC0 | encode));
3883 }
3884 
3885 // In this context, kdst is written with the mask used to process the equal components
3886 void Assembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int vector_len) {
3887   assert(VM_Version::supports_evex(), "");
3888   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
3889   attributes.set_is_evex_instruction();
3890   attributes.reset_is_clear_context();
3891   attributes.set_embedded_opmask_register_specifier(mask);
3892   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3893   emit_int16(0x76, (0xC0 | encode));
3894 }
3895 
3896 void Assembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
3897   assert(VM_Version::supports_evex(), "");
3898   InstructionMark im(this);
3899   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
3900   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);

3901   attributes.set_is_evex_instruction();
3902   attributes.reset_is_clear_context();
3903   attributes.set_embedded_opmask_register_specifier(mask);
3904   int dst_enc = kdst->encoding();
3905   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3906   emit_int8(0x76);
3907   emit_operand(as_Register(dst_enc), src);
3908 }
3909 
3910 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
3911 void Assembler::pcmpeqq(XMMRegister dst, XMMRegister src) {
3912   assert(VM_Version::supports_sse4_1(), "");
3913   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3914   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3915   emit_int16(0x29, (0xC0 | encode));
3916 }
3917 
3918 void Assembler::vpcmpCCq(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, int vector_len) {
3919   assert(VM_Version::supports_avx(), "");
3920   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3921   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3922   emit_int16(cond_encoding, (0xC0 | encode));
3923 }
3924 
3925 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
3926 void Assembler::vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3927   assert(VM_Version::supports_avx(), "");
3928   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3929   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3930   emit_int16(0x29, (0xC0 | encode));
3931 }
3932 
3933 // In this context, kdst is written with the mask used to process the equal components
3934 void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
3935   assert(VM_Version::supports_evex(), "");
3936   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3937   attributes.reset_is_clear_context();
3938   attributes.set_is_evex_instruction();
3939   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3940   emit_int16(0x29, (0xC0 | encode));
3941 }
3942 
3943 // In this context, kdst is written with the mask used to process the equal components
3944 void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
3945   assert(VM_Version::supports_evex(), "");
3946   InstructionMark im(this);
3947   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3948   attributes.reset_is_clear_context();
3949   attributes.set_is_evex_instruction();
3950   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
3951   int dst_enc = kdst->encoding();
3952   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3953   emit_int8(0x29);
3954   emit_operand(as_Register(dst_enc), src);
3955 }
3956 
3957 void Assembler::evpmovd2m(KRegister kdst, XMMRegister src, int vector_len) {
3958   assert(UseAVX > 2 && VM_Version::supports_avx512dq(), "");
3959   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
3960   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3961   attributes.set_is_evex_instruction();
3962   int encode = vex_prefix_and_encode(kdst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
3963   emit_int16(0x39, (0xC0 | encode));
3964 }
3965 
3966 void Assembler::evpmovq2m(KRegister kdst, XMMRegister src, int vector_len) {
3967   assert(UseAVX > 2 && VM_Version::supports_avx512dq(), "");
3968   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
3969   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3970   attributes.set_is_evex_instruction();
3971   int encode = vex_prefix_and_encode(kdst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
3972   emit_int16(0x39, (0xC0 | encode));
3973 }
3974 
3975 void Assembler::pcmpgtq(XMMRegister dst, XMMRegister src) {
3976   assert(VM_Version::supports_sse4_1(), "");
3977   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3978   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3979   emit_int16(0x37, (0xC0 | encode));
3980 }
3981 
3982 void Assembler::pmovmskb(Register dst, XMMRegister src) {
3983   assert(VM_Version::supports_sse2(), "");
3984   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3985   int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3986   emit_int16((unsigned char)0xD7, (0xC0 | encode));
3987 }
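
// Illustrative model, not part of the assembler: pmovmskb above gathers the
// most significant bit of every byte lane into the low bits of a general
// purpose register. A minimal sketch over the 16 lanes of an XMM source.
static inline int pmovmskb_model(const unsigned char src[16]) {
  int mask = 0;
  for (int i = 0; i < 16; i++) {
    mask |= ((src[i] >> 7) & 1) << i;  // lane i's sign bit becomes result bit i
  }
  return mask;
}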
3988 
3989 void Assembler::vpmovmskb(Register dst, XMMRegister src) {
3990   assert(VM_Version::supports_avx2(), "");
3991   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3992   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3993   emit_int16((unsigned char)0xD7, (0xC0 | encode));
3994 }
3995 
3996 void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
3997   assert(VM_Version::supports_sse4_1(), "");
3998   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
3999   int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4000   emit_int24(0x16, (0xC0 | encode), imm8);
4001 }
4002 
4003 void Assembler::pextrd(Address dst, XMMRegister src, int imm8) {
4004   assert(VM_Version::supports_sse4_1(), "");
4005   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
4006   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
4007   simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4008   emit_int8(0x16);
4009   emit_operand(src, dst);
4010   emit_int8(imm8);
4011 }
4012 
4013 void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
4014   assert(VM_Version::supports_sse4_1(), "");
4015   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
4016   int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4017   emit_int24(0x16, (0xC0 | encode), imm8);
4018 }
4019 
4020 void Assembler::pextrq(Address dst, XMMRegister src, int imm8) {
4021   assert(VM_Version::supports_sse4_1(), "");
4022   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
4023   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
4024   simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4025   emit_int8(0x16);
4026   emit_operand(src, dst);
4027   emit_int8(imm8);
4028 }
4029 
4030 void Assembler::pextrw(Register dst, XMMRegister src, int imm8) {
4031   assert(VM_Version::supports_sse2(), "");
4032   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
4033   int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4034   emit_int24((unsigned char)0xC5, (0xC0 | encode), imm8);
4035 }
4036 
4037 void Assembler::pextrw(Address dst, XMMRegister src, int imm8) {
4038   assert(VM_Version::supports_sse4_1(), "");
4039   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
4040   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
4041   simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4042   emit_int8(0x15);
4043   emit_operand(src, dst);
4044   emit_int8(imm8);
4045 }
4046 
4047 void Assembler::pextrb(Register dst, XMMRegister src, int imm8) {
4048   assert(VM_Version::supports_sse4_1(), "");
4049   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
4050   int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4051   emit_int24(0x14, (0xC0 | encode), imm8);
4052 }
4053 
4054 void Assembler::pextrb(Address dst, XMMRegister src, int imm8) {
4055   assert(VM_Version::supports_sse4_1(), "");
4056   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
4057   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
4058   simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4059   emit_int8(0x14);
4060   emit_operand(src, dst);
4061   emit_int8(imm8);
4062 }
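
// Illustrative model, not part of the assembler: the pextr* family above
// copies one selected lane out of the vector; imm8 picks the lane and excess
// selector bits are ignored. A minimal sketch of the dword case.
static inline unsigned int pextrd_model(const unsigned int src[4], int imm8) {
  return src[imm8 & 3];  // only the low two selector bits are used
}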
4063 
4064 void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
4065   assert(VM_Version::supports_sse4_1(), "");
4066   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
4067   int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4068   emit_int24(0x22, (0xC0 | encode), imm8);
4069 }
4070 
4071 void Assembler::pinsrd(XMMRegister dst, Address src, int imm8) {
4072   assert(VM_Version::supports_sse4_1(), "");
4073   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
4074   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
4075   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4076   emit_int8(0x22);
4077   emit_operand(dst, src);
4078   emit_int8(imm8);
4079 }
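
// Illustrative model, not part of the assembler: pinsrd above replaces one
// selected dword lane and leaves the others untouched. A minimal sketch over
// the four 32-bit lanes of an XMM destination.
static inline void pinsrd_model(unsigned int dst[4], unsigned int src, int imm8) {
  dst[imm8 & 3] = src;   // only the selected lane changes
}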
4080 
4081 void Assembler::vpinsrd(XMMRegister dst, XMMRegister nds, Register src, int imm8) {
4082   assert(VM_Version::supports_avx(), "");
4083   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
4084   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4085   emit_int24(0x22, (0xC0 | encode), imm8);
4086 }
4087 
4088 void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
4089   assert(VM_Version::supports_sse4_1(), "");
4090   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
4091   int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4092   emit_int24(0x22, (0xC0 | encode), imm8);
4093 }
4094 
4095 void Assembler::pinsrq(XMMRegister dst, Address src, int imm8) {
4096   assert(VM_Version::supports_sse4_1(), "");
4097   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
4098   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
4099   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4100   emit_int8(0x22);
4101   emit_operand(dst, src);
4102   emit_int8(imm8);
4103 }
4104 
4105 void Assembler::vpinsrq(XMMRegister dst, XMMRegister nds, Register src, int imm8) {
4106   assert(VM_Version::supports_avx(), "");
4107   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
4108   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4109   emit_int24(0x22, (0xC0 | encode), imm8);
4110 }
4111 
4112 void Assembler::pinsrw(XMMRegister dst, Register src, int imm8) {
4113   assert(VM_Version::supports_sse2(), "");
4114   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
4115   int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4116   emit_int24((unsigned char)0xC4, (0xC0 | encode), imm8);
4117 }
4118 
4119 void Assembler::pinsrw(XMMRegister dst, Address src, int imm8) {
4120   assert(VM_Version::supports_sse2(), "");
4121   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
4122   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
4123   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4124   emit_int8((unsigned char)0xC4);
4125   emit_operand(dst, src);
4126   emit_int8(imm8);
4127 }
4128 
4129 void Assembler::vpinsrw(XMMRegister dst, XMMRegister nds, Register src, int imm8) {
4130   assert(VM_Version::supports_avx(), "");
4131   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
4132   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4133   emit_int24((unsigned char)0xC4, (0xC0 | encode), imm8);
4134 }
4135 
4136 void Assembler::pinsrb(XMMRegister dst, Address src, int imm8) {
4137   assert(VM_Version::supports_sse4_1(), "");
4138   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
4139   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
4140   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4141   emit_int8(0x20);
4142   emit_operand(dst, src);
4143   emit_int8(imm8);
4144 }
4145 
4146 void Assembler::pinsrb(XMMRegister dst, Register src, int imm8) {
4147   assert(VM_Version::supports_sse4_1(), "");
4148   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
4149   int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4150   emit_int24(0x20, (0xC0 | encode), imm8);
4151 }
4152 
4153 void Assembler::vpinsrb(XMMRegister dst, XMMRegister nds, Register src, int imm8) {
4154   assert(VM_Version::supports_avx(), "");
4155   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
4156   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4157   emit_int24(0x20, (0xC0 | encode), imm8);
4158 }
4159 
4160 void Assembler::insertps(XMMRegister dst, XMMRegister src, int imm8) {
4161   assert(VM_Version::supports_sse4_1(), "");
4162   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
4163   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4164   emit_int24(0x21, (0xC0 | encode), imm8);
4165 }
4166 
4167 void Assembler::vinsertps(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
4168   assert(VM_Version::supports_avx(), "");
4169   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
4170   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4171   emit_int24(0x21, (0xC0 | encode), imm8);
4172 }
4173 
4174 void Assembler::pmovzxbw(XMMRegister dst, Address src) {
4175   assert(VM_Version::supports_sse4_1(), "");
4176   InstructionMark im(this);
4177   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4178   attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
4179   simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4180   emit_int8(0x30);
4181   emit_operand(dst, src);
4182 }
4183 
4184 void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
4185   assert(VM_Version::supports_sse4_1(), "");
4186   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4187   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4188   emit_int16(0x30, (0xC0 | encode));
4189 }
4190 
4191 void Assembler::pmovsxbw(XMMRegister dst, XMMRegister src) {
4192   assert(VM_Version::supports_sse4_1(), "");
4193   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4194   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4195   emit_int16(0x20, (0xC0 | encode));
4196 }
4197 
4198 void Assembler::pmovzxdq(XMMRegister dst, XMMRegister src) {
4199   assert(VM_Version::supports_sse4_1(), "");
4200   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4201   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4202   emit_int16(0x35, (0xC0 | encode));
4203 }
4204 
4205 void Assembler::pmovsxbd(XMMRegister dst, XMMRegister src) {
4206   assert(VM_Version::supports_sse4_1(), "");
4207   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4208   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4209   emit_int16(0x21, (0xC0 | encode));
4210 }
4211 
4212 void Assembler::pmovzxbd(XMMRegister dst, XMMRegister src) {
4213   assert(VM_Version::supports_sse4_1(), "");
4214   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4215   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4216   emit_int16(0x31, (0xC0 | encode));
4217 }
4218 
4219 void Assembler::pmovsxbq(XMMRegister dst, XMMRegister src) {
4220   assert(VM_Version::supports_sse4_1(), "");
4221   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4222   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4223   emit_int16(0x22, (0xC0 | encode));
4224 }
4225 
4226 void Assembler::pmovsxwd(XMMRegister dst, XMMRegister src) {
4227   assert(VM_Version::supports_sse4_1(), "");
4228   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4229   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4230   emit_int16(0x23, (0xC0 | encode));
4231 }
4232 
4233 void Assembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
4234   assert(VM_Version::supports_avx(), "");
4235   InstructionMark im(this);
4236   assert(dst != xnoreg, "sanity");
4237   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4238   attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
4239   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4240   emit_int8(0x30);
4241   emit_operand(dst, src);
4242 }
4243 
4244 void Assembler::vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) {
4245   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
4246          vector_len == AVX_256bit ? VM_Version::supports_avx2() :
4247          vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, "");
4248   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4249   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4250   emit_int16(0x30, (0xC0 | encode));
4251 }
4252 
4253 void Assembler::vpmovsxbw(XMMRegister dst, XMMRegister src, int vector_len) {
4254   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
4255          vector_len == AVX_256bit ? VM_Version::supports_avx2() :
4256          vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, "");
4257   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4258   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4259   emit_int16(0x20, (0xC0 | encode));
4260 }
4261 
4262 void Assembler::evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len) {
4263   assert(VM_Version::supports_avx512vlbw(), "");
4264   assert(dst != xnoreg, "sanity");
4265   InstructionMark im(this);
4266   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
4267   attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
4268   attributes.set_embedded_opmask_register_specifier(mask);
4269   attributes.set_is_evex_instruction();
4270   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4271   emit_int8(0x30);
4272   emit_operand(dst, src);
4273 }
4274 
4275 void Assembler::evpandd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
4276   assert(VM_Version::supports_evex(), "");
4277   // Encoding: EVEX.NDS.XXX.66.0F.W0 DB /r
4278   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
4279   attributes.set_is_evex_instruction();
4280   attributes.set_embedded_opmask_register_specifier(mask);
4281   if (merge) {
4282     attributes.reset_is_clear_context();
4283   }
4284   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4285   emit_int16((unsigned char)0xDB, (0xC0 | encode));
4286 }
4287 
4288 void Assembler::vpmovzxdq(XMMRegister dst, XMMRegister src, int vector_len) {
4289   assert(vector_len > AVX_128bit ? VM_Version::supports_avx2() : VM_Version::supports_avx(), "");
4290   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4291   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4292   emit_int16(0x35, (0xC0 | encode));
4293 }
4294 
4295 void Assembler::vpmovzxbd(XMMRegister dst, XMMRegister src, int vector_len) {
4296   assert(vector_len > AVX_128bit ? VM_Version::supports_avx2() : VM_Version::supports_avx(), "");
4297   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4298   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4299   emit_int16(0x31, (0xC0 | encode));
4300 }
4301 
4302 void Assembler::vpmovzxbq(XMMRegister dst, XMMRegister src, int vector_len) {
4303   assert(vector_len > AVX_128bit ? VM_Version::supports_avx2() : VM_Version::supports_avx(), "");
4304   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4305   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4306   emit_int16(0x32, (0xC0 | encode));
4307 }
4308 
4309 void Assembler::vpmovsxbd(XMMRegister dst, XMMRegister src, int vector_len) {
4310   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
4311          vector_len == AVX_256bit ? VM_Version::supports_avx2() :
4312              VM_Version::supports_evex(), "");
4313   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4314   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4315   emit_int16(0x21, (0xC0 | encode));
4316 }
4317 
4318 void Assembler::vpmovsxbq(XMMRegister dst, XMMRegister src, int vector_len) {
4319   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
4320          vector_len == AVX_256bit ? VM_Version::supports_avx2() :
4321              VM_Version::supports_evex(), "");
4322   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4323   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4324   emit_int16(0x22, (0xC0 | encode));
4325 }
4326 
4327 void Assembler::vpmovsxwd(XMMRegister dst, XMMRegister src, int vector_len) {
4328   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
4329          vector_len == AVX_256bit ? VM_Version::supports_avx2() :
4330              VM_Version::supports_evex(), "");
4331   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4332   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4333   emit_int16(0x23, (0xC0 | encode));
4334 }
4335 
4336 void Assembler::vpmovsxwq(XMMRegister dst, XMMRegister src, int vector_len) {
4337   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
4338          vector_len == AVX_256bit ? VM_Version::supports_avx2() :
4339              VM_Version::supports_evex(), "");
4340   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4341   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4342   emit_int16(0x24, (0xC0 | encode));
4343 }
4344 
4345 void Assembler::vpmovsxdq(XMMRegister dst, XMMRegister src, int vector_len) {
4346   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
4347          vector_len == AVX_256bit ? VM_Version::supports_avx2() :
4348              VM_Version::supports_evex(), "");
4349   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4350   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4351   emit_int16(0x25, (0xC0 | encode));
4352 }
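
// A scalar model (not HotSpot code) of the difference between the PMOVSX and
// PMOVZX families emitted above: each lane is widened with either sign or zero
// extension, which is why the opcodes come in 0x20..0x25 / 0x30..0x35 pairs.
#include <cstdint>
#include <cstdio>

int main() {
  uint8_t b = 0xF0;
  int16_t  sx = (int8_t)b;  // one pmovsxbw lane: -16
  uint16_t zx = b;          // one pmovzxbw lane: 240
  printf("%d %d\n", sx, zx);
  return 0;
}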
4353 
4354 void Assembler::evpmovwb(Address dst, XMMRegister src, int vector_len) {
4355   assert(VM_Version::supports_avx512vlbw(), "");
4356   assert(src != xnoreg, "sanity");
4357   InstructionMark im(this);
4358   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4359   attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
4360   attributes.set_is_evex_instruction();
4361   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
4362   emit_int8(0x30);
4363   emit_operand(src, dst);
4364 }
4365 
4366 void Assembler::evpmovwb(Address dst, KRegister mask, XMMRegister src, int vector_len) {
4367   assert(VM_Version::supports_avx512vlbw(), "");
4368   assert(src != xnoreg, "sanity");
4369   InstructionMark im(this);
4370   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
4371   attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
4372   attributes.reset_is_clear_context();
4373   attributes.set_embedded_opmask_register_specifier(mask);


4560          (vector_len == AVX_512bit? VM_Version::supports_evex() : 0)), "");
4561   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4562   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4563   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4564   emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
4565 }
4566 
4567 void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
4568   assert(isByte(mode), "invalid value");
4569   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4570   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
4571   InstructionMark im(this);
4572   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4573   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
4574   simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4575   emit_int8(0x70);
4576   emit_operand(dst, src);
4577   emit_int8(mode & 0xFF);
4578 }
4579 
4580 void Assembler::pshufhw(XMMRegister dst, XMMRegister src, int mode) {
4581   assert(isByte(mode), "invalid value");
4582   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4583   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4584   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
4585   emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
4586 }
4587 
4588 void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
4589   assert(isByte(mode), "invalid value");
4590   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4591   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4592   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
4593   emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
4594 }
4595 
4596 void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
4597   assert(isByte(mode), "invalid value");
4598   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4599   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
4600   InstructionMark im(this);
4601   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4602   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
4603   simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
4604   emit_int8(0x70);
4605   emit_operand(dst, src);
4606   emit_int8(mode & 0xFF);
4607 }
4608 
4609 void Assembler::evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
4610   assert(VM_Version::supports_evex(), "requires EVEX support");
4611   assert(vector_len == Assembler::AVX_256bit || vector_len == Assembler::AVX_512bit, "");
4612   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4613   attributes.set_is_evex_instruction();
4614   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4615   emit_int24(0x43, (0xC0 | encode), imm8 & 0xFF);
4616 }
4617 
4618 void Assembler::pshufpd(XMMRegister dst, XMMRegister src, int imm8) {
4619   assert(isByte(imm8), "invalid value");
4620   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4621   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4622   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4623   emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF);
4624 }
4625 
4626 void Assembler::vpshufpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
4627   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4628   attributes.set_rex_vex_w_reverted();
4629   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4630   emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF);
4631 }
4632 
4633 void Assembler::pshufps(XMMRegister dst, XMMRegister src, int imm8) {
4634   assert(isByte(imm8), "invalid value");
4635   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4636   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4637   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
4638   emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF);
4639 }
4640 
4641 void Assembler::vpshufps(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
4642   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4643   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
4644   emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF);
4645 }
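
// A standalone sketch (not HotSpot code) of how a PSHUFD/SHUFPS control byte
// like the 'mode'/'imm8' arguments above is built: four 2-bit lane selectors
// packed low to high.
#include <cstdint>
#include <cstdio>

static uint8_t shuffle_imm(int s0, int s1, int s2, int s3) {
  return (uint8_t)((s0 & 3) | ((s1 & 3) << 2) | ((s2 & 3) << 4) | ((s3 & 3) << 6));
}

int main() {
  printf("broadcast lane 0: 0x%02X\n", shuffle_imm(0, 0, 0, 0)); // 0x00
  printf("reverse lanes:    0x%02X\n", shuffle_imm(3, 2, 1, 0)); // 0x1B
  return 0;
}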
4646 
4647 void Assembler::psrldq(XMMRegister dst, int shift) {
4648   // Shift right 128 bit value in dst XMMRegister by shift number of bytes.
4649   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4650   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4651   int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4652   emit_int24(0x73, (0xC0 | encode), shift);
4653 }
4654 
4655 void Assembler::vpsrldq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
4656   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
4657          vector_len == AVX_256bit ? VM_Version::supports_avx2() :
4658          vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, "");
4659   InstructionAttr attributes(vector_len, /*vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4660   int encode = vex_prefix_and_encode(xmm3->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4661   emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
4662 }
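
// A standalone sketch (not HotSpot code) of why psrldq/vpsrldq above pass xmm3
// as the first register: PSRLDQ is 66 0F 73 /3 ib, a group opcode whose ModRM
// reg field carries the /3 extension rather than a register.
#include <cstdint>
#include <cstdio>

int main() {
  int ext = 3, rm = 1;  // /3 extension, xmm1 as r/m
  uint8_t modrm = (uint8_t)(0xC0 | (ext << 3) | (rm & 7));
  printf("0x%02X\n", modrm);  // 0xD9 -> psrldq xmm1, imm8
  return 0;
}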
4663 
4664 void Assembler::pslldq(XMMRegister dst, int shift) {
4665   // Shift left 128 bit value in dst XMMRegister by shift number of bytes.
4666   NOT_LP64(assert(VM_Version::supports_sse2(), ""));


4698 }
4699 
4700 void Assembler::vptest(XMMRegister dst, Address src) {
4701   assert(VM_Version::supports_avx(), "");
4702   InstructionMark im(this);
4703   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
4704   assert(dst != xnoreg, "sanity");
4705   // swap src<->dst for encoding
4706   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4707   emit_int8(0x17);
4708   emit_operand(dst, src);
4709 }
4710 
4711 void Assembler::vptest(XMMRegister dst, XMMRegister src) {
4712   assert(VM_Version::supports_avx(), "");
4713   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
4714   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4715   emit_int16(0x17, (0xC0 | encode));
4716 }
4717 
4718 void Assembler::vptest(XMMRegister dst, XMMRegister src, int vector_len) {
4719   assert(VM_Version::supports_avx(), "");
4720   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
4721   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4722   emit_int16(0x17, (0xC0 | encode));
4723 }
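
// A usage sketch with compiler intrinsics (not HotSpot code; assumes AVX):
// VPTEST sets ZF when (dst & src) == 0, which is how generated code performs
// a vector "all zero" check before branching.
#include <immintrin.h>
#include <cstdio>

int main() {
  __m256i v = _mm256_setzero_si256();
  // _mm256_testz_si256 returns 1 iff vptest would set ZF.
  printf("all zero: %d\n", _mm256_testz_si256(v, v));
  return 0;
}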
4724 
4725 void Assembler::punpcklbw(XMMRegister dst, Address src) {
4726   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4727   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
4728   InstructionMark im(this);
4729   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true);
4730   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
4731   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4732   emit_int8(0x60);
4733   emit_operand(dst, src);
4734 }
4735 
4736 void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
4737   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4738   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true);
4739   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4740   emit_int16(0x60, (0xC0 | encode));
4741 }
4742 
4743 void Assembler::punpckldq(XMMRegister dst, Address src) {
4744   NOT_LP64(assert(VM_Version::supports_sse2(), ""));


5373 
5374 void Assembler::xorl(Register dst, Address src) {
5375   InstructionMark im(this);
5376   prefix(src, dst);
5377   emit_int8(0x33);
5378   emit_operand(dst, src);
5379 }
5380 
5381 void Assembler::xorl(Register dst, Register src) {
5382   (void) prefix_and_encode(dst->encoding(), src->encoding());
5383   emit_arith(0x33, 0xC0, dst, src);
5384 }
5385 
5386 void Assembler::xorb(Register dst, Address src) {
5387   InstructionMark im(this);
5388   prefix(src, dst);
5389   emit_int8(0x32);
5390   emit_operand(dst, src);
5391 }
5392 
5393 void Assembler::xorw(Register dst, Register src) {
     emit_int8(0x66); // operand-size prefix; without it this emitted a 32-bit xor
5394   (void)prefix_and_encode(dst->encoding(), src->encoding());
5395   emit_arith(0x33, 0xC0, dst, src);
5396 }
5397 
5398 // AVX 3-operand scalar floating-point arithmetic instructions
5399 
5400 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
5401   assert(VM_Version::supports_avx(), "");
5402   InstructionMark im(this);
5403   InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
5404   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
5405   attributes.set_rex_vex_w_reverted();
5406   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
5407   emit_int8(0x58);
5408   emit_operand(dst, src);
5409 }
5410 
5411 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
5412   assert(VM_Version::supports_avx(), "");
5413   InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
5414   attributes.set_rex_vex_w_reverted();
5415   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
5416   emit_int16(0x58, (0xC0 | encode));
5417 }
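
// A sketch with compiler intrinsics (not HotSpot code; assumes SSE2) of the
// non-destructive three-operand semantics vaddsd encodes: the low double is
// added, the upper 64 bits pass through from the first source.
#include <immintrin.h>
#include <cstdio>

int main() {
  __m128d a = _mm_set_pd(7.0, 1.5);  // upper 7.0, lower 1.5
  __m128d b = _mm_set_pd(9.0, 2.5);
  __m128d c = _mm_add_sd(a, b);      // lower 4.0, upper stays 7.0
  printf("%f\n", _mm_cvtsd_f64(c));
  return 0;
}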


6291   attributes.set_rex_vex_w_reverted();
6292   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6293   emit_int8((unsigned char)0xFB);
6294   emit_operand(dst, src);
6295 }
6296 
6297 void Assembler::pmullw(XMMRegister dst, XMMRegister src) {
6298   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
6299   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
6300   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6301   emit_int16((unsigned char)0xD5, (0xC0 | encode));
6302 }
6303 
6304 void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
6305   assert(VM_Version::supports_sse4_1(), "");
6306   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6307   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6308   emit_int16(0x40, (0xC0 | encode));
6309 }
6310 
6311 void Assembler::pmuludq(XMMRegister dst, XMMRegister src) {
6312   assert(VM_Version::supports_sse2(), "");
6313   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6314   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6315   emit_int16((unsigned char)0xF4, (0xC0 | encode));
6316 }
6317 
6318 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6319   assert(UseAVX > 0, "requires some form of AVX");
6320   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
6321   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6322   emit_int16((unsigned char)0xD5, (0xC0 | encode));
6323 }
6324 
6325 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6326   assert(UseAVX > 0, "requires some form of AVX");
6327   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6328   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6329   emit_int16(0x40, (0xC0 | encode));
6330 }
6331 
6332 void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6333   assert(UseAVX > 2, "requires some form of EVEX");
6334   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
6335   attributes.set_is_evex_instruction();
6336   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6337   emit_int16(0x40, (0xC0 | encode));
6338 }
6339 
6340 void Assembler::vpmuludq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6341   assert(UseAVX > 0, "requires some form of AVX");
6342   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6343   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6344   emit_int16((unsigned char)0xF4, (0xC0 | encode));
6345 }
6346 
6347 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
6348   assert(UseAVX > 0, "requires some form of AVX");
6349   InstructionMark im(this);
6350   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
6351   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
6352   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6353   emit_int8((unsigned char)0xD5);
6354   emit_operand(dst, src);
6355 }
6356 
6357 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
6358   assert(UseAVX > 0, "requires some form of AVX");
6359   InstructionMark im(this);
6360   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6361   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
6362   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6363   emit_int8(0x40);
6364   emit_operand(dst, src);
6365 }
6366 
6367 void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
6368   assert(UseAVX > 2, "requires some form of EVEX");
6369   InstructionMark im(this);
6370   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
6371   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
6372   attributes.set_is_evex_instruction();
6373   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6374   emit_int8(0x40);
6375   emit_operand(dst, src);
6376 }
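
// A sketch with compiler intrinsics (not HotSpot code; assumes SSE2) of what
// PMULUDQ/VPMULUDQ above compute: the even 32-bit lanes are multiplied into
// full 64-bit products, a building block for synthesized 64-bit multiplies.
#include <immintrin.h>
#include <cstdio>

int main() {
  __m128i a = _mm_set_epi32(0, (int)0x80000000, 0, 3);
  __m128i b = _mm_set_epi32(0, 2, 0, 5);
  __m128i p = _mm_mul_epu32(a, b);  // lanes 0 and 2 only
  long long out[2];
  _mm_storeu_si128((__m128i*)out, p);
  printf("%lld %lld\n", out[0], out[1]);  // 15 4294967296
  return 0;
}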
6377 
6378 // Min, max
6379 void Assembler::pminsb(XMMRegister dst, XMMRegister src) {
6380   assert(VM_Version::supports_sse4_1(), "");
6381   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
6382   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6383   emit_int16(0x38, (0xC0 | encode));
6384 }
6385 
6386 void Assembler::vpminsb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6387   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
6388         (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
6389   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
6390   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6391   emit_int16(0x38, (0xC0 | encode));
6392 }
6393 
6394 void Assembler::pminsw(XMMRegister dst, XMMRegister src) {
6395   assert(VM_Version::supports_sse2(), "");
6396   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
6397   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6398   emit_int16((unsigned char)0xEA, (0xC0 | encode));
6399 }
6400 
6401 void Assembler::vpminsw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6402   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
6403         (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
6404   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
6405   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6406   emit_int16((unsigned char)0xEA, (0xC0 | encode));
6407 }
6408 
6409 void Assembler::pminsd(XMMRegister dst, XMMRegister src) {
6410   assert(VM_Version::supports_sse4_1(), "");
6411   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6412   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6413   emit_int16(0x39, (0xC0 | encode));
6414 }
6415 
6416 void Assembler::vpminsd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6417   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
6418         (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex()), "");
6419   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6420   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6421   emit_int16(0x39, (0xC0 | encode));
6422 }
6423 
6424 void Assembler::vpminsq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6425   assert(UseAVX > 2, "requires AVX512F");
6426   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6427   attributes.set_is_evex_instruction();
6428   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6429   emit_int16(0x39, (0xC0 | encode));
6430 }
6431 
6432 void Assembler::minps(XMMRegister dst, XMMRegister src) {
6433   NOT_LP64(assert(VM_Version::supports_sse(), ""));
6434   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6435   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
6436   emit_int16(0x5D, (0xC0 | encode));
6437 }

6438 void Assembler::vminps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6439   assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
6440   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6441   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
6442   emit_int16(0x5D, (0xC0 | encode));
6443 }
6444 
6445 void Assembler::minpd(XMMRegister dst, XMMRegister src) {
6446   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
6447   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6448   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6449   emit_int16(0x5D, (0xC0 | encode));
6450 }

6451 void Assembler::vminpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6452   assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
6453   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6454   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6455   emit_int16(0x5D, (0xC0 | encode));
6456 }
6457 
6458 void Assembler::pmaxsb(XMMRegister dst, XMMRegister src) {
6459   assert(VM_Version::supports_sse4_1(), "");
6460   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
6461   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6462   emit_int16(0x3C, (0xC0 | encode));
6463 }
6464 
6465 void Assembler::vpmaxsb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6466   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
6467         (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
6468   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
6469   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6470   emit_int16(0x3C, (0xC0 | encode));
6471 }
6472 
6473 void Assembler::pmaxsw(XMMRegister dst, XMMRegister src) {
6474   assert(VM_Version::supports_sse2(), "");
6475   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
6476   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6477   emit_int16((unsigned char)0xEE, (0xC0 | encode));
6478 }
6479 
6480 void Assembler::vpmaxsw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6481   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
6482         (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
6483   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
6484   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6485   emit_int16((unsigned char)0xEE, (0xC0 | encode));
6486 }
6487 
6488 void Assembler::pmaxsd(XMMRegister dst, XMMRegister src) {
6489   assert(VM_Version::supports_sse4_1(), "");
6490   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6491   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6492   emit_int16(0x3D, (0xC0 | encode));
6493 }
6494 
6495 void Assembler::vpmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6496   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
6497         (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex()), "");
6498   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6499   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6500   emit_int16(0x3D, (0xC0 | encode));
6501 }
6502 
6503 void Assembler::vpmaxsq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6504   assert(UseAVX > 2, "requires AVX512F");
6505   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6506   attributes.set_is_evex_instruction();
6507   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6508   emit_int16(0x3D, (0xC0 | encode));
6509 }
6510 
6511 void Assembler::maxps(XMMRegister dst, XMMRegister src) {
6512   NOT_LP64(assert(VM_Version::supports_sse(), ""));
6513   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6514   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
6515   emit_int16(0x5F, (0xC0 | encode));
6516 }
6517 
6518 void Assembler::vmaxps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6519   assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
6520   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6521   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
6522   emit_int16(0x5F, (0xC0 | encode));
6523 }
6524 
6525 void Assembler::maxpd(XMMRegister dst, XMMRegister src) {
6526   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
6527   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6528   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6529   emit_int16(0x5F, (0xC0 | encode));
6530 }
6531 
6532 void Assembler::vmaxpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6533   assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
6534   InstructionAttr attributes(vector_len, /* vex_w */true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6535   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6536   emit_int16(0x5F, (0xC0 | encode));
6537 }
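
// A sketch with compiler intrinsics (not HotSpot code; assumes SSE) of the
// asymmetric NaN behavior of MINPS/MAXPS emitted above: when an input is NaN
// the second source wins, so operand order matters to the JIT.
#include <immintrin.h>
#include <cmath>
#include <cstdio>

int main() {
  __m128 nan = _mm_set1_ps(NAN);
  __m128 one = _mm_set1_ps(1.0f);
  printf("%f\n", _mm_cvtss_f32(_mm_min_ps(nan, one)));  // 1.000000
  printf("%f\n", _mm_cvtss_f32(_mm_min_ps(one, nan)));  // nan
  return 0;
}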
6538 
6539 // Shift packed integers left by specified number of bits.
6540 void Assembler::psllw(XMMRegister dst, int shift) {
6541   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
6542   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
6543   // XMM6 is for /6 encoding: 66 0F 71 /6 ib
6544   int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6545   emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
6546 }
6547 
6548 void Assembler::pslld(XMMRegister dst, int shift) {
6549   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
6550   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6551   // XMM6 is for /6 encoding: 66 0F 72 /6 ib
6552   int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6553   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
6554 }
6555 
6556 void Assembler::psllq(XMMRegister dst, int shift) {
6557   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
6558   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6559   // XMM6 is for /6 encoding: 66 0F 73 /6 ib
6560   int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6561   emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
6562 }
6563 
6564 void Assembler::psllw(XMMRegister dst, XMMRegister shift) {
6565   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
6566   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
6567   int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6568   emit_int16((unsigned char)0xF1, (0xC0 | encode));
6569 }
6570 
6571 void Assembler::pslld(XMMRegister dst, XMMRegister shift) {
6572   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
6573   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6574   int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6575   emit_int16((unsigned char)0xF2, (0xC0 | encode));
6576 }
6577 
6578 void Assembler::psllq(XMMRegister dst, XMMRegister shift) {
6579   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
6580   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6581   attributes.set_rex_vex_w_reverted();
6582   int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6583   emit_int16((unsigned char)0xF3, (0xC0 | encode));
6584 }
6585 
6586 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
6587   assert(UseAVX > 0, "requires some form of AVX");
6588   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
6589   // XMM6 is for /6 encoding: 66 0F 71 /6 ib
6590   int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6591   emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
6592 }
6593 
6594 void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
6595   assert(UseAVX > 0, "requires some form of AVX");
6596   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
6597   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6598   // XMM6 is for /6 encoding: 66 0F 72 /6 ib
6599   int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6600   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
6601 }
6602 
6603 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
6604   assert(UseAVX > 0, "requires some form of AVX");
6605   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6606   attributes.set_rex_vex_w_reverted();
6607   // XMM6 is for /6 encoding: 66 0F 73 /6 ib
6608   int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6609   emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
6610 }
6611 
6612 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
6613   assert(UseAVX > 0, "requires some form of AVX");
6614   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
6615   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6616   emit_int16((unsigned char)0xF1, (0xC0 | encode));
6617 }
6618 
6619 void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
6620   assert(UseAVX > 0, "requires some form of AVX");
6621   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6622   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6623   emit_int16((unsigned char)0xF2, (0xC0 | encode));
6624 }
6625 
6626 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
6627   assert(UseAVX > 0, "requires some form of AVX");
6628   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6629   attributes.set_rex_vex_w_reverted();
6630   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6631   emit_int16((unsigned char)0xF3, (0xC0 | encode));
6632 }
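
// A standalone sketch (not HotSpot code) of the encoding twist in the
// immediate-shift emitters above: in the 66 0F 71-73 /N group the ModRM reg
// field holds the opcode extension, so the destination lands in VEX.vvvv --
// hence xmm6 as the first argument and dst in the middle.
#include <cstdint>
#include <cstdio>

int main() {
  int ext = 6, rm = 2 /* xmm2 */, dst = 5 /* xmm5 */;
  uint8_t modrm = (uint8_t)(0xC0 | (ext << 3) | (rm & 7));
  uint8_t vvvv  = (uint8_t)(~dst & 0xF);  // vvvv is stored bit-inverted
  printf("modrm=0x%02X vvvv=0x%X\n", modrm, vvvv);  // 0xF2, 0xA
  return 0;
}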


6840   emit_int16((unsigned char)0xDB, (0xC0 | encode));
6841 }
6842 
6843 void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
6844   assert(UseAVX > 0, "requires some form of AVX");
6845   InstructionMark im(this);
6846   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6847   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
6848   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6849   emit_int8((unsigned char)0xDB);
6850   emit_operand(dst, src);
6851 }
6852 
6853 void Assembler::vpandq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6854   assert(VM_Version::supports_evex(), "");
6855   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6856   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6857   emit_int16((unsigned char)0xDB, (0xC0 | encode));
6858 }
6859 
6860 // Variable shift packed integers logically left.
6861 void Assembler::vpsllvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
6862   assert(UseAVX > 1, "requires AVX2");
6863   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6864   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6865   emit_int16(0x47, (0xC0 | encode));
6866 }
6867 
6868 void Assembler::vpsllvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
6869   assert(UseAVX > 1, "requires AVX2");
6870   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6871   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6872   emit_int16(0x47, (0xC0 | encode));
6873 }
6874 
6875 // Variable shift packed integers logically right.
6876 void Assembler::vpsrlvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
6877   assert(UseAVX > 1, "requires AVX2");
6878   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6879   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6880   emit_int16(0x45, (0xC0 | encode));
6881 }
6882 
6883 void Assembler::vpsrlvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
6884   assert(UseAVX > 1, "requires AVX2");
6885   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6886   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6887   emit_int16(0x45, (0xC0 | encode));
6888 }
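
// A sketch with compiler intrinsics (not HotSpot code; assumes AVX2) of the
// variable shifts above: VPSLLVD shifts each dword lane by its own count, and
// counts >= 32 zero the lane instead of being masked.
#include <immintrin.h>
#include <cstdio>

int main() {
  __m128i v = _mm_set1_epi32(1);
  __m128i n = _mm_set_epi32(35, 4, 1, 0);  // per-lane counts
  __m128i r = _mm_sllv_epi32(v, n);
  int out[4];
  _mm_storeu_si128((__m128i*)out, r);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  // 1 2 16 0
  return 0;
}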
6889 
6890 // Variable shift packed integers arithmetically right.
6891 void Assembler::vpsravd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
6892   assert(UseAVX > 1, "requires AVX2");
6893   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6894   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6895   emit_int16(0x46, (0xC0 | encode));
6896 }
6897 
6898 void Assembler::evpsravw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6899   assert(VM_Version::supports_avx512bw(), "");
6900   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6901   attributes.set_is_evex_instruction();
6902   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6903   emit_int16(0x11, (0xC0 | encode));
6904 }
6905 
6906 void Assembler::evpsravq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
6907   assert(UseAVX > 2, "requires AVX512");
6908   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
6909   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6910   attributes.set_is_evex_instruction();
6911   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6912   emit_int16(0x46, (0xC0 | encode));
6913 }
6914 
6915 void Assembler::vpshldvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
6916   assert(VM_Version::supports_avx512_vbmi2(), "requires vbmi2");
6917   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6918   attributes.set_is_evex_instruction();
6919   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6920   emit_int16(0x71, (0xC0 | encode));
6921 }
6922 
6923 void Assembler::vpshrdvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
6924   assert(VM_Version::supports_avx512_vbmi2(), "requires vbmi2");
6925   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6926   attributes.set_is_evex_instruction();
6927   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6928   emit_int16(0x73, (0xC0 | encode));
6929 }
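
// A scalar model (not HotSpot code) of one VPSHLDVD lane from above: the
// destination and source lanes are concatenated, shifted left by the per-lane
// count, and the high half is kept.
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t hi = 0x12345678, lo = 0x9ABCDEF0, n = 8;
  uint64_t cat = ((uint64_t)hi << 32) | lo;
  uint32_t r = (uint32_t)((cat << n) >> 32);
  printf("%08X\n", r);  // 3456789A
  return 0;
}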
6930 
6931 void Assembler::pandn(XMMRegister dst, XMMRegister src) {
6932   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
6933   InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6934   attributes.set_rex_vex_w_reverted();
6935   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6936   emit_int16((unsigned char)0xDF, (0xC0 | encode));
6937 }
6938 
6939 void Assembler::vpandn(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6940   assert(UseAVX > 0, "requires some form of AVX");
6941   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6942   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6943   emit_int16((unsigned char)0xDF, (0xC0 | encode));
6944 }
6945 

6946 void Assembler::por(XMMRegister dst, XMMRegister src) {
6947   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
6948   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6949   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6950   emit_int16((unsigned char)0xEB, (0xC0 | encode));
6951 }
6952 
6953 void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6954   assert(UseAVX > 0, "requires some form of AVX");
6955   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6956   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6957   emit_int16((unsigned char)0xEB, (0xC0 | encode));
6958 }
6959 
6960 void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
6961   assert(UseAVX > 0, "requires some form of AVX");
6962   InstructionMark im(this);
6963   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6964   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
6965   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6966   emit_int8((unsigned char)0xEB);
6967   emit_operand(dst, src);
6968 }
6969 
6970 void Assembler::vporq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6971   assert(VM_Version::supports_evex(), "");
6972   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6973   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6974   emit_int16((unsigned char)0xEB, (0xC0 | encode));
6975 }
6976 
6977 
6978 void Assembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
6979   assert(VM_Version::supports_evex(), "");
6980   // Encoding: EVEX.NDS.XXX.66.0F.W0 EB /r
6981   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
6982   attributes.set_is_evex_instruction();
6983   attributes.set_embedded_opmask_register_specifier(mask);
6984   if (merge) {
6985     attributes.reset_is_clear_context();
6986   }
6987   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6988   emit_int16((unsigned char)0xEB, (0xC0 | encode));
6989 }
6990 
6991 void Assembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
6992   assert(VM_Version::supports_evex(), "");
6993   // Encoding: EVEX.NDS.XXX.66.0F.W0 EB /r
6994   InstructionMark im(this);
6995   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
6996   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
6997   attributes.set_is_evex_instruction();
6998   attributes.set_embedded_opmask_register_specifier(mask);
6999   if (merge) {
7000     attributes.reset_is_clear_context();
7001   }
7002   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7003   emit_int8((unsigned char)0xEB);
7004   emit_operand(dst, src);
7005 }
7006 
7007 void Assembler::pxor(XMMRegister dst, XMMRegister src) {
7008   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
7009   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7010   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7011   emit_int16((unsigned char)0xEF, (0xC0 | encode));
7012 }
7013 
7014 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
7015   assert(UseAVX > 0, "requires some form of AVX");
7016   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7017   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7018   emit_int16((unsigned char)0xEF, (0xC0 | encode));
7019 }
7020 
7021 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
7022   assert(UseAVX > 0, "requires some form of AVX");
7023   InstructionMark im(this);
7024   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7025   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
7026   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7027   emit_int8((unsigned char)0xEF);
7028   emit_operand(dst, src);
7029 }
7030 
7031 void Assembler::vpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
7032   assert(UseAVX > 2, "requires some form of EVEX");
7033   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7034   attributes.set_rex_vex_w_reverted();
7035   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7036   emit_int16((unsigned char)0xEF, (0xC0 | encode));
7037 }
7038 
7039 void Assembler::evpxord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
7040   assert(VM_Version::supports_evex(), "");
7041   // Encoding: EVEX.NDS.XXX.66.0F.W0 EF /r
7042   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
7043   attributes.set_is_evex_instruction();
7044   attributes.set_embedded_opmask_register_specifier(mask);
7045   if (merge) {
7046     attributes.reset_is_clear_context();
7047   }
7048   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7049   emit_int16((unsigned char)0xEF, (0xC0 | encode));
7050 }
7051 
7052 void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
7053   assert(VM_Version::supports_evex(), "requires EVEX support");
7054   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7055   attributes.set_is_evex_instruction();
7056   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7057   emit_int16((unsigned char)0xEF, (0xC0 | encode));

7058 }
7059 
7060 void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
7061   assert(VM_Version::supports_evex(), "requires EVEX support");
7062   assert(dst != xnoreg, "sanity");
7063   InstructionMark im(this);
7064   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7065   attributes.set_is_evex_instruction();
7066   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
7067   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7068   emit_int8((unsigned char)0xEF);
7069   emit_operand(dst, src);
7070 }
7071 
7072 void Assembler::vpternlogd(XMMRegister dst, int imm8, XMMRegister src2, XMMRegister src3, int vector_len) {
7073   assert(VM_Version::supports_evex(), "requires EVEX support");
7074   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
7075   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7076   attributes.set_is_evex_instruction();
7077   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);


7662   emit_int16(0x7B, (0xC0 | encode));
7663 }
7664 
7665 // duplicate the 4-byte integer in src across all dword elements of dst; the 128/256-bit forms require AVX512VL
7666 void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) {
7667   assert(VM_Version::supports_evex(), "");
7668   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7669   attributes.set_is_evex_instruction();
7670   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7671   emit_int16(0x7C, (0xC0 | encode));
7672 }
7673 
7674 // duplicate the 8-byte integer in src across all qword elements of dst; the 128/256-bit forms require AVX512VL
7675 void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) {
7676   assert(VM_Version::supports_evex(), "");
7677   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7678   attributes.set_is_evex_instruction();
7679   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7680   emit_int16(0x7C, (0xC0 | encode));
7681 }
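// --- Usage sketch (editor's illustration, not part of this file): replicate
// the low 32 bits of a GPR across every dword lane of a zmm register:
//
//   __ evpbroadcastd(xmm0, rax, Assembler::AVX_512bit);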
7682 
7683 void Assembler::vpgatherdd(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
7684   assert(VM_Version::supports_avx2(), "");
7685   assert(vector_len == Assembler::AVX_128bit || vector_len == Assembler::AVX_256bit, "");
7686   assert(dst != xnoreg, "sanity");
7687   assert(src.isxmmindex(),"expected to be xmm index");
7688   assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
7689   InstructionMark im(this);
7690   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
7691   vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7692   emit_int8((unsigned char)0x90);
7693   emit_operand(dst, src);
7694 }
7695 
7696 void Assembler::vpgatherdq(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
7697   assert(VM_Version::supports_avx2(), "");
7698   assert(vector_len == Assembler::AVX_128bit || vector_len == Assembler::AVX_256bit, "");
7699   assert(dst != xnoreg, "sanity");
7700   assert(src.isxmmindex(),"expected to be xmm index");
7701   assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
7702   InstructionMark im(this);
7703   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
7704   vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7705   emit_int8((unsigned char)0x90);
7706   emit_operand(dst, src);
7707 }
7708 
7709 void Assembler::vgatherdpd(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
7710   assert(VM_Version::supports_avx2(), "");
7711   assert(vector_len == Assembler::AVX_128bit || vector_len == Assembler::AVX_256bit, "");
7712   assert(dst != xnoreg, "sanity");
7713   assert(src.isxmmindex(),"expected to be xmm index");
7714   assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
7715   InstructionMark im(this);
7716   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
7717   vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7718   emit_int8((unsigned char)0x92);
7719   emit_operand(dst, src);
7720 }
7721 
7722 void Assembler::vgatherdps(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
7723   assert(VM_Version::supports_avx2(), "");
7724   assert(vector_len == Assembler::AVX_128bit || vector_len == Assembler::AVX_256bit, "");
7725   assert(dst != xnoreg, "sanity");
7726   assert(src.isxmmindex(),"expected to be xmm index");
7727   assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
7728   InstructionMark im(this);
7729   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ true);
7730   vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7731   emit_int8((unsigned char)0x92);
7732   emit_operand(dst, src);
7733 }
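// --- Usage sketch (editor's illustration, not part of this file). An AVX2
// gather loads only the lanes whose vector-mask sign bit is set and clears
// the mask as it completes lanes, so the mask must be rebuilt before every
// gather:
//
//   __ vpcmpeqd(xmm2, xmm2, xmm2, Assembler::AVX_256bit);  // all-ones mask
//   __ vpgatherdd(xmm0, Address(rsi, xmm1, Address::times_4), xmm2, Assembler::AVX_256bit);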
7734 void Assembler::evpgatherdd(XMMRegister dst, KRegister mask, Address src, int vector_len) {
7735   assert(VM_Version::supports_evex(), "");
7736   assert(dst != xnoreg, "sanity");
7737   assert(src.isxmmindex(),"expected to be xmm index");
7738   assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
7739   assert(mask != k0, "instruction will #UD if mask is in k0");
7740   InstructionMark im(this);
7741   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
7742   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
7743   attributes.reset_is_clear_context();
7744   attributes.set_embedded_opmask_register_specifier(mask);
7745   attributes.set_is_evex_instruction();
7746   // swap src<->dst for encoding
7747   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7748   emit_int8((unsigned char)0x90);
7749   emit_operand(dst, src);
7750 }
7751 
7752 void Assembler::evpgatherdq(XMMRegister dst, KRegister mask, Address src, int vector_len) {
7753   assert(VM_Version::supports_evex(), "");
7754   assert(dst != xnoreg, "sanity");
7755   assert(src.isxmmindex(),"expected to be xmm index");
7756   assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
7757   assert(mask != k0, "instruction will #UD if mask is in k0");
7758   InstructionMark im(this);
7759   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
7760   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
7761   attributes.reset_is_clear_context();
7762   attributes.set_embedded_opmask_register_specifier(mask);
7763   attributes.set_is_evex_instruction();
7764   // swap src<->dst for encoding
7765   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7766   emit_int8((unsigned char)0x90);
7767   emit_operand(dst, src);
7768 }
7769 
7770 void Assembler::evgatherdpd(XMMRegister dst, KRegister mask, Address src, int vector_len) {
7771   assert(VM_Version::supports_evex(), "");
7772   assert(dst != xnoreg, "sanity");
7773   assert(src.isxmmindex(),"expected to be xmm index");
7774   assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
7775   assert(mask != k0, "instruction will #UD if mask is in k0");
7776   InstructionMark im(this);
7777   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
7778   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
7779   attributes.reset_is_clear_context();
7780   attributes.set_embedded_opmask_register_specifier(mask);
7781   attributes.set_is_evex_instruction();
7782   // swap src<->dst for encoding
7783   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7784   emit_int8((unsigned char)0x92);
7785   emit_operand(dst, src);
7786 }
7787 
7788 void Assembler::evgatherdps(XMMRegister dst, KRegister mask, Address src, int vector_len) {
7789   assert(VM_Version::supports_evex(), "");
7790   assert(dst != xnoreg, "sanity");
7791   assert(src.isxmmindex(),"expected to be xmm index");
7792   assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
7793   assert(mask != k0, "instruction will #UD if mask is in k0");
7794   InstructionMark im(this);
7795   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
7796   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
7797   attributes.reset_is_clear_context();
7798   attributes.set_embedded_opmask_register_specifier(mask);
7799   attributes.set_is_evex_instruction();
7800   // swap src<->dst for encoding
7801   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7802   emit_int8((unsigned char)0x92);
7803   emit_operand(dst, src);
7804 }
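// --- Usage note (editor's, not part of this file): the EVEX gathers above
// take a k-register predicate; k0 is rejected because the encoding treats it
// as "no mask". Like the VEX forms, the mask is consumed -- completed lanes
// clear their k bits. A sketch with dword indices in xmm1:
//
//   __ evpgatherdd(xmm0, k1, Address(rsi, xmm1, Address::times_4), Assembler::AVX_512bit);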
7805 
7806 void Assembler::evpscatterdd(Address dst, KRegister mask, XMMRegister src, int vector_len) {
7807   assert(VM_Version::supports_evex(), "");
7808   assert(mask != k0, "instruction will #UD if mask is in k0");
7809   InstructionMark im(this);
7810   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
7811   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
7812   attributes.reset_is_clear_context();
7813   attributes.set_embedded_opmask_register_specifier(mask);
7814   attributes.set_is_evex_instruction();
7815   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7816   emit_int8((unsigned char)0xA0);
7817   emit_operand(src, dst);
7818 }
7819 
7820 void Assembler::evpscatterdq(Address dst, KRegister mask, XMMRegister src, int vector_len) {
7821   assert(VM_Version::supports_evex(), "");
7822   assert(mask != k0, "instruction will #UD if mask is in k0");
7823   InstructionMark im(this);
7824   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
7825   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
7826   attributes.reset_is_clear_context();
7827   attributes.set_embedded_opmask_register_specifier(mask);
7828   attributes.set_is_evex_instruction();
7829   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7830   emit_int8((unsigned char)0xA0);
7831   emit_operand(src, dst);
7832 }
7833 
7834 void Assembler::evscatterdps(Address dst, KRegister mask, XMMRegister src, int vector_len) {
7835   assert(VM_Version::supports_evex(), "");
7836   assert(mask != k0, "instruction will #UD if mask is in k0");
7837   InstructionMark im(this);
7838   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
7839   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
7840   attributes.reset_is_clear_context();
7841   attributes.set_embedded_opmask_register_specifier(mask);
7842   attributes.set_is_evex_instruction();
7843   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7844   emit_int8((unsigned char)0xA2);
7845   emit_operand(src, dst);
7846 }
7847 
7848 void Assembler::evscatterdpd(Address dst, KRegister mask, XMMRegister src, int vector_len) {
7849   assert(VM_Version::supports_evex(), "");
7850   assert(mask != k0, "instruction will #UD if mask is in k0");
7851   InstructionMark im(this);
7852   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
7853   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
7854   attributes.reset_is_clear_context();
7855   attributes.set_embedded_opmask_register_specifier(mask);
7856   attributes.set_is_evex_instruction();
7857   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7858   emit_int8((unsigned char)0xA2);
7859   emit_operand(src, dst);
7860 }
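// --- Usage sketch (editor's illustration, not part of this file): scatters
// mirror the gathers, storing the src lanes whose k-mask bit is set:
//
//   __ evpscatterdd(Address(rdi, xmm1, Address::times_4), k1, xmm0, Assembler::AVX_512bit);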
7861 // Carry-Less Multiplication Quadword
7862 void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
7863   assert(VM_Version::supports_clmul(), "");
7864   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
7865   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
7866   emit_int24(0x44, (0xC0 | encode), (unsigned char)mask);
7867 }
7868 
7869 // Carry-Less Multiplication Quadword
7870 void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
7871   assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
7872   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
7873   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
7874   emit_int24(0x44, (0xC0 | encode), (unsigned char)mask);
7875 }
7876 
7877 void Assembler::evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len) {
7878   assert(VM_Version::supports_avx512_vpclmulqdq(), "Requires vector carryless multiplication support");
7879   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7880   attributes.set_is_evex_instruction();
7881   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
7882   emit_int24(0x44, (0xC0 | encode), (unsigned char)mask);
7883 }
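// --- Usage note (editor's, not part of this file): the pclmulqdq immediate
// selects which 64-bit halves are multiplied -- bit 0 picks the qword of the
// first source, bit 4 that of the second, so 0x00 is low*low and 0x11 is
// high*high:
//
//   __ pclmulqdq(xmm0, xmm1, 0x11);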
7884 
7885 void Assembler::vzeroupper_uncached() {
7886   if (VM_Version::supports_vzeroupper()) {
7887     InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
7888     (void)vex_prefix_and_encode(0, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
7889     emit_int8(0x77);
7890   }
7891 }
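// --- Usage note (editor's, not part of this file): vzeroupper zeroes bits
// 255:128 of all ymm registers and is emitted before transitions into legacy
// SSE code to avoid the AVX->SSE upper-state transition penalty.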
7892 
7893 #ifndef _LP64
7894 // 32bit only pieces of the assembler
7895 


8438   int byte3 = ((~nds_enc) & 0xf) << 3;
8439   // p[10] is always 1
8440   byte3 |= EVEX_F;
8441   byte3 |= (vex_w & 1) << 7;
8442   // confine the opcode prefix extension (pp bits) to the lower two bits,
8443   // encoding one of {66, F3, F2}
8444   byte3 |= pre;
8445 
8446   // P2: byte 4 as zL'Lbv'aaa
8447   // the opmask (k) register specifier occupies the low 3 bits as aaa
8448   int byte4 = (_attributes->is_no_reg_mask()) ?
8449               0 :
8450               _attributes->get_embedded_opmask_register_specifier();
8451   // EVEX.V' extends EVEX.vvvv or the VSIB index (VIDX)
8452   byte4 |= (evex_v ? 0: EVEX_V);
8453   // third is EVEX.b for broadcast actions
8454   byte4 |= (_attributes->is_extended_context() ? EVEX_Rb : 0);
8455   // fourth is EVEX.L'L for vector length: 0 = 128, 1 = 256, 2 = 512 (1024 is not supported)
8456   byte4 |= ((_attributes->get_vector_len())& 0x3) << 5;
8457   // last is EVEX.z for zero/merge actions
8458   if (_attributes->is_no_reg_mask() == false &&
8459       _attributes->get_embedded_opmask_register_specifier() != 0) {
8460     byte4 |= (_attributes->is_clear_context() ? EVEX_Z : 0);
8461   }
8462 
8463   emit_int32(EVEX_4bytes, byte2, byte3, byte4);
8464 }
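// --- Worked example (editor's note, not part of this file): for a 512-bit
// instruction using k1 with zeroing, no broadcast, and registers that do not
// need the V' extension, the byte built above works out to
//   byte4 = 0x01 (aaa) | 0x08 (V') | (2 << 5) (L'L) | 0x80 (z) = 0xC9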
8465 
8466 void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) {
8467   bool vex_r = (xreg_enc & 8) == 8;
8468   bool vex_b = adr.base_needs_rex();
8469   bool vex_x;
8470   if (adr.isxmmindex()) {
8471     vex_x = adr.xmmindex_needs_rex();
8472   } else {
8473     vex_x = adr.index_needs_rex();
8474   }
8475   set_attributes(attributes);
8476   attributes->set_current_assembler(this);
8477 
8478   // For an EVEX-capable instruction that is not marked as EVEX-only, check whether
8479   // it is allowed in legacy (VEX) mode, i.e. whether its operands fit that encoding.


8607   attributes.set_rex_vex_w_reverted();
8608   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
8609   emit_int16(0x5F, (0xC0 | encode));
8610 }
8611 
8612 void Assembler::vminss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
8613   assert(VM_Version::supports_avx(), "");
8614   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
8615   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
8616   emit_int16(0x5D, (0xC0 | encode));
8617 }
8618 
8619 void Assembler::vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
8620   assert(VM_Version::supports_avx(), "");
8621   InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
8622   attributes.set_rex_vex_w_reverted();
8623   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
8624   emit_int16(0x5D, (0xC0 | encode));
8625 }
8626 
8627 void Assembler::vcmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) {
8628   assert(VM_Version::supports_avx(), "");
8629   assert(vector_len <= AVX_256bit, "");
8630   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
8631   int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8632   emit_int24((unsigned char)0xC2, (0xC0 | encode), (0xF & cop));
8633 }
8634 
8635 void Assembler::blendvpb(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
8636   assert(VM_Version::supports_avx(), "");
8637   assert(vector_len <= AVX_256bit, "");
8638   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
8639   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8640   int src2_enc = src2->encoding();
8641   emit_int24(0x4C, (0xC0 | encode), (0xF0 & src2_enc << 4));
8642 }
8643 
8644 void Assembler::vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
8645   assert(UseAVX > 0 && (vector_len == AVX_128bit || vector_len == AVX_256bit), "");
8646   assert(vector_len <= AVX_256bit, "");
8647   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
8648   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8649   int src2_enc = src2->encoding();
8650   emit_int24(0x4B, (0xC0 | encode), (0xF0 & src2_enc << 4));
8651 }
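// --- Encoding note (editor's, not part of this file): the VEX blend forms
// above carry their third vector source in imm8[7:4], which is why src2's
// register number is shifted into the top nibble; only xmm0-xmm15 can be
// named that way.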
8652 
8653 void Assembler::vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
8654   assert(VM_Version::supports_avx2(), "");
8655   assert(vector_len <= AVX_256bit, "");
8656   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
8657   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8658   emit_int24(0x02, (0xC0 | encode), (unsigned char)imm8);
8659 }
8660 
8661 void Assembler::vcmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int comparison, int vector_len) {
8662   assert(VM_Version::supports_avx(), "");
8663   assert(vector_len <= AVX_256bit, "");
8664   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
8665   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
8666   emit_int24((unsigned char)0xC2, (0xC0 | encode), (unsigned char)comparison);
8667 }
8668 
8669 void Assembler::evcmpps(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
8670                         ComparisonPredicateFP comparison, int vector_len) {
8671   assert(VM_Version::supports_evex(), "");
8672   // Encoding: EVEX.NDS.XXX.0F.W0 C2 /r ib
8673   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
8674   attributes.set_is_evex_instruction();
8675   attributes.set_embedded_opmask_register_specifier(mask);
8676   attributes.reset_is_clear_context();
8677   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
8678   emit_int24((unsigned char)0xC2, (0xC0 | encode), comparison);
8679 }
8680 
8681 void Assembler::evcmppd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
8682                         ComparisonPredicateFP comparison, int vector_len) {
8683   assert(VM_Version::supports_evex(), "");
8684   // Encoding: EVEX.NDS.XXX.66.0F.W1 C2 /r ib
8685   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
8686   attributes.set_is_evex_instruction();
8687   attributes.set_embedded_opmask_register_specifier(mask);
8688   attributes.reset_is_clear_context();
8689   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8690   emit_int24((unsigned char)0xC2, (0xC0 | encode), comparison);
8691 }
8692 
8693 void Assembler::blendvps(XMMRegister dst, XMMRegister src) {
8694   assert(VM_Version::supports_sse4_1(), "");
8695   assert(UseAVX <= 0, "sse encoding is inconsistent with avx encoding");
8696   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
8697   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8698   emit_int16(0x14, (0xC0 | encode));
8699 }
8700 
8701 void Assembler::blendvpd(XMMRegister dst, XMMRegister src) {
8702   assert(VM_Version::supports_sse4_1(), "");
8703   assert(UseAVX <= 0, "sse encoding is inconsistent with avx encoding");
8704   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
8705   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8706   emit_int16(0x15, (0xC0 | encode));
8707 }
8708 
8709 void Assembler::pblendvb(XMMRegister dst, XMMRegister src) {
8710   assert(VM_Version::supports_sse4_1(), "");
8711   assert(UseAVX <= 0, "sse encoding is inconsistent with avx encoding");
8712   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
8713   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8714   emit_int16(0x10, (0xC0 | encode));
8715 }
8716 
8717 void Assembler::vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
8718   assert(UseAVX > 0 && (vector_len == AVX_128bit || vector_len == AVX_256bit), "");
8719   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
8720   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8721   int src2_enc = src2->encoding();
8722   emit_int24(0x4A, (0xC0 | encode), (0xF0 & src2_enc << 4));
8723 }
8724 
8725 void Assembler::vblendps(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
8726   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);

8727   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8728   emit_int24(0x0C, (0xC0 | encode), imm8);
8729 }
8730 
8731 void Assembler::vpcmpgtb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8732   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
8733   assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
8734   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
8735   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8736   emit_int16(0x64, (0xC0 | encode));
8737 }
8738 
8739 void Assembler::vpcmpgtw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8740   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
8741   assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
8742   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
8743   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8744   emit_int16(0x65, (0xC0 | encode));
8745 }
8746 
8747 void Assembler::vpcmpgtd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8748   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
8749   assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
8750   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
8751   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8752   emit_int16(0x66, (0xC0 | encode));
8753 }
8754 
8755 void Assembler::vpcmpgtq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8756   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
8757   assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
8758   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
8759   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8760   emit_int16(0x37, (0xC0 | encode));
8761 }
8762 
8763 void Assembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
8764                         int comparison, int vector_len) {
8765   assert(VM_Version::supports_evex(), "");
8766   assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
8767   // Encoding: EVEX.NDS.XXX.66.0F3A.W0 1F /r ib
8768   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
8769   attributes.set_is_evex_instruction();
8770   attributes.set_embedded_opmask_register_specifier(mask);
8771   attributes.reset_is_clear_context();
8772   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8773   emit_int24(0x1F, (0xC0 | encode), comparison);
8774 }
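// --- Usage sketch (editor's illustration, not part of this file): EVEX
// integer compares write a predicate into a k-register instead of a vector of
// all-ones lanes; passing k0 as the input mask compares every lane:
//
//   __ evpcmpd(k2, k0, xmm1, xmm2, Assembler::lt, Assembler::AVX_512bit);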
8775 
8776 void Assembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
8777                         int comparison, int vector_len) {
8778   assert(VM_Version::supports_evex(), "");
8779   assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
8780   // Encoding: EVEX.NDS.XXX.66.0F3A.W0 1F /r ib
8781   InstructionMark im(this);
8782   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
8783   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8784   attributes.set_is_evex_instruction();
8785   attributes.set_embedded_opmask_register_specifier(mask);
8786   attributes.reset_is_clear_context();
8787   int dst_enc = kdst->encoding();
8788   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8789   emit_int8((unsigned char)0x1F);
8790   emit_operand(as_Register(dst_enc), src);
8791   emit_int8((unsigned char)comparison);
8792 }
8793 
8794 void Assembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
8795                         int comparison, int vector_len) {
8796   assert(VM_Version::supports_evex(), "");
8797   assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
8798   // Encoding: EVEX.NDS.XXX.66.0F3A.W1 1F /r ib
8799   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
8800   attributes.set_is_evex_instruction();
8801   attributes.set_embedded_opmask_register_specifier(mask);
8802   attributes.reset_is_clear_context();
8803   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8804   emit_int24(0x1F, (0xC0 | encode), comparison);
8805 }
8806 
8807 void Assembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
8808                         int comparison, int vector_len) {
8809   assert(VM_Version::supports_evex(), "");
8810   assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
8811   // Encoding: EVEX.NDS.XXX.66.0F3A.W1 1F /r ib
8812   InstructionMark im(this);
8813   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
8814   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8815   attributes.set_is_evex_instruction();
8816   attributes.set_embedded_opmask_register_specifier(mask);
8817   attributes.reset_is_clear_context();
8818   int dst_enc = kdst->encoding();
8819   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8820   emit_int8((unsigned char)0x1F);
8821   emit_operand(as_Register(dst_enc), src);
8822   emit_int8((unsigned char)comparison);
8823 }
8824 
8825 void Assembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
8826                         int comparison, int vector_len) {
8827   assert(VM_Version::supports_evex(), "");
8828   assert(VM_Version::supports_avx512bw(), "");
8829   assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
8830   // Encoding: EVEX.NDS.XXX.66.0F3A.W0 3F /r ib
8831   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
8832   attributes.set_is_evex_instruction();
8833   attributes.set_embedded_opmask_register_specifier(mask);
8834   attributes.reset_is_clear_context();
8835   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8836   emit_int24(0x3F, (0xC0 | encode), comparison);
8837 }
8838 
8839 void Assembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
8840                         int comparison, int vector_len) {
8841   assert(VM_Version::supports_evex(), "");
8842   assert(VM_Version::supports_avx512bw(), "");
8843   assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
8844   // Encoding: EVEX.NDS.XXX.66.0F3A.W0 3F /r ib
8845   InstructionMark im(this);
8846   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
8847   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
8848   attributes.set_is_evex_instruction();
8849   attributes.set_embedded_opmask_register_specifier(mask);
8850   attributes.reset_is_clear_context();
8851   int dst_enc = kdst->encoding();
8852   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8853   emit_int8((unsigned char)0x3F);
8854   emit_operand(as_Register(dst_enc), src);
8855   emit_int8((unsigned char)comparison);
8856 }
8857 
8858 void Assembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
8859                         int comparison, int vector_len) {
8860   assert(VM_Version::supports_evex(), "");
8861   assert(VM_Version::supports_avx512bw(), "");
8862   assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
8863   // Encoding: EVEX.NDS.XXX.66.0F3A.W1 3F /r ib
8864   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
8865   attributes.set_is_evex_instruction();
8866   attributes.set_embedded_opmask_register_specifier(mask);
8867   attributes.reset_is_clear_context();
8868   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8869   emit_int24(0x3F, (0xC0 | encode), comparison);
8870 }
8871 
8872 void Assembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
8873                         int comparison, int vector_len) {
8874   assert(VM_Version::supports_evex(), "");
8875   assert(VM_Version::supports_avx512bw(), "");
8876   assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
8877   // Encoding: EVEX.NDS.XXX.66.0F3A.W1 3F /r ib
8878   InstructionMark im(this);
8879   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
8880   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
8881   attributes.set_is_evex_instruction();
8882   attributes.set_embedded_opmask_register_specifier(mask);
8883   attributes.reset_is_clear_context();
8884   int dst_enc = kdst->encoding();
8885   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8886   emit_int8((unsigned char)0x3F);
8887   emit_operand(as_Register(dst_enc), src);
8888   emit_int8((unsigned char)comparison);
8889 }
8890 
8891 void Assembler::vpblendvb(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len) {
8892   assert(VM_Version::supports_avx(), "");
8893   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
8894   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8895   int mask_enc = mask->encoding();
8896   emit_int24(0x4C, (0xC0 | encode), 0xF0 & mask_enc << 4);
8897 }
8898 
8899 void Assembler::evblendmpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
8900   assert(VM_Version::supports_evex(), "");
8901   // Encoding: EVEX.NDS.XXX.66.0F38.W1 65 /r
8902   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
8903   attributes.set_is_evex_instruction();
8904   attributes.set_embedded_opmask_register_specifier(mask);
8905   if (merge) {
8906     attributes.reset_is_clear_context();
8907   }
8908   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8909   emit_int16(0x65, (0xC0 | encode));
8910 }
8911 
8912 void Assembler::evblendmps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
8913   assert(VM_Version::supports_evex(), "");
8914   // Encoding: EVEX.NDS.XXX.66.0F38.W0 65 /r
8915   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
8916   attributes.set_is_evex_instruction();
8917   attributes.set_embedded_opmask_register_specifier(mask);
8918   if (merge) {
8919     attributes.reset_is_clear_context();
8920   }
8921   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8922   emit_int16(0x65, (0xC0 | encode));
8923 }
8924 
8925 void Assembler::evpblendmb (XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
8926   assert(VM_Version::supports_evex(), "");
8927   assert(VM_Version::supports_avx512bw(), "");
8928   // Encoding: EVEX.NDS.512.66.0F38.W0 66 /r
8929   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
8930   attributes.set_is_evex_instruction();
8931   attributes.set_embedded_opmask_register_specifier(mask);
8932   if (merge) {
8933     attributes.reset_is_clear_context();
8934   }
8935   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8936   emit_int16(0x66, (0xC0 | encode));
8937 }
8938 
8939 void Assembler::evpblendmw (XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
8940   assert(VM_Version::supports_evex(), "");
8941   assert(VM_Version::supports_avx512bw(), "");
8942   // Encoding: EVEX.NDS.512.66.0F38.W1 66 /r
8943   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
8944   attributes.set_is_evex_instruction();
8945   attributes.set_embedded_opmask_register_specifier(mask);
8946   if (merge) {
8947     attributes.reset_is_clear_context();
8948   }
8949   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8950   emit_int16(0x66, (0xC0 | encode));
8951 }
8952 
8953 void Assembler::evpblendmd (XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
8954   assert(VM_Version::supports_evex(), "");
8955   //Encoding: EVEX.NDS.512.66.0F38.W0 64 /r
8956   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
8957   attributes.set_is_evex_instruction();
8958   attributes.set_embedded_opmask_register_specifier(mask);
8959   if (merge) {
8960     attributes.reset_is_clear_context();
8961   }
8962   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8963   emit_int16(0x64, (0xC0 | encode));
8964 }
8965 
8966 void Assembler::evpblendmq (XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
8967   assert(VM_Version::supports_evex(), "");
8968   //Encoding: EVEX.NDS.512.66.0F38.W1 64 /r
8969   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
8970   attributes.set_is_evex_instruction();
8971   attributes.set_embedded_opmask_register_specifier(mask);
8972   if (merge) {
8973     attributes.reset_is_clear_context();
8974   }
8975   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8976   emit_int16(0x64, (0xC0 | encode));
8977 }
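// --- Usage sketch (editor's illustration, not part of this file): the
// blend-with-mask family selects per lane between the two vector sources --
// lanes with the k-mask bit set take src, the rest take nds:
//
//   __ evpblendmd(xmm0, k1, xmm1, xmm2, /* merge */ true, Assembler::AVX_512bit);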
8978 
8979 void Assembler::shlxl(Register dst, Register src1, Register src2) {
8980   assert(VM_Version::supports_bmi2(), "");
8981   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
8982   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8983   emit_int16((unsigned char)0xF7, (0xC0 | encode));
8984 }
8985 
8986 void Assembler::shlxq(Register dst, Register src1, Register src2) {
8987   assert(VM_Version::supports_bmi2(), "");
8988   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
8989   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8990   emit_int16((unsigned char)0xF7, (0xC0 | encode));
8991 }
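// --- Usage sketch (editor's illustration, not part of this file): the BMI2
// shifts take the count in any GPR and, unlike SHL/SHR, leave RFLAGS
// untouched:
//
//   __ shlxq(rax, rbx, rcx);  // rax = rbx << (rcx & 63)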
8992 
8993 #ifndef _LP64
8994 
8995 void Assembler::incl(Register dst) {
8996   // Don't use it directly. Use MacroAssembler::incrementl() instead.

