< prev index next >

src/cpu/x86/vm/assembler_x86.cpp

Print this page




2877 }
2878 
// PEXTRD: copy the 32-bit dword selected by imm8 from XMM 'src' into GPR
// 'dst' (SSE4.1; 66 0F3A 16 /r ib).  The trailing flag ties the encoding to
// AVX-512DQ support -- presumably forcing the legacy VEX form when DQ is
// absent; confirm against simd_prefix_and_encode()'s parameter list.
2879 void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
2880   assert(VM_Version::supports_sse4_1(), "");
2881   int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, true, VEX_OPCODE_0F_3A,
2882                                       false, AVX_128bit, (VM_Version::supports_avx512dq() == false));
2883   emit_int8(0x16);
2884   emit_int8((unsigned char)(0xC0 | encode));
2885   emit_int8(imm8);
2886 }
2887 
// PEXTRQ: 64-bit variant of the extract above.
// NOTE(review): this body is byte-for-byte identical to pextrd(); PEXTRQ
// requires REX.W/VEX.W = 1, so verify that one of the boolean arguments to
// simd_prefix_and_encode() actually selects the 64-bit form here.
2888 void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
2889   assert(VM_Version::supports_sse4_1(), "");
2890   int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, true, VEX_OPCODE_0F_3A,
2891                                       false, AVX_128bit, (VM_Version::supports_avx512dq() == false));
2892   emit_int8(0x16);
2893   emit_int8((unsigned char)(0xC0 | encode));
2894   emit_int8(imm8);
2895 }
2896 
2896 









// PINSRD: insert the GPR 'src' dword into XMM 'dst' at the position selected
// by imm8 (SSE4.1; 66 0F3A 22 /r ib).  'dst' doubles as the nds operand, so
// the untouched lanes are preserved.  The trailing AVX-512DQ flag presumably
// forces the legacy VEX form when DQ is absent -- confirm.
2897 void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
2898   assert(VM_Version::supports_sse4_1(), "");
2899   int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, true, VEX_OPCODE_0F_3A,
2900                                       false, AVX_128bit, (VM_Version::supports_avx512dq() == false));
2901   emit_int8(0x22);
2902   emit_int8((unsigned char)(0xC0 | encode));
2903   emit_int8(imm8);
2904 }
2905 
// PINSRQ: 64-bit variant of the insert above.
// NOTE(review): encoded identically to pinsrd() here; PINSRQ needs
// REX.W/VEX.W = 1 -- verify one of the boolean arguments selects it.
2906 void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
2907   assert(VM_Version::supports_sse4_1(), "");
2908   int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, true, VEX_OPCODE_0F_3A,
2909                                       false, AVX_128bit, (VM_Version::supports_avx512dq() == false));
2910   emit_int8(0x22);
2911   emit_int8((unsigned char)(0xC0 | encode));
2912   emit_int8(imm8);
2913 }
2914 
2914 









// PMOVZXBW (memory form): zero-extend packed bytes at 'src' to words in
// 'dst' (SSE4.1; 66 0F38 30 /r).  Under EVEX the tuple type is recorded as
// half-vector-memory (HVM) -- presumably consumed later for disp8*N
// compression of the operand; confirm where tuple_type is read.
2915 void Assembler::pmovzxbw(XMMRegister dst, Address src) {
2916   assert(VM_Version::supports_sse4_1(), "");
2917   if (VM_Version::supports_evex()) {
2918     tuple_type = EVEX_HVM;
2919   }
2920   InstructionMark im(this);
2921   simd_prefix(dst, src, VEX_SIMD_66, false, VEX_OPCODE_0F_38);
2922   emit_int8(0x30);
2923   emit_operand(dst, src);
2924 }
2925 
// PMOVZXBW (register form): same zero-extension with an XMM source.
2926 void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
2927   assert(VM_Version::supports_sse4_1(), "");
2928   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, false, VEX_OPCODE_0F_38);
2929   emit_int8(0x30);
2930   emit_int8((unsigned char)(0xC0 | encode));
2931 }
2932 
2932 
2933 // generic
2934 void Assembler::pop(Register dst) {


3882 }
3883 
// VSUBPS with a memory source: dst = nds - [src] on packed single floats
// (opcode 0x5C, no SIMD prefix).  Under EVEX, record full-vector tuple and
// 32-bit element size -- presumably for disp8*N operand compression.
3884 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3885   assert(VM_Version::supports_avx(), "");
3886   if (VM_Version::supports_evex()) {
3887     tuple_type = EVEX_FV;
3888     input_size_in_bits = EVEX_32bit;
3889   }
3890   emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector_len);
3891 }
3892 
// MULPD: packed-double multiply (66 0F 59).  The _q emitter is used when
// EVEX is available -- presumably setting EVEX.W=1 for the 64-bit element
// form; the plain emitter covers legacy SSE/VEX.
3893 void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
3894   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3895   if (VM_Version::supports_evex()) {
3896     emit_simd_arith_q(0x59, dst, src, VEX_SIMD_66);
3897   } else {
3898     emit_simd_arith(0x59, dst, src, VEX_SIMD_66);
3899   }
3900 }
3901 
3901 









// MULPS: packed-single multiply (0F 59, no prefix).
// NOTE(review): asserts supports_sse2() although MULPS is an SSE1
// instruction and sibling xorps() checks supports_sse() -- the stricter
// check may be intentional; confirm.
3902 void Assembler::mulps(XMMRegister dst, XMMRegister src) {
3903   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3904   emit_simd_arith(0x59, dst, src, VEX_SIMD_NONE);
3905 }
3906 
// VMULPD: three-operand AVX packed-double multiply; _q emitter on EVEX
// (presumably EVEX.W=1 for 64-bit elements), plain VEX otherwise.
3907 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3908   assert(VM_Version::supports_avx(), "");
3909   if (VM_Version::supports_evex()) {
3910     emit_vex_arith_q(0x59, dst, nds, src, VEX_SIMD_66, vector_len);
3911   } else {
3912     emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector_len);
3913   }
3914 }
3915 
// VMULPS: three-operand AVX packed-single multiply; same encoding for VEX
// and EVEX (W bit irrelevant for 32-bit elements).
3916 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3917   assert(VM_Version::supports_avx(), "");
3918   emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector_len);
3919 }
3920 
3920 
3921 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {


4041   assert(VM_Version::supports_avx(), "");
4042   if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) {
4043     tuple_type = EVEX_FV;
4044     input_size_in_bits = EVEX_64bit;
4045     emit_vex_arith_q(0x54, dst, nds, src, VEX_SIMD_66, vector_len);
4046   } else {
4047     emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector_len, true);
4048   }
4049 }
4050 
// VANDPS with a memory source: bitwise AND of packed singles (opcode 0x54,
// no prefix).  Under EVEX, record full-vector tuple / 32-bit elements for
// operand compression.  The final flag depends on AVX-512DQ -- presumably
// falling back to the legacy VEX form when DQ (which adds the EVEX VANDPS)
// is absent; confirm against emit_vex_arith()'s signature.
4051 void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
4052   assert(VM_Version::supports_avx(), "");
4053   if (VM_Version::supports_evex()) {
4054     tuple_type = EVEX_FV;
4055     input_size_in_bits = EVEX_32bit;
4056   }
4057   emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector_len,
4058                  (VM_Version::supports_avx512dq() == false));
4059 }
4060 
4060 


















// XORPD: bitwise XOR of packed doubles (66 0F 57).  EVEX-encoded XORPD
// needs AVX-512DQ, hence the combined check; otherwise the legacy emitter
// is used with extra flags (presumably no-W / legacy-mode -- confirm
// against emit_simd_arith()'s parameter list).
4061 void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
4062   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4063   if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) {
4064     emit_simd_arith_q(0x57, dst, src, VEX_SIMD_66);
4065   } else {
4066     emit_simd_arith(0x57, dst, src, VEX_SIMD_66, false, true);
4067   }
4068 }
4069 
// XORPS: bitwise XOR of packed singles (0F 57, no prefix); forces the
// legacy form when AVX-512DQ is absent, mirroring xorpd above.
4070 void Assembler::xorps(XMMRegister dst, XMMRegister src) {
4071   NOT_LP64(assert(VM_Version::supports_sse(), ""));
4072   emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE,
4073                   false, (VM_Version::supports_avx512dq() == false));
4074 }
4075 
4075 
4076 void Assembler::xorpd(XMMRegister dst, Address src) {
4077   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4078   if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) {
4079     tuple_type = EVEX_FV;
4080     input_size_in_bits = EVEX_64bit;


4657   emit_vex_arith(0x72, xmm4, dst, src, VEX_SIMD_66, vector_len);
4658   emit_int8(shift & 0xFF);
4659 }
4660 
// VPSRAW: arithmetic right shift of packed words by the count in 'shift'
// (66 0F E1).  Word-granularity EVEX ops require AVX-512BW, hence the flag
// forcing the legacy form when BW is absent.
4661 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
4662   assert(UseAVX > 0, "requires some form of AVX");
4663   emit_vex_arith(0xE1, dst, src, shift, VEX_SIMD_66, vector_len,
4664                  (VM_Version::supports_avx512bw() == false));
4665 }
4666 
// VPSRAD: arithmetic right shift of packed dwords (66 0F E2); dword
// granularity is in base AVX-512F, so no BW fallback flag is needed.
4667 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
4668   assert(UseAVX > 0, "requires some form of AVX");
4669   emit_vex_arith(0xE2, dst, src, shift, VEX_SIMD_66, vector_len);
4670 }
4671 
4671 
4672 
4673 // AND packed integers
// PAND: bitwise AND of the full XMM registers (66 0F DB).
4674 void Assembler::pand(XMMRegister dst, XMMRegister src) {
4675   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4676   emit_simd_arith(0xDB, dst, src, VEX_SIMD_66);









4677 }
4678 
// VPAND: three-operand AVX form, dst = nds & src.
4679 void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4680   assert(UseAVX > 0, "requires some form of AVX");
4681   emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector_len);
4682 }
4683 
// VPAND with a memory source; under EVEX record full-vector tuple /
// 32-bit elements -- presumably for disp8*N operand compression.
4684 void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
4685   assert(UseAVX > 0, "requires some form of AVX");
4686   if (VM_Version::supports_evex()) {
4687     tuple_type = EVEX_FV;
4688     input_size_in_bits = EVEX_32bit;
4689   }
4690   emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector_len);
4691 }
4692 
// POR: bitwise OR of the full XMM registers (66 0F EB).
4693 void Assembler::por(XMMRegister dst, XMMRegister src) {
4694   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4695   emit_simd_arith(0xEB, dst, src, VEX_SIMD_66);
4696 }




2877 }
2878 
// PEXTRD: copy the dword selected by imm8 from XMM 'src' into GPR 'dst'
// (SSE4.1; 66 0F3A 16 /r ib).  The trailing AVX-512DQ flag presumably
// forces the legacy VEX encoding when DQ is absent -- confirm.
2879 void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
2880   assert(VM_Version::supports_sse4_1(), "");
2881   int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, true, VEX_OPCODE_0F_3A,
2882                                       false, AVX_128bit, (VM_Version::supports_avx512dq() == false));
2883   emit_int8(0x16);
2884   emit_int8((unsigned char)(0xC0 | encode));
2885   emit_int8(imm8);
2886 }
2887 
// PEXTRQ: 64-bit extract.  NOTE(review): body is identical to pextrd();
// PEXTRQ requires REX.W/VEX.W = 1 -- verify the W bit is selected by one
// of the boolean arguments.
2888 void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
2889   assert(VM_Version::supports_sse4_1(), "");
2890   int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, true, VEX_OPCODE_0F_3A,
2891                                       false, AVX_128bit, (VM_Version::supports_avx512dq() == false));
2892   emit_int8(0x16);
2893   emit_int8((unsigned char)(0xC0 | encode));
2894   emit_int8(imm8);
2895 }
2896 
// PEXTRW via the 0F3A 15 form: extract the word selected by imm8 into a
// GPR (SSE2 baseline assert).  Word granularity under EVEX needs
// AVX-512BW, hence the BW-based legacy-fallback flag.
2897 void Assembler::pextrw(Register dst, XMMRegister src, int imm8) {
2898   assert(VM_Version::supports_sse2(), "");
2899   int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, true, VEX_OPCODE_0F_3A,
2900                                       false, AVX_128bit, (VM_Version::supports_avx512bw() == false));
2901   emit_int8(0x15);
2902   emit_int8((unsigned char)(0xC0 | encode));
2903   emit_int8(imm8);
2904 }
2905 
2905 
// PINSRD: insert GPR 'src' dword into XMM 'dst' at position imm8
// (SSE4.1; 66 0F3A 22 /r ib); 'dst' is also the nds operand so the other
// lanes are preserved.  AVX-512DQ flag as in pextrd above.
2906 void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
2907   assert(VM_Version::supports_sse4_1(), "");
2908   int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, true, VEX_OPCODE_0F_3A,
2909                                       false, AVX_128bit, (VM_Version::supports_avx512dq() == false));
2910   emit_int8(0x22);
2911   emit_int8((unsigned char)(0xC0 | encode));
2912   emit_int8(imm8);
2913 }
2914 
// PINSRQ: 64-bit insert.  NOTE(review): identical to pinsrd(); PINSRQ
// needs REX.W/VEX.W = 1 -- confirm the W selection.
2915 void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
2916   assert(VM_Version::supports_sse4_1(), "");
2917   int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, true, VEX_OPCODE_0F_3A,
2918                                       false, AVX_128bit, (VM_Version::supports_avx512dq() == false));
2919   emit_int8(0x22);
2920   emit_int8((unsigned char)(0xC0 | encode));
2921   emit_int8(imm8);
2922 }
2923 
// PINSRW: insert a word from GPR 'src' (SSE2; 66 0F C4 /r ib -- note the
// two-byte 0F map, unlike the 0F3A forms above).  Word granularity under
// EVEX needs AVX-512BW, hence the BW-based fallback flag.
2924 void Assembler::pinsrw(XMMRegister dst, Register src, int imm8) {
2925   assert(VM_Version::supports_sse2(), "");
2926   int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, true, VEX_OPCODE_0F,
2927                                       false, AVX_128bit, (VM_Version::supports_avx512bw() == false));
2928   emit_int8((unsigned char)0xC4);
2929   emit_int8((unsigned char)(0xC0 | encode));
2930   emit_int8(imm8);
2931 }
2932 
2932 
// PMOVZXBW (memory form): zero-extend packed bytes at 'src' to words
// (SSE4.1; 66 0F38 30 /r).  EVEX tuple recorded as half-vector-memory --
// presumably for disp8*N compression; confirm where tuple_type is read.
2933 void Assembler::pmovzxbw(XMMRegister dst, Address src) {
2934   assert(VM_Version::supports_sse4_1(), "");
2935   if (VM_Version::supports_evex()) {
2936     tuple_type = EVEX_HVM;
2937   }
2938   InstructionMark im(this);
2939   simd_prefix(dst, src, VEX_SIMD_66, false, VEX_OPCODE_0F_38);
2940   emit_int8(0x30);
2941   emit_operand(dst, src);
2942 }
2943 
// PMOVZXBW (register form): same zero-extension with an XMM source.
2944 void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
2945   assert(VM_Version::supports_sse4_1(), "");
2946   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, false, VEX_OPCODE_0F_38);
2947   emit_int8(0x30);
2948   emit_int8((unsigned char)(0xC0 | encode));
2949 }
2950 
2950 
2951 // generic
2952 void Assembler::pop(Register dst) {


3900 }
3901 
// VSUBPS with a memory source: dst = nds - [src] on packed singles
// (opcode 0x5C, no SIMD prefix).  EVEX tuple FV / 32-bit elements is
// recorded -- presumably for disp8*N operand compression.
3902 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3903   assert(VM_Version::supports_avx(), "");
3904   if (VM_Version::supports_evex()) {
3905     tuple_type = EVEX_FV;
3906     input_size_in_bits = EVEX_32bit;
3907   }
3908   emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector_len);
3909 }
3910 
// MULPD (register form): packed-double multiply (66 0F 59); _q emitter on
// EVEX (presumably EVEX.W=1 for 64-bit elements).
3911 void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
3912   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3913   if (VM_Version::supports_evex()) {
3914     emit_simd_arith_q(0x59, dst, src, VEX_SIMD_66);
3915   } else {
3916     emit_simd_arith(0x59, dst, src, VEX_SIMD_66);
3917   }
3918 }
3919 
// MULPD (memory form): same selection between the _q and plain emitters.
// NOTE(review): unlike other memory forms in this file, no EVEX
// tuple_type/input_size is recorded before emission -- confirm whether the
// _q emitter sets it internally.
3920 void Assembler::mulpd(XMMRegister dst, Address src) {
3921   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3922   if (VM_Version::supports_evex()) {
3923     emit_simd_arith_q(0x59, dst, src, VEX_SIMD_66);
3924   } else {
3925     emit_simd_arith(0x59, dst, src, VEX_SIMD_66);
3926   }
3927 }
3928 
// MULPS: packed-single multiply (0F 59, no prefix).
// NOTE(review): asserts supports_sse2() though MULPS is SSE1 and sibling
// xorps() checks supports_sse() -- confirm whether the stricter check is
// intentional.
3929 void Assembler::mulps(XMMRegister dst, XMMRegister src) {
3930   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3931   emit_simd_arith(0x59, dst, src, VEX_SIMD_NONE);
3932 }
3933 
3933 
// VMULPD: three-operand AVX packed-double multiply; _q emitter on EVEX
// (presumably EVEX.W=1 for 64-bit elements), plain VEX otherwise.
3934 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3935   assert(VM_Version::supports_avx(), "");
3936   if (VM_Version::supports_evex()) {
3937     emit_vex_arith_q(0x59, dst, nds, src, VEX_SIMD_66, vector_len);
3938   } else {
3939     emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector_len);
3940   }
3941 }
3942 
// VMULPS: three-operand AVX packed-single multiply; one encoding serves
// both VEX and EVEX (W bit irrelevant for 32-bit elements).
3943 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3944   assert(VM_Version::supports_avx(), "");
3945   emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector_len);
3946 }
3947 
3947 
3948 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {


4068   assert(VM_Version::supports_avx(), "");
4069   if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) {
4070     tuple_type = EVEX_FV;
4071     input_size_in_bits = EVEX_64bit;
4072     emit_vex_arith_q(0x54, dst, nds, src, VEX_SIMD_66, vector_len);
4073   } else {
4074     emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector_len, true);
4075   }
4076 }
4077 
// VANDPS with a memory source: bitwise AND of packed singles (opcode 0x54,
// no prefix).  EVEX tuple FV / 32-bit elements recorded for operand
// compression; the AVX-512DQ flag presumably forces legacy VEX when DQ
// (which provides the EVEX logical-float ops) is absent -- confirm.
4078 void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
4079   assert(VM_Version::supports_avx(), "");
4080   if (VM_Version::supports_evex()) {
4081     tuple_type = EVEX_FV;
4082     input_size_in_bits = EVEX_32bit;
4083   }
4084   emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector_len,
4085                  (VM_Version::supports_avx512dq() == false));
4086 }
4087 
// UNPCKHPD: interleave the high doubles of dst and src (66 0F 15);
// _q emitter under EVEX, presumably for the 64-bit-element W=1 form.
4088 void Assembler::unpckhpd(XMMRegister dst, XMMRegister src) {
4089   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4090   if (VM_Version::supports_evex()) {
4091     emit_simd_arith_q(0x15, dst, src, VEX_SIMD_66);
4092   } else {
4093     emit_simd_arith(0x15, dst, src, VEX_SIMD_66);
4094   }
4095 }
4096 
// UNPCKLPD: interleave the low doubles of dst and src (66 0F 14);
// mirrors unpckhpd above.
4097 void Assembler::unpcklpd(XMMRegister dst, XMMRegister src) {
4098   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4099   if (VM_Version::supports_evex()) {
4100     emit_simd_arith_q(0x14, dst, src, VEX_SIMD_66);
4101   } else {
4102     emit_simd_arith(0x14, dst, src, VEX_SIMD_66);
4103   }
4104 }
4105 
4105 
// XORPD: bitwise XOR of packed doubles (66 0F 57).  The EVEX form needs
// AVX-512DQ, hence the combined capability check; otherwise the legacy
// emitter is used with extra flags (presumably no-W / force-legacy --
// confirm against emit_simd_arith()'s parameter list).
4106 void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
4107   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4108   if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) {
4109     emit_simd_arith_q(0x57, dst, src, VEX_SIMD_66);
4110   } else {
4111     emit_simd_arith(0x57, dst, src, VEX_SIMD_66, false, true);
4112   }
4113 }
4114 
// XORPS: bitwise XOR of packed singles (0F 57, no prefix); legacy form is
// forced when AVX-512DQ is absent, mirroring xorpd above.
4115 void Assembler::xorps(XMMRegister dst, XMMRegister src) {
4116   NOT_LP64(assert(VM_Version::supports_sse(), ""));
4117   emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE,
4118                   false, (VM_Version::supports_avx512dq() == false));
4119 }
4120 
4120 
4121 void Assembler::xorpd(XMMRegister dst, Address src) {
4122   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4123   if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) {
4124     tuple_type = EVEX_FV;
4125     input_size_in_bits = EVEX_64bit;


4702   emit_vex_arith(0x72, xmm4, dst, src, VEX_SIMD_66, vector_len);
4703   emit_int8(shift & 0xFF);
4704 }
4705 
// VPSRAW: arithmetic right shift of packed words by the count in 'shift'
// (66 0F E1); falls back to the legacy form without AVX-512BW, since word
// granularity is a BW feature.
4706 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
4707   assert(UseAVX > 0, "requires some form of AVX");
4708   emit_vex_arith(0xE1, dst, src, shift, VEX_SIMD_66, vector_len,
4709                  (VM_Version::supports_avx512bw() == false));
4710 }
4711 
// VPSRAD: arithmetic right shift of packed dwords (66 0F E2); no BW
// fallback needed for dword granularity.
4712 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
4713   assert(UseAVX > 0, "requires some form of AVX");
4714   emit_vex_arith(0xE2, dst, src, shift, VEX_SIMD_66, vector_len);
4715 }
4716 
4716 
4717 
4718 // AND packed integers
// PAND: bitwise AND of the full XMM registers (66 0F DB).
4719 void Assembler::pand(XMMRegister dst, XMMRegister src) {
4720   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4721   emit_simd_arith(0xDB, dst, src, VEX_SIMD_66);
4722 }
4723 
// PANDN: dst = ~dst & src (66 0F DF); uses the _q emitter under EVEX --
// presumably selecting the W=1 (VPANDNQ-style) form; confirm.
4724 void Assembler::pandn(XMMRegister dst, XMMRegister src) {
4725   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4726   if (VM_Version::supports_evex()) {
4727     emit_simd_arith_q(0xDF, dst, src, VEX_SIMD_66);
4728   } else {
4729     emit_simd_arith(0xDF, dst, src, VEX_SIMD_66);
4730   }
4731 }
4732 
// VPAND: three-operand AVX form, dst = nds & src.
4733 void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4734   assert(UseAVX > 0, "requires some form of AVX");
4735   emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector_len);
4736 }
4737 
// VPAND with a memory source; EVEX tuple FV / 32-bit elements recorded --
// presumably for disp8*N operand compression.
4738 void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
4739   assert(UseAVX > 0, "requires some form of AVX");
4740   if (VM_Version::supports_evex()) {
4741     tuple_type = EVEX_FV;
4742     input_size_in_bits = EVEX_32bit;
4743   }
4744   emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector_len);
4745 }
4746 
// POR: bitwise OR of the full XMM registers (66 0F EB).
4747 void Assembler::por(XMMRegister dst, XMMRegister src) {
4748   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4749   emit_simd_arith(0xEB, dst, src, VEX_SIMD_66);
4750 }


< prev index next >