src/hotspot/cpu/x86/macroAssembler_x86.hpp

rev 61868 : manual merge with default

--- old/src/hotspot/cpu/x86/macroAssembler_x86.hpp
1062   void addss(XMMRegister dst, Address src)        { Assembler::addss(dst, src); }
1063   void addss(XMMRegister dst, AddressLiteral src);
1064 
1065   void addpd(XMMRegister dst, XMMRegister src)    { Assembler::addpd(dst, src); }
1066   void addpd(XMMRegister dst, Address src)        { Assembler::addpd(dst, src); }
1067   void addpd(XMMRegister dst, AddressLiteral src);
1068 
1069   void divsd(XMMRegister dst, XMMRegister src)    { Assembler::divsd(dst, src); }
1070   void divsd(XMMRegister dst, Address src)        { Assembler::divsd(dst, src); }
1071   void divsd(XMMRegister dst, AddressLiteral src);
1072 
1073   void divss(XMMRegister dst, XMMRegister src)    { Assembler::divss(dst, src); }
1074   void divss(XMMRegister dst, Address src)        { Assembler::divss(dst, src); }
1075   void divss(XMMRegister dst, AddressLiteral src);
1076 
1077   // Move Unaligned Double Quadword
1078   void movdqu(Address     dst, XMMRegister src);
1079   void movdqu(XMMRegister dst, Address src);
1080   void movdqu(XMMRegister dst, XMMRegister src);
1081   void movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg = rscratch1);
1082   // AVX Unaligned forms
1083   void vmovdqu(Address     dst, XMMRegister src);
1084   void vmovdqu(XMMRegister dst, Address src);
1085   void vmovdqu(XMMRegister dst, XMMRegister src);
1086   void vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
1087   void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1088   void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1089   void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1090   void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch);
1091 
1092   // Move Aligned Double Quadword
1093   void movdqa(XMMRegister dst, Address src)       { Assembler::movdqa(dst, src); }
1094   void movdqa(XMMRegister dst, XMMRegister src)   { Assembler::movdqa(dst, src); }
1095   void movdqa(XMMRegister dst, AddressLiteral src);
1096 
1097   void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
1098   void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
1099   void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
1100   void movsd(XMMRegister dst, AddressLiteral src);
1101 
1102   void mulpd(XMMRegister dst, XMMRegister src)    { Assembler::mulpd(dst, src); }
1103   void mulpd(XMMRegister dst, Address src)        { Assembler::mulpd(dst, src); }
1104   void mulpd(XMMRegister dst, AddressLiteral src);
1105 
1106   void mulsd(XMMRegister dst, XMMRegister src)    { Assembler::mulsd(dst, src); }
1107   void mulsd(XMMRegister dst, Address src)        { Assembler::mulsd(dst, src); }
1108   void mulsd(XMMRegister dst, AddressLiteral src);
1109 
1110   void mulss(XMMRegister dst, XMMRegister src)    { Assembler::mulss(dst, src); }

1192   void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1193   void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1194 
1195   void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1196   void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1197 
1198   void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1199   void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1200   void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);
1201 
1202   void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1203   void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1204   void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1205 
1206   void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
1207   void vpbroadcastw(XMMRegister dst, Address src, int vector_len) { Assembler::vpbroadcastw(dst, src, vector_len); }
1208 
1209   void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1210 
1211   void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1212 
1213   void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
1214   void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }
1215 
1216   void vpmovmskb(Register dst, XMMRegister src);
1217 
1218   void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1219   void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1220 
1221   void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1222   void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1223 
1224   void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1225   void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1226 
1227   void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1228   void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1229 
1230   void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1231   void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1232 
1233   void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1234   void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1235 
1236   void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1237   void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1238 
1239   void vptest(XMMRegister dst, XMMRegister src);
1240 
1241   void punpcklbw(XMMRegister dst, XMMRegister src);
1242   void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
1243 
1244   void pshufd(XMMRegister dst, Address src, int mode);
1245   void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
1246 
1247   void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1248   void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
1249 
1250   void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1251   void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandpd(dst, nds, src, vector_len); }
1252   void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1253 
1254   void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1255   void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandps(dst, nds, src, vector_len); }
1256   void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1257 
1258   void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
1259   void vdivsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivsd(dst, nds, src); }
1260   void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1261 
1262   void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
1263   void vdivss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivss(dst, nds, src); }
1264   void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1265 
1266   void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
1267   void vmulsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulsd(dst, nds, src); }
1268   void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1269 
1270   void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
1271   void vmulss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulss(dst, nds, src); }
1272   void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1273 
1274   void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
1275   void vsubsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubsd(dst, nds, src); }
1276   void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1277 

1292   void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1293   void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1294 
1295   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1296     if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1297       Assembler::vpxor(dst, nds, src, vector_len);
1298     else
1299       Assembler::vxorpd(dst, nds, src, vector_len);
1300   }
1301   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1302     if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1303       Assembler::vpxor(dst, nds, src, vector_len);
1304     else
1305       Assembler::vxorpd(dst, nds, src, vector_len);
1306   }
1307   void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1308 
1309   // Simple version for AVX2 256bit vectors
1310   void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
1311   void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }
1312 
1313   void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1314     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1315       Assembler::vinserti32x4(dst, dst, src, imm8);
1316     } else if (UseAVX > 1) {
1317       // vinserti128 is available only in AVX2
1318       Assembler::vinserti128(dst, nds, src, imm8);
1319     } else {
1320       Assembler::vinsertf128(dst, nds, src, imm8);
1321     }
1322   }
1323 
1324   void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1325     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1326       Assembler::vinserti32x4(dst, dst, src, imm8);
1327     } else if (UseAVX > 1) {
1328       // vinserti128 is available only in AVX2
1329       Assembler::vinserti128(dst, nds, src, imm8);
1330     } else {
1331       Assembler::vinsertf128(dst, nds, src, imm8);

+++ new/src/hotspot/cpu/x86/macroAssembler_x86.hpp

1062   void addss(XMMRegister dst, Address src)        { Assembler::addss(dst, src); }
1063   void addss(XMMRegister dst, AddressLiteral src);
1064 
1065   void addpd(XMMRegister dst, XMMRegister src)    { Assembler::addpd(dst, src); }
1066   void addpd(XMMRegister dst, Address src)        { Assembler::addpd(dst, src); }
1067   void addpd(XMMRegister dst, AddressLiteral src);
1068 
1069   void divsd(XMMRegister dst, XMMRegister src)    { Assembler::divsd(dst, src); }
1070   void divsd(XMMRegister dst, Address src)        { Assembler::divsd(dst, src); }
1071   void divsd(XMMRegister dst, AddressLiteral src);
1072 
1073   void divss(XMMRegister dst, XMMRegister src)    { Assembler::divss(dst, src); }
1074   void divss(XMMRegister dst, Address src)        { Assembler::divss(dst, src); }
1075   void divss(XMMRegister dst, AddressLiteral src);
1076 
1077   // Move Unaligned Double Quadword
1078   void movdqu(Address     dst, XMMRegister src);
1079   void movdqu(XMMRegister dst, Address src);
1080   void movdqu(XMMRegister dst, XMMRegister src);
1081   void movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg = rscratch1);
1082 
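
The AddressLiteral overloads above exist because a 64-bit constant-table entry may or may not be RIP-reachable from the code being emitted. A minimal sketch of the out-of-line definition, assuming the standard HotSpot reachable()/as_Address() idiom (the real body lives in macroAssembler_x86.cpp; this is illustrative, not a verbatim copy):

  void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg) {
    if (reachable(src)) {
      movdqu(dst, as_Address(src));         // constant is RIP-addressable
    } else {
      lea(scratchReg, src);                 // materialize the far address first
      movdqu(dst, Address(scratchReg, 0));  // then load through the scratch register
    }
  }

The same pattern applies to the scalar AddressLiteral overloads (addss, divsd, mulsd, and so on) declared earlier.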
1083   void kmovwl(KRegister dst, Register src) { Assembler::kmovwl(dst, src); }
1084   void kmovwl(Register dst, KRegister src) { Assembler::kmovwl(dst, src); }
1085   void kmovwl(KRegister dst, Address src) { Assembler::kmovwl(dst, src); }
1086   void kmovwl(KRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
1087 
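
kmovwl moves 16-bit opmask values between k registers, GPRs, and memory; there is no immediate form, so mask constants go through a scratch GPR. A usage sketch, assuming the usual HotSpot stub convention `#define __ masm->` (register choices are arbitrary):

  __ movl(rscratch1, 0x00FF);   // enable the low 8 lanes
  __ kmovwl(k1, rscratch1);     // GPR -> opmask register
  __ kmovwl(rax, k1);           // opmask -> GPR, e.g. to branch on lane bits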
1088   // AVX Unaligned forms
1089   void vmovdqu(Address     dst, XMMRegister src);
1090   void vmovdqu(XMMRegister dst, Address src);
1091   void vmovdqu(XMMRegister dst, XMMRegister src);
1092   void vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
1093 
1094   // AVX512 Unaligned
1095   void evmovdqub(Address dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
1096   void evmovdqub(XMMRegister dst, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
1097   void evmovdqub(XMMRegister dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
1098   void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1099   void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1100 
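
For the masked evmovdqu* forms, `merge` selects between the two EVEX masking modes: with merge == true, lanes whose mask bit is clear keep the old destination value; with merge == false they are zeroed. A hedged usage sketch (same `__` convention; the mask value and registers are arbitrary):

  __ movl(rscratch1, 0x0F);                           // mask: low 4 byte lanes
  __ kmovwl(k2, rscratch1);                           // upper mask bits cleared
  __ evmovdqub(xmm0, k2, Address(rsi, 0), /*merge*/ false,
               Assembler::AVX_512bit);                // disabled lanes are zeroed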
1101   void evmovdquw(Address dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, src, merge, vector_len); }
1102   void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1103   void evmovdquw(XMMRegister dst, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, src, merge, vector_len); }
1104   void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1105   void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1106 
1107   void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1108   void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1109   void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
1110      if (dst->encoding() == src->encoding()) return;
1111      Assembler::evmovdqul(dst, src, vector_len);
1112   }
1113   void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1114   void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1115   void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1116     if (dst->encoding() == src->encoding() && mask == k0) return;
1117     Assembler::evmovdqul(dst, mask, src, merge, vector_len);
1118    }
1119   void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1120 
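
Note the early returns above: an unmasked register-to-itself move is elided outright, and the masked form is elided only when mask == k0 (no masking), because with a real mask a zero-masking move can still clear disabled lanes. For example:

  __ evmovdqul(xmm5, xmm5, Assembler::AVX_512bit);        // emits nothing
  __ evmovdqul(xmm5, k3, xmm5, /*merge*/ false,
               Assembler::AVX_512bit);                    // still emitted: zero-masking
                                                          // clears the disabled lanes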
1121   void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1122   void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1123   void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch);
1124   void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
1125     if (dst->encoding() == src->encoding()) return;
1126     Assembler::evmovdquq(dst, src, vector_len);
1127   }
1128   void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1129   void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1130   void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1131     if (dst->encoding() == src->encoding() && mask == k0) return;
1132     Assembler::evmovdquq(dst, mask, src, merge, vector_len);
1133   }
1134   void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1135 
1136   // Move Aligned Double Quadword
1137   void movdqa(XMMRegister dst, Address src)       { Assembler::movdqa(dst, src); }
1138   void movdqa(XMMRegister dst, XMMRegister src)   { Assembler::movdqa(dst, src); }
1139   void movdqa(XMMRegister dst, AddressLiteral src);
1140 
1141   void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
1142   void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
1143   void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
1144   void movsd(XMMRegister dst, AddressLiteral src);
1145 
1146   void mulpd(XMMRegister dst, XMMRegister src)    { Assembler::mulpd(dst, src); }
1147   void mulpd(XMMRegister dst, Address src)        { Assembler::mulpd(dst, src); }
1148   void mulpd(XMMRegister dst, AddressLiteral src);
1149 
1150   void mulsd(XMMRegister dst, XMMRegister src)    { Assembler::mulsd(dst, src); }
1151   void mulsd(XMMRegister dst, Address src)        { Assembler::mulsd(dst, src); }
1152   void mulsd(XMMRegister dst, AddressLiteral src);
1153 
1154   void mulss(XMMRegister dst, XMMRegister src)    { Assembler::mulss(dst, src); }

1236   void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1237   void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1238 
1239   void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1240   void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1241 
1242   void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1243   void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1244   void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);
1245 
1246   void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1247   void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1248   void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1249 
1250   void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
1251   void vpbroadcastw(XMMRegister dst, Address src, int vector_len) { Assembler::vpbroadcastw(dst, src, vector_len); }
1252 
1253   void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1254 
1255   void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1256   void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg);
1257 
1258   // Vector compares
1259   void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
1260                int comparison, int vector_len) { Assembler::evpcmpd(kdst, mask, nds, src, comparison, vector_len); }
1261   void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
1262                int comparison, int vector_len, Register scratch_reg);
1263   void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
1264                int comparison, int vector_len) { Assembler::evpcmpq(kdst, mask, nds, src, comparison, vector_len); }
1265   void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
1266                int comparison, int vector_len, Register scratch_reg);
1267   void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
1268                int comparison, int vector_len) { Assembler::evpcmpb(kdst, mask, nds, src, comparison, vector_len); }
1269   void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
1270                int comparison, int vector_len, Register scratch_reg);
1271   void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
1272                int comparison, int vector_len) { Assembler::evpcmpw(kdst, mask, nds, src, comparison, vector_len); }
1273   void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
1274                int comparison, int vector_len, Register scratch_reg);
1275 
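
The evpcmp* wrappers take an explicit comparison predicate and write a per-lane result bit to kdst; the AddressLiteral forms again route unreachable constants through a scratch register. A usage sketch, assuming the ComparisonPredicate constants (e.g. Assembler::lt) used by vpcmpCCW below:

  // k1 gets one bit per dword lane: set where xmm0[i] < xmm1[i]
  __ evpcmpd(k1, k0, xmm0, xmm1, Assembler::lt, Assembler::AVX_512bit);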
1276 
1277   // Emit comparison instruction for the specified comparison predicate.
1278   void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, ComparisonPredicate cond, Width width, int vector_len, Register scratch_reg);
1279   void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);
1280 
1281   void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
1282   void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }
1283 
1284   void vpmovmskb(Register dst, XMMRegister src);
1285 
1286   void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1287   void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1288 
1289   void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1290   void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1291 
1292   void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1293   void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1294 
1295   void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1296   void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1297 
1298   void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1299   void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1300 
1301   void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1302   void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1303 
1304   void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1305   void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1306 
1307   void vptest(XMMRegister dst, XMMRegister src);
1308   void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }
1309 
1310   void punpcklbw(XMMRegister dst, XMMRegister src);
1311   void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
1312 
1313   void pshufd(XMMRegister dst, Address src, int mode);
1314   void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
1315 
1316   void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1317   void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
1318 
1319   void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1320   void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandpd(dst, nds, src, vector_len); }
1321   void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1322 
1323   void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1324   void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandps(dst, nds, src, vector_len); }
1325   void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1326 
1327   void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1328 
1329   void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
1330   void vdivsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivsd(dst, nds, src); }
1331   void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1332 
1333   void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
1334   void vdivss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivss(dst, nds, src); }
1335   void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1336 
1337   void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
1338   void vmulsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulsd(dst, nds, src); }
1339   void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1340 
1341   void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
1342   void vmulss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulss(dst, nds, src); }
1343   void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1344 
1345   void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
1346   void vsubsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubsd(dst, nds, src); }
1347   void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1348 

1363   void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1364   void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1365 
1366   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1367     if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1368       Assembler::vpxor(dst, nds, src, vector_len);
1369     else
1370       Assembler::vxorpd(dst, nds, src, vector_len);
1371   }
1372   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1373     if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1374       Assembler::vpxor(dst, nds, src, vector_len);
1375     else
1376       Assembler::vxorpd(dst, nds, src, vector_len);
1377   }
1378   void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1379 
1380   // Simple version for AVX2 256bit vectors
1381   void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
1382   void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }
1383 
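
The fallback in vpxor above is safe because XOR is a pure bitwise operation: vxorpd produces exactly the same 256-bit result as vpxor, only in the floating-point execution domain (a possible bypass-latency cost, never a correctness one). The 128-bit form needs no fallback since vpxor xmm is already legal under AVX1. So on an AVX-only machine:

  __ vpxor(xmm0, xmm1, xmm2, Assembler::AVX_256bit);  // wrapper emits vxorpd when UseAVX == 1
  __ vpxor(xmm0, xmm1, xmm2, Assembler::AVX_128bit);  // plain vpxor; available in AVX1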
1384   void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
1385   void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg);
1386 
1387   void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1388     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1389       Assembler::vinserti32x4(dst, dst, src, imm8);
1390     } else if (UseAVX > 1) {
1391       // vinserti128 is available only in AVX2
1392       Assembler::vinserti128(dst, nds, src, imm8);
1393     } else {
1394       Assembler::vinsertf128(dst, nds, src, imm8);
1395     }
1396   }
1397 
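
vinserti128 above follows the same degrade-gracefully pattern: the EVEX-encoded vinserti32x4 on AVX-512 chips without VL support, the AVX2 vinserti128 where available, and the AVX1 floating-point vinsertf128 otherwise (bitwise-equivalent for a pure data move). A usage sketch building a 256-bit vector from two 128-bit halves:

  __ movdqu(xmm1, Address(rsi, 16));     // load the upper 128-bit half
  __ vinserti128(xmm0, xmm0, xmm1, 1);   // xmm0[255:128] = xmm1; wrapper picks the encoding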
1398   void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1399     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1400       Assembler::vinserti32x4(dst, dst, src, imm8);
1401     } else if (UseAVX > 1) {
1402       // vinserti128 is available only in AVX2
1403       Assembler::vinserti128(dst, nds, src, imm8);
1404     } else {
1405       Assembler::vinsertf128(dst, nds, src, imm8);