< prev index next >

src/hotspot/cpu/x86/macroAssembler_x86.hpp

Print this page
rev 60472 : 8238217: panama fails to build because of use of x87 instructions in 64 bit mode
Reviewed-by: mcimadamore, vlivanov


1066   void addss(XMMRegister dst, Address src)        { Assembler::addss(dst, src); }
1067   void addss(XMMRegister dst, AddressLiteral src);
1068 
1069   void addpd(XMMRegister dst, XMMRegister src)    { Assembler::addpd(dst, src); }
1070   void addpd(XMMRegister dst, Address src)        { Assembler::addpd(dst, src); }
1071   void addpd(XMMRegister dst, AddressLiteral src);
1072 
1073   void divsd(XMMRegister dst, XMMRegister src)    { Assembler::divsd(dst, src); }
1074   void divsd(XMMRegister dst, Address src)        { Assembler::divsd(dst, src); }
1075   void divsd(XMMRegister dst, AddressLiteral src);
1076 
1077   void divss(XMMRegister dst, XMMRegister src)    { Assembler::divss(dst, src); }
1078   void divss(XMMRegister dst, Address src)        { Assembler::divss(dst, src); }
1079   void divss(XMMRegister dst, AddressLiteral src);
1080 
1081   // Move Unaligned Double Quadword
1082   void movdqu(Address     dst, XMMRegister src);
1083   void movdqu(XMMRegister dst, Address src);
1084   void movdqu(XMMRegister dst, XMMRegister src);
1085   void movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg = rscratch1);






1086   // AVX Unaligned forms
1087   void vmovdqu(Address     dst, XMMRegister src);
1088   void vmovdqu(XMMRegister dst, Address src);
1089   void vmovdqu(XMMRegister dst, XMMRegister src);
1090   void vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);




























1091   void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1092   void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1093   void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1094   void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch);











1095 
1096   // Move Aligned Double Quadword
1097   void movdqa(XMMRegister dst, Address src)       { Assembler::movdqa(dst, src); }
1098   void movdqa(XMMRegister dst, XMMRegister src)   { Assembler::movdqa(dst, src); }
1099   void movdqa(XMMRegister dst, AddressLiteral src);
1100 
1101   void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
1102   void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
1103   void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
1104   void movsd(XMMRegister dst, AddressLiteral src);
1105 
1106   void mulpd(XMMRegister dst, XMMRegister src)    { Assembler::mulpd(dst, src); }
1107   void mulpd(XMMRegister dst, Address src)        { Assembler::mulpd(dst, src); }
1108   void mulpd(XMMRegister dst, AddressLiteral src);
1109 
1110   void mulsd(XMMRegister dst, XMMRegister src)    { Assembler::mulsd(dst, src); }
1111   void mulsd(XMMRegister dst, Address src)        { Assembler::mulsd(dst, src); }
1112   void mulsd(XMMRegister dst, AddressLiteral src);
1113 
1114   void mulss(XMMRegister dst, XMMRegister src)    { Assembler::mulss(dst, src); }


1196   void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1197   void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1198 
1199   void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1200   void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1201 
1202   void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1203   void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1204   void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);
1205 
1206   void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1207   void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1208   void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1209 
1210   void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
1211   void vpbroadcastw(XMMRegister dst, Address src, int vector_len) { Assembler::vpbroadcastw(dst, src, vector_len); }
1212 
1213   void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1214 
1215   void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
























1216 
1217   void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
1218   void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }
1219 
1220   void vpmovmskb(Register dst, XMMRegister src);
1221 
1222   void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1223   void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1224 
1225   void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1226   void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1227 
1228   void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1229   void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1230 
1231   void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1232   void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1233 
1234   void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1235   void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1236 
1237   void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1238   void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1239 
1240   void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1241   void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1242 
1243   void vptest(XMMRegister dst, XMMRegister src);

1244 
1245   void punpcklbw(XMMRegister dst, XMMRegister src);
1246   void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
1247 
1248   void pshufd(XMMRegister dst, Address src, int mode);
1249   void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
1250 
1251   void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1252   void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
1253 
1254   void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1255   void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandpd(dst, nds, src, vector_len); }
1256   void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1257 
1258   void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1259   void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandps(dst, nds, src, vector_len); }
1260   void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1261 


1262   void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
1263   void vdivsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivsd(dst, nds, src); }
1264   void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1265 
1266   void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
1267   void vdivss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivss(dst, nds, src); }
1268   void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1269 
1270   void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
1271   void vmulsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulsd(dst, nds, src); }
1272   void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1273 
1274   void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
1275   void vmulss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulss(dst, nds, src); }
1276   void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1277 
1278   void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
1279   void vsubsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubsd(dst, nds, src); }
1280   void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1281 


1296   void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1297   void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1298 
// Bitwise XOR of packed integers. 128-bit (vector_len == 0) always encodes vpxor;
// for 256-bit vectors vpxor requires AVX2, so on AVX-only hardware this falls back
// to the FP-domain vxorpd, which produces the same bit pattern.
1299   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1300     if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1301       Assembler::vpxor(dst, nds, src, vector_len);
1302     else
1303       Assembler::vxorpd(dst, nds, src, vector_len);
1304   }
// Memory-operand form of the vpxor dispatch above: vpxor for 128-bit or when AVX2
// is present, otherwise the bitwise-equivalent vxorpd for 256-bit on AVX-only CPUs.
1305   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1306     if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1307       Assembler::vpxor(dst, nds, src, vector_len);
1308     else
1309       Assembler::vxorpd(dst, nds, src, vector_len);
1310   }
1311   void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1312 
1313   // Simple version for AVX2 256bit vectors
1314   void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
1315   void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }



1316 
// Insert a 128-bit integer lane, dispatching on CPU features:
//  - AVX-512 without VL extensions: use EVEX vinserti32x4 (note: merges into dst,
//    not nds — the nds argument is intentionally ignored on this path);
//  - AVX2: native vinserti128;
//  - plain AVX: FP-domain vinsertf128, which moves the same 128 bits.
1317   void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1318     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1319       Assembler::vinserti32x4(dst, dst, src, imm8);
1320     } else if (UseAVX > 1) {
1321       // vinserti128 is available only in AVX2
1322       Assembler::vinserti128(dst, nds, src, imm8);
1323     } else {
1324       Assembler::vinsertf128(dst, nds, src, imm8);
1325     }
1326   }
1327 
1328   void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1329     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1330       Assembler::vinserti32x4(dst, dst, src, imm8);
1331     } else if (UseAVX > 1) {
1332       // vinserti128 is available only in AVX2
1333       Assembler::vinserti128(dst, nds, src, imm8);
1334     } else {
1335       Assembler::vinsertf128(dst, nds, src, imm8);




1066   void addss(XMMRegister dst, Address src)        { Assembler::addss(dst, src); }
1067   void addss(XMMRegister dst, AddressLiteral src);
1068 
1069   void addpd(XMMRegister dst, XMMRegister src)    { Assembler::addpd(dst, src); }
1070   void addpd(XMMRegister dst, Address src)        { Assembler::addpd(dst, src); }
1071   void addpd(XMMRegister dst, AddressLiteral src);
1072 
1073   void divsd(XMMRegister dst, XMMRegister src)    { Assembler::divsd(dst, src); }
1074   void divsd(XMMRegister dst, Address src)        { Assembler::divsd(dst, src); }
1075   void divsd(XMMRegister dst, AddressLiteral src);
1076 
1077   void divss(XMMRegister dst, XMMRegister src)    { Assembler::divss(dst, src); }
1078   void divss(XMMRegister dst, Address src)        { Assembler::divss(dst, src); }
1079   void divss(XMMRegister dst, AddressLiteral src);
1080 
1081   // Move Unaligned Double Quadword
1082   void movdqu(Address     dst, XMMRegister src);
1083   void movdqu(XMMRegister dst, Address src);
1084   void movdqu(XMMRegister dst, XMMRegister src);
1085   void movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg = rscratch1);
1086 
// AVX-512 opmask (k-register) word moves between k-registers, GPRs and memory.
// The AddressLiteral overload takes a scratch GPR (default rscratch1), presumably
// for materializing the literal address — confirm in the .cpp implementation.
1087   void kmovwl(KRegister dst, Register src) { Assembler::kmovwl(dst, src); }
1088   void kmovwl(Register dst, KRegister src) { Assembler::kmovwl(dst, src); }
1089   void kmovwl(KRegister dst, Address src) { Assembler::kmovwl(dst, src); }
1090   void kmovwl(KRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
1091 
1092   // AVX Unaligned forms
1093   void vmovdqu(Address     dst, XMMRegister src);
1094   void vmovdqu(XMMRegister dst, Address src);
1095   void vmovdqu(XMMRegister dst, XMMRegister src);
1096   void vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
1097 
1098   // AVX512 Unaligned
1099   void evmovdqub(Address dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
1100   void evmovdqub(XMMRegister dst, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
1101   void evmovdqub(XMMRegister dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
1102   void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1103   void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1104 
1105   void evmovdquw(Address dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, src, merge, vector_len); }
1106   void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1107   void evmovdquw(XMMRegister dst, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, src, merge, vector_len); }
1108   void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1109   void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1110 
1111   void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1112   void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
// Unmasked EVEX dword move; elides the instruction entirely when dst and src are
// the same register (a self-move is a no-op).
1113   void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
1114      if (dst->encoding() == src->encoding()) return;
1115      Assembler::evmovdqul(dst, src, vector_len);
1116   }
1117   void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1118   void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
// Masked EVEX dword move; elided only when dst == src AND the mask is k0
// (with a real mask the move still has an effect even on the same register).
1119   void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1120     if (dst->encoding() == src->encoding() && mask == k0) return;
1121     Assembler::evmovdqul(dst, mask, src, merge, vector_len);
1122    }
1123   void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1124 
1125   void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }

1126   void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1127   void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch);
// Unmasked EVEX qword move; skipped when dst and src are the same register.
1128   void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
1129     if (dst->encoding() == src->encoding()) return;
1130     Assembler::evmovdquq(dst, src, vector_len);
1131   }
1132   void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1133   void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
// Masked EVEX qword move; elided only for a same-register move under the
// all-ones k0 mask, mirroring the evmovdqul variant above it in the file.
1134   void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1135     if (dst->encoding() == src->encoding() && mask == k0) return;
1136     Assembler::evmovdquq(dst, mask, src, merge, vector_len);
1137   }
1138   void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1139 
1140   // Move Aligned Double Quadword
1141   void movdqa(XMMRegister dst, Address src)       { Assembler::movdqa(dst, src); }
1142   void movdqa(XMMRegister dst, XMMRegister src)   { Assembler::movdqa(dst, src); }
1143   void movdqa(XMMRegister dst, AddressLiteral src);
1144 
1145   void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
1146   void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
1147   void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
1148   void movsd(XMMRegister dst, AddressLiteral src);
1149 
1150   void mulpd(XMMRegister dst, XMMRegister src)    { Assembler::mulpd(dst, src); }
1151   void mulpd(XMMRegister dst, Address src)        { Assembler::mulpd(dst, src); }
1152   void mulpd(XMMRegister dst, AddressLiteral src);
1153 
1154   void mulsd(XMMRegister dst, XMMRegister src)    { Assembler::mulsd(dst, src); }
1155   void mulsd(XMMRegister dst, Address src)        { Assembler::mulsd(dst, src); }
1156   void mulsd(XMMRegister dst, AddressLiteral src);
1157 
1158   void mulss(XMMRegister dst, XMMRegister src)    { Assembler::mulss(dst, src); }


1240   void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1241   void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1242 
1243   void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1244   void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1245 
1246   void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1247   void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1248   void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);
1249 
1250   void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1251   void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1252   void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1253 
1254   void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
1255   void vpbroadcastw(XMMRegister dst, Address src, int vector_len) { Assembler::vpbroadcastw(dst, src, vector_len); }
1256 
1257   void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1258 
1259   void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1260   void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg);
1261 
1262   // Vector compares
1263   // evpcmp{d,q,b,w}: AVX-512 packed compares (dword/qword/byte/word) writing the
1264   // result into opmask kdst under `mask`, with an explicit comparison predicate.
1265   // The AddressLiteral overloads take a scratch GPR, presumably to materialize
1266   // the literal operand — confirm against the .cpp implementation.
1263   void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
1264                int comparison, int vector_len) { Assembler::evpcmpd(kdst, mask, nds, src, comparison, vector_len); }
1265   void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
1266                int comparison, int vector_len, Register scratch_reg);
1267   void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
1268                int comparison, int vector_len) { Assembler::evpcmpq(kdst, mask, nds, src, comparison, vector_len); }
1269   void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
1270                int comparison, int vector_len, Register scratch_reg);
1271   void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
1272                int comparison, int vector_len) { Assembler::evpcmpb(kdst, mask, nds, src, comparison, vector_len); }
1273   void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
1274                int comparison, int vector_len, Register scratch_reg);
1275   void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
1276                int comparison, int vector_len) { Assembler::evpcmpw(kdst, mask, nds, src, comparison, vector_len); }
1277   void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
1278                int comparison, int vector_len, Register scratch_reg);
1279 
1280 
1281   // Emit comparison instruction for the specified comparison predicate.
1282   void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, ComparisonPredicate cond, Width width, int vector_len, Register scratch_reg);
1283   void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);
1284 
1285   void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
1286   void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }
1287 
1288   void vpmovmskb(Register dst, XMMRegister src);
1289 
1290   void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1291   void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1292 
1293   void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1294   void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1295 
1296   void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1297   void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1298 
1299   void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1300   void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1301 
1302   void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1303   void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1304 
1305   void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1306   void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1307 
1308   void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1309   void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1310 
1311   void vptest(XMMRegister dst, XMMRegister src);
1312   void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }
1313 
1314   void punpcklbw(XMMRegister dst, XMMRegister src);
1315   void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
1316 
1317   void pshufd(XMMRegister dst, Address src, int mode);
1318   void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
1319 
1320   void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1321   void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
1322 
1323   void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1324   void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandpd(dst, nds, src, vector_len); }
1325   void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1326 
1327   void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1328   void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandps(dst, nds, src, vector_len); }
1329   void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1330 
1331   void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1332 
1333   void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
1334   void vdivsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivsd(dst, nds, src); }
1335   void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1336 
1337   void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
1338   void vdivss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivss(dst, nds, src); }
1339   void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1340 
1341   void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
1342   void vmulsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulsd(dst, nds, src); }
1343   void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1344 
1345   void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
1346   void vmulss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulss(dst, nds, src); }
1347   void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1348 
1349   void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
1350   void vsubsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubsd(dst, nds, src); }
1351   void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1352 


1367   void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1368   void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1369 
// Bitwise XOR of packed integers. 128-bit (vector_len == 0) always encodes vpxor;
// for 256-bit vectors vpxor requires AVX2, so on AVX-only hardware this falls back
// to the FP-domain vxorpd, which produces the same bit pattern.
1370   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1371     if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1372       Assembler::vpxor(dst, nds, src, vector_len);
1373     else
1374       Assembler::vxorpd(dst, nds, src, vector_len);
1375   }
// Memory-operand form of the vpxor dispatch above: vpxor for 128-bit or when AVX2
// is present, otherwise the bitwise-equivalent vxorpd for 256-bit on AVX-only CPUs.
1376   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1377     if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1378       Assembler::vpxor(dst, nds, src, vector_len);
1379     else
1380       Assembler::vxorpd(dst, nds, src, vector_len);
1381   }
1382   void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1383 
1384   // Simple version for AVX2 256bit vectors
1385   void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
1386   void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }
1387 
1388   void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
1389   void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg);
1390 
// Insert a 128-bit integer lane, dispatching on CPU features:
//  - AVX-512 without VL extensions: use EVEX vinserti32x4 (note: merges into dst,
//    not nds — the nds argument is intentionally ignored on this path);
//  - AVX2: native vinserti128;
//  - plain AVX: FP-domain vinsertf128, which moves the same 128 bits.
1391   void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1392     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1393       Assembler::vinserti32x4(dst, dst, src, imm8);
1394     } else if (UseAVX > 1) {
1395       // vinserti128 is available only in AVX2
1396       Assembler::vinserti128(dst, nds, src, imm8);
1397     } else {
1398       Assembler::vinsertf128(dst, nds, src, imm8);
1399     }
1400   }
1401 
1402   void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1403     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1404       Assembler::vinserti32x4(dst, dst, src, imm8);
1405     } else if (UseAVX > 1) {
1406       // vinserti128 is available only in AVX2
1407       Assembler::vinserti128(dst, nds, src, imm8);
1408     } else {
1409       Assembler::vinsertf128(dst, nds, src, imm8);


< prev index next >