src/cpu/x86/vm/macroAssembler_x86.hpp

rev 10354 : imported patch vextrinscleanup2
rev 10357 : [mq]: vextrinscleanup5


1168   void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1169   void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);
1170 
1171   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1172     if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1173       Assembler::vpxor(dst, nds, src, vector_len);
1174     else
1175       Assembler::vxorpd(dst, nds, src, vector_len);
1176   }
1177   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1178     if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1179       Assembler::vpxor(dst, nds, src, vector_len);
1180     else
1181       Assembler::vxorpd(dst, nds, src, vector_len);
1182   }
1183 
1184   // Simple version for AVX2 256bit vectors
1185   void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
1186   void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }
1187 
1188   // Move packed integer values from low 128 bit to high 128 bit in 256 bit vector.
1189   void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1190     if (UseAVX > 1) // vinserti128h is available only in AVX2
1191       Assembler::vinserti128h(dst, nds, src);
1192     else
1193       Assembler::vinsertf128h(dst, nds, src);
1194   }
1195 
1196   // Carry-Less Multiplication Quadword
1197   void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1198     // 0x00 - multiply lower 64 bits [0:63]
1199     Assembler::vpclmulqdq(dst, nds, src, 0x00);
1200   }
1201   void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1202     // 0x11 - multiply upper 64 bits [64:127]
1203     Assembler::vpclmulqdq(dst, nds, src, 0x11);
1204   }
1205 
1206   // Data
1207 
1208   void cmov32( Condition cc, Register dst, Address  src);
1209   void cmov32( Condition cc, Register dst, Register src);
1210 
1211   void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1212 
1213   void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1214   void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }




1168   void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1169   void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);
1170 
1171   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1172     if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1173       Assembler::vpxor(dst, nds, src, vector_len);
1174     else
1175       Assembler::vxorpd(dst, nds, src, vector_len);
1176   }
1177   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1178     if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1179       Assembler::vpxor(dst, nds, src, vector_len);
1180     else
1181       Assembler::vxorpd(dst, nds, src, vector_len);
1182   }
1183 
1184   // Simple version for AVX2 256bit vectors
1185   void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
1186   void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }
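The vpxor wrappers above exist because 256-bit integer-domain instructions require AVX2; on AVX-only hardware they quietly fall back to vxorpd, which produces the same bit pattern for an XOR. A minimal caller-side sketch, not part of this patch (the register choice and the usual `__` MacroAssembler shorthand are illustrative):

    // Zero a 256-bit register: emits vpxor ymm0, ymm0, ymm0 when UseAVX > 1,
    // and vxorpd ymm0, ymm0, ymm0 on AVX-only CPUs.
    __ vpxor(xmm0, xmm0, xmm0, Assembler::AVX_256bit);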
1187 
1188   void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1189     if (UseAVX > 1) { // vinserti128 is available only in AVX2
1190       Assembler::vinserti128(dst, nds, src, imm8);
1191     } else {
1192       Assembler::vinsertf128(dst, nds, src, imm8);
1193     }
1194   }
1195 
1196   void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1197     if (UseAVX > 1) { // vinserti128 is available only in AVX2
1198       Assembler::vinserti128(dst, nds, src, imm8);
1199     } else {
1200       Assembler::vinsertf128(dst, nds, src, imm8);
1201     }
1202   }
1203 
1204   void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1205     if (UseAVX > 1) { // vextracti128 is available only in AVX2
1206       Assembler::vextracti128(dst, src, imm8);
1207     } else {
1208       Assembler::vextractf128(dst, src, imm8);
1209     }
1210   }
1211 
1212   void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
1213     if (UseAVX > 1) { // vextracti128 is available only in AVX2
1214       Assembler::vextracti128(dst, src, imm8);
1215     } else {
1216       Assembler::vextractf128(dst, src, imm8);
1217     }
1218   }
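These wrappers give callers a single vinserti128/vextracti128 spelling; on pre-AVX2 hardware they emit the AVX1 vinsertf128/vextractf128 forms, which move the same 128 bits and differ only in execution domain. Illustrative use, not part of this patch (register numbers are placeholders):

    // Copy the upper 128-bit lane of ymm1 into xmm2: vextracti128 on AVX2,
    // vextractf128 on AVX1-only parts.
    __ vextracti128(xmm2, xmm1, 1);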
1219 
1220   // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
1221   void vinserti128_high(XMMRegister dst, XMMRegister src) {
1222     vinserti128(dst, dst, src, 1);
1223   }
1224   void vinserti128_high(XMMRegister dst, Address src) {
1225     vinserti128(dst, dst, src, 1);
1226   }
1227   void vextracti128_high(XMMRegister dst, XMMRegister src) {
1228     vextracti128(dst, src, 1);
1229   }
1230   void vextracti128_high(Address dst, XMMRegister src) {
1231     vextracti128(dst, src, 1);
1232   }
1233   void vinsertf128_high(XMMRegister dst, XMMRegister src) {
1234     vinsertf128(dst, dst, src, 1);
1235   }
1236   void vinsertf128_high(XMMRegister dst, Address src) {
1237     vinsertf128(dst, dst, src, 1);
1238   }
1239   void vextractf128_high(XMMRegister dst, XMMRegister src) {
1240     vextractf128(dst, src, 1);
1241   }
1242   void vextractf128_high(Address dst, XMMRegister src) {
1243     vextractf128(dst, src, 1);
1244   }
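The _high helpers simply pin imm8 to 1, which reads well in lane-folding code such as vector-loop tails. A sketch, not part of this patch, with register numbers chosen only for illustration:

    // Fold a 256-bit accumulator in ymm7 down to 128 bits by XORing its
    // upper lane into its lower lane before a final 128-bit reduction.
    __ vextracti128_high(xmm8, xmm7);                   // xmm8 = upper lane of ymm7
    __ vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);  // low lane ^= high lane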
1245 
1246   // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
1247   void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
1248     vinserti64x4(dst, dst, src, 1);
1249   }
1250   void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
1251     vinsertf64x4(dst, dst, src, 1);
1252   }
1253   void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
1254     vextracti64x4(dst, src, 1);
1255   }
1256   void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
1257     vextractf64x4(dst, src, 1);
1258   }
1259   void vextractf64x4_high(Address dst, XMMRegister src) {
1260     vextractf64x4(dst, src, 1);
1261   }
1262   void vinsertf64x4_high(XMMRegister dst, Address src) {
1263     vinsertf64x4(dst, dst, src, 1);
1264   }
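The 64x4 helpers do the same at AVX-512 width, treating a ZMM register as two 256-bit halves; they assume EVEX-capable hardware (UseAVX > 2). For example (register numbers illustrative, not part of this patch):

    // ymm1 = upper 256 bits of zmm0.
    __ vextracti64x4_high(xmm1, xmm0);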
1265 
1266   // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
1267   void vinserti128_low(XMMRegister dst, XMMRegister src) {
1268     vinserti128(dst, dst, src, 0);
1269   }
1270   void vinserti128_low(XMMRegister dst, Address src) {
1271     vinserti128(dst, dst, src, 0);
1272   }
1273   void vextracti128_low(XMMRegister dst, XMMRegister src) {
1274     vextracti128(dst, src, 0);
1275   }
1276   void vextracti128_low(Address dst, XMMRegister src) {
1277     vextracti128(dst, src, 0);
1278   }
1279   void vinsertf128_low(XMMRegister dst, XMMRegister src) {
1280     vinsertf128(dst, dst, src, 0);
1281   }
1282   void vinsertf128_low(XMMRegister dst, Address src) {
1283     vinsertf128(dst, dst, src, 0);
1284   }
1285   void vextractf128_low(XMMRegister dst, XMMRegister src) {
1286     vextractf128(dst, src, 0);
1287   }
1288   void vextractf128_low(Address dst, XMMRegister src) {
1289     vextractf128(dst, src, 0);
1290   }
1291 
1292   // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
1293   void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
1294     vinserti64x4(dst, dst, src, 0);
1295   }
1296   void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
1297     vinsertf64x4(dst, dst, src, 0);
1298   }
1299   void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
1300     vextracti64x4(dst, src, 0);
1301   }
1302   void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
1303     vextractf64x4(dst, src, 0);
1304   }
1305   void vextractf64x4_low(Address dst, XMMRegister src) {
1306     vextractf64x4(dst, src, 0);
1307   }
1308   void vinsertf64x4_low(XMMRegister dst, Address src) {
1309     vinsertf64x4(dst, dst, src, 0);
1310   }
1311 
1312   
1313   // Carry-Less Multiplication Quadword
1314   void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1315     // 0x00 - multiply lower 64 bits [0:63]
1316     Assembler::vpclmulqdq(dst, nds, src, 0x00);
1317   }
1318   void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1319     // 0x11 - multiply upper 64 bits [64:127]
1320     Assembler::vpclmulqdq(dst, nds, src, 0x11);
1321   }
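The immediate passed to vpclmulqdq selects which 64-bit half of each source is multiplied: 0x00 pairs the two low quadwords and 0x11 the two high quadwords, the two partial products a 128-bit carry-less folding step (as in the CRC32 stubs) consumes. A hedged usage sketch, not part of this patch (registers illustrative):

    // Both 64x64 carry-less partial products of xmm1 and the constant in xmm10;
    // each result is a full 128-bit product.
    __ vpclmulldq(xmm2, xmm1, xmm10);   // xmm1[63:0]   clmul xmm10[63:0]
    __ vpclmulhdq(xmm3, xmm1, xmm10);   // xmm1[127:64] clmul xmm10[127:64]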
1322 
1323   // Data
1324 
1325   void cmov32( Condition cc, Register dst, Address  src);
1326   void cmov32( Condition cc, Register dst, Register src);
1327 
1328   void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1329 
1330   void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1331   void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
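cmovptr selects the pointer-width conditional move at build time: cmovq on LP64, the cmov32 helper on 32-bit VMs. Illustrative use, not part of this patch (condition and registers are placeholders):

    // dst = (condition holds) ? src : dst, without a branch on CPUs with CMOV.
    __ cmovptr(Assembler::notZero, rax, rbx);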

