src/cpu/x86/vm/macroAssembler_x86.hpp
rev 10354 : imported patch vextrinscleanup2


1168   void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1169   void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);
1170 
1171   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1172     if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only with AVX2
1173       Assembler::vpxor(dst, nds, src, vector_len);
1174     else
1175       Assembler::vxorpd(dst, nds, src, vector_len);
1176   }
1177   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1178     if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only with AVX2
1179       Assembler::vpxor(dst, nds, src, vector_len);
1180     else
1181       Assembler::vxorpd(dst, nds, src, vector_len);
1182   }
1183 
1184   // Simple versions for AVX2 256-bit vectors
1185   void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
1186   void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }
1187 
1188   // Move packed integer values from the low 128 bits to the high 128 bits of a 256-bit vector.
1189   void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1190     if (UseAVX > 1) // vinserti128h is available only in AVX2
1191       Assembler::vinserti128h(dst, nds, src);
1192     else
1193       Assembler::vinsertf128h(dst, nds, src);
1194   }
1195 
1196   // Carry-Less Multiplication Quadword
1197   void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1198     // 0x00 - multiply lower 64 bits [0:63]
1199     Assembler::vpclmulqdq(dst, nds, src, 0x00);
1200   }
1201   void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1202     // 0x11 - multiply upper 64 bits [64:127]
1203     Assembler::vpclmulqdq(dst, nds, src, 0x11);
1204   }
1205 
1206   // Data
1207 
1208   void cmov32( Condition cc, Register dst, Address  src);
1209   void cmov32( Condition cc, Register dst, Register src);
1210 
1211   void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1212 
1213   void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }




1168   void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1169   void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);
1170 
1171   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1172     if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only with AVX2
1173       Assembler::vpxor(dst, nds, src, vector_len);
1174     else
1175       Assembler::vxorpd(dst, nds, src, vector_len);
1176   }
1177   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1178     if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only with AVX2
1179       Assembler::vpxor(dst, nds, src, vector_len);
1180     else
1181       Assembler::vxorpd(dst, nds, src, vector_len);
1182   }
1183 
1184   // Simple versions for AVX2 256-bit vectors
1185   void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
1186   void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }
1187 
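Note on the vpxor fallback above: vpxor and vxorpd both compute a plain bitwise XOR of the whole register, so the result bits are identical; only the instruction domain (integer vs. floating point) and the required feature level differ, and the 256-bit vpxor form needs AVX2 while vxorpd does not. A minimal standalone sketch with Intel intrinsics (not HotSpot code; assumes an AVX2-capable compiler and CPU) that checks the equivalence:

// Standalone illustration (Intel intrinsics, not HotSpot code).
// Compile with e.g. g++ -O2 -mavx2 xor_demo.cpp
#include <immintrin.h>
#include <cstdint>
#include <cstring>
#include <cstdio>

int main() {
  alignas(32) uint64_t a[4] = {0x0123456789abcdefULL, 1, 2, 3};
  alignas(32) uint64_t b[4] = {0xfedcba9876543210ULL, 4, 5, 6};

  // Integer-domain XOR: vpxor on a 256-bit register needs AVX2.
  __m256i xi = _mm256_xor_si256(_mm256_load_si256((const __m256i*)a),
                                _mm256_load_si256((const __m256i*)b));
  // FP-domain XOR: vxorpd on a 256-bit register is already available with AVX1.
  __m256d xd = _mm256_xor_pd(_mm256_load_pd((const double*)a),
                             _mm256_load_pd((const double*)b));

  alignas(32) uint64_t ri[4], rd[4];
  _mm256_store_si256((__m256i*)ri, xi);
  _mm256_store_pd((double*)rd, xd);
  printf("bitwise identical: %s\n", memcmp(ri, rd, sizeof ri) == 0 ? "yes" : "no");
  return 0;
}
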
1188   void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
1189     if (UseAVX > 1) { // vinserti128 is available only in AVX2
1190       Assembler::vinserti128(dst, nds, src, imm8);
1191     } else {
1192       Assembler::vinsertf128(dst, nds, src, imm8);
1193     }
1194   }
1195 
1196   void vinserti128(XMMRegister dst, XMMRegister nds, Address src, int imm8) {
1197     if (UseAVX > 1) { // vinserti128 is available only in AVX2
1198       Assembler::vinserti128(dst, nds, src, imm8);
1199     } else {
1200       Assembler::vinsertf128(dst, nds, src, imm8);
1201     }
1202   }
1203 
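For reference on the two vinserti128 overloads above: the imm8 operand selects the destination 128-bit lane (0 = low half, 1 = high half), and the AVX1 vinsertf128 form moves exactly the same bits as the AVX2 vinserti128 form, which is what the fallback relies on. A minimal standalone sketch with Intel intrinsics (not HotSpot code):

// Standalone sketch (Intel intrinsics, not HotSpot code).
#include <immintrin.h>

// AVX2 path: vinserti128 writes the high 128-bit lane when imm8 == 1.
__m256i insert_high_avx2(__m256i dst, __m128i src) {
  return _mm256_inserti128_si256(dst, src, 1);
}

// AVX1 fallback: route through the FP domain; vinsertf128 moves the same bits.
__m256i insert_high_avx1(__m256i dst, __m128i src) {
  __m256 d = _mm256_castsi256_ps(dst);
  __m128 s = _mm_castsi128_ps(src);
  return _mm256_castps_si256(_mm256_insertf128_ps(d, s, 1));
}
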
1204   void vextracti128(XMMRegister dst, XMMRegister src, int imm8) {
1205     if (UseAVX > 1) { // vextracti128 is available only in AVX2
1206       Assembler::vextracti128(dst, src, imm8);
1207     } else {
1208       Assembler::vextractf128(dst, src, imm8);
1209     }
1210   }
1211 
1212   void vextracti128(Address dst, XMMRegister src, int imm8) {
1213     if (UseAVX > 1) { // vextracti128 is available only in AVX2
1214       Assembler::vextracti128(dst, src, imm8);
1215     } else {
1216       Assembler::vextractf128(dst, src, imm8);
1217     }
1218   }
1219 
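The extract direction mirrors the insert case: imm8 names the source 128-bit lane, and vextractf128 (AVX1) is a bit-exact stand-in for vextracti128 (AVX2). A minimal standalone sketch with Intel intrinsics (not HotSpot code):

// Standalone sketch (Intel intrinsics, not HotSpot code).
#include <immintrin.h>

// AVX2 path: vextracti128 reads the high 128-bit lane when imm8 == 1.
__m128i extract_high_avx2(__m256i src) {
  return _mm256_extracti128_si256(src, 1);
}

// AVX1 fallback: vextractf128 extracts the same bits via the FP domain.
__m128i extract_high_avx1(__m256i src) {
  return _mm_castps_si128(_mm256_extractf128_ps(_mm256_castsi256_ps(src), 1));
}
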
1220   // Carry-Less Multiplication Quadword
1221   void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1222     // 0x00 - multiply lower 64 bits [0:63]
1223     Assembler::vpclmulqdq(dst, nds, src, 0x00);
1224   }
1225   void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1226     // 0x11 - multiply upper 64 bits [64:127]
1227     Assembler::vpclmulqdq(dst, nds, src, 0x11);
1228   }
1229 
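The PCLMULQDQ immediate picks which 64-bit half of each source feeds the carry-less 64x64 -> 128-bit product: 0x00 pairs the two low quadwords, 0x11 the two high quadwords, matching vpclmulldq and vpclmulhdq above. A minimal standalone sketch with Intel intrinsics (not HotSpot code; requires PCLMUL support, e.g. -mpclmul):

// Standalone sketch (Intel intrinsics, not HotSpot code).
// Compile with e.g. g++ -O2 -mpclmul clmul_demo.cpp
#include <immintrin.h>

// imm8 0x00: carry-less product of the low 64 bits of a and b (vpclmulldq above).
__m128i clmul_low(__m128i a, __m128i b)  { return _mm_clmulepi64_si128(a, b, 0x00); }

// imm8 0x11: carry-less product of the high 64 bits of a and b (vpclmulhdq above).
__m128i clmul_high(__m128i a, __m128i b) { return _mm_clmulepi64_si128(a, b, 0x11); }
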
1230   // Data
1231 
1232   void cmov32( Condition cc, Register dst, Address  src);
1233   void cmov32( Condition cc, Register dst, Register src);
1234 
1235   void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1236 
1237   void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
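cmovptr above expands to cmovq on a 64-bit (LP64) build and to cmov32 on a 32-bit build via the LP64_ONLY/NOT_LP64 macros. A simplified sketch of that selection pattern (the real macros live in HotSpot's globalDefinitions.hpp; cmovq_stub and cmov32_stub are hypothetical stand-ins for the real emitters):

// Simplified sketch of the LP64_ONLY / NOT_LP64 pattern (not the actual
// HotSpot definitions).  Exactly one expansion survives preprocessing.
#ifdef _LP64
#define LP64_ONLY(code) code
#define NOT_LP64(code)
#else
#define LP64_ONLY(code)
#define NOT_LP64(code) code
#endif

#include <cstdio>

void cmovq_stub()  { std::puts("64-bit path: cmovq");  }   // hypothetical stand-in
void cmov32_stub() { std::puts("32-bit path: cmov32"); }   // hypothetical stand-in

// Mirrors the cmovptr wrapper: one of the two calls is compiled in per build.
void cmovptr_demo() {
  LP64_ONLY(cmovq_stub()) NOT_LP64(cmov32_stub());
}

int main() { cmovptr_demo(); return 0; }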

