src/cpu/x86/vm/macroAssembler_x86.hpp

  }
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
    if (UseAVX > 1 || !vector256) // 256-bit vpxor is available only with AVX2
      Assembler::vpxor(dst, nds, src, vector256);
    else
      Assembler::vxorpd(dst, nds, src, vector256);
  }

  // Simple versions for AVX2 256-bit vectors
  void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
  void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }

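The one-operand forms make the xor-with-self zeroing idiom concise in AVX2 stub code. A minimal usage sketch (hypothetical helper; the masm parameter and register roles are illustrative, not from this file):

  // Hypothetical fragment: clear a 256-bit accumulator, then xor the low
  // 128 bits with a memory operand via the width-dispatching overload.
  static void clear_and_mix(MacroAssembler* masm, XMMRegister acc, Address src) {
    masm->vpxor(acc, acc);             // acc = 0 via self-xor (AVX2, 256-bit)
    masm->vpxor(acc, acc, src, false); // 128-bit xor; legal on plain AVX
  }
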
  // Move packed integer values from the low 128 bits to the high 128 bits of a 256-bit vector.
  void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    if (UseAVX > 1) // vinserti128h is available only with AVX2
      Assembler::vinserti128h(dst, nds, src);
    else
      Assembler::vinsertf128h(dst, nds, src);
  }

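One common use is replicating a 128-bit value into both lanes of a ymm register; the wrapper hides the AVX/AVX2 distinction from callers. A sketch (names are illustrative):

  // Hypothetical fragment: copy the low 128 bits of 'val' into its high
  // lane, so both halves of the ymm register hold the same quadwords.
  static void duplicate_low_lane(MacroAssembler* masm, XMMRegister val) {
    masm->vinserti128h(val, val, val);  // val[255:128] = val[127:0]
  }
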
  void vextractf128h(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf128h(dst, src);
  }

  void vextractf128h(Address dst, XMMRegister src) {
    Assembler::vextractf128h(dst, src);
  }

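The extract forms are the inverse operation: they move the upper 128 bits of a ymm register into an xmm register or out to memory, e.g. when spilling a 256-bit accumulator. A sketch (register and stack slots are illustrative, and 32 bytes at rsp are assumed to be reserved):

  // Hypothetical fragment: spill both halves of a 256-bit accumulator.
  static void spill_ymm(MacroAssembler* masm, XMMRegister acc) {
    masm->movdqu(Address(rsp, 0), acc);          // low 128 bits
    masm->vextractf128h(Address(rsp, 16), acc);  // high 128 bits
  }
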
  // Add horizontal packed integers
  void vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
    Assembler::vphaddw(dst, nds, src, vector256);
  }

  void vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
    Assembler::vphaddd(dst, nds, src, vector256);
  }

  void phaddw(XMMRegister dst, XMMRegister src) {
    Assembler::phaddw(dst, src);
  }

  void phaddd(XMMRegister dst, XMMRegister src) {
    Assembler::phaddd(dst, src);
  }

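Together with vextractf128h, these support horizontal integer reductions. A sketch of summing the eight 32-bit lanes of a ymm register into a general register (the helper name and register choices are illustrative, not from this file):

  // Hypothetical fragment: sum the eight ints in 'vec' into 'result'.
  static void reduce_add_8i(MacroAssembler* masm, XMMRegister vec,
                            XMMRegister tmp, Register result) {
    masm->vextractf128h(tmp, vec);       // tmp = vec[255:128]
    masm->vphaddd(vec, vec, tmp, false); // four pairwise sums across both lanes
    masm->phaddd(vec, vec);              // two partial sums
    masm->phaddd(vec, vec);              // total in the low dword
    masm->movdl(result, vec);            // move it to a general register
  }
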
  // Carry-Less Multiplication Quadword
  void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x00);
  }
  void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x11);
  }

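These two selectors are the pair used in carry-less-multiplication folding, as in CLMUL-based CRC32 loops: each step multiplies the two halves of the accumulator by precomputed constants and xors the products back together. A sketch of one fold step (register roles are illustrative; 'k' is assumed to hold two precomputed 64-bit constants):

  // Hypothetical fragment: one 128-bit fold of a CRC accumulator.
  static void fold_once(MacroAssembler* masm, XMMRegister crc,
                        XMMRegister k, XMMRegister tmp) {
    masm->vpclmulhdq(tmp, k, crc); // k[127:64] clmul crc[127:64]
    masm->vpclmulldq(crc, k, crc); // k[63:0]   clmul crc[63:0]
    masm->pxor(crc, tmp);          // combine the two products
  }
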
  // Data

  void cmov32( Condition cc, Register dst, Address  src);
  void cmov32( Condition cc, Register dst, Register src);

  void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }

  void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
  void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }

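cmovptr lets shared code stay word-size agnostic: it expands to cmovq on LP64 and to cmov32 otherwise. A sketch of a branch-free unsigned maximum over pointer-sized values (helper name, condition, and registers are illustrative):

  // Hypothetical fragment: dst = unsigned max(dst, src) without a branch.
  static void ptr_umax(MacroAssembler* masm, Register dst, Register src) {
    masm->cmpptr(dst, src);
    masm->cmovptr(Assembler::below, dst, src); // if dst < src, dst = src
  }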

