
src/cpu/x86/vm/assembler_x86.hpp

2124   void evpbroadcastss(XMMRegister dst, Address src, int vector_len);
2125   void evpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len);
2126   void evpbroadcastsd(XMMRegister dst, Address src, int vector_len);
2127 
2128   void evpbroadcastb(XMMRegister dst, Register src, int vector_len);
2129   void evpbroadcastw(XMMRegister dst, Register src, int vector_len);
2130   void evpbroadcastd(XMMRegister dst, Register src, int vector_len);
2131   void evpbroadcastq(XMMRegister dst, Register src, int vector_len);
2132 
2133   // Carry-Less Multiplication Quadword
2134   void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
2135   void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
2136 
2137   // AVX instruction used to clear the upper 128 bits of the YMM registers and
2138   // to avoid the transition penalty between AVX and SSE states. There is no
2139   // penalty if legacy SSE instructions are encoded with a VEX prefix, because
2140   // VEX-encoded instructions always clear the upper 128 bits. It should be used
2141   // before calling runtime code and native libraries.
2142   void vzeroupper();
2143 





2144  protected:
2145   // The following instructions require 16-byte aligned memory operands in SSE mode.
2146   // They should be called only from the corresponding MacroAssembler instructions.
2147   void andpd(XMMRegister dst, Address src);
2148   void andps(XMMRegister dst, Address src);
2149   void xorpd(XMMRegister dst, Address src);
2150   void xorps(XMMRegister dst, Address src);
2151 
2152 };
2153 
2154 #endif // CPU_X86_VM_ASSEMBLER_X86_HPP


2124   void evpbroadcastss(XMMRegister dst, Address src, int vector_len);
2125   void evpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len);
2126   void evpbroadcastsd(XMMRegister dst, Address src, int vector_len);
2127 
2128   void evpbroadcastb(XMMRegister dst, Register src, int vector_len);
2129   void evpbroadcastw(XMMRegister dst, Register src, int vector_len);
2130   void evpbroadcastd(XMMRegister dst, Register src, int vector_len);
2131   void evpbroadcastq(XMMRegister dst, Register src, int vector_len);
2132 
2133   // Carry-Less Multiplication Quadword
2134   void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
2135   void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
2136 
2137   // AVX instruction used to clear the upper 128 bits of the YMM registers and
2138   // to avoid the transition penalty between AVX and SSE states. There is no
2139   // penalty if legacy SSE instructions are encoded with a VEX prefix, because
2140   // VEX-encoded instructions always clear the upper 128 bits. It should be used
2141   // before calling runtime code and native libraries.
2142   void vzeroupper();
2143 
2144   // AVX support for vectorized conditional move (double). The following two instructions are used only as a pair.
2145   void cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len);
2146   void vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len);
2147 
2148 
2149  protected:
2150   // The following instructions require 16-byte aligned memory operands in SSE mode.
2151   // They should be called only from the corresponding MacroAssembler instructions.
2152   void andpd(XMMRegister dst, Address src);
2153   void andps(XMMRegister dst, Address src);
2154   void xorpd(XMMRegister dst, Address src);
2155   void xorps(XMMRegister dst, Address src);
2156 
2157 };
2158 
2159 #endif // CPU_X86_VM_ASSEMBLER_X86_HPP
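
The cmppd/vpblendd pair added at lines 2145-2146 is coupled because cmppd on its own only produces a per-lane mask (all ones in each 64-bit lane where the predicate holds, all zeros elsewhere); a blend has to follow to turn that mask into a per-lane select. A minimal standalone sketch of the same dataflow, written with compiler intrinsics rather than the assembler API so it can compile on its own (the variable blend shown, vblendvpd, stands in for the vpblendd helper declared above; the function and variable names are illustrative, not taken from the patch):

    #include <immintrin.h>

    // res[i] = (a[i] <= b[i]) ? t[i] : f[i], for four packed doubles.
    static inline __m256d select_le(__m256d a, __m256d b, __m256d t, __m256d f) {
      __m256d mask = _mm256_cmp_pd(a, b, _CMP_LE_OQ); // per-lane all-ones / all-zeros mask
      return _mm256_blendv_pd(f, t, mask);            // take t where the mask is set, f elsewhere
    }

The emitted sequence follows the same shape: cmppd writes the mask register, and vpblendd consumes it to merge the two value registers, which is why the comment says the two instructions are used only as a pair.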
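
For pclmulqdq/vpclmulqdq, the mask immediate selects which 64-bit half of each source enters the carry-less (GF(2)) multiplication: bit 0 picks the low or high quadword of the first source, bit 4 that of the second, so 0x00 multiplies the two low halves and 0x11 the two high halves. Carry-less multiplication is the usual building block for CRC folding and for the GF(2^128) multiply in GHASH. A small intrinsics example (not HotSpot code) showing the 128-bit carry-less product:

    #include <wmmintrin.h>   // PCLMULQDQ intrinsics

    // Returns the 128-bit carry-less product of the low quadwords of a and b.
    static inline __m128i clmul_lo(__m128i a, __m128i b) {
      return _mm_clmulepi64_si128(a, b, 0x00);  // 0x11 would multiply the high halves instead
    }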
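
The vzeroupper comment describes the AVX-to-SSE transition penalty: once the upper halves of the YMM registers are dirty, legacy (non-VEX) SSE instructions pay a heavy state-transition cost until the upper state is cleared. A standalone illustration with intrinsics; the callee is a placeholder standing in for code compiled with legacy SSE encodings, not anything from HotSpot:

    #include <immintrin.h>

    static void legacy_sse_library_call(void) { /* placeholder for non-VEX SSE code */ }

    void avx_then_call(const double *src, double *dst) {
      __m256d v = _mm256_loadu_pd(src);            // dirties the upper YMM state
      _mm256_storeu_pd(dst, _mm256_mul_pd(v, v));
      _mm256_zeroupper();                          // same effect as the emitted vzeroupper
      legacy_sse_library_call();                   // callee no longer pays the transition penalty
    }

In the VM the same rule shows up as emitting vzeroupper before calls out to runtime code and native libraries, exactly as the comment states.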
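
The protected andpd/andps/xorpd/xorps overloads take a raw Address because the legacy SSE encodings of these instructions fault on a 128-bit memory operand that is not 16-byte aligned; that is why the comment restricts them to the MacroAssembler layer, which is responsible for handing them only suitably aligned operands (typically constants in an aligned constant area). A hypothetical wrapper in that spirit; the signature, the assert, and as_Address are illustrative, not the actual MacroAssembler code:

    // Sketch only: the MacroAssembler-level entry point vets the operand before
    // delegating to the raw, alignment-sensitive SSE encoding.
    void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
      assert((((intptr_t) src.target()) & 0xF) == 0,
             "legacy SSE andpd needs a 16-byte aligned memory operand");
      Assembler::andpd(dst, as_Address(src));
    }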