src/cpu/x86/vm/assembler_x86.hpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File 8052081 Sdiff src/cpu/x86/vm

src/cpu/x86/vm/assembler_x86.hpp

Print this page




1820 
1821   // Bitwise XOR of packed integers (vector256 selects the 256-bit AVX form).
1822   void pxor(XMMRegister dst, XMMRegister src);
1823   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
1824   void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
1825 
1826   // Copy the low 128 bits into the high 128 bits of a YMM register.
1827   void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
1828   void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
1829 
1830   // Load/store the high 128 bits of a YMM register without disturbing the other half.
1831   void vinsertf128h(XMMRegister dst, Address src);
1832   void vinserti128h(XMMRegister dst, Address src);
1833   void vextractf128h(Address dst, XMMRegister src);
1834   void vextracti128h(Address dst, XMMRegister src);
1835 
1836   // Duplicate a 4-byte integer from src into all 8 dword locations of dst.
1837   void vpbroadcastd(XMMRegister dst, XMMRegister src);
1838 
1839   // Carry-Less Multiplication Quadword (CLMUL; mask selects the source quadwords).

1840   void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
1841 
1842   // AVX instruction used to clear the upper 128 bits of the YMM registers and
1843   // thus avoid the transition penalty between AVX and SSE states. There is no
1844   // penalty if legacy SSE instructions are encoded using a VEX prefix because
1845   // they always clear the upper 128 bits. It should be used before calling
1846   // runtime code and native libraries.
1847   void vzeroupper();
1848 
1849  protected:
1850   // The following instructions require 16-byte address alignment in SSE mode.
1851   // They should be called only from the corresponding MacroAssembler instructions.
1852   void andpd(XMMRegister dst, Address src);
1853   void andps(XMMRegister dst, Address src);
1854   void xorpd(XMMRegister dst, Address src);
1855   void xorps(XMMRegister dst, Address src);
1856 
1857 };
1858 
1859 #endif // CPU_X86_VM_ASSEMBLER_X86_HPP


1820 
1821   // Bitwise XOR of packed integers (vector256 selects the 256-bit AVX form).
1822   void pxor(XMMRegister dst, XMMRegister src);
1823   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
1824   void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
1825 
1826   // Copy the low 128 bits into the high 128 bits of a YMM register.
1827   void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
1828   void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
1829 
1830   // Load/store the high 128 bits of a YMM register without disturbing the other half.
1831   void vinsertf128h(XMMRegister dst, Address src);
1832   void vinserti128h(XMMRegister dst, Address src);
1833   void vextractf128h(Address dst, XMMRegister src);
1834   void vextracti128h(Address dst, XMMRegister src);
1835 
1836   // Duplicate a 4-byte integer from src into all 8 dword locations of dst.
1837   void vpbroadcastd(XMMRegister dst, XMMRegister src);
1838 
1839   // Carry-Less Multiplication Quadword (CLMUL; mask selects the source quadwords).
1840   void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
1841   void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
1842 
1843   // AVX instruction used to clear the upper 128 bits of the YMM registers and
1844   // thus avoid the transition penalty between AVX and SSE states. There is no
1845   // penalty if legacy SSE instructions are encoded using a VEX prefix because
1846   // they always clear the upper 128 bits. It should be used before calling
1847   // runtime code and native libraries.
1848   void vzeroupper();
1849 
1850  protected:
1851   // The following instructions require 16-byte address alignment in SSE mode.
1852   // They should be called only from the corresponding MacroAssembler instructions.
1853   void andpd(XMMRegister dst, Address src);
1854   void andps(XMMRegister dst, Address src);
1855   void xorpd(XMMRegister dst, Address src);
1856   void xorps(XMMRegister dst, Address src);
1857 
1858 };
1859 
1860 #endif // CPU_X86_VM_ASSEMBLER_X86_HPP
src/cpu/x86/vm/assembler_x86.hpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File