src/cpu/x86/vm/assembler_x86.hpp (sdiff, bug 8005544)

  // OR packed integers
  void por(XMMRegister dst, XMMRegister src);
  void vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpor(XMMRegister dst, XMMRegister nds, Address src, bool vector256);

  // XOR packed integers
  void pxor(XMMRegister dst, XMMRegister src);
  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256);

  // Copy the low 128 bits into the high 128 bits of a YMM register.
  void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src);

  // Load/store the high 128 bits of a YMM register without destroying the
  // other half.
  void vinsertf128h(XMMRegister dst, Address src);
  void vinserti128h(XMMRegister dst, Address src);
  void vextractf128h(Address dst, XMMRegister src);
  void vextracti128h(Address dst, XMMRegister src);

  // Duplicate the 4-byte integer in src into all eight 32-bit lanes of dst.
  void vpbroadcastd(XMMRegister dst, XMMRegister src);

  // AVX instruction used to clear the upper 128 bits of the YMM registers and
  // so avoid the transition penalty between the AVX and SSE states. There is
  // no penalty if legacy SSE instructions are encoded with the VEX prefix,
  // because they always clear the upper 128 bits. It should be used before
  // calling runtime code and native libraries.
  void vzeroupper();

 protected:
  // The following instructions require their memory operands to be 16-byte
  // aligned in SSE mode. They should be called only from the corresponding
  // MacroAssembler instructions.
  void andpd(XMMRegister dst, Address src);
  void andps(XMMRegister dst, Address src);
  void xorpd(XMMRegister dst, Address src);
  void xorps(XMMRegister dst, Address src);

};

#endif // CPU_X86_VM_ASSEMBLER_X86_HPP
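
As a reference for the vinsertf128h/vextractf128h helpers above, here is a
minimal sketch using the corresponding AVX compiler intrinsics. The function
names are illustrative, not part of the assembler; the helpers emit the
VINSERTF128/VEXTRACTF128 family with a lane index of 1.

    #include <immintrin.h>

    // Build a YMM value whose high 128 bits come from the low 128 bits of
    // src, with the low half taken from nds (the register-register
    // vinsertf128h shape).
    static inline __m256 copy_low_into_high(__m256 nds, __m256 src) {
      return _mm256_insertf128_ps(nds, _mm256_castps256_ps128(src), 1);
    }

    // Store the high 128 bits of src to memory without touching its low
    // half (the vextractf128h store shape).
    static inline void store_high_128(float* mem, __m256 src) {
      _mm_storeu_ps(mem, _mm256_extractf128_ps(src, 1));
    }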
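
The vpbroadcastd declaration corresponds to the AVX2 broadcast instruction.
A minimal intrinsic sketch, assuming the 256-bit destination described in the
comment (broadcast_dword is an illustrative name):

    #include <immintrin.h>

    // Replicate the low 32-bit element of src into all eight 32-bit lanes
    // of a 256-bit register (VPBROADCASTD, AVX2).
    static inline __m256i broadcast_dword(__m128i src) {
      return _mm256_broadcastd_epi32(src);
    }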
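
The vzeroupper comment refers to the AVX-to-SSE transition penalty: once
256-bit AVX code has run, legacy-encoded SSE instructions can stall until the
upper YMM halves are cleared. A sketch of the calling pattern the comment
prescribes; legacy_sse_routine stands in for a hypothetical callee compiled
without VEX encodings:

    #include <immintrin.h>

    // Hypothetical callee built without AVX, so it uses legacy SSE encodings.
    extern "C" float legacy_sse_routine(float x);

    float avx_then_call(__m256 v) {
      float first = _mm256_cvtss_f32(v);  // some AVX work
      // Clear the upper 128 bits of every YMM register before handing
      // control to non-VEX code, as the comment above prescribes for calls
      // into runtime code and native libraries.
      _mm256_zeroupper();
      return legacy_sse_routine(first);
    }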
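
The 16-byte alignment requirement on the protected andpd/andps/xorpd/xorps
forms is the legacy SSE rule that packed memory operands must be aligned; a
misaligned address faults. A sketch of a compliant operand, using the
sign-mask idiom these instructions typically serve (the constant and function
are illustrative):

    #include <immintrin.h>

    // 16-byte aligned constant: sign bits set, all other bits clear.
    alignas(16) static const double kSignMask[2] = { -0.0, -0.0 };

    // Negate a double by XOR-ing its sign bit, the kind of use the
    // MacroAssembler wraps around xorpd(XMMRegister, Address). The memory
    // operand (kSignMask) satisfies the 16-byte alignment rule.
    static inline double negate(double x) {
      __m128d v = _mm_set_sd(x);
      __m128d m = _mm_load_pd(kSignMask);  // aligned load
      return _mm_cvtsd_f64(_mm_xor_pd(v, m));
    }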