src/cpu/x86/vm/assembler_x86.hpp
*** 1264,1273 **** --- 1264,1274 ----
    void movdq(XMMRegister dst, Register src);
    void movdq(Register dst, XMMRegister src);

    // Move Aligned Double Quadword
    void movdqa(XMMRegister dst, XMMRegister src);
+   void movdqa(XMMRegister dst, Address src);

    // Move Unaligned Double Quadword
    void movdqu(Address dst, XMMRegister src);
    void movdqu(XMMRegister dst, Address src);
    void movdqu(XMMRegister dst, XMMRegister src);
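The new movdqa(XMMRegister, Address) overload loads an aligned 128-bit value straight from memory. A minimal usage sketch, assuming the usual stub-generator shorthand where __ expands to a MacroAssembler* and the memory operand is 16-byte aligned (register choices are illustrative, not taken from this change):

    // Sketch only: '__' assumed to be a MacroAssembler*; operand assumed 16-byte aligned.
    __ movdqa(xmm0, Address(rsp, 0));   // aligned 128-bit load into xmm0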
*** 1402,1411 **** --- 1403,1420 ----
    // SSE4.2 string instructions
    void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
    void pcmpestri(XMMRegister xmm1, Address src, int imm8);

+   // SSE 4.1 extract
+   void pextrd(Register dst, XMMRegister src, int imm8);
+   void pextrq(Register dst, XMMRegister src, int imm8);
+
+   // SSE 4.1 insert
+   void pinsrd(XMMRegister dst, Register src, int imm8);
+   void pinsrq(XMMRegister dst, Register src, int imm8);
+
    // SSE4.1 packed move
    void pmovzxbw(XMMRegister dst, XMMRegister src);
    void pmovzxbw(XMMRegister dst, Address src);

  #ifndef _LP64 // no 32bit push/pop on amd64
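The SSE4.1 extract/insert forms move a 32-bit or 64-bit lane between an XMM register and a general-purpose register; imm8 selects the lane. A hedged sketch of how the pairs are typically used (register names are illustrative only; the q forms need REX.W and are therefore 64-bit only):

    // Sketch only: '__' assumed to be an Assembler* or MacroAssembler*.
    __ pinsrd(xmm1, rax, 0);   // insert EAX into dword lane 0 of xmm1
    __ pinsrq(xmm1, rbx, 1);   // insert RBX into qword lane 1 of xmm1 (LP64 only)
    __ pextrd(rcx, xmm1, 0);   // extract dword lane 0 of xmm1 into ECX
    __ pextrq(rdx, xmm1, 1);   // extract qword lane 1 of xmm1 into RDX (LP64 only)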
*** 1762,1771 **** --- 1771,1783 ----
    void vextracti128h(Address dst, XMMRegister src);

    // duplicate 4-bytes integer data from src into 8 locations in dest
    void vpbroadcastd(XMMRegister dst, XMMRegister src);

+   // Carry-Less Multiplication Quadword
+   void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
+
    // AVX instruction which is used to clear upper 128 bits of YMM registers and
    // to avoid transaction penalty between AVX and SSE states. There is no
    // penalty if legacy SSE instructions are encoded using VEX prefix because
    // they always clear upper 128 bits. It should be used before calling
    // runtime code and native libraries.
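vpclmulqdq performs a carry-less (GF(2)) 64x64-to-128-bit multiply; the mask byte selects which quadword of each source operand participates. A hedged sketch of one folding step as it might appear in a CRC-style loop (register assignments and the constant in xmm0 are assumptions for illustration, not part of this change):

    // Sketch only: xmm0 assumed to hold a 128-bit folding constant, xmm1 the data.
    __ movdqa(xmm2, xmm1);                    // keep a copy of the data word
    __ vpclmulqdq(xmm1, xmm1, xmm0, 0x00);    // low qword * low qword
    __ vpclmulqdq(xmm2, xmm2, xmm0, 0x11);    // high qword * high qword
    __ pxor(xmm1, xmm2);                      // xor the partial products together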