src/cpu/x86/vm/assembler_x86.hpp
*** old/src/cpu/x86/vm/assembler_x86.hpp	Fri Jul 13 20:36:14 2012
--- new/src/cpu/x86/vm/assembler_x86.hpp	Fri Jul 13 20:36:14 2012

*** 1464,1473 **** --- 1464,1477 ----
    // Interleave Low Doublewords
    void punpckldq(XMMRegister dst, XMMRegister src);
    void punpckldq(XMMRegister dst, Address src);

+   // Interleave Low Quadwords
+   void punpcklqdq(XMMRegister dst, XMMRegister src);
+   void punpcklqdq(XMMRegister dst, Address src);
+
  #ifndef _LP64 // no 32bit push/pop on amd64
    void pushl(Address src);
  #endif

    void pushq(Address src);
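For readers unfamiliar with the instruction: punpcklqdq interleaves the low quadwords of its two operands, so pairing a register with itself broadcasts its low 64 bits into the high half as well. A minimal sketch of the kind of replication sequence these overloads enable; the helper name is hypothetical, not part of this patch:

    // Hypothetical helper: replicate a 32-bit GPR value across all four
    // dword lanes of an XMM register.
    void replicate4I(MacroAssembler* masm, XMMRegister dst, Register src) {
      masm->movdl(dst, src);        // dst[31:0]   = src
      masm->punpckldq(dst, dst);    // dst[63:0]   = two copies of the dword
      masm->punpcklqdq(dst, dst);   // dst[127:64] = dst[63:0]; four copies total
    }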
*** 1604,1620 **** --- 1608,1622 ----
    // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
    void xorps(XMMRegister dst, XMMRegister src);

    void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0

!   // AVX 3-operands scalar instructions (encoded with VEX prefix)
    void vaddsd(XMMRegister dst, XMMRegister nds, Address src);
    void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
    void vaddss(XMMRegister dst, XMMRegister nds, Address src);
    void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src);
-   void vandpd(XMMRegister dst, XMMRegister nds, Address src);
-   void vandps(XMMRegister dst, XMMRegister nds, Address src);
    void vdivsd(XMMRegister dst, XMMRegister nds, Address src);
    void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
    void vdivss(XMMRegister dst, XMMRegister nds, Address src);
    void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src);
    void vmulsd(XMMRegister dst, XMMRegister nds, Address src);
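As background, these VEX-encoded scalar forms are non-destructive three-operand operations: the destination receives nds op src, and for the sd/ss variants the untouched upper bits of the destination are copied from nds. An illustrative emission, with registers chosen arbitrarily (__ is the usual HotSpot macro-assembler shorthand):

    __ vaddsd(xmm0, xmm1, xmm2);   // xmm0[63:0]   = xmm1[63:0] + xmm2[63:0]
                                   // xmm0[127:64] = xmm1[127:64]; nds is not clobbered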
*** 1623,1639 **** --- 1625,1645 ----
    void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src);
    void vsubsd(XMMRegister dst, XMMRegister nds, Address src);
    void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
    void vsubss(XMMRegister dst, XMMRegister nds, Address src);
    void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
-   void vxorpd(XMMRegister dst, XMMRegister nds, Address src);
-   void vxorps(XMMRegister dst, XMMRegister nds, Address src);

    // AVX Vector instructions.
+   void vandpd(XMMRegister dst, XMMRegister nds, Address src);
+   void vandps(XMMRegister dst, XMMRegister nds, Address src);
+   void vxorpd(XMMRegister dst, XMMRegister nds, Address src);
+   void vxorps(XMMRegister dst, XMMRegister nds, Address src);
    void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
    void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
+   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
    void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
+   void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src);

    // AVX instruction which is used to clear upper 128 bits of YMM registers and
    // to avoid transaction penalty between AVX and SSE states. There is no
    // penalty if legacy SSE instructions are encoded using VEX prefix because
    // they always clear upper 128 bits. It should be used before calling
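The naming convention here is HotSpot's own: vinsertf128h/vinserti128h wrap the hardware vinsertf128/vinserti128 instructions with the immediate fixed to select the high 128-bit lane, the f form being the AVX floating-point-domain encoding and the i form its AVX2 integer-domain counterpart. An illustrative use, widening a 128-bit value into both lanes of a YMM register (register choice is arbitrary):

    // Illustrative: copy the low 128 bits of xmm0 into its high lane,
    // leaving a 256-bit vector whose two lanes are identical.
    __ vinserti128h(xmm0, xmm0, xmm0);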
*** 2561,2571 **** --- 2567,2591 ----
    void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); }
    void vxorps(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorps(dst, nds, src); }
    void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src);

+   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
+     if (UseAVX > 1 || !vector256) // vpxor 256 bit is available only in AVX2
+       Assembler::vpxor(dst, nds, src, vector256);
+     else
+       Assembler::vxorpd(dst, nds, src, vector256);
+   }
+
+   // Move packed integer values from low 128 bit to high 128 bit in 256 bit vector.
+   void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
+     if (UseAVX > 1) // vinserti128h is available only in AVX2
+       Assembler::vinserti128h(dst, nds, src);
+     else
+       Assembler::vinsertf128h(dst, nds, src);
+   }
+
    // Data

    void cmov32( Condition cc, Register dst, Address src);
    void cmov32( Condition cc, Register dst, Register src);
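The fallback in vpxor is sound because XOR is purely bitwise: the integer form (vpxor) and the double-precision form (vxorpd) produce identical bits, and only the 256-bit integer encoding requires AVX2, while 128-bit vpxor and 256-bit vxorpd are both legal on plain AVX. A standalone model of the selection logic, with hypothetical names, just to make the cases explicit:

    // Models MacroAssembler::vpxor's instruction selection (names hypothetical).
    enum class XorOp { VPXOR, VXORPD };

    XorOp select_xor(int use_avx, bool vector256) {
      // 256-bit vpxor needs AVX2 (use_avx > 1); 128-bit vpxor is AVX1-legal.
      if (use_avx > 1 || !vector256)
        return XorOp::VPXOR;
      return XorOp::VXORPD;  // AVX1 fallback: FP-domain XOR, bitwise-identical
    }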
*** 2613,2622 **** --- 2633,2649 ----
    void mov32(Register dst, AddressLiteral src);

    // to avoid hiding movb
    void movbyte(ArrayAddress dst, int src);

+   // Import other mov() methods from the parent class or else
+   // they will be hidden by the following overriding declaration.
+   using Assembler::movdl;
+   using Assembler::movq;
+   void movdl(XMMRegister dst, AddressLiteral src);
+   void movq(XMMRegister dst, AddressLiteral src);
+
    // Can push value or effective address
    void pushptr(AddressLiteral src);

    void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
    void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
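The using-declarations are necessary because C++ name lookup stops in the first scope where a name is found: once MacroAssembler declares any movq, every Assembler::movq overload becomes invisible, and calls may silently resolve through implicit conversions or fail to compile. A self-contained illustration of the pitfall (toy types, not the real HotSpot signatures):

    struct Base {
      void movq(int r) {}             // stands in for the Assembler::movq overloads
    };

    struct Hidden : Base {
      void movq(double x) {}          // hides Base::movq entirely
    };

    struct Exposed : Base {
      using Base::movq;               // re-imports the hidden overloads
      void movq(double x) {}
    };

    int main() {
      Hidden h;
      h.movq(1);                      // silently calls movq(double) via conversion
      Exposed e;
      e.movq(1);                      // overload resolution now picks Base::movq(int)
      return 0;
    }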
