--- old/src/cpu/x86/vm/assembler_x86.cpp	Sat Jun 2 20:03:57 2012
+++ new/src/cpu/x86/vm/assembler_x86.cpp	Sat Jun 2 20:03:57 2012
@@ -1637,6 +1637,13 @@
   emit_byte(0xC0 | encode);
 }
 
+void Assembler::movlhps(XMMRegister dst, XMMRegister src) {
+  NOT_LP64(assert(VM_Version::supports_sse(), ""));
+  int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE);
+  emit_byte(0x16);
+  emit_byte(0xC0 | encode);
+}
+
 void Assembler::movb(Register dst, Address src) {
   NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
   InstructionMark im(this);
@@ -1686,6 +1693,14 @@
   emit_operand(dst, src);
 }
 
+void Assembler::movdl(Address dst, XMMRegister src) {
+  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+  InstructionMark im(this);
+  simd_prefix(dst, src, VEX_SIMD_66);
+  emit_byte(0x7E);
+  emit_operand(src, dst);
+}
+
 void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_66);
@@ -1716,6 +1731,35 @@
   emit_operand(src, dst);
 }
 
+// Move Unaligned 256bit Vector
+void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
+  assert(UseAVX, "");
+  bool vector256 = true;
+  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, vector256);
+  emit_byte(0x6F);
+  emit_byte(0xC0 | encode);
+}
+
+void Assembler::vmovdqu(XMMRegister dst, Address src) {
+  assert(UseAVX, "");
+  InstructionMark im(this);
+  bool vector256 = true;
+  vex_prefix(dst, xnoreg, src, VEX_SIMD_F3, vector256);
+  emit_byte(0x6F);
+  emit_operand(dst, src);
+}
+
+void Assembler::vmovdqu(Address dst, XMMRegister src) {
+  assert(UseAVX, "");
+  InstructionMark im(this);
+  bool vector256 = true;
+  // swap src<->dst for encoding
+  assert(src != xnoreg, "sanity");
+  vex_prefix(src, xnoreg, dst, VEX_SIMD_F3, vector256);
+  emit_byte(0x7F);
+  emit_operand(src, dst);
+}
+
 // Uses zero extension on 64bit
 
 void Assembler::movl(Register dst, int32_t imm32) {
@@ -3112,6 +3156,13 @@
   emit_operand(dst, src);
 }
 
+void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
+  assert(VM_Version::supports_avx(), "");
+  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256);
+  emit_byte(0x57);
+  emit_byte(0xC0 | encode);
+}
+
 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
@@ -3120,6 +3171,30 @@
   emit_operand(dst, src);
 }
 
+void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
+  assert(VM_Version::supports_avx(), "");
+  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_NONE, vector256);
+  emit_byte(0x57);
+  emit_byte(0xC0 | encode);
+}
+
+void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
+  assert(VM_Version::supports_avx(), "");
+  bool vector256 = true;
+  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
+  emit_byte(0x18);
+  emit_byte(0xC0 | encode);
+  // 0x00 - insert into lower 128 bits
+  // 0x01 - insert into upper 128 bits
+  emit_byte(0x01);
+}
+
+void Assembler::vzeroupper() {
+  assert(VM_Version::supports_avx(), "");
+  (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
+  emit_byte(0x77);
+}
+
 #ifndef _LP64 // 32bit only pieces of the assembler
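
Reviewer note (not part of the patch): the reg-reg vmovdqu encoding is easy to sanity-check outside the VM. The stand-alone sketch below is illustrative only; encode_vmovdqu_rr is a made-up helper that mirrors what the VEX_SIMD_F3 prefix emission plus emit_byte(0xC0 | encode) should produce for "vmovdqu ymm0, ymm1", and it covers only the two-byte-VEX case.

#include <cstdint>
#include <cstdio>

// Stand-alone sanity check, not HotSpot code. Covers only the two-byte
// VEX escape (0xC5), i.e. no W/X/B extension bits: src must be < 8.
static void encode_vmovdqu_rr(unsigned dst, unsigned src, uint8_t out[4]) {
  const uint8_t pp   = 0x2;                 // VEX_SIMD_F3 -> pp = 10b
  const uint8_t L    = 1;                   // vector256 == true -> 256-bit form
  const uint8_t vvvv = 0xF;                 // xnoreg: no NDS operand, inverted 0000b
  const uint8_t notR = (dst < 8) ? 1 : 0;   // inverted REX.R-equivalent for ModRM.reg
  out[0] = 0xC5;                                            // two-byte VEX escape
  out[1] = (uint8_t)((notR << 7) | (vvvv << 3) | (L << 2) | pp);
  out[2] = 0x6F;                                            // VMOVDQU load-form opcode
  out[3] = (uint8_t)(0xC0 | ((dst & 7) << 3) | (src & 7));  // the "0xC0 | encode" ModRM byte
}

int main() {
  uint8_t buf[4];
  encode_vmovdqu_rr(0, 1, buf);             // vmovdqu ymm0, ymm1
  for (int i = 0; i < 4; ++i) printf("%02X ", buf[i]);
  printf("\n");                             // expected: C5 FE 6F C1
  return 0;
}

When the operands need extension bits that the two-byte escape cannot express, the prefix has to become the three-byte 0xC4 form, which is why the emitters in the patch delegate to vex_prefix_and_encode rather than open-coding the prefix.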
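Reviewer note (not part of the patch): a hedged sketch of how the new entry points might combine in a stub. The fragment assumes the declarations from assembler_x86.hpp and the usual '#define __ masm->' convention; gen_load_256 and src_ptr are invented names, and it will not compile outside the VM tree.

#define __ masm->

// Hypothetical stub fragment: build one 256-bit value from two 128-bit
// loads, do vector work, and clean up the AVX state before returning.
static void gen_load_256(MacroAssembler* masm, Register src_ptr) {
  __ movdqu(xmm0, Address(src_ptr, 0));    // low 128 bits
  __ movdqu(xmm1, Address(src_ptr, 16));   // high 128 bits
  __ vinsertf128h(xmm0, xmm0, xmm1);       // ymm0 = xmm1:xmm0 (imm8 hardwired to 0x01)
  // ... 256-bit work on ymm0 ...
  __ vmovdqu(Address(src_ptr, 0), xmm0);   // store the full 256 bits back
  __ vzeroupper();                         // clear dirty ymm upper halves
}

#undef __

The trailing vzeroupper matters because mixing 256-bit AVX code with legacy SSE code that sees dirty ymm upper halves triggers an expensive state transition on current AVX hardware.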
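Reviewer note (not part of the patch): the new reg-reg vxorps/vxorpd overloads expose vector256 as an explicit parameter, matching the vex_prefix_and_encode signature, so one emitter serves both the 128-bit and 256-bit forms. A typical use (assumed here, not shown in these hunks) is the xor-with-self zeroing idiom:

  __ vxorps(xmm7, xmm7, xmm7, /*vector256=*/true);   // clear all 256 bits of ymm7

With vector256 == true the VEX.256 form clears the full ymm register, whereas a legacy-SSE xorps would leave the upper 128 bits untouched.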