src/cpu/x86/vm/assembler_x86.cpp
7181494


*** 2571,2580 **** --- 2571,2596 ----
    int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66);
    emit_byte(0x62);
    emit_byte(0xC0 | encode);
  }

+ void Assembler::punpcklqdq(XMMRegister dst, Address src) {
+   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
+   InstructionMark im(this);
+   simd_prefix(dst, dst, src, VEX_SIMD_66);
+   emit_byte(0x6C);
+   emit_operand(dst, src);
+ }
+
+ void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
+   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66);
+   emit_byte(0x6C);
+   emit_byte(0xC0 | encode);
+ }
+
  void Assembler::push(int32_t imm32) {
    // in 64bits we push 64bits onto the stack but only
    // take a 32bit immediate
    emit_byte(0x68);
    emit_long(imm32);
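For reference, the register-register form added here encodes as 66 0F 6C /r, and the 0xC0 | encode expression builds the ModRM byte (mod=11 for register-direct). A minimal standalone sketch of those bytes, assuming registers xmm0-xmm7 only (not HotSpot code; the REX/VEX handling that simd_prefix_and_encode performs for xmm8 and above is deliberately omitted):

// Minimal sketch of the PUNPCKLQDQ reg-reg encoding: 66 0F 6C /r.
// Valid only for xmm0..xmm7; higher registers need a REX or VEX prefix.
#include <cstdint>
#include <cstdio>
#include <vector>

static std::vector<uint8_t> encode_punpcklqdq(int dst, int src) {
  std::vector<uint8_t> code;
  code.push_back(0x66);                    // mandatory operand-size prefix
  code.push_back(0x0F);                    // two-byte opcode escape
  code.push_back(0x6C);                    // PUNPCKLQDQ opcode
  code.push_back(0xC0 | (dst << 3) | src); // ModRM: mod=11, reg=dst, rm=src
  return code;
}

int main() {
  // punpcklqdq xmm0, xmm1 should print: 66 0F 6C C1
  for (uint8_t b : encode_punpcklqdq(0, 1)) printf("%02X ", b);
  printf("\n");
  return 0;
}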
*** 3176,3185 **** --- 3192,3208 ----
    int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_NONE, vector256);
    emit_byte(0x57);
    emit_byte(0xC0 | encode);
  }

+ void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
+   assert(VM_Version::supports_avx2() || (!vector256) && VM_Version::supports_avx(), "");
+   int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256);
+   emit_byte(0xEF);
+   emit_byte(0xC0 | encode);
+ }
+
  void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    assert(VM_Version::supports_avx(), "");
    bool vector256 = true;
    int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
    emit_byte(0x18);
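The new vpxor overload relies on vex_prefix_and_encode to emit the VEX prefix for VEX.NDS.256.66.0F.WIG EF /r; the 256-bit form is what requires AVX2 in the assert. A sketch of the two-byte VEX prefix for the common case with no extended registers (illustrative only; encode_vpxor below is not HotSpot's API):

// Minimal sketch of the two-byte VEX encoding of VPXOR for xmm/ymm0..7.
// vvvv carries the inverted index of the first source (nds); L picks the
// vector length.
#include <cstdint>
#include <cstdio>
#include <vector>

static std::vector<uint8_t> encode_vpxor(int dst, int nds, int src, bool vector256) {
  std::vector<uint8_t> code;
  uint8_t vex_p = 0x80                        // ~R = 1 (dst below xmm8)
                | ((~nds & 0xF) << 3)         // vvvv: inverted first-source index
                | ((vector256 ? 1 : 0) << 2)  // L: 0 = 128-bit, 1 = 256-bit
                | 0x01;                       // pp = 01, the implied 66 prefix
  code.push_back(0xC5);                       // two-byte VEX escape
  code.push_back(vex_p);
  code.push_back(0xEF);                       // PXOR/VPXOR opcode
  code.push_back(0xC0 | (dst << 3) | src);    // ModRM: mod=11, reg=dst, rm=src
  return code;
}

int main() {
  // vpxor ymm0, ymm1, ymm2 should print: C5 F5 EF C2
  for (uint8_t b : encode_vpxor(0, 1, 2, true)) printf("%02X ", b);
  printf("\n");
  return 0;
}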
*** 3187,3196 **** --- 3210,3230 ----
    // 0x00 - insert into lower 128 bits
    // 0x01 - insert into upper 128 bits
    emit_byte(0x01);
  }

+ void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
+   assert(VM_Version::supports_avx2(), "");
+   bool vector256 = true;
+   int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
+   emit_byte(0x38);
+   emit_byte(0xC0 | encode);
+   // 0x00 - insert into lower 128 bits
+   // 0x01 - insert into upper 128 bits
+   emit_byte(0x01);
+ }
+
  void Assembler::vzeroupper() {
    assert(VM_Version::supports_avx(), "");
    (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
    emit_byte(0x77);
  }
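vinserti128h passes VEX_OPCODE_0F_3A because the 0F 3A opcode map cannot be expressed in the two-byte C5 VEX form, so the three-byte C4 escape is required; the trailing 0x01 immediate selects the upper 128-bit lane. A standalone sketch of the resulting byte sequence (names below are illustrative, not HotSpot's):

// Minimal sketch of VINSERTI128: VEX.NDS.256.66.0F3A.W0 38 /r ib,
// again assuming registers 0..7 so ~R ~X ~B are all 1.
#include <cstdint>
#include <cstdio>
#include <vector>

static std::vector<uint8_t> encode_vinserti128(int dst, int nds, int src, uint8_t lane) {
  std::vector<uint8_t> code;
  code.push_back(0xC4);                    // three-byte VEX escape
  code.push_back(0xE0 | 0x03);             // ~R ~X ~B = 111, mmmmm = 00011 (0F 3A map)
  code.push_back(((~nds & 0xF) << 3)       // W = 0, vvvv = inverted nds index
                | (1 << 2)                 // L = 1: 256-bit destination
                | 0x01);                   // pp = 01 (66 prefix)
  code.push_back(0x38);                    // VINSERTI128 opcode
  code.push_back(0xC0 | (dst << 3) | src); // ModRM: mod=11, reg=dst, rm=src
  code.push_back(lane);                    // imm8: 0 = lower lane, 1 = upper lane
  return code;
}

int main() {
  // vinserti128 ymm0, ymm1, xmm2, 1 should print: C4 E3 75 38 C2 01
  for (uint8_t b : encode_vinserti128(0, 1, 2, 1)) printf("%02X ", b);
  printf("\n");
  return 0;
}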
*** 7478,7487 **** --- 7512,7539 ----
  void MacroAssembler::movbyte(ArrayAddress dst, int src) {
    movb(as_Address(dst), src);
  }

+ void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src) {
+   if (reachable(src)) {
+     movdl(dst, as_Address(src));
+   } else {
+     lea(rscratch1, src);
+     movdl(dst, Address(rscratch1, 0));
+   }
+ }
+
+ void MacroAssembler::movq(XMMRegister dst, AddressLiteral src) {
+   if (reachable(src)) {
+     movq(dst, as_Address(src));
+   } else {
+     lea(rscratch1, src);
+     movq(dst, Address(rscratch1, 0));
+   }
+ }
+
  void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
    if (reachable(src)) {
      if (UseXmmLoadAndClearUpper) {
        movsd (dst, as_Address(src));
      } else {
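Both new MacroAssembler overloads follow the same reachable(src) pattern as the existing movdbl: use a direct (RIP-relative on x86-64) operand when the literal lies within a signed 32-bit displacement of the code being emitted, otherwise materialize the full 64-bit address in the scratch register first. A minimal sketch of that reachability test under assumed semantics (is_reachable_rip_relative is an illustrative name, not HotSpot's):

// Minimal sketch of why AddressLiteral loads need the lea(rscratch1, ...)
// fallback: a RIP-relative disp32 is sign-extended, so the target must be
// within +/-2GB of the instruction following the load.
#include <cstdint>
#include <cstdio>

static bool is_reachable_rip_relative(uint64_t target, uint64_t next_insn_pc) {
  int64_t disp = (int64_t)(target - next_insn_pc);
  return disp == (int64_t)(int32_t)disp;  // true iff disp fits in 32 bits
}

int main() {
  uint64_t code_pc = 0x00007f0000001000ULL;
  // A constant a few KB away is reachable; one several GB away is not.
  printf("near constant: %d\n", is_reachable_rip_relative(code_pc + 0x1000, code_pc));
  printf("far constant:  %d\n", is_reachable_rip_relative(code_pc + (3ULL << 32), code_pc));
  return 0;
}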