src/cpu/x86/vm/macroAssembler_x86.hpp
*** old/src/cpu/x86/vm/macroAssembler_x86.hpp	Wed Mar  2 14:58:33 2016
--- new/src/cpu/x86/vm/macroAssembler_x86.hpp	Wed Mar  2 14:58:33 2016

*** 1183,1198 **** --- 1183,1222 ----
    // Simple version for AVX2 256bit vectors
    void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
    void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }
  
    // Move packed integer values from low 128 bit to hign 128 bit in 256 bit vector.
    void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
      if (UseAVX > 1) // vinserti128h is available only in AVX2
        Assembler::vinserti128h(dst, nds, src);
      else
        Assembler::vinsertf128h(dst, nds, src);
+   void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
+     if (UseAVX > 1) { // vinserti128h is available only in AVX2
+       Assembler::vinserti128(dst, nds, src, imm8);
+     } else {
+       Assembler::vinsertf128(dst, nds, src, imm8);
+     }
+   }
+ 
+   void vinserti128(XMMRegister dst, XMMRegister nds, Address src, int imm8) {
+     if (UseAVX > 1) { // vinserti128 is available only in AVX2
+       Assembler::vinserti128(dst, nds, src, imm8);
+     } else {
+       Assembler::vinsertf128(dst, nds, src, imm8);
+     }
+   }
+ 
+   void vextracti128(XMMRegister dst, XMMRegister src, int imm8) {
+     if (UseAVX > 1) { // vextracti128 is available only in AVX2
+       Assembler::vextracti128(dst, src, imm8);
+     } else {
+       Assembler::vextractf128(dst, src, imm8);
+     }
+   }
+ 
+   void vextracti128(Address dst, XMMRegister src, int imm8) {
+     if (UseAVX > 1) { // vextracti128 is available only in AVX2
+       Assembler::vextracti128(dst, src, imm8);
+     } else {
+       Assembler::vextractf128(dst, src, imm8);
+     }
    }
  
    // Carry-Less Multiplication Quadword
    void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
      // 0x00 - multiply lower 64 bits [0:63]
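The added wrappers take an explicit imm8 lane selector (0 = lower 128-bit lane, 1 = upper 128-bit lane) and emit the AVX2 integer-domain instruction when UseAVX > 1, otherwise the plain-AVX float-domain fallback (vinsertf128/vextractf128), since vinserti128/vextracti128 exist only in AVX2. Below is a minimal standalone sketch of that dispatch pattern; the Assembler stubs, the UseAVX value, and the simplified XMMRegister type are placeholders for illustration, not HotSpot's real definitions.

    // Standalone sketch of the UseAVX-based dispatch used by the new wrappers.
    // The Assembler stubs below are placeholders, not HotSpot's real emitters.
    #include <cstdio>

    static int UseAVX = 2;              // 2 => pretend AVX2 is available in this sketch

    struct XMMRegister { int encoding; };

    namespace Assembler {
      // Integer-domain 128-bit lane insert (an AVX2-only encoding on real hardware).
      void vinserti128(XMMRegister, XMMRegister, XMMRegister, int imm8) {
        std::printf("emit vinserti128, imm8=%d\n", imm8);
      }
      // Float-domain equivalent available with plain AVX.
      void vinsertf128(XMMRegister, XMMRegister, XMMRegister, int imm8) {
        std::printf("emit vinsertf128, imm8=%d\n", imm8);
      }
    }

    // Mirrors the wrapper added in the diff: prefer the AVX2 integer form,
    // otherwise fall back to the AVX float form.
    void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
      if (UseAVX > 1) {
        Assembler::vinserti128(dst, nds, src, imm8);
      } else {
        Assembler::vinsertf128(dst, nds, src, imm8);
      }
    }

    int main() {
      XMMRegister ymm0{0}, ymm1{1}, xmm2{2};
      vinserti128(ymm0, ymm1, xmm2, 1);   // imm8 = 1: place src into the upper 128-bit lane
      return 0;
    }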
