src/hotspot/cpu/x86/macroAssembler_x86.cpp

*** 1001,1029 ****
    if (target % modulus != 0) {
      nop(modulus - (target % modulus));
    }
  }
  
! void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
    // Used in sign-masking with aligned address.
    assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
    if (reachable(src)) {
      Assembler::andpd(dst, as_Address(src));
    } else {
!     lea(rscratch1, src);
!     Assembler::andpd(dst, Address(rscratch1, 0));
    }
  }
  
! void MacroAssembler::andps(XMMRegister dst, AddressLiteral src) {
    // Used in sign-masking with aligned address.
    assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
    if (reachable(src)) {
      Assembler::andps(dst, as_Address(src));
    } else {
!     lea(rscratch1, src);
!     Assembler::andps(dst, Address(rscratch1, 0));
    }
  }
  
  void MacroAssembler::andptr(Register dst, int32_t imm32) {
    LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
--- 1001,1029 ----
    if (target % modulus != 0) {
      nop(modulus - (target % modulus));
    }
  }
  
! void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register scratch_reg) {
    // Used in sign-masking with aligned address.
    assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
    if (reachable(src)) {
      Assembler::andpd(dst, as_Address(src));
    } else {
!     lea(scratch_reg, src);
!     Assembler::andpd(dst, Address(scratch_reg, 0));
    }
  }
  
! void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register scratch_reg) {
    // Used in sign-masking with aligned address.
    assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
    if (reachable(src)) {
      Assembler::andps(dst, as_Address(src));
    } else {
!     lea(scratch_reg, src);
!     Assembler::andps(dst, Address(scratch_reg, 0));
    }
  }
  
  void MacroAssembler::andptr(Register dst, int32_t imm32) {
    LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
*** 3338,3354 ****
  void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) {
    assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
    Assembler::vmovdqu(dst, src);
  }
  
! void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src) {
    if (reachable(src)) {
      vmovdqu(dst, as_Address(src));
    } else {
!     lea(rscratch1, src);
!     vmovdqu(dst, Address(rscratch1, 0));
    }
  }
  
  void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
    if (reachable(src)) {
--- 3338,3354 ----
  void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) {
    assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
    Assembler::vmovdqu(dst, src);
  }
  
! void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg) {
    if (reachable(src)) {
      vmovdqu(dst, as_Address(src));
    } else {
!     lea(scratch_reg, src);
!     vmovdqu(dst, Address(scratch_reg, 0));
    }
  }
  
  void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
    if (reachable(src)) {
*** 3696,3713 ****
      lea(rscratch1, src);
      Assembler::ucomiss(dst, Address(rscratch1, 0));
    }
  }
  
! void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
    // Used in sign-bit flipping with aligned address.
    assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
    if (reachable(src)) {
      Assembler::xorpd(dst, as_Address(src));
    } else {
!     lea(rscratch1, src);
!     Assembler::xorpd(dst, Address(rscratch1, 0));
    }
  }
  
  void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) {
--- 3696,3713 ----
      lea(rscratch1, src);
      Assembler::ucomiss(dst, Address(rscratch1, 0));
    }
  }
  
! void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register scratch_reg) {
    // Used in sign-bit flipping with aligned address.
    assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
    if (reachable(src)) {
      Assembler::xorpd(dst, as_Address(src));
    } else {
!     lea(scratch_reg, src);
!     Assembler::xorpd(dst, Address(scratch_reg, 0));
    }
  }
  
  void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) {
*** 3724,3741 ****
    } else {
      Assembler::xorps(dst, src);
    }
  }
  
! void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
    // Used in sign-bit flipping with aligned address.
    assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
    if (reachable(src)) {
      Assembler::xorps(dst, as_Address(src));
    } else {
!     lea(rscratch1, src);
!     Assembler::xorps(dst, Address(rscratch1, 0));
    }
  }
  
  void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
    // Used in sign-bit flipping with aligned address.
--- 3724,3741 ----
    } else {
      Assembler::xorps(dst, src);
    }
  }
  
! void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register scratch_reg) {
    // Used in sign-bit flipping with aligned address.
    assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
    if (reachable(src)) {
      Assembler::xorps(dst, as_Address(src));
    } else {
!     lea(scratch_reg, src);
!     Assembler::xorps(dst, Address(scratch_reg, 0));
    }
  }
  
  void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
    // Used in sign-bit flipping with aligned address.
*** 3797,3812 ****
  void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
    Assembler::vpaddw(dst, nds, src, vector_len);
  }
  
! void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) {
    if (reachable(src)) {
      Assembler::vpand(dst, nds, as_Address(src), vector_len);
    } else {
!     lea(rscratch1, src);
!     Assembler::vpand(dst, nds, Address(rscratch1, 0), vector_len);
    }
  }
  
  void MacroAssembler::vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) {
    assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
--- 3797,3812 ----
  void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
    Assembler::vpaddw(dst, nds, src, vector_len);
  }
  
! void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg) {
    if (reachable(src)) {
      Assembler::vpand(dst, nds, as_Address(src), vector_len);
    } else {
!     lea(scratch_reg, src);
!     Assembler::vpand(dst, nds, Address(scratch_reg, 0), vector_len);
    }
  }
  
  void MacroAssembler::vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) {
    assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
*** 3871,3880 ****
--- 3871,3896 ----
  void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
    assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
    Assembler::vpsraw(dst, nds, shift, vector_len);
  }
  
+ void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
+   assert(UseAVX > 2,"");
+   if (!VM_Version::supports_avx512vl() && vector_len < 2) {
+     vector_len = 2;
+   }
+   Assembler::evpsraq(dst, nds, shift, vector_len);
+ }
+ 
+ void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
+   assert(UseAVX > 2,"");
+   if (!VM_Version::supports_avx512vl() && vector_len < 2) {
+     vector_len = 2;
+   }
+   Assembler::evpsraq(dst, nds, shift, vector_len);
+ }
+ 
  void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
    assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
    Assembler::vpsrlw(dst, nds, shift, vector_len);
  }
*** 3911,3935 ****
  void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
    assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
    Assembler::pshuflw(dst, src, mode);
  }
  
! void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) {
    if (reachable(src)) {
      vandpd(dst, nds, as_Address(src), vector_len);
    } else {
!     lea(rscratch1, src);
!     vandpd(dst, nds, Address(rscratch1, 0), vector_len);
    }
  }
  
! void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) {
    if (reachable(src)) {
      vandps(dst, nds, as_Address(src), vector_len);
    } else {
!     lea(rscratch1, src);
!     vandps(dst, nds, Address(rscratch1, 0), vector_len);
    }
  }
  
  void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
    if (reachable(src)) {
--- 3927,3951 ----
  void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
    assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
    Assembler::pshuflw(dst, src, mode);
  }
  
! void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg) {
    if (reachable(src)) {
      vandpd(dst, nds, as_Address(src), vector_len);
    } else {
!     lea(scratch_reg, src);
!     vandpd(dst, nds, Address(scratch_reg, 0), vector_len);
    }
  }
  
! void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg) {
    if (reachable(src)) {
      vandps(dst, nds, as_Address(src), vector_len);
    } else {
!     lea(scratch_reg, src);
!     vandps(dst, nds, Address(scratch_reg, 0), vector_len);
    }
  }
  
  void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
    if (reachable(src)) {
*** 3993,4017 ****
  void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
    assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
    vxorpd(dst, nds, src, Assembler::AVX_128bit);
  }
  
! void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) {
    if (reachable(src)) {
      vxorpd(dst, nds, as_Address(src), vector_len);
    } else {
!     lea(rscratch1, src);
!     vxorpd(dst, nds, Address(rscratch1, 0), vector_len);
    }
  }
  
! void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) {
    if (reachable(src)) {
      vxorps(dst, nds, as_Address(src), vector_len);
    } else {
!     lea(rscratch1, src);
!     vxorps(dst, nds, Address(rscratch1, 0), vector_len);
    }
  }
  
  void MacroAssembler::clear_jweak_tag(Register possibly_jweak) {
    const int32_t inverted_jweak_mask = ~static_cast<int32_t>(JNIHandles::weak_tag_mask);
--- 4009,4047 ----
  void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
    assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
    vxorpd(dst, nds, src, Assembler::AVX_128bit);
  }
  
! void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg) {
    if (reachable(src)) {
      vxorpd(dst, nds, as_Address(src), vector_len);
    } else {
!     lea(scratch_reg, src);
!     vxorpd(dst, nds, Address(scratch_reg, 0), vector_len);
    }
  }
  
! void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg) {
    if (reachable(src)) {
      vxorps(dst, nds, as_Address(src), vector_len);
    } else {
!     lea(scratch_reg, src);
!     vxorps(dst, nds, Address(scratch_reg, 0), vector_len);
!   }
! }
! 
! void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg) {
!   if (UseAVX > 1 || (vector_len < 1)) {
!     if (reachable(src)) {
!       Assembler::vpxor(dst, nds, as_Address(src), vector_len);
!     } else {
!       lea(scratch_reg, src);
!       Assembler::vpxor(dst, nds, Address(scratch_reg, 0), vector_len);
!     }
!   }
!   else {
!     MacroAssembler::vxorpd(dst, nds, src, vector_len, scratch_reg);
    }
  }
  
  void MacroAssembler::clear_jweak_tag(Register possibly_jweak) {
    const int32_t inverted_jweak_mask = ~static_cast<int32_t>(JNIHandles::weak_tag_mask);