
src/cpu/x86/vm/assembler_x86.hpp


        

*** 585,594 ****
--- 585,604 ----
        narrow_oop_operand = 3,          // embedded 32-bit immediate narrow oop
        _WhichOperand_limit = 4
  #endif
    };
  
+   enum ComparisonPredicate {
+     eq = 0,
+     lt = 1,
+     le = 2,
+     _false = 3,
+     neq = 4,
+     nlt = 5,
+     nle = 6,
+     _true = 7
+   };
+ 
    // NOTE: The general philopsophy of the declarations here is that 64bit versions
    // of instructions are freely declared without the need for wrapping them an ifdef.
    // (Some dangerous instructions are ifdef's out of inappropriate jvm's.)
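The new ComparisonPredicate values follow the EVEX comparison-predicate immediate encoding (0 = eq, 1 = lt, 2 = le, 3 = false, 4 = neq, 5 = nlt, 6 = nle, 7 = true) consumed by compare instructions such as VPCMPUW. A minimal usage sketch with the evpcmpuw overload declared further down in this change; the register choices and the usual __ shorthand for _masm-> inside a stub generator are illustrative assumptions, not part of the webrev:

    // k3.word[i] := (xmm1.word[i] <= xmm2.word[i]) as an unsigned 16-bit compare,
    // over all 32 word lanes of a 512-bit vector.
    __ evpcmpuw(k3, xmm1, xmm2, Assembler::le, Assembler::AVX_512bit);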
*** 828,838 ****
    // Following functions are for stub code use only
    void set_vector_masking(void) { _vector_masking = true; }
    void clear_vector_masking(void) { _vector_masking = false; }
    bool is_vector_masking(void) { return _vector_masking; }
- 
    void lea(Register dst, Address src);
  
    void mov(Register dst, Register src);
  
    void pusha();
--- 838,847 ----
*** 1360,1369 ****
--- 1369,1381 ----
    void kortestbl(KRegister dst, KRegister src);
    void kortestwl(KRegister dst, KRegister src);
    void kortestdl(KRegister dst, KRegister src);
    void kortestql(KRegister dst, KRegister src);
  
+   void ktestq(KRegister src1, KRegister src2);
+   void ktestd(KRegister src1, KRegister src2);
+   void ktestql(KRegister dst, KRegister src);
  
    void movdl(XMMRegister dst, Register src);
    void movdl(Register dst, XMMRegister src);
    void movdl(XMMRegister dst, Address src);
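ktest ANDs two opmask registers and sets ZF when the result is zero, giving a cheap "do these lane masks intersect" check without moving either mask into a general register. A hedged sketch; the label, the registers and the surrounding code are illustrative, again assuming the __ shorthand for _masm->:

    // ZF := ((k1 & k2) == 0); branch if some lane is set in both masks.
    __ ktestql(k1, k2);
    __ jcc(Assembler::notZero, L_found);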
*** 1389,1402 ****
    // Move Unaligned 512bit Vector
    void evmovdqub(Address dst, XMMRegister src, int vector_len);
    void evmovdqub(XMMRegister dst, Address src, int vector_len);
    void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len);
!   void evmovdqub(KRegister mask, XMMRegister dst, Address src, int vector_len);
    void evmovdquw(Address dst, XMMRegister src, int vector_len);
    void evmovdquw(XMMRegister dst, Address src, int vector_len);
!   void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len);
    void evmovdqul(Address dst, XMMRegister src, int vector_len);
    void evmovdqul(XMMRegister dst, Address src, int vector_len);
    void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len);
    void evmovdquq(Address dst, XMMRegister src, int vector_len);
    void evmovdquq(XMMRegister dst, Address src, int vector_len);
--- 1401,1415 ----
    // Move Unaligned 512bit Vector
    void evmovdqub(Address dst, XMMRegister src, int vector_len);
    void evmovdqub(XMMRegister dst, Address src, int vector_len);
    void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len);
!   void evmovdqub(XMMRegister dst, KRegister mask, Address src, int vector_len);
    void evmovdquw(Address dst, XMMRegister src, int vector_len);
+   void evmovdquw(Address dst, KRegister mask, XMMRegister src, int vector_len);
    void evmovdquw(XMMRegister dst, Address src, int vector_len);
!   void evmovdquw(XMMRegister dst, KRegister mask, Address src, int vector_len);
    void evmovdqul(Address dst, XMMRegister src, int vector_len);
    void evmovdqul(XMMRegister dst, Address src, int vector_len);
    void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len);
    void evmovdquq(Address dst, XMMRegister src, int vector_len);
    void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len);
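The mask-carrying evmovdqu overloads enable safe tail processing: with an opmask covering only the remaining lanes, masked-off elements are suppressed, so the access cannot fault past the end of the buffer. A sketch of loading a sub-64-byte tail, assuming the register len holds the remaining byte count, tmp is a scratch register, the shlxq declared later in this change shifts src1 left by the count in src2, and __ stands for _masm-> (all names illustrative):

    __ movl(tmp, 1);
    __ shlxq(tmp, tmp, len);                 // tmp := 1 << len
    __ subq(tmp, 1);                         // tmp := (1 << len) - 1, one bit per byte lane
    __ kmovql(k2, tmp);                      // move the lane mask into an opmask register
    // Masked 512-bit byte load: lanes outside k2 are suppressed and cannot fault.
    __ evmovdqub(xmm0, k2, Address(src, 0), Assembler::AVX_512bit);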
*** 1543,1553 ****
    void pcmpeqb(XMMRegister dst, XMMRegister src);
    void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
    void evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
    void evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len);
!   void evpcmpeqb(KRegister mask, KRegister kdst, XMMRegister nds, Address src, int vector_len);
  
    void pcmpeqw(XMMRegister dst, XMMRegister src);
    void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
    void evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
    void evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len);
--- 1556,1573 ----
    void pcmpeqb(XMMRegister dst, XMMRegister src);
    void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
    void evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
    void evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len);
!   void evpcmpeqb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len);
! 
!   void evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len);
!   void evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len);
! 
!   void evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len);
!   void evpcmpuw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, ComparisonPredicate of, int vector_len);
!   void evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len);
  
    void pcmpeqw(XMMRegister dst, XMMRegister src);
    void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
    void evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
    void evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len);
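The masked evpcmpeqb overload pairs naturally with the masked load above: the compare is restricted to the lanes named by the opmask, so a tail comparison produces no stray hits from lanes past the end of the data. A sketch continuing the illustrative names from the previous snippet (k2 is the tail lane mask, xmm0 holds the probe bytes, ary and L_match are hypothetical):

    // k7[i] := k2[i] && (xmm0.byte[i] == ary[i]) over 64 byte lanes; masked-off
    // memory lanes are suppressed, so reading at a buffer tail is safe.
    __ evpcmpeqb(k7, k2, xmm0, Address(ary, 0), Assembler::AVX_512bit);
    __ kortestql(k7, k7);                    // ZF := (k7 == 0)
    __ jcc(Assembler::notZero, L_match);     // some lane compared equal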
*** 1587,1597 ****
    // SSE4.1 packed move
    void pmovzxbw(XMMRegister dst, XMMRegister src);
    void pmovzxbw(XMMRegister dst, Address src);
!   void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
  
  #ifndef _LP64 // no 32bit push/pop on amd64
    void popl(Address dst);
  #endif
--- 1607,1621 ----
    // SSE4.1 packed move
    void pmovzxbw(XMMRegister dst, XMMRegister src);
    void pmovzxbw(XMMRegister dst, Address src);
!   void vpmovzxbw( XMMRegister dst, Address src, int vector_len);
!   void evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len);
! 
!   void evpmovwb(Address dst, XMMRegister src, int vector_len);
!   void evpmovwb(Address dst, KRegister mask, XMMRegister src, int vector_len);
  
  #ifndef _LP64 // no 32bit push/pop on amd64
    void popl(Address dst);
  #endif
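evpmovzxbw and evpmovwb provide masked widening loads (byte to word) and truncating stores (word to byte), the building blocks for Latin-1 to UTF-16 inflation and the reverse compression on AVX-512. A sketch of a masked inflate of up to 32 characters; k2 is assumed to hold one mask bit per word lane, and the register names and dst/dst2 buffers are illustrative:

    // Zero-extend up to 32 bytes at (src) into 32 word lanes of the 512-bit xmm0, under k2.
    __ evpmovzxbw(xmm0, k2, Address(src, 0), Assembler::AVX_512bit);
    // Store the words to the char destination, again under the same lane mask.
    __ evmovdquw(Address(dst, 0), k2, xmm0, Assembler::AVX_512bit);
    // The reverse direction truncates each word lane back to a byte.
    __ evpmovwb(Address(dst2, 0), k2, xmm0, Assembler::AVX_512bit);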
*** 1837,1846 ****
--- 1861,1872 ----
    void vsubsd(XMMRegister dst, XMMRegister nds, Address src);
    void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
    void vsubss(XMMRegister dst, XMMRegister nds, Address src);
    void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  
+   void shlxl(Register dst, Register src1, Register src2);
+   void shlxq(Register dst, Register src1, Register src2);
  
    //====================VECTOR ARITHMETIC=====================================
  
    // Add Packed Floating-Point Values
    void addpd(XMMRegister dst, XMMRegister src);
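shlxl and shlxq are the BMI2 shift forms that take the shift count in a general register and, unlike shl, leave the flags untouched, so they can be scheduled between a compare and its branch. A one-line sketch, assuming the Intel operand order (destination, source, count) and illustrative register names:

    __ shlxq(tmp, value, count);   // tmp := value << (count & 63), flags unchanged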
*** 2071,2083 ****
    // AVX support for vectorized conditional move (double). The following two instructions used only coupled.
    void cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len);
    void vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len);
  
-   void shlxl(Register dst, Register src1, Register src2);
-   void shlxq(Register dst, Register src1, Register src2);
- 
   protected:
    // Next instructions require address alignment 16 bytes SSE mode.
    // They should be called only from corresponding MacroAssembler instructions.
    void andpd(XMMRegister dst, Address src);
    void andps(XMMRegister dst, Address src);
--- 2097,2106 ----