
src/cpu/x86/vm/assembler_x86.hpp


*** 585,594 **** --- 585,604 ----
      narrow_oop_operand = 3,          // embedded 32-bit immediate narrow oop
      _WhichOperand_limit = 4
  #endif
    };

+   enum ComparisonPredicate {
+     eq = 0,
+     lt = 1,
+     le = 2,
+     _false = 3,
+     neq = 4,
+     nlt = 5,
+     nle = 6,
+     _true = 7
+   };

    // NOTE: The general philopsophy of the declarations here is that 64bit versions
    // of instructions are freely declared without the need for wrapping them an ifdef.
    // (Some dangerous instructions are ifdef's out of inappropriate jvm's.)
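The new ComparisonPredicate values mirror the 3-bit immediate predicate field of the EVEX packed-compare instructions, so callers can pass the predicate symbolically instead of a raw imm8. A minimal sketch of how a caller might combine it with the evpcmpuw overloads declared further down in this patch (the `__` shorthand for the assembler, the register choices, and AVX_512bit are illustrative, not taken from this change):

    // Sketch: k1[i] = (unsigned 16-bit lane i of xmm0) <= (lane i of xmm1).
    // Assembler::le corresponds to imm8 == 2 in the emitted EVEX compare.
    __ evpcmpuw(k1, xmm0, xmm1, Assembler::le, Assembler::AVX_512bit);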
*** 604,613 **** --- 614,624 ----
    bool _legacy_mode_bw;
    bool _legacy_mode_dq;
    bool _legacy_mode_vl;
    bool _legacy_mode_vlbw;
    bool _is_managed;
+   bool _vector_masking;    // For stub code use only

    class InstructionAttr *_attributes;

    // 64bit prefixes
    int prefix_and_encode(int reg_enc, bool byteinst = false);
*** 811,830 **** --- 822,847 ----
      _legacy_mode_bw = (VM_Version::supports_avx512bw() == false);
      _legacy_mode_dq = (VM_Version::supports_avx512dq() == false);
      _legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
      _legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
      _is_managed = false;
+     _vector_masking = false;
      _attributes = NULL;
    }

    void set_attributes(InstructionAttr *attributes) { _attributes = attributes; }
    void clear_attributes(void) { _attributes = NULL; }

    void set_managed(void) { _is_managed = true; }
    void clear_managed(void) { _is_managed = false; }
    bool is_managed(void) { return _is_managed; }

+   // Following functions are for stub code use only
+   void set_vector_masking(void) { _vector_masking = true; }
+   void clear_vector_masking(void) { _vector_masking = false; }
+   bool is_vector_masking(void) { return _vector_masking; }
+
    void lea(Register dst, Address src);

    void mov(Register dst, Register src);

    void pusha();
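The _vector_masking flag gives stub generators a way to tell the assembler that the surrounding code runs under an explicit opmask rather than the default all-ones behavior. A hedged sketch of the intended bracketing pattern in a stub generator (the contents of the bracketed region are placeholders):

    // Sketch: opt in to opmask-aware EVEX emission for a masked tail-processing sequence.
    __ set_vector_masking();     // subsequent EVEX code uses a real KRegister mask
    // ... emit masked evmovdqu / evpcmp / evpmov instructions here ...
    __ clear_vector_masking();   // restore the default (unmasked) assumption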
*** 1347,1361 **** --- 1364,1382 ----
    void kmovql(KRegister dst, Register src);
    void kmovql(Register dst, KRegister src);

    void knotwl(KRegister dst, KRegister src);

+   void kortestbl(KRegister dst, KRegister src);
    void kortestwl(KRegister dst, KRegister src);
    void kortestdl(KRegister dst, KRegister src);
    void kortestql(KRegister dst, KRegister src);

+   void ktestq(KRegister src1, KRegister src2);
+   void ktestd(KRegister src1, KRegister src2);
+
    void movdl(XMMRegister dst, Register src);
    void movdl(Register dst, XMMRegister src);
    void movdl(XMMRegister dst, Address src);
    void movdl(Address dst, XMMRegister src);
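kortest ORs two mask registers and sets the flags, and ktest performs the analogous AND-based test; either is the usual way to branch on the outcome of an EVEX compare without moving the mask into a general-purpose register. A sketch of the common compare-then-branch idiom (registers, address, and label are illustrative):

    Label L_found;
    __ evpcmpeqb(k2, xmm0, Address(rsi, 0), Assembler::AVX_512bit);  // k2[i] = (byte lane i matches)
    __ kortestql(k2, k2);                                            // ZF set iff k2 == 0
    __ jcc(Assembler::notZero, L_found);                             // at least one lane matched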
*** 1380,1391 ****
    // Move Unaligned 512bit Vector
    void evmovdqub(Address dst, XMMRegister src, int vector_len);
    void evmovdqub(XMMRegister dst, Address src, int vector_len);
    void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len);
    void evmovdquw(Address dst, XMMRegister src, int vector_len);
    void evmovdquw(XMMRegister dst, Address src, int vector_len);
!   void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len);
    void evmovdqul(Address dst, XMMRegister src, int vector_len);
    void evmovdqul(XMMRegister dst, Address src, int vector_len);
    void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len);
    void evmovdquq(Address dst, XMMRegister src, int vector_len);
    void evmovdquq(XMMRegister dst, Address src, int vector_len);
--- 1401,1413 ----
    // Move Unaligned 512bit Vector
    void evmovdqub(Address dst, XMMRegister src, int vector_len);
    void evmovdqub(XMMRegister dst, Address src, int vector_len);
    void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len);
    void evmovdquw(Address dst, XMMRegister src, int vector_len);
+   void evmovdquw(Address dst, KRegister mask, XMMRegister src, int vector_len);
    void evmovdquw(XMMRegister dst, Address src, int vector_len);
!   void evmovdquw(XMMRegister dst, KRegister mask, Address src, int vector_len);
    void evmovdqul(Address dst, XMMRegister src, int vector_len);
    void evmovdqul(XMMRegister dst, Address src, int vector_len);
    void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len);
    void evmovdquq(Address dst, XMMRegister src, int vector_len);
    void evmovdquq(XMMRegister dst, Address src, int vector_len);
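The new KRegister overloads of evmovdquw emit masked loads and stores, which is how a partial (tail) vector can be read or written without touching memory outside the mask. A sketch, assuming k1 has already been set to cover the remaining word lanes (registers and addresses are illustrative):

    // Sketch: store only the word lanes selected by k1; other bytes of memory are untouched.
    __ evmovdquw(Address(rdi, 0), k1, xmm0, Assembler::AVX_512bit);
    // Masked load: lanes outside k1 either keep or zero the destination register contents,
    // depending on the merging/zeroing choice made by the implementation.
    __ evmovdquw(xmm1, k1, Address(rsi, 0), Assembler::AVX_512bit);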
*** 1532,1541 **** --- 1554,1570 ----
    void pcmpeqb(XMMRegister dst, XMMRegister src);
    void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
    void evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
    void evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len);
+   void evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len);
+   void evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len);
+
+   void evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len);
+   void evpcmpuw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, ComparisonPredicate of, int vector_len);
+   void evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len);
+
    void pcmpeqw(XMMRegister dst, XMMRegister src);
    void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
    void evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
    void evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len);
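The overloads that take an extra KRegister mask perform the compare only for lanes selected by that write-mask, so a stub can combine a range check with an already-computed tail mask in one instruction. A small sketch (register choices are illustrative):

    // Sketch: k3[i] = (unsigned word i of xmm2 < word i of xmm3), but only where k1[i] is set.
    __ evpcmpuw(k3, k1, xmm2, xmm3, Assembler::lt, Assembler::AVX_512bit);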
*** 1574,1584 ****
    // SSE4.1 packed move
    void pmovzxbw(XMMRegister dst, XMMRegister src);
    void pmovzxbw(XMMRegister dst, Address src);

!   void vpmovzxbw(XMMRegister dst, Address src, int vector_len);

  #ifndef _LP64 // no 32bit push/pop on amd64
    void popl(Address dst);
  #endif
--- 1603,1617 ----
    // SSE4.1 packed move
    void pmovzxbw(XMMRegister dst, XMMRegister src);
    void pmovzxbw(XMMRegister dst, Address src);

!   void vpmovzxbw( XMMRegister dst, Address src, int vector_len);
!   void evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len);
!
!   void evpmovwb(Address dst, XMMRegister src, int vector_len);
!   void evpmovwb(Address dst, KRegister mask, XMMRegister src, int vector_len);

  #ifndef _LP64 // no 32bit push/pop on amd64
    void popl(Address dst);
  #endif
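vpmovzxbw/evpmovzxbw zero-extend packed bytes to words and evpmovwb truncates words back to bytes, the core of byte-to-char and char-to-byte conversion loops; the KRegister forms do the same under a mask for the trailing partial vector. A sketch of one widening/narrowing round trip (addresses and mask are illustrative):

    // Sketch: zero-extend bytes to words, then truncate words back to bytes,
    // both restricted to the lanes selected by k2.
    __ evpmovzxbw(xmm0, k2, Address(rsi, 0), Assembler::AVX_512bit);  // bytes -> words (masked load)
    __ evpmovwb(Address(rdi, 0), k2, xmm0, Assembler::AVX_512bit);    // words -> bytes (masked store)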
*** 1819,1828 **** --- 1852,1863 ----
    void vsubsd(XMMRegister dst, XMMRegister nds, Address src);
    void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
    void vsubss(XMMRegister dst, XMMRegister nds, Address src);
    void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);

+   void shlxl(Register dst, Register src1, Register src2);
+   void shlxq(Register dst, Register src1, Register src2);

    //====================VECTOR ARITHMETIC=====================================

    // Add Packed Floating-Point Values
    void addpd(XMMRegister dst, XMMRegister src);
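shlxl/shlxq are the BMI2 variable shifts: unlike shl with cl, they take the shift count in any register and leave the flags untouched. The declarations move here, and the duplicate declarations further down are removed in the next hunk. A one-line usage sketch (register choices are illustrative):

    __ shlxq(rax, rbx, rcx);   // rax = rbx << (rcx & 63), flags unmodified (BMI2)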
*** 2053,2065 ****
    // AVX support for vectorized conditional move (double). The following two instructions used only coupled.
    void cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len);
    void vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len);

-   void shlxl(Register dst, Register src1, Register src2);
-   void shlxq(Register dst, Register src1, Register src2);
-
   protected:
    // Next instructions require address alignment 16 bytes SSE mode.
    // They should be called only from corresponding MacroAssembler instructions.
    void andpd(XMMRegister dst, Address src);
    void andps(XMMRegister dst, Address src);
--- 2088,2097 ----
*** 2090,2100 ****
      _input_size_in_bits(Assembler::EVEX_NObit),
      _is_evex_instruction(false),
      _evex_encoding(0),
      _is_clear_context(false),
      _is_extended_context(false),
!     _current_assembler(NULL) {
      if (UseAVX < 3) _legacy_mode = true;
    }

    ~InstructionAttr() {
      if (_current_assembler != NULL) {
--- 2122,2133 ----
      _input_size_in_bits(Assembler::EVEX_NObit),
      _is_evex_instruction(false),
      _evex_encoding(0),
      _is_clear_context(false),
      _is_extended_context(false),
!     _current_assembler(NULL),
!     _embedded_opmask_register_specifier(1) { // hard code k1, it will be initialized for now
      if (UseAVX < 3) _legacy_mode = true;
    }

    ~InstructionAttr() {
      if (_current_assembler != NULL) {
*** 2114,2123 **** --- 2147,2157 ----
    int  _input_size_in_bits;
    bool _is_evex_instruction;
    int  _evex_encoding;
    bool _is_clear_context;
    bool _is_extended_context;
+   int  _embedded_opmask_register_specifier;

    Assembler *_current_assembler;

   public:
    // query functions for field accessors
*** 2131,2140 **** --- 2165,2175 ----
    int  get_input_size(void) const { return _input_size_in_bits; }
    int  is_evex_instruction(void) const { return _is_evex_instruction; }
    int  get_evex_encoding(void) const { return _evex_encoding; }
    bool is_clear_context(void) const { return _is_clear_context; }
    bool is_extended_context(void) const { return _is_extended_context; }
+   int  get_embedded_opmask_register_specifier(void) const { return _embedded_opmask_register_specifier; }

    // Set the vector len manually
    void set_vector_len(int vector_len) { _avx_vector_len = vector_len; }

    // Set revert rex_vex_w for avx encoding
*** 2164,2171 **** --- 2199,2211 ----
      _tuple_type = tuple_type;
      _input_size_in_bits = input_size_in_bits;
    }
  }

+   // Set embedded opmask register specifier.
+   void set_embedded_opmask_register_specifier(KRegister mask) {
+     _embedded_opmask_register_specifier = (*mask).encoding() & 0x7;
+   }
+
};

#endif // CPU_X86_VM_ASSEMBLER_X86_HPP
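set_embedded_opmask_register_specifier() records the low three bits of the opmask register in the attribute block so the EVEX prefix emitter can place them in the EVEX.aaa field; the constructor default of 1 corresponds to k1, matching the "hard code k1" comment above. A sketch of how a masked instruction emitter might consume it (not the actual implementation in assembler_x86.cpp, only the intended flow; the attribute constructor arguments shown are assumptions):

    // Sketch: a masked EVEX move forwards its KRegister into the attribute block
    // before emitting the opcode, so the prefix emitter can fold it into EVEX.aaa.
    void Assembler::evmovdquw(Address dst, KRegister mask, XMMRegister src, int vector_len) {
      assert(VM_Version::supports_avx512vlbw(), "requires AVX512BW with VL");
      InstructionAttr attributes(vector_len, /* rex_vex_w */ false, /* legacy_mode */ false,
                                 /* no_mask_reg */ false, /* uses_vl */ true);
      attributes.set_embedded_opmask_register_specifier(mask);   // EVEX.aaa = mask encoding & 0x7
      // ... remaining encoding proceeds as for the unmasked evmovdquw ...
    }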