
src/cpu/x86/vm/assembler_x86.hpp


Old version:
 589 
 590 
 591 
 592   // NOTE: The general philosophy of the declarations here is that 64bit versions
 593   // of instructions are freely declared without the need for wrapping them in an ifdef.
 594   // (Some dangerous instructions are ifdef'd out of inappropriate jvms.)
 595   // In the .cpp file the implementations are wrapped so that they are dropped out
 596   // of the resulting jvm. This is done mostly to keep the footprint of MINIMAL
 597   // to the size it was prior to merging up the 32bit and 64bit assemblers.
 598   //
 599   // This does mean you'll get a linker/runtime error if you use a 64bit-only instruction
 600   // in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down.
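
As a sketch of that arrangement (modeled on the usual pattern in
assembler_x86.cpp, not the verbatim source), the header declares a
64bit-only instruction unconditionally while the .cpp guards the body:

  // assembler_x86.hpp -- visible to 32bit and 64bit builds alike:
  void movq(Register dst, Register src);

  // assembler_x86.cpp -- compiled only into 64bit builds, so a 32bit
  // build that reaches this instruction fails at link time:
  #ifdef _LP64
  void Assembler::movq(Register dst, Register src) {
    int encode = prefixq_and_encode(dst->encoding(), src->encoding());
    emit_int8((unsigned char)0x8B);
    emit_int8((unsigned char)(0xC0 | encode));
  }
  #endif // _LP64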
 601 
 602 private:
 603 
 604   bool _legacy_mode_bw;
 605   bool _legacy_mode_dq;
 606   bool _legacy_mode_vl;
 607   bool _legacy_mode_vlbw;
 608   bool _is_managed;

 609 
 610   class InstructionAttr *_attributes;
 611 
 612   // 64bit prefixes
 613   int prefix_and_encode(int reg_enc, bool byteinst = false);
 614   int prefixq_and_encode(int reg_enc);
 615 
 616   int prefix_and_encode(int dst_enc, int src_enc) {
 617     return prefix_and_encode(dst_enc, false, src_enc, false);
 618   }
 619   int prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte);
 620   int prefixq_and_encode(int dst_enc, int src_enc);
 621 
 622   void prefix(Register reg);
 623   void prefix(Register dst, Register src, Prefix p);
 624   void prefix(Register dst, Address adr, Prefix p);
 625   void prefix(Address adr);
 626   void prefixq(Address adr);
 627 
 628   void prefix(Address adr, Register reg,  bool byteinst = false);
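
These helpers emit any needed REX prefix and return the register encodings
for the ModRM byte. A typical two-register emitter uses them as below (a
sketch following the common pattern in assembler_x86.cpp):

  void Assembler::movl(Register dst, Register src) {
    // Emits a REX prefix if either register needs the extension bits and
    // returns the combined reg/rm encoding for the ModRM byte.
    int encode = prefix_and_encode(dst->encoding(), src->encoding());
    emit_int8((unsigned char)0x8B);             // MOV r32, r/m32
    emit_int8((unsigned char)(0xC0 | encode));  // ModRM, register-direct
  }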


 796 
 797   // Decoding
 798   static address locate_operand(address inst, WhichOperand which);
 799   static address locate_next_instruction(address inst);
 800 
 801   // Utilities
 802   static bool is_polling_page_far() NOT_LP64({ return false;});
 803   static bool query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len,
 804                                          int cur_tuple_type, int in_size_in_bits, int cur_encoding);
 805 
 806   // Generic instructions
 807   // These do 32bit or 64bit as needed for the platform. In some sense they
 808   // belong in the macro assembler, but there is no need for both varieties to exist.
 809 
 810   void init_attributes(void) {
 811     _legacy_mode_bw = (VM_Version::supports_avx512bw() == false);
 812     _legacy_mode_dq = (VM_Version::supports_avx512dq() == false);
 813     _legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
 814     _legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
 815     _is_managed = false;

 816     _attributes = NULL;
 817   }
 818 
 819   void set_attributes(InstructionAttr *attributes) { _attributes = attributes; }
 820   void clear_attributes(void) { _attributes = NULL; }
 821 
 822   void set_managed(void) { _is_managed = true; }
 823   void clear_managed(void) { _is_managed = false; }
 824   bool is_managed(void) { return _is_managed; }
 825 






 826   void lea(Register dst, Address src);
 827 
 828   void mov(Register dst, Register src);
 829 
 830   void pusha();
 831   void popa();
 832 
 833   void pushf();
 834   void popf();
 835 
 836   void push(int32_t imm32);
 837 
 838   void push(Register src);
 839 
 840   void pop(Register dst);
 841 
 842   // These are dummies to prevent surprise implicit conversions to Register
 843   void push(void* v);
 844   void pop(void* v);
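
An illustration of what the dummies guard against (hypothetical caller):

  // void* p = compute_something();
  // push(p);   // resolves to push(void*), which is deliberately left
  //            // undefined, so the mistake surfaces at build time instead
  //            // of quietly materializing a Register from a pointer.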
 845 


1337   void kmovbl(KRegister dst, Register src);
1338   void kmovbl(Register dst, KRegister src);
1339   void kmovwl(KRegister dst, Register src);
1340   void kmovwl(KRegister dst, Address src);
1341   void kmovwl(Register dst, KRegister src);
1342   void kmovdl(KRegister dst, Register src);
1343   void kmovdl(Register dst, KRegister src);
1344   void kmovql(KRegister dst, KRegister src);
1345   void kmovql(Address dst, KRegister src);
1346   void kmovql(KRegister dst, Address src);
1347   void kmovql(KRegister dst, Register src);
1348   void kmovql(Register dst, KRegister src);
1349 
1350   void knotwl(KRegister dst, KRegister src);
1351 
1352   void kortestbl(KRegister dst, KRegister src);
1353   void kortestwl(KRegister dst, KRegister src);
1354   void kortestdl(KRegister dst, KRegister src);
1355   void kortestql(KRegister dst, KRegister src);
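
A sketch of typical opmask use (registers, labels, and the customary __
assembler shorthand are illustrative): load a mask from a GPR, then
OR-test it to set flags for a branch.

  __ kmovql(k1, rax);      // k1 <- 64bit mask held in rax
  __ kortestql(k1, k1);    // ZF is set iff every bit of k1 is zero
  __ jcc(Assembler::notZero, has_set_bits);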
1356 


1357   void movdl(XMMRegister dst, Register src);
1358   void movdl(Register dst, XMMRegister src);
1359   void movdl(XMMRegister dst, Address src);
1360   void movdl(Address dst, XMMRegister src);
1361 
1362   // Move Double Quadword
1363   void movdq(XMMRegister dst, Register src);
1364   void movdq(Register dst, XMMRegister src);
1365 
1366   // Move Aligned Double Quadword
1367   void movdqa(XMMRegister dst, XMMRegister src);
1368   void movdqa(XMMRegister dst, Address src);
1369 
1370   // Move Unaligned Double Quadword
1371   void movdqu(Address     dst, XMMRegister src);
1372   void movdqu(XMMRegister dst, Address src);
1373   void movdqu(XMMRegister dst, XMMRegister src);
1374 
1375   // Move Unaligned 256bit Vector
1376   void vmovdqu(Address dst, XMMRegister src);
1377   void vmovdqu(XMMRegister dst, Address src);
1378   void vmovdqu(XMMRegister dst, XMMRegister src);
1379 
1380   // Move Unaligned 512bit Vector
1381   void evmovdqub(Address dst, XMMRegister src, int vector_len);
1382   void evmovdqub(XMMRegister dst, Address src, int vector_len);
1383   void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len);

1384   void evmovdquw(Address dst, XMMRegister src, int vector_len);
1385   void evmovdquw(XMMRegister dst, Address src, int vector_len);
1386   void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len);
1387   void evmovdqul(Address dst, XMMRegister src, int vector_len);
1388   void evmovdqul(XMMRegister dst, Address src, int vector_len);
1389   void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len);
1390   void evmovdquq(Address dst, XMMRegister src, int vector_len);
1391   void evmovdquq(XMMRegister dst, Address src, int vector_len);
1392   void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len);
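
For example, an unaligned 64-byte copy through a ZMM register (named via
its XMM alias, as these declarations do) could be emitted as follows
(addresses and registers illustrative):

  __ evmovdqul(xmm0, Address(rsi, 0), Assembler::AVX_512bit);  // 64-byte load
  __ evmovdqul(Address(rdi, 0), xmm0, Assembler::AVX_512bit);  // 64-byte store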
1393 
1394   // Move lower 64bit to high 64bit in 128bit register
1395   void movlhps(XMMRegister dst, XMMRegister src);
1396 
1397   void movl(Register dst, int32_t imm32);
1398   void movl(Address dst, int32_t imm32);
1399   void movl(Register dst, Register src);
1400   void movl(Register dst, Address src);
1401   void movl(Address dst, Register src);
1402 
1403   // These dummies prevent movl from converting a zero (like NULL) into a Register


1516 
1517   // Pack with unsigned saturation
1518   void packuswb(XMMRegister dst, XMMRegister src);
1519   void packuswb(XMMRegister dst, Address src);
1520   void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1521 
1522   // Permutation of 64bit words
1523   void vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len);
1524   void vpermq(XMMRegister dst, XMMRegister src, int imm8);
1525 
1526   void pause();
1527 
1528   // SSE4.2 string instructions
1529   void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
1530   void pcmpestri(XMMRegister xmm1, Address src, int imm8);
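
pcmpestri compares explicit-length strings under the mode selected by imm8
and leaves the match index in rcx. A sketch of the usual search step (mode
byte, registers, and label illustrative):

  // rax and rdx hold the two explicit lengths the instruction expects.
  __ pcmpestri(xmm1, Address(rdi, 0), 0x0d);  // equal-ordered compare mode
  __ jcc(Assembler::below, found);            // CF is set when a match exists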
1531 
1532   void pcmpeqb(XMMRegister dst, XMMRegister src);
1533   void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1534   void evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1535   void evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len);

1536 
1537   void pcmpeqw(XMMRegister dst, XMMRegister src);
1538   void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1539   void evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1540   void evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1541 
1542   void pcmpeqd(XMMRegister dst, XMMRegister src);
1543   void vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1544   void evpcmpeqd(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1545   void evpcmpeqd(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1546 
1547   void pcmpeqq(XMMRegister dst, XMMRegister src);
1548   void vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1549   void evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1550   void evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1551 
1552   void pmovmskb(Register dst, XMMRegister src);
1553   void vpmovmskb(Register dst, XMMRegister src);
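
Pre-AVX512 code pairs a packed compare with pmovmskb to pull the per-lane
results into a GPR (a sketch; registers and label illustrative):

  __ movdqu(xmm0, Address(rsi, 0));
  __ pcmpeqb(xmm0, xmm1);        // 0xFF in each byte lane that matches
  __ pmovmskb(rax, xmm0);        // gather the 16 byte sign bits into rax
  __ cmpl(rax, 0xFFFF);          // did all 16 lanes match?
  __ jcc(Assembler::notEqual, mismatch);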
1554 
1555   // SSE 4.1 extract


2075 public:
2076   InstructionAttr(
2077     int vector_len,     // The length of vector to be applied in encoding - for both AVX and EVEX
2078     bool rex_vex_w,     // Width of data: false for operands of 32 bits or less, true for 64-bit or specially defined widths
2079     bool legacy_mode,   // If true, the instruction is encoded as AVX or earlier; otherwise EVEX encoding may be chosen
2080     bool no_reg_mask,   // When true, k0 is used if EVEX encoding is chosen; otherwise k1 is used
2081     bool uses_vl)       // This instruction may have legacy constraints based on vector length for EVEX
2082     :
2083       _avx_vector_len(vector_len),
2084       _rex_vex_w(rex_vex_w),
2085       _rex_vex_w_reverted(false),
2086       _legacy_mode(legacy_mode),
2087       _no_reg_mask(no_reg_mask),
2088       _uses_vl(uses_vl),
2089       _tuple_type(Assembler::EVEX_ETUP),
2090       _input_size_in_bits(Assembler::EVEX_NObit),
2091       _is_evex_instruction(false),
2092       _evex_encoding(0),
2093       _is_clear_context(false),
2094       _is_extended_context(false),
2095       _current_assembler(NULL) {

2096     if (UseAVX < 3) _legacy_mode = true;
2097   }
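
Emitters stack-allocate one of these per instruction and hand it down to
the prefix/encode helpers; a representative construction (a sketch of the
usual pattern in assembler_x86.cpp):

  InstructionAttr attributes(AVX_128bit, /* rex_vex_w */ false,
                             /* legacy_mode */ false,
                             /* no_reg_mask */ false,
                             /* uses_vl */ true);
  // ...then passed along, e.g. to simd_prefix_and_encode(..., &attributes)
  // (helper declared elsewhere in the full file, elided here).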
2098 
2099   ~InstructionAttr() {
2100     if (_current_assembler != NULL) {
2101       _current_assembler->clear_attributes();
2102     }
2103     _current_assembler = NULL;
2104   }
2105 
2106 private:
2107   int  _avx_vector_len;
2108   bool _rex_vex_w;
2109   bool _rex_vex_w_reverted;
2110   bool _legacy_mode;
2111   bool _no_reg_mask;
2112   bool _uses_vl;
2113   int  _tuple_type;
2114   int  _input_size_in_bits;
2115   bool _is_evex_instruction;
2116   int  _evex_encoding;
2117   bool _is_clear_context;
2118   bool _is_extended_context;

2119 
2120   Assembler *_current_assembler;
2121 
2122 public:
2123   // query functions for field accessors
2124   int  get_vector_len(void) const { return _avx_vector_len; }
2125   bool is_rex_vex_w(void) const { return _rex_vex_w; }
2126   bool is_rex_vex_w_reverted(void) { return _rex_vex_w_reverted; }
2127   bool is_legacy_mode(void) const { return _legacy_mode; }
2128   bool is_no_reg_mask(void) const { return _no_reg_mask; }
2129   bool uses_vl(void) const { return _uses_vl; }
2130   int  get_tuple_type(void) const { return _tuple_type; }
2131   int  get_input_size(void) const { return _input_size_in_bits; }
2132   bool is_evex_instruction(void) const { return _is_evex_instruction; }
2133   int  get_evex_encoding(void) const { return _evex_encoding; }
2134   bool is_clear_context(void) const { return _is_clear_context; }
2135   bool is_extended_context(void) const { return _is_extended_context; }

2136 
2137   // Set the vector len manually
2138   void set_vector_len(int vector_len) { _avx_vector_len = vector_len; }
2139 
2140   // Set revert rex_vex_w for avx encoding
2141   void set_rex_vex_w_reverted(void) { _rex_vex_w_reverted = true; }
2142 
2143   // Set rex_vex_w based on state
2144   void set_rex_vex_w(bool state) { _rex_vex_w = state; }
2145 
2146   // Set the instruction to be encoded in AVX mode
2147   void set_is_legacy_mode(void) { _legacy_mode = true; }
2148 
2149   // Set the current instruction to be encoded as an EVEX instruction
2150   void set_is_evex_instruction(void) { _is_evex_instruction = true; }
2151 
2152   // Internal encoding data used in compressed immediate offset programming
2153   void set_evex_encoding(int value) { _evex_encoding = value; }
2154 
2155   // Set the EVEX.Z field, used to clear all non-directed XMM/YMM/ZMM components
2156   void set_is_clear_context(void) { _is_clear_context = true; }
2157 
2158   // Map back to the current assembler so that we can manage object-level association
2159   void set_current_assembler(Assembler *current_assembler) { _current_assembler = current_assembler; }
2160 
2161   // Address modifiers used for compressed displacement calculation
2162   void set_address_attributes(int tuple_type, int input_size_in_bits) {
2163     if (VM_Version::supports_evex()) {
2164       _tuple_type = tuple_type;
2165       _input_size_in_bits = input_size_in_bits;
2166     }





2167   }
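
Memory-operand emitters record the tuple type and input size so the EVEX
disp8*N compressed displacement can be computed later; for example (enum
values assumed from the Assembler EVEX tuple/size enums used above):

  attributes.set_address_attributes(/* tuple_type */ Assembler::EVEX_FV,
                                    /* input_size_in_bits */ Assembler::EVEX_32bit);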
2168 
2169 };
2170 
2171 #endif // CPU_X86_VM_ASSEMBLER_X86_HPP



New version:

 589 
 590 
 591 
 592   // NOTE: The general philosophy of the declarations here is that 64bit versions
 593   // of instructions are freely declared without the need for wrapping them in an ifdef.
 594   // (Some dangerous instructions are ifdef'd out of inappropriate jvms.)
 595   // In the .cpp file the implementations are wrapped so that they are dropped out
 596   // of the resulting jvm. This is done mostly to keep the footprint of MINIMAL
 597   // to the size it was prior to merging up the 32bit and 64bit assemblers.
 598   //
 599   // This does mean you'll get a linker/runtime error if you use a 64bit-only instruction
 600   // in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down.
 601 
 602 private:
 603 
 604   bool _legacy_mode_bw;
 605   bool _legacy_mode_dq;
 606   bool _legacy_mode_vl;
 607   bool _legacy_mode_vlbw;
 608   bool _is_managed;
 609   bool _vector_masking;    // For stub code use only
 610 
 611   class InstructionAttr *_attributes;
 612 
 613   // 64bit prefixes
 614   int prefix_and_encode(int reg_enc, bool byteinst = false);
 615   int prefixq_and_encode(int reg_enc);
 616 
 617   int prefix_and_encode(int dst_enc, int src_enc) {
 618     return prefix_and_encode(dst_enc, false, src_enc, false);
 619   }
 620   int prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte);
 621   int prefixq_and_encode(int dst_enc, int src_enc);
 622 
 623   void prefix(Register reg);
 624   void prefix(Register dst, Register src, Prefix p);
 625   void prefix(Register dst, Address adr, Prefix p);
 626   void prefix(Address adr);
 627   void prefixq(Address adr);
 628 
 629   void prefix(Address adr, Register reg,  bool byteinst = false);


 797 
 798   // Decoding
 799   static address locate_operand(address inst, WhichOperand which);
 800   static address locate_next_instruction(address inst);
 801 
 802   // Utilities
 803   static bool is_polling_page_far() NOT_LP64({ return false;});
 804   static bool query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len,
 805                                          int cur_tuple_type, int in_size_in_bits, int cur_encoding);
 806 
 807   // Generic instructions
 808   // These do 32bit or 64bit as needed for the platform. In some sense they
 809   // belong in the macro assembler, but there is no need for both varieties to exist.
 810 
 811   void init_attributes(void) {
 812     _legacy_mode_bw = (VM_Version::supports_avx512bw() == false);
 813     _legacy_mode_dq = (VM_Version::supports_avx512dq() == false);
 814     _legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
 815     _legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
 816     _is_managed = false;
 817     _vector_masking = false;
 818     _attributes = NULL;
 819   }
 820 
 821   void set_attributes(InstructionAttr *attributes) { _attributes = attributes; }
 822   void clear_attributes(void) { _attributes = NULL; }
 823 
 824   void set_managed(void) { _is_managed = true; }
 825   void clear_managed(void) { _is_managed = false; }
 826   bool is_managed(void) { return _is_managed; }
 827 
 828   // Following functions are for stub code use only
 829   void set_vector_masking(void) { _vector_masking = true; }
 830   void clear_vector_masking(void) { _vector_masking = false; }
 831   bool is_vector_masking(void) { return _vector_masking; }
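
A sketch of how stub code brackets a region that programs the k registers
itself (registers illustrative; __ is the customary assembler shorthand):

  __ set_vector_masking();    // a caller-managed mask is live from here on
  __ kmovql(k1, rax);         // program a tail mask computed in rax
  __ evmovdqub(k1, xmm0, Address(rsi, 0), Assembler::AVX_512bit);
  __ clear_vector_masking();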
 832 
 833 
 834   void lea(Register dst, Address src);
 835 
 836   void mov(Register dst, Register src);
 837 
 838   void pusha();
 839   void popa();
 840 
 841   void pushf();
 842   void popf();
 843 
 844   void push(int32_t imm32);
 845 
 846   void push(Register src);
 847 
 848   void pop(Register dst);
 849 
 850   // These are dummies to prevent surprise implicit conversions to Register
 851   void push(void* v);
 852   void pop(void* v);
 853 


1345   void kmovbl(KRegister dst, Register src);
1346   void kmovbl(Register dst, KRegister src);
1347   void kmovwl(KRegister dst, Register src);
1348   void kmovwl(KRegister dst, Address src);
1349   void kmovwl(Register dst, KRegister src);
1350   void kmovdl(KRegister dst, Register src);
1351   void kmovdl(Register dst, KRegister src);
1352   void kmovql(KRegister dst, KRegister src);
1353   void kmovql(Address dst, KRegister src);
1354   void kmovql(KRegister dst, Address src);
1355   void kmovql(KRegister dst, Register src);
1356   void kmovql(Register dst, KRegister src);
1357 
1358   void knotwl(KRegister dst, KRegister src);
1359 
1360   void kortestbl(KRegister dst, KRegister src);
1361   void kortestwl(KRegister dst, KRegister src);
1362   void kortestdl(KRegister dst, KRegister src);
1363   void kortestql(KRegister dst, KRegister src);
1364 
1365   void ktestql(KRegister dst, KRegister src);
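
ktest ANDs the two masks and sets flags without writing a destination; a
sketch of the typical use (registers and label illustrative):

  __ ktestql(k2, k1);                        // ZF = ((k2 & k1) == 0)
  __ jcc(Assembler::notZero, lanes_overlap);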
1366 
1367   void movdl(XMMRegister dst, Register src);
1368   void movdl(Register dst, XMMRegister src);
1369   void movdl(XMMRegister dst, Address src);
1370   void movdl(Address dst, XMMRegister src);
1371 
1372   // Move Double Quadword
1373   void movdq(XMMRegister dst, Register src);
1374   void movdq(Register dst, XMMRegister src);
1375 
1376   // Move Aligned Double Quadword
1377   void movdqa(XMMRegister dst, XMMRegister src);
1378   void movdqa(XMMRegister dst, Address src);
1379 
1380   // Move Unaligned Double Quadword
1381   void movdqu(Address     dst, XMMRegister src);
1382   void movdqu(XMMRegister dst, Address src);
1383   void movdqu(XMMRegister dst, XMMRegister src);
1384 
1385   // Move Unaligned 256bit Vector
1386   void vmovdqu(Address dst, XMMRegister src);
1387   void vmovdqu(XMMRegister dst, Address src);
1388   void vmovdqu(XMMRegister dst, XMMRegister src);
1389 
1390   // Move Unaligned 512bit Vector
1391   void evmovdqub(Address dst, XMMRegister src, int vector_len);
1392   void evmovdqub(XMMRegister dst, Address src, int vector_len);
1393   void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len);
1394   void evmovdqub(KRegister mask, XMMRegister dst, Address src, int vector_len);
1395   void evmovdquw(Address dst, XMMRegister src, int vector_len);
1396   void evmovdquw(XMMRegister dst, Address src, int vector_len);
1397   void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len);
1398   void evmovdqul(Address dst, XMMRegister src, int vector_len);
1399   void evmovdqul(XMMRegister dst, Address src, int vector_len);
1400   void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len);
1401   void evmovdquq(Address dst, XMMRegister src, int vector_len);
1402   void evmovdquq(XMMRegister dst, Address src, int vector_len);
1403   void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len);
1404 
1405   // Move lower 64bit to high 64bit in 128bit register
1406   void movlhps(XMMRegister dst, XMMRegister src);
1407 
1408   void movl(Register dst, int32_t imm32);
1409   void movl(Address dst, int32_t imm32);
1410   void movl(Register dst, Register src);
1411   void movl(Register dst, Address src);
1412   void movl(Address dst, Register src);
1413 
1414   // These dummies prevent movl from converting a zero (like NULL) into a Register


1527 
1528   // Pack with unsigned saturation
1529   void packuswb(XMMRegister dst, XMMRegister src);
1530   void packuswb(XMMRegister dst, Address src);
1531   void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1532 
1533   // Permutation of 64bit words
1534   void vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len);
1535   void vpermq(XMMRegister dst, XMMRegister src, int imm8);
1536 
1537   void pause();
1538 
1539   // SSE4.2 string instructions
1540   void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
1541   void pcmpestri(XMMRegister xmm1, Address src, int imm8);
1542 
1543   void pcmpeqb(XMMRegister dst, XMMRegister src);
1544   void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1545   void evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1546   void evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1547   void evpcmpeqb(KRegister mask, KRegister kdst, XMMRegister nds, Address src, int vector_len);
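
The masked form compares only the lanes enabled in the mask argument; a
sketch of a tail comparison (registers and label illustrative):

  __ evpcmpeqb(k1, k2, xmm1, Address(rdi, 0), Assembler::AVX_512bit);
  __ kortestql(k2, k2);                       // any enabled lane equal?
  __ jcc(Assembler::notZero, some_lane_equal);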
1548 
1549   void pcmpeqw(XMMRegister dst, XMMRegister src);
1550   void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1551   void evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1552   void evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1553 
1554   void pcmpeqd(XMMRegister dst, XMMRegister src);
1555   void vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1556   void evpcmpeqd(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1557   void evpcmpeqd(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1558 
1559   void pcmpeqq(XMMRegister dst, XMMRegister src);
1560   void vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1561   void evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1562   void evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1563 
1564   void pmovmskb(Register dst, XMMRegister src);
1565   void vpmovmskb(Register dst, XMMRegister src);
1566 
1567   // SSE 4.1 extract


2087 public:
2088   InstructionAttr(
2089     int vector_len,     // The length of vector to be applied in encoding - for both AVX and EVEX
2090     bool rex_vex_w,     // Width of data: false for operands of 32 bits or less, true for 64-bit or specially defined widths
2091     bool legacy_mode,   // If true, the instruction is encoded as AVX or earlier; otherwise EVEX encoding may be chosen
2092     bool no_reg_mask,   // When true, k0 is used if EVEX encoding is chosen; otherwise k1 is used
2093     bool uses_vl)       // This instruction may have legacy constraints based on vector length for EVEX
2094     :
2095       _avx_vector_len(vector_len),
2096       _rex_vex_w(rex_vex_w),
2097       _rex_vex_w_reverted(false),
2098       _legacy_mode(legacy_mode),
2099       _no_reg_mask(no_reg_mask),
2100       _uses_vl(uses_vl),
2101       _tuple_type(Assembler::EVEX_ETUP),
2102       _input_size_in_bits(Assembler::EVEX_NObit),
2103       _is_evex_instruction(false),
2104       _evex_encoding(0),
2105       _is_clear_context(false),
2106       _is_extended_context(false),
2107       _current_assembler(NULL),
2108       _embedded_opmask_register_specifier(1) { // hard-code k1 for now; set_embedded_opmask_register_specifier() overrides it per instruction
2109     if (UseAVX < 3) _legacy_mode = true;
2110   }
2111 
2112   ~InstructionAttr() {
2113     if (_current_assembler != NULL) {
2114       _current_assembler->clear_attributes();
2115     }
2116     _current_assembler = NULL;
2117   }
2118 
2119 private:
2120   int  _avx_vector_len;
2121   bool _rex_vex_w;
2122   bool _rex_vex_w_reverted;
2123   bool _legacy_mode;
2124   bool _no_reg_mask;
2125   bool _uses_vl;
2126   int  _tuple_type;
2127   int  _input_size_in_bits;
2128   bool _is_evex_instruction;
2129   int  _evex_encoding;
2130   bool _is_clear_context;
2131   bool _is_extended_context;
2132   int  _embedded_opmask_register_specifier;
2133 
2134   Assembler *_current_assembler;
2135 
2136 public:
2137   // query functions for field accessors
2138   int  get_vector_len(void) const { return _avx_vector_len; }
2139   bool is_rex_vex_w(void) const { return _rex_vex_w; }
2140   bool is_rex_vex_w_reverted(void) { return _rex_vex_w_reverted; }
2141   bool is_legacy_mode(void) const { return _legacy_mode; }
2142   bool is_no_reg_mask(void) const { return _no_reg_mask; }
2143   bool uses_vl(void) const { return _uses_vl; }
2144   int  get_tuple_type(void) const { return _tuple_type; }
2145   int  get_input_size(void) const { return _input_size_in_bits; }
2146   bool is_evex_instruction(void) const { return _is_evex_instruction; }
2147   int  get_evex_encoding(void) const { return _evex_encoding; }
2148   bool is_clear_context(void) const { return _is_clear_context; }
2149   bool is_extended_context(void) const { return _is_extended_context; }
2150   int  get_embedded_opmask_register_specifier(void) const { return _embedded_opmask_register_specifier; }
2151 
2152   // Set the vector len manually
2153   void set_vector_len(int vector_len) { _avx_vector_len = vector_len; }
2154 
2155   // Set revert rex_vex_w for avx encoding
2156   void set_rex_vex_w_reverted(void) { _rex_vex_w_reverted = true; }
2157 
2158   // Set rex_vex_w based on state
2159   void set_rex_vex_w(bool state) { _rex_vex_w = state; }
2160 
2161   // Set the instruction to be encoded in AVX mode
2162   void set_is_legacy_mode(void) { _legacy_mode = true; }
2163 
2164   // Set the current instruction to be encoded as an EVEX instruction
2165   void set_is_evex_instruction(void) { _is_evex_instruction = true; }
2166 
2167   // Internal encoding data used in compressed immediate offset programming
2168   void set_evex_encoding(int value) { _evex_encoding = value; }
2169 
2170   // Set the EVEX.Z field, used to clear all non-directed XMM/YMM/ZMM components
2171   void set_is_clear_context(void) { _is_clear_context = true; }
2172 
2173   // Map back to the current assembler so that we can manage object-level association
2174   void set_current_assembler(Assembler *current_assembler) { _current_assembler = current_assembler; }
2175 
2176   // Address modifiers used for compressed displacement calculation
2177   void set_address_attributes(int tuple_type, int input_size_in_bits) {
2178     if (VM_Version::supports_evex()) {
2179       _tuple_type = tuple_type;
2180       _input_size_in_bits = input_size_in_bits;
2181     }
2182   }
2183 
2184   // Set embedded opmask register specifier.
2185   void set_embedded_opmask_register_specifier(KRegister mask) {
2186     _embedded_opmask_register_specifier = mask->encoding() & 0x7;  // EVEX.aaa holds only 3 bits
2187   }
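
A sketch of how an emitter routes a mask into the EVEX prefix (the low
three bits of the k register become the EVEX.aaa field):

  InstructionAttr attributes(AVX_512bit, /* rex_vex_w */ false,
                             /* legacy_mode */ false,
                             /* no_reg_mask */ false,
                             /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);  // mask is a KRegister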
2188 
2189 };
2190 
2191 #endif // CPU_X86_VM_ASSEMBLER_X86_HPP