src/cpu/x86/vm/assembler_x86.hpp

 589 
 590 
 591 
 592   // NOTE: The general philosophy of the declarations here is that 64bit versions
 593   // of instructions are freely declared without the need to wrap them in an ifdef.
 594   // (Some dangerous instructions are ifdef'd out of inappropriate jvms.)
 595   // In the .cpp file the implementations are wrapped so that they are dropped out
 596   // of the resulting jvm. This is done mostly to keep the footprint of MINIMAL
 597   // to the size it was prior to merging the 32bit and 64bit assemblers.
 598   //
 599   // This does mean you'll get a linker/runtime error if you use a 64bit-only instruction
 600   // in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down.
 601 
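A sketch of the pattern described in the note above, using movq as a representative 64bit-only instruction (paraphrased; the exact body lives in assembler_x86.cpp and may differ in detail):

    // assembler_x86.hpp: declared unconditionally, no ifdef needed
    void movq(Register dst, Register src);

    // assembler_x86.cpp: the implementation sits inside an #ifdef _LP64
    // block, so MINIMAL/32bit builds never carry the code; a 32bit caller
    // fails at link/run time instead.
    #ifdef _LP64
    void Assembler::movq(Register dst, Register src) {
      int encode = prefixq_and_encode(dst->encoding(), src->encoding());
      emit_int8((unsigned char)0x8B);             // MOV r64, r/m64
      emit_int8((unsigned char)(0xC0 | encode));  // ModRM: register-direct
    }
    #endif // _LP64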
 602 private:
 603 
 604   bool _legacy_mode_bw;
 605   bool _legacy_mode_dq;
 606   bool _legacy_mode_vl;
 607   bool _legacy_mode_vlbw;
 608   bool _is_managed;

 609 
 610   class InstructionAttr *_attributes;
 611 
 612   // 64bit prefixes
 613   int prefix_and_encode(int reg_enc, bool byteinst = false);
 614   int prefixq_and_encode(int reg_enc);
 615 
 616   int prefix_and_encode(int dst_enc, int src_enc) {
 617     return prefix_and_encode(dst_enc, false, src_enc, false);
 618   }
 619   int prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte);
 620   int prefixq_and_encode(int dst_enc, int src_enc);
 621 
 622   void prefix(Register reg);
 623   void prefix(Register dst, Register src, Prefix p);
 624   void prefix(Register dst, Address adr, Prefix p);
 625   void prefix(Address adr);
 626   void prefixq(Address adr);
 627 
 628   void prefix(Address adr, Register reg,  bool byteinst = false);


 796 
 797   // Decoding
 798   static address locate_operand(address inst, WhichOperand which);
 799   static address locate_next_instruction(address inst);
 800 
 801   // Utilities
 802   static bool is_polling_page_far() NOT_LP64({ return false;});
 803   static bool query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len,
 804                                          int cur_tuple_type, int in_size_in_bits, int cur_encoding);
 805 
 806   // Generic instructions
 807   // Does 32bit or 64bit as needed for the platform. In some sense these
 808   // belong in the macro assembler, but there is no need for both varieties to exist.
 809 
 810   void init_attributes(void) {
 811     _legacy_mode_bw = (VM_Version::supports_avx512bw() == false);
 812     _legacy_mode_dq = (VM_Version::supports_avx512dq() == false);
 813     _legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
 814     _legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
 815     _is_managed = false;

 816     _attributes = NULL;
 817   }
 818 
 819   void set_attributes(InstructionAttr *attributes) { _attributes = attributes; }
 820   void clear_attributes(void) { _attributes = NULL; }
 821 
 822   void set_managed(void) { _is_managed = true; }
 823   void clear_managed(void) { _is_managed = false; }
 824   bool is_managed(void) { return _is_managed; }
 825 





 826   void lea(Register dst, Address src);
 827 
 828   void mov(Register dst, Register src);
 829 
 830   void pusha();
 831   void popa();
 832 
 833   void pushf();
 834   void popf();
 835 
 836   void push(int32_t imm32);
 837 
 838   void push(Register src);
 839 
 840   void pop(Register dst);
 841 
 842   // These are dummies to prevent surprise implicit conversions to Register
 843   void push(void* v);
 844   void pop(void* v);
 845 


1337   void kmovbl(KRegister dst, Register src);
1338   void kmovbl(Register dst, KRegister src);
1339   void kmovwl(KRegister dst, Register src);
1340   void kmovwl(KRegister dst, Address src);
1341   void kmovwl(Register dst, KRegister src);
1342   void kmovdl(KRegister dst, Register src);
1343   void kmovdl(Register dst, KRegister src);
1344   void kmovql(KRegister dst, KRegister src);
1345   void kmovql(Address dst, KRegister src);
1346   void kmovql(KRegister dst, Address src);
1347   void kmovql(KRegister dst, Register src);
1348   void kmovql(Register dst, KRegister src);
1349 
1350   void knotwl(KRegister dst, KRegister src);
1351 
1352   void kortestbl(KRegister dst, KRegister src);
1353   void kortestwl(KRegister dst, KRegister src);
1354   void kortestdl(KRegister dst, KRegister src);
1355   void kortestql(KRegister dst, KRegister src);
1356 
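The kmov* family moves AVX-512 opmask values between k-registers, general registers, and memory at byte/word/dword/qword granularities, and kortest* ORs two masks and sets flags. A minimal usage sketch (register choices are arbitrary):

    kmovwl(k1, rax);     // load a 16-lane mask computed in rax into k1
    kortestwl(k1, k1);   // ZF = 1 iff the mask is all zeros, CF = 1 iff all ones
    kmovwl(rbx, k1);     // read the mask back into a general register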


1357   void movdl(XMMRegister dst, Register src);
1358   void movdl(Register dst, XMMRegister src);
1359   void movdl(XMMRegister dst, Address src);
1360   void movdl(Address dst, XMMRegister src);
1361 
1362   // Move Double Quadword
1363   void movdq(XMMRegister dst, Register src);
1364   void movdq(Register dst, XMMRegister src);
1365 
1366   // Move Aligned Double Quadword
1367   void movdqa(XMMRegister dst, XMMRegister src);
1368   void movdqa(XMMRegister dst, Address src);
1369 
1370   // Move Unaligned Double Quadword
1371   void movdqu(Address     dst, XMMRegister src);
1372   void movdqu(XMMRegister dst, Address src);
1373   void movdqu(XMMRegister dst, XMMRegister src);
1374 
1375   // Move Unaligned 256bit Vector
1376   void vmovdqu(Address dst, XMMRegister src);
1377   void vmovdqu(XMMRegister dst, Address src);
1378   void vmovdqu(XMMRegister dst, XMMRegister src);
1379 
 1380   // Move Unaligned 512bit Vector
1381   void evmovdqub(Address dst, XMMRegister src, int vector_len);
1382   void evmovdqub(XMMRegister dst, Address src, int vector_len);
1383   void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len);

1384   void evmovdquw(Address dst, XMMRegister src, int vector_len);
1385   void evmovdquw(XMMRegister dst, Address src, int vector_len);
1386   void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len);
1387   void evmovdqul(Address dst, XMMRegister src, int vector_len);
1388   void evmovdqul(XMMRegister dst, Address src, int vector_len);
1389   void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len);
1390   void evmovdquq(Address dst, XMMRegister src, int vector_len);
1391   void evmovdquq(XMMRegister dst, Address src, int vector_len);
1392   void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len);
1393 
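The vector_len argument selects the operating width through the Assembler::AVX_128bit/AVX_256bit/AVX_512bit constants, so a single evmovdqu* entry point serves all three EVEX lengths. For example (registers and addresses are illustrative):

    evmovdqul(xmm0, Address(rsi, 0), Assembler::AVX_512bit);   // 64-byte load
    evmovdqul(Address(rdi, 0), xmm0, Assembler::AVX_512bit);   // 64-byte store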
1394   // Move lower 64bit to high 64bit in 128bit register
1395   void movlhps(XMMRegister dst, XMMRegister src);
1396 
1397   void movl(Register dst, int32_t imm32);
1398   void movl(Address dst, int32_t imm32);
1399   void movl(Register dst, Register src);
1400   void movl(Register dst, Address src);
1401   void movl(Address dst, Register src);
1402 
 1403   // These dummies prevent movl from implicitly converting a zero (like NULL) into a Register


1516 
1517   // Pack with unsigned saturation
1518   void packuswb(XMMRegister dst, XMMRegister src);
1519   void packuswb(XMMRegister dst, Address src);
1520   void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1521 
 1522   // Permutation of 64bit words
1523   void vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len);
1524   void vpermq(XMMRegister dst, XMMRegister src, int imm8);
1525 
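In vpermq the imm8 selects source qwords, two bits per destination lane, so 0x4E (binary 01 00 11 10) swaps the two 128bit halves of a 256bit register:

    vpermq(xmm0, xmm1, 0x4E, Assembler::AVX_256bit);   // dst qwords = src {2, 3, 0, 1}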
1526   void pause();
1527 
1528   // SSE4.2 string instructions
1529   void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
1530   void pcmpestri(XMMRegister xmm1, Address src, int imm8);
1531 
1532   void pcmpeqb(XMMRegister dst, XMMRegister src);
1533   void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1534   void evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1535   void evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len);

1536 
1537   void pcmpeqw(XMMRegister dst, XMMRegister src);
1538   void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1539   void evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1540   void evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1541 
1542   void pcmpeqd(XMMRegister dst, XMMRegister src);
1543   void vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1544   void evpcmpeqd(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1545   void evpcmpeqd(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1546 
1547   void pcmpeqq(XMMRegister dst, XMMRegister src);
1548   void vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1549   void evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1550   void evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1551 
1552   void pmovmskb(Register dst, XMMRegister src);
1553   void vpmovmskb(Register dst, XMMRegister src);
1554 
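These compares pair with the mask-extraction routines: legacy SSE code pulls per-lane results into a general register via pmovmskb, while the EVEX forms write straight into a k-register that kortest can test without touching a GPR. A representative sketch in the style of the string intrinsics (register choices illustrative):

    // SSE path: compare bytes, then extract the lane mask
    pcmpeqb(xmm0, xmm1);
    pmovmskb(rax, xmm0);    // bit i = 1 iff byte lane i matched

    // AVX-512 path: compare directly into k7 and test it
    evpcmpeqb(k7, xmm0, Address(rsi, 0), Assembler::AVX_512bit);
    kortestql(k7, k7);      // ZF = 1 iff no byte matched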
1555   // SSE 4.1 extract


2075 public:
2076   InstructionAttr(
 2077     int vector_len,     // Vector length applied in the encoding - for both AVX and EVEX
 2078     bool rex_vex_w,     // Data width: false for 32-bit or narrower operands, true for 64-bit or specially defined widths
 2079     bool legacy_mode,   // If true, the instruction is encoded as AVX or earlier; otherwise EVEX encoding may be chosen
 2080     bool no_reg_mask,   // When true, k0 is used if EVEX encoding is chosen; otherwise k1 is used
 2081     bool uses_vl)       // The instruction may have legacy constraints based on vector length for EVEX
2082     :
2083       _avx_vector_len(vector_len),
2084       _rex_vex_w(rex_vex_w),
2085       _rex_vex_w_reverted(false),
2086       _legacy_mode(legacy_mode),
2087       _no_reg_mask(no_reg_mask),
2088       _uses_vl(uses_vl),
2089       _tuple_type(Assembler::EVEX_ETUP),
2090       _input_size_in_bits(Assembler::EVEX_NObit),
2091       _is_evex_instruction(false),
2092       _evex_encoding(0),
2093       _is_clear_context(false),
2094       _is_extended_context(false),
2095       _current_assembler(NULL) {

2096     if (UseAVX < 3) _legacy_mode = true;
2097   }
2098 
2099   ~InstructionAttr() {
2100     if (_current_assembler != NULL) {
2101       _current_assembler->clear_attributes();
2102     }
2103     _current_assembler = NULL;
2104   }
2105 
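A typical emitter stack-allocates one InstructionAttr per instruction, feeding the feature flags captured by init_attributes() into the encoding decision; the destructor above then detaches it from the assembler. Paraphrased from the pcmpeqb implementation in the .cpp (close to, though not guaranteed identical with, the real body):

    void Assembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
      assert(VM_Version::supports_sse2(), "");
      InstructionAttr attributes(AVX_128bit, /* rex_w */ false,
                                 /* legacy_mode */ _legacy_mode_bw,
                                 /* no_reg_mask */ true, /* uses_vl */ false);
      int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66,
                                          VEX_OPCODE_0F, &attributes);
      emit_int8(0x74);                            // PCMPEQB opcode
      emit_int8((unsigned char)(0xC0 | encode));  // ModRM: register-direct
    }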
2106 private:
2107   int  _avx_vector_len;
2108   bool _rex_vex_w;
2109   bool _rex_vex_w_reverted;
2110   bool _legacy_mode;
2111   bool _no_reg_mask;
2112   bool _uses_vl;
2113   int  _tuple_type;
2114   int  _input_size_in_bits;
2115   bool _is_evex_instruction;
2116   int  _evex_encoding;
2117   bool _is_clear_context;
2118   bool _is_extended_context;

2119 
2120   Assembler *_current_assembler;
2121 
2122 public:
2123   // query functions for field accessors
2124   int  get_vector_len(void) const { return _avx_vector_len; }
2125   bool is_rex_vex_w(void) const { return _rex_vex_w; }
 2126   bool is_rex_vex_w_reverted(void) const { return _rex_vex_w_reverted; }
2127   bool is_legacy_mode(void) const { return _legacy_mode; }
2128   bool is_no_reg_mask(void) const { return _no_reg_mask; }
2129   bool uses_vl(void) const { return _uses_vl; }
2130   int  get_tuple_type(void) const { return _tuple_type; }
2131   int  get_input_size(void) const { return _input_size_in_bits; }
 2132   bool is_evex_instruction(void) const { return _is_evex_instruction; }
2133   int  get_evex_encoding(void) const { return _evex_encoding; }
2134   bool is_clear_context(void) const { return _is_clear_context; }
2135   bool is_extended_context(void) const { return _is_extended_context; }
2136 
2137   // Set the vector len manually
2138   void set_vector_len(int vector_len) { _avx_vector_len = vector_len; }


2147   void set_is_legacy_mode(void) { _legacy_mode = true; }
2148 
 2149   // Set the current instruction to be encoded as an EVEX instruction
2150   void set_is_evex_instruction(void) { _is_evex_instruction = true; }
2151 
2152   // Internal encoding data used in compressed immediate offset programming
2153   void set_evex_encoding(int value) { _evex_encoding = value; }
2154 
 2155   // Set the EVEX.Z field, used to clear all non-directed XMM/YMM/ZMM components
2156   void set_is_clear_context(void) { _is_clear_context = true; }
2157 
 2158   // Map back to the current assembler so that we can manage object-level association
2159   void set_current_assembler(Assembler *current_assembler) { _current_assembler = current_assembler; }
2160 
2161   // Address modifiers used for compressed displacement calculation
2162   void set_address_attributes(int tuple_type, int input_size_in_bits) {
2163     if (VM_Version::supports_evex()) {
2164       _tuple_type = tuple_type;
2165       _input_size_in_bits = input_size_in_bits;
2166     }





2167   }
2168 
2169 };
2170 
2171 #endif // CPU_X86_VM_ASSEMBLER_X86_HPP


 589 
 590 
 591 
 592   // NOTE: The general philosophy of the declarations here is that 64bit versions
 593   // of instructions are freely declared without the need to wrap them in an ifdef.
 594   // (Some dangerous instructions are ifdef'd out of inappropriate jvms.)
 595   // In the .cpp file the implementations are wrapped so that they are dropped out
 596   // of the resulting jvm. This is done mostly to keep the footprint of MINIMAL
 597   // to the size it was prior to merging the 32bit and 64bit assemblers.
 598   //
 599   // This does mean you'll get a linker/runtime error if you use a 64bit-only instruction
 600   // in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down.
 601 
 602 private:
 603 
 604   bool _legacy_mode_bw;
 605   bool _legacy_mode_dq;
 606   bool _legacy_mode_vl;
 607   bool _legacy_mode_vlbw;
 608   bool _is_managed;
 609   bool _programmed_mask_reg;
 610 
 611   class InstructionAttr *_attributes;
 612 
 613   // 64bit prefixes
 614   int prefix_and_encode(int reg_enc, bool byteinst = false);
 615   int prefixq_and_encode(int reg_enc);
 616 
 617   int prefix_and_encode(int dst_enc, int src_enc) {
 618     return prefix_and_encode(dst_enc, false, src_enc, false);
 619   }
 620   int prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte);
 621   int prefixq_and_encode(int dst_enc, int src_enc);
 622 
 623   void prefix(Register reg);
 624   void prefix(Register dst, Register src, Prefix p);
 625   void prefix(Register dst, Address adr, Prefix p);
 626   void prefix(Address adr);
 627   void prefixq(Address adr);
 628 
 629   void prefix(Address adr, Register reg,  bool byteinst = false);


 797 
 798   // Decoding
 799   static address locate_operand(address inst, WhichOperand which);
 800   static address locate_next_instruction(address inst);
 801 
 802   // Utilities
 803   static bool is_polling_page_far() NOT_LP64({ return false;});
 804   static bool query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len,
 805                                          int cur_tuple_type, int in_size_in_bits, int cur_encoding);
 806 
 807   // Generic instructions
 808   // Does 32bit or 64bit as needed for the platform. In some sense these
 809   // belong in the macro assembler, but there is no need for both varieties to exist.
 810 
 811   void init_attributes(void) {
 812     _legacy_mode_bw = (VM_Version::supports_avx512bw() == false);
 813     _legacy_mode_dq = (VM_Version::supports_avx512dq() == false);
 814     _legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
 815     _legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
 816     _is_managed = false;
 817     _programmed_mask_reg = false;
 818     _attributes = NULL;
 819   }
 820 
 821   void set_attributes(InstructionAttr *attributes) { _attributes = attributes; }
 822   void clear_attributes(void) { _attributes = NULL; }
 823 
 824   void set_managed(void) { _is_managed = true; }
 825   void clear_managed(void) { _is_managed = false; }
 826   bool is_managed(void) { return _is_managed; }
 827 
 828   void set_programmed_mask_reg(void) { _programmed_mask_reg = true; }
 829   void clear_programmed_mask_reg(void) { _programmed_mask_reg = false; }
 830   bool is_programmed_mask_reg(void) { return _programmed_mask_reg; }
 831 
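The new _programmed_mask_reg flag records that a k-register other than the default has been programmed, so surrounding code can restore the default assumption afterwards. A hypothetical call site (the protocol shown is illustrative, not taken from this change):

    set_programmed_mask_reg();      // a custom opmask is now live
    // ... emit instructions predicated on that k-register ...
    clear_programmed_mask_reg();    // back to the default mask assumption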
 832 
 833   void lea(Register dst, Address src);
 834 
 835   void mov(Register dst, Register src);
 836 
 837   void pusha();
 838   void popa();
 839 
 840   void pushf();
 841   void popf();
 842 
 843   void push(int32_t imm32);
 844 
 845   void push(Register src);
 846 
 847   void pop(Register dst);
 848 
 849   // These are dummies to prevent surprise implicit conversions to Register
 850   void push(void* v);
 851   void pop(void* v);
 852 


1344   void kmovbl(KRegister dst, Register src);
1345   void kmovbl(Register dst, KRegister src);
1346   void kmovwl(KRegister dst, Register src);
1347   void kmovwl(KRegister dst, Address src);
1348   void kmovwl(Register dst, KRegister src);
1349   void kmovdl(KRegister dst, Register src);
1350   void kmovdl(Register dst, KRegister src);
1351   void kmovql(KRegister dst, KRegister src);
1352   void kmovql(Address dst, KRegister src);
1353   void kmovql(KRegister dst, Address src);
1354   void kmovql(KRegister dst, Register src);
1355   void kmovql(Register dst, KRegister src);
1356 
1357   void knotwl(KRegister dst, KRegister src);
1358 
1359   void kortestbl(KRegister dst, KRegister src);
1360   void kortestwl(KRegister dst, KRegister src);
1361   void kortestdl(KRegister dst, KRegister src);
1362   void kortestql(KRegister dst, KRegister src);
1363 
1364   void ktestql(KRegister dst, KRegister src);
1365 
1366   void movdl(XMMRegister dst, Register src);
1367   void movdl(Register dst, XMMRegister src);
1368   void movdl(XMMRegister dst, Address src);
1369   void movdl(Address dst, XMMRegister src);
1370 
1371   // Move Double Quadword
1372   void movdq(XMMRegister dst, Register src);
1373   void movdq(Register dst, XMMRegister src);
1374 
1375   // Move Aligned Double Quadword
1376   void movdqa(XMMRegister dst, XMMRegister src);
1377   void movdqa(XMMRegister dst, Address src);
1378 
1379   // Move Unaligned Double Quadword
1380   void movdqu(Address     dst, XMMRegister src);
1381   void movdqu(XMMRegister dst, Address src);
1382   void movdqu(XMMRegister dst, XMMRegister src);
1383 
1384   // Move Unaligned 256bit Vector
1385   void vmovdqu(Address dst, XMMRegister src);
1386   void vmovdqu(XMMRegister dst, Address src);
1387   void vmovdqu(XMMRegister dst, XMMRegister src);
1388 
 1389   // Move Unaligned 512bit Vector
1390   void evmovdqub(Address dst, XMMRegister src, int vector_len);
1391   void evmovdqub(XMMRegister dst, Address src, int vector_len);
1392   void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len);
1393   void evmovdqub(KRegister mask, bool zeroing, XMMRegister dst, Address src, int vector_len);
1394   void evmovdquw(Address dst, XMMRegister src, int vector_len);
1395   void evmovdquw(XMMRegister dst, Address src, int vector_len);
1396   void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len);
1397   void evmovdqul(Address dst, XMMRegister src, int vector_len);
1398   void evmovdqul(XMMRegister dst, Address src, int vector_len);
1399   void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len);
1400   void evmovdquq(Address dst, XMMRegister src, int vector_len);
1401   void evmovdquq(XMMRegister dst, Address src, int vector_len);
1402   void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len);
1403 
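The new masked evmovdqub overload above takes an explicit opmask and a zeroing flag, the standard AVX-512 idiom for loop tails: with one mask bit per remaining byte, a zero-masked load makes lanes past the tail read as zero. Sketch (the mask computation is elided; rbx is assumed to already hold the tail mask):

    kmovql(k2, rbx);    // rbx = (1 << remaining_bytes) - 1, computed earlier
    evmovdqub(k2, /* zeroing */ true, xmm0, Address(rsi, 0), Assembler::AVX_512bit);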
1404   // Move lower 64bit to high 64bit in 128bit register
1405   void movlhps(XMMRegister dst, XMMRegister src);
1406 
1407   void movl(Register dst, int32_t imm32);
1408   void movl(Address dst, int32_t imm32);
1409   void movl(Register dst, Register src);
1410   void movl(Register dst, Address src);
1411   void movl(Address dst, Register src);
1412 
 1413   // These dummies prevent movl from implicitly converting a zero (like NULL) into a Register


1526 
1527   // Pack with unsigned saturation
1528   void packuswb(XMMRegister dst, XMMRegister src);
1529   void packuswb(XMMRegister dst, Address src);
1530   void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1531 
 1532   // Permutation of 64bit words
1533   void vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len);
1534   void vpermq(XMMRegister dst, XMMRegister src, int imm8);
1535 
1536   void pause();
1537 
1538   // SSE4.2 string instructions
1539   void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
1540   void pcmpestri(XMMRegister xmm1, Address src, int imm8);
1541 
1542   void pcmpeqb(XMMRegister dst, XMMRegister src);
1543   void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1544   void evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1545   void evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1546   void evpcmpeqb(KRegister mask, bool zeroing, KRegister kdst, XMMRegister nds, Address src, int vector_len);
1547 
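Likewise, the masked evpcmpeqb restricts the comparison to the lanes selected by mask and writes per-lane results into kdst, so partial vectors can be compared without reading past the tail. Sketch, continuing with the tail mask in k2:

    evpcmpeqb(k2, /* zeroing */ true, k7, xmm0, Address(rdi, 0), Assembler::AVX_512bit);
    kortestql(k7, k7);   // ZF = 1 iff no byte within the tail matched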
1548   void pcmpeqw(XMMRegister dst, XMMRegister src);
1549   void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1550   void evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1551   void evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1552 
1553   void pcmpeqd(XMMRegister dst, XMMRegister src);
1554   void vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1555   void evpcmpeqd(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1556   void evpcmpeqd(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1557 
1558   void pcmpeqq(XMMRegister dst, XMMRegister src);
1559   void vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1560   void evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1561   void evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1562 
1563   void pmovmskb(Register dst, XMMRegister src);
1564   void vpmovmskb(Register dst, XMMRegister src);
1565 
1566   // SSE 4.1 extract


2086 public:
2087   InstructionAttr(
 2088     int vector_len,     // Vector length applied in the encoding - for both AVX and EVEX
 2089     bool rex_vex_w,     // Data width: false for 32-bit or narrower operands, true for 64-bit or specially defined widths
 2090     bool legacy_mode,   // If true, the instruction is encoded as AVX or earlier; otherwise EVEX encoding may be chosen
 2091     bool no_reg_mask,   // When true, k0 is used if EVEX encoding is chosen; otherwise k1 is used
 2092     bool uses_vl)       // The instruction may have legacy constraints based on vector length for EVEX
2093     :
2094       _avx_vector_len(vector_len),
2095       _rex_vex_w(rex_vex_w),
2096       _rex_vex_w_reverted(false),
2097       _legacy_mode(legacy_mode),
2098       _no_reg_mask(no_reg_mask),
2099       _uses_vl(uses_vl),
2100       _tuple_type(Assembler::EVEX_ETUP),
2101       _input_size_in_bits(Assembler::EVEX_NObit),
2102       _is_evex_instruction(false),
2103       _evex_encoding(0),
2104       _is_clear_context(false),
2105       _is_extended_context(false),
 2106       _embedded_opmask_register_specifier(1), // default to k1; overridden via set_embedded_opmask_register_specifier()
 2107       _current_assembler(NULL) {
2108     if (UseAVX < 3) _legacy_mode = true;
2109   }
2110 
2111   ~InstructionAttr() {
2112     if (_current_assembler != NULL) {
2113       _current_assembler->clear_attributes();
2114     }
2115     _current_assembler = NULL;
2116   }
2117 
2118 private:
2119   int  _avx_vector_len;
2120   bool _rex_vex_w;
2121   bool _rex_vex_w_reverted;
2122   bool _legacy_mode;
2123   bool _no_reg_mask;
2124   bool _uses_vl;
2125   int  _tuple_type;
2126   int  _input_size_in_bits;
2127   bool _is_evex_instruction;
2128   int  _evex_encoding;
2129   bool _is_clear_context;
2130   bool _is_extended_context;
 2131   int  _embedded_opmask_register_specifier;
2132 
2133   Assembler *_current_assembler;
2134 
2135 public:
2136   // query functions for field accessors
2137   int  get_vector_len(void) const { return _avx_vector_len; }
2138   bool is_rex_vex_w(void) const { return _rex_vex_w; }
 2139   bool is_rex_vex_w_reverted(void) const { return _rex_vex_w_reverted; }
2140   bool is_legacy_mode(void) const { return _legacy_mode; }
2141   bool is_no_reg_mask(void) const { return _no_reg_mask; }
2142   bool uses_vl(void) const { return _uses_vl; }
2143   int  get_tuple_type(void) const { return _tuple_type; }
2144   int  get_input_size(void) const { return _input_size_in_bits; }
 2145   bool is_evex_instruction(void) const { return _is_evex_instruction; }
2146   int  get_evex_encoding(void) const { return _evex_encoding; }
2147   bool is_clear_context(void) const { return _is_clear_context; }
2148   bool is_extended_context(void) const { return _is_extended_context; }
2149 
2150   // Set the vector len manually
2151   void set_vector_len(int vector_len) { _avx_vector_len = vector_len; }


2160   void set_is_legacy_mode(void) { _legacy_mode = true; }
2161 
 2162   // Set the current instruction to be encoded as an EVEX instruction
2163   void set_is_evex_instruction(void) { _is_evex_instruction = true; }
2164 
2165   // Internal encoding data used in compressed immediate offset programming
2166   void set_evex_encoding(int value) { _evex_encoding = value; }
2167 
2168   // Set the Evex.Z field to be used to clear all non directed XMM/YMM/ZMM components
2169   void set_is_clear_context(void) { _is_clear_context = true; }
2170 
2171   // Map back to current asembler so that we can manage object level assocation
2172   void set_current_assembler(Assembler *current_assembler) { _current_assembler = current_assembler; }
2173 
2174   // Address modifiers used for compressed displacement calculation
2175   void set_address_attributes(int tuple_type, int input_size_in_bits) {
2176     if (VM_Version::supports_evex()) {
2177       _tuple_type = tuple_type;
2178       _input_size_in_bits = input_size_in_bits;
2179     }
2180   }
2181 
2182   // Set embedded opmask register specifier.
2183   void set_embedded_opmask_register_specifier(KRegister mask) {
 2184     _embedded_opmask_register_specifier = mask->encoding() & 0x7;
2185   }
2186 
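The specifier is the 3-bit EVEX.aaa field, which names the opmask register (k0-k7) that predicates an EVEX instruction; masking encoding() with 0x7 keeps it in range, and the constructor's default of 1 selects k1. A hedged construction sketch:

    InstructionAttr attributes(Assembler::AVX_512bit, /* rex_w */ false,
                               /* legacy_mode */ false, /* no_reg_mask */ false,
                               /* uses_vl */ true);
    attributes.set_embedded_opmask_register_specifier(k2);   // EVEX.aaa = 2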
2187 };
2188 
2189 #endif // CPU_X86_VM_ASSEMBLER_X86_HPP