< prev index next >

src/cpu/x86/vm/assembler_x86.hpp

Print this page




1318         addl(Address(rsp, offset), 0);// Assert the lock# signal here
1319       }
1320     }
1321   }
1322 
1323   void mfence();
1324 
1325   // Moves
1326 
1327   void mov64(Register dst, int64_t imm64);
1328 
1329   void movb(Address dst, Register src);
1330   void movb(Address dst, int imm8);
1331   void movb(Register dst, Address src);
1332 
1333   void movddup(XMMRegister dst, XMMRegister src);
1334 
1335   void kmovbl(KRegister dst, Register src);
1336   void kmovbl(Register dst, KRegister src);
1337   void kmovwl(KRegister dst, Register src);

1338   void kmovwl(Register dst, KRegister src);
1339   void kmovdl(KRegister dst, Register src);
1340   void kmovdl(Register dst, KRegister src);
1341   void kmovql(KRegister dst, KRegister src);
1342   void kmovql(Address dst, KRegister src);
1343   void kmovql(KRegister dst, Address src);
1344   void kmovql(KRegister dst, Register src);
1345   void kmovql(Register dst, KRegister src);
1346 
1347   void kortestbl(KRegister dst, KRegister src);
1348   void kortestwl(KRegister dst, KRegister src);
1349   void kortestdl(KRegister dst, KRegister src);
1350   void kortestql(KRegister dst, KRegister src);
1351 
1352   void movdl(XMMRegister dst, Register src);
1353   void movdl(Register dst, XMMRegister src);
1354   void movdl(XMMRegister dst, Address src);
1355   void movdl(Address dst, XMMRegister src);
1356 
1357   // Move Double Quadword


2058   void andps(XMMRegister dst, Address src);
2059   void xorpd(XMMRegister dst, Address src);
2060   void xorps(XMMRegister dst, Address src);
2061 
2062 };
2063 
2064 // The Intel x86/Amd64 Assembler attributes: All fields enclosed here are to guide encoding level decisions.
2065 // Specific set functions are for specialized use, else defaults or whatever was supplied to object construction
2066 // are applied.
2067 class InstructionAttr {
2068 public:
2069   InstructionAttr(
2070     int vector_len,     // The length of vector to be applied in encoding - for both AVX and EVEX
2071     bool rex_vex_w,     // Width of data: if 32-bits or less, false, else if 64-bit or specially defined, true
2072     bool legacy_mode,   // Details if either this instruction is conditionally encoded to AVX or earlier if true else possibly EVEX
2073     bool no_reg_mask,   // when true, k0 is used when EVEX encoding is chosen, else k1 is used under the same condition
2074     bool uses_vl)       // This instruction may have legacy constraints based on vector length for EVEX
2075     :
2076       _avx_vector_len(vector_len),
2077       _rex_vex_w(rex_vex_w),

2078       _legacy_mode(legacy_mode),
2079       _no_reg_mask(no_reg_mask),
2080       _uses_vl(uses_vl),
2081       _tuple_type(Assembler::EVEX_ETUP),
2082       _input_size_in_bits(Assembler::EVEX_NObit),
2083       _is_evex_instruction(false),
2084       _evex_encoding(0),
2085       _is_clear_context(false),
2086       _is_extended_context(false),
2087       _current_assembler(NULL) {
2088     if (UseAVX < 3) _legacy_mode = true;
2089   }
2090 
2091   ~InstructionAttr() {
2092     if (_current_assembler != NULL) {
2093       _current_assembler->clear_attributes();
2094     }
2095     _current_assembler = NULL;
2096   }
2097 
2098 private:
2099   int  _avx_vector_len;
2100   bool _rex_vex_w;

2101   bool _legacy_mode;
2102   bool _no_reg_mask;
2103   bool _uses_vl;
2104   int  _tuple_type;
2105   int  _input_size_in_bits;
2106   bool _is_evex_instruction;
2107   int  _evex_encoding;
2108   bool _is_clear_context;
2109   bool _is_extended_context;
2110 
2111   Assembler *_current_assembler;
2112 
2113 public:
2114   // query functions for field accessors
2115   int  get_vector_len(void) const { return _avx_vector_len; }
2116   bool is_rex_vex_w(void) const { return _rex_vex_w; }

2117   bool is_legacy_mode(void) const { return _legacy_mode; }
2118   bool is_no_reg_mask(void) const { return _no_reg_mask; }
2119   bool uses_vl(void) const { return _uses_vl; }
2120   int  get_tuple_type(void) const { return _tuple_type; }
2121   int  get_input_size(void) const { return _input_size_in_bits; }
2122   int  is_evex_instruction(void) const { return _is_evex_instruction; }
2123   int  get_evex_encoding(void) const { return _evex_encoding; }
2124   bool is_clear_context(void) const { return _is_clear_context; }
2125   bool is_extended_context(void) const { return _is_extended_context; }
2126 
2127   // Set the vector len manually
2128   void set_vector_len(int vector_len) { _avx_vector_len = vector_len; }






2129 
2130   // Set the instruction to be encoded in AVX mode
2131   void set_is_legacy_mode(void) { _legacy_mode = true; }
2132 
2133   // Set the current instruction to be encoded as an EVEX instruction
2134   void set_is_evex_instruction(void) { _is_evex_instruction = true; }
2135 
2136   // Internal encoding data used in compressed immediate offset programming
2137   void set_evex_encoding(int value) { _evex_encoding = value; }
2138 
2139   // Set the Evex.Z field to be used to clear all non directed XMM/YMM/ZMM components
2140   void set_is_clear_context(void) { _is_clear_context = true; }
2141 
2142   // Map back to current assembler so that we can manage object level association
2143   void set_current_assembler(Assembler *current_assembler) { _current_assembler = current_assembler; }
2144 
2145   // Address modifiers used for compressed displacement calculation
2146   void set_address_attributes(int tuple_type, int input_size_in_bits) {
2147     if (VM_Version::supports_evex()) {
2148       _tuple_type = tuple_type;


1318         addl(Address(rsp, offset), 0);// Assert the lock# signal here
1319       }
1320     }
1321   }
1322 
1323   void mfence();
1324 
1325   // Moves
1326 
1327   void mov64(Register dst, int64_t imm64);
1328 
1329   void movb(Address dst, Register src);
1330   void movb(Address dst, int imm8);
1331   void movb(Register dst, Address src);
1332 
1333   void movddup(XMMRegister dst, XMMRegister src);
1334 
1335   void kmovbl(KRegister dst, Register src);
1336   void kmovbl(Register dst, KRegister src);
1337   void kmovwl(KRegister dst, Register src);
1338   void kmovwl(KRegister dst, Address src);
1339   void kmovwl(Register dst, KRegister src);
1340   void kmovdl(KRegister dst, Register src);
1341   void kmovdl(Register dst, KRegister src);
1342   void kmovql(KRegister dst, KRegister src);
1343   void kmovql(Address dst, KRegister src);
1344   void kmovql(KRegister dst, Address src);
1345   void kmovql(KRegister dst, Register src);
1346   void kmovql(Register dst, KRegister src);
1347 
1348   void kortestbl(KRegister dst, KRegister src);
1349   void kortestwl(KRegister dst, KRegister src);
1350   void kortestdl(KRegister dst, KRegister src);
1351   void kortestql(KRegister dst, KRegister src);
1352 
1353   void movdl(XMMRegister dst, Register src);
1354   void movdl(Register dst, XMMRegister src);
1355   void movdl(XMMRegister dst, Address src);
1356   void movdl(Address dst, XMMRegister src);
1357 
1358   // Move Double Quadword


2059   void andps(XMMRegister dst, Address src);
2060   void xorpd(XMMRegister dst, Address src);
2061   void xorps(XMMRegister dst, Address src);
2062 
2063 };
2064 
2065 // The Intel x86/Amd64 Assembler attributes: All fields enclosed here are to guide encoding level decisions.
2066 // Specific set functions are for specialized use, else defaults or whatever was supplied to object construction
2067 // are applied.
2068 class InstructionAttr {
2069 public:
2070   InstructionAttr(
2071     int vector_len,     // The length of vector to be applied in encoding - for both AVX and EVEX
2072     bool rex_vex_w,     // Width of data: if 32-bits or less, false, else if 64-bit or specially defined, true
2073     bool legacy_mode,   // Details if either this instruction is conditionally encoded to AVX or earlier if true else possibly EVEX
2074     bool no_reg_mask,   // when true, k0 is used when EVEX encoding is chosen, else k1 is used under the same condition
2075     bool uses_vl)       // This instruction may have legacy constraints based on vector length for EVEX
2076     :
2077       _avx_vector_len(vector_len),
2078       _rex_vex_w(rex_vex_w),
2079       _rex_vex_w_reverted(false),
2080       _legacy_mode(legacy_mode),
2081       _no_reg_mask(no_reg_mask),
2082       _uses_vl(uses_vl),
2083       _tuple_type(Assembler::EVEX_ETUP),
2084       _input_size_in_bits(Assembler::EVEX_NObit),
2085       _is_evex_instruction(false),
2086       _evex_encoding(0),
2087       _is_clear_context(false),
2088       _is_extended_context(false),
2089       _current_assembler(NULL) {
2090     if (UseAVX < 3) _legacy_mode = true;
2091   }
2092 
2093   ~InstructionAttr() {
2094     if (_current_assembler != NULL) {
2095       _current_assembler->clear_attributes();
2096     }
2097     _current_assembler = NULL;
2098   }
2099 
2100 private:
2101   int  _avx_vector_len;
2102   bool _rex_vex_w;
2103   bool _rex_vex_w_reverted;
2104   bool _legacy_mode;
2105   bool _no_reg_mask;
2106   bool _uses_vl;
2107   int  _tuple_type;
2108   int  _input_size_in_bits;
2109   bool _is_evex_instruction;
2110   int  _evex_encoding;
2111   bool _is_clear_context;
2112   bool _is_extended_context;
2113 
2114   Assembler *_current_assembler;
2115 
2116 public:
2117   // query functions for field accessors
2118   int  get_vector_len(void) const { return _avx_vector_len; }
2119   bool is_rex_vex_w(void) const { return _rex_vex_w; }
2120   bool is_rex_vex_w_reverted(void) { return _rex_vex_w_reverted; }
2121   bool is_legacy_mode(void) const { return _legacy_mode; }
2122   bool is_no_reg_mask(void) const { return _no_reg_mask; }
2123   bool uses_vl(void) const { return _uses_vl; }
2124   int  get_tuple_type(void) const { return _tuple_type; }
2125   int  get_input_size(void) const { return _input_size_in_bits; }
2126   int  is_evex_instruction(void) const { return _is_evex_instruction; }
2127   int  get_evex_encoding(void) const { return _evex_encoding; }
2128   bool is_clear_context(void) const { return _is_clear_context; }
2129   bool is_extended_context(void) const { return _is_extended_context; }
2130 
2131   // Set the vector len manually
2132   void set_vector_len(int vector_len) { _avx_vector_len = vector_len; }
2133 
2134   // Set revert rex_vex_w for avx encoding
2135   void set_rex_vex_w_reverted(void) { _rex_vex_w_reverted = true; }
2136 
2137   // Set rex_vex_w based on state
2138   void set_rex_vex_w(bool state) { _rex_vex_w = state; }
2139 
2140   // Set the instruction to be encoded in AVX mode
2141   void set_is_legacy_mode(void) { _legacy_mode = true; }
2142 
2143   // Set the current instruction to be encoded as an EVEX instruction
2144   void set_is_evex_instruction(void) { _is_evex_instruction = true; }
2145 
2146   // Internal encoding data used in compressed immediate offset programming
2147   void set_evex_encoding(int value) { _evex_encoding = value; }
2148 
2149   // Set the Evex.Z field to be used to clear all non directed XMM/YMM/ZMM components
2150   void set_is_clear_context(void) { _is_clear_context = true; }
2151 
2152   // Map back to current assembler so that we can manage object level association
2153   void set_current_assembler(Assembler *current_assembler) { _current_assembler = current_assembler; }
2154 
2155   // Address modifiers used for compressed displacement calculation
2156   void set_address_attributes(int tuple_type, int input_size_in_bits) {
2157     if (VM_Version::supports_evex()) {
2158       _tuple_type = tuple_type;
< prev index next >