< prev index next >

src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.amd64/src/org/graalvm/compiler/asm/amd64/AMD64BaseAssembler.java

Print this page
rev 56282 : [mq]: graal


  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 
  25 package org.graalvm.compiler.asm.amd64;
  26 
  27 import static jdk.vm.ci.amd64.AMD64.MASK;
  28 import static jdk.vm.ci.amd64.AMD64.XMM;
  29 import static jdk.vm.ci.amd64.AMD64.r12;
  30 import static jdk.vm.ci.amd64.AMD64.r13;
  31 import static jdk.vm.ci.amd64.AMD64.rbp;
  32 import static jdk.vm.ci.amd64.AMD64.rsp;
  33 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.EVEXPrefixConfig.B0;
  34 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.EVEXPrefixConfig.B1;
  35 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.EVEXPrefixConfig.L512;
  36 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.EVEXPrefixConfig.Z0;
  37 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.EVEXPrefixConfig.Z1;
  38 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.L128;
  39 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.L256;

  40 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.LZ;
  41 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.M_0F;
  42 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.M_0F38;
  43 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.M_0F3A;
  44 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.P_;
  45 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.P_66;
  46 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.P_F2;
  47 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.P_F3;
  48 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.W0;
  49 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.W1;
  50 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.WIG;
  51 import static org.graalvm.compiler.core.common.NumUtil.isByte;
  52 
  53 import org.graalvm.compiler.asm.Assembler;
  54 import org.graalvm.compiler.asm.amd64.AMD64Address.Scale;
  55 import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize;
  56 import org.graalvm.compiler.debug.GraalError;
  57 
  58 import jdk.vm.ci.amd64.AMD64;
  59 import jdk.vm.ci.amd64.AMD64.CPUFeature;


 493 
 494     /**
 495      * Emit the ModR/M byte for one register operand and an opcode extension in the R field.
 496      * <p>
 497      * Format: [ 11 reg r/m ]
 498      */
 499     protected final void emitModRM(int reg, Register rm) {
 500         assert (reg & 0x07) == reg;
 501         emitByte(0xC0 | (reg << 3) | (rm.encoding & 0x07));
 502     }
 503 
    /**
     * Emit the ModR/M byte for two register operands.
     * <p>
     * Format: [ 11 reg r/m ]
     */
    protected final void emitModRM(Register reg, Register rm) {
        // Only the low three bits fit in the reg field; any high (extension) bit is carried by
        // the REX/VEX/EVEX prefix emitted separately.
        emitModRM(reg.encoding & 0x07, rm);
    }
 512 


    /**
     * Emits the ModR/M byte and optionally the SIB byte for one register and one memory operand.
     *
     * @param force4Byte use 4 byte encoding for displacements that would normally fit in a byte
     */
    protected final void emitOperandHelper(Register reg, AMD64Address addr, boolean force4Byte, int additionalInstructionSize) {
        assert !reg.equals(Register.None);
        // Disp8 scale of 1: no compressed-displacement scaling (non-EVEX encoding).
        emitOperandHelper(encode(reg), addr, force4Byte, additionalInstructionSize, 1);
    }
 522 
    /**
     * Emits the ModR/M byte (and SIB byte if needed) for a memory operand with an opcode
     * extension in the R field; never forces a 4-byte displacement.
     */
    protected final void emitOperandHelper(int reg, AMD64Address addr, int additionalInstructionSize) {
        emitOperandHelper(reg, addr, false, additionalInstructionSize, 1);
    }
 526 
    /**
     * Emits the ModR/M byte (and SIB byte if needed) for one register and one memory operand;
     * never forces a 4-byte displacement.
     */
    protected final void emitOperandHelper(Register reg, AMD64Address addr, int additionalInstructionSize) {
        assert !reg.equals(Register.None);
        emitOperandHelper(encode(reg), addr, false, additionalInstructionSize, 1);
    }
 531 
    /**
     * Emits the ModR/M byte (and SIB byte if needed) for an EVEX-encoded instruction, applying
     * the compressed disp8 scaling factor {@code evexDisp8Scale} to one-byte displacements.
     */
    protected final void emitEVEXOperandHelper(Register reg, AMD64Address addr, int additionalInstructionSize, int evexDisp8Scale) {
        assert !reg.equals(Register.None);
        emitOperandHelper(encode(reg), addr, false, additionalInstructionSize, evexDisp8Scale);
    }
 536 
 537     /**
 538      * Emits the ModR/M byte and optionally the SIB byte for one memory operand and an opcode
 539      * extension in the R field.
 540      *
 541      * @param force4Byte use 4 byte encoding for displacements that would normally fit in a byte
 542      * @param additionalInstructionSize the number of bytes that will be emitted after the operand,
 543      *            so that the start position of the next instruction can be computed even though
 544      *            this instruction has not been completely emitted yet.
 545      * @param evexDisp8Scale the scaling factor for computing the compressed displacement of
 546      *            EVEX-encoded instructions. This scaling factor only matters when the emitted
 547      *            instruction uses one-byte-displacement form.
 548      */
 549     private void emitOperandHelper(int reg, AMD64Address addr, boolean force4Byte, int additionalInstructionSize, int evexDisp8Scale) {
 550         assert (reg & 0x07) == reg;
 551         int regenc = reg << 3;
 552 


 722             assert (!nds.isValid()) || nds.equals(dst) || nds.equals(src);
 723             if (sizePrefix > 0) {
 724                 emitByte(sizePrefix);
 725             }
 726             if (isRexW) {
 727                 prefixq(dst, src);
 728             } else {
 729                 prefix(dst, src);
 730             }
 731             if (opcodeEscapePrefix > 0xFF) {
 732                 emitShort(opcodeEscapePrefix);
 733             } else if (opcodeEscapePrefix > 0) {
 734                 emitByte(opcodeEscapePrefix);
 735             }
 736         }
 737     }
 738 
    /**
     * Symbolic constants for the fields of a VEX prefix: vector length (L), operand-size bit (W),
     * implied SIMD prefix (pp) and opcode escape map (mmmmm).
     */
    public static final class VEXPrefixConfig {
        // Vector length (L) field values.
        public static final int L128 = 0;
        public static final int L256 = 1;
        // LZ: length bit must be zero (shares the encoding of L128).
        public static final int LZ = 0;

        // Operand-size (W) bit values; WIG means W is ignored (emitted as 0).
        public static final int W0 = 0;
        public static final int W1 = 1;
        public static final int WIG = 0;

        // Implied SIMD prefix (pp): none, 0x66, 0xF3, 0xF2.
        public static final int P_ = 0x0;
        public static final int P_66 = 0x1;
        public static final int P_F3 = 0x2;
        public static final int P_F2 = 0x3;

        // Opcode escape map (mmmmm): 0F, 0F 38, 0F 3A.
        public static final int M_0F = 0x1;
        public static final int M_0F38 = 0x2;
        public static final int M_0F3A = 0x3;

        private VEXPrefixConfig() {
            // Constant holder; never instantiated.
        }
    }
 760 
 761     private class VEXEncoderImpl implements SIMDEncoder {


 919 
 920             emitByte(Prefix.VEX3);
 921             emitByte(byte2);
 922             emitByte(byte3);
 923         }
 924     }
 925 
 926     public static int getLFlag(AVXSize size) {
 927         switch (size) {
 928             case XMM:
 929                 return L128;
 930             case YMM:
 931                 return L256;
 932             case ZMM:
 933                 return L512;
 934             default:
 935                 return LZ;
 936         }
 937     }
 938 
 939     public final void vexPrefix(Register dst, Register nds, Register src, AVXSize size, int pp, int mmmmm, int w, boolean checkAVX) {








 940         emitVEX(getLFlag(size), pp, mmmmm, w, getRXB(dst, src), nds.isValid() ? nds.encoding() : 0, checkAVX);

 941     }
 942 
 943     public final void vexPrefix(Register dst, Register nds, AMD64Address src, AVXSize size, int pp, int mmmmm, int w, boolean checkAVX) {




 944         emitVEX(getLFlag(size), pp, mmmmm, w, getRXB(dst, src), nds.isValid() ? nds.encoding() : 0, checkAVX);

 945     }
 946 
    /**
     * Symbolic constants for EVEX-only prefix fields: the 512-bit vector length, the zeroing (z)
     * bit and the broadcast/rounding (b) bit.
     */
    protected static final class EVEXPrefixConfig {
        // Vector length field value for 512-bit operands; LIG means length is ignored.
        public static final int L512 = 2;
        public static final int LIG = 0;

        // Zeroing-masking bit: Z1 = zero masked elements, Z0 = merge.
        public static final int Z0 = 0x0;
        public static final int Z1 = 0x1;

        // Broadcast/rounding-control bit.
        public static final int B0 = 0x0;
        public static final int B1 = 0x1;

        private EVEXPrefixConfig() {
            // Constant holder; never instantiated.
        }
    }
 960 
 961     private static final int NOT_SUPPORTED_VECTOR_LENGTH = -1;
 962 
 963     /**
 964      * EVEX-encoded instructions use a compressed displacement scheme by multiplying disp8 with a
 965      * scaling factor N depending on the tuple type and the vector length.
 966      *
 967      * Reference: Intel Software Developer's Manual Volume 2, Section 2.6.5
 968      */
 969     protected enum EVEXTuple {

 970         FV_NO_BROADCAST_32BIT(16, 32, 64),
 971         FV_BROADCAST_32BIT(4, 4, 4),
 972         FV_NO_BROADCAST_64BIT(16, 32, 64),
 973         FV_BROADCAST_64BIT(8, 8, 8),
 974         HV_NO_BROADCAST_32BIT(8, 16, 32),
 975         HV_BROADCAST_32BIT(4, 4, 4),
 976         FVM(16, 32, 64),
 977         T1S_8BIT(1, 1, 1),
 978         T1S_16BIT(2, 2, 2),
 979         T1S_32BIT(4, 4, 4),
 980         T1S_64BIT(8, 8, 8),
 981         T1F_32BIT(4, 4, 4),
 982         T1F_64BIT(8, 8, 8),
 983         T2_32BIT(8, 8, 8),
 984         T2_64BIT(NOT_SUPPORTED_VECTOR_LENGTH, 16, 16),
 985         T4_32BIT(NOT_SUPPORTED_VECTOR_LENGTH, 16, 16),
 986         T4_64BIT(NOT_SUPPORTED_VECTOR_LENGTH, NOT_SUPPORTED_VECTOR_LENGTH, 32),
 987         T8_32BIT(NOT_SUPPORTED_VECTOR_LENGTH, NOT_SUPPORTED_VECTOR_LENGTH, 32),
 988         HVM(8, 16, 32),
 989         QVM(4, 8, 16),


1137      * ModRM.rm field.
1138      */
    private static int getRXBForEVEX(Register reg, Register rm) {
        // R: bit 3 of the reg operand, shifted into bit 2 of the result.
        int rxb = (reg == null ? 0 : reg.encoding & 0x08) >> 1;
        // Bits 4..3 of the rm operand land in bits 1..0 — for EVEX the X bit doubles as an extra
        // register-extension bit in register-direct mode (registers 16-31).
        rxb |= (rm == null ? 0 : rm.encoding & 0x018) >> 3;
        return rxb;
    }
1144 
    /**
     * Helper method for emitting EVEX prefix in the form of RRRR (all operands are registers).
     *
     * @param mask the opmask register, or an invalid register if no masking is used
     * @param nds the second source operand, or an invalid register if absent
     */
    protected final void evexPrefix(Register dst, Register mask, Register nds, Register src, AVXSize size, int pp, int mm, int w, int z, int b) {
        assert !mask.isValid() || inRC(MASK, mask);
        emitEVEX(getLFlag(size), pp, mm, w, getRXBForEVEX(dst, src), dst.encoding, nds.isValid() ? nds.encoding() : 0, z, b, mask.isValid() ? mask.encoding : 0);
    }
1152 
    /**
     * Helper method for emitting EVEX prefix in the form of RRRM. Because the memory addressing in
     * EVEX-encoded instructions employ a compressed displacement scheme when using disp8 form, the
     * user of this API should make sure to encode the operands using
     * {@link #emitEVEXOperandHelper(Register, AMD64Address, int, int)}.
     *
     * @param mask the opmask register, or an invalid register if no masking is used
     * @param nds the second source operand, or an invalid register if absent
     */
    protected final void evexPrefix(Register dst, Register mask, Register nds, AMD64Address src, AVXSize size, int pp, int mm, int w, int z, int b) {
        assert !mask.isValid() || inRC(MASK, mask);
        emitEVEX(getLFlag(size), pp, mm, w, getRXB(dst, src), dst.encoding, nds.isValid() ? nds.encoding() : 0, z, b, mask.isValid() ? mask.encoding : 0);
    }
1163 
1164 }


  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 
  25 package org.graalvm.compiler.asm.amd64;
  26 
  27 import static jdk.vm.ci.amd64.AMD64.MASK;
  28 import static jdk.vm.ci.amd64.AMD64.XMM;
  29 import static jdk.vm.ci.amd64.AMD64.r12;
  30 import static jdk.vm.ci.amd64.AMD64.r13;
  31 import static jdk.vm.ci.amd64.AMD64.rbp;
  32 import static jdk.vm.ci.amd64.AMD64.rsp;
  33 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.EVEXPrefixConfig.B0;
  34 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.EVEXPrefixConfig.B1;

  35 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.EVEXPrefixConfig.Z0;
  36 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.EVEXPrefixConfig.Z1;
  37 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.L128;
  38 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.L256;
  39 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.L512;
  40 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.LZ;
  41 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.M_0F;
  42 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.M_0F38;
  43 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.M_0F3A;
  44 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.P_;
  45 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.P_66;
  46 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.P_F2;
  47 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.P_F3;
  48 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.W0;
  49 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.W1;
  50 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.VEXPrefixConfig.WIG;
  51 import static org.graalvm.compiler.core.common.NumUtil.isByte;
  52 
  53 import org.graalvm.compiler.asm.Assembler;
  54 import org.graalvm.compiler.asm.amd64.AMD64Address.Scale;
  55 import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize;
  56 import org.graalvm.compiler.debug.GraalError;
  57 
  58 import jdk.vm.ci.amd64.AMD64;
  59 import jdk.vm.ci.amd64.AMD64.CPUFeature;


 493 
 494     /**
 495      * Emit the ModR/M byte for one register operand and an opcode extension in the R field.
 496      * <p>
 497      * Format: [ 11 reg r/m ]
 498      */
 499     protected final void emitModRM(int reg, Register rm) {
 500         assert (reg & 0x07) == reg;
 501         emitByte(0xC0 | (reg << 3) | (rm.encoding & 0x07));
 502     }
 503 
    /**
     * Emit the ModR/M byte for two register operands.
     * <p>
     * Format: [ 11 reg r/m ]
     */
    protected final void emitModRM(Register reg, Register rm) {
        // Only the low three bits fit in the reg field; any high (extension) bit is carried by
        // the REX/VEX/EVEX prefix emitted separately.
        emitModRM(reg.encoding & 0x07, rm);
    }
 512 
    // Default disp8 scaling factor (1 = no compressed-displacement scaling); EVEX callers pass an
    // explicit evexDisp8Scale instead.
    public static final int DEFAULT_DISP8_SCALE = 1;
 514 
    /**
     * Emits the ModR/M byte and optionally the SIB byte for one register and one memory operand.
     *
     * @param force4Byte use 4 byte encoding for displacements that would normally fit in a byte
     */
    protected final void emitOperandHelper(Register reg, AMD64Address addr, boolean force4Byte, int additionalInstructionSize) {
        assert !reg.equals(Register.None);
        emitOperandHelper(encode(reg), addr, force4Byte, additionalInstructionSize, DEFAULT_DISP8_SCALE);
    }
 524 
    /**
     * Emits the ModR/M byte (and SIB byte if needed) for a memory operand with an opcode
     * extension in the R field; never forces a 4-byte displacement.
     */
    protected final void emitOperandHelper(int reg, AMD64Address addr, int additionalInstructionSize) {
        emitOperandHelper(reg, addr, false, additionalInstructionSize, DEFAULT_DISP8_SCALE);
    }
 528 
    /**
     * Emits the ModR/M byte (and SIB byte if needed) for one register and one memory operand;
     * never forces a 4-byte displacement.
     */
    protected final void emitOperandHelper(Register reg, AMD64Address addr, int additionalInstructionSize) {
        assert !reg.equals(Register.None);
        emitOperandHelper(encode(reg), addr, false, additionalInstructionSize, DEFAULT_DISP8_SCALE);
    }
 533 
    /**
     * Emits the ModR/M byte (and SIB byte if needed) applying {@code evexDisp8Scale} to one-byte
     * displacements (EVEX compressed-displacement form).
     */
    protected final void emitOperandHelper(Register reg, AMD64Address addr, int additionalInstructionSize, int evexDisp8Scale) {
        assert !reg.equals(Register.None);
        emitOperandHelper(encode(reg), addr, false, additionalInstructionSize, evexDisp8Scale);
    }
 538 
 539     /**
 540      * Emits the ModR/M byte and optionally the SIB byte for one memory operand and an opcode
 541      * extension in the R field.
 542      *
 543      * @param force4Byte use 4 byte encoding for displacements that would normally fit in a byte
 544      * @param additionalInstructionSize the number of bytes that will be emitted after the operand,
 545      *            so that the start position of the next instruction can be computed even though
 546      *            this instruction has not been completely emitted yet.
 547      * @param evexDisp8Scale the scaling factor for computing the compressed displacement of
 548      *            EVEX-encoded instructions. This scaling factor only matters when the emitted
 549      *            instruction uses one-byte-displacement form.
 550      */
 551     private void emitOperandHelper(int reg, AMD64Address addr, boolean force4Byte, int additionalInstructionSize, int evexDisp8Scale) {
 552         assert (reg & 0x07) == reg;
 553         int regenc = reg << 3;
 554 


 724             assert (!nds.isValid()) || nds.equals(dst) || nds.equals(src);
 725             if (sizePrefix > 0) {
 726                 emitByte(sizePrefix);
 727             }
 728             if (isRexW) {
 729                 prefixq(dst, src);
 730             } else {
 731                 prefix(dst, src);
 732             }
 733             if (opcodeEscapePrefix > 0xFF) {
 734                 emitShort(opcodeEscapePrefix);
 735             } else if (opcodeEscapePrefix > 0) {
 736                 emitByte(opcodeEscapePrefix);
 737             }
 738         }
 739     }
 740 
    /**
     * Symbolic constants for the fields of a VEX/EVEX prefix: vector length (L), operand-size bit
     * (W), implied SIMD prefix (pp) and opcode escape map (mmmmm).
     */
    public static final class VEXPrefixConfig {
        // Vector length (L) field values; L512 is only valid with the EVEX encoding.
        public static final int L128 = 0;
        public static final int L256 = 1;
        public static final int L512 = 2;
        // LZ: length bit must be zero (shares the encoding of L128).
        public static final int LZ = 0;

        // Operand-size (W) bit values; WIG means W is ignored (emitted as 0).
        public static final int W0 = 0;
        public static final int W1 = 1;
        public static final int WIG = 0;

        // Implied SIMD prefix (pp): none, 0x66, 0xF3, 0xF2.
        public static final int P_ = 0x0;
        public static final int P_66 = 0x1;
        public static final int P_F3 = 0x2;
        public static final int P_F2 = 0x3;

        // Opcode escape map (mmmmm): 0F, 0F 38, 0F 3A.
        public static final int M_0F = 0x1;
        public static final int M_0F38 = 0x2;
        public static final int M_0F3A = 0x3;

        private VEXPrefixConfig() {
            // Constant holder; never instantiated.
        }
    }
 763 
 764     private class VEXEncoderImpl implements SIMDEncoder {


 922 
 923             emitByte(Prefix.VEX3);
 924             emitByte(byte2);
 925             emitByte(byte3);
 926         }
 927     }
 928 
 929     public static int getLFlag(AVXSize size) {
 930         switch (size) {
 931             case XMM:
 932                 return L128;
 933             case YMM:
 934                 return L256;
 935             case ZMM:
 936                 return L512;
 937             default:
 938                 return LZ;
 939         }
 940     }
 941 
 942     public static boolean isAVX512Register(Register reg) {
 943         return reg != null && reg.isValid() && AMD64.XMM.equals(reg.getRegisterCategory()) && reg.encoding > 15;
 944     }
 945 
 946     public final boolean vexPrefix(Register dst, Register nds, Register src, AVXSize size, int pp, int mmmmm, int w, int wEvex, boolean checkAVX) {
 947         if (isAVX512Register(dst) || isAVX512Register(nds) || isAVX512Register(src)) {
 948             evexPrefix(dst, Register.None, nds, src, size, pp, mmmmm, wEvex, Z0, B0);
 949             return true;
 950         }
 951         emitVEX(getLFlag(size), pp, mmmmm, w, getRXB(dst, src), nds.isValid() ? nds.encoding() : 0, checkAVX);
 952         return false;
 953     }
 954 
 955     public final boolean vexPrefix(Register dst, Register nds, AMD64Address src, AVXSize size, int pp, int mmmmm, int w, int wEvex, boolean checkAVX) {
 956         if (isAVX512Register(dst) || isAVX512Register(nds)) {
 957             evexPrefix(dst, Register.None, nds, src, size, pp, mmmmm, wEvex, Z0, B0);
 958             return true;
 959         }
 960         emitVEX(getLFlag(size), pp, mmmmm, w, getRXB(dst, src), nds.isValid() ? nds.encoding() : 0, checkAVX);
 961         return false;
 962     }
 963 
    /**
     * Symbolic constants for EVEX-only prefix fields: the zeroing (z) bit and the
     * broadcast/rounding (b) bit.
     */
    protected static final class EVEXPrefixConfig {
        // Zeroing-masking bit: Z1 = zero masked elements, Z0 = merge.
        public static final int Z0 = 0x0;
        public static final int Z1 = 0x1;

        // Broadcast/rounding-control bit.
        public static final int B0 = 0x0;
        public static final int B1 = 0x1;

        private EVEXPrefixConfig() {
            // Constant holder; never instantiated.
        }
    }
 974 
 975     private static final int NOT_SUPPORTED_VECTOR_LENGTH = -1;
 976 
 977     /**
 978      * EVEX-encoded instructions use a compressed displacement scheme by multiplying disp8 with a
 979      * scaling factor N depending on the tuple type and the vector length.
 980      *
 981      * Reference: Intel Software Developer's Manual Volume 2, Section 2.6.5
 982      */
 983     protected enum EVEXTuple {
 984         INVALID(NOT_SUPPORTED_VECTOR_LENGTH, NOT_SUPPORTED_VECTOR_LENGTH, NOT_SUPPORTED_VECTOR_LENGTH),
 985         FV_NO_BROADCAST_32BIT(16, 32, 64),
 986         FV_BROADCAST_32BIT(4, 4, 4),
 987         FV_NO_BROADCAST_64BIT(16, 32, 64),
 988         FV_BROADCAST_64BIT(8, 8, 8),
 989         HV_NO_BROADCAST_32BIT(8, 16, 32),
 990         HV_BROADCAST_32BIT(4, 4, 4),
 991         FVM(16, 32, 64),
 992         T1S_8BIT(1, 1, 1),
 993         T1S_16BIT(2, 2, 2),
 994         T1S_32BIT(4, 4, 4),
 995         T1S_64BIT(8, 8, 8),
 996         T1F_32BIT(4, 4, 4),
 997         T1F_64BIT(8, 8, 8),
 998         T2_32BIT(8, 8, 8),
 999         T2_64BIT(NOT_SUPPORTED_VECTOR_LENGTH, 16, 16),
1000         T4_32BIT(NOT_SUPPORTED_VECTOR_LENGTH, 16, 16),
1001         T4_64BIT(NOT_SUPPORTED_VECTOR_LENGTH, NOT_SUPPORTED_VECTOR_LENGTH, 32),
1002         T8_32BIT(NOT_SUPPORTED_VECTOR_LENGTH, NOT_SUPPORTED_VECTOR_LENGTH, 32),
1003         HVM(8, 16, 32),
1004         QVM(4, 8, 16),


1152      * ModRM.rm field.
1153      */
    private static int getRXBForEVEX(Register reg, Register rm) {
        // R: bit 3 of the reg operand, shifted into bit 2 of the result.
        int rxb = (reg == null ? 0 : reg.encoding & 0x08) >> 1;
        // Bits 4..3 of the rm operand land in bits 1..0 — for EVEX the X bit doubles as an extra
        // register-extension bit in register-direct mode (registers 16-31).
        rxb |= (rm == null ? 0 : rm.encoding & 0x018) >> 3;
        return rxb;
    }
1159 
    /**
     * Helper method for emitting EVEX prefix in the form of RRRR (all operands are registers).
     *
     * @param mask the opmask register, or an invalid register if no masking is used
     * @param nds the second source operand, or an invalid register if absent
     */
    protected final void evexPrefix(Register dst, Register mask, Register nds, Register src, AVXSize size, int pp, int mm, int w, int z, int b) {
        assert !mask.isValid() || inRC(MASK, mask);
        emitEVEX(getLFlag(size), pp, mm, w, getRXBForEVEX(dst, src), dst.encoding, nds.isValid() ? nds.encoding() : 0, z, b, mask.isValid() ? mask.encoding : 0);
    }
1167 
    /**
     * Helper method for emitting EVEX prefix in the form of RRRM. Because the memory addressing in
     * EVEX-encoded instructions employ a compressed displacement scheme when using disp8 form, the
     * user of this API should make sure to encode the operands using
     * {@link #emitOperandHelper(Register, AMD64Address, int, int)}.
     *
     * @param mask the opmask register, or an invalid register if no masking is used
     * @param nds the second source operand, or an invalid register if absent
     */
    protected final void evexPrefix(Register dst, Register mask, Register nds, AMD64Address src, AVXSize size, int pp, int mm, int w, int z, int b) {
        assert !mask.isValid() || inRC(MASK, mask);
        emitEVEX(getLFlag(size), pp, mm, w, getRXB(dst, src), dst.encoding, nds.isValid() ? nds.encoding() : 0, z, b, mask.isValid() ? mask.encoding : 0);
    }
1178 
1179 }
< prev index next >