
src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.amd64/src/org/graalvm/compiler/asm/amd64/AMD64Assembler.java

 666         }
 667 
 668         protected AMD64MIOp(String opcode, boolean immIsByte, int prefix, int op, int ext, OpAssertion assertion) {
 669             super(opcode, immIsByte, prefix, op, assertion);
 670             this.ext = ext;
 671         }
 672 
 673         public final void emit(AMD64Assembler asm, OperandSize size, Register dst, int imm) {
 674             emit(asm, size, dst, imm, false);
 675         }
 676 
 677         public final void emit(AMD64Assembler asm, OperandSize size, Register dst, int imm, boolean annotateImm) {
 678             assert verify(asm, size, dst, null);
 679             int insnPos = asm.position();
 680             emitOpcode(asm, size, getRXB(null, dst), 0, dst.encoding);
 681             asm.emitModRM(ext, dst);
 682             int immPos = asm.position();
 683             emitImmediate(asm, size, imm);
 684             int nextInsnPos = asm.position();
 685             if (annotateImm && asm.codePatchingAnnotationConsumer != null) {
 686                 asm.codePatchingAnnotationConsumer.accept(new ImmediateOperandAnnotation(insnPos, immPos, nextInsnPos - immPos, nextInsnPos));
 687             }
 688         }
 689 
 690         public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst, int imm) {
 691             emit(asm, size, dst, imm, false);
 692         }
 693 
 694         public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst, int imm, boolean annotateImm) {
 695             assert verify(asm, size, null, null);
 696             int insnPos = asm.position();
 697             emitOpcode(asm, size, getRXB(null, dst), 0, 0);
 698             asm.emitOperandHelper(ext, dst, immediateSize(size));
 699             int immPos = asm.position();
 700             emitImmediate(asm, size, imm);
 701             int nextInsnPos = asm.position();
 702             if (annotateImm && asm.codePatchingAnnotationConsumer != null) {
 703                 asm.codePatchingAnnotationConsumer.accept(new ImmediateOperandAnnotation(insnPos, immPos, nextInsnPos - immPos, nextInsnPos));
 704             }
 705         }
 706     }
 707 
 708     /**
 709      * Opcodes with operand order of RMI.
 710      *
 711      * We only have one form of round, as the operation is always treated as having a single-variant input,
 712      * making its extension to 3-address forms redundant.
 713      */
 714     public static class AMD64RMIOp extends AMD64ImmOp {
 715         // @formatter:off
 716         public static final AMD64RMIOp IMUL    = new AMD64RMIOp("IMUL", false, 0x69);
 717         public static final AMD64RMIOp IMUL_SX = new AMD64RMIOp("IMUL", true,  0x6B);
 718         public static final AMD64RMIOp ROUNDSS = new AMD64RMIOp("ROUNDSS", true, P_0F3A, 0x0A, OpAssertion.PackedDoubleAssertion, CPUFeature.SSE4_1);
 719         public static final AMD64RMIOp ROUNDSD = new AMD64RMIOp("ROUNDSD", true, P_0F3A, 0x0B, OpAssertion.PackedDoubleAssertion, CPUFeature.SSE4_1);
 720         // @formatter:on
 721 
 722         protected AMD64RMIOp(String opcode, boolean immIsByte, int op) {
 723             this(opcode, immIsByte, 0, op, OpAssertion.WordOrLargerAssertion, null);


2006 
2007     public final void movb(AMD64Address dst, Register src) {
2008         assert inRC(CPU, src) : "must have byte register";
2009         prefixb(dst, src);
2010         emitByte(0x88);
2011         emitOperandHelper(src, dst, 0);
2012     }
2013 
2014     public final void movl(Register dst, int imm32) {
2015         movl(dst, imm32, false);
2016     }
2017 
2018     public final void movl(Register dst, int imm32, boolean annotateImm) {
2019         int insnPos = position();
2020         prefix(dst);
2021         emitByte(0xB8 + encode(dst));
2022         int immPos = position();
2023         emitInt(imm32);
2024         int nextInsnPos = position();
2025         if (annotateImm && codePatchingAnnotationConsumer != null) {
2026             codePatchingAnnotationConsumer.accept(new ImmediateOperandAnnotation(insnPos, immPos, nextInsnPos - immPos, nextInsnPos));
2027         }
2028     }
2029 
2030     public final void movl(Register dst, Register src) {
2031         prefix(dst, src);
2032         emitByte(0x8B);
2033         emitModRM(dst, src);
2034     }
2035 
2036     public final void movl(Register dst, AMD64Address src) {
2037         prefix(src, dst);
2038         emitByte(0x8B);
2039         emitOperandHelper(dst, src, 0);
2040     }
2041 
2042     /**
2043      * @param wide use 4 byte encoding for displacements that would normally fit in a byte
2044      */
2045     public final void movl(Register dst, AMD64Address src, boolean wide) {
2046         prefix(src, dst);


2184         SSEOp.MUL.emit(this, PD, dst, src);
2185     }
2186 
2187     public final void mulpd(Register dst, AMD64Address src) {
2188         SSEOp.MUL.emit(this, PD, dst, src);
2189     }
2190 
2191     public final void mulsd(Register dst, Register src) {
2192         SSEOp.MUL.emit(this, SD, dst, src);
2193     }
2194 
2195     public final void mulsd(Register dst, AMD64Address src) {
2196         SSEOp.MUL.emit(this, SD, dst, src);
2197     }
2198 
2199     public final void mulss(Register dst, Register src) {
2200         SSEOp.MUL.emit(this, SS, dst, src);
2201     }
2202 
2203     public final void movswl(Register dst, AMD64Address src) {
2204         prefix(src, dst);
2205         emitByte(0x0F);
2206         emitByte(0xBF);
2207         emitOperandHelper(dst, src, 0);
2208     }
2209 
2210     public final void movw(AMD64Address dst, int imm16) {
2211         emitByte(0x66); // switch to 16-bit mode
2212         prefix(dst);
2213         emitByte(0xC7);
2214         emitOperandHelper(0, dst, 2);
2215         emitShort(imm16);
2216     }
2217 
2218     public final void movw(AMD64Address dst, Register src) {
2219         emitByte(0x66);
2220         prefix(dst, src);
2221         emitByte(0x89);
2222         emitOperandHelper(src, dst, 0);
2223     }
2224 
2225     public final void movzbl(Register dst, AMD64Address src) {
2226         prefix(src, dst);
2227         emitByte(0x0F);
2228         emitByte(0xB6);
2229         emitOperandHelper(dst, src, 0);
2230     }
2231 
2232     public final void movzbl(Register dst, Register src) {
2233         AMD64RMOp.MOVZXB.emit(this, DWORD, dst, src);
2234     }
2235 
2236     public final void movzbq(Register dst, Register src) {
2237         AMD64RMOp.MOVZXB.emit(this, QWORD, dst, src);
2238     }
2239 
2240     public final void movzwl(Register dst, AMD64Address src) {
2241         prefix(src, dst);
2242         emitByte(0x0F);
2243         emitByte(0xB7);
2244         emitOperandHelper(dst, src, 0);
2245     }
2246 
2247     public final void negl(Register dst) {
2248         NEG.emit(this, DWORD, dst);
2249     }
2250 
2251     public final void notl(Register dst) {
2252         NOT.emit(this, DWORD, dst);
2253     }
2254 
2255     public final void notq(Register dst) {
2256         NOT.emit(this, QWORD, dst);
2257     }
2258 
2259     @Override
2260     public final void ensureUniquePC() {
2261         nop();
2262     }
2263 
2264     public final void nop() {


2540         emitByte(imm8);
2541     }
2542 
2543     public final void pcmpestri(Register dst, Register src, int imm8) {
2544         assert supports(CPUFeature.SSE4_2);
2545         assert inRC(XMM, dst) && inRC(XMM, src);
2546         simdPrefix(dst, Register.None, src, PD, P_0F3A, false);
2547         emitByte(0x61);
2548         emitModRM(dst, src);
2549         emitByte(imm8);
2550     }
2551 
2552     public final void pmovmskb(Register dst, Register src) {
2553         assert supports(CPUFeature.SSE2);
2554         assert inRC(CPU, dst) && inRC(XMM, src);
2555         simdPrefix(dst, Register.None, src, PD, P_0F, false);
2556         emitByte(0xD7);
2557         emitModRM(dst, src);
2558     }
2559 
2560     // Insn: VPMOVZXBW xmm1, xmm2/m64
2561 
2562     public final void pmovzxbw(Register dst, AMD64Address src) {
2563         assert supports(CPUFeature.SSE4_1);
2564         assert inRC(XMM, dst);
2565         simdPrefix(dst, Register.None, src, PD, P_0F38, false);
2566         emitByte(0x30);
2567         emitOperandHelper(dst, src, 0);
2568     }
2569 
2570     public final void pmovzxbw(Register dst, Register src) {
2571         assert supports(CPUFeature.SSE4_1);
2572         assert inRC(XMM, dst) && inRC(XMM, src);
2573         simdPrefix(dst, Register.None, src, PD, P_0F38, false);
2574         emitByte(0x30);
2575         emitModRM(dst, src);
2576     }
2577 
2578     public final void push(Register src) {
2579         prefix(src);
2580         emitByte(0x50 + encode(src));
2581     }
2582 
2583     public void pushfq() {
2584         emitByte(0x9c);
2585     }
2586 
2587     public final void paddd(Register dst, Register src) {
2588         assert inRC(XMM, dst) && inRC(XMM, src);
2589         simdPrefix(dst, dst, src, PD, P_0F, false);


2864     }
2865 
2866     public final void unpckhpd(Register dst, Register src) {
2867         assert inRC(XMM, dst) && inRC(XMM, src);
2868         simdPrefix(dst, dst, src, PD, P_0F, false);
2869         emitByte(0x15);
2870         emitModRM(dst, src);
2871     }
2872 
2873     public final void unpcklpd(Register dst, Register src) {
2874         assert inRC(XMM, dst) && inRC(XMM, src);
2875         simdPrefix(dst, dst, src, PD, P_0F, false);
2876         emitByte(0x14);
2877         emitModRM(dst, src);
2878     }
2879 
2880     public final void xorl(Register dst, Register src) {
2881         XOR.rmOp.emit(this, DWORD, dst, src);
2882     }
2883 
2884     public final void xorpd(Register dst, Register src) {
2885         SSEOp.XOR.emit(this, PD, dst, src);
2886     }
2887 
2888     public final void xorps(Register dst, Register src) {
2889         SSEOp.XOR.emit(this, PS, dst, src);
2890     }
2891 
2892     protected final void decl(Register dst) {
2893         // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
2894         prefix(dst);
2895         emitByte(0xFF);
2896         emitModRM(1, dst);
2897     }
2898 
2899     protected final void incl(Register dst) {
2900         // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
2901         prefix(dst);
2902         emitByte(0xFF);
2903         emitModRM(0, dst);


3028         emitByte(0xFF);
3029         emitModRM(0, dst);
3030     }
3031 
3032     public final void incq(AMD64Address dst) {
3033         INC.emit(this, QWORD, dst);
3034     }
3035 
3036     public final void movq(Register dst, long imm64) {
3037         movq(dst, imm64, false);
3038     }
3039 
3040     public final void movq(Register dst, long imm64, boolean annotateImm) {
3041         int insnPos = position();
3042         prefixq(dst);
3043         emitByte(0xB8 + encode(dst));
3044         int immPos = position();
3045         emitLong(imm64);
3046         int nextInsnPos = position();
3047         if (annotateImm && codePatchingAnnotationConsumer != null) {
3048             codePatchingAnnotationConsumer.accept(new ImmediateOperandAnnotation(insnPos, immPos, nextInsnPos - immPos, nextInsnPos));
3049         }
3050     }
3051 
3052     public final void movslq(Register dst, int imm32) {
3053         prefixq(dst);
3054         emitByte(0xC7);
3055         emitModRM(0, dst);
3056         emitInt(imm32);
3057     }
3058 
3059     public final void movdq(Register dst, AMD64Address src) {
3060         AMD64RMOp.MOVQ.emit(this, QWORD, dst, src);
3061     }
3062 
3063     public final void movdq(AMD64Address dst, Register src) {
3064         AMD64MROp.MOVQ.emit(this, QWORD, dst, src);
3065     }
3066 
3067     public final void movdq(Register dst, Register src) {
3068         if (inRC(XMM, dst) && inRC(CPU, src)) {


3170     }
3171 
3172     public final void shrq(Register dst, int imm8) {
3173         assert isShiftCount(imm8 >> 1) : "illegal shift count";
3174         prefixq(dst);
3175         if (imm8 == 1) {
3176             emitByte(0xD1);
3177             emitModRM(5, dst);
3178         } else {
3179             emitByte(0xC1);
3180             emitModRM(5, dst);
3181             emitByte(imm8);
3182         }
3183     }
3184 
3185     public final void shrq(Register dst) {
3186         prefixq(dst);
3187         emitByte(0xD3);
3188         // Unsigned divide dst by 2, CL times.
3189         emitModRM(5, dst);













3190     }
3191 
3192     public final void sbbq(Register dst, Register src) {
3193         SBB.rmOp.emit(this, QWORD, dst, src);
3194     }
3195 
3196     public final void subq(Register dst, int imm32) {
3197         SUB.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
3198     }
3199 
3200     public final void subq(AMD64Address dst, int imm32) {
3201         SUB.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
3202     }
3203 
3204     public final void subqWide(Register dst, int imm32) {
3205         // don't use the sign-extending version, forcing a 32-bit immediate
3206         SUB.getMIOpcode(QWORD, false).emit(this, QWORD, dst, imm32);
3207     }
3208 
3209     public final void subq(Register dst, Register src) {




 666         }
 667 
 668         protected AMD64MIOp(String opcode, boolean immIsByte, int prefix, int op, int ext, OpAssertion assertion) {
 669             super(opcode, immIsByte, prefix, op, assertion);
 670             this.ext = ext;
 671         }
 672 
 673         public final void emit(AMD64Assembler asm, OperandSize size, Register dst, int imm) {
 674             emit(asm, size, dst, imm, false);
 675         }
 676 
 677         public final void emit(AMD64Assembler asm, OperandSize size, Register dst, int imm, boolean annotateImm) {
 678             assert verify(asm, size, dst, null);
 679             int insnPos = asm.position();
 680             emitOpcode(asm, size, getRXB(null, dst), 0, dst.encoding);
 681             asm.emitModRM(ext, dst);
 682             int immPos = asm.position();
 683             emitImmediate(asm, size, imm);
 684             int nextInsnPos = asm.position();
 685             if (annotateImm && asm.codePatchingAnnotationConsumer != null) {
 686                 asm.codePatchingAnnotationConsumer.accept(new OperandDataAnnotation(insnPos, immPos, nextInsnPos - immPos, nextInsnPos));
 687             }
 688         }
 689 
 690         public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst, int imm) {
 691             emit(asm, size, dst, imm, false);
 692         }
 693 
 694         public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst, int imm, boolean annotateImm) {
 695             assert verify(asm, size, null, null);
 696             int insnPos = asm.position();
 697             emitOpcode(asm, size, getRXB(null, dst), 0, 0);
 698             asm.emitOperandHelper(ext, dst, immediateSize(size));
 699             int immPos = asm.position();
 700             emitImmediate(asm, size, imm);
 701             int nextInsnPos = asm.position();
 702             if (annotateImm && asm.codePatchingAnnotationConsumer != null) {
 703                 asm.codePatchingAnnotationConsumer.accept(new OperandDataAnnotation(insnPos, immPos, nextInsnPos - immPos, nextInsnPos));
 704             }
 705         }
 706     }
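When annotateImm is set, the two emits above record four positions (instruction start, start of the immediate, immediate size, start of the next instruction) and hand them to the consumer as an OperandDataAnnotation. The sketch below is illustrative only: the setCodePatchingAnnotationConsumer registration hook, the AMD64MIOp.MOV constant and the static import of DWORD are assumed from the rest of the assembler and are not part of this hunk.

    // Minimal sketch of observing the annotation produced by AMD64MIOp.emit(..., true).
    static void emitAnnotatedMove(AMD64Assembler asm, Register reg) {
        // Registration hook name assumed, not shown in this diff.
        asm.setCodePatchingAnnotationConsumer(annotation -> {
            // For the emit above, this receives an OperandDataAnnotation whose fields
            // mirror insnPos, immPos, nextInsnPos - immPos and nextInsnPos.
            System.out.println(annotation);
        });
        AMD64MIOp.MOV.emit(asm, DWORD, reg, 0x1234, true); // annotateImm == true
    }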
 707 
 708     /**
 709      * Opcodes with operand order of RMI.
 710      *
 711      * We only have one form of round, as the operation is always treated as having a single-variant input,
 712      * making its extension to 3-address forms redundant.
 713      */
 714     public static class AMD64RMIOp extends AMD64ImmOp {
 715         // @formatter:off
 716         public static final AMD64RMIOp IMUL    = new AMD64RMIOp("IMUL", false, 0x69);
 717         public static final AMD64RMIOp IMUL_SX = new AMD64RMIOp("IMUL", true,  0x6B);
 718         public static final AMD64RMIOp ROUNDSS = new AMD64RMIOp("ROUNDSS", true, P_0F3A, 0x0A, OpAssertion.PackedDoubleAssertion, CPUFeature.SSE4_1);
 719         public static final AMD64RMIOp ROUNDSD = new AMD64RMIOp("ROUNDSD", true, P_0F3A, 0x0B, OpAssertion.PackedDoubleAssertion, CPUFeature.SSE4_1);
 720         // @formatter:on
 721 
 722         protected AMD64RMIOp(String opcode, boolean immIsByte, int op) {
 723             this(opcode, immIsByte, 0, op, OpAssertion.WordOrLargerAssertion, null);
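As a usage sketch for the round opcodes declared above: the byte immediate carries the SSE4.1 rounding control, whose low two bits select the mode (00 nearest, 01 down, 10 up, 11 truncate) when bit 2 is clear. The emit overload used below (register-register form plus an immediate) is assumed to exist alongside the MIOp emits and is not visible in this hunk.

    // Hedged sketch: floor of the double in src into dst via ROUNDSD dst, src, 0b01.
    AMD64RMIOp.ROUNDSD.emit(asm, PD, dst, src, 0b01); // emit overload assumed; size PD per the assertion above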


2006 
2007     public final void movb(AMD64Address dst, Register src) {
2008         assert inRC(CPU, src) : "must have byte register";
2009         prefixb(dst, src);
2010         emitByte(0x88);
2011         emitOperandHelper(src, dst, 0);
2012     }
2013 
2014     public final void movl(Register dst, int imm32) {
2015         movl(dst, imm32, false);
2016     }
2017 
2018     public final void movl(Register dst, int imm32, boolean annotateImm) {
2019         int insnPos = position();
2020         prefix(dst);
2021         emitByte(0xB8 + encode(dst));
2022         int immPos = position();
2023         emitInt(imm32);
2024         int nextInsnPos = position();
2025         if (annotateImm && codePatchingAnnotationConsumer != null) {
2026             codePatchingAnnotationConsumer.accept(new OperandDataAnnotation(insnPos, immPos, nextInsnPos - immPos, nextInsnPos));
2027         }
2028     }
2029 
2030     public final void movl(Register dst, Register src) {
2031         prefix(dst, src);
2032         emitByte(0x8B);
2033         emitModRM(dst, src);
2034     }
2035 
2036     public final void movl(Register dst, AMD64Address src) {
2037         prefix(src, dst);
2038         emitByte(0x8B);
2039         emitOperandHelper(dst, src, 0);
2040     }
2041 
2042     /**
2043      * @param wide use 4 byte encoding for displacements that would normally fit in a byte
2044      */
2045     public final void movl(Register dst, AMD64Address src, boolean wide) {
2046         prefix(src, dst);


2184         SSEOp.MUL.emit(this, PD, dst, src);
2185     }
2186 
2187     public final void mulpd(Register dst, AMD64Address src) {
2188         SSEOp.MUL.emit(this, PD, dst, src);
2189     }
2190 
2191     public final void mulsd(Register dst, Register src) {
2192         SSEOp.MUL.emit(this, SD, dst, src);
2193     }
2194 
2195     public final void mulsd(Register dst, AMD64Address src) {
2196         SSEOp.MUL.emit(this, SD, dst, src);
2197     }
2198 
2199     public final void mulss(Register dst, Register src) {
2200         SSEOp.MUL.emit(this, SS, dst, src);
2201     }
2202 
2203     public final void movswl(Register dst, AMD64Address src) {
2204         AMD64RMOp.MOVSX.emit(this, DWORD, dst, src);
2205     }
2206 
2207     public final void movswq(Register dst, AMD64Address src) {
2208         AMD64RMOp.MOVSX.emit(this, QWORD, dst, src);
2209     }
2210 
2211     public final void movw(AMD64Address dst, int imm16) {
2212         emitByte(0x66); // switch to 16-bit mode
2213         prefix(dst);
2214         emitByte(0xC7);
2215         emitOperandHelper(0, dst, 2);
2216         emitShort(imm16);
2217     }
2218 
2219     public final void movw(AMD64Address dst, Register src) {
2220         emitByte(0x66);
2221         prefix(dst, src);
2222         emitByte(0x89);
2223         emitOperandHelper(src, dst, 0);
2224     }
2225 
2226     public final void movw(Register dst, AMD64Address src) {
2227         emitByte(0x66);
2228         prefix(src, dst);
2229         emitByte(0x8B);
2230         emitOperandHelper(dst, src, 0);
2231     }
2232 
2233     public final void movzbl(Register dst, AMD64Address src) {
2234         prefix(src, dst);
2235         emitByte(0x0F);
2236         emitByte(0xB6);
2237         emitOperandHelper(dst, src, 0);
2238     }
2239 
2240     public final void movzbl(Register dst, Register src) {
2241         AMD64RMOp.MOVZXB.emit(this, DWORD, dst, src);
2242     }
2243 
2244     public final void movzbq(Register dst, Register src) {
2245         AMD64RMOp.MOVZXB.emit(this, QWORD, dst, src);
2246     }
2247 
2248     public final void movzbq(Register dst, AMD64Address src) {
2249         AMD64RMOp.MOVZXB.emit(this, QWORD, dst, src);
2250     }
2251 
2252     public final void movzwl(Register dst, AMD64Address src) {
2253         AMD64RMOp.MOVZX.emit(this, DWORD, dst, src);
2254     }
2255 
2256     public final void movzwq(Register dst, AMD64Address src) {
2257         AMD64RMOp.MOVZX.emit(this, QWORD, dst, src);
2258     }
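The previously open-coded 0F BF / 0F B6 / 0F B7 sequences are now routed through AMD64RMOp.MOVSX, MOVZXB and MOVZX, so the widening loads differ only in the OperandSize passed to emit. A small usage sketch (registers taken from jdk.vm.ci.amd64.AMD64; the AMD64Address(base, displacement) constructor is assumed):

    // Hedged sketch: sign- vs. zero-extending 16-bit loads; the expected encodings are the
    // standard x86-64 ones (REX.W 0F BF /r for MOVSX r64, r/m16; REX.W 0F B7 /r for MOVZX).
    asm.movswq(rax, new AMD64Address(rbx, 8));  // sign-extend the short at [rbx + 8]
    asm.movzwq(rcx, new AMD64Address(rbx, 10)); // zero-extend the short at [rbx + 10]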
2259 
2260     public final void negl(Register dst) {
2261         NEG.emit(this, DWORD, dst);
2262     }
2263 
2264     public final void notl(Register dst) {
2265         NOT.emit(this, DWORD, dst);
2266     }
2267 
2268     public final void notq(Register dst) {
2269         NOT.emit(this, QWORD, dst);
2270     }
2271 
2272     @Override
2273     public final void ensureUniquePC() {
2274         nop();
2275     }
2276 
2277     public final void nop() {


2553         emitByte(imm8);
2554     }
2555 
2556     public final void pcmpestri(Register dst, Register src, int imm8) {
2557         assert supports(CPUFeature.SSE4_2);
2558         assert inRC(XMM, dst) && inRC(XMM, src);
2559         simdPrefix(dst, Register.None, src, PD, P_0F3A, false);
2560         emitByte(0x61);
2561         emitModRM(dst, src);
2562         emitByte(imm8);
2563     }
2564 
2565     public final void pmovmskb(Register dst, Register src) {
2566         assert supports(CPUFeature.SSE2);
2567         assert inRC(CPU, dst) && inRC(XMM, src);
2568         simdPrefix(dst, Register.None, src, PD, P_0F, false);
2569         emitByte(0xD7);
2570         emitModRM(dst, src);
2571     }
2572 
2573     private void pmovSZx(Register dst, AMD64Address src, int op) {
2574         assert supports(CPUFeature.SSE4_1);
2575         assert inRC(XMM, dst);
2576         simdPrefix(dst, Register.None, src, PD, P_0F38, false);
2577         emitByte(op);
2578         emitOperandHelper(dst, src, 0);
2579     }
2580 
2581     public final void pmovsxbw(Register dst, AMD64Address src) {
2582         pmovSZx(dst, src, 0x20);
2583     }
2584 
2585     public final void pmovsxbd(Register dst, AMD64Address src) {
2586         pmovSZx(dst, src, 0x21);
2587     }
2588 
2589     public final void pmovsxbq(Register dst, AMD64Address src) {
2590         pmovSZx(dst, src, 0x22);
2591     }
2592 
2593     public final void pmovsxwd(Register dst, AMD64Address src) {
2594         pmovSZx(dst, src, 0x23);
2595     }
2596 
2597     public final void pmovsxwq(Register dst, AMD64Address src) {
2598         pmovSZx(dst, src, 0x24);
2599     }
2600 
2601     public final void pmovsxdq(Register dst, AMD64Address src) {
2602         pmovSZx(dst, src, 0x25);
2603     }
2604 
2605     // Insn: VPMOVZXBW xmm1, xmm2/m64
2606     public final void pmovzxbw(Register dst, AMD64Address src) {
2607         pmovSZx(dst, src, 0x30);
2608     }
2609 
2610     public final void pmovzxbd(Register dst, AMD64Address src) {
2611         pmovSZx(dst, src, 0x31);
2612     }
2613 
2614     public final void pmovzxbq(Register dst, AMD64Address src) {
2615         pmovSZx(dst, src, 0x32);
2616     }
2617 
2618     public final void pmovzxwd(Register dst, AMD64Address src) {
2619         pmovSZx(dst, src, 0x33);
2620     }
2621 
2622     public final void pmovzxwq(Register dst, AMD64Address src) {
2623         pmovSZx(dst, src, 0x34);
2624     }
2625 
2626     public final void pmovzxdq(Register dst, AMD64Address src) {
2627         pmovSZx(dst, src, 0x35);
2628     }
2629 
2630     public final void pmovzxbw(Register dst, Register src) {
2631         assert supports(CPUFeature.SSE4_1);
2632         assert inRC(XMM, dst) && inRC(XMM, src);
2633         simdPrefix(dst, Register.None, src, PD, P_0F38, false);
2634         emitByte(0x30);
2635         emitModRM(dst, src);
2636     }
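pmovSZx factors out the shared SSE4.1 prefix and operand emission; the callers differ only in the opcode byte (0x20-0x25 for the sign-extending forms, 0x30-0x35 for the zero-extending ones). A short usage sketch under the same assumptions as above:

    // Hedged sketch: widen 8 packed bytes at [rsi] into 8 words.
    asm.pmovzxbw(xmm0, new AMD64Address(rsi)); // PMOVZXBW xmm0, m64 (zero-extend)
    asm.pmovsxbw(xmm1, new AMD64Address(rsi)); // PMOVSXBW xmm1, m64 (sign-extend)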
2637 
2638     public final void push(Register src) {
2639         prefix(src);
2640         emitByte(0x50 + encode(src));
2641     }
2642 
2643     public void pushfq() {
2644         emitByte(0x9c);
2645     }
2646 
2647     public final void paddd(Register dst, Register src) {
2648         assert inRC(XMM, dst) && inRC(XMM, src);
2649         simdPrefix(dst, dst, src, PD, P_0F, false);


2924     }
2925 
2926     public final void unpckhpd(Register dst, Register src) {
2927         assert inRC(XMM, dst) && inRC(XMM, src);
2928         simdPrefix(dst, dst, src, PD, P_0F, false);
2929         emitByte(0x15);
2930         emitModRM(dst, src);
2931     }
2932 
2933     public final void unpcklpd(Register dst, Register src) {
2934         assert inRC(XMM, dst) && inRC(XMM, src);
2935         simdPrefix(dst, dst, src, PD, P_0F, false);
2936         emitByte(0x14);
2937         emitModRM(dst, src);
2938     }
2939 
2940     public final void xorl(Register dst, Register src) {
2941         XOR.rmOp.emit(this, DWORD, dst, src);
2942     }
2943 
2944     public final void xorq(Register dst, Register src) {
2945         XOR.rmOp.emit(this, QWORD, dst, src);
2946     }
2947 
2948     public final void xorpd(Register dst, Register src) {
2949         SSEOp.XOR.emit(this, PD, dst, src);
2950     }
2951 
2952     public final void xorps(Register dst, Register src) {
2953         SSEOp.XOR.emit(this, PS, dst, src);
2954     }
2955 
2956     protected final void decl(Register dst) {
2957         // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
2958         prefix(dst);
2959         emitByte(0xFF);
2960         emitModRM(1, dst);
2961     }
2962 
2963     protected final void incl(Register dst) {
2964         // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
2965         prefix(dst);
2966         emitByte(0xFF);
2967         emitModRM(0, dst);


3092         emitByte(0xFF);
3093         emitModRM(0, dst);
3094     }
3095 
3096     public final void incq(AMD64Address dst) {
3097         INC.emit(this, QWORD, dst);
3098     }
3099 
3100     public final void movq(Register dst, long imm64) {
3101         movq(dst, imm64, false);
3102     }
3103 
3104     public final void movq(Register dst, long imm64, boolean annotateImm) {
3105         int insnPos = position();
3106         prefixq(dst);
3107         emitByte(0xB8 + encode(dst));
3108         int immPos = position();
3109         emitLong(imm64);
3110         int nextInsnPos = position();
3111         if (annotateImm && codePatchingAnnotationConsumer != null) {
3112             codePatchingAnnotationConsumer.accept(new OperandDataAnnotation(insnPos, immPos, nextInsnPos - immPos, nextInsnPos));
3113         }
3114     }
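movq(dst, imm64, true) is the typical client of the annotation pattern sketched after AMD64MIOp above: the full 8-byte immediate of the B8+rd encoding is described by an OperandDataAnnotation so a later pass can locate and patch the constant.

    // Hedged sketch: load a patchable 64-bit constant; rax is assumed from jdk.vm.ci.amd64.AMD64.
    asm.movq(rax, 0x1122334455667788L, true); // annotation covers the 8 immediate bytes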
3115 
3116     public final void movslq(Register dst, int imm32) {
3117         prefixq(dst);
3118         emitByte(0xC7);
3119         emitModRM(0, dst);
3120         emitInt(imm32);
3121     }
3122 
3123     public final void movdq(Register dst, AMD64Address src) {
3124         AMD64RMOp.MOVQ.emit(this, QWORD, dst, src);
3125     }
3126 
3127     public final void movdq(AMD64Address dst, Register src) {
3128         AMD64MROp.MOVQ.emit(this, QWORD, dst, src);
3129     }
3130 
3131     public final void movdq(Register dst, Register src) {
3132         if (inRC(XMM, dst) && inRC(CPU, src)) {


3234     }
3235 
3236     public final void shrq(Register dst, int imm8) {
3237         assert isShiftCount(imm8 >> 1) : "illegal shift count";
3238         prefixq(dst);
3239         if (imm8 == 1) {
3240             emitByte(0xD1);
3241             emitModRM(5, dst);
3242         } else {
3243             emitByte(0xC1);
3244             emitModRM(5, dst);
3245             emitByte(imm8);
3246         }
3247     }
3248 
3249     public final void shrq(Register dst) {
3250         prefixq(dst);
3251         emitByte(0xD3);
3252         // Unsigned divide dst by 2, CL times.
3253         emitModRM(5, dst);
3254     }
3255 
3256     public final void sarq(Register dst, int imm8) {
3257         assert isShiftCount(imm8 >> 1) : "illegal shift count";
3258         prefixq(dst);
3259         if (imm8 == 1) {
3260             emitByte(0xD1);
3261             emitModRM(7, dst);
3262         } else {
3263             emitByte(0xC1);
3264             emitModRM(7, dst);
3265             emitByte(imm8);
3266         }
3267     }
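Both shrq and sarq use the shorter 0xD1 encoding when the count is 1 and fall back to 0xC1 ib otherwise. The byte sequences below are the standard x86-64 encodings; rax is assumed from jdk.vm.ci.amd64.AMD64.

    asm.shrq(rax, 1); // 48 D1 E8       SHR r/m64, 1
    asm.shrq(rax, 3); // 48 C1 E8 03    SHR r/m64, imm8
    asm.sarq(rax, 1); // 48 D1 F8       SAR r/m64, 1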
3268 
3269     public final void sbbq(Register dst, Register src) {
3270         SBB.rmOp.emit(this, QWORD, dst, src);
3271     }
3272 
3273     public final void subq(Register dst, int imm32) {
3274         SUB.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
3275     }
3276 
3277     public final void subq(AMD64Address dst, int imm32) {
3278         SUB.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
3279     }
3280 
3281     public final void subqWide(Register dst, int imm32) {
3282         // don't use the sign-extending version, forcing a 32-bit immediate
3283         SUB.getMIOpcode(QWORD, false).emit(this, QWORD, dst, imm32);
3284     }
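subq picks the sign-extended imm8 form when the value fits in a byte, while subqWide always emits the full 32-bit immediate, giving the instruction a fixed length regardless of the value. The encodings below are the standard ones; rsp is assumed from jdk.vm.ci.amd64.AMD64.

    asm.subq(rsp, 16);     // 48 83 EC 10             sign-extended imm8 form
    asm.subqWide(rsp, 16); // 48 81 EC 10 00 00 00    always a 4-byte immediate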
3285 
3286     public final void subq(Register dst, Register src) {

