src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.amd64/src/org/graalvm/compiler/asm/amd64/AMD64MacroAssembler.java

rev 56282 : [mq]: graal

*** 1,7 ****
  /*
!  * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
--- 1,7 ----
  /*
!  * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
*** 22,37 ****
   */
  package org.graalvm.compiler.asm.amd64;
  
- import static jdk.vm.ci.amd64.AMD64.rbp;
- import static jdk.vm.ci.amd64.AMD64.rsp;
  import static org.graalvm.compiler.asm.amd64.AMD64AsmOptions.UseIncDec;
  import static org.graalvm.compiler.asm.amd64.AMD64AsmOptions.UseXmmLoadAndClearUpper;
  import static org.graalvm.compiler.asm.amd64.AMD64AsmOptions.UseXmmRegToRegMoveAll;
  
  import org.graalvm.compiler.core.common.NumUtil;
  
  import jdk.vm.ci.amd64.AMD64;
  import jdk.vm.ci.amd64.AMD64Kind;
  import jdk.vm.ci.code.Register;
--- 22,36 ----
   */
  package org.graalvm.compiler.asm.amd64;
  
  import static org.graalvm.compiler.asm.amd64.AMD64AsmOptions.UseIncDec;
  import static org.graalvm.compiler.asm.amd64.AMD64AsmOptions.UseXmmLoadAndClearUpper;
  import static org.graalvm.compiler.asm.amd64.AMD64AsmOptions.UseXmmRegToRegMoveAll;
  
+ import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize;
  import org.graalvm.compiler.core.common.NumUtil;
  
  import jdk.vm.ci.amd64.AMD64;
  import jdk.vm.ci.amd64.AMD64Kind;
  import jdk.vm.ci.code.Register;
*** 82,105 ****
          } else {
              subq(dst, value);
          }
      }
  
-     public final void enter(int frameSize) {
-         if (NumUtil.isUShort(frameSize)) {
-             // Can use enter instruction only for frame size that fits in 16 bits.
-             emitByte(0xC8);
-             emitShort(frameSize);
-             emitByte(0x00);
-         } else {
-             // Fall back to manual sequence.
-             push(rbp);
-             movq(rbp, rsp);
-             decrementq(rsp, frameSize);
-         }
-     }
- 
      public void incrementq(Register reg, int value) {
          if (value == Integer.MIN_VALUE) {
              addq(reg, value);
              return;
          }
--- 81,90 ----
*** 237,284 ****
--- 222,302 ----
      }
  
      public void movflt(Register dst, Register src) {
          assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
          if (UseXmmRegToRegMoveAll) {
+             if (isAVX512Register(dst) || isAVX512Register(src)) {
+                 VexMoveOp.VMOVAPS.emit(this, AVXSize.XMM, dst, src);
+             } else {
              movaps(dst, src);
+             }
+         } else {
+             if (isAVX512Register(dst) || isAVX512Register(src)) {
+                 VexMoveOp.VMOVSS.emit(this, AVXSize.XMM, dst, src);
          } else {
              movss(dst, src);
          }
      }
+     }
  
      public void movflt(Register dst, AMD64Address src) {
          assert dst.getRegisterCategory().equals(AMD64.XMM);
+         if (isAVX512Register(dst)) {
+             VexMoveOp.VMOVSS.emit(this, AVXSize.XMM, dst, src);
+         } else {
          movss(dst, src);
      }
+     }
  
      public void movflt(AMD64Address dst, Register src) {
          assert src.getRegisterCategory().equals(AMD64.XMM);
+         if (isAVX512Register(src)) {
+             VexMoveOp.VMOVSS.emit(this, AVXSize.XMM, dst, src);
+         } else {
          movss(dst, src);
      }
+     }
  
      public void movdbl(Register dst, Register src) {
          assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
          if (UseXmmRegToRegMoveAll) {
+             if (isAVX512Register(dst) || isAVX512Register(src)) {
+                 VexMoveOp.VMOVAPD.emit(this, AVXSize.XMM, dst, src);
+             } else {
              movapd(dst, src);
+             }
+         } else {
+             if (isAVX512Register(dst) || isAVX512Register(src)) {
+                 VexMoveOp.VMOVSD.emit(this, AVXSize.XMM, dst, src);
          } else {
              movsd(dst, src);
          }
      }
+     }
  
      public void movdbl(Register dst, AMD64Address src) {
          assert dst.getRegisterCategory().equals(AMD64.XMM);
          if (UseXmmLoadAndClearUpper) {
+             if (isAVX512Register(dst)) {
+                 VexMoveOp.VMOVSD.emit(this, AVXSize.XMM, dst, src);
+             } else {
              movsd(dst, src);
+             }
          } else {
+             assert !isAVX512Register(dst);
              movlpd(dst, src);
          }
      }
  
      public void movdbl(AMD64Address dst, Register src) {
          assert src.getRegisterCategory().equals(AMD64.XMM);
+         if (isAVX512Register(src)) {
+             VexMoveOp.VMOVSD.emit(this, AVXSize.XMM, dst, src);
+         } else {
          movsd(dst, src);
      }
+     }
  
      /**
       * Non-atomic write of a 64-bit constant to memory. Do not use if the address might be a
       * volatile field!
       */
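For reference, a resolved sketch of movflt(Register, Register) after this change, reconstructed from the hunk above; the remaining movflt/movdbl overloads apply the same isAVX512Register dispatch, using the VEX/EVEX-encoded moves when an operand needs AVX-512 encoding and falling back to the legacy SSE forms otherwise:

    public void movflt(Register dst, Register src) {
        assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
        if (UseXmmRegToRegMoveAll) {
            if (isAVX512Register(dst) || isAVX512Register(src)) {
                // Either operand requires AVX-512 encoding: emit the VEX/EVEX move.
                VexMoveOp.VMOVAPS.emit(this, AVXSize.XMM, dst, src);
            } else {
                movaps(dst, src);
            }
        } else {
            if (isAVX512Register(dst) || isAVX512Register(src)) {
                VexMoveOp.VMOVSS.emit(this, AVXSize.XMM, dst, src);
            } else {
                movss(dst, src);
            }
        }
    }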