src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.amd64/src/org/graalvm/compiler/core/amd64/AMD64ArithmeticLIRGenerator.java

rev 52889 : 8214023: Update Graal

@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.

@@ -51,10 +51,22 @@
 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.ROL;
 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.ROR;
 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SAR;
 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SHL;
 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SHR;
+import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VADDSD;
+import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VADDSS;
+import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VDIVSD;
+import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VDIVSS;
+import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VMULSD;
+import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VMULSS;
+import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VORPD;
+import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VORPS;
+import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VSUBSD;
+import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VSUBSS;
+import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VXORPD;
+import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VXORPS;
 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.BYTE;
 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.DWORD;
 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PD;
 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PS;
 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.QWORD;

@@ -82,13 +94,15 @@
 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MROp;
 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMIOp;
 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp;
 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift;
 import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp;
-import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexGeneralPurposeRVMOp;
 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexGeneralPurposeRMOp;
+import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexGeneralPurposeRVMOp;
+import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp;
 import org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize;
+import org.graalvm.compiler.asm.amd64.AVXKind;
 import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize;
 import org.graalvm.compiler.core.common.LIRKind;
 import org.graalvm.compiler.core.common.NumUtil;
 import org.graalvm.compiler.core.common.calc.FloatConvert;
 import org.graalvm.compiler.debug.GraalError;

@@ -108,19 +122,22 @@
 import org.graalvm.compiler.lir.amd64.AMD64MulDivOp;
 import org.graalvm.compiler.lir.amd64.AMD64ShiftOp;
 import org.graalvm.compiler.lir.amd64.AMD64SignExtendOp;
 import org.graalvm.compiler.lir.amd64.AMD64Unary;
 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorBinary;
+import org.graalvm.compiler.lir.amd64.vector.AMD64VectorBinary.AVXBinaryOp;
 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorUnary;
 import org.graalvm.compiler.lir.gen.ArithmeticLIRGenerator;
 import org.graalvm.compiler.lir.gen.LIRGenerator;
 
 import jdk.vm.ci.amd64.AMD64;
+import jdk.vm.ci.amd64.AMD64.CPUFeature;
 import jdk.vm.ci.amd64.AMD64Kind;
 import jdk.vm.ci.code.CodeUtil;
 import jdk.vm.ci.code.Register;
 import jdk.vm.ci.code.RegisterValue;
+import jdk.vm.ci.code.TargetDescription;
 import jdk.vm.ci.meta.AllocatableValue;
 import jdk.vm.ci.meta.Constant;
 import jdk.vm.ci.meta.JavaConstant;
 import jdk.vm.ci.meta.JavaKind;
 import jdk.vm.ci.meta.PlatformKind;

@@ -173,22 +190,33 @@
 
     @Override
     public Variable emitNegate(Value inputVal) {
         AllocatableValue input = asAllocatable(inputVal);
         Variable result = getLIRGen().newVariable(LIRKind.combine(input));
+        boolean isAvx = supportAVX();
         switch ((AMD64Kind) input.getPlatformKind()) {
             case DWORD:
                 getLIRGen().append(new AMD64Unary.MOp(NEG, DWORD, result, input));
                 break;
             case QWORD:
                 getLIRGen().append(new AMD64Unary.MOp(NEG, QWORD, result, input));
                 break;
             case SINGLE:
-                getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PS, result, input, JavaConstant.forFloat(Float.intBitsToFloat(0x80000000)), 16));
+                JavaConstant floatMask = JavaConstant.forFloat(Float.intBitsToFloat(0x80000000));
+                if (isAvx) {
+                    getLIRGen().append(new AVXBinaryOp(VXORPS, getRegisterSize(result), result, asAllocatable(input), asAllocatable(getLIRGen().emitJavaConstant(floatMask))));
+                } else {
+                    getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PS, result, input, floatMask, 16));
+                }
                 break;
             case DOUBLE:
-                getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PD, result, input, JavaConstant.forDouble(Double.longBitsToDouble(0x8000000000000000L)), 16));
+                JavaConstant doubleMask = JavaConstant.forDouble(Double.longBitsToDouble(0x8000000000000000L));
+                if (isAvx) {
+                    getLIRGen().append(new AVXBinaryOp(VXORPD, getRegisterSize(result), result, asAllocatable(input), asAllocatable(getLIRGen().emitJavaConstant(doubleMask))));
+                } else {
+                    getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PD, result, input, doubleMask, 16));
+                }
                 break;
             default:
                 throw GraalError.shouldNotReachHere(input.getPlatformKind().toString());
         }
         return result;
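
For reference, the emitNegate lowering above negates a SINGLE or DOUBLE value by XOR-ing it with a sign-bit mask (0x80000000 for float, 0x8000000000000000L for double), emitted as VXORPS/VXORPD when AVX is available and as the legacy SSE XOR otherwise. A minimal plain-Java sketch of the same bit trick follows; the SignFlipSketch class and its method names are made up for illustration and are not part of this change.

// Flip the IEEE-754 sign bit with an XOR mask -- the same idea emitNegate()
// lowers to XORPS/VXORPS (float) and XORPD/VXORPD (double).
public class SignFlipSketch {
    static float negateFloat(float f) {
        return Float.intBitsToFloat(Float.floatToRawIntBits(f) ^ 0x80000000);
    }

    static double negateDouble(double d) {
        return Double.longBitsToDouble(Double.doubleToRawLongBits(d) ^ 0x8000000000000000L);
    }

    public static void main(String[] args) {
        System.out.println(negateFloat(1.5f));   // -1.5
        System.out.println(negateDouble(-2.25)); // 2.25
    }
}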

@@ -299,10 +327,11 @@
         return result;
     }
 
     @Override
     public Variable emitAdd(LIRKind resultKind, Value a, Value b, boolean setFlags) {
+        boolean isAvx = supportAVX();
         switch ((AMD64Kind) a.getPlatformKind()) {
             case DWORD:
                 if (isJavaConstant(b) && !setFlags) {
                     long displacement = asJavaConstant(b).asLong();
                     if (NumUtil.isInt(displacement) && displacement != 1 && displacement != -1) {

@@ -317,29 +346,46 @@
                         return emitBaseOffsetLea(resultKind, a, (int) displacement, OperandSize.QWORD);
                     }
                 }
                 return emitBinary(resultKind, ADD, QWORD, true, a, b, setFlags);
             case SINGLE:
+                if (isAvx) {
+                    return emitBinary(resultKind, VADDSS, a, b);
+                } else {
                     return emitBinary(resultKind, SSEOp.ADD, SS, true, a, b);
+                }
             case DOUBLE:
+                if (isAvx) {
+                    return emitBinary(resultKind, VADDSD, a, b);
+                } else {
                     return emitBinary(resultKind, SSEOp.ADD, SD, true, a, b);
+                }
             default:
                 throw GraalError.shouldNotReachHere();
         }
     }
 
     @Override
     public Variable emitSub(LIRKind resultKind, Value a, Value b, boolean setFlags) {
+        boolean isAvx = supportAVX();
         switch ((AMD64Kind) a.getPlatformKind()) {
             case DWORD:
                 return emitBinary(resultKind, SUB, DWORD, false, a, b, setFlags);
             case QWORD:
                 return emitBinary(resultKind, SUB, QWORD, false, a, b, setFlags);
             case SINGLE:
+                if (isAvx) {
+                    return emitBinary(resultKind, VSUBSS, a, b);
+                } else {
                     return emitBinary(resultKind, SSEOp.SUB, SS, false, a, b);
+                }
             case DOUBLE:
+                if (isAvx) {
+                    return emitBinary(resultKind, VSUBSD, a, b);
+                } else {
                     return emitBinary(resultKind, SSEOp.SUB, SD, false, a, b);
+                }
             default:
                 throw GraalError.shouldNotReachHere();
         }
     }
 

@@ -372,20 +418,29 @@
         }
     }
 
     @Override
     public Variable emitMul(Value a, Value b, boolean setFlags) {
+        boolean isAvx = supportAVX();
         LIRKind resultKind = LIRKind.combine(a, b);
         switch ((AMD64Kind) a.getPlatformKind()) {
             case DWORD:
                 return emitIMUL(DWORD, a, b);
             case QWORD:
                 return emitIMUL(QWORD, a, b);
             case SINGLE:
+                if (isAvx) {
+                    return emitBinary(resultKind, VMULSS, a, b);
+                } else {
                     return emitBinary(resultKind, SSEOp.MUL, SS, true, a, b);
+                }
             case DOUBLE:
+                if (isAvx) {
+                    return emitBinary(resultKind, VMULSD, a, b);
+                } else {
                     return emitBinary(resultKind, SSEOp.MUL, SD, true, a, b);
+                }
             default:
                 throw GraalError.shouldNotReachHere();
         }
     }
 

@@ -505,22 +560,31 @@
         return new Value[]{getLIRGen().emitMove(op.getQuotient()), getLIRGen().emitMove(op.getRemainder())};
     }
 
     @Override
     public Value emitDiv(Value a, Value b, LIRFrameState state) {
+        boolean isAvx = supportAVX();
         LIRKind resultKind = LIRKind.combine(a, b);
         switch ((AMD64Kind) a.getPlatformKind()) {
             case DWORD:
                 AMD64MulDivOp op = emitIDIV(DWORD, a, b, state);
                 return getLIRGen().emitMove(op.getQuotient());
             case QWORD:
                 AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state);
                 return getLIRGen().emitMove(lop.getQuotient());
             case SINGLE:
+                if (isAvx) {
+                    return emitBinary(resultKind, VDIVSS, a, b);
+                } else {
                     return emitBinary(resultKind, SSEOp.DIV, SS, false, a, b);
+                }
             case DOUBLE:
+                if (isAvx) {
+                    return emitBinary(resultKind, VDIVSD, a, b);
+                } else {
                     return emitBinary(resultKind, SSEOp.DIV, SD, false, a, b);
+                }
             default:
                 throw GraalError.shouldNotReachHere();
         }
     }
 

@@ -597,37 +661,55 @@
         }
     }
 
     @Override
     public Variable emitOr(Value a, Value b) {
+        boolean isAvx = supportAVX();
         LIRKind resultKind = LIRKind.combine(a, b);
         switch ((AMD64Kind) a.getPlatformKind()) {
             case DWORD:
                 return emitBinary(resultKind, OR, DWORD, true, a, b, false);
             case QWORD:
                 return emitBinary(resultKind, OR, QWORD, true, a, b, false);
             case SINGLE:
+                if (isAvx) {
+                    return emitBinary(resultKind, VORPS, a, b);
+                } else {
                     return emitBinary(resultKind, SSEOp.OR, PS, true, a, b);
+                }
             case DOUBLE:
+                if (isAvx) {
+                    return emitBinary(resultKind, VORPD, a, b);
+                } else {
                     return emitBinary(resultKind, SSEOp.OR, PD, true, a, b);
+                }
             default:
                 throw GraalError.shouldNotReachHere();
         }
     }
 
     @Override
     public Variable emitXor(Value a, Value b) {
+        boolean isAvx = supportAVX();
         LIRKind resultKind = LIRKind.combine(a, b);
         switch ((AMD64Kind) a.getPlatformKind()) {
             case DWORD:
                 return emitBinary(resultKind, XOR, DWORD, true, a, b, false);
             case QWORD:
                 return emitBinary(resultKind, XOR, QWORD, true, a, b, false);
             case SINGLE:
+                if (isAvx) {
+                    return emitBinary(resultKind, VXORPS, a, b);
+                } else {
                     return emitBinary(resultKind, SSEOp.XOR, PS, true, a, b);
+                }
             case DOUBLE:
+                if (isAvx) {
+                    return emitBinary(resultKind, VXORPD, a, b);
+                } else {
                     return emitBinary(resultKind, SSEOp.XOR, PD, true, a, b);
+                }
             default:
                 throw GraalError.shouldNotReachHere();
         }
     }
 

@@ -1290,6 +1372,27 @@
         } else {
             getLIRGen().append(new AMD64Binary.RMIOp(AMD64RMIOp.ROUNDSD, OperandSize.PD, result, asAllocatable(value), mode.encoding));
         }
         return result;
     }
+
+    private boolean supportAVX() {
+        TargetDescription target = getLIRGen().target();
+        return ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX);
+    }
+
+    private static AVXSize getRegisterSize(Value a) {
+        AMD64Kind kind = (AMD64Kind) a.getPlatformKind();
+        if (kind.isXMM()) {
+            return AVXKind.getRegisterSize(kind);
+        } else {
+            return AVXSize.XMM;
+        }
+    }
+
+    private Variable emitBinary(LIRKind resultKind, VexRVMOp op, Value a, Value b) {
+        Variable result = getLIRGen().newVariable(resultKind);
+        getLIRGen().append(new AVXBinaryOp(op, getRegisterSize(result), result, asAllocatable(a), asAllocatable(b)));
+        return result;
+    }
+
 }
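
The new helpers above tie the change together: supportAVX() checks CPUFeature.AVX on the target architecture, getRegisterSize() picks the AVX register size for XMM kinds, and the emitBinary(LIRKind, VexRVMOp, Value, Value) overload emits the non-destructive three-operand VEX form, while every call site keeps the destructive two-operand SSE form as the fallback. A small standalone sketch of that dispatch pattern follows; the Feature enum, the FeatureDispatchSketch class and emitAddSingle() are hypothetical stand-ins for the Graal types, not real API.

import java.util.EnumSet;

// Illustrative sketch (not Graal code): a CPU-feature check selects the
// three-operand VEX encoding when AVX is present, otherwise the two-operand SSE form.
public class FeatureDispatchSketch {
    enum Feature { SSE2, AVX }                 // stand-in for jdk.vm.ci.amd64.AMD64.CPUFeature

    private final EnumSet<Feature> features;

    FeatureDispatchSketch(EnumSet<Feature> features) {
        this.features = features;
    }

    private boolean supportAVX() {             // mirrors the supportAVX() helper added above
        return features.contains(Feature.AVX);
    }

    // Mirrors the shape of emitAdd() for SINGLE: AVX gives a non-destructive
    // three-operand instruction, SSE only a destructive two-operand one.
    String emitAddSingle() {
        if (supportAVX()) {
            return "vaddss xmm0, xmm1, xmm2";
        } else {
            return "addss xmm0, xmm1";
        }
    }

    public static void main(String[] args) {
        System.out.println(new FeatureDispatchSketch(EnumSet.of(Feature.AVX)).emitAddSingle());
        System.out.println(new FeatureDispatchSketch(EnumSet.of(Feature.SSE2)).emitAddSingle());
    }
}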