/*
 * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package org.graalvm.compiler.lir.aarch64;

import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
import static org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp.ARMv8ConstantCategory.ARITHMETIC;
import static org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp.ARMv8ConstantCategory.LOGICAL;
import static org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp.ARMv8ConstantCategory.NONE;
import static org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp.ARMv8ConstantCategory.SHIFT;
import static jdk.vm.ci.aarch64.AArch64.zr;
import static jdk.vm.ci.code.ValueUtil.asRegister;

import org.graalvm.compiler.asm.aarch64.AArch64Assembler;
import org.graalvm.compiler.asm.aarch64.AArch64Assembler.ConditionFlag;
import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.lir.LIRInstructionClass;
import org.graalvm.compiler.lir.Opcode;
import org.graalvm.compiler.lir.asm.CompilationResultBuilder;

import jdk.vm.ci.code.Register;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaConstant;

public enum AArch64ArithmeticOp {
    // TODO At least add and sub *can* be used with SP, so this should be supported
    NEG,
    NOT,
    ADD(ARITHMETIC),
    ADDS(ARITHMETIC),
    SUB(ARITHMETIC),
    SUBS(ARITHMETIC),
    MUL,
    DIV,
    SMULH,
    UMULH,
    REM,
    UDIV,
    UREM,
    AND(LOGICAL),
    ANDS(LOGICAL),
    OR(LOGICAL),
    XOR(LOGICAL),
    SHL(SHIFT),
    LSHR(SHIFT),
    ASHR(SHIFT),
    ABS,

    FADD,
    FSUB,
    FMUL,
    FDIV,
    FREM,
    FNEG,
    FABS,
    SQRT;
    /**
     * Specifies which constants can be used directly as an immediate by the given instruction,
     * without first having to be loaded into a register.
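     * <p>
     * As an illustration (the exact encodable ranges are given by the predicates in
     * {@code AArch64MacroAssembler}, e.g. {@code isArithmeticImmediate}): ARITHMETIC ops such as
     * add/sub accept 12-bit immediates, optionally shifted left by 12; LOGICAL ops such as
     * and/or/eor accept bitmask-encodable immediates like {@code 0xFF00FF00}; SHIFT ops accept
     * amounts in {@code 0 .. size - 1}.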
     */
    public enum ARMv8ConstantCategory {
        NONE,
        LOGICAL,
        ARITHMETIC,
        SHIFT
    }

    public final ARMv8ConstantCategory category;

    AArch64ArithmeticOp(ARMv8ConstantCategory category) {
        this.category = category;
    }

    AArch64ArithmeticOp() {
        this(NONE);
    }

    public static class UnaryOp extends AArch64LIRInstruction {
        private static final LIRInstructionClass<UnaryOp> TYPE = LIRInstructionClass.create(UnaryOp.class);

        @Opcode private final AArch64ArithmeticOp opcode;
        @Def({REG}) protected AllocatableValue result;
        @Use({REG}) protected AllocatableValue x;

        public UnaryOp(AArch64ArithmeticOp opcode, AllocatableValue result, AllocatableValue x) {
            super(TYPE);
            this.opcode = opcode;
            this.result = result;
            this.x = x;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
            Register dst = asRegister(result);
            Register src = asRegister(x);
            int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
            switch (opcode) {
                case NEG:
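                    // NEG is an alias of SUB with the zero register: dst = zr - src.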
                    masm.sub(size, dst, zr, src);
                    break;
                case FNEG:
                    masm.fneg(size, dst, src);
                    break;
                case NOT:
                    masm.not(size, dst, src);
                    break;
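                // Integer abs: compare against zero, then use a conditional select-negate to
                // pick src when src >= 0 and -src when the compare set the LT condition.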
                case ABS:
                    masm.cmp(size, src, 0);
                    masm.csneg(size, dst, src, ConditionFlag.LT);
                    break;
                case FABS:
                    masm.fabs(size, dst, src);
                    break;
                case SQRT:
                    masm.fsqrt(size, dst, src);
                    break;
                default:
                    throw GraalError.shouldNotReachHere("op=" + opcode.name());
            }
        }
    }

    public static class BinaryConstOp extends AArch64LIRInstruction {
        private static final LIRInstructionClass<BinaryConstOp> TYPE = LIRInstructionClass.create(BinaryConstOp.class);

        @Opcode private final AArch64ArithmeticOp op;
        @Def({REG}) protected AllocatableValue result;
        @Use({REG}) protected AllocatableValue a;
        private final JavaConstant b;

        public BinaryConstOp(AArch64ArithmeticOp op, AllocatableValue result, AllocatableValue a, JavaConstant b) {
            super(TYPE);
            this.op = op;
            this.result = result;
            this.a = a;
            this.b = b;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
            assert op.category != NONE;
            Register dst = asRegister(result);
            Register src = asRegister(a);
            int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
            switch (op) {
                case ADD:
                    // Don't use asInt() here, since we can't use asInt on a long variable, even
                    // if the constant easily fits as an int.
                    assert AArch64MacroAssembler.isArithmeticImmediate(b.asLong());
                    masm.add(size, dst, src, (int) b.asLong());
                    break;
                case SUB:
                    // Don't use asInt() here, since we can't use asInt on a long variable, even
                    // if the constant easily fits as an int.
                    assert AArch64MacroAssembler.isArithmeticImmediate(b.asLong());
                    masm.sub(size, dst, src, (int) b.asLong());
                    break;
                case AND:
                    // XXX Should this be handled somewhere else?
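                    // An all-ones mask is not encodable as a logical immediate (the AArch64
                    // bitmask encoding excludes all-zeros and all-ones), so AND with
                    // 0xFFFF_FFFF on a 32-bit value degenerates to a plain register move.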
                    if (size == 32 && b.asLong() == 0xFFFF_FFFFL) {
                        masm.mov(size, dst, src);
                    } else {
                        masm.and(size, dst, src, b.asLong());
                    }
                    break;
                case ANDS:
                    masm.ands(size, dst, src, b.asLong());
                    break;
                case OR:
                    masm.or(size, dst, src, b.asLong());
                    break;
                case XOR:
                    masm.eor(size, dst, src, b.asLong());
                    break;
                case SHL:
                    masm.shl(size, dst, src, b.asLong());
                    break;
                case LSHR:
                    masm.lshr(size, dst, src, b.asLong());
                    break;
                case ASHR:
                    masm.ashr(size, dst, src, b.asLong());
                    break;
                default:
                    throw GraalError.shouldNotReachHere("op=" + op.name());
            }
        }
    }

    public static class BinaryOp extends AArch64LIRInstruction {
        private static final LIRInstructionClass<BinaryOp> TYPE = LIRInstructionClass.create(BinaryOp.class);

        @Opcode private final AArch64ArithmeticOp op;
        @Def({REG}) protected AllocatableValue result;
        @Use({REG}) protected AllocatableValue a;
        @Use({REG}) protected AllocatableValue b;

        public BinaryOp(AArch64ArithmeticOp op, AllocatableValue result, AllocatableValue a, AllocatableValue b) {
            super(TYPE);
            this.op = op;
            this.result = result;
            this.a = a;
            this.b = b;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
            Register dst = asRegister(result);
            Register src1 = asRegister(a);
            Register src2 = asRegister(b);
            int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
            switch (op) {
                case ADD:
                    masm.add(size, dst, src1, src2);
                    break;
                case ADDS:
                    masm.adds(size, dst, src1, src2);
                    break;
                case SUB:
                    masm.sub(size, dst, src1, src2);
                    break;
                case SUBS:
                    masm.subs(size, dst, src1, src2);
                    break;
                case MUL:
                    masm.mul(size, dst, src1, src2);
                    break;
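                // umulh/smulh yield the high half of the full-width product, e.g. the upper
                // 64 bits of a 64 x 64 -> 128 bit multiply.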
                case UMULH:
                    masm.umulh(size, dst, src1, src2);
                    break;
                case SMULH:
                    masm.smulh(size, dst, src1, src2);
                    break;
                case DIV:
                    masm.sdiv(size, dst, src1, src2);
                    break;
                case UDIV:
                    masm.udiv(size, dst, src1, src2);
                    break;
                case AND:
                    masm.and(size, dst, src1, src2);
                    break;
                case ANDS:
                    masm.ands(size, dst, src1, src2);
                    break;
                case OR:
                    masm.or(size, dst, src1, src2);
                    break;
                case XOR:
                    masm.eor(size, dst, src1, src2);
                    break;
                case SHL:
                    masm.shl(size, dst, src1, src2);
                    break;
                case LSHR:
                    masm.lshr(size, dst, src1, src2);
                    break;
                case ASHR:
                    masm.ashr(size, dst, src1, src2);
                    break;
                case FADD:
                    masm.fadd(size, dst, src1, src2);
                    break;
                case FSUB:
                    masm.fsub(size, dst, src1, src2);
                    break;
                case FMUL:
                    masm.fmul(size, dst, src1, src2);
                    break;
                case FDIV:
                    masm.fdiv(size, dst, src1, src2);
                    break;
                default:
                    throw GraalError.shouldNotReachHere("op=" + op.name());
            }
        }
    }

    /**
     * Class used for instructions that have to reuse one of their arguments. At the moment this
     * only applies to the remainder instructions, since {@code n % d} has to be computed as
     * {@code rem = n - TruncatingDivision(n, d) * d}, which still needs both inputs after the
     * division; the {@code @Alive} annotations below keep them live for the whole sequence.
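     * <p>
     * For the signed case the expansion typically looks like the following sketch (illustrative;
     * register names are placeholders):
     *
     * <pre>
     * sdiv temp, n, d           // temp = TruncatingDivision(n, d)
     * msub result, temp, d, n   // result = n - temp * d
     * </pre>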
     *
     * TODO (das) Replace the remainder nodes in the LIR.
     */
    public static class BinaryCompositeOp extends AArch64LIRInstruction {
        private static final LIRInstructionClass<BinaryCompositeOp> TYPE = LIRInstructionClass.create(BinaryCompositeOp.class);
        @Opcode private final AArch64ArithmeticOp op;
        @Def({REG}) protected AllocatableValue result;
        @Alive({REG}) protected AllocatableValue a;
        @Alive({REG}) protected AllocatableValue b;

        public BinaryCompositeOp(AArch64ArithmeticOp op, AllocatableValue result, AllocatableValue a, AllocatableValue b) {
            super(TYPE);
            this.op = op;
            this.result = result;
            this.a = a;
            this.b = b;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
            Register dst = asRegister(result);
            Register src1 = asRegister(a);
            Register src2 = asRegister(b);
            int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
            switch (op) {
                case REM:
                    masm.rem(size, dst, src1, src2);
                    break;
                case UREM:
                    masm.urem(size, dst, src1, src2);
                    break;
                case FREM:
                    masm.frem(size, dst, src1, src2);
                    break;
                default:
                    throw GraalError.shouldNotReachHere("op=" + op.name());
            }
        }
    }

    public static class AddSubShiftOp extends AArch64LIRInstruction {
        private static final LIRInstructionClass<AddSubShiftOp> TYPE = LIRInstructionClass.create(AddSubShiftOp.class);

        @Opcode private final AArch64ArithmeticOp op;
        @Def(REG) protected AllocatableValue result;
        @Use(REG) protected AllocatableValue src1;
        @Use(REG) protected AllocatableValue src2;
        private final AArch64MacroAssembler.ShiftType shiftType;
        private final int shiftAmt;

        /**
         * Computes {@code result = src1 <op> src2 <shiftType> <shiftAmt>}.
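         * <p>
         * For example (illustrative), with {@code op = ADD}, {@code shiftType = LSL} and
         * {@code shiftAmt = 3}, this emits {@code add dst, src1, src2, lsl #3}, i.e.
         * {@code dst = src1 + (src2 << 3)}.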
         */
        public AddSubShiftOp(AArch64ArithmeticOp op, AllocatableValue result, AllocatableValue src1, AllocatableValue src2, AArch64MacroAssembler.ShiftType shiftType, int shiftAmt) {
            super(TYPE);
            assert op == ADD || op == SUB;
            this.op = op;
            this.result = result;
            this.src1 = src1;
            this.src2 = src2;
            this.shiftType = shiftType;
            this.shiftAmt = shiftAmt;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
            int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
            switch (op) {
                case ADD:
                    masm.add(size, asRegister(result), asRegister(src1), asRegister(src2), shiftType, shiftAmt);
                    break;
                case SUB:
                    masm.sub(size, asRegister(result), asRegister(src1), asRegister(src2), shiftType, shiftAmt);
                    break;
                default:
                    throw GraalError.shouldNotReachHere("op=" + op.name());
            }
        }
    }

    public static class ExtendedAddShiftOp extends AArch64LIRInstruction {
        private static final LIRInstructionClass<ExtendedAddShiftOp> TYPE = LIRInstructionClass.create(ExtendedAddShiftOp.class);
        @Def(REG) protected AllocatableValue result;
        @Use(REG) protected AllocatableValue src1;
        @Use(REG) protected AllocatableValue src2;
        private final AArch64Assembler.ExtendType extendType;
        private final int shiftAmt;

        /**
         * Computes {@code result = src1 + (extendType(src2) << shiftAmt)}.
         * <p>
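         * For example (illustrative), {@code add x0, x1, w2, sxtw #2} computes
         * {@code x0 = x1 + (sign-extend(w2) << 2)}, e.g. when adding a scaled int index to a
         * 64-bit base address.
         *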
         * @param extendType defines how {@code src2} is extended to the same size as {@code src1}.
         * @param shiftAmt must be in the range 0 to 4, inclusive.
         */
        public ExtendedAddShiftOp(AllocatableValue result, AllocatableValue src1, AllocatableValue src2, AArch64Assembler.ExtendType extendType, int shiftAmt) {
            super(TYPE);
            this.result = result;
            this.src1 = src1;
            this.src2 = src2;
            this.extendType = extendType;
            this.shiftAmt = shiftAmt;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
            int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
            masm.add(size, asRegister(result), asRegister(src1), asRegister(src2), extendType, shiftAmt);
        }
    }

}