34 import org.graalvm.compiler.asm.aarch64.AArch64Assembler.ConditionFlag;
35 import org.graalvm.compiler.debug.GraalError;
36 import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler;
37 import org.graalvm.compiler.lir.LIRInstructionClass;
38 import org.graalvm.compiler.lir.Opcode;
39 import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
40
41 import jdk.vm.ci.code.Register;
42 import jdk.vm.ci.meta.AllocatableValue;
43 import jdk.vm.ci.meta.JavaConstant;
44
45 public enum AArch64ArithmeticOp {
46 // TODO At least add and sub *can* be used with SP, so this should be supported
// Integer ops. The constructor argument tags an op with the immediate-operand
// category it supports (ARITHMETIC / LOGICAL / SHIFT); ops declared without an
// argument presumably default to a register-only category — the category enum
// itself is outside this chunk, so confirm there.
47 NEG,
48 NOT,
49 ADD(ARITHMETIC),
50 ADDS(ARITHMETIC),
51 SUB(ARITHMETIC),
52 SUBS(ARITHMETIC),
53 MUL,
54 DIV,
55 SMULH,
56 UMULH,
57 REM,
58 UDIV,
59 UREM,
60 AND(LOGICAL),
61 ANDS(LOGICAL),
62 OR(LOGICAL),
63 XOR(LOGICAL),
64 SHL(SHIFT),
65 LSHR(SHIFT),
66 ASHR(SHIFT),
67 ABS,
68
// Floating-point ops (register operands only — no category argument).
69 FADD,
70 FSUB,
71 FMUL,
72 FDIV,
73 FREM,
159
// Emits code for a binary op whose second operand (field b) is a JavaConstant.
// NOTE(review): this listing is torn — the internal numbering jumps from 198 to
// 274, and the code after the gap uses src1/src2, which belong to a different
// (register-register) emitCode overload whose declaration is in the missing span.
160 @Override
161 public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
// Only ops tagged with an immediate category are legal with a constant operand.
162 assert op.category != NONE;
163 Register dst = asRegister(result);
164 Register src = asRegister(a);
// Operand width in bits (32 or 64), derived from the result's platform kind.
165 int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
166 switch (op) {
167 case ADD:
168 // Don't use asInt() here, since we can't use asInt on a long variable, even
169 // if the constant easily fits as an int.
170 assert AArch64MacroAssembler.isArithmeticImmediate(b.asLong());
171 masm.add(size, dst, src, (int) b.asLong());
172 break;
173 case SUB:
174 // Don't use asInt() here, since we can't use asInt on a long variable, even
175 // if the constant easily fits as an int.
176 assert AArch64MacroAssembler.isArithmeticImmediate(b.asLong());
177 masm.sub(size, dst, src, (int) b.asLong());
178 break;
// NOTE(review): ADDS/SUBS immediate cases are absent in this revision although
// the enum declares ADDS(ARITHMETIC)/SUBS(ARITHMETIC), so those ops would fall
// through to default and throw. The later listing in this file adds them.
179 case AND:
180 // XXX Should this be handled somewhere else?
// AND with all-ones on a 32-bit value is the identity; emit a plain move
// (0xFFFF_FFFF is also not encodable as a 32-bit logical immediate).
181 if (size == 32 && b.asLong() == 0xFFFF_FFFFL) {
182 masm.mov(size, dst, src);
183 } else {
184 masm.and(size, dst, src, b.asLong());
185 }
186 break;
187 case ANDS:
188 masm.ands(size, dst, src, b.asLong());
189 break;
190 case OR:
191 masm.or(size, dst, src, b.asLong());
192 break;
193 case XOR:
194 masm.eor(size, dst, src, b.asLong());
195 break;
196 case SHL:
197 masm.shl(size, dst, src, b.asLong());
198 break;
// ---- gap: internal lines 199-273 are missing from this chunk ----
// From here on src1/src2 refer to register operands of the other overload.
274 masm.shl(size, dst, src1, src2);
275 break;
276 case LSHR:
277 masm.lshr(size, dst, src1, src2);
278 break;
279 case ASHR:
280 masm.ashr(size, dst, src1, src2);
281 break;
282 case FADD:
283 masm.fadd(size, dst, src1, src2);
284 break;
285 case FSUB:
286 masm.fsub(size, dst, src1, src2);
287 break;
288 case FMUL:
289 masm.fmul(size, dst, src1, src2);
290 break;
291 case FDIV:
292 masm.fdiv(size, dst, src1, src2);
293 break;
// Any op not handled above indicates a usage error at the construction site.
294 default:
295 throw GraalError.shouldNotReachHere("op=" + op.name());
296 }
297 }
298 }
299
300 /**
301 * Class used for instructions that have to reuse one of their arguments. This only applies to
302 * the remainder instructions at the moment, since we have to compute n % d using rem = n -
303 * TruncatingDivision(n, d) * d
304 *
305 * TODO (das) Replace the remainder nodes in the LIR.
306 */
// See the class javadoc above: used for ops (currently the remainder ops) that
// recompute from both inputs after producing intermediate values.
// NOTE(review): @Alive — rather than @Use — presumably keeps a and b live for
// the whole instruction so the allocator cannot alias them with result;
// confirm against LIRInstruction's annotation docs. Class body continues
// beyond this chunk (constructor/emitCode not visible).
307 public static class BinaryCompositeOp extends AArch64LIRInstruction {
308 private static final LIRInstructionClass<BinaryCompositeOp> TYPE = LIRInstructionClass.create(BinaryCompositeOp.class);
// The arithmetic operation performed (per the javadoc, remainder ops today).
309 @Opcode private final AArch64ArithmeticOp op;
310 @Def({REG}) protected AllocatableValue result;
311 @Alive({REG}) protected AllocatableValue a;
312 @Alive({REG}) protected AllocatableValue b;
313
|
34 import org.graalvm.compiler.asm.aarch64.AArch64Assembler.ConditionFlag;
35 import org.graalvm.compiler.debug.GraalError;
36 import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler;
37 import org.graalvm.compiler.lir.LIRInstructionClass;
38 import org.graalvm.compiler.lir.Opcode;
39 import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
40
41 import jdk.vm.ci.code.Register;
42 import jdk.vm.ci.meta.AllocatableValue;
43 import jdk.vm.ci.meta.JavaConstant;
44
45 public enum AArch64ArithmeticOp {
46 // TODO At least add and sub *can* be used with SP, so this should be supported
// Integer ops. The constructor argument tags an op with the immediate-operand
// category it supports (ARITHMETIC / LOGICAL / SHIFT); ops declared without an
// argument presumably default to a register-only category — the category enum
// itself is outside this chunk, so confirm there.
47 NEG,
48 NOT,
49 ADD(ARITHMETIC),
50 ADDS(ARITHMETIC),
51 SUB(ARITHMETIC),
52 SUBS(ARITHMETIC),
53 MUL,
// NOTE(review): MULVS is new in this revision; the register emitCode below
// lowers it via masm.mulvs — presumably multiply with overflow detection,
// confirm against the macro assembler.
54 MULVS,
55 DIV,
56 SMULH,
57 UMULH,
58 REM,
59 UDIV,
60 UREM,
61 AND(LOGICAL),
62 ANDS(LOGICAL),
63 OR(LOGICAL),
64 XOR(LOGICAL),
65 SHL(SHIFT),
66 LSHR(SHIFT),
67 ASHR(SHIFT),
68 ABS,
69
// Floating-point ops (register operands only — no category argument).
70 FADD,
71 FSUB,
72 FMUL,
73 FDIV,
74 FREM,
160
// Emits code for a binary op whose second operand (field b) is a JavaConstant.
// This revision adds the ADDS/SUBS immediate cases and MULVS relative to the
// earlier listing above. NOTE(review): the listing is torn — the internal
// numbering jumps from 207 to 283, and the code after the gap uses src1/src2,
// which belong to a different (register-register) emitCode overload whose
// declaration is in the missing span.
161 @Override
162 public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
// Only ops tagged with an immediate category are legal with a constant operand.
163 assert op.category != NONE;
164 Register dst = asRegister(result);
165 Register src = asRegister(a);
// Operand width in bits (32 or 64), derived from the result's platform kind.
166 int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
167 switch (op) {
168 case ADD:
169 // Don't use asInt() here, since we can't use asInt on a long variable, even
170 // if the constant easily fits as an int.
171 assert AArch64MacroAssembler.isArithmeticImmediate(b.asLong());
172 masm.add(size, dst, src, (int) b.asLong());
173 break;
174 case SUB:
175 // Don't use asInt() here, since we can't use asInt on a long variable, even
176 // if the constant easily fits as an int.
177 assert AArch64MacroAssembler.isArithmeticImmediate(b.asLong());
178 masm.sub(size, dst, src, (int) b.asLong());
179 break;
// Flag-setting variants; the same arithmetic-immediate restriction applies.
180 case ADDS:
181 assert AArch64MacroAssembler.isArithmeticImmediate(b.asLong());
182 masm.adds(size, dst, src, (int) b.asLong());
183 break;
184 case SUBS:
185 assert AArch64MacroAssembler.isArithmeticImmediate(b.asLong());
186 masm.subs(size, dst, src, (int) b.asLong());
187 break;
188 case AND:
189 // XXX Should this be handled somewhere else?
// AND with all-ones on a 32-bit value is the identity; emit a plain move
// (0xFFFF_FFFF is also not encodable as a 32-bit logical immediate).
190 if (size == 32 && b.asLong() == 0xFFFF_FFFFL) {
191 masm.mov(size, dst, src);
192 } else {
193 masm.and(size, dst, src, b.asLong());
194 }
195 break;
196 case ANDS:
197 masm.ands(size, dst, src, b.asLong());
198 break;
199 case OR:
200 masm.or(size, dst, src, b.asLong());
201 break;
202 case XOR:
203 masm.eor(size, dst, src, b.asLong());
204 break;
205 case SHL:
206 masm.shl(size, dst, src, b.asLong());
207 break;
// ---- gap: internal lines 208-282 are missing from this chunk ----
// From here on src1/src2 refer to register operands of the other overload.
283 masm.shl(size, dst, src1, src2);
284 break;
285 case LSHR:
286 masm.lshr(size, dst, src1, src2);
287 break;
288 case ASHR:
289 masm.ashr(size, dst, src1, src2);
290 break;
291 case FADD:
292 masm.fadd(size, dst, src1, src2);
293 break;
294 case FSUB:
295 masm.fsub(size, dst, src1, src2);
296 break;
297 case FMUL:
298 masm.fmul(size, dst, src1, src2);
299 break;
300 case FDIV:
301 masm.fdiv(size, dst, src1, src2);
302 break;
303 case MULVS:
304 masm.mulvs(size, dst, src1, src2);
305 break;
// Any op not handled above indicates a usage error at the construction site.
306 default:
307 throw GraalError.shouldNotReachHere("op=" + op.name());
308 }
309 }
310 }
311
312 /**
313 * Class used for instructions that have to reuse one of their arguments. This only applies to
314 * the remainder instructions at the moment, since we have to compute n % d using rem = n -
315 * TruncatingDivision(n, d) * d
316 *
317 * TODO (das) Replace the remainder nodes in the LIR.
318 */
// See the class javadoc above: used for ops (currently the remainder ops) that
// recompute from both inputs after producing intermediate values.
// NOTE(review): @Alive — rather than @Use — presumably keeps a and b live for
// the whole instruction so the allocator cannot alias them with result;
// confirm against LIRInstruction's annotation docs. Class body continues
// beyond this chunk (constructor/emitCode not visible).
319 public static class BinaryCompositeOp extends AArch64LIRInstruction {
320 private static final LIRInstructionClass<BinaryCompositeOp> TYPE = LIRInstructionClass.create(BinaryCompositeOp.class);
// The arithmetic operation performed (per the javadoc, remainder ops today).
321 @Opcode private final AArch64ArithmeticOp op;
322 @Def({REG}) protected AllocatableValue result;
323 @Alive({REG}) protected AllocatableValue a;
324 @Alive({REG}) protected AllocatableValue b;
325
|