44 import org.graalvm.compiler.lir.Variable;
45 import org.graalvm.compiler.lir.aarch64.AArch64AddressValue;
46 import org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp;
47 import org.graalvm.compiler.lir.aarch64.AArch64ArrayCompareToOp;
48 import org.graalvm.compiler.lir.aarch64.AArch64ArrayEqualsOp;
49 import org.graalvm.compiler.lir.aarch64.AArch64ByteSwapOp;
50 import org.graalvm.compiler.lir.aarch64.AArch64Compare;
51 import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow;
52 import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.BranchOp;
53 import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.CondMoveOp;
54 import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.StrategySwitchOp;
55 import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.TableSwitchOp;
56 import org.graalvm.compiler.lir.aarch64.AArch64LIRFlagsVersioned;
57 import org.graalvm.compiler.lir.aarch64.AArch64Move;
58 import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.AtomicReadAndAddOp;
59 import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.AtomicReadAndAddLSEOp;
60 import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.CompareAndSwapOp;
61 import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.AtomicReadAndWriteOp;
62 import org.graalvm.compiler.lir.aarch64.AArch64Move.MembarOp;
63 import org.graalvm.compiler.lir.aarch64.AArch64PauseOp;
64 import org.graalvm.compiler.lir.gen.LIRGenerationResult;
65 import org.graalvm.compiler.lir.gen.LIRGenerator;
66 import org.graalvm.compiler.phases.util.Providers;
67
68 import jdk.vm.ci.aarch64.AArch64;
69 import jdk.vm.ci.aarch64.AArch64Kind;
70 import jdk.vm.ci.code.CallingConvention;
71 import jdk.vm.ci.code.RegisterValue;
72 import jdk.vm.ci.meta.AllocatableValue;
73 import jdk.vm.ci.meta.JavaConstant;
74 import jdk.vm.ci.meta.JavaKind;
75 import jdk.vm.ci.meta.PlatformKind;
76 import jdk.vm.ci.meta.PrimitiveConstant;
77 import jdk.vm.ci.meta.Value;
78 import jdk.vm.ci.meta.ValueKind;
79
80 public abstract class AArch64LIRGenerator extends LIRGenerator {
81
82 public AArch64LIRGenerator(LIRKindTool lirKindTool, AArch64ArithmeticLIRGenerator arithmeticLIRGen, MoveFactory moveFactory, Providers providers, LIRGenerationResult lirGenRes) {
83 super(lirKindTool, arithmeticLIRGen, moveFactory, providers, lirGenRes);
448 append(new AArch64ByteSwapOp(result, input));
449 return result;
450 }
451
452 @Override
453 public Variable emitArrayCompareTo(JavaKind kind1, JavaKind kind2, Value array1, Value array2, Value length1, Value length2) {
454 LIRKind resultKind = LIRKind.value(AArch64Kind.DWORD);
455 // DMS TODO: check calling conversion and registers used
456 RegisterValue res = AArch64.r0.asValue(resultKind);
457 RegisterValue cnt1 = AArch64.r1.asValue(length1.getValueKind());
458 RegisterValue cnt2 = AArch64.r2.asValue(length2.getValueKind());
459 emitMove(cnt1, length1);
460 emitMove(cnt2, length2);
461 append(new AArch64ArrayCompareToOp(this, kind1, kind2, res, array1, array2, cnt1, cnt2));
462 Variable result = newVariable(resultKind);
463 emitMove(result, res);
464 return result;
465 }
466
467 @Override
468 public Variable emitArrayEquals(JavaKind kind, Value array1, Value array2, Value length) {
469 Variable result = newVariable(LIRKind.value(AArch64Kind.DWORD));
470 append(new AArch64ArrayEqualsOp(this, kind, result, array1, array2, asAllocatable(length)));
471 return result;
472 }
473
474 @Override
475 protected JavaConstant zapValueForKind(PlatformKind kind) {
476 long dead = 0xDEADDEADDEADDEADL;
477 switch ((AArch64Kind) kind) {
478 case BYTE:
479 return JavaConstant.forByte((byte) dead);
480 case WORD:
481 return JavaConstant.forShort((short) dead);
482 case DWORD:
483 return JavaConstant.forInt((int) dead);
484 case QWORD:
485 return JavaConstant.forLong(dead);
486 case SINGLE:
487 return JavaConstant.forFloat(Float.intBitsToFloat((int) dead));
488 case DOUBLE:
489 return JavaConstant.forDouble(Double.longBitsToDouble(dead));
490 default:
496 * Loads value into virtual register. Contrary to {@link #load(Value)} this handles
497 * RegisterValues (i.e. values corresponding to fixed physical registers) correctly, by not
498 * creating an unnecessary move into a virtual register.
499 *
 * This avoids generating "mov x0, x19; ldr x0, [x0]" (where x19 is the fixed thread
 * register) when a single "ldr x0, [x19]" would suffice.
502 */
503 protected AllocatableValue loadReg(Value val) {
504 if (!(val instanceof Variable || val instanceof RegisterValue)) {
505 return emitMove(val);
506 }
507 return (AllocatableValue) val;
508 }
509
510 @Override
511 public void emitPause() {
512 append(new AArch64PauseOp());
513 }
514
    /**
     * Emits a call to native code at {@code address}.
     *
     * @param address absolute address of the native function — presumably the call target;
     *            implemented by platform-specific subclasses (abstract here)
     * @param nativeCallingConvention calling convention describing where {@code args} are placed
     * @param args argument values to pass to the native function
     */
    public abstract void emitCCall(long address, CallingConvention nativeCallingConvention, Value[] args);
516 }
|
44 import org.graalvm.compiler.lir.Variable;
45 import org.graalvm.compiler.lir.aarch64.AArch64AddressValue;
46 import org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp;
47 import org.graalvm.compiler.lir.aarch64.AArch64ArrayCompareToOp;
48 import org.graalvm.compiler.lir.aarch64.AArch64ArrayEqualsOp;
49 import org.graalvm.compiler.lir.aarch64.AArch64ByteSwapOp;
50 import org.graalvm.compiler.lir.aarch64.AArch64Compare;
51 import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow;
52 import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.BranchOp;
53 import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.CondMoveOp;
54 import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.StrategySwitchOp;
55 import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.TableSwitchOp;
56 import org.graalvm.compiler.lir.aarch64.AArch64LIRFlagsVersioned;
57 import org.graalvm.compiler.lir.aarch64.AArch64Move;
58 import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.AtomicReadAndAddOp;
59 import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.AtomicReadAndAddLSEOp;
60 import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.CompareAndSwapOp;
61 import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.AtomicReadAndWriteOp;
62 import org.graalvm.compiler.lir.aarch64.AArch64Move.MembarOp;
63 import org.graalvm.compiler.lir.aarch64.AArch64PauseOp;
64 import org.graalvm.compiler.lir.aarch64.AArch64SpeculativeBarrier;
65 import org.graalvm.compiler.lir.gen.LIRGenerationResult;
66 import org.graalvm.compiler.lir.gen.LIRGenerator;
67 import org.graalvm.compiler.phases.util.Providers;
68
69 import jdk.vm.ci.aarch64.AArch64;
70 import jdk.vm.ci.aarch64.AArch64Kind;
71 import jdk.vm.ci.code.CallingConvention;
72 import jdk.vm.ci.code.RegisterValue;
73 import jdk.vm.ci.meta.AllocatableValue;
74 import jdk.vm.ci.meta.JavaConstant;
75 import jdk.vm.ci.meta.JavaKind;
76 import jdk.vm.ci.meta.PlatformKind;
77 import jdk.vm.ci.meta.PrimitiveConstant;
78 import jdk.vm.ci.meta.Value;
79 import jdk.vm.ci.meta.ValueKind;
80
81 public abstract class AArch64LIRGenerator extends LIRGenerator {
82
83 public AArch64LIRGenerator(LIRKindTool lirKindTool, AArch64ArithmeticLIRGenerator arithmeticLIRGen, MoveFactory moveFactory, Providers providers, LIRGenerationResult lirGenRes) {
84 super(lirKindTool, arithmeticLIRGen, moveFactory, providers, lirGenRes);
449 append(new AArch64ByteSwapOp(result, input));
450 return result;
451 }
452
453 @Override
454 public Variable emitArrayCompareTo(JavaKind kind1, JavaKind kind2, Value array1, Value array2, Value length1, Value length2) {
455 LIRKind resultKind = LIRKind.value(AArch64Kind.DWORD);
456 // DMS TODO: check calling conversion and registers used
457 RegisterValue res = AArch64.r0.asValue(resultKind);
458 RegisterValue cnt1 = AArch64.r1.asValue(length1.getValueKind());
459 RegisterValue cnt2 = AArch64.r2.asValue(length2.getValueKind());
460 emitMove(cnt1, length1);
461 emitMove(cnt2, length2);
462 append(new AArch64ArrayCompareToOp(this, kind1, kind2, res, array1, array2, cnt1, cnt2));
463 Variable result = newVariable(resultKind);
464 emitMove(result, res);
465 return result;
466 }
467
468 @Override
469 public Variable emitArrayEquals(JavaKind kind, Value array1, Value array2, Value length, int constantLength, boolean directPointers) {
470 Variable result = newVariable(LIRKind.value(AArch64Kind.DWORD));
471 append(new AArch64ArrayEqualsOp(this, kind, result, array1, array2, asAllocatable(length), directPointers));
472 return result;
473 }
474
475 @Override
476 protected JavaConstant zapValueForKind(PlatformKind kind) {
477 long dead = 0xDEADDEADDEADDEADL;
478 switch ((AArch64Kind) kind) {
479 case BYTE:
480 return JavaConstant.forByte((byte) dead);
481 case WORD:
482 return JavaConstant.forShort((short) dead);
483 case DWORD:
484 return JavaConstant.forInt((int) dead);
485 case QWORD:
486 return JavaConstant.forLong(dead);
487 case SINGLE:
488 return JavaConstant.forFloat(Float.intBitsToFloat((int) dead));
489 case DOUBLE:
490 return JavaConstant.forDouble(Double.longBitsToDouble(dead));
491 default:
497 * Loads value into virtual register. Contrary to {@link #load(Value)} this handles
498 * RegisterValues (i.e. values corresponding to fixed physical registers) correctly, by not
499 * creating an unnecessary move into a virtual register.
500 *
 * This avoids generating "mov x0, x19; ldr x0, [x0]" (where x19 is the fixed thread
 * register) when a single "ldr x0, [x19]" would suffice.
503 */
504 protected AllocatableValue loadReg(Value val) {
505 if (!(val instanceof Variable || val instanceof RegisterValue)) {
506 return emitMove(val);
507 }
508 return (AllocatableValue) val;
509 }
510
511 @Override
512 public void emitPause() {
513 append(new AArch64PauseOp());
514 }
515
    /**
     * Emits a call to native code at {@code address}.
     *
     * @param address absolute address of the native function — presumably the call target;
     *            implemented by platform-specific subclasses (abstract here)
     * @param nativeCallingConvention calling convention describing where {@code args} are placed
     * @param args argument values to pass to the native function
     */
    public abstract void emitCCall(long address, CallingConvention nativeCallingConvention, Value[] args);
517
518 @Override
519 public void emitSpeculationFence() {
520 append(new AArch64SpeculativeBarrier());
521 }
522 }
|