src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.aarch64/src/org/graalvm/compiler/lir/aarch64/AArch64Move.java

  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 
  25 package org.graalvm.compiler.lir.aarch64;
  26 
  27 import static jdk.vm.ci.aarch64.AArch64.sp;
  28 import static jdk.vm.ci.aarch64.AArch64.zr;
  29 import static jdk.vm.ci.code.ValueUtil.asAllocatableValue;
  30 import static jdk.vm.ci.code.ValueUtil.asRegister;
  31 import static jdk.vm.ci.code.ValueUtil.asStackSlot;
  32 import static jdk.vm.ci.code.ValueUtil.isRegister;
  33 import static jdk.vm.ci.code.ValueUtil.isStackSlot;
  34 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.COMPOSITE;
  35 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.HINT;
  36 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
  37 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;
  38 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.UNINITIALIZED;
  39 import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
  40 import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;
  41 
  42 import org.graalvm.compiler.asm.aarch64.AArch64Address;
  43 import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler;
  44 import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler.ScratchRegister;
  45 import org.graalvm.compiler.core.common.LIRKind;
  46 import org.graalvm.compiler.core.common.type.DataPointerConstant;
  47 import org.graalvm.compiler.debug.GraalError;
  48 import org.graalvm.compiler.lir.LIRFrameState;
  49 import org.graalvm.compiler.lir.LIRInstructionClass;
  50 import org.graalvm.compiler.lir.Opcode;
  51 import org.graalvm.compiler.lir.StandardOp;
  52 import org.graalvm.compiler.lir.StandardOp.LoadConstantOp;
  53 import org.graalvm.compiler.lir.StandardOp.NullCheck;
  54 import org.graalvm.compiler.lir.StandardOp.ValueMoveOp;
  55 import org.graalvm.compiler.lir.VirtualStackSlot;
  56 import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
  57 
  58 import jdk.vm.ci.aarch64.AArch64Kind;
  59 import jdk.vm.ci.code.MemoryBarriers;
  60 import jdk.vm.ci.code.Register;
  61 import jdk.vm.ci.code.StackSlot;
  62 import jdk.vm.ci.meta.AllocatableValue;
  63 import jdk.vm.ci.meta.Constant;
  64 import jdk.vm.ci.meta.JavaConstant;
  65 import jdk.vm.ci.meta.PlatformKind;
  66 import jdk.vm.ci.meta.Value;
  67 
  68 public class AArch64Move {
  69 
  70     public static class LoadInlineConstant extends AArch64LIRInstruction implements LoadConstantOp {
  71         public static final LIRInstructionClass<LoadInlineConstant> TYPE = LIRInstructionClass.create(LoadInlineConstant.class);
  72 
  73         private JavaConstant constant;
  74         @Def({REG, STACK}) AllocatableValue result;
  75 
  76         public LoadInlineConstant(JavaConstant constant, AllocatableValue result) {


 490                     }
 491                 } else {
 492                     masm.fldr(32, dst, (AArch64Address) crb.asFloatConstRef(input));
 493                 }
 494                 break;
 495             case Double:
 496                 if (AArch64MacroAssembler.isDoubleImmediate(input.asDouble())) {
 497                     masm.fmov(64, dst, input.asDouble());
 498                 } else if (crb.compilationResult.isImmutablePIC()) {
 499                     try (ScratchRegister scr = masm.getScratchRegister()) {
 500                         Register scratch = scr.getRegister();
 501                         masm.mov(scratch, Double.doubleToRawLongBits(input.asDouble()));
 502                         masm.fmov(64, dst, scratch);
 503                     }
 504                 } else {
 505                     masm.fldr(64, dst, (AArch64Address) crb.asDoubleConstRef(input));
 506                 }
 507                 break;
 508             case Object:
 509                 if (input.isNull()) {
 510                     masm.mov(dst, 0);
 511                 } else if (crb.target.inlineObjects) {
 512                     crb.recordInlineDataInCode(input);
 513                     masm.movNativeAddress(dst, 0xDEADDEADDEADDEADL);
 514                 } else {
 515                     masm.ldr(64, dst, (AArch64Address) crb.recordDataReferenceInCode(input, 8));
 516                 }
 517                 break;
 518             default:
 519                 throw GraalError.shouldNotReachHere("kind=" + input.getJavaKind().getStackKind());
 520         }
 521     }
 522 
 523     private static void const2stack(CompilationResultBuilder crb, AArch64MacroAssembler masm, Value result, JavaConstant constant) {
 524         try (ScratchRegister addrReg = masm.getScratchRegister()) {
 525             StackSlot slot = (StackSlot) result;
 526             AArch64Address resultAddress = loadStackSlotAddress(crb, masm, slot, addrReg.getRegister());
 527             if (constant.isDefaultForKind() || constant.isNull()) {
 528                 emitStore(crb, masm, (AArch64Kind) result.getPlatformKind(), resultAddress, zr.asValue(LIRKind.combine(result)));
 529             } else {
 530                 try (ScratchRegister sc = masm.getScratchRegister()) {
 531                     Value scratchRegisterValue = sc.getRegister().asValue(LIRKind.combine(result));
 532                     const2reg(crb, masm, scratchRegisterValue, constant);
 533                     emitStore(crb, masm, (AArch64Kind) result.getPlatformKind(), resultAddress, scratchRegisterValue);


 542      * 9-bit signed, which cannot be handled by that method.
 543      *
 544      * Instead we create an address ourselves. We use scaled unsigned addressing since we know the
 545      * transfer size, which gives us a 15-bit address range for longs/doubles and a 14-bit range
 546      * for everything else.
 547      *
 548      * @param scratch Scratch register that can be used to load the address. If Value.ILLEGAL, this
 549      *            instruction fails when we try to access a StackSlot that is too large to be
 550      *            addressed directly.
 551      * @return AArch64Address of the given StackSlot. Uses the scratch register if necessary.
 552      */
 553     private static AArch64Address loadStackSlotAddress(CompilationResultBuilder crb, AArch64MacroAssembler masm, StackSlot slot, AllocatableValue scratch) {
 554         Register scratchReg = Value.ILLEGAL.equals(scratch) ? zr : asRegister(scratch);
 555         return loadStackSlotAddress(crb, masm, slot, scratchReg);
 556     }
 557 
 558     private static AArch64Address loadStackSlotAddress(CompilationResultBuilder crb, AArch64MacroAssembler masm, StackSlot slot, Register scratchReg) {
 559         int displacement = crb.frameMap.offsetForStackSlot(slot);
 560         int transferSize = slot.getPlatformKind().getSizeInBytes();
 561         return masm.makeAddress(sp, displacement, scratchReg, transferSize, /* allowOverwrite */false);
 562     }
 563 
 564 }


  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 
  25 package org.graalvm.compiler.lir.aarch64;
  26 
  27 import static jdk.vm.ci.aarch64.AArch64.sp;
  28 import static jdk.vm.ci.aarch64.AArch64.zr;
  29 import static jdk.vm.ci.code.ValueUtil.asAllocatableValue;
  30 import static jdk.vm.ci.code.ValueUtil.asRegister;
  31 import static jdk.vm.ci.code.ValueUtil.asStackSlot;
  32 import static jdk.vm.ci.code.ValueUtil.isRegister;
  33 import static jdk.vm.ci.code.ValueUtil.isStackSlot;
  34 import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
  35 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.COMPOSITE;
  36 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.CONST;
  37 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.HINT;
  38 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.ILLEGAL;
  39 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
  40 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;
  41 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.UNINITIALIZED;
  42 import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
  43 import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;
  44 
  45 import org.graalvm.compiler.asm.Label;
  46 import org.graalvm.compiler.asm.aarch64.AArch64Address;
  47 import org.graalvm.compiler.asm.aarch64.AArch64Assembler;
  48 import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler;
  49 import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler.ScratchRegister;
  50 import org.graalvm.compiler.core.common.CompressEncoding;
  51 import org.graalvm.compiler.core.common.LIRKind;
  52 import org.graalvm.compiler.core.common.spi.LIRKindTool;
  53 import org.graalvm.compiler.core.common.type.DataPointerConstant;
  54 import org.graalvm.compiler.debug.GraalError;
  55 import org.graalvm.compiler.lir.LIRFrameState;
  56 import org.graalvm.compiler.lir.LIRInstructionClass;
  57 import org.graalvm.compiler.lir.Opcode;
  58 import org.graalvm.compiler.lir.StandardOp;
  59 import org.graalvm.compiler.lir.StandardOp.LoadConstantOp;
  60 import org.graalvm.compiler.lir.StandardOp.NullCheck;
  61 import org.graalvm.compiler.lir.StandardOp.ValueMoveOp;
  62 import org.graalvm.compiler.lir.VirtualStackSlot;
  63 import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
  64 import org.graalvm.compiler.options.OptionValues;
  65 
  66 import jdk.vm.ci.aarch64.AArch64Kind;
  67 import jdk.vm.ci.code.MemoryBarriers;
  68 import jdk.vm.ci.code.Register;
  69 import jdk.vm.ci.code.StackSlot;
  70 import jdk.vm.ci.meta.AllocatableValue;
  71 import jdk.vm.ci.meta.Constant;
  72 import jdk.vm.ci.meta.JavaConstant;
  73 import jdk.vm.ci.meta.PlatformKind;
  74 import jdk.vm.ci.meta.Value;
  75 
  76 public class AArch64Move {
  77 
  78     public static class LoadInlineConstant extends AArch64LIRInstruction implements LoadConstantOp {
  79         public static final LIRInstructionClass<LoadInlineConstant> TYPE = LIRInstructionClass.create(LoadInlineConstant.class);
  80 
  81         private JavaConstant constant;
  82         @Def({REG, STACK}) AllocatableValue result;
  83 
  84         public LoadInlineConstant(JavaConstant constant, AllocatableValue result) {


 498                     }
 499                 } else {
 500                     masm.fldr(32, dst, (AArch64Address) crb.asFloatConstRef(input));
 501                 }
 502                 break;
 503             case Double:
 504                 if (AArch64MacroAssembler.isDoubleImmediate(input.asDouble())) {
 505                     masm.fmov(64, dst, input.asDouble());
 506                 } else if (crb.compilationResult.isImmutablePIC()) {
 507                     try (ScratchRegister scr = masm.getScratchRegister()) {
 508                         Register scratch = scr.getRegister();
 509                         masm.mov(scratch, Double.doubleToRawLongBits(input.asDouble()));
 510                         masm.fmov(64, dst, scratch);
 511                     }
 512                 } else {
 513                     masm.fldr(64, dst, (AArch64Address) crb.asDoubleConstRef(input));
 514                 }
 515                 break;
 516             case Object:
 517                 if (input.isNull()) {
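                         // Descriptive note: some runtime configurations designate a register
                         // whose value represents null (crb.nullRegister); when the builder
                         // requires it, that register is copied instead of materializing zero.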
 518                     if (crb.mustReplaceWithNullRegister(input)) {
 519                         masm.mov(64, dst, crb.nullRegister);
 520                     } else {
 521                         masm.mov(dst, 0);
 522                     }
 523                 } else if (crb.target.inlineObjects) {
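                         // Record a patch site for the constant and emit a mov of the
                         // 0xDEADDEADDEADDEAD placeholder (typically a movz/movk sequence);
                         // the runtime patches in the real object address at code installation.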
 524                     crb.recordInlineDataInCode(input);
 525                     masm.mov(dst, 0xDEADDEADDEADDEADL, true);
 526                 } else {
 527                     masm.ldr(64, dst, (AArch64Address) crb.recordDataReferenceInCode(input, 8));
 528                 }
 529                 break;
 530             default:
 531                 throw GraalError.shouldNotReachHere("kind=" + input.getJavaKind().getStackKind());
 532         }
 533     }
 534 
 535     private static void const2stack(CompilationResultBuilder crb, AArch64MacroAssembler masm, Value result, JavaConstant constant) {
 536         try (ScratchRegister addrReg = masm.getScratchRegister()) {
 537             StackSlot slot = (StackSlot) result;
 538             AArch64Address resultAddress = loadStackSlotAddress(crb, masm, slot, addrReg.getRegister());
 539             if (constant.isDefaultForKind() || constant.isNull()) {
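                     // Default-for-kind and null constants are all-zero bit patterns, so the
                     // zero register can be stored directly, avoiding a constant load.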
 540                 emitStore(crb, masm, (AArch64Kind) result.getPlatformKind(), resultAddress, zr.asValue(LIRKind.combine(result)));
 541             } else {
 542                 try (ScratchRegister sc = masm.getScratchRegister()) {
 543                     Value scratchRegisterValue = sc.getRegister().asValue(LIRKind.combine(result));
 544                     const2reg(crb, masm, scratchRegisterValue, constant);
 545                     emitStore(crb, masm, (AArch64Kind) result.getPlatformKind(), resultAddress, scratchRegisterValue);


 554      * 9-bit signed, which cannot be handled by that method.
 555      *
 556      * Instead we create an address ourselves. We use scaled unsigned addressing since we know the
 557      * transfer size, which gives us a 15-bit address range for longs/doubles and a 14-bit range
 558      * for everything else.
 559      *
 560      * @param scratch Scratch register that can be used to load the address. If Value.ILLEGAL, this
 561      *            instruction fails when we try to access a StackSlot that is too large to be
 562      *            addressed directly.
 563      * @return AArch64Address of the given StackSlot. Uses the scratch register if necessary.
 564      */
 565     private static AArch64Address loadStackSlotAddress(CompilationResultBuilder crb, AArch64MacroAssembler masm, StackSlot slot, AllocatableValue scratch) {
 566         Register scratchReg = Value.ILLEGAL.equals(scratch) ? zr : asRegister(scratch);
 567         return loadStackSlotAddress(crb, masm, slot, scratchReg);
 568     }
 569 
 570     private static AArch64Address loadStackSlotAddress(CompilationResultBuilder crb, AArch64MacroAssembler masm, StackSlot slot, Register scratchReg) {
 571         int displacement = crb.frameMap.offsetForStackSlot(slot);
 572         int transferSize = slot.getPlatformKind().getSizeInBytes();
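             // Scaled unsigned addressing multiplies a 12-bit immediate by the transfer
             // size: 4095 * 8 = 32760 reachable bytes for 8-byte accesses (~15 bits),
             // 4095 * 4 = 16380 for 4-byte ones (~14 bits). Larger displacements fall
             // back to materializing the address in scratchReg.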
 573         return masm.makeAddress(sp, displacement, scratchReg, transferSize, /* allowOverwrite */false);
 574     }
 575 
 576     public abstract static class PointerCompressionOp extends AArch64LIRInstruction {
 577 
 578         @Def({REG, HINT}) private AllocatableValue result;
 579         @Use({REG, CONST}) private Value input;
 580         @Alive({REG, ILLEGAL, UNINITIALIZED}) private AllocatableValue baseRegister;
 581 
 582         protected final CompressEncoding encoding;
 583         protected final boolean nonNull;
 584         protected final LIRKindTool lirKindTool;
 585 
 586         protected PointerCompressionOp(LIRInstructionClass<? extends PointerCompressionOp> type, AllocatableValue result, Value input,
 587                         AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull, LIRKindTool lirKindTool) {
 588 
 589             super(type);
 590             this.result = result;
 591             this.input = input;
 592             this.baseRegister = baseRegister;
 593             this.encoding = encoding;
 594             this.nonNull = nonNull;
 595             this.lirKindTool = lirKindTool;
 596         }
 597 
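         // Hedged note: position-independent (AOT) code cannot assume a fixed heap
         // base address, so a base register is always required when GeneratePIC is set.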
 598         public static boolean hasBase(OptionValues options, CompressEncoding encoding) {
 599             return GeneratePIC.getValue(options) || encoding.hasBase();
 600         }
 601 
 602         public final Value getInput() {
 603             return input;
 604         }
 605 
 606         public final AllocatableValue getResult() {
 607             return result;
 608         }
 609 
 610         protected final Register getResultRegister() {
 611             return asRegister(result);
 612         }
 613 
 614         protected final Register getBaseRegister(CompilationResultBuilder crb) {
 615             return hasBase(crb.getOptions(), encoding) ? asRegister(baseRegister) : Register.None;
 616         }
 617 
 618         protected final int getShift() {
 619             return encoding.getShift();
 620         }
 621 
 622         protected final void move(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
 623             AArch64Move.move(crb, masm, result, input);
 624         }
 625     }
 626 
 627     public static class CompressPointerOp extends PointerCompressionOp {
 628         public static final LIRInstructionClass<CompressPointerOp> TYPE = LIRInstructionClass.create(CompressPointerOp.class);
 629 
 630         public CompressPointerOp(AllocatableValue result, Value input, AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull, LIRKindTool lirKindTool) {
 631             this(TYPE, result, input, baseRegister, encoding, nonNull, lirKindTool);
 632         }
 633 
 634         private CompressPointerOp(LIRInstructionClass<? extends PointerCompressionOp> type, AllocatableValue result, Value input,
 635                         AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull, LIRKindTool lirKindTool) {
 636 
 637             super(type, result, input, baseRegister, encoding, nonNull, lirKindTool);
 638         }
 639 
 640         @Override
 641         protected void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
 642             Register resultRegister = getResultRegister();
 643             Register ptr = asRegister(getInput());
 644             Register base = getBaseRegister(crb);
 645             // result = (ptr - base) >> shift
 646             if (!encoding.hasBase()) {
 647                 if (encoding.hasShift()) {
 648                     masm.lshr(64, resultRegister, ptr, encoding.getShift());
 649                 } else {
 650                     masm.movx(resultRegister, ptr);
 651                 }
 652             } else if (nonNull) {
 653                 masm.sub(64, resultRegister, ptr, base);
 654                 if (encoding.hasShift()) {
 655                     masm.lshr(64, resultRegister, resultRegister, encoding.getShift());
 656                 }
 657             } else {
 658                 // if ptr is null it still has to be null after compression
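                 // cmov selects ptr when it is non-null and base otherwise, so the
                 // subtraction below yields 0 exactly in the null case.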
 659                 masm.cmp(64, ptr, 0);
 660                 masm.cmov(64, resultRegister, ptr, base, AArch64Assembler.ConditionFlag.NE);
 661                 masm.sub(64, resultRegister, resultRegister, base);
 662                 if (encoding.hasShift()) {
 663                     masm.lshr(64, resultRegister, resultRegister, encoding.getShift());
 664                 }
 665             }
 666         }
 667     }
 668 
 669     public static class UncompressPointerOp extends PointerCompressionOp {
 670         public static final LIRInstructionClass<UncompressPointerOp> TYPE = LIRInstructionClass.create(UncompressPointerOp.class);
 671 
 672         public UncompressPointerOp(AllocatableValue result, Value input, AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull, LIRKindTool lirKindTool) {
 673             this(TYPE, result, input, baseRegister, encoding, nonNull, lirKindTool);
 674         }
 675 
 676         private UncompressPointerOp(LIRInstructionClass<? extends PointerCompressionOp> type, AllocatableValue result, Value input,
 677                         AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull, LIRKindTool lirKindTool) {
 678             super(type, result, input, baseRegister, encoding, nonNull, lirKindTool);
 679         }
 680 
 681         @Override
 682         protected void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
 683             Register inputRegister = asRegister(getInput());
 684             Register resultRegister = getResultRegister();
 685             Register base = encoding.hasBase() ? getBaseRegister(crb) : null;
 686 
 687             // result = base + (ptr << shift)
 688             if (nonNull || base == null) {
 689                 masm.add(64, resultRegister, base == null ? zr : base, inputRegister, AArch64Assembler.ShiftType.LSL, encoding.getShift());
 690             } else {
 691                 // if ptr is null it has to be null after decompression
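                 // cbz skips the add when the compressed value is zero, so the result
                 // stays 0 rather than becoming the heap base.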
 692                 Label done = new Label();
 693                 if (!resultRegister.equals(inputRegister)) {
 694                     masm.mov(32, resultRegister, inputRegister);
 695                 }
 696                 masm.cbz(32, resultRegister, done);
 697                 masm.add(64, resultRegister, base, resultRegister, AArch64Assembler.ShiftType.LSL, encoding.getShift());
 698                 masm.bind(done);
 699             }
 700         }
 701     }
 702 
 703 }